text stringlengths 8 6.05M |
|---|
value1 = '1,4,7,-4,88,102,-1234'


def sort_list(integers):
    """Sort a comma-separated string of integers in descending numeric order.

    Returns a new comma-separated string; the input is not modified.
    """
    parts = integers.split(',')
    parts.sort(key=int, reverse=True)
    return ','.join(parts)


print(sort_list(value1))
|
"""Search/report page."""
import json
import flask
from dnstwister import app
import dnstwister.tools as tools
def html_render(domain):
    """Render the HTML report page for a single domain."""
    analysis = dict([tools.analyse(domain)])
    export_formats = {'json': 'json', 'csv': 'csv'}
    return flask.render_template(
        'www/report.html',
        reports=analysis,
        atoms={domain: tools.encode_domain(domain)},
        exports=export_formats,
        domain_encoded=tools.encode_domain(domain),
    )
def json_render(domain):
    """Render and return the json-formatted report.
    The hand-assembly is due to the streaming of the response.
    """
    reports = dict([tools.analyse(domain)])
    # NOTE(review): .encode('idna') returns bytes on Python 3; the str
    # concatenation below only works as written on Python 2 -- confirm
    # the target runtime.
    json_filename = 'dnstwister_report_{}.json'.format(domain.encode('idna'))

    def generate():
        """Streaming download generator."""
        indent_size = 4
        indent = ' ' * indent_size
        yield '{\n'
        # TODO: We only have one domain now, simplify this.
        # NOTE(review): original indentation was lost in this copy; the
        # closing braces below are read as per-domain (valid JSON for the
        # single-domain case) -- confirm against the original layout.
        for (dom, rept) in reports.items():
            yield indent + '"' + dom.encode('idna') + '": {\n'
            yield indent * 2 + '"fuzzy_domains": [\n'
            fuzzy_domains = rept['fuzzy_domains']
            for (j, entry) in enumerate(fuzzy_domains):
                # Resolve each candidate domain lazily, as it is streamed.
                ip_addr, error = tools.resolve(entry['domain-name'])
                data = {
                    'domain-name': entry['domain-name'].encode('idna'),
                    'fuzzer': entry['fuzzer'],
                    'hex': entry['hex'],
                    'resolution': {
                        'error': error,
                        'ip': ip_addr,
                    },
                }
                json_str = json.dumps(
                    data,
                    sort_keys=True,
                    indent=indent_size,
                    separators=(',', ': ')
                )
                # Re-indent the dumped entry so it nests inside the array.
                yield '\n'.join([indent * 3 + line
                                 for line
                                 in json_str.split('\n')])
                # Comma between entries, but not after the last one.
                if j < len(fuzzy_domains) - 1:
                    yield ','
                yield '\n'
            yield indent * 2 + ']\n'
            yield indent + '}'
            yield '\n'
        yield '}\n'
    return flask.Response(
        generate(),
        headers={
            'Content-Disposition': 'attachment; filename=' + json_filename
        },
        content_type='application/json'
    )
def csv_render(domain):
    """Render and return the csv-formatted report."""
    headers = ('Domain', 'Type', 'Tweak', 'IP', 'Error')
    reports = dict([tools.analyse(domain)])
    # NOTE(review): .encode('idna') returns bytes on Python 3; the row
    # joining below only works as written on Python 2 -- confirm runtime.
    csv_filename = 'dnstwister_report_{}.csv'.format(domain.encode('idna'))

    def generate():
        """Streaming download generator."""
        yield ','.join(headers) + '\n'
        for (domain, rept) in reports.items():
            for entry in rept['fuzzy_domains']:
                # Resolve each candidate lazily, one CSV row at a time.
                ip_addr, error = tools.resolve(entry['domain-name'])
                row = (
                    domain.encode('idna'),
                    entry['fuzzer'],
                    entry['domain-name'].encode('idna'),
                    str(ip_addr),
                    str(error),
                )
                # comma not possible in any of the row values.
                yield u','.join(row) + '\n'
    return flask.Response(
        generate(),
        headers={
            'Content-Disposition': 'attachment; filename=' + csv_filename
        },
        mimetype='text/csv',
    )
@app.route('/search', methods=['POST'])
def search_post():
    """Handle the search form submit: validate and redirect to the report."""
    form = flask.request.form
    if 'domains' not in form:
        app.logger.info(
            'Missing "domains" key from POST: {}'.format(form)
        )
        return flask.redirect('/error/2')
    post_data = form['domains']
    if post_data is None or post_data.strip() == '':
        app.logger.info(
            'No data in "domains" key in POST'
        )
        return flask.redirect('/error/2')
    search_parameter = tools.encode_domain(post_data)
    if search_parameter is None:
        app.logger.info(
            'Invalid POST Unicode data:{}'.format(repr(post_data))
        )
        return flask.redirect('/error/0')
    return flask.redirect('/search/{}'.format(search_parameter))
def handle_invalid_domain(search_term_as_hex):
    """Called when no valid domain found in GET param, creates a suggestion
    to return to the user.

    Always redirects to the error page, carrying a suggested domain in the
    query string when one can be derived from the hex-encoded search term.
    """
    decoded_search = None
    try:
        decoded_search = tools.decode_domain(search_term_as_hex)
    except Exception:
        # Bug fix: this was a bare "except:", which would also swallow
        # SystemExit/KeyboardInterrupt. Decode failures are expected for
        # malformed input, so we deliberately continue without a suggestion.
        pass
    if decoded_search is not None:
        suggestion = tools.suggest_domain(decoded_search)
        if suggestion is not None:
            app.logger.info(
                'Not a valid domain in GET: {}, suggesting: {}'.format(
                    search_term_as_hex, suggestion
                )
            )
            encoded_suggestion = tools.encode_domain(suggestion)
            return flask.redirect(
                '/error/0?suggestion={}'.format(encoded_suggestion)
            )
    app.logger.info(
        'Not a valid domain in GET: {}'.format(search_term_as_hex)
    )
    return flask.redirect('/error/0')
@app.route('/search/<search_domain>')
@app.route('/search/<search_domain>/<fmt>')
def search(search_domain, fmt=None):
    """Handle redirect from form submit.

    Dispatches to the HTML, JSON or CSV renderer based on the optional
    format suffix; unknown formats get a 400.
    """
    domain = tools.parse_post_data(search_domain)
    if domain is None:
        return handle_invalid_domain(search_domain)
    renderers = {None: html_render, 'json': json_render, 'csv': csv_render}
    try:
        renderer = renderers[fmt]
    except KeyError:
        flask.abort(400, 'Unknown export format: {}'.format(fmt))
    return renderer(domain)
|
"""
This is an example for image reading
"""
import h5py
import matplotlib.pyplot as plt
read_path = '/media/blade/road_hackers/training_images/137.h5'
image_object = h5py.File(read_path, 'r')
for utc_time in image_object: # utc_time is a string
print(utc_time)
# get an image from 137.h5 dictionary
selected_image = image_object[utc_time]
# print information of selected_image dataset
print(selected_image)
# access to one pixel
print(selected_image[100][100])
print(len(image_object))
# display selected image
plt.imshow(selected_image)
plt.show()
image_object.close()
|
from __future__ import division
import sys
import traceback
from PyQt5.QtWidgets import QApplication, QDialog, QTextBrowser, QLineEdit
from PyQt5.QtWidgets import QVBoxLayout
class Form(QDialog):
    """A minimal calculator dialog: type an expression, press Enter."""

    def __init__(self, parent=None):
        super(Form, self).__init__(parent)
        # NOTE: "broswer" (sic) kept -- renaming would break external users.
        self.broswer = QTextBrowser()  # display
        self.lineedit = QLineEdit("Type an expression and press Enter")  # input
        self.lineedit.selectAll()  # pre-select the hint text
        layout = QVBoxLayout()
        layout.addWidget(self.broswer)
        layout.addWidget(self.lineedit)
        self.setLayout(layout)
        self.lineedit.setFocus()
        # Keep only the last 5 result lines in the display.
        setting = self.broswer.document()
        setting.setMaximumBlockCount(5)
        self.lineedit.returnPressed.connect(self.updateUi)
        self.setWindowTitle("Calculate")

    def updateUi(self):
        """Evaluate the typed expression and append the result to the display."""
        # Bug fix: read the text *before* the try block. Previously, if
        # str(self.lineedit.text()) itself raised, the except branch would
        # hit a NameError on the unbound "text".
        text = str(self.lineedit.text())
        try:
            # SECURITY: eval() executes arbitrary code. Acceptable only for
            # a local toy calculator -- never expose this to untrusted input.
            self.broswer.append("%s = <b>%s</b>" % (text, eval(text)))
        except Exception:
            traceback.print_exc()
            self.broswer.append("<font color=red>%s is invalid</font>" % text)
# Application bootstrap: create the Qt app, show the dialog, and block
# in the event loop until the window is closed.
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
|
class Solution(object):
    def findDisappearedNumbers(self, nums):
        """Return every integer in [1, len(nums)] that is missing from nums.

        :type nums: List[int]
        :rtype: List[int] (ascending order)
        """
        # Bug fix: the original used dict.iteritems(), which only exists on
        # Python 2. A set gives O(1) membership tests and replaces the whole
        # dict-counting bookkeeping; results stay in ascending order.
        seen = set(nums)
        return [i for i in range(1, len(nums) + 1) if i not in seen]
import pafy
import numpy as np
import time
from tqdm import tqdm
import tensorflow as tf
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
print('tensorflow.__version__', tf.__version__)
#print('GPU name test',tf.test.gpu_device_name())
import cv2
print('cv2 version',cv2.__version__)
import imutils
#Get youtube stream url by IDT
url = 'fdqOdTvGc9I'  # YouTube video id
vPafy = pafy.new(url)
play = vPafy.getbest()  # best available stream for this video
print(play.resolution, play.extension, play.get_filesize())

# initialize the video stream
cap = cv2.VideoCapture(play.url)
frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(cap.get(cv2.CAP_PROP_FPS))
fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
out = cv2.VideoWriter("output.mov", fourcc, fps, (frame_width, frame_height))

# Prime three consecutive frames for the frame-differencing pipeline below.
ret, frame1 = cap.read()
ret, frame2 = cap.read()
ret, frame3 = cap.read()
print(frame1.shape)

#Load NN
mp = './KersModel.h5'  # path to the pretrained Keras classifier
model = tf.keras.models.load_model(mp)
targetxy = (96, 96)  # crop size fed to the model -- presumably its input size, confirm
print('NN model loaded')
def statslambda(stat, cf, fm):
    """Filter one connected-component stats row into the candidate list.

    Components with area outside (50, 500) or an aspect ratio beyond 2:1
    are discarded. Otherwise the per-corner hit counter fm[x, y] is bumped
    (only when the exact box was seen before, or never at all), and brand
    new boxes are appended to cf. Returns the updated (cf, fm) pair.
    """
    area = stat[cv2.CC_STAT_AREA]
    box = (stat[cv2.CC_STAT_LEFT], stat[cv2.CC_STAT_TOP],
           stat[cv2.CC_STAT_WIDTH], stat[cv2.CC_STAT_HEIGHT])
    (x, y, w, h) = box
    rejected = area < 50 or area > 500 or (h / w) > 2 or (w / h) > 2
    if not rejected:
        if fm[x, y] > 0:
            # Corner already seen: count it again only for an identical box.
            for known_box in cf:
                if box == known_box:
                    fm[x, y] += 1
                    break
        elif fm[x, y] == 0:
            fm[x, y] += 1
            cf.append(box)
    return cf, fm
# Main loop: for 700 frames, detect moving components via three-frame
# differencing, classify candidate crops with the CNN, and write an
# annotated output video.
for i in tqdm(range(700)):
#while cap.isOpened():
    ret, frame4 = cap.read()
    # Grayscale absolute differences between pairs of the 4-frame window.
    diffm1 = cv2.absdiff(cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY),
                         cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY))
    diff = cv2.absdiff(cv2.cvtColor(frame4, cv2.COLOR_BGR2GRAY),
                       cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY))
    diffp1 = cv2.absdiff(cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY),
                         cv2.cvtColor(frame3, cv2.COLOR_BGR2GRAY))
    ret, dbp = cv2.threshold(diffp1, 10, 255, cv2.THRESH_BINARY)
    ret, dbm = cv2.threshold(diffm1, 10, 255, cv2.THRESH_BINARY)
    ret, db0 = cv2.threshold(diff, 10, 255, cv2.THRESH_BINARY)
    # Pixels that changed in both spans -> persistent motion.
    diff = cv2.bitwise_and(dbm, db0)
    num, labels, stats, centroids = cv2.connectedComponentsWithStats(diff, ltype=cv2.CV_16U, connectivity=8)
    # Motion present in the outer spans but absent between frames 2 and 3.
    difffast = cv2.bitwise_and(cv2.bitwise_and(dbm, db0),
                               cv2.bitwise_not(dbp))
    numf, labelsf, statsf, centroidsf = cv2.connectedComponentsWithStats(difffast, ltype=cv2.CV_16U, connectivity=8)
    contoursFilered = []
    contoursBall = []
    frame_contur = frame1.copy()
    # Per-pixel hit counter for candidate box corners (indexed [x, y]).
    fm = np.zeros((frame_width, frame_height), np.int16)
    for stat in stats:
        contoursFilered, fm = statslambda(stat, contoursFilered, fm)
    for stat in statsf:
        contoursFilered, fm = statslambda(stat, contoursFilered, fm)
    crop_imgs = []
    for l in enumerate(contoursFilered):
        (x, y, w, h) = l[1]
        if fm[x, y] == 1:
            rectcolor = (255, 255, 255)  # seen once: white box
        else:
            rectcolor = (0, 255, 255)    # repeated corner: yellow box
        cv2.rectangle(frame_contur, (x, y), (x + w, y + h), rectcolor, 2)
        # Crop, resize to the model input and convert BGR->RGB for Keras.
        crop_img = frame_contur[y:y + h, x:x + w].copy()
        crop_img = cv2.cvtColor(cv2.resize(crop_img, targetxy), cv2.COLOR_BGR2RGB)
        crop_imgs.append(preprocess_input(crop_img))
    X = np.array(crop_imgs)
    if X.shape[0] > 0:
        # Classify all candidate crops in one batch.
        Y = model.predict(X)
        rectcolor = (0, 0, 255)
        for p in enumerate(Y.reshape(-1)):
            if p[1] > 0.8:
                # Confident detection: circle it and annotate the score.
                (x, y, w, h) = contoursFilered[p[0]]
                cv2.circle(frame_contur, (x + w//2, y + h//2), (h + w)//2, rectcolor, 2)
                cv2.putText(frame_contur, str(p), (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, rectcolor, 2)
    out.write(frame_contur)
    # Slide the 3-frame window forward.
    frame1 = frame2
    frame2 = frame3
    frame3 = frame4
cv2.destroyAllWindows()
cap.release()
out.release()
import sys
import numpy as np
import matplotlib.pyplot as plt
#plt.switch_backend('agg')
filename = 'PA_final.dat'

# x[i]/y[i] collect column 0 / column 6 values for state index i (column 2).
x = [[] for i in range(3)]
y = [[] for i in range(3)]

# Command line selects which rows to plot: <shells> <hw>.
shells = int(sys.argv[1])
hw = float(sys.argv[2])

with open(filename) as f:
    data = f.read()
data = data.split('\n')
# Skip the two header lines. Bug fix: also skip blank/short rows -- the
# file's trailing newline used to make line[1] raise an IndexError.
for num in range(2, len(data)):
    line = data[num].split()
    if len(line) < 7:
        continue
    if int(line[1]) == shells and float(line[4]) == hw:
        ind = int(line[2])
        x[ind].append(float(line[0]))
        y[ind].append(float(line[6]))

plt.plot(x[0], y[0], x[1], y[1], x[2], y[2])
plt.show()
|
#!/usr/bin/python
import sys
import os
# Build a test database populated with coloured-square images, exercising
# whichever historical database API version is named on the command line.
if( __name__ == '__main__' ):
    # Version under test, e.g. "8.1" -> (8, 1); selects the API dialect below.
    ver = tuple( map( int, sys.argv[1].split( '.' ) ) )

    hdbfs = None

    if( ver[0] > 8 or ver[0] == 8 and ver[1] > 0 ):
        # 8.1+ ships the library under its new name.
        import hdbfs
    else:
        # Older releases called the library "higu"; alias it to the new name.
        import higu
        hdbfs = higu

    if( ver[0] >= 5 ):
        # NOTE(review): original indentation was lost in this copy; this
        # tweak is read as applying to every version >= 5 -- confirm.
        hdbfs.ark.MIN_THUMB_EXP = 2

    # Open/initialise the database with the selected version's API.
    if( ver[0] < 5 ):
        hdbfs.DEFAULT_ENVIRON = os.environ['MKDB_LIB_PATH']
        h = hdbfs.init_default()
    elif( ver[0] < 8 or ver[0] == 8 and ver[1] == 0 ):
        hdbfs.init( 'build_dbs.cfg' )
        h = hdbfs.Database()
    else:
        hdbfs.init( os.environ['MKDB_LIB_PATH'] )
        h = hdbfs.Database()

    if( ver[0] >= 8 ):
        h.enable_write_access()

    # Register one square image per colour.
    mo = h.register_file( 'magenta_sq.png' )
    ro = h.register_file( 'red_sq.png' )
    yo = h.register_file( 'yellow_sq.png' )
    go = h.register_file( 'green_sq.png' )
    co = h.register_file( 'cyan_sq.png' )
    bo = h.register_file( 'blue_sq.png' )
    if( ver == ( 1, 0, ) ):
        wo = h.register_file( 'white_sq.png' )
    else:
        # add_name was introduced after 1.0.
        wo = h.register_file( 'white_sq.png', add_name = False )
    lo = h.register_file( 'grey_sq.png' )
    # NOTE(review): "lo" is immediately rebound to the second grey image;
    # presumably deliberate (duplicate handling is tested below) -- confirm.
    lo = h.register_file( 'grey_sq2.png' )
    ko = h.register_file( 'black_sq.png' )

    if( ver[0] > 7 ):
        wo.rotate( 1 )

    # Exercise thumbnail access with the selected version's API.
    if( ver[0] < 5 ):
        pass
    elif( ver[0] < 10 ):
        if( ver[0] < 8 ):
            # Old versions of the database don't move the image files until
            # commit is called. This causes read_thumb() to fail
            h.commit()
        wo.read_thumb( 10 )
        lo.read_thumb( 3 )
        lo.read_thumb( 4 )
        ko.read_thumb( 3 )
        ko.read_thumb( 4 )
    else:
        # v10+ renamed the accessor.
        wo.get_thumb_stream( 10 )
        lo.get_thumb_stream( 3 )
        lo.get_thumb_stream( 4 )
        ko.get_thumb_stream( 3 )
        ko.get_thumb_stream( 4 )

    # Tagging: plain tag() strings before v4, tag objects from v4 on.
    if( ver[0] < 4 ):
        mo.tag( 'colour' )
        ro.tag( 'colour' )
        yo.tag( 'colour' )
        go.tag( 'colour' )
        co.tag( 'colour' )
        bo.tag( 'colour' )
        mo.tag( 'warm' )
        ro.tag( 'warm' )
        yo.tag( 'warm' )
        go.tag( 'cool' )
        co.tag( 'cool' )
        bo.tag( 'cool' )
        wo.tag( 'greyscale' )
        lo.tag( 'greyscale' )
        ko.tag( 'greyscale' )
        ro.tag( 'red' )
        wo.tag( 'white' )
        lo.tag( 'grey' )
        ko.tag( 'black' )
    else:
        cl = h.make_tag( 'colour' )
        mo.assign( cl )
        ro.assign( cl )
        yo.assign( cl )
        go.assign( cl )
        co.assign( cl )
        bo.assign( cl )
        wc = h.make_tag( 'warm' )
        mo.assign( wc )
        ro.assign( wc )
        yo.assign( wc )
        cc = h.make_tag( 'cool' )
        go.assign( cc )
        co.assign( cc )
        bo.assign( cc )
        bw = h.make_tag( 'greyscale' )
        wo.assign( bw )
        lo.assign( bw )
        ko.assign( bw )
        ro.assign( h.make_tag( 'red' ) )
        wo.assign( h.make_tag( 'white' ) )
        lo.assign( h.make_tag( 'grey' ) )
        ko.assign( h.make_tag( 'black' ) )

    # Grouping: parent links (<2), albums via add_file (<4), albums via
    # assign without ordering (<5), ordered assign plus album text (5+).
    if( ver[0] < 2 ):
        ro.set_parent( mo )
        yo.set_parent( mo )
        go.set_parent( mo )
        co.set_parent( mo )
        bo.set_parent( mo )
    elif( ver[0] < 4 ):
        al = h.create_album()
        al.add_file( mo, 5 )
        al.add_file( ro, 4 )
        al.add_file( yo, 3 )
        al.add_file( go, 2 )
        al.add_file( co, 1 )
        al.add_file( bo, 0 )
        al.register_name( 'colours' )
        al.tag( 'colour_album' )
        al = h.create_album()
        al.add_file( wo )
        al.add_file( bo )
        al.register_name( 'white_and_blue' )
        al.tag( 'white_blue_album' )
    elif( ver[0] < 5 ):
        al = h.create_album()
        mo.assign( al )
        ro.assign( al )
        yo.assign( al )
        go.assign( al )
        co.assign( al )
        bo.assign( al )
        al.register_name( 'colours' )
        al.assign( h.make_tag( 'colour_album' ) )
        al = h.create_album()
        wo.assign( al )
        bo.assign( al )
        al.register_name( 'white_and_blue' )
        al.assign( h.make_tag( 'white_blue_album' ) )
    else:
        al = h.create_album()
        mo.assign( al, 5 )
        ro.assign( al, 4 )
        yo.assign( al, 3 )
        go.assign( al, 2 )
        co.assign( al, 1 )
        bo.assign( al, 0 )
        al.add_name( 'colours' )
        al.assign( h.make_tag( 'colour_album' ) )
        al = h.create_album()
        wo.assign( al )
        bo.assign( al )
        al.add_name( 'white_and_blue' )
        al.assign( h.make_tag( 'white_blue_album' ) )
        al.set_text( 'White & Blue' )

    # Variant/duplicate links; "varient" is the historical misspelled API.
    if( ver[0] > 8 or ver[0] == 8 and ver[1] > 0 ):
        lo.set_variant_of( wo )
        bo.set_variant_of( ko )
    else:
        lo.set_varient_of( wo )
        bo.set_varient_of( ko )
    # NOTE(review): read as running for every version -- confirm.
    ko.set_duplicate_of( lo )

    if( ver[0] < 8 ):
        h.commit()

# vim:sts=4:et:sw=4
|
def result(points):
    """Given three corners of an axis-aligned rectangle, print the fourth.

    Each coordinate of the missing corner is the value that appears only
    once among the three given corners.
    """
    xs = sorted(point[0] for point in points)
    ys = sorted(point[1] for point in points)
    rtx = xs[2] if xs[0] == xs[1] else xs[0]
    rty = ys[2] if ys[0] == ys[1] else ys[0]
    print(rtx, rty)
# Read the three known corners from stdin, one "x y" pair per line.
points = [list(map(int, input().split())) for _ in range(3)]
result(points)
|
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
filename = 'hahu0291@uni.sydney.edu.au--N168554113.csv'

# Read the raw export, drop rows with no value in column 5, keep only the
# first occurrence of each value in column 0, then write the result out.
full = pd.read_csv(filename, index_col=False, header=None, low_memory=False)
cleaned = full.dropna(axis=0, subset=[5]).drop_duplicates(subset=[0])
cleaned.to_csv('nodup.csv')
|
# Generated by Django 3.1.1 on 2020-11-10 14:25
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the rest_api app: Comment, Pictures, Publication,
    User, Tag, Subscription and Like, plus the FK fields added once their
    target models exist."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('comment', models.TextField(verbose_name='Комментарий')),
                ('date', models.DateTimeField(auto_now_add=True, verbose_name='Время комментария')),
            ],
            options={
                'verbose_name': 'Комментарий',
                'verbose_name_plural': 'Комментарии',
            },
        ),
        migrations.CreateModel(
            name='Pictures',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('url', models.CharField(max_length=256, verbose_name='Ссылка')),
            ],
            options={
                'verbose_name': 'Картинка',
                'verbose_name_plural': 'Картинки',
            },
        ),
        migrations.CreateModel(
            name='Publication',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(blank=True, max_length=1024, null=True, verbose_name='Заголовок')),
                ('description', models.TextField(blank=True, null=True, verbose_name='Описание')),
                ('date', models.DateTimeField(auto_now_add=True, verbose_name='Дата создания')),
            ],
            options={
                'verbose_name': 'Публикация',
                'verbose_name_plural': 'Публикации',
            },
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128, verbose_name='Никнейм')),
                ('birthday', models.DateField(verbose_name='Дата рождения')),
                ('registration_date', models.DateField(verbose_name='Дата регистрации')),
                ('email', models.EmailField(max_length=254, verbose_name='Email')),
                ('description', models.TextField(blank=True, null=True, verbose_name='Описание')),
                # Avatar survives picture deletion as NULL (SET_NULL).
                ('avatar', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='rest_api.pictures', verbose_name='Аватарка')),
            ],
            options={
                'verbose_name': 'Пользователь',
                'verbose_name_plural': 'Пользователи',
            },
        ),
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(blank=True, max_length=128, null=True, verbose_name='Заголовок')),
                ('publication', models.ManyToManyField(blank=True, to='rest_api.Publication', verbose_name='Публиация')),
            ],
            options={
                'verbose_name': 'Тэг',
                'verbose_name_plural': 'Тэги',
            },
        ),
        migrations.CreateModel(
            name='Subscription',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField(auto_now_add=True, verbose_name='Дата подписки')),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='author', to='rest_api.user', verbose_name='Автор')),
                ('subscriber', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='subscriber', to='rest_api.user', verbose_name='Подписчик')),
            ],
            options={
                'verbose_name': 'Подписка',
                'verbose_name_plural': 'Подписки',
            },
        ),
        # FKs added after User exists to avoid forward references.
        migrations.AddField(
            model_name='publication',
            name='author',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='rest_api.user', verbose_name='Автор'),
        ),
        migrations.AddField(
            model_name='pictures',
            name='author',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='rest_api.user', verbose_name='Автор'),
        ),
        migrations.AddField(
            model_name='pictures',
            name='publication',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='rest_api.publication', verbose_name='Публикация'),
        ),
        # A Like targets either a comment or a publication (both nullable).
        migrations.CreateModel(
            name='Like',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField(auto_now_add=True, verbose_name='Время комментария')),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='rest_api.user', verbose_name='Автор')),
                ('comment', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='rest_api.comment', verbose_name='Комментарий')),
                ('publication', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='rest_api.publication', verbose_name='Публиация')),
            ],
            options={
                'verbose_name': 'Комментарий',
                'verbose_name_plural': 'Комментарии',
            },
        ),
        migrations.AddField(
            model_name='comment',
            name='author',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='rest_api.user', verbose_name='Автор'),
        ),
        migrations.AddField(
            model_name='comment',
            name='publication',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='rest_api.publication', verbose_name='Публиация'),
        ),
    ]
|
import cv2
import os
import numpy as np
class SimpleDatasetLoader:
    """Load images from disk, apply preprocessors, and filter by label set.

    The label of each image is taken from the name of its parent directory.
    """

    def __init__(self, preprocessors=None, labels_set=None):
        # Fall back to fresh empty containers; never share mutable defaults.
        self.preprocessors = [] if preprocessors is None else preprocessors
        self.labels_set = set() if labels_set is None else labels_set

    def load(self, image_paths, verbose=50):
        """Read every path whose label is in labels_set.

        Prints a progress line every *verbose* accepted images (0/None
        disables). Returns (data, labels) as numpy arrays.
        """
        data, labels = [], []
        count = 0
        for i, image_path in enumerate(image_paths):
            # The parent directory name is the class label.
            label = image_path.split(os.path.sep)[-2]
            if label not in self.labels_set:
                continue
            count += 1
            if verbose and count % verbose == 0:
                print(f'[INFO] loading {count} images...')
            # Load the image and run it through the preprocessing chain.
            image = cv2.imread(image_path)
            for preprocessor in self.preprocessors:
                image = preprocessor.preprocess(image)
            data.append(image)
            labels.append(label)
        return np.array(data), np.array(labels)
while 1:
n=int(input())
if n==0: break
a=[int(input()) for i in range(n)]
temp = 0
most = -999999999
for i in range(n):
temp = 0
for j in range(i,n):
temp += a[j]
if temp > most:
most = temp
print(most) |
# Copyright 1998-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
__all__ = [
"close_portdbapi_caches", "FetchlistDict", "portagetree", "portdbapi"
]
import portage
portage.proxy.lazyimport.lazyimport(globals(),
'portage.checksum',
'portage.data:portage_gid,secpass',
'portage.dbapi.dep_expand:dep_expand',
'portage.dep:Atom,dep_getkey,match_from_list,use_reduce,_match_slot',
'portage.package.ebuild.doebuild:doebuild',
'portage.util:ensure_dirs,shlex_split,writemsg,writemsg_level',
'portage.util.listdir:listdir',
'portage.versions:best,catpkgsplit,_pkgsplit@pkgsplit,ver_regexp,_pkg_str',
)
from portage.cache import volatile
from portage.cache.cache_errors import CacheError
from portage.cache.mappings import Mapping
from portage.dbapi import dbapi
from portage.exception import PortageException, \
FileNotFound, InvalidAtom, InvalidData, \
InvalidDependString, InvalidPackageName
from portage.localization import _
from portage import eclass_cache, \
eapi_is_supported, \
_eapi_is_deprecated
from portage import os
from portage import _encodings
from portage import _unicode_encode
from portage import OrderedDict
from portage.util._eventloop.EventLoop import EventLoop
from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
import os as _os
import sys
import traceback
import warnings
if sys.hexversion >= 0x3000000:
basestring = str
long = int
class portdbapi(dbapi):
"""this tree will scan a portage directory located at root (passed to init)"""
portdbapi_instances = []
_use_mutable = True
@property
def _categories(self):
    """Valid category names, as provided by the active settings instance."""
    return self.settings.categories
@property
def porttree_root(self):
    """Filesystem location of the main repository (PORTDIR)."""
    return self.settings.repositories.mainRepoLocation()
@property
def eclassdb(self):
    """Eclass database of the main repo, or None when no main repo exists."""
    main_repo = self.repositories.mainRepo()
    if main_repo is None:
        return None
    return main_repo.eclass_db
def __init__(self, _unused_param=None, mysettings=None):
    """
    @param _unused_param: deprecated, use mysettings['PORTDIR'] instead
    @type _unused_param: None
    @param mysettings: an immutable config instance
    @type mysettings: portage.config
    """
    portdbapi.portdbapi_instances.append(self)
    from portage import config
    if mysettings:
        self.settings = mysettings
    else:
        # No explicit config given: clone the global settings instance.
        from portage import settings
        self.settings = config(clone=settings)
    if _unused_param is not None:
        warnings.warn("The first parameter of the " + \
            "portage.dbapi.porttree.portdbapi" + \
            " constructor is unused since portage-2.1.8. " + \
            "mysettings['PORTDIR'] is used instead.",
            DeprecationWarning, stacklevel=2)
    self.repositories = self.settings.repositories
    self.treemap = self.repositories.treemap
    # This is strictly for use in aux_get() doebuild calls when metadata
    # is generated by the depend phase. It's safest to use a clone for
    # this purpose because doebuild makes many changes to the config
    # instance that is passed in.
    self.doebuild_settings = config(clone=self.settings)
    self._event_loop = EventLoop(main=False)
    self.depcachedir = os.path.realpath(self.settings.depcachedir)
    if os.environ.get("SANDBOX_ON") == "1":
        # Make api consumers exempt from sandbox violations
        # when doing metadata cache updates.
        sandbox_write = os.environ.get("SANDBOX_WRITE", "").split(":")
        if self.depcachedir not in sandbox_write:
            sandbox_write.append(self.depcachedir)
            os.environ["SANDBOX_WRITE"] = \
                ":".join(filter(None, sandbox_write))
    self.porttrees = list(self.settings.repositories.repoLocationList())
    # This is used as sanity check for aux_get(). If there is no
    # root eclass dir, we assume that PORTDIR is invalid or
    # missing. This check allows aux_get() to detect a missing
    # portage tree and return early by raising a KeyError.
    self._have_root_eclass_dir = os.path.isdir(
        os.path.join(self.settings.repositories.mainRepoLocation(), "eclass"))
    #if the portdbapi is "frozen", then we assume that we can cache everything (that no updates to it are happening)
    self.xcache = {}
    self.frozen = 0
    #Keep a list of repo names, sorted by priority (highest priority first).
    self._ordered_repo_name_list = tuple(reversed(self.repositories.prepos_order))
    self.auxdbmodule = self.settings.load_best_module("portdbapi.auxdbmodule")
    self.auxdb = {}
    self._pregen_auxdb = {}
    # If the current user doesn't have depcachedir write permission,
    # then the depcachedir cache is kept here read-only access.
    self._ro_auxdb = {}
    self._init_cache_dirs()
    try:
        depcachedir_st = os.stat(self.depcachedir)
        depcachedir_w_ok = os.access(self.depcachedir, os.W_OK)
    except OSError:
        # Missing/unreadable cache dir: treat as not writable.
        depcachedir_st = None
        depcachedir_w_ok = False
    cache_kwargs = {}
    depcachedir_unshared = False
    if portage.data.secpass < 1 and \
        depcachedir_w_ok and \
        depcachedir_st is not None and \
        os.getuid() == depcachedir_st.st_uid and \
        os.getgid() == depcachedir_st.st_gid:
        # If this user owns depcachedir and is not in the
        # portage group, then don't bother to set permissions
        # on cache entries. This makes it possible to run
        # egencache without any need to be a member of the
        # portage group.
        depcachedir_unshared = True
    else:
        cache_kwargs.update({
            'gid'   : portage_gid,
            'perms' : 0o664
        })
    # If secpass < 1, we don't want to write to the cache
    # since then we won't be able to apply group permissions
    # to the cache entries/directories.
    if (secpass < 1 and not depcachedir_unshared) or not depcachedir_w_ok:
        # Fall back to an in-memory writable cache, keeping the on-disk
        # cache available read-only where possible.
        for x in self.porttrees:
            self.auxdb[x] = volatile.database(
                self.depcachedir, x, self._known_keys,
                **cache_kwargs)
            try:
                self._ro_auxdb[x] = self.auxdbmodule(self.depcachedir, x,
                    self._known_keys, readonly=True, **cache_kwargs)
            except CacheError:
                pass
    else:
        for x in self.porttrees:
            if x in self.auxdb:
                continue
            # location, label, auxdbkeys
            self.auxdb[x] = self.auxdbmodule(
                self.depcachedir, x, self._known_keys, **cache_kwargs)
    if "metadata-transfer" not in self.settings.features:
        for x in self.porttrees:
            if x in self._pregen_auxdb:
                continue
            cache = self._create_pregen_cache(x)
            if cache is not None:
                self._pregen_auxdb[x] = cache
    # Selectively cache metadata in order to optimize dep matching.
    self._aux_cache_keys = set(
        ["DEPEND", "EAPI", "HDEPEND",
        "INHERITED", "IUSE", "KEYWORDS", "LICENSE",
        "PDEPEND", "PROPERTIES", "PROVIDE", "RDEPEND", "repository",
        "RESTRICT", "SLOT", "DEFINED_PHASES", "REQUIRED_USE"])
    self._aux_cache = {}
    self._broken_ebuilds = set()
def _create_pregen_cache(self, tree):
    """Return the pregenerated metadata cache for *tree*, or None.

    When the cache backend supports it, its eclass database is attached
    so entries can be validated against eclass checksums.
    """
    repo_config = self.repositories.get_repo_for_location(tree)
    cache = repo_config.get_pregenerated_cache(
        self._known_keys, readonly=True)
    if cache is None:
        return None
    try:
        cache.ec = self.repositories.get_repo_for_location(tree).eclass_db
    except AttributeError:
        # Backends without an "ec" slot simply go without eclass data.
        pass
    return cache
def _init_cache_dirs(self):
    """Create /var/cache/edb/dep and adjust permissions for the portage
    group."""
    dirmode = 0o2070
    modemask = 0o2
    try:
        ensure_dirs(self.depcachedir, gid=portage_gid,
            mode=dirmode, mask=modemask)
    except PortageException:
        # Best-effort: a read-only cache dir is handled later in __init__.
        pass
def close_caches(self):
    """Flush every per-repo metadata cache to disk and drop them all."""
    if not hasattr(self, "auxdb"):
        # unhandled exception thrown from constructor
        return
    for location in self.auxdb:
        self.auxdb[location].sync()
    self.auxdb.clear()
def flush_cache(self):
    """Write pending metadata cache updates to disk without closing them."""
    for cache in list(self.auxdb.values()):
        cache.sync()
def findLicensePath(self, license_name):
    """Locate *license_name* under licenses/ in any configured repo.

    Repos are scanned from highest to lowest priority; the first readable
    match wins. Returns the path, or None when no repo provides it.
    """
    for tree in reversed(self.porttrees):
        candidate = os.path.join(tree, "licenses", license_name)
        if os.access(candidate, os.R_OK):
            return candidate
    return None
def findname(self, mycpv, mytree=None, myrepo=None):
    """Return only the ebuild path from findname2() (drops the tree part)."""
    return self.findname2(mycpv, mytree, myrepo)[0]
def getRepositoryPath(self, repository_id):
    """
    This function is required for GLEP 42 compliance; given a valid repository ID
    it must return a path to the repository
    TreeMap = { id:path }

    Returns None for an unknown repository_id.
    """
    return self.treemap.get(repository_id)
def getRepositoryName(self, canonical_repo_path):
    """
    This is the inverse of getRepositoryPath().
    @param canonical_repo_path: the canonical path of a repository, as
        resolved by os.path.realpath()
    @type canonical_repo_path: String
    @return: The repo_name for the corresponding repository, or None
        if the path does not correspond a known repository
    @rtype: String or None
    """
    try:
        return self.repositories.get_name_for_location(canonical_repo_path)
    except KeyError:
        return None
def getRepositories(self):
    """
    This function is required for GLEP 42 compliance; it will return a list of
    repository IDs
    TreeMap = {id: path}

    The IDs are ordered by priority, highest priority first.
    """
    return self._ordered_repo_name_list
def getMissingRepoNames(self):
    """
    Returns a list of repository paths that lack profiles/repo_name.
    """
    return self.settings.repositories.missing_repo_names
def getIgnoredRepos(self):
    """
    Returns a list of repository paths that have been ignored, because
    another repo with the same name exists.
    """
    return self.settings.repositories.ignored_repos
def findname2(self, mycpv, mytree=None, myrepo=None):
    """
    Returns the location of the CPV, and what overlay it was in.
    Searches overlays first, then PORTDIR; this allows us to return the first
    matching file. As opposed to starting in portdir and then doing overlays
    second, we would have to exhaustively search the overlays until we found
    the file we wanted.
    If myrepo is not None it will find packages from this repository(overlay)

    Returns (None, 0) when the cpv is empty or the repo is unknown; raises
    InvalidPackageName for a malformed cpv.
    """
    if not mycpv:
        return (None, 0)
    if myrepo is not None:
        # An explicit repo restricts the search to that single tree.
        mytree = self.treemap.get(myrepo)
        if mytree is None:
            return (None, 0)
    mysplit = mycpv.split("/")
    psplit = pkgsplit(mysplit[1])
    if psplit is None or len(mysplit) != 2:
        raise InvalidPackageName(mycpv)
    # For optimal performace in this hot spot, we do manual unicode
    # handling here instead of using the wrapped os module.
    encoding = _encodings['fs']
    errors = 'strict'
    if mytree:
        mytrees = [mytree]
    else:
        # Overlays are searched first (highest priority first).
        mytrees = reversed(self.porttrees)
    relative_path = mysplit[0] + _os.sep + psplit[0] + _os.sep + \
        mysplit[1] + ".ebuild"
    for x in mytrees:
        filename = x + _os.sep + relative_path
        if _os.access(_unicode_encode(filename,
            encoding=encoding, errors=errors), _os.R_OK):
            return (filename, x)
    return (None, 0)
def _write_cache(self, cpv, repo_path, metadata, ebuild_hash):
    """Store *metadata* for *cpv* in the writable cache of *repo_path*,
    stamping the entry with the cache's validation checksum field."""
    try:
        cache = self.auxdb[repo_path]
        chf = cache.validation_chf
        metadata['_%s_' % chf] = getattr(ebuild_hash, chf)
    except CacheError:
        # Normally this shouldn't happen, so we'll show
        # a traceback for debugging purposes.
        traceback.print_exc()
        cache = None
    if cache is not None:
        try:
            cache[cpv] = metadata
        except CacheError:
            # Normally this shouldn't happen, so we'll show
            # a traceback for debugging purposes.
            traceback.print_exc()
	def _pull_valid_cache(self, cpv, ebuild_path, repo_path):
		"""
		Fetch cached metadata for cpv, trying the pre-generated and
		read-only caches before the writable one.

		@return: (metadata, ebuild_hash) where metadata is None when no
			valid cache entry exists. Raises KeyError if the ebuild file
			itself is missing.
		"""
		try:
			ebuild_hash = eclass_cache.hashed_path(ebuild_path)
			# snag mtime since we use it later, and to trigger stat failure
			# if it doesn't exist
			ebuild_hash.mtime
		except FileNotFound:
			writemsg(_("!!! aux_get(): ebuild for " \
				"'%s' does not exist at:\n") % (cpv,), noiselevel=-1)
			writemsg("!!! %s\n" % ebuild_path, noiselevel=-1)
			raise KeyError(cpv)
		# Pull pre-generated metadata from the metadata/cache/
		# directory if it exists and is valid, otherwise fall
		# back to the normal writable cache.
		auxdbs = []
		pregen_auxdb = self._pregen_auxdb.get(repo_path)
		if pregen_auxdb is not None:
			auxdbs.append(pregen_auxdb)
		ro_auxdb = self._ro_auxdb.get(repo_path)
		if ro_auxdb is not None:
			auxdbs.append(ro_auxdb)
		auxdbs.append(self.auxdb[repo_path])
		eclass_db = self.repositories.get_repo_for_location(repo_path).eclass_db
		for auxdb in auxdbs:
			try:
				metadata = auxdb[cpv]
			except KeyError:
				continue
			except CacheError:
				# Corrupt entry: purge it from writable caches and move on.
				if not auxdb.readonly:
					try:
						del auxdb[cpv]
					except (KeyError, CacheError):
						pass
				continue
			eapi = metadata.get('EAPI', '').strip()
			if not eapi:
				eapi = '0'
				metadata['EAPI'] = eapi
			if not eapi_is_supported(eapi):
				# Since we're supposed to be able to efficiently obtain the
				# EAPI from _parse_eapi_ebuild_head, we disregard cache entries
				# for unsupported EAPIs.
				continue
			if auxdb.validate_entry(metadata, ebuild_hash, eclass_db):
				break
		else:
			# No cache produced a valid, supported entry.
			metadata = None
		return (metadata, ebuild_hash)
def aux_get(self, mycpv, mylist, mytree=None, myrepo=None):
"stub code for returning auxilliary db information, such as SLOT, DEPEND, etc."
'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or raise KeyError if error'
cache_me = False
if myrepo is not None:
mytree = self.treemap.get(myrepo)
if mytree is None:
raise KeyError(myrepo)
if mytree is not None and len(self.porttrees) == 1 \
and mytree == self.porttrees[0]:
# mytree matches our only tree, so it's safe to
# ignore mytree and cache the result
mytree = None
myrepo = None
if mytree is None:
cache_me = True
if mytree is None and not self._known_keys.intersection(
mylist).difference(self._aux_cache_keys):
aux_cache = self._aux_cache.get(mycpv)
if aux_cache is not None:
return [aux_cache.get(x, "") for x in mylist]
cache_me = True
try:
cat, pkg = mycpv.split("/", 1)
except ValueError:
# Missing slash. Can't find ebuild so raise KeyError.
raise KeyError(mycpv)
myebuild, mylocation = self.findname2(mycpv, mytree)
if not myebuild:
writemsg("!!! aux_get(): %s\n" % \
_("ebuild not found for '%s'") % mycpv, noiselevel=1)
raise KeyError(mycpv)
mydata, ebuild_hash = self._pull_valid_cache(mycpv, myebuild, mylocation)
doregen = mydata is None
if doregen:
if myebuild in self._broken_ebuilds:
raise KeyError(mycpv)
proc = EbuildMetadataPhase(cpv=mycpv,
ebuild_hash=ebuild_hash, portdb=self,
repo_path=mylocation, scheduler=self._event_loop,
settings=self.doebuild_settings)
proc.start()
proc.wait()
if proc.returncode != os.EX_OK:
self._broken_ebuilds.add(myebuild)
raise KeyError(mycpv)
mydata = proc.metadata
mydata["repository"] = self.repositories.get_name_for_location(mylocation)
mydata["_mtime_"] = ebuild_hash.mtime
eapi = mydata.get("EAPI")
if not eapi:
eapi = "0"
mydata["EAPI"] = eapi
if eapi_is_supported(eapi):
mydata["INHERITED"] = " ".join(mydata.get("_eclasses_", []))
#finally, we look at our internal cache entry and return the requested data.
returnme = [mydata.get(x, "") for x in mylist]
if cache_me:
aux_cache = {}
for x in self._aux_cache_keys:
aux_cache[x] = mydata.get(x, "")
self._aux_cache[mycpv] = aux_cache
return returnme
	def getFetchMap(self, mypkg, useflags=None, mytree=None):
		"""
		Get the SRC_URI metadata as a dict which maps each file name to a
		set of alternative URIs.
		@param mypkg: cpv for an ebuild
		@type mypkg: String
		@param useflags: a collection of enabled USE flags, for evaluation of
			conditionals
		@type useflags: set, or None to enable all conditionals
		@param mytree: The canonical path of the tree in which the ebuild
			is located, or None for automatic lookup
		@type mypkg: String
		@return: A dict which maps each file name to a set of alternative
			URIs.
		@rtype: dict
		"""
		try:
			eapi, myuris = self.aux_get(mypkg,
				["EAPI", "SRC_URI"], mytree=mytree)
		except KeyError:
			# Convert this to an InvalidDependString exception since callers
			# already handle it.
			raise portage.exception.InvalidDependString(
				"getFetchMap(): aux_get() error reading "+mypkg+"; aborting.")
		if not eapi_is_supported(eapi):
			# Convert this to an InvalidDependString exception
			# since callers already handle it.
			raise portage.exception.InvalidDependString(
				"getFetchMap(): '%s' has unsupported EAPI: '%s'" % \
				(mypkg, eapi))
		# SRC_URI parsing (conditionals, '->' renames) is delegated to the
		# module-level helper.
		return _parse_uri_map(mypkg, {'EAPI':eapi,'SRC_URI':myuris},
			use=useflags)
	def getfetchsizes(self, mypkg, useflags=None, debug=0, myrepo=None):
		"""
		Return a {filename: size-in-bytes} dict of the downloads still
		needed for mypkg, skipping distfiles already fully present in
		DISTDIR or (size-matched) in a PORTAGE_RO_DISTDIRS directory.
		"""
		# returns a filename:size dictionary of remaining downloads
		myebuild, mytree = self.findname2(mypkg, myrepo=myrepo)
		if myebuild is None:
			raise AssertionError(_("ebuild not found for '%s'") % mypkg)
		pkgdir = os.path.dirname(myebuild)
		mf = self.repositories.get_repo_for_location(
			os.path.dirname(os.path.dirname(pkgdir))).load_manifest(
			pkgdir, self.settings["DISTDIR"])
		checksums = mf.getDigests()
		if not checksums:
			if debug:
				writemsg(_("[empty/missing/bad digest]: %s\n") % (mypkg,))
			return {}
		filesdict={}
		myfiles = self.getFetchMap(mypkg, useflags=useflags, mytree=mytree)
		#XXX: maybe this should be improved: take partial downloads
		# into account? check checksums?
		for myfile in myfiles:
			try:
				fetch_size = int(checksums[myfile]["size"])
			except (KeyError, ValueError):
				if debug:
					writemsg(_("[bad digest]: missing %(file)s for %(pkg)s\n") % {"file":myfile, "pkg":mypkg})
				continue
			file_path = os.path.join(self.settings["DISTDIR"], myfile)
			mystat = None
			try:
				mystat = os.stat(file_path)
			except OSError:
				pass
			if mystat is None:
				existing_size = 0
				# A read-only distdir copy only counts when its size
				# matches the manifest exactly (it can't be resumed).
				ro_distdirs = self.settings.get("PORTAGE_RO_DISTDIRS")
				if ro_distdirs is not None:
					for x in shlex_split(ro_distdirs):
						try:
							mystat = os.stat(os.path.join(x, myfile))
						except OSError:
							pass
						else:
							if mystat.st_size == fetch_size:
								existing_size = fetch_size
								break
			else:
				existing_size = mystat.st_size
			remaining_size = fetch_size - existing_size
			if remaining_size > 0:
				# Assume the download is resumable.
				filesdict[myfile] = remaining_size
			elif remaining_size < 0:
				# The existing file is too large and therefore corrupt.
				filesdict[myfile] = int(checksums[myfile]["size"])
		return filesdict
def fetch_check(self, mypkg, useflags=None, mysettings=None, all=False, myrepo=None):
"""
TODO: account for PORTAGE_RO_DISTDIRS
"""
if all:
useflags = None
elif useflags is None:
if mysettings:
useflags = mysettings["USE"].split()
if myrepo is not None:
mytree = self.treemap.get(myrepo)
if mytree is None:
return False
else:
mytree = None
myfiles = self.getFetchMap(mypkg, useflags=useflags, mytree=mytree)
myebuild = self.findname(mypkg, myrepo=myrepo)
if myebuild is None:
raise AssertionError(_("ebuild not found for '%s'") % mypkg)
pkgdir = os.path.dirname(myebuild)
mf = self.repositories.get_repo_for_location(
os.path.dirname(os.path.dirname(pkgdir)))
mf = mf.load_manifest(pkgdir, self.settings["DISTDIR"])
mysums = mf.getDigests()
failures = {}
for x in myfiles:
if not mysums or x not in mysums:
ok = False
reason = _("digest missing")
else:
try:
ok, reason = portage.checksum.verify_all(
os.path.join(self.settings["DISTDIR"], x), mysums[x])
except FileNotFound as e:
ok = False
reason = _("File Not Found: '%s'") % (e,)
if not ok:
failures[x] = reason
if failures:
return False
return True
def cpv_exists(self, mykey, myrepo=None):
"Tells us whether an actual ebuild exists on disk (no masking)"
cps2 = mykey.split("/")
cps = catpkgsplit(mykey, silent=0)
if not cps:
#invalid cat/pkg-v
return 0
if self.findname(cps[0] + "/" + cps2[1], myrepo=myrepo):
return 1
else:
return 0
def cp_all(self, categories=None, trees=None):
"""
This returns a list of all keys in our tree or trees
@param categories: optional list of categories to search or
defaults to self.settings.categories
@param trees: optional list of trees to search the categories in or
defaults to self.porttrees
@rtype list of [cat/pkg,...]
"""
d = {}
if categories is None:
categories = self.settings.categories
if trees is None:
trees = self.porttrees
for x in categories:
for oroot in trees:
for y in listdir(oroot+"/"+x, EmptyOnError=1, ignorecvs=1, dirsonly=1):
try:
atom = Atom("%s/%s" % (x, y))
except InvalidAtom:
continue
if atom != atom.cp:
continue
d[atom.cp] = None
l = list(d)
l.sort()
return l
	def cp_list(self, mycp, use_cache=1, mytree=None):
		"""
		Return a sorted list of all cpvs available for the cat/pkg mycp,
		optionally restricted to one tree (or an iterable of trees).
		Results are cached in self.xcache while the dbapi is frozen;
		use_cache is accepted for API compatibility.
		"""
		# NOTE: Cache can be safely shared with the match cache, since the
		# match cache uses the result from dep_expand for the cache_key.
		if self.frozen and mytree is not None \
			and len(self.porttrees) == 1 \
			and mytree == self.porttrees[0]:
			# mytree matches our only tree, so it's safe to
			# ignore mytree and cache the result
			mytree = None
		if self.frozen and mytree is None:
			cachelist = self.xcache["cp-list"].get(mycp)
			if cachelist is not None:
				# Try to propagate this to the match-all cache here for
				# repoman since he uses separate match-all caches for each
				# profile (due to differences in _get_implicit_iuse).
				self.xcache["match-all"][(mycp, mycp)] = cachelist
				return cachelist[:]
		mysplit = mycp.split("/")
		invalid_category = mysplit[0] not in self._categories
		d={}
		if mytree is not None:
			if isinstance(mytree, basestring):
				mytrees = [mytree]
			else:
				# assume it's iterable
				mytrees = mytree
		else:
			mytrees = self.porttrees
		for oroot in mytrees:
			try:
				file_list = os.listdir(os.path.join(oroot, mycp))
			except OSError:
				# Package directory doesn't exist in this tree.
				continue
			for x in file_list:
				pf = None
				if x[-7:] == '.ebuild':
					pf = x[:-7]
				if pf is not None:
					ps = pkgsplit(pf)
					if not ps:
						writemsg(_("\nInvalid ebuild name: %s\n") % \
							os.path.join(oroot, mycp, x), noiselevel=-1)
						continue
					if ps[0] != mysplit[1]:
						writemsg(_("\nInvalid ebuild name: %s\n") % \
							os.path.join(oroot, mycp, x), noiselevel=-1)
						continue
					ver_match = ver_regexp.match("-".join(ps[1:]))
					if ver_match is None or not ver_match.groups():
						writemsg(_("\nInvalid ebuild version: %s\n") % \
							os.path.join(oroot, mycp, x), noiselevel=-1)
						continue
					d[_pkg_str(mysplit[0]+"/"+pf)] = None
		if invalid_category and d:
			writemsg(_("\n!!! '%s' has a category that is not listed in " \
				"%setc/portage/categories\n") % \
				(mycp, self.settings["PORTAGE_CONFIGROOT"]), noiselevel=-1)
			mylist = []
		else:
			mylist = list(d)
		# Always sort in ascending order here since it's handy
		# and the result can be easily cached and reused.
		self._cpv_sort_ascending(mylist)
		if self.frozen and mytree is None:
			cachelist = mylist[:]
			self.xcache["cp-list"][mycp] = cachelist
			self.xcache["match-all"][(mycp, mycp)] = cachelist
		return mylist
def freeze(self):
for x in "bestmatch-visible", "cp-list", "match-all", \
"match-all-cpv-only", "match-visible", "minimum-all", \
"minimum-visible":
self.xcache[x]={}
self.frozen=1
	def melt(self):
		# Disable caching and discard all previously cached results.
		self.xcache = {}
		self.frozen = 0
	def xmatch(self,level,origdep,mydep=None,mykey=None,mylist=None):
		"caching match function; very trick stuff"
		# 'level' selects the matching mode; while frozen, results are
		# cached per-level in self.xcache keyed on the expanded dep.
		if level == "list-visible":
			level = "match-visible"
			warnings.warn("The 'list-visible' mode of "
				"portage.dbapi.porttree.portdbapi.xmatch "
				"has been renamed to match-visible",
				DeprecationWarning, stacklevel=2)
		if mydep is None:
			#this stuff only runs on first call of xmatch()
			#create mydep, mykey from origdep
			mydep = dep_expand(origdep, mydb=self, settings=self.settings)
			mykey = mydep.cp
		#if no updates are being made to the tree, we can consult our xcache...
		cache_key = None
		if self.frozen:
			cache_key = (mydep, mydep.unevaluated_atom)
			try:
				return self.xcache[level][cache_key][:]
			except KeyError:
				pass
		myval = None
		mytree = None
		if mydep.repo is not None:
			mytree = self.treemap.get(mydep.repo)
			if mytree is None:
				# Unknown repo: pick the empty result of the right type.
				if level.startswith("match-"):
					myval = []
				else:
					myval = ""
		if myval is not None:
			# Unknown repo, empty result.
			pass
		elif level == "match-all-cpv-only":
			# match *all* packages, only against the cpv, in order
			# to bypass unnecessary cache access for things like IUSE
			# and SLOT.
			if mydep == mykey:
				# Share cache with match-all/cp_list when the result is the
				# same. Note that this requires that mydep.repo is None and
				# thus mytree is also None.
				level = "match-all"
				myval = self.cp_list(mykey, mytree=mytree)
			else:
				myval = match_from_list(mydep,
					self.cp_list(mykey, mytree=mytree))
		elif level in ("bestmatch-visible", "match-all", "match-visible",
			"minimum-all", "minimum-visible"):
			# Find the minimum matching visible version. This is optimized to
			# minimize the number of metadata accesses (improves performance
			# especially in cases where metadata needs to be generated).
			if mydep == mykey:
				mylist = self.cp_list(mykey, mytree=mytree)
			else:
				mylist = match_from_list(mydep,
					self.cp_list(mykey, mytree=mytree))
			visibility_filter = level not in ("match-all", "minimum-all")
			single_match = level not in ("match-all", "match-visible")
			myval = []
			aux_keys = list(self._aux_cache_keys)
			if level == "bestmatch-visible":
				# Iterate highest version first so the first hit wins.
				iterfunc = reversed
			else:
				iterfunc = iter
			if mydep.repo is not None:
				repos = [mydep.repo]
			else:
				# We iterate over self.porttrees, since it's common to
				# tweak this attribute in order to adjust match behavior.
				repos = []
				for tree in reversed(self.porttrees):
					repos.append(self.repositories.get_name_for_location(tree))
			for cpv in iterfunc(mylist):
				for repo in repos:
					try:
						metadata = dict(zip(aux_keys,
							self.aux_get(cpv, aux_keys, myrepo=repo)))
					except KeyError:
						# ebuild not in this repo, or masked by corruption
						continue
					try:
						pkg_str = _pkg_str(cpv, metadata=metadata,
							settings=self.settings)
					except InvalidData:
						continue
					if visibility_filter and not self._visible(pkg_str, metadata):
						continue
					if mydep.slot is not None and \
						not _match_slot(mydep, pkg_str):
						continue
					if mydep.unevaluated_atom.use is not None and \
						not self._match_use(mydep, pkg_str, metadata):
						continue
					myval.append(pkg_str)
					# only yield a given cpv once
					break
				if myval and single_match:
					break
			if single_match:
				if myval:
					myval = myval[0]
				else:
					myval = ""
		elif level == "bestmatch-list":
			#dep match -- find best match but restrict search to sublist
			warnings.warn("The 'bestmatch-list' mode of "
				"portage.dbapi.porttree.portdbapi.xmatch is deprecated",
				DeprecationWarning, stacklevel=2)
			myval = best(list(self._iter_match(mydep, mylist)))
		elif level == "match-list":
			#dep match -- find all matches but restrict search to sublist (used in 2nd half of visible())
			warnings.warn("The 'match-list' mode of "
				"portage.dbapi.porttree.portdbapi.xmatch is deprecated",
				DeprecationWarning, stacklevel=2)
			myval = list(self._iter_match(mydep, mylist))
		else:
			raise AssertionError(
				"Invalid level argument: '%s'" % level)
		if self.frozen:
			xcache_this_level = self.xcache.get(level)
			if xcache_this_level is not None:
				xcache_this_level[cache_key] = myval
				if not isinstance(myval, _pkg_str):
					# Return a copy so callers can't mutate the cache.
					myval = myval[:]
		return myval
	def match(self, mydep, use_cache=1):
		# Convenience wrapper: visible packages matching mydep.
		# use_cache is accepted for API compatibility but unused here.
		return self.xmatch("match-visible", mydep)
	def gvisible(self, mylist):
		# Deprecated alias for the visibility filter; use _iter_visible.
		warnings.warn("The 'gvisible' method of "
			"portage.dbapi.porttree.portdbapi "
			"is deprecated",
			DeprecationWarning, stacklevel=2)
		return list(self._iter_visible(iter(mylist)))
	def visible(self, cpv_iter):
		# Deprecated; accepts None for compatibility and returns [].
		warnings.warn("The 'visible' method of "
			"portage.dbapi.porttree.portdbapi "
			"is deprecated",
			DeprecationWarning, stacklevel=2)
		if cpv_iter is None:
			return []
		return list(self._iter_visible(iter(cpv_iter)))
	def _iter_visible(self, cpv_iter, myrepo=None):
		"""
		Generator that yields only the visible packages from cpv_iter.
		Each cpv is yielded at most once, using metadata from the first
		repo (searched highest priority first) that provides it.
		"""
		aux_keys = list(self._aux_cache_keys)
		metadata = {}
		if myrepo is not None:
			repos = [myrepo]
		else:
			# We iterate over self.porttrees, since it's common to
			# tweak this attribute in order to adjust match behavior.
			repos = []
			for tree in reversed(self.porttrees):
				repos.append(self.repositories.get_name_for_location(tree))
		for mycpv in cpv_iter:
			for repo in repos:
				metadata.clear()
				try:
					metadata.update(zip(aux_keys,
						self.aux_get(mycpv, aux_keys, myrepo=repo)))
				except KeyError:
					continue
				except PortageException as e:
					writemsg("!!! Error: aux_get('%s', %s)\n" %
						(mycpv, aux_keys), noiselevel=-1)
					writemsg("!!! %s\n" % (e,), noiselevel=-1)
					del e
					continue
				if not self._visible(mycpv, metadata):
					continue
				yield mycpv
				# only yield a given cpv once
				break
	def _visible(self, cpv, metadata):
		"""
		True if cpv is visible: supported and non-deprecated EAPI,
		non-empty SLOT, not masked, keywords accepted, and (for local
		configs) CHOST/LICENSE/PROPERTIES checks all pass.
		NOTE: may mutate metadata ('CHOST', 'USE') as a side effect.
		"""
		eapi = metadata["EAPI"]
		if not eapi_is_supported(eapi):
			return False
		if _eapi_is_deprecated(eapi):
			return False
		if not metadata["SLOT"]:
			return False
		settings = self.settings
		if settings._getMaskAtom(cpv, metadata):
			return False
		if settings._getMissingKeywords(cpv, metadata):
			return False
		if settings.local_config:
			metadata['CHOST'] = settings.get('CHOST', '')
			if not settings._accept_chost(cpv, metadata):
				return False
			metadata["USE"] = ""
			if "?" in metadata["LICENSE"] or \
				"?" in metadata["PROPERTIES"]:
				# USE conditionals present: compute USE for evaluation.
				self.doebuild_settings.setcpv(cpv, mydb=metadata)
				metadata['USE'] = self.doebuild_settings['PORTAGE_USE']
			try:
				if settings._getMissingLicenses(cpv, metadata):
					return False
				if settings._getMissingProperties(cpv, metadata):
					return False
			except InvalidDependString:
				return False
		return True
def close_portdbapi_caches():
	# Close the caches of every live portdbapi instance.
	for i in portdbapi.portdbapi_instances:
		i.close_caches()
# Register portage's global exit handler so cleanup runs at interpreter exit.
portage.process.atexit_register(portage.portageexit)
class portagetree(object):
	"""Legacy wrapper that couples a portdbapi instance with its
	configuration; most methods are thin compatibility shims."""
	def __init__(self, root=None, virtual=DeprecationWarning, settings=None):
		"""
		Constructor for a PortageTree
		@param root: deprecated, defaults to settings['ROOT']
		@type root: String/Path
		@param virtual: UNUSED
		@type virtual: No Idea
		@param settings: Portage Configuration object (portage.settings)
		@type settings: Instance of portage.config
		"""
		if settings is None:
			settings = portage.settings
		self.settings = settings
		# 'root' and 'virtual' are retained only for backward
		# compatibility; non-default values trigger warnings.
		if root is not None and root != settings['ROOT']:
			warnings.warn("The root parameter of the " + \
				"portage.dbapi.porttree.portagetree" + \
				" constructor is now unused. Use " + \
				"settings['ROOT'] instead.",
				DeprecationWarning, stacklevel=2)
		if virtual is not DeprecationWarning:
			warnings.warn("The 'virtual' parameter of the "
				"portage.dbapi.porttree.portagetree"
				" constructor is unused",
				DeprecationWarning, stacklevel=2)
		self.portroot = settings["PORTDIR"]
		self.__virtual = virtual
		self.dbapi = portdbapi(mysettings=settings)
	@property
	def root(self):
		# Deprecated alias for settings['ROOT'].
		warnings.warn("The root attribute of " + \
			"portage.dbapi.porttree.portagetree" + \
			" is deprecated. Use " + \
			"settings['ROOT'] instead.",
			DeprecationWarning, stacklevel=3)
		return self.settings['ROOT']
	@property
	def virtual(self):
		# Deprecated; returns whatever was passed to the constructor.
		warnings.warn("The 'virtual' attribute of " + \
			"portage.dbapi.porttree.portagetree" + \
			" is deprecated.",
			DeprecationWarning, stacklevel=3)
		return self.__virtual
	def dep_bestmatch(self,mydep):
		"compatibility method"
		mymatch = self.dbapi.xmatch("bestmatch-visible",mydep)
		if mymatch is None:
			return ""
		return mymatch
	def dep_match(self,mydep):
		"compatibility method"
		mymatch = self.dbapi.xmatch("match-visible",mydep)
		if mymatch is None:
			return []
		return mymatch
	def exists_specific(self,cpv):
		# True if an ebuild for this exact cpv exists on disk.
		return self.dbapi.cpv_exists(cpv)
	def getallnodes(self):
		"""new behavior: these are all *unmasked* nodes. There may or may not be available
		masked package for nodes in this nodes list."""
		return self.dbapi.cp_all()
	def getname(self, pkgname):
		"returns file location for this particular package (DEPRECATED)"
		if not pkgname:
			return ""
		mysplit = pkgname.split("/")
		psplit = pkgsplit(mysplit[1])
		return "/".join([self.portroot, mysplit[0], psplit[0], mysplit[1]])+".ebuild"
	def getslot(self,mycatpkg):
		"Get a slot for a catpkg; assume it exists."
		myslot = ""
		try:
			myslot = self.dbapi._pkg_str(mycatpkg, None).slot
		except KeyError:
			# Package not found: fall back to an empty slot string.
			pass
		return myslot
class FetchlistDict(Mapping):
	"""
	This provide a mapping interface to retrieve fetch lists. It's used
	to allow portage.manifest.Manifest to access fetch lists via a standard
	mapping interface rather than use the dbapi directly.
	"""
	def __init__(self, pkgdir, settings, mydbapi):
		"""pkgdir is a directory containing ebuilds and settings is passed into
		portdbapi.getfetchlist for __getitem__ calls."""
		self.pkgdir = pkgdir
		# cat/pkg derived from the last two path components of pkgdir.
		self.cp = os.sep.join(pkgdir.split(os.sep)[-2:])
		self.settings = settings
		self.mytree = os.path.realpath(os.path.dirname(os.path.dirname(pkgdir)))
		self.portdb = mydbapi
	def __getitem__(self, pkg_key):
		"""Returns the complete fetch list for a given package."""
		return list(self.portdb.getFetchMap(pkg_key, mytree=self.mytree))
	def __contains__(self, cpv):
		# Membership is defined by the cpvs listed for this cat/pkg.
		return cpv in self.__iter__()
	def has_key(self, pkg_key):
		"""Returns true if the given package exists within pkgdir."""
		warnings.warn("portage.dbapi.porttree.FetchlistDict.has_key() is "
			"deprecated, use the 'in' operator instead",
			DeprecationWarning, stacklevel=2)
		return pkg_key in self
	def __iter__(self):
		return iter(self.portdb.cp_list(self.cp, mytree=self.mytree))
	def __len__(self):
		"""This needs to be implemented in order to avoid
		infinite recursion in some cases."""
		return len(self.portdb.cp_list(self.cp, mytree=self.mytree))
	def keys(self):
		"""Returns keys for all packages within pkgdir"""
		return self.portdb.cp_list(self.cp, mytree=self.mytree)
	if sys.hexversion >= 0x3000000:
		# Under Python 3, keys() behaves like dict.keys() (an iterator).
		keys = __iter__
def _parse_uri_map(cpv, metadata, use=None):
	"""
	Parse SRC_URI from metadata into an OrderedDict mapping each distfile
	name to a tuple of alternative URIs, honoring USE conditionals and
	the 'uri -> rename' operator. Raises InvalidDependString when a URI
	yields no usable file name.
	"""
	myuris = use_reduce(metadata.get('SRC_URI', ''),
		uselist=use, matchall=(use is None),
		is_src_uri=True,
		eapi=metadata['EAPI'])
	uri_map = OrderedDict()
	# Consume tokens left-to-right by reversing once and popping the end.
	myuris.reverse()
	while myuris:
		uri = myuris.pop()
		if myuris and myuris[-1] == "->":
			# The '->' operator renames the distfile.
			myuris.pop()
			distfile = myuris.pop()
		else:
			distfile = os.path.basename(uri)
			if not distfile:
				raise portage.exception.InvalidDependString(
					("getFetchMap(): '%s' SRC_URI has no file " + \
					"name: '%s'") % (cpv, uri))
		uri_set = uri_map.get(distfile)
		if uri_set is None:
			# Use OrderedDict to preserve order from SRC_URI
			# while ensuring uniqueness.
			uri_set = OrderedDict()
			uri_map[distfile] = uri_set
		uri_set[uri] = True
	# Convert OrderedDicts to tuples.
	for k, v in uri_map.items():
		uri_map[k] = tuple(v)
	return uri_map
|
class Solution:
    def convertToBase7(self, num: int) -> str:
        """Return the base-7 representation of num as a string.

        Handles zero and negatives, e.g. 100 -> "202", -7 -> "-10".
        """
        if num == 0:
            return "0"
        sign = "-" if num < 0 else ""
        n = abs(num)
        digits = []
        while n:
            n, rem = divmod(n, 7)
            digits.append(str(rem))
        return sign + "".join(reversed(digits))
|
import sys, types
from thUtils.stdlib import *
from threading import Thread
# Builtin Commands
class thCommands:
def __init__(self):
return None
class thMonitor:
    """Registry of monitoring commands, hosts and host groups.

    commands: {name: callable(address)}
    hosts:    {hostname: {'commands': [names], 'address': addr}}
    hostg:    {groupname: [hostnames]}; a 'default' group always exists.
    """
    def __init__(self):
        # Fixed: the original instantiated the undefined name Commands()
        # (NameError on every construction); the unused line was removed.
        self.commands = {}
        self.hosts = {}
        self.hostg = {'default': []}
    def _is_in_list(self, itm, lst):
        # Simple membership helper, kept for API compatibility.
        return itm in lst
    def _chk_str(self, itm):
        # Non-empty string check; isinstance(str) replaces the
        # Python-2-only types.StringType comparison.
        return isinstance(itm, str) and itm != ''
    def register_command(self, cmd_name=None, cmd_cback=None):
        """Register callable cmd_cback under cmd_name.

        Returns True on success, False for missing arguments or a
        non-function callback; returns None (original behavior,
        preserved) when cmd_name is not a non-empty string.
        """
        if cmd_name is None or cmd_cback is None:
            th_error('Invalid paramaters given to mstat.Monitor.register_command(cmd_name, cmd_cback)')
            return False
        if self._chk_str(cmd_name):
            if isinstance(cmd_cback, (types.FunctionType, types.MethodType)):
                self.commands[cmd_name] = cmd_cback
                return True
            return False
    def register_host(self, host='', addr='', commands=None):
        """Register a host with its address and the command names to run.

        Returns True on success; False on bad input, duplicate host, or
        a reference to an unregistered command.
        """
        # Fixed: mutable default argument replaced with None sentinel.
        commands = [] if commands is None else commands
        if not (self._chk_str(host) and self._chk_str(addr)):
            return False
        if self._is_in_list(host, self.hosts.keys()):
            th_error('Trying to add duplicate host %s' % host)
            return False
        for name in commands:
            if not self._is_in_list(name, self.commands.keys()):
                th_error('Trying to add host %s with nonexisting command %s' % (host, name))
                return False
        self.hosts[host] = {'commands': commands, 'address': addr}
        return True
    def register_hostg(self, hostg='', members=None):
        """Create a host group containing the given registered hosts."""
        # Fixed: mutable default argument replaced with None sentinel.
        members = [] if members is None else members
        if not self._chk_str(hostg):
            return False
        if self._is_in_list(hostg, self.hostg.keys()):
            th_error('Trying to add duplicate hostgroup %s' % hostg)
            return False
        for member in members:
            if not self._is_in_list(member, self.hosts.keys()):
                th_error('Trying to add nonexisting host %s to hostgroup %s' % (member, hostg))
                return False
        self.hostg[hostg] = members
        return True
    def hostg_member_add(self, hostg='', member=''):
        """Append a registered host to an existing host group."""
        if not (self._chk_str(hostg) and self._chk_str(member)):
            return False
        if not self._is_in_list(hostg, self.hostg.keys()):
            th_error('Trying to add member to nonexisting hostgroup %s' % hostg)
            return False
        if not self._is_in_list(member, self.hosts.keys()):
            th_error('Trying to add nonexisting host %s to hostgroup %s' % (member, hostg))
            return False
        if self._is_in_list(member, self.hostg[hostg]):
            th_warn('Trying to add duplicate host %s to hostgroup %s' % (member, hostg))
            return False
        self.hostg[hostg].append(member)
        return True
    def check_host(self, host):
        """Run every command registered for a single host.

        Fixed: the original called _is_in_list without self (NameError)
        and indexed self.commands by host instead of by command name.
        """
        if self._is_in_list(host, self.hosts.keys()):
            for cmd in self.hosts[host]['commands']:
                self.commands[cmd](self.hosts[host]['address'])
    def check_all(self):
        """Run every registered command for every host, group by group."""
        # sorted() replaces the Python-2-only list.sort() on dict.keys()
        # and avoids mutating the stored member lists in place.
        for grp in sorted(self.hostg):
            for hst in sorted(self.hostg[grp]):
                print('Checking ' + hst)
                for cmd in self.hosts[hst]['commands']:
                    self.commands[cmd](self.hosts[hst]['address'])
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainwindow.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtWidgets
import Methods.Bisection as Bisection
import Methods.RegularFalse as RegularFalse
import Methods.Secant as Secant
import Methods.FixedPoint as Fixed
import Methods.Newton as Newton
import Methods.Excact as Excact
import time
from View.graph import Ui_SecondWindow
class Ui_MainWindow(object):
    def setupUi(self, MainWindow):
        """Build the main-window widget tree (pyuic5-generated; edit the .ui
        file, not this code) and connect the per-method buttons to their
        handlers."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(753, 453)
        MainWindow.setAutoFillBackground(False)
        MainWindow.setStyleSheet("background-color: rgb(0, 0, 40)")
        self.centralWidget = QtWidgets.QWidget(MainWindow)
        self.centralWidget.setObjectName("centralWidget")
        # Title label.
        self.label = QtWidgets.QLabel(self.centralWidget)
        self.label.setGeometry(QtCore.QRect(170, 20, 391, 61))
        self.label.setStyleSheet("background: transparent;\n"
"color: rgb(166, 203, 255);\n"
"margin: 5px;\n"
"font-size: 36px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"")
        self.label.setObjectName("label")
        # Function input: caption label plus the f(x) line edit.
        self.label_2 = QtWidgets.QLabel(self.centralWidget)
        self.label_2.setGeometry(QtCore.QRect(30, 90, 111, 31))
        self.label_2.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 18px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);")
        self.label_2.setObjectName("label_2")
        self.functionLineEdit = QtWidgets.QLineEdit(self.centralWidget)
        self.functionLineEdit.setGeometry(QtCore.QRect(160, 80, 241, 41))
        self.functionLineEdit.setStyleSheet("border: 0;\n"
"border-bottom: 1px solid #999;\n"
"background: transparent;\n"
"outline: none;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);")
        self.functionLineEdit.setInputMask("")
        self.functionLineEdit.setText("")
        self.functionLineEdit.setObjectName("functionLineEdit")
        # Toggle: when checked, handlers read parameters from
        # ../View/input.txt instead of the UI fields.
        self.checkBox = QtWidgets.QCheckBox(self.centralWidget)
        self.checkBox.setGeometry(QtCore.QRect(210, 130, 191, 21))
        self.checkBox.setStyleSheet("outline: none;\n"
"background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 16px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;")
        self.checkBox.setObjectName("checkBox")
        self.label_3 = QtWidgets.QLabel(self.centralWidget)
        self.label_3.setGeometry(QtCore.QRect(30, 180, 141, 51))
        self.label_3.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 18px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;")
        self.label_3.setObjectName("label_3")
        # Tab widget: one page per root-finding method.
        self.tabWidget = QtWidgets.QTabWidget(self.centralWidget)
        self.tabWidget.setGeometry(QtCore.QRect(140, 200, 521, 231))
        self.tabWidget.setStyleSheet("border: 0;\n"
"border: 1px solid #999;\n"
"border-top: 0;\n"
"background: transparent;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"background-color: rgb(0, 0, 40);")
        self.tabWidget.setObjectName("tabWidget")
        # --- Bisection tab: bounds, result labels, action buttons. ---
        self.Bisection = QtWidgets.QWidget()
        self.Bisection.setObjectName("Bisection")
        self.groupBox_2 = QtWidgets.QGroupBox(self.Bisection)
        self.groupBox_2.setGeometry(QtCore.QRect(-1, -1, 521, 201))
        self.groupBox_2.setStyleSheet("background: transparent;\n"
"")
        self.groupBox_2.setTitle("")
        self.groupBox_2.setObjectName("groupBox_2")
        self.label_16 = QtWidgets.QLabel(self.groupBox_2)
        self.label_16.setGeometry(QtCore.QRect(20, 20, 121, 31))
        self.label_16.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);\n"
"border: none;")
        self.label_16.setObjectName("label_16")
        self.bisectionLowerBound = QtWidgets.QLineEdit(self.groupBox_2)
        self.bisectionLowerBound.setGeometry(QtCore.QRect(150, 20, 81, 20))
        self.bisectionLowerBound.setStyleSheet("border: 0;\n"
"border-bottom: 1px solid #999;\n"
"background: transparent;\n"
"outline: none;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);")
        self.bisectionLowerBound.setInputMask("")
        self.bisectionLowerBound.setText("")
        self.bisectionLowerBound.setObjectName("bisectionLowerBound")
        self.label_17 = QtWidgets.QLabel(self.groupBox_2)
        self.label_17.setGeometry(QtCore.QRect(260, 20, 121, 31))
        self.label_17.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);\n"
"border: none;")
        self.label_17.setObjectName("label_17")
        self.bisectionUpperBound = QtWidgets.QLineEdit(self.groupBox_2)
        self.bisectionUpperBound.setGeometry(QtCore.QRect(390, 20, 81, 21))
        self.bisectionUpperBound.setStyleSheet("border: 0;\n"
"border-bottom: 1px solid #999;\n"
"background: transparent;\n"
"outline: none;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);")
        self.bisectionUpperBound.setInputMask("")
        self.bisectionUpperBound.setText("")
        self.bisectionUpperBound.setObjectName("bisectionUpperBound")
        self.bisectionCalcBtn = QtWidgets.QPushButton(self.groupBox_2)
        self.bisectionCalcBtn.setGeometry(QtCore.QRect(40, 150, 112, 32))
        self.bisectionCalcBtn.setStyleSheet("border-radius: 50px;\n"
"background-color: rgb(234, 255, 253);\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(0, 0, 40);")
        self.bisectionCalcBtn.setObjectName("bisectionCalcBtn")
        self.bisectionShowIterBtn = QtWidgets.QPushButton(self.groupBox_2)
        self.bisectionShowIterBtn.setGeometry(QtCore.QRect(180, 150, 161, 32))
        self.bisectionShowIterBtn.setStyleSheet("border-radius: 50px;\n"
"background-color: rgb(234, 255, 253);\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(0, 0, 40);")
        self.bisectionShowIterBtn.setObjectName("bisectionShowIterBtn")
        self.bisectionShowGpBtn = QtWidgets.QPushButton(self.groupBox_2)
        self.bisectionShowGpBtn.setGeometry(QtCore.QRect(370, 150, 112, 32))
        self.bisectionShowGpBtn.setStyleSheet("border-radius: 50px;\n"
"background-color: rgb(234, 255, 253);\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(0, 0, 40);")
        self.bisectionShowGpBtn.setObjectName("bisectionShowGpBtn")
        self.label_19 = QtWidgets.QLabel(self.groupBox_2)
        self.label_19.setGeometry(QtCore.QRect(80, 60, 171, 31))
        self.label_19.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 16px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);\n"
"border: none;")
        self.label_19.setObjectName("label_19")
        # Output labels: computed root, exact root, elapsed time.
        self.bisectionRoot = QtWidgets.QLabel(self.groupBox_2)
        self.bisectionRoot.setGeometry(QtCore.QRect(240, 60, 141, 31))
        self.bisectionRoot.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 16px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);\n"
"border: none;")
        self.bisectionRoot.setObjectName("bisectionRoot")
        self.label_23 = QtWidgets.QLabel(self.groupBox_2)
        self.label_23.setGeometry(QtCore.QRect(20, 100, 131, 31))
        self.label_23.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);\n"
"border: none;")
        self.label_23.setObjectName("label_23")
        self.label_26 = QtWidgets.QLabel(self.groupBox_2)
        self.label_26.setGeometry(QtCore.QRect(260, 100, 141, 31))
        self.label_26.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);\n"
"border: none;")
        self.label_26.setObjectName("label_26")
        self.excactRootBisection = QtWidgets.QLabel(self.groupBox_2)
        self.excactRootBisection.setGeometry(QtCore.QRect(140, 100, 101, 31))
        self.excactRootBisection.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);\n"
"border: none;")
        self.excactRootBisection.setObjectName("excactRootBisection")
        self.etBisection = QtWidgets.QLabel(self.groupBox_2)
        self.etBisection.setGeometry(QtCore.QRect(390, 100, 101, 31))
        self.etBisection.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);\n"
"border: none;")
        self.etBisection.setObjectName("etBisection")
        self.tabWidget.addTab(self.Bisection, "")
        # --- Fixed Point tab (tab_4): first guess, g(x), results. ---
        self.tab_4 = QtWidgets.QWidget()
        self.tab_4.setObjectName("tab_4")
        self.groupBox = QtWidgets.QGroupBox(self.tab_4)
        self.groupBox.setGeometry(QtCore.QRect(-1, -1, 521, 201))
        self.groupBox.setStyleSheet("background: transparent;\n"
"")
        self.groupBox.setTitle("")
        self.groupBox.setObjectName("groupBox")
        self.label_12 = QtWidgets.QLabel(self.groupBox)
        self.label_12.setGeometry(QtCore.QRect(20, 20, 121, 31))
        self.label_12.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);\n"
"border: none;")
        self.label_12.setObjectName("label_12")
        self.fixedFirstGuess = QtWidgets.QLineEdit(self.groupBox)
        self.fixedFirstGuess.setGeometry(QtCore.QRect(150, 20, 81, 20))
        self.fixedFirstGuess.setStyleSheet("border: 0;\n"
"border-bottom: 1px solid #999;\n"
"background: transparent;\n"
"outline: none;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);")
        self.fixedFirstGuess.setInputMask("")
        self.fixedFirstGuess.setText("")
        self.fixedFirstGuess.setObjectName("fixedFirstGuess")
        self.fixedCalcBtn = QtWidgets.QPushButton(self.groupBox)
        self.fixedCalcBtn.setGeometry(QtCore.QRect(40, 150, 112, 32))
        self.fixedCalcBtn.setStyleSheet("border-radius: 50px;\n"
"background-color: rgb(234, 255, 253);\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(0, 0, 40);")
        self.fixedCalcBtn.setObjectName("fixedCalcBtn")
        self.fixedShowIterBtn = QtWidgets.QPushButton(self.groupBox)
        self.fixedShowIterBtn.setGeometry(QtCore.QRect(180, 150, 161, 32))
        self.fixedShowIterBtn.setStyleSheet("border-radius: 50px;\n"
"background-color: rgb(234, 255, 253);\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(0, 0, 40);")
        self.fixedShowIterBtn.setObjectName("fixedShowIterBtn")
        self.fixedShowGpBtn = QtWidgets.QPushButton(self.groupBox)
        self.fixedShowGpBtn.setGeometry(QtCore.QRect(370, 150, 112, 32))
        self.fixedShowGpBtn.setStyleSheet("border-radius: 50px;\n"
"background-color: rgb(234, 255, 253);\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(0, 0, 40);")
        self.fixedShowGpBtn.setObjectName("fixedShowGpBtn")
        self.label_14 = QtWidgets.QLabel(self.groupBox)
        self.label_14.setGeometry(QtCore.QRect(80, 60, 171, 31))
        self.label_14.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 16px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);\n"
"border: none;")
        self.label_14.setObjectName("label_14")
        self.fixedRoot = QtWidgets.QLabel(self.groupBox)
        self.fixedRoot.setGeometry(QtCore.QRect(240, 60, 141, 31))
        self.fixedRoot.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 16px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);\n"
"border: none;")
        self.fixedRoot.setObjectName("fixedRoot")
        self.label_28 = QtWidgets.QLabel(self.groupBox)
        self.label_28.setGeometry(QtCore.QRect(20, 100, 131, 31))
        self.label_28.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);\n"
"border: none;")
        self.label_28.setObjectName("label_28")
        self.excactRootFixed = QtWidgets.QLabel(self.groupBox)
        self.excactRootFixed.setGeometry(QtCore.QRect(140, 100, 101, 31))
        self.excactRootFixed.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);\n"
"border: none;")
        self.excactRootFixed.setObjectName("excactRootFixed")
        self.label_29 = QtWidgets.QLabel(self.groupBox)
        self.label_29.setGeometry(QtCore.QRect(260, 100, 141, 31))
        self.label_29.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);\n"
"border: none;")
        self.label_29.setObjectName("label_29")
        self.etFixed = QtWidgets.QLabel(self.groupBox)
        self.etFixed.setGeometry(QtCore.QRect(390, 100, 101, 31))
        self.etFixed.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);\n"
"border: none;")
        self.etFixed.setObjectName("etFixed")
        self.label_13 = QtWidgets.QLabel(self.groupBox)
        self.label_13.setGeometry(QtCore.QRect(270, 20, 61, 31))
        self.label_13.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);\n"
"border: none;")
        self.label_13.setObjectName("label_13")
        # Presumably the g(x) iteration-function field for fixed point —
        # TODO confirm against the FixedPoint handler.
        self.gx = QtWidgets.QLineEdit(self.groupBox)
        self.gx.setGeometry(QtCore.QRect(340, 20, 131, 21))
        self.gx.setStyleSheet("border: 0;\n"
"border-bottom: 1px solid #999;\n"
"background: transparent;\n"
"outline: none;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);")
        self.gx.setInputMask("")
        self.gx.setText("")
        self.gx.setObjectName("gx")
        self.tabWidget.addTab(self.tab_4, "")
        # --- Regular Falsi tab (tab_3): bounds and results. ---
        self.tab_3 = QtWidgets.QWidget()
        self.tab_3.setObjectName("tab_3")
        self.groupBox_3 = QtWidgets.QGroupBox(self.tab_3)
        self.groupBox_3.setGeometry(QtCore.QRect(-1, -1, 521, 201))
        self.groupBox_3.setStyleSheet("background: transparent;\n"
"")
        self.groupBox_3.setTitle("")
        self.groupBox_3.setObjectName("groupBox_3")
        self.label_18 = QtWidgets.QLabel(self.groupBox_3)
        self.label_18.setGeometry(QtCore.QRect(20, 20, 121, 31))
        self.label_18.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);\n"
"border: none;")
        self.label_18.setObjectName("label_18")
        self.falseLowerBound = QtWidgets.QLineEdit(self.groupBox_3)
        self.falseLowerBound.setGeometry(QtCore.QRect(150, 20, 81, 20))
        self.falseLowerBound.setStyleSheet("border: 0;\n"
"border-bottom: 1px solid #999;\n"
"background: transparent;\n"
"outline: none;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);")
        self.falseLowerBound.setInputMask("")
        self.falseLowerBound.setText("")
        self.falseLowerBound.setObjectName("falseLowerBound")
        self.label_20 = QtWidgets.QLabel(self.groupBox_3)
        self.label_20.setGeometry(QtCore.QRect(260, 20, 121, 31))
        self.label_20.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);\n"
"border: none;")
        self.label_20.setObjectName("label_20")
        self.falseUpperBound = QtWidgets.QLineEdit(self.groupBox_3)
        self.falseUpperBound.setGeometry(QtCore.QRect(390, 20, 81, 21))
        self.falseUpperBound.setStyleSheet("border: 0;\n"
"border-bottom: 1px solid #999;\n"
"background: transparent;\n"
"outline: none;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);")
        self.falseUpperBound.setInputMask("")
        self.falseUpperBound.setText("")
        self.falseUpperBound.setObjectName("falseUpperBound")
        self.falseCalcBtn = QtWidgets.QPushButton(self.groupBox_3)
        self.falseCalcBtn.setGeometry(QtCore.QRect(40, 150, 112, 32))
        self.falseCalcBtn.setStyleSheet("border-radius: 50px;\n"
"background-color: rgb(234, 255, 253);\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(0, 0, 40);")
        self.falseCalcBtn.setObjectName("falseCalcBtn")
        self.falseShowIterBtn = QtWidgets.QPushButton(self.groupBox_3)
        self.falseShowIterBtn.setGeometry(QtCore.QRect(180, 150, 161, 32))
        self.falseShowIterBtn.setStyleSheet("border-radius: 50px;\n"
"background-color: rgb(234, 255, 253);\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(0, 0, 40);")
        self.falseShowIterBtn.setObjectName("falseShowIterBtn")
        self.falseShowGpBtn = QtWidgets.QPushButton(self.groupBox_3)
        self.falseShowGpBtn.setGeometry(QtCore.QRect(370, 150, 112, 32))
        self.falseShowGpBtn.setStyleSheet("border-radius: 50px;\n"
"background-color: rgb(234, 255, 253);\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(0, 0, 40);")
        self.falseShowGpBtn.setObjectName("falseShowGpBtn")
        self.label_21 = QtWidgets.QLabel(self.groupBox_3)
        self.label_21.setGeometry(QtCore.QRect(80, 60, 171, 31))
        self.label_21.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 16px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);\n"
"border: none;")
        self.label_21.setObjectName("label_21")
        self.falseRoot = QtWidgets.QLabel(self.groupBox_3)
        self.falseRoot.setGeometry(QtCore.QRect(240, 60, 141, 31))
        self.falseRoot.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 16px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);\n"
"border: none;")
        self.falseRoot.setObjectName("falseRoot")
        self.label_30 = QtWidgets.QLabel(self.groupBox_3)
        self.label_30.setGeometry(QtCore.QRect(20, 100, 131, 31))
        self.label_30.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);\n"
"border: none;")
        self.label_30.setObjectName("label_30")
        self.excactRootFalse = QtWidgets.QLabel(self.groupBox_3)
        self.excactRootFalse.setGeometry(QtCore.QRect(140, 100, 101, 31))
        self.excactRootFalse.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);\n"
"border: none;")
        self.excactRootFalse.setObjectName("excactRootFalse")
        self.label_31 = QtWidgets.QLabel(self.groupBox_3)
        self.label_31.setGeometry(QtCore.QRect(260, 100, 141, 31))
        self.label_31.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);\n"
"border: none;")
        self.label_31.setObjectName("label_31")
        # NOTE(review): etFalse is parented to self.tab_3, unlike the other
        # et* labels which live inside their group boxes — looks
        # unintentional in the .ui file; confirm before relying on layout.
        self.etFalse = QtWidgets.QLabel(self.tab_3)
        self.etFalse.setGeometry(QtCore.QRect(390, 100, 101, 31))
        self.etFalse.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);\n"
"border: none;")
        self.etFalse.setObjectName("etFalse")
        self.tabWidget.addTab(self.tab_3, "")
        # --- Newton tab (tab_5): first guess and results. ---
        self.tab_5 = QtWidgets.QWidget()
        self.tab_5.setObjectName("tab_5")
        self.groupBox_4 = QtWidgets.QGroupBox(self.tab_5)
        self.groupBox_4.setGeometry(QtCore.QRect(-1, -1, 521, 201))
        self.groupBox_4.setStyleSheet("background: transparent;\n"
"")
        self.groupBox_4.setTitle("")
        self.groupBox_4.setObjectName("groupBox_4")
        self.label_22 = QtWidgets.QLabel(self.groupBox_4)
        self.label_22.setGeometry(QtCore.QRect(20, 20, 121, 31))
        self.label_22.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);\n"
"border: none;")
        self.label_22.setObjectName("label_22")
        self.newtonFirstGuess = QtWidgets.QLineEdit(self.groupBox_4)
        self.newtonFirstGuess.setGeometry(QtCore.QRect(150, 20, 81, 20))
        self.newtonFirstGuess.setStyleSheet("border: 0;\n"
"border-bottom: 1px solid #999;\n"
"background: transparent;\n"
"outline: none;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);")
        self.newtonFirstGuess.setInputMask("")
        self.newtonFirstGuess.setText("")
        self.newtonFirstGuess.setObjectName("newtonFirstGuess")
        self.newtonCalcBtn = QtWidgets.QPushButton(self.groupBox_4)
        self.newtonCalcBtn.setGeometry(QtCore.QRect(40, 150, 112, 32))
        self.newtonCalcBtn.setStyleSheet("border-radius: 50px;\n"
"background-color: rgb(234, 255, 253);\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(0, 0, 40);")
        self.newtonCalcBtn.setObjectName("newtonCalcBtn")
        self.newtonShowIterBtn = QtWidgets.QPushButton(self.groupBox_4)
        self.newtonShowIterBtn.setGeometry(QtCore.QRect(180, 150, 161, 32))
        self.newtonShowIterBtn.setStyleSheet("border-radius: 50px;\n"
"background-color: rgb(234, 255, 253);\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(0, 0, 40);")
        self.newtonShowIterBtn.setObjectName("newtonShowIterBtn")
        self.newtonShowGpBtn = QtWidgets.QPushButton(self.groupBox_4)
        self.newtonShowGpBtn.setGeometry(QtCore.QRect(370, 150, 112, 32))
        self.newtonShowGpBtn.setStyleSheet("border-radius: 50px;\n"
"background-color: rgb(234, 255, 253);\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(0, 0, 40);")
        self.newtonShowGpBtn.setObjectName("newtonShowGpBtn")
        self.label_25 = QtWidgets.QLabel(self.groupBox_4)
        self.label_25.setGeometry(QtCore.QRect(80, 60, 191, 31))
        self.label_25.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 16px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);\n"
"border: none;")
        self.label_25.setObjectName("label_25")
        self.newtonRoot = QtWidgets.QLabel(self.groupBox_4)
        self.newtonRoot.setGeometry(QtCore.QRect(240, 60, 141, 31))
        self.newtonRoot.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 16px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);\n"
"border: none;")
        self.newtonRoot.setObjectName("newtonRoot")
        self.label_32 = QtWidgets.QLabel(self.groupBox_4)
        self.label_32.setGeometry(QtCore.QRect(20, 100, 131, 31))
        self.label_32.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);\n"
"border: none;")
        self.label_32.setObjectName("label_32")
        self.excactRootNewton = QtWidgets.QLabel(self.groupBox_4)
        self.excactRootNewton.setGeometry(QtCore.QRect(140, 100, 101, 31))
        self.excactRootNewton.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);\n"
"border: none;")
        self.excactRootNewton.setObjectName("excactRootNewton")
        self.label_33 = QtWidgets.QLabel(self.groupBox_4)
        self.label_33.setGeometry(QtCore.QRect(260, 100, 141, 31))
        self.label_33.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);\n"
"border: none;")
        self.label_33.setObjectName("label_33")
        self.etNewton = QtWidgets.QLabel(self.groupBox_4)
        self.etNewton.setGeometry(QtCore.QRect(390, 100, 101, 31))
        self.etNewton.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);\n"
"border: none;")
        self.etNewton.setObjectName("etNewton")
        self.tabWidget.addTab(self.tab_5, "")
        # --- Secant tab (tab_2): two initial guesses and results. ---
        self.tab_2 = QtWidgets.QWidget()
        self.tab_2.setObjectName("tab_2")
        self.groupBox_5 = QtWidgets.QGroupBox(self.tab_2)
        self.groupBox_5.setGeometry(QtCore.QRect(-1, -1, 521, 201))
        self.groupBox_5.setStyleSheet("background: transparent;\n"
"")
        self.groupBox_5.setTitle("")
        self.groupBox_5.setObjectName("groupBox_5")
        self.label_15 = QtWidgets.QLabel(self.groupBox_5)
        self.label_15.setGeometry(QtCore.QRect(20, 20, 121, 31))
        self.label_15.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);\n"
"border: none;")
        self.label_15.setObjectName("label_15")
        self.secantFirstGuess = QtWidgets.QLineEdit(self.groupBox_5)
        self.secantFirstGuess.setGeometry(QtCore.QRect(150, 20, 81, 20))
        self.secantFirstGuess.setStyleSheet("border: 0;\n"
"border-bottom: 1px solid #999;\n"
"background: transparent;\n"
"outline: none;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);")
        self.secantFirstGuess.setInputMask("")
        self.secantFirstGuess.setText("")
        self.secantFirstGuess.setObjectName("secantFirstGuess")
        self.label_24 = QtWidgets.QLabel(self.groupBox_5)
        self.label_24.setGeometry(QtCore.QRect(260, 20, 121, 31))
        self.label_24.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);\n"
"border: none;")
        self.label_24.setObjectName("label_24")
        self.secantSecondGuess = QtWidgets.QLineEdit(self.groupBox_5)
        self.secantSecondGuess.setGeometry(QtCore.QRect(390, 20, 81, 21))
        self.secantSecondGuess.setStyleSheet("border: 0;\n"
"border-bottom: 1px solid #999;\n"
"background: transparent;\n"
"outline: none;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);")
        self.secantSecondGuess.setInputMask("")
        self.secantSecondGuess.setText("")
        self.secantSecondGuess.setObjectName("secantSecondGuess")
        self.secantCalcBtn = QtWidgets.QPushButton(self.groupBox_5)
        self.secantCalcBtn.setGeometry(QtCore.QRect(40, 150, 112, 32))
        self.secantCalcBtn.setStyleSheet("border-radius: 50px;\n"
"background-color: rgb(234, 255, 253);\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(0, 0, 40);")
        self.secantCalcBtn.setObjectName("secantCalcBtn")
        self.secantShowIterBtn = QtWidgets.QPushButton(self.groupBox_5)
        self.secantShowIterBtn.setGeometry(QtCore.QRect(180, 150, 161, 32))
        self.secantShowIterBtn.setStyleSheet("border-radius: 50px;\n"
"background-color: rgb(234, 255, 253);\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(0, 0, 40);")
        self.secantShowIterBtn.setObjectName("secantShowIterBtn")
        self.secantShowGpBtn = QtWidgets.QPushButton(self.groupBox_5)
        self.secantShowGpBtn.setGeometry(QtCore.QRect(370, 150, 112, 32))
        self.secantShowGpBtn.setStyleSheet("border-radius: 50px;\n"
"background-color: rgb(234, 255, 253);\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(0, 0, 40);")
        self.secantShowGpBtn.setObjectName("secantShowGpBtn")
        self.label_27 = QtWidgets.QLabel(self.groupBox_5)
        self.label_27.setGeometry(QtCore.QRect(80, 60, 171, 31))
        self.label_27.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 16px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);\n"
"border: none;")
        self.label_27.setObjectName("label_27")
        self.secantRoot = QtWidgets.QLabel(self.groupBox_5)
        self.secantRoot.setGeometry(QtCore.QRect(240, 60, 141, 31))
        self.secantRoot.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 16px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);\n"
"border: none;")
        self.secantRoot.setObjectName("secantRoot")
        self.label_34 = QtWidgets.QLabel(self.groupBox_5)
        self.label_34.setGeometry(QtCore.QRect(20, 100, 131, 31))
        self.label_34.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);\n"
"border: none;")
        self.label_34.setObjectName("label_34")
        self.excactRootSecant = QtWidgets.QLabel(self.groupBox_5)
        self.excactRootSecant.setGeometry(QtCore.QRect(140, 100, 101, 31))
        self.excactRootSecant.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);\n"
"border: none;")
        self.excactRootSecant.setObjectName("excactRootSecant")
        self.label_35 = QtWidgets.QLabel(self.groupBox_5)
        self.label_35.setGeometry(QtCore.QRect(260, 100, 141, 31))
        self.label_35.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);\n"
"border: none;")
        self.label_35.setObjectName("label_35")
        self.etSecant = QtWidgets.QLabel(self.groupBox_5)
        self.etSecant.setGeometry(QtCore.QRect(390, 100, 101, 31))
        self.etSecant.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);\n"
"border: none;")
        self.etSecant.setObjectName("etSecant")
        self.tabWidget.addTab(self.tab_2, "")
        # Global solver parameters shared by all methods: iteration cap
        # and convergence epsilon.
        self.label_5 = QtWidgets.QLabel(self.centralWidget)
        self.label_5.setGeometry(QtCore.QRect(440, 90, 161, 31))
        self.label_5.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 18px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);")
        self.label_5.setObjectName("label_5")
        self.maxIterations = QtWidgets.QSpinBox(self.centralWidget)
        self.maxIterations.setGeometry(QtCore.QRect(620, 81, 91, 41))
        self.maxIterations.setStyleSheet("border: 0;\n"
"border-bottom: 1px solid #999;\n"
"background: transparent;\n"
"outline: none;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);")
        self.maxIterations.setMaximum(100000000)
        self.maxIterations.setProperty("value", 50)
        self.maxIterations.setObjectName("maxIterations")
        self.label_6 = QtWidgets.QLabel(self.centralWidget)
        self.label_6.setGeometry(QtCore.QRect(440, 140, 161, 31))
        self.label_6.setStyleSheet("background: transparent;\n"
"color: rgb(234, 255, 253);\n"
"margin: 5px;\n"
"font-size: 18px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);")
        self.label_6.setObjectName("label_6")
        self.Epsilon = QtWidgets.QDoubleSpinBox(self.centralWidget)
        self.Epsilon.setGeometry(QtCore.QRect(620, 130, 91, 41))
        self.Epsilon.setStyleSheet("border: 0;\n"
"border-bottom: 1px solid #999;\n"
"background: transparent;\n"
"outline: none;\n"
"font-size: 14px;\n"
"font-family: \"Lucida Console\", \"Courier New\", monospace;\n"
"color: rgb(234, 255, 253);")
        self.Epsilon.setDecimals(6)
        self.Epsilon.setMaximum(1.99)
        self.Epsilon.setProperty("value", 1e-05)
        self.Epsilon.setObjectName("Epsilon")
        MainWindow.setCentralWidget(self.centralWidget)
        self.retranslateUi(MainWindow)
        self.tabWidget.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
        # Hand-added after generation: connect each method's Calculate
        # button (and the bisection graph button) to its handler.
        self.bisectionCalcBtn.clicked.connect(self.BisectionCalcBtnClicked)
        self.falseCalcBtn.clicked.connect(self.RegularFalseCalcBtnClicked)
        self.secantCalcBtn.clicked.connect(self.SecantCalcBtnClicked)
        self.fixedCalcBtn.clicked.connect(self.FixedPointCalcBtnClicked)
        self.newtonCalcBtn.clicked.connect(self.NewtonCalcBtnClicked)
        self.bisectionShowGpBtn.clicked.connect(self.graphBisectionBtnClicked)
def BisectionCalcBtnClicked(self):
# Same for all methods
function = ""
maxIteration = 0
epsilon = 0
lowerBound = 0
upperBound = 0
if (self.checkBox.isChecked()):
file = open('../View/input.txt', "r")
function = file.readline()
maxIteration = int(file.readline())
epsilon = float(file.readline())
# Bisection parameters
lowerBound = float(file.readline())
upperBound = float(file.readline())
else:
function = self.functionLineEdit.text()
maxIteration = int(self.maxIterations.text())
epsilon = float(self.Epsilon.text())
# Bisection parameters
lowerBound = float(self.bisectionLowerBound.text())
upperBound = float(self.bisectionUpperBound.text())
start_time = time.time()
result = Bisection.mainFunc(function, maxIteration, epsilon, lowerBound, upperBound)
self.bisectionRoot.setText(str(result))
end_time = time.time()
t2 = end_time - start_time
self.etBisection.setText("%.6f s" % t2)
self.excactRootBisection.setText(str(Excact.mainFunc(function)))
def graphBisectionBtnClicked(self):
# Same for all methods
if (self.checkBox.isChecked()):
file = open('../View/input.txt', "r")
self.function = file.readline()
self.maxIteration = int(file.readline())
self.epsilon = float(file.readline())
# Bisection parameters
self.lowerBound = float(file.readline())
self.upperBound = float(file.readline())
else:
self.function = self.functionLineEdit.text()
self.maxIteration = int(self.maxIterations.text())
self.epsilon = float(self.Epsilon.text())
# Bisection parameters
self.lowerBound = float(self.bisectionLowerBound.text())
self.upperBound = float(self.bisectionUpperBound.text())
self.window = QtWidgets.QMainWindow()
self.ui = Ui_SecondWindow(self.function, self.maxIteration, self.epsilon, self.lowerBound, self.upperBound)
self.ui.setupUi(self.window)
# MainWindow.hide()
self.window.show()
def RegularFalseCalcBtnClicked(self):
# Same for all methods
function = ""
maxIteration = 0
epsilon = 0
lowerBound = 0
upperBound = 0
if (self.checkBox.isChecked()):
file = open('../View/input.txt', "r")
function = file.readline()
maxIteration = int(file.readline())
epsilon = float(file.readline())
# Bisection parameters
lowerBound = float(file.readline())
upperBound = float(file.readline())
else:
function = self.functionLineEdit.text()
maxIteration = int(self.maxIterations.text())
epsilon = float(self.Epsilon.text())
# Bisection parameters
lowerBound = float(self.falseLowerBound.text())
upperBound = float(self.falseUpperBound.text())
start_time = time.time()
result = RegularFalse.mainFunc(function, maxIteration, epsilon, lowerBound, upperBound)
self.falseRoot.setText(str(result))
end_time = time.time()
t2 = end_time - start_time
self.etFalse.setText("%.6f s" % t2)
self.excactRootFalse.setText(str(Excact.mainFunc(function)))
def SecantCalcBtnClicked(self):
# Same for all methods
function = ""
maxIteration = 0
epsilon = 0
firstGuess = 0
secondGuess = 0
if (self.checkBox.isChecked()):
file = open('../View/input.txt', "r")
function = file.readline()
maxIteration = int(file.readline())
epsilon = float(file.readline())
# Bisection parameters
firstGuess = float(file.readline())
secondGuess = float(file.readline())
else:
function = self.functionLineEdit.text()
maxIteration = int(self.maxIterations.text())
epsilon = float(self.Epsilon.text())
# Bisection parameters
firstGuess = float(self.secantFirstGuess.text())
secondGuess = float(self.secantSecondGuess.text())
start_time = time.time()
result = Secant.mainFunc(function, maxIteration, epsilon, firstGuess, secondGuess)
self.secantRoot.setText(str(result))
end_time = time.time()
t2 = end_time - start_time
self.etSecant.setText("%.6f s" % t2)
self.excactRootSecant.setText(str(Excact.mainFunc(function)))
def FixedPointCalcBtnClicked(self):
# Same for all methods
function = ""
maxIteration = 0
epsilon = 0
firstGuess = 0
if (self.checkBox.isChecked()):
file = open('../View/input.txt', "r")
function = file.readline()
maxIteration = int(file.readline())
epsilon = float(file.readline())
# Bisection parameters
firstGuess = float(file.readline())
file.readline()
g_x = file.readline()
else:
function = self.functionLineEdit.text()
maxIteration = int(self.maxIterations.text())
epsilon = float(self.Epsilon.text())
# Fixed Point parameters
firstGuess = float(self.fixedFirstGuess.text())
gxFunction = self.gx.text()
start_time = time.time()
result = Fixed.mainFunc(function, gxFunction, maxIteration, epsilon, firstGuess)
self.fixedRoot.setText(str(result))
end_time = time.time()
t2 = end_time - start_time
self.etFixed.setText("%.6f s" % t2)
self.excactRootFixed.setText(str(Excact.mainFunc(function)))
def NewtonCalcBtnClicked(self):
# Same for all methods
function = ""
maxIteration = 0
epsilon = 0
firstGuess = 0
if (self.checkBox.isChecked()):
file = open('../View/input.txt', "r")
function = file.readline()
maxIteration = int(file.readline())
epsilon = float(file.readline())
# Bisection parameters
firstGuess = float(file.readline())
else:
function = self.functionLineEdit.text()
maxIteration = int(self.maxIterations.text())
epsilon = float(self.Epsilon.text())
# Fixed Point parameters
firstGuess = float(self.newtonFirstGuess.text())
start_time = time.time()
result = Newton.mainFunc(function, maxIteration, epsilon, firstGuess)
self.newtonRoot.setText(str(result))
end_time = time.time()
t2 = end_time - start_time
self.etNewton.setText("%.6f s" % t2)
self.excactRootNewton.setText(str(Excact.mainFunc(function)))
    def retranslateUi(self, MainWindow):
        """Set (re-translatable) display text on every widget.

        Qt-Designer-generated method; kept byte-identical apart from comments.
        """
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "NUMERICAL PROJECT"))
        self.label.setText(_translate("MainWindow", "Numerical Project"))
        self.label_2.setText(_translate("MainWindow", "Function:"))
        self.checkBox.setText(_translate("MainWindow", "Select from File"))
        self.label_3.setText(_translate("MainWindow", "Method:"))
        # Bisection tab
        self.label_16.setText(_translate("MainWindow", "Lower Bound:"))
        self.label_17.setText(_translate("MainWindow", "Upper Bound:"))
        self.bisectionCalcBtn.setText(_translate("MainWindow", "Calculate"))
        self.bisectionShowIterBtn.setText(_translate("MainWindow", "Show Iterations"))
        self.bisectionShowGpBtn.setText(_translate("MainWindow", "Show graph"))
        self.label_19.setText(_translate("MainWindow", "Root Result: X("))
        self.bisectionRoot.setText(_translate("MainWindow", "0 ): 0"))
        self.label_23.setText(_translate("MainWindow", "Excact Root X:"))
        self.label_26.setText(_translate("MainWindow", "Execution Time:"))
        self.excactRootBisection.setText(_translate("MainWindow", "0"))
        self.etBisection.setText(_translate("MainWindow", "0 ms"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.Bisection), _translate("MainWindow", "Bisection"))
        # Fixed point tab
        self.label_12.setText(_translate("MainWindow", "First Guess:"))
        self.fixedCalcBtn.setText(_translate("MainWindow", "Calculate"))
        self.fixedShowIterBtn.setText(_translate("MainWindow", "Show Iterations"))
        self.fixedShowGpBtn.setText(_translate("MainWindow", "Show graph"))
        self.label_14.setText(_translate("MainWindow", "Root Result: X("))
        self.fixedRoot.setText(_translate("MainWindow", "0 ): 0"))
        self.label_28.setText(_translate("MainWindow", "Excact Root X:"))
        self.excactRootFixed.setText(_translate("MainWindow", "0"))
        self.label_29.setText(_translate("MainWindow", "Execution Time:"))
        self.etFixed.setText(_translate("MainWindow", "0 ms"))
        self.label_13.setText(_translate("MainWindow", "g(x):"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_4), _translate("MainWindow", "Fixed point"))
        # False-position tab
        self.label_18.setText(_translate("MainWindow", "Lower Bound:"))
        self.label_20.setText(_translate("MainWindow", "Upper Bound:"))
        self.falseCalcBtn.setText(_translate("MainWindow", "Calculate"))
        self.falseShowIterBtn.setText(_translate("MainWindow", "Show Iterations"))
        self.falseShowGpBtn.setText(_translate("MainWindow", "Show graph"))
        self.label_21.setText(_translate("MainWindow", "Root Result: X("))
        self.falseRoot.setText(_translate("MainWindow", "0 ): 0"))
        self.label_30.setText(_translate("MainWindow", "Exact Root X:"))
        self.excactRootFalse.setText(_translate("MainWindow", "0"))
        self.label_31.setText(_translate("MainWindow", "Execution Time:"))
        self.etFalse.setText(_translate("MainWindow", "0 ms"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), _translate("MainWindow", "False-position"))
        # Newton-Raphson tab
        self.label_22.setText(_translate("MainWindow", "First Guess:"))
        self.newtonCalcBtn.setText(_translate("MainWindow", "Calculate"))
        self.newtonShowIterBtn.setText(_translate("MainWindow", "Show Iterations"))
        self.newtonShowGpBtn.setText(_translate("MainWindow", "Show graph"))
        self.label_25.setText(_translate("MainWindow", "Root Result: X("))
        self.newtonRoot.setText(_translate("MainWindow", "0 ): 0"))
        self.label_32.setText(_translate("MainWindow", "Excact Root X:"))
        self.excactRootNewton.setText(_translate("MainWindow", "0"))
        self.label_33.setText(_translate("MainWindow", "Execution Time:"))
        self.etNewton.setText(_translate("MainWindow", "0 ms"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_5), _translate("MainWindow", "Newton-Raphson"))
        # Secant tab
        self.label_15.setText(_translate("MainWindow", "First Guess:"))
        self.label_24.setText(_translate("MainWindow", "Second Guess:"))
        self.secantCalcBtn.setText(_translate("MainWindow", "Calculate"))
        self.secantShowIterBtn.setText(_translate("MainWindow", "Show Iterations"))
        self.secantShowGpBtn.setText(_translate("MainWindow", "Show graph"))
        self.label_27.setText(_translate("MainWindow", "Root Result: X("))
        self.secantRoot.setText(_translate("MainWindow", "0 ): 0"))
        self.label_34.setText(_translate("MainWindow", "Excact Root X:"))
        self.excactRootSecant.setText(_translate("MainWindow", "0"))
        self.label_35.setText(_translate("MainWindow", "Execution Time:"))
        self.etSecant.setText(_translate("MainWindow", "0 ms"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "Secant"))
        # Controls shared by all tabs
        self.label_5.setText(_translate("MainWindow", "Max Iteration:"))
        self.label_6.setText(_translate("MainWindow", "Epsilon:"))
if __name__ == "__main__":
    # Standard Qt bootstrap: build the application, attach the generated UI
    # to a fresh main window and enter the event loop.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
|
import kopf
import re
from kubernetes import client, config
@kopf.on.delete('clustersecret.io', 'v1', 'clustersecrets')
def on_delete(spec, body, name, logger=None, **_):
    """Delete the propagated secret from every namespace it was synced to.

    Fixes: log message typo ("naespace"); tolerate a missing status entry
    (e.g. deletion before create_fn ever completed) instead of raising
    KeyError and blocking finalization.
    """
    syncedns = body.get('status', {}).get('create_fn', {}).get('syncedns', [])
    v1 = client.CoreV1Api()
    for ns in syncedns:
        logger.info(f'deleting secret {name} from namespace {ns}')
        v1.delete_namespaced_secret(name, ns)
@kopf.on.field('clustersecret.io', 'v1', 'clustersecrets', field='data')
def on_field_data(old, new, body, name, logger=None, **_):
    """Propagate a change of `.data` to the secret copy in every synced namespace.

    Fix: the loop used to rebind the handler parameter ``body`` to the
    V1Secret payload, clobbering the event body — harmless today only because
    ``body`` is read before the loop; use a dedicated local instead.
    """
    logger.debug('----------------')
    logger.info(f'Data changed: {old} -> {new}')
    syncedns = body['status']['create_fn']['syncedns']
    v1 = client.CoreV1Api()
    for ns in syncedns:
        logger.info(f'patching secret {name} in ns {ns}')
        metadata = {'name': name, 'namespace': ns}
        secret_body = client.V1Secret('v1', new, 'Secret', metadata, type='kubernetes.io/tls')
        # replace (not patch) so removed keys disappear from the copy too
        response = v1.replace_namespaced_secret(name, ns, secret_body)
        logger.debug(response)
@kopf.on.resume('clustersecret.io', 'v1', 'clustersecrets')
@kopf.on.create('clustersecret.io', 'v1', 'clustersecrets')
def create_fn(spec, logger=None, body=None, **kwargs):
    """Sync the cluster secret into every matching namespace.

    Returns {'syncedns': [...]} which kopf stores under status.create_fn and
    which on_delete/on_field_data read back.

    Fix: ``dict.get`` never raises KeyError, so the original try/except
    defaults were dead code and a missing field left ``None`` to crash
    get_ns_list. Defaults are now applied explicitly and are iterables of
    regex patterns, as get_ns_list expects.
    """
    matchNamespace = body.get('matchNamespace')
    if not matchNamespace:
        matchNamespace = ['.*']  # default: match every namespace
        logger.debug("matching all namespaces.")
    logger.debug(f'Matching namespaces: {matchNamespace}')
    avoidNamespaces = body.get('avoidNamespaces')
    if not avoidNamespaces:
        avoidNamespaces = []
        logger.debug("not avoiding namespaces")
    try:
        name = body['metadata']['name']
        logger.debug(f"name: {name}")
    except KeyError:
        logger.debug("No name ?")
        raise kopf.TemporaryError("can not get the name.")
    data = body.get('data')
    if data is None:
        data = ''
        logger.debug("Empty secret??")
    v1 = client.CoreV1Api()
    matchedns = get_ns_list(v1, logger, matchNamespace, avoidNamespaces)
    # sync in all matched NS
    for ns in matchedns:
        create_secret(v1, logger, ns, name, data)
    return {'syncedns': matchedns}
def get_ns_list(v1, logger, matchNamespace, avoidNamespaces):
    """Return the namespaces matching any pattern in *matchNamespace* and not
    matching any pattern in *avoidNamespaces*.

    Both arguments are iterables of regex patterns (matched with re.match).

    Fix: the purge step removed items from ``matchedns`` while iterating the
    same list, which silently skipped the element following each removal.
    """
    nss = v1.list_namespace().items
    matchedns = []
    avoidedns = []
    for matchns in matchNamespace:
        for ns in nss:
            if re.match(matchns, ns.metadata.name):
                matchedns.append(ns.metadata.name)
                logger.info(f'Matched namespaces: {ns.metadata.name} matchpathern: {matchns}')
    for avoidns in avoidNamespaces:
        for ns in nss:
            if re.match(avoidns, ns.metadata.name):
                avoidedns.append(ns.metadata.name)
                logger.info(f'Skipping namespaces: {ns.metadata.name} avoidpatrn: {avoidns}')
    # purge without mutating the list being iterated
    matchedns = [ns for ns in matchedns if ns not in avoidedns]
    logger.info(f'Syncing on Namespaces: {matchedns}')
    return matchedns
def create_secret(v1, logger, namespace, name, data):
    """Create a kubernetes.io/tls secret *name* in *namespace*.

    Returns 0 on success (or on an already-exists conflict), 1 on any
    other API error.
    """
    secret = client.V1Secret(
        'v1',
        data,
        'Secret',
        {'name': name, 'namespace': namespace},
        type='kubernetes.io/tls',
    )
    # kopf.adopt(secret)
    try:
        api_response = v1.create_namespaced_secret(namespace, secret)
    except client.rest.ApiException as e:
        if e.reason == 'Conflict':
            # Treat "already exists" as success: the secret is there.
            logger.info(f"Conflict creating the secret: It may be already a `{name}` secret in namespace: '{namespace}'")
            return 0
        logger.error(f'Can not create a secret, it is base64 encoded? data: {data}')
        logger.error(f'Kube exception {e}')
        return 1
    return 0
|
import streamlit as st
import json
from io import StringIO
from pybtex.database import parse_string
from datosConexion import conectarBd
from clases import Paper
def mostrarSeccionCarga():
    """Streamlit upload section: parse a BibTeX file and persist each entry
    as a Paper document, reporting progress and entries without a DOI.

    NOTE(review): entries are saved one-by-one inside the loop; duplicates
    are presumably handled by the Paper model — confirm against `clases.Paper`.
    """
    conectarBd()
    uploaded_file = st.file_uploader("Archivo Bibtex con la información de los papers")
    # Count before the load so the success message reflects net insertions.
    before = len(Paper.objects)
    if uploaded_file is not None:
        # To read file as bytes:
        bytes_data = uploaded_file.read()
        data = bytes_data.decode("utf-8")
        bib_data = parse_string(data, 'bibtex')
        notdoi = []   # titles skipped because the entry has no DOI
        papers = []   # Paper documents actually saved
        with st.spinner("Preprocesando el archivo para la carga..."):
            total = sum(1 for entry in bib_data.entries.values())
        st.success("Se iniciará la carga de "+str(total)+" papers a la base de datos.")
        my_bar = st.progress(.0)
        loaded = 0
        for entry in bib_data.entries.values():
            fields = entry.fields
            # Strip BibTeX brace-protection from the title.
            title = fields["title"].replace('{', '').replace('}', '')
            doi = fields.get("doi")
            isOnlyReference = False
            loaded+=1
            my_bar.progress(loaded/total)
            if doi is None:
                # Entries without a DOI cannot be de-duplicated; skip them.
                notdoi.append(title)
                continue
            abstract = fields.get("abstract","")
            paper = Paper(title = title, doi = doi , abstract = abstract, isOnlyReference = isOnlyReference).save()
            papers.append(paper)
        after = len(Paper.objects)
        st.success("Se ingresaron "+ str(after-before) + " papers a la base de datos")
        st.write([x.title for x in papers])
        if len(notdoi):
            st.error ("No se pudo ingresar " + str(len(notdoi)) + " debido a que no se conocía su doi")
            st.write(notdoi)
|
# coding with UTF-8
# ******************************************
# *****CIFAR-10 with ResNet8 in Pytorch*****
# *****test_classify.py *****
# *****Author:Shiyi Liu *****
# *****Time: Oct 22nd, 2019 *****
# ******************************************
import torch
import torch.backends.cudnn as cudnn
import conv_visual
import train_classify
import argparse
import numpy as np
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import warnings
warnings.filterwarnings("ignore")
PTH = r'model/2019-10-25-00-25-26/best_model_with_0.9203_acc.pth'
VPATH = 'plots/test/'
def load_model_from_path(pthfile):
    """Deserialize a checkpointed model from *pthfile*.

    Fix: pass ``map_location`` so a checkpoint saved on a GPU machine can
    still be loaded on a CPU-only host instead of raising a CUDA error.
    """
    map_location = None if torch.cuda.is_available() else 'cpu'
    return torch.load(pthfile, map_location=map_location)
def test():
    """Evaluate the globally-loaded model on the test set.

    Relies on module-level globals set in the __main__ block: ``model``,
    ``test_loader``, ``loss_function`` and ``device``.

    Returns (average loss, accuracy in [0, 1]).
    """
    model.eval()
    batch_loss = 0.0   # accumulated per-batch loss
    acc = 0.0          # number of correct predictions
    total = 0.0        # number of samples seen
    with torch.no_grad():
        for batch_num, (img, label) in enumerate(test_loader):
            img, label = img.to(device), label.to(device)
            output = model(img)
            loss = loss_function(output, label)
            batch_loss += loss.item()
            # torch.max over dim 1 returns (values, argmax indices).
            outlabel = torch.max(output, 1)
            total += label.size(0)
            acc += np.sum(outlabel[1].cpu().numpy() == label.cpu().numpy())
    print('Test set: Average loss: {:.4f}, Accuracy: {:.4f}% ({acc}/{total})'.format(
        batch_loss / len(test_loader),
        100.0 * acc / total,
        acc=int(acc),
        total=int(total)
    ))
    # NOTE(review): printed average divides by len(test_loader) (batches) but
    # the returned one divides by len(test_loader.dataset) (samples) — confirm
    # which is intended by callers.
    return batch_loss / len(test_loader.dataset), acc/total
if __name__ == '__main__':
    # CLI entry point: load a checkpoint, evaluate it on CIFAR-10 and dump
    # filter / feature-map visualizations.
    parser = argparse.ArgumentParser(description="cifar-10 with ResNet18 in PyTorch")
    parser.add_argument('--visnum', default=100, type=int, help='visual num')
    parser.add_argument('--testBatchSize', default=100, type=int, help='testing batch size')
    parser.add_argument('--pth', default=PTH, help='path to the model checkpoint')  # help text fixed
    parser.add_argument('--cuda', default=torch.cuda.is_available(), type=bool, help='whether cuda is in use')
    args = parser.parse_args()
    pthfile = args.pth
    device = train_classify.get_device()
    model = load_model_from_path(pthfile).to(device)
    mean, std = train_classify.load_mestd()
    test_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)])
    test_set = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=test_transform)
    # BUG fix: the batch size was hard-coded to 16, silently ignoring the
    # --testBatchSize argument parsed above.
    test_loader = DataLoader(dataset=test_set, batch_size=args.testBatchSize)
    loss_function = torch.nn.CrossEntropyLoss().to(device)
    test()
    conv_visual.visual_filter(model, VPATH)
    conv_visual.visual_feature_map(model, VPATH, device)
|
# -*- coding: utf-8 -*-
import ctypes as ct
from .trezor_ctypes import *
from .trezor_cfunc_gen import *
# --- scalar (modm) helpers -------------------------------------------------
# Thin ctypes wrappers over the trezor-crypto library. `cl()` returns the
# loaded shared library, `tt` holds the ctypes struct types. Functions with
# an `_r` suffix allocate and return the result themselves; the others write
# into a caller-supplied out-parameter `r` and return it for chaining.
def random_buffer_r(sz):
    # Fill and return a fresh `sz`-byte buffer from the library RNG.
    buff = (ct.c_uint8 * sz)()
    cl().random_buffer(buff, sz)
    return bytes(buff)
def init256_modm(r, a):
    # Set scalar r := a (64-bit unsigned integer).
    cl().set256_modm(r, ct.c_uint64(a))
    return r
def init256_modm_r(a):
    # Allocate a new scalar initialized to the 64-bit value a.
    r = tt.MODM()
    cl().set256_modm(r, ct.c_uint64(a))
    return r
def get256_modm_r(a):
    # Convert scalar a back to a Python int; the C call fails (returns falsy)
    # when a does not fit into 64 bits.
    r = ct.c_uint64()
    res = cl().get256_modm(ct.byref(r), a)
    if not res:
        raise ValueError('Get256_modm failed')
    return r.value
def expand256_modm(r, buff, ln=None):
    # Reduce the byte buffer into scalar r; ln defaults to len(buff).
    cl().expand256_modm(r, buff, len(buff) if ln is None else ln)
    return r
def expand256_modm_r(buff, ln=None):
    # Allocating variant of expand256_modm.
    m = tt.MODM()
    cl().expand256_modm(m, buff, len(buff) if ln is None else ln)
    return m
# --- ed25519 point helpers -------------------------------------------------
def curve25519_clone(a):
    # Return a deep copy of point a.
    r = tt.Ge25519()
    cl().curve25519_copy(r, a)
    return r
def new_ge25519():
    # Allocate an uninitialized point structure.
    return tt.Ge25519()
def ge25519_unpack_vartime_r(buff):
    # Decode a packed 32-byte point; raises if buff is not a valid encoding.
    # NOTE: variable-time — do not use on secret data.
    pt = tt.Ge25519()
    # buff = tt.KEY_BUFF(*buff)
    r = cl().ge25519_unpack_vartime(ct.byref(pt), buff)
    if r != 1:
        raise ValueError('Point decoding error')
    return pt
def ge25519_unpack_vartime(pt, buff):
    # Same as above but decodes into the caller-supplied point pt.
    r = cl().ge25519_unpack_vartime(ct.byref(pt), buff)
    if r != 1:
        raise ValueError('Point decoding error')
    return pt
# --- Monero hashing helpers ------------------------------------------------
def xmr_fast_hash_r(a, ln=None):
    # Keccak-style fast hash of a; returns the 32-byte digest as bytes.
    r = tt.KEY_BUFF()
    cl().xmr_fast_hash(r, a, len(a) if ln is None else ln)
    return bytes(r)
def xmr_hasher_update(h, buff, ln=None):
    # Feed buff into an incremental hasher state h.
    cl().xmr_hasher_update(ct.byref(h), buff, len(buff) if ln is None else ln)
def xmr_hash_to_scalar(r, a, ln=None):
    # Hash a into scalar out-parameter r.
    return cl().xmr_hash_to_scalar(r, a, len(a) if ln is None else ln)
def xmr_hash_to_scalar_r(a, ln=None):
    # Allocating variant of xmr_hash_to_scalar.
    r = tt.MODM()
    cl().xmr_hash_to_scalar(r, a, len(a) if ln is None else ln)
    return r
def xmr_hash_to_ec(r, a, ln=None):
    # Hash a onto the curve into point out-parameter r.
    return cl().xmr_hash_to_ec(ct.byref(r), a, len(a) if ln is None else ln)
def xmr_hash_to_ec_r(a, ln=None):
    # Allocating variant of xmr_hash_to_ec.
    r = tt.Ge25519()
    cl().xmr_hash_to_ec(ct.byref(r), a, len(a) if ln is None else ln)
    return r
def gen_range_proof(amount, last_mask):
    """
    Trezor crypto range proof

    :param amount: integer amount to prove the range for
    :param last_mask: optional MODM mask of the previous output; passed by
        reference to the C call when present, else NULL
    :return: (C, mask, rsig) — commitment point, commitment mask and the
        generated range signature structure
    """
    rsig = tt.XmrRangeSig()
    C = tt.Ge25519()
    mask = tt.MODM()
    # NULL pointer when no previous mask is supplied.
    last_mask_ptr = ct.byref(last_mask) if last_mask else None
    cl().xmr_gen_range_sig(ct.byref(rsig), ct.byref(C), mask, amount, last_mask_ptr)
    return C, mask, rsig
|
import json
import re
def extract(title):
    """Return the 'text' field of the article whose 'title' equals *title*.

    Scans the JSON-lines dump named by the module-level ``file`` variable;
    returns None when no article matches.
    """
    with open(file) as dump:
        for raw_line in dump:
            article = json.loads(raw_line)
            if article['title'] == title:
                return article['text']
def clean(text):
    """Strip MediaWiki markup from *text*: emphasis quotes, internal links,
    {{lang ...}} templates, external http links and <br>/<ref> tags.

    Fix: the tag pattern used ``[br | ref]`` — a character class matching the
    single characters b, r, |, space, e, f — so any tag starting with one of
    those (e.g. <blockquote>) was stripped. Use a real alternation instead.
    """
    pattern_emphasis = re.compile(r' (\'{2,5}) (.*?) (\1) ', re.MULTILINE + re.VERBOSE)
    text = pattern_emphasis.sub(r'\2', text)
    pattern_interlink = re.compile(r' \[\[ (?: [^|] *? \|) *? ([^|]*?) \]\] ', re.MULTILINE + re.VERBOSE)
    text = pattern_interlink.sub(r'\1', text)
    pattern_language = re.compile(r' \{\{lang (?: [^|] *? \|) *? ([^|]*?) \}\} ', re.MULTILINE + re.VERBOSE)
    text = pattern_language.sub(r'\1', text)
    pattern_outerlink = re.compile(r' \[http:\/\/ (?: [^\s] *? \s) ? ([^]] *?) \] ', re.MULTILINE + re.VERBOSE)
    text = pattern_outerlink.sub(r'\1', text)
    pattern_br_ref = re.compile(r' < \/? (?: br | ref ) [^>] *? > ', re.MULTILINE + re.VERBOSE)
    text = pattern_br_ref.sub('', text)
    return text
# Script body: pull the イギリス (UK) article from the JSON-lines dump,
# isolate its 基礎情報 (basic information) template and print each field
# (markup-cleaned) in its original order.
file = 'jawiki-country.json'
# DOTALL lets (.*?) span the whole multi-line template body.
pattern_basic_information = re.compile(r' ^\{\{基礎情報.*?$ (.*?) ^\}\}$ ', re.MULTILINE + re.VERBOSE + re.DOTALL)
basic_information = pattern_basic_information.findall(extract('イギリス'))
# One "|name = value" field per match; lookaheads stop at the next field or
# at the end of the template body.
pattern_fields = re.compile(r' ^\| (.+?) \s* = \s* (.+?) (?: (?= \n\|) | (?= \n$) ) ', re.MULTILINE + re.VERBOSE + re.DOTALL)
fields = pattern_fields.findall(basic_information[0])
res = {}
keys = []  # remembers insertion order so the printout keeps template order
for field in fields:
    res[field[0]] = clean(field[1])
    keys.append(field[0])
for item in sorted(res.items(), key = lambda field: keys.index(field[0])):
    print(item)
|
import NvRules
def get_identifier():
    """Unique rule identifier used by Nsight Compute."""
    return "CPIStallMioThrottle"
def get_name():
    """Human-readable rule name."""
    return "CPI Stall 'MIO Throttle'"
def get_description():
    """Short description shown in the rule listing."""
    return "Warp stall analysis for 'MIO Throttle' issues"
def get_section_identifier():
    """Report section this rule attaches to."""
    return "WarpStateStats"
def apply(handle):
    """Warn when warps spend a large share of their cycles stalled on a full
    MIO instruction queue while the SM sub-partitions are under-issuing.

    Fix: the metric-name switch used ``ccMajor == 7 and ccMinor >= 0``
    (ccMinor >= 0 is always true), so compute capability 8.x and newer fell
    through to the legacy pre-Volta metric names; use ``ccMajor >= 7``.
    """
    ctx = NvRules.get_context(handle)
    action = ctx.range_by_idx(0).action_by_idx(0)
    fe = ctx.frontend()
    ccMajor = action.metric_by_name("device__attribute_compute_capability_major").as_uint64()
    ccMinor = action.metric_by_name("device__attribute_compute_capability_minor").as_uint64()
    if ccMajor >= 7:
        # Volta and newer expose the restructured metric names.
        issueActive = action.metric_by_name("smsp__issue_active.avg.per_cycle_active").as_double()
        warpCyclesPerStall = action.metric_by_name("smsp__average_warps_issue_stalled_mio_throttle_per_issue_active.ratio").as_double()
        warpCyclesPerIssue = action.metric_by_name("smsp__average_warps_active_per_issue_active.ratio").as_double()
    else:
        issueActive = action.metric_by_name("smsp__issue_active_avg_per_active_cycle").as_double()
        warpCyclesPerStall = action.metric_by_name("smsp__warp_cycles_per_issue_stall_mio_throttle").as_double()
        warpCyclesPerIssue = action.metric_by_name("smsp__warp_cycles_per_issue_active").as_double()
    # Flag only when issue rate is low AND this stall reason dominates (>30%).
    if issueActive < 0.8 and 0.3 < (warpCyclesPerStall / warpCyclesPerIssue):
        message = "On average each warp of this kernel spends {:.1f} cycles being stalled waiting for the MIO instruction queue to be not full. This represents about {:.1f}% of the total average of {:.1f} cycles between issuing two instructions. This stall reason is high in cases of extreme utilization of the MIO pipelines, which include special math instructions, dynamic branches, as well as shared memory instructions.".format(warpCyclesPerStall, 100.*warpCyclesPerStall/warpCyclesPerIssue, warpCyclesPerIssue)
        fe.message(NvRules.IFrontend.MsgType_MSG_WARNING, message)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
from telegram.ext import Updater, InlineQueryHandler, CommandHandler
from urllib.parse import quote_plus
import requests, logging
from bs4 import BeautifulSoup
from telegram import (ReplyKeyboardMarkup, ReplyKeyboardRemove)
from telegram.ext import (Updater, CommandHandler, MessageHandler, Filters, RegexHandler,
ConversationHandler)
from telegram.ext import Updater, CommandHandler,MessageHandler,Filters
# SECURITY: hard-coded bot token committed to source control — revoke this
# token and load it from an environment variable or config file instead.
token = "948524958:AAHjNH_bag_dKMaxs9ibjXxkbbPCePCxxGY"
# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
# Define a few command handlers. These usually take the two arguments bot and
# update. Error handlers also receive the raised TelegramError object in error.
def error(bot, update, error):
    """Log Errors caused by Updates."""
    logger.warning('Update "%s" caused error "%s"', update, error)
def help(bot, update):
    # /help handler. NOTE: shadows the builtin `help`, but the name is what
    # the dispatcher registration below expects.
    msg = "Usage: /book <keyword> and I will give you links to EPUB/PDF/MOBI/AZW3 :)\n Enter /cancel to cancel search"
    update.message.reply_text(msg)
def cancel(bot, update,user_data):
    # /cancel handler: end the conversation and clear the custom keyboard.
    user = update.message.from_user
    #logger.info("User %s canceled the conversation." % user.first_name)
    update.message.reply_text('Cancellation Successful :) ',
                              reply_markup=ReplyKeyboardRemove())
    return ConversationHandler.END
# NOTE(review): module-level result list shared by all chats — `book` below
# appends to it on every search, so results accumulate across users.
mlist=[]
def book(bot, update, user_data):
    """/book handler: scrape libgen search results for the given keyword,
    reply with one message per hit and ask for an id (conversation state 1).

    Fix: results were accumulated in the module-level ``mlist`` which was
    never cleared, so every new search (by any user) re-sent all previous
    results and grew without bound. Use a local list and store the per-chat
    slice in ``user_data`` only.
    """
    line = update["message"]["text"].split()
    if len(line) < 2:
        update.message.reply_text('Hey! I need a keyword >:(')
        return
    url = "https://libgen.is"
    bookname = quote_plus(' '.join(line[1:]))
    query = url+"/search?req="+bookname.replace(' ','+')
    print(query)
    r = requests.get(query)
    html = r.text
    soup = BeautifulSoup(html)
    items = soup.find("table", {"class": "c"})
    rows = []
    for row in items.findAll('tr'):
        aux = row.findAll('td')
        results = {
            'id': str(aux[0].text),
            'author': str(aux[1].text),
            'title': str(aux[2].a.text),
            'publisher': str(aux[3].text),
            'year': str(aux[4].string),
            'language': str(aux[5].string),
            'size': str(aux[7].string),
            'extension': str(aux[8].string),
        }
        if len(aux)>12:
            results['mirrors']={
                '1':str(aux[9].a['href']),
                '2':str(aux[10].a['href']),
                '3':str(aux[11].a['href']),
                '4':str(aux[12].a['href']),
                '5':str(aux[13].a['href'])
            }
        else:
            results['mirrors']={}
        rows.append(results)
    # The first table row is the header; skip it everywhere.
    user_data['mlist'] = rows[1:]
    for entry in rows[1:]:
        update.message.reply_text("id:"+entry["id"]+"\nauthor:"+entry["author"]+"\ntitle:"+entry["title"]+"\npublisher:"+entry["publisher"]+"\nyear:"+entry["year"]+"\nlanguage:"+entry["language"]+"\nsize:"+entry["size"]+"\nextension:"+entry["extension"])
    update.message.reply_text("Enter id:")
    return 1
GET_TEXT=1
def get_text(bot, update, user_data):
    """Conversation state GET_TEXT: receive a book id, walk its mirrors and
    reply with the first working download link.

    Fixes: the mirrors dict is keyed '1'..'5' but the original loop indexed
    ``str(0)..str(n-1)`` — the first lookup always raised KeyError (silently
    printed) and the last mirror was never tried. Also removed the
    unreachable statements after ``break``.
    """
    user_text = update.message.text
    mlist = user_data['mlist']
    index = -999  # sentinel: id not found
    for i in range(len(mlist)):
        if mlist[i]["id"] == user_text:
            index = i
            break
    if index == -999:
        update.message.reply_text("Invalid ID")
        return ConversationHandler.END
    print("valid")
    mirrors = mlist[index]["mirrors"]
    # Keys are the strings '1'..'5'; try them in order until one resolves.
    for key in sorted(mirrors):
        try:
            print(mirrors[key])
            r = requests.get(mirrors[key])
            soup = BeautifulSoup(r.text)
            # First anchor on the mirror page is the direct download link.
            link = soup.findAll('a')[0]
            url = 'http://' + mirrors[key].split('/')[2] + link.get('href')
            update.message.reply_text(url)
            break
        except Exception as e:
            # Best-effort: log and try the next mirror.
            print(e)
    return GET_TEXT
def main():
    """Wire up handlers and run the bot until interrupted."""
    # Create the Updater and pass it your bot's token.
    updater = Updater(token)
    # Get the dispatcher to register handlers
    dp = updater.dispatcher
    # /book starts the conversation; state GET_TEXT consumes the chosen id
    # (or /cancel). user_data carries the search results between states.
    conv_handler = ConversationHandler(
        entry_points=[CommandHandler('book', book,pass_user_data=True)],
        states={
            GET_TEXT: [MessageHandler(Filters.text, get_text,pass_user_data=True),CommandHandler('cancel', cancel,pass_user_data=True)]},
        fallbacks=[CommandHandler('cancel', cancel,pass_user_data=True)]
    )
    # on different commands - answer in Telegram
    dp.add_handler(CommandHandler("help", help))
    #dp.add_handler(CommandHandler("book", book))
    dp.add_handler(conv_handler)
    # log all errors
    dp.add_error_handler(error)
    # Start the Bot
    updater.start_polling()
    # Block until the user presses Ctrl-C or the process receives SIGINT,
    # SIGTERM or SIGABRT. This should be used most of the time, since
    # start_polling() is non-blocking and will stop the bot gracefully.
    updater.idle()
if __name__ == '__main__':
    main()
|
"""
Performs continuous communication with an Arduino (or any Serial device) through the serial port.
Type "exit" to quit.
"""
import serial, sys, getopt
from select import select
# Module configuration (Python 2 script).
_DEBUG = False                          # -d 1 on the command line enables debug prints
_DEVICE_FILE = "/dev/cu.usbmodem1411"   # serial device node (macOS-style name)
_BAUDRATE = 9600
_TIMEOUT = 1                            # seconds, for both serial reads and stdin select()
_JSON_CMD = 'json\n'                    # typing "json" sends the contents of testJson.txt
_EXIT = 'exit\n'                        # typing "exit" quits the loop
def openSerial():
    """Open and return the configured serial port, resetting it first."""
    ser = serial.Serial(port = _DEVICE_FILE, baudrate=_BAUDRATE, timeout=_TIMEOUT)
    # Reset port before opening
    ser.close()
    ser.open()
    return ser
def ioWithArduino(port):
    """
    Standard In is used to send messages to the Arduino.

    Loops forever, alternating a non-blocking stdin poll (forwarding typed
    lines to the port) with a timed read of the port (echoing replies).
    Returns when the user types "exit".
    """
    while True:
        # Want to do a non-blocking read, so we can move on to check for input from Arduino
        # From: http://stackoverflow.com/questions/3471461/raw-input-and-timeout
        rlist, _, _ = select([sys.stdin], [], [], _TIMEOUT)
        # Attempt to read from stdin
        if rlist:
            line = sys.stdin.readline()
            if line == _EXIT:
                return
            elif line == _JSON_CMD:
                # "json" is a macro: send the canned payload from testJson.txt.
                with open('testJson.txt', 'r') as myfile:
                    json = myfile.read()
                if _DEBUG: print "Sending: |" + json + "|"
                port.write(json)
            else:
                if _DEBUG: print "Sending: " + line
                port.write(line)
        # Attempt to read from Arduino (readline respects the port timeout)
        rxLine = port.readline()
        if rxLine:
            if _DEBUG: print "Received: ",
            # Strip off newline
            rxLine = rxLine[:-1] if '\n' in rxLine else rxLine
            print rxLine
# Check command line option to turn debug statements on
if len(sys.argv) > 1:
    if sys.argv[1] == '-d' and sys.argv[2] == '1':
        _DEBUG = True
    else:
        print "Usage: python serialExample.py [-d 1]"
        exit()
# Open the port, verify it, then run the interactive loop until "exit".
port = openSerial()
if port.isOpen():
    if _DEBUG: print "Serial port " + _DEVICE_FILE + " has been opened"
else:
    print "Failed to open " + _DEVICE_FILE + ". Exiting."
    exit()
# Begin communication
ioWithArduino(port)
if _DEBUG: print "Closing serial port"
port.close()
|
from flask import Blueprint
# Blueprint for the plate pages, mounted at the site root.
plate = Blueprint('plate', __name__, url_prefix='/')
# never forget
# The routes module is imported at the bottom on purpose: it imports `plate`
# back from this module, so importing it any earlier would be circular.
from . import routes
class Test:
    pass
# A class is itself an instance of `type`.
test_type = type(Test)
assert test_type == type
# That's kind of weird: type is its own metaclass.
assert type(type) == type
# Other way to define a class: call type(name, bases, attrs) directly.
bases = ()
attr = {}
Foo = type('Foo', bases, attr)
assert type(Foo()) == Foo
# Sample metaclass for building Singleton objects: the first instantiation
# of each class is cached and returned for every later call.
class SingletonMeta(type):
    _instances = {}
    def __call__(cls, *args, **kwargs):
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super().__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance
class Singleton(metaclass=SingletonMeta):
    pass
assert Singleton() == Singleton()
# What if we have attributes?
class AttrSingleton(metaclass=SingletonMeta):
    def __init__(self, x):
        self.x = x
# They seem the same: the second call returns the cached instance and its
# __init__ argument is silently ignored.
assert AttrSingleton(1) == AttrSingleton(2)
inst1 = AttrSingleton(1)
inst2 = AttrSingleton(2)
assert inst1.x == inst2.x
# Be careful with this!
assert inst1.x == 1
assert inst2.x == 1
# If we delete those we still can't
# build inst with different value: the metaclass dict keeps a reference.
del inst1
del inst2
inst3 = AttrSingleton(3)
assert inst3.x == 1
from weakref import WeakKeyDictionary
class WeakrefSingletonMeta(type):
    # Like SingletonMeta but keyed in a WeakKeyDictionary. Note: the *keys*
    # are the classes themselves, so instances are still strongly referenced
    # as values — see the assertions below.
    _instances = WeakKeyDictionary()
    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            instance = super().__call__(*args, **kwargs)
            cls._instances[cls] = instance
        return cls._instances[cls]
class WeakSingleton(metaclass=WeakrefSingletonMeta):
    def __init__(self, x):
        self.x = x
inst1 = WeakSingleton(1)
assert inst1.x == 1
del inst1
inst2 = WeakSingleton(2)
# Still no luck in redefining it: the dict only weak-refs the *class* key,
# while the instance stored as the value is held strongly.
assert inst2.x == 1
# But it is still there
assert WeakSingleton._instances[WeakSingleton].x == 1
|
"""Treadmill metrics collector.
Collects Treadmill metrics and sends them to Graphite.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import glob
import logging
import os
import socket
import time
import click
from six.moves import urllib_parse
from treadmill import appenv
from treadmill import exc
from treadmill import fs
from treadmill import restclient
from treadmill import rrdutils
from treadmill.fs import linux as fs_linux
from treadmill.metrics import rrd
#: Metric collection interval (every X seconds)
_METRIC_STEP_SEC_MIN = 30
_METRIC_STEP_SEC_MAX = 300
_LOGGER = logging.getLogger(__name__)
# ( rrd file's basename, cgroup name)
CORE_RRDS = {'apps': 'treadmill.apps.rrd',
'core': 'treadmill.core.rrd',
'treadmill': 'treadmill.system.rrd'}
RRD_SOCKET = '/tmp/treadmill.rrd'
class RRDClientLoader(object):
    """Class to load rrd client

    Retries connecting to the rrd daemon socket until it succeeds or the
    timeout elapses.
    """

    # Seconds to sleep between connection attempts.
    INTERVAL = 5

    def __init__(self, timeout=600):
        """Load client if failed until timeout

        :param timeout: total seconds to keep retrying before raising.
        """
        self.client = None
        time_begin = time.time()
        while True:
            self.client = self._get_client()
            if self.client is not None:
                break
            if time.time() - time_begin > timeout:
                raise Exception(
                    'Unable to connect {} in {}'.format(RRD_SOCKET, timeout)
                )
            time.sleep(self.INTERVAL)

    @staticmethod
    def _get_client():
        """Get RRD client"""
        # socket.error signals the daemon is not (yet) listening; retry.
        try:
            return rrdutils.RRDClient(RRD_SOCKET)
        except socket.error:
            return None
def _sys_svcs(root_dir):
"""Contructs list of system services."""
return sorted([
os.path.basename(s)
for s in glob.glob(os.path.join(root_dir, 'init', '*'))
if not (s.endswith('.out') or s.endswith('.err'))])
def _update_core_rrds(data, core_metrics_dir, rrdclient, step, sys_maj_min):
    """Update the core cgroup rrd files; return how many were updated."""
    # rrd heartbeat: twice the collection step.
    heartbeat = int(step) * 2
    updated = 0
    for cgrp, metrics in data.items():
        rrdfile = os.path.join(core_metrics_dir, CORE_RRDS[cgrp])
        rrd.prepare(rrdclient, rrdfile, step, heartbeat)
        if rrd.update(rrdclient, rrdfile, metrics, sys_maj_min):
            updated += 1
    return updated
def _update_service_rrds(data, core_metrics_dir, rrdclient, step, sys_maj_min):
    """Update rrd files for core services; return how many were updated."""
    # rrd heartbeat: twice the collection step.
    heartbeat = int(step) * 2
    updated = 0
    for svc, metrics in data.items():
        rrdfile = os.path.join(core_metrics_dir, '{svc}.rrd'.format(svc=svc))
        rrd.prepare(rrdclient, rrdfile, step, heartbeat)
        if rrd.update(rrdclient, rrdfile, metrics, sys_maj_min):
            updated += 1
    _LOGGER.debug(
        'Updated %d service metrics from maj:min %s',
        updated, sys_maj_min
    )
    return updated
def _update_app_rrds(data, app_metrics_dir, rrdclient, step, tm_env):
    """Update per-application (container) rrds; return how many were updated."""
    interval = int(step) * 2
    total = 0
    for app_unique_name in data:
        try:
            # Resolve the app's local-disk device so blkio metrics can be
            # attributed to the correct major:minor device.
            localdisk = tm_env.svc_localdisk.get(app_unique_name)
            blkio_major_minor = '{major}:{minor}'.format(
                major=localdisk['dev_major'],
                minor=localdisk['dev_minor'],
            )
        except (exc.TreadmillError, IOError, OSError):
            # Best effort: proceed without a device filter.
            blkio_major_minor = None
        rrdfile = os.path.join(
            app_metrics_dir, '{app}.rrd'.format(app=app_unique_name))
        _LOGGER.debug(
            'Update %s metrics from maj:min %s',
            app_unique_name, blkio_major_minor)
        rrd.prepare(rrdclient, rrdfile, step, interval)
        if rrd.update(
                rrdclient, rrdfile, data[app_unique_name], blkio_major_minor):
            total += 1
    _LOGGER.debug('Updated %d container metrics', total)
    return total
def init():
    """Top level command handler."""
    # TODO: main is too long (R0915) and has too many branches (R0912),
    # need to be refactored.
    #
    # pylint: disable=R0915,R0912
    @click.command()
    @click.option('--step', '-s',
                  type=click.IntRange(_METRIC_STEP_SEC_MIN,
                                      _METRIC_STEP_SEC_MAX),
                  default=_METRIC_STEP_SEC_MIN,
                  help='Metrics collection frequency (sec)')
    @click.option('--approot', type=click.Path(exists=True),
                  envvar='TREADMILL_APPROOT', required=True)
    @click.option('--socket', 'api_socket',
                  help='unix-socket of cgroup API service',
                  required=True)
    def metrics(step, approot, api_socket):
        """Collect node and container metrics."""
        # The cgroup API service is reached over a unix socket via restclient.
        remote = 'http+unix://{}'.format(urllib_parse.quote_plus(api_socket))
        _LOGGER.info('remote cgroup API address %s', remote)
        tm_env = appenv.AppEnvironment(root=approot)
        app_metrics_dir = os.path.join(tm_env.metrics_dir, 'apps')
        core_metrics_dir = os.path.join(tm_env.metrics_dir, 'core')
        fs.mkdir_safe(app_metrics_dir)
        fs.mkdir_safe(core_metrics_dir)
        # Initiate the list for monitored applications from rrd files left
        # over on disk, so stale files can be cleaned up below.
        monitored_apps = set(
            os.path.basename(metric_name)[:-len('.rrd')]
            for metric_name in glob.glob('%s/*' % app_metrics_dir)
            if metric_name.endswith('.rrd')
        )
        sys_maj_min = '{}:{}'.format(*fs_linux.maj_min_from_path(approot))
        _LOGGER.info('Device sys maj:min = %s for approot: %s',
                     sys_maj_min, approot)
        _LOGGER.info('Loading rrd client')
        rrd_loader = RRDClientLoader()
        second_used = 0
        while True:
            # Sleep off whatever part of the step the last pass did not use.
            if step > second_used:
                time.sleep(step - second_used)
            starttime_sec = time.time()
            count = 0
            data = restclient.get(remote, '/cgroup/_bulk', auth=None).json()
            count += _update_core_rrds(
                data['treadmill'], core_metrics_dir,
                rrd_loader.client,
                step, sys_maj_min
            )
            count += _update_service_rrds(
                data['core'],
                core_metrics_dir,
                rrd_loader.client,
                step, sys_maj_min
            )
            count += _update_app_rrds(
                data['app'],
                app_metrics_dir,
                rrd_loader.client,
                step, tm_env
            )
            # Removed metrics for apps that are not present anymore
            seen_apps = set(data['app'].keys())
            for app_unique_name in monitored_apps - seen_apps:
                rrdfile = os.path.join(
                    app_metrics_dir, '{app}.rrd'.format(app=app_unique_name))
                _LOGGER.info('removing %r', rrdfile)
                rrd.finish(rrd_loader.client, rrdfile)
            monitored_apps = seen_apps
            second_used = time.time() - starttime_sec
            _LOGGER.info('Got %d cgroups metrics in %.3f seconds',
                         count, second_used)
        # Graceful shutdown.
        # NOTE(review): the loop above has no break, so this log line is
        # only reachable if an exception escapes — confirm intended.
        _LOGGER.info('service shutdown.')
    return metrics
|
from django.urls import path
from news.views import news_list, news_in_category, news_items, like_or_dislike

app_name = 'news'

# NOTE: order matters — the '<slug>/' catch-all must stay last, or it
# would shadow the integer-pk routes above it.
urlpatterns = [
    path('', news_list, name='news_list'),
    path('<int:news_pk>/', news_items, name='news_items'),
    path('<int:news_pk>/<int:comment_pk>/<str:vote>', like_or_dislike, name='like_or_dislike'),
    path('<slug>/', news_in_category, name='news_in_category'),
]
|
#I pledge my honor that I have abided by the Stevens Honor System. Jake Roux
def main():
    """Read comma-separated integers from stdin and print their sum."""
    print("This program will return the sum of numbers you enter")
    # Fix: the original used eval() on raw user input, which executes
    # arbitrary code and also crashes when a single number is entered
    # (an int is not iterable). Parse the text directly instead.
    raw = input("Separated by commas, enter your numbers:")
    total = sum(int(part) for part in raw.split(",") if part.strip())
    print(total)
main()
|
#!/usr/bin/python3
'''Takes in an argument and displays all values in the states
table of hbtn_0e_0_usa where name matches the argument.'''
from sys import argv
import MySQLdb
if __name__ == "__main__":
    # Command-line arguments: mysql user, password, database, state name.
    MY_USER = argv[1]
    MY_PASS = argv[2]
    MY_DB = argv[3]
    STATE = argv[4]

    db = MySQLdb.connect(host="localhost", port=3306,
                         user=MY_USER, passwd=MY_PASS,
                         db=MY_DB, charset="utf8")
    cur = db.cursor()
    # Security fix: parameterized query instead of str.format() — the
    # original interpolated argv directly into SQL (injection risk).
    cur.execute("SELECT * FROM states WHERE BINARY name = %s", (STATE,))
    for row in cur.fetchall():
        print(row)
    cur.close()
    db.close()
|
#!/usr/bin/python
import math
def power(a, b):
    """Return a**b for non-negative integer b via fast exponentiation.

    Fix: uses floor division (``//``) so the recursion terminates on
    Python 3 too — plain ``/`` yields a float there, which never reaches
    the ``b == 0`` / ``b == 1`` base cases.
    """
    if b == 0:
        return 1
    if b == 1:
        return a
    half = power(a, b // 2)
    if b % 2 == 0:
        return half * half
    # Odd exponent: one extra factor of a.
    return half * half * a
def main():
    """Read T test cases of (L, D, S, C) and print survival verdicts.

    Python 2 only (raw_input / print statements). The decision is made in
    log-space to avoid overflowing on S * (1+C)**(D-1).
    """
    T=int(raw_input());
    c=0;
    while (c<T):
        c=c+1;
        L,D,S,C = [int(x) for x in raw_input().split()] ;
        # Compare log10(S) + (D-1)*log10(1+C) against log10(L).
        lres=math.log10(S)+(D-1)*math.log10(1+C);
        lL=math.log10(L);
        # NOTE(review): temp/res recompute the count directly but are
        # unused for the decision; they can overflow for large D.
        temp=(1+C)**(D-1);
        res=S*temp;
        if(lres>=lL):
            print "ALIVE AND KICKING"
        else:
            print "DEAD AND ROTTING"
main();
|
from __future__ import unicode_literals
import arrow
from pyaib.plugins import keyword, observe, plugin_class
def serialize_seen(seen):
    """Flatten a 'seen' record into JSON-friendly primitives.

    The timestamp object is converted to its ISO-8601 string form; the
    other fields are copied through unchanged.
    """
    record = {
        'user': seen['user'],
        'channel': seen['channel'],
        'message': seen['message'],
    }
    record['timestamp'] = seen['timestamp'].isoformat()
    return record
def deserialize_seen(serialized):
    """Inverse of serialize_seen: rebuild the record from primitives.

    'user' may be absent in old records, in which case it becomes None.
    The ISO timestamp string is parsed back into an arrow timestamp.
    """
    return {
        'user': serialized.get('user'),
        'timestamp': arrow.get(serialized['timestamp']),
        'channel': serialized['channel'],
        'message': serialized['message'],
    }
def pretty_print_ago(timestamp):
    """Return a rough human-readable description of how long ago *timestamp* was."""
    ago = arrow.utcnow() - timestamp
    days = ago.days
    # Floor division: these are fed to str() below, and Python 3 true
    # division would print e.g. '2.5 hours'.
    hours = ago.seconds // 3600
    minutes = (ago.seconds % 3600) // 60
    message = ''
    if ago.total_seconds() < 60:
        return 'just now'
    if days > 0:
        message += str(days) + ' days'
    if hours > 0:
        message += ' ' + str(hours) + ' hours'
    if minutes > 0:
        # BUG FIX: was `if len(message > 0):`, which raises TypeError.
        if len(message) > 0:
            message += ' '
        message += str(minutes) + ' minutes ago'
    return message
@plugin_class('seen')
class Seen(object):
    """pyaib plugin: track when each user was last seen speaking.

    Every PRIVMSG is recorded into a key/value store keyed by the
    lowercased nick; `seen <user>` queries answer from that store.
    """

    def __init__(self, ctx, config):
        # Plugin-scoped key/value store provided by the bot context.
        self._db = ctx.db.get('seen')

    @keyword('seen')
    @keyword.autohelp
    def seen(self, ctx, msg, trigger, args, kwargs):
        '''[user] :: See when a user was last seen and what they said.'''
        if len(args) != 1:
            return msg.reply('I can only show info for one (1) user at a time!')
        db = self._db
        user = args[0]
        # Lookups are case-insensitive: records are keyed by lowercased nick.
        item = db.get(user.lower())
        if item.value is None:
            return msg.reply("I've never seen %(user)s in my life!" % {
                'user': user
            })
        value = deserialize_seen(item.value)
        # NOTE(review): `ago` is unused — pretty_print_ago recomputes it.
        ago = arrow.utcnow() - value['timestamp']
        msg.reply('%(user)s was last seen %(time)s in %(channel)s saying `%(message)s`' % {
            'user': value['user'] if 'user' in value and value['user'] is not None else user,
            'time': pretty_print_ago(value['timestamp']),
            'channel': value['channel'],
            'message': value['message']
        })

    @observe('IRC_MSG_PRIVMSG')
    def record(self, ctx, msg):
        """Store the latest message/channel/timestamp for the sender."""
        db = self._db
        message = msg.message.strip()
        channel = msg.channel
        user = msg.sender.nick
        item = db.get(user.lower())
        item.value = serialize_seen({
            'user': user,
            'timestamp': arrow.utcnow(),
            'channel': channel,
            'message': message
        })
        item.commit()
|
"""
Functions to count and cluster amino acid sequences.
"""
import numpy as np
from . import utils
class PSFM:
    """Position-specific frequency matrix with scoring/logo helpers.

    Holds a positions x alphabet frequency matrix plus optional comments
    and consensus, and derives log-odds scoring matrices (PSSMs) and
    sequence-logo letter heights from it.
    """

    def __init__(self, pssm, alphabet=utils.AMINO_ACIDS, comments=(), consensus=None):
        # BUG FIX: the body referenced an undefined name `psfm`; the
        # constructor argument is called `pssm` (name kept for backward
        # compatibility) and holds the frequency matrix.
        self._psfm = pssm
        self.alphabet = alphabet
        self.comments = comments
        self.consensus = consensus

    @classmethod
    def from_txt(cls, lines):
        """Load a scoring/frequency matrix from text"""
        alphabet = None
        dtype = int
        matlines = []
        comments = []
        consensus = []
        for line in lines:
            line = line.strip()
            if line.startswith('#'):
                comments.append(line.lstrip('#'))
            line = line.split('#')[0]
            if not line:
                continue
            if alphabet is None:
                # The first non-comment line lists the alphabet letters.
                alphabet = tuple(line.strip().split())
            else:
                # Any decimal point switches parsing to float for the rest.
                if dtype is float or '.' in line:
                    dtype = float
                line = line.split()
                consensus.append(line[1])
                matlines.append([dtype(n) for n in line[2:]])
        mat = np.array(matlines)
        return cls(mat, alphabet=''.join(alphabet), comments=comments, consensus=''.join(consensus))

    def to_txt(self, mat=None):
        """Output a matrix in text format."""
        lines = []
        for line in self.comments:
            lines.append(f'#{line}')
        if mat is None:
            # BUG FIX: was `mat = self.pssm`, which bound the method
            # object (mat.shape then raises AttributeError). Default to
            # the stored frequency matrix.
            mat = self.psfm()
        consensus = self.consensus
        if self.consensus is None:
            consensus = ['X'] * mat.shape[0]
        l = len(str(mat.shape[0]))
        lines.append(' ' * (l + 3) + ' '.join([f' {a:<5}' for a in self.alphabet]))
        for i in range(mat.shape[0]):
            line = f'{i+1:>{l}} {consensus[i]} ' + ' '.join([f'{n:>6.3f}' if n else ' 0 ' for n in mat[i]])
            lines.append(line)
        return lines

    def psfm(self):
        """Return the stored position-specific frequency matrix."""
        return self._psfm

    def pssm(self, bg='blosum62', return_psfm=False):
        """Return the log2-odds scoring matrix against background *bg*.

        bg=None uses a flat background; the BLOSUM62 background is used
        for any of the 'blosum' aliases. Zero-frequency cells score 0.
        """
        if bg is None:
            bg = np.ones(len(self.alphabet)) / len(self.alphabet)
        elif bg in ('blosum62', 'blosum', 'bl62', 'bl'):
            bg = utils.bgfreq_array(self.alphabet)
        psfm = self.psfm()
        pssm = np.zeros_like(psfm, dtype='float')
        mask = psfm > 0
        pssm[mask] = np.log2((psfm / bg)[mask])
        if return_psfm:
            return pssm, psfm
        return pssm

    def shannon_logo(self):
        """Letter heights scaled by Shannon information content."""
        pssm, psfm = self.pssm(bg=None, return_psfm=True)
        return np.sum(pssm * psfm, axis=1, keepdims=True) * psfm

    def kullback_leibler_logo(self):
        """Letter heights scaled by Kullback-Leibler divergence."""
        pssm, psfm = self.pssm(bg='blosum62', return_psfm=True)
        return np.sum(pssm * psfm, axis=1, keepdims=True) * psfm * np.sign(pssm)

    def weighted_kullback_leibler_logo(self):
        """KL logo with per-letter heights proportional to scores."""
        pssm, psfm = self.pssm(bg='blosum62', return_psfm=True)
        return np.sum(pssm * psfm, axis=1, keepdims=True) * pssm / np.sum(np.abs(pssm), axis=1, keepdims=True)

    def p_weighted_kullback_leibler_logo(self):
        """KL logo with heights weighted by both score and frequency."""
        pssm, psfm = self.pssm(bg='blosum62', return_psfm=True)
        return np.sum(pssm * psfm, axis=1, keepdims=True) * pssm * psfm / np.sum(
            np.abs(pssm) * psfm, axis=1, keepdims=True)
class AlignmentPSFM(PSFM):
    """Position-specific scoring matrix built from aligned sequences."""

    def __init__(self, sequences, alphabet=utils.AMINO_ACIDS, clustering='hobohm1', weight_on_prior=200, **kwargs):
        self.seqlen = None
        self.alphabet = str(alphabet)
        self.sequences = []
        # Validate that all sequences share a single length.
        for seq in sequences:
            seq = str(seq)
            self.sequences.append(seq)
            if self.seqlen is None:  # idiom fix: was `== None`
                self.seqlen = len(seq)
            elif self.seqlen != len(seq):
                raise Exception('All sequences must be of same length!')
        # Resolve the clustering strategy: a callable, or a known name.
        if isinstance(clustering, str):
            if clustering.lower() in ('hobohm', 'hobohm1'):
                clustering = hobohm1_factory(kwargs.get('hobohm1_threshold', 0.63))
            elif clustering.lower() in ('heuristic', ):
                clustering = heuristic
            else:
                raise Exception(f'Unknown clustering method: {clustering}')
        self.clustering = clustering
        self.weight_on_prior = weight_on_prior
        self.consensus = None
        self.comments = []
        self.gaps = '-.'

    @property
    def alphabet_index(self):
        """Map letter -> 1-based index; gap characters map to 0."""
        alpha_index = {a: i for i, a in enumerate(self.alphabet, 1)}
        for gap in self.gaps:
            alpha_index[gap] = 0
        return alpha_index

    def alignment_array(self):
        """Encode the alignment as an integer array (0 encodes a gap)."""
        if set(self.gaps).intersection(set(self.alphabet)):
            raise Exception(f'Alphabet ({self.alphabet}) must not contain gaps ({self.gaps})')
        alphabet_index = self.alphabet_index
        alignment_arr = [
            [alphabet_index[letter] for letter in seq]
            for seq in self.sequences
        ]
        return np.array(alignment_arr)

    def sequence_weights(self):
        """Per-sequence weights from the clustering method (1.0 each if None)."""
        if self.clustering is None:
            return np.ones(len(self.sequences))
        return self.clustering(self.alignment_array())

    def psfm(self):
        """Position specific frequency matrix (with optional BLOSUM pseudo-counts)."""
        a_index = self.alphabet_index
        sequence_weights = self.sequence_weights()
        countmat = np.zeros([len(self.sequences[0]), len(self.alphabet)], dtype='float')
        for j, seq in enumerate(self.sequences):
            for i, a in enumerate(seq):
                a_idx = a_index[a] - 1
                if a_idx >= 0:  # skip gaps (index 0 shifts to -1)
                    countmat[i, a_idx] += sequence_weights[j]
        frequency_matrix = countmat / np.sum(sequence_weights)
        if self.weight_on_prior:
            # Blend observed frequencies with BLOSUM62-derived pseudo-counts,
            # weighted by effective sequence count vs. the prior weight.
            blosum_mat = utils.blosum62_array(self.alphabet)
            pseudo_counts = np.dot(frequency_matrix, blosum_mat)
            a = np.sum(sequence_weights) - 1
            b = self.weight_on_prior
            frequency_matrix = (a * frequency_matrix + b * pseudo_counts) / (a + b)
        return frequency_matrix
#
# Clustering/weighting
def hobohm1_factory(threshold):
    """Return a clustering callable that runs Hobohm-1 at *threshold*."""
    def clusterer(alignment_array):
        return hobohm1(alignment_array, threshold)
    return clusterer
def hobohm1(alignment_array, threshold=0.63):
    """Hobohm-1 sequence weighting, as implemented in seq2logo.

    Each sequence is greedily absorbed into the cluster of the first
    earlier sequence it matches with identity >= *threshold* (positions
    where the reference has a gap are ignored); every sequence then gets
    weight 1 / (size of its cluster).
    """
    n_seqs, _ = alignment_array.shape
    # Process sequences with the fewest gaps first (0 encodes a gap).
    order = np.argsort(np.sum(alignment_array == 0, axis=1))
    ordered = alignment_array[order]
    cluster_of = np.arange(n_seqs)
    for ref in range(n_seqs):
        for cand in range(ref + 1, n_seqs):
            if cluster_of[cand] != cand:
                # Already absorbed into an earlier cluster.
                continue
            matches = ordered[ref] == ordered[cand]
            matches[ordered[ref] == 0] = False
            identity = np.sum(matches) / np.sum(ordered[ref] != 0)
            if identity >= threshold:
                cluster_of[cand] = ref
    sizes = np.bincount(cluster_of)
    weights = np.ones(n_seqs) / sizes[cluster_of]
    # Undo the gap-count sort so weights line up with the input order.
    undo = np.empty(order.size, 'int')
    undo[order] = np.arange(order.size)
    return weights[undo]
def heuristic(alignment_array):
    """Heuristic (position-based) sequence weighting.

    weight(seq, pos) = 1 / (count(seq's letter at pos) * n_distinct_letters at pos),
    summed over positions.
    Reference: https://doi.org/10.1016/0022-2836(94)90032-9
    """
    _, seqlen = alignment_array.shape
    per_position = np.zeros_like(alignment_array, dtype='float')
    for col in range(seqlen):
        letters, inverse, counts = np.unique(
            alignment_array[:, col], return_counts=True, return_inverse=True)
        per_position[:, col] = (1 / (counts * letters.size))[inverse]
    return np.sum(per_position, axis=1)
|
import os
import imaplib
import email
from email.header import decode_header
import traceback
USERNAME = os.getenv('email')
PASSWORD = os.getenv('password')
def get_otp():
    """Fetch the newest Gmail inbox message and extract OTP code(s) from it.

    Returns a list of ints on success, or "" on any failure.
    NOTE(review): the "" failure value differs in type from the success
    value (a list); truthiness-based callers are fine, but confirm that
    type-sensitive callers expect this.
    """
    try:
        otp = []
        # create an IMAP4 class with SSL
        imap = imaplib.IMAP4_SSL("imap.gmail.com")
        # authenticate
        imap.login(USERNAME, PASSWORD)
        status, messages = imap.select("Inbox")
        # number of top emails to fetch
        N = 1
        # total number of emails
        messages = int(messages[0])
        for i in range(messages, messages - N, -1):
            # fetch the email message by ID
            res, msg = imap.fetch(str(i), "(RFC822)")
            for response in msg:
                if isinstance(response, tuple):
                    # parse a bytes email into a message object
                    msg = email.message_from_bytes(response[1])
                    if msg.is_multipart():
                        # iterate over email parts
                        for part in msg.walk():
                            # extract content type of email
                            content_type = part.get_content_type()
                            content_disposition = str(part.get("Content-Disposition"))
                            try:
                                # get the email body
                                body = part.get_payload(decode=True).decode()
                            except:
                                # NOTE(review): bare except silently skips
                                # undecodable parts; `body` may then hold the
                                # previous part's text (or be unbound on the
                                # first part) — confirm this is acceptable.
                                pass
                            if content_type == "text/plain" and "attachment" not in content_disposition:
                                # Keep text/plain parts only, skipping attachments.
                                # Depending on where your OTP is in the email, you
                                # will have to modify the string split markers below.
                                body = body.split('To log into your account copy the code below and paste it into the corresponding form.')[1]
                                body = body.split('This code is valid for 15 minutes.')[0]
                                otp.append(int(body))
        return otp
    except Exception as ex:
        # Any failure (network, auth, parsing) yields "".
        return ""
|
import socket
import select
def rc4_crypt(data, key):
    """RC4 stream cipher: encrypt/decrypt *data* (str) with *key* (str).

    RC4 is symmetric — applying it twice with the same key returns the
    original data.
    """
    # Key-scheduling algorithm (KSA).
    # Fix: list(range(256)) — on Python 3, range() does not support item
    # assignment, so the original swap line crashed there.
    box = list(range(256))
    x = 0
    for i in range(256):
        x = (x + box[i] + ord(key[i % len(key)])) % 256
        box[i], box[x] = box[x], box[i]
    # Pseudo-random generation algorithm (PRGA).
    x = y = 0
    out = []
    for char in data:
        x = (x + 1) % 256
        y = (y + box[x]) % 256
        box[x], box[y] = box[y], box[x]
        out.append(chr(ord(char) ^ box[(box[x] + box[y]) % 256]))
    return ''.join(out)
def main():
    """Forward MAVLink UDP traffic between a local mavproxy and a remote
    server, RC4-encrypting toward the server and decrypting replies.

    NOTE(review): Python 2 code — sendto() is called with str payloads;
    under Python 3 these would need to be bytes.
    """
    enc_key = 'itrie400'
    server = ('140.96.178.37', 8090)
    listen_mavproxy = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    listen_mavproxy.bind(('127.0.0.1', 14550))
    fwd_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # Initial hello so the server learns our address.
    fwd_sock.sendto('hello', server)
    poller = select.poll()
    poller.register(listen_mavproxy, select.POLLIN | select.POLLPRI)
    poller.register(fwd_sock, select.POLLIN | select.POLLPRI)
    fd_to_sock = {listen_mavproxy.fileno():listen_mavproxy, fwd_sock.fileno():fwd_sock}
    while True:
        try:
            events = poller.poll()
        except KeyboardInterrupt:
            # Ctrl-C exits the relay loop.
            break
        for fd, flag in events:
            sock = fd_to_sock[fd]
            if sock == listen_mavproxy:
                # Local -> remote: encrypt and forward; remember mavproxy's address.
                data, mavproxy = sock.recvfrom(1024)
                enc_txt = rc4_crypt(data, enc_key)
                fwd_sock.sendto(enc_txt, server)
            else:
                # Remote -> local: decrypt and relay back to mavproxy.
                # NOTE(review): `mavproxy` is unbound until the first local
                # packet arrives — a server packet arriving first raises
                # NameError. Also note `server` is rebound by recvfrom here.
                data, server = sock.recvfrom(1024)
                plain_txt = rc4_crypt(data, enc_key)
                listen_mavproxy.sendto(plain_txt, mavproxy)
    listen_mavproxy.close()
main()
|
import findspark
findspark.init('C:\spark-2.4.5-bin-hadoop2.7')
# May cause deprecation warnings, safe to ignore, they aren't errors
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
from pyspark.sql import SQLContext
from pyspark.sql.functions import desc
from pyspark.sql import Row
import re
# Can only run this once. restart your kernel for any errors.
sc = SparkContext.getOrCreate()
# sc1 = SparkSession.builder.appName("Hive TST").enableHiveSupport().getOrCreate()
# 10-second micro-batches; the windowed stream below covers 20s every 20s.
ssc = StreamingContext(sc, 10 )
sqlContext = SQLContext(sc)
# Tweets arrive as '~@'-delimited text lines on this TCP socket.
socket_stream = ssc.socketTextStream("192.168.1.239", 5551)
lines = socket_stream.window( 20,20 )
from collections import namedtuple
# NOTE(review): the second assignment rebinds `Tweet`, so the
# (text, location, source) shape is never usable under that name.
fields = ("text", "location","source" )
Tweet = namedtuple( 'Tweet', fields )
fields = ("tag", "count" )
Tweet = namedtuple( 'Tweet', fields )
# lines.map(lambda x:x.toString).foreachRDD(lambda rddRaw : rddRaw.toDF()).limit(10).registerTempTable("tweets1")
# Raw tweets split on the '~@' delimiter -> temp table "tweets".
( lines.map( lambda text: text.split( "~@" ) ) #Splits to a list
    .foreachRDD(
        lambda rdd: rdd.toDF().registerTempTable("tweets")
    )) # Registers to a table.
# Hashtag counts -> temp table "tweets1" (top 10 by count).
( lines.flatMap( lambda text: text.split( " " ) )
    .filter( lambda word: word.lower().startswith("#") )
    .map( lambda word: ( word.lower(), 1 ) )
    .reduceByKey( lambda a, b: a + b )
    .map( lambda rec: Tweet( rec[0], rec[1] ) )
    .foreachRDD( lambda rdd: rdd.toDF().sort( desc("count") )
        .limit(10).registerTempTable("tweets1") ) )
# Lower-cased, comma-stripped tweet text -> temp table "tweets2".
lines.map(lambda x: (x.replace(',','').lower(), )).foreachRDD( lambda rdd: rdd.toDF().registerTempTable("tweets2") )
def Find(string):
    """Return a regex match for the first http(s) URL in *string*, or None.

    Note: uses re.search (first match only), not findall — the URL is
    available as match.group("url").
    """
    # Raw string so the \s escape is taken literally by the regex engine
    # (a plain string triggers an invalid-escape warning on Python 3).
    return re.search(r"(?P<url>https?://[^\s]+)", string)
# URL token counts -> temp table "tweets_url".
lines.flatMap(lambda text: text.split(" ")).filter( lambda word: word.startswith("http")).map( lambda word: ( word.lower(), 1 ) ).reduceByKey( lambda a, b: a + b ).foreachRDD( lambda rdd: rdd.toDF() .limit(10).registerTempTable("tweets_url") )
lines.pprint()
import time
from IPython import display
import matplotlib.pyplot as plt
import seaborn as sns
import pandas
# Only works for Jupyter Notebooks!
get_ipython().run_line_magic('matplotlib', 'inline')
ssc.start()
# Poll the "tweets_url" table every 5 seconds, 10 times.
count = 0
while count < 10:
    time.sleep(5)
    sqlContext.sql( 'Select _1 url,_2 ct from tweets_url').show(truncate=False)
    display.clear_output(wait=True)
    count = count + 1
# Top-10 hashtags bar chart, refreshed every 5 seconds, 10 times.
count = 0
while count < 10:
    time.sleep(5)
    top_10_tweets = sqlContext.sql( 'Select tag, count from tweets1' )
    top_10_df = top_10_tweets.toPandas()
    display.clear_output(wait=True)
    plt.figure( figsize = ( 10, 8 ) )
    sns.barplot( x="count", y="tag", data=top_10_df)
    plt.show()
    count = count + 1
# Crude keyword-list sentiment split (negative vs positive LIKE patterns).
count = 0
while count < 10:
    time.sleep( 5 )
    df=sqlContext.sql( "Select 'negative' tag,count(distinct _1) ct from tweets2 WHERE (_1 LIKE '%suffered%' or _1 LIKE '%killed%' or _1 LIKE '%deaths%' or _1 LIKE '%disappoi%' or _1 LIKE '%sad%' or _1 LIKE '%concern%' or _1 LIKE '%bad%' or _1 LIKE '%fail%') UNION Select 'positive' tag,count(distinct _1) ct from tweets2 WHERE (_1 LIKE '%survived%' or _1 LIKE '%happy%' OR _1 LIKE '%wonderful%' OR _1 LIKE '%bliss%' OR _1 LIKE '%hope%' OR _1 LIKE '%win%')").toPandas()
    display.clear_output(wait=True)
    plt.figure( figsize = ( 10, 8 ) )
    sns.set(style="whitegrid")
    sns.barplot( x="tag", y="ct", data=df)
    plt.show()
    count = count + 1
#Displaying Sources Statistics
count = 0
while count < 1:
    time.sleep( 5 )
    top_10_source = sqlContext.sql('select source,count(*) as count from(select regexp_extract(tweets._3,\'Android|Twitter Web App|iPhone|Facebook|HubSpot|iPad|EcoInternet3|Echofon\',0) as source from tweets) where nullif(source,"") is not null group by source' )
    top_10_df = top_10_source.toPandas()
    display.clear_output(wait=True)
    plt.figure( figsize = ( 10, 8 ) )
    sns.barplot( x="count", y="source", data=top_10_df)
    plt.show()
    count = count + 1
# Displaying Location Statistics
count = 0
while count < 1:
    time.sleep( 5 )
    top_10_source = sqlContext.sql( 'select tweets._2 as location,count(*) as count from tweets where nullif(tweets._2,"") is not null and tweets._2 not like "%None%" group by tweets._2' )
    top_10_source.show()
    top_10_df = top_10_source.toPandas()
    display.clear_output(wait=True)
    plt.figure( figsize = ( 10, 8 ) )
    sns.barplot( x="count", y="location", data=top_10_df)
    plt.show()
    count = count + 1
ssc.stop()
|
from django.views import View
from django.shortcuts import render, redirect
from django.http import JsonResponse
from rest_framework import viewsets, generics
from django.contrib import messages
from .models import Priode, Matakuliah, Grade, KRS, KRSDetail
from .models import Mahasiswa, Angkatan, ProgramStudi, CalculateTemp
from .serializers import MahasiswaSerializers, AngatakanSerializers, ProgramStudiSerializers
from .serializers import PriodeSerializers, MatakuliahSerializers, GradeSerializers, KrsSerializers
from .serializers import KrsDetailSerializers
from ann.models import DatasetDetailReview as DatasetReview
#authentication
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
# Create your views here.
@method_decorator(login_required, name='get')
@method_decorator(login_required, name='post')
class DatawarehouseView(View):
    """Datawarehouse landing page: filter dropdowns plus a bulk recompute action."""

    def get(self, request):
        # Build the program-studi <select> markup.
        prodi_options = ['<select class="form-control" name="Prodi"><option value=""></option>']
        for prodi in ProgramStudi.objects.all().order_by():
            prodi_options.append('<option value="' + prodi.nama + '">' + prodi.nama + '</option>')
        prodi_options.append('</select>')
        # Build the angkatan (cohort year) <select> markup.
        angkatan_options = ['<select class="form-control" name="Angkatan"><option value=""></option>']
        for angkatan in Angkatan.objects.all().order_by():
            tahun = str(angkatan.tahun)
            angkatan_options.append('<option value="' + tahun + '">' + tahun + '</option>')
        angkatan_options.append('</select>')
        return render(request, 'datawarehouse/index.html', {
            'angkatan': ''.join(angkatan_options),
            'prodi': ''.join(prodi_options)
        })

    def post(self, request):
        aksi = request.POST['aksi']
        if aksi == 'update_data':
            self.update_temp(request)
        return redirect('dashboard:datawarehouse:index')

    def update_temp(self, request):
        """Recompute the cached (temp) fields for every student."""
        for mahasiswa in Mahasiswa.objects.order_by('angkatan__tahun', 'nim'):
            CalculateTemp().update_mahasiswa_temp(mahasiswa)
        messages.success(request, 'Data temp berhasil diupdate')
@method_decorator(login_required, name='get')
@method_decorator(login_required, name='post')
class MahasiswaView(View):
    """Detail page and AJAX actions (delete / flag toggles) for one student."""

    def get(self, request, **kwargs):
        id = self.kwargs['id']
        mahasiswa = Mahasiswa.objects.get(id=id)
        return render(request, 'datawarehouse/mahasiswa.html', {
            'mahasiswa': mahasiswa
        })

    def post(self, request, **kwargs):
        id = self.kwargs['id']
        action = request.POST['action']
        if action == 'DELETE':
            Mahasiswa.objects.get(id=id).delete()
            return JsonResponse({
                'message': 'Data mahasiswa berhasil dihapus',
            })
        elif action == 'CHANGEKETERANGAN':
            # Toggle the 'keterangan' flag; the two toggle branches were
            # duplicated, so both now share _toggle().
            return self._toggle(id, request.POST.get('keterangan'),
                                'temp_keterangan', 'keterangan', 'Data mahasiswa ')
        elif action == 'CHANGECUTI':
            return self._toggle(id, request.POST.get('cuti'),
                                'temp_cuti', 'cuti', 'Cuti mahasiswa ')

    def _toggle(self, id, current, field, key, label):
        """Flip boolean *field* on the student: '1' -> off, '0' -> on.

        *key* is the JSON field reporting the new state; *label* prefixes
        the user-visible message. Any other *current* value fails.
        """
        mahasiswa_nama = Mahasiswa.objects.get(id=id).nama
        if current == '1':
            Mahasiswa.objects.filter(id=id).update(**{field: False})
            return JsonResponse({
                'status': 'success',
                key: 0,
                'message': label + mahasiswa_nama + ' menjadi off'
            })
        elif current == '0':
            Mahasiswa.objects.filter(id=id).update(**{field: True})
            return JsonResponse({
                'status': 'success',
                key: 1,
                'message': label + mahasiswa_nama + ' menjadi on'
            })
        return JsonResponse({
            'status': 'fail',
            'message': 'Terjadi kesalahan'
        })
@method_decorator(login_required, name='get')
@method_decorator(login_required, name='post')
class KrsView(View):
    """Detail page and delete action for one KRS (course-plan) record."""

    def get(self, request, **kwargs):
        id = self.kwargs['id']
        krs = KRS.objects.get(id=id)
        mahasiswa = krs.mahasiswa
        return render(request, 'datawarehouse/krs.html', {
            'mahasiswa': mahasiswa,
            'krs': krs
        })

    def post(self, request, **kwargs):
        id = self.kwargs['id']
        action = request.POST['action']
        if action == 'DELETE':
            # Perf fix: fetch once instead of two identical .get() queries.
            krs = KRS.objects.get(pk=id)
            mahasiswa = krs.mahasiswa
            krs.delete()
            CalculateTemp().update_mahasiswa_temp(mahasiswa)
            # Refresh the student's row in the ANN review dataset.
            inputs = CalculateTemp().get_4_ip(mahasiswa)
            DatasetReview.objects.filter(mahasiswa=mahasiswa).update(
                ip_1=inputs[0],
                ip_2=inputs[1],
                ip_3=inputs[2],
                ip_4=inputs[3],
                masa_studi=mahasiswa.temp_masa_studi,
            )
            return JsonResponse({
                'message': 'Data krs berhasil dihapus',
            })
@method_decorator(login_required, name='get')
@method_decorator(login_required, name='post')
class KrsDetailView(View):
    """AJAX delete endpoint for a single KRS detail (course entry)."""

    def get(self, request, **kwargs):
        # No standalone page for a KRS detail; bounce to the index.
        return redirect('dashboard:datawarehouse:index')

    def post(self, request, **kwargs):
        id = self.kwargs['id']
        action = request.POST['action']
        if action == 'DELETE':
            # Perf fix: fetch once instead of two identical .get() queries.
            detail = KRSDetail.objects.get(pk=id)
            krs = detail.krs
            detail.delete()
            CalculateTemp().update_krs_temp(krs)
            return JsonResponse({
                'message': 'Data matakuliah berhasil dihapus',
            })
class MahasiswaList(generics.ListAPIView):
    """Read-only API: S1-program students ordered by cohort year then NIM."""
    serializer_class = MahasiswaSerializers

    def get_queryset(self):
        return Mahasiswa.objects.filter(program_studi__nama__contains="S1").order_by('angkatan__tahun', 'nim')
class KrsList(generics.ListAPIView):
    """Read-only API: KRS records for the student given by ?mahasiswa_id=."""
    serializer_class = KrsSerializers

    def get_queryset(self):
        # Missing/empty mahasiswa_id yields an empty queryset.
        mahasiswa_id = self.request.query_params.get('mahasiswa_id', '')
        return KRS.objects.filter(mahasiswa__id=mahasiswa_id).order_by('priode__kode',
                                                                       'mahasiswa__angkatan__tahun', 'mahasiswa__nim')
class KrsDetailList(generics.ListAPIView):
    """Read-only API: course entries for the KRS given by ?krs_id=."""
    serializer_class = KrsDetailSerializers

    def get_queryset(self):
        # Missing/empty krs_id yields an empty queryset.
        krs_id = self.request.query_params.get('krs_id', '')
        return KRSDetail.objects.filter(krs__id=krs_id).order_by('matakuliah__nama',
                                                                 'matakuliah__sks', 'grade__bobot')
# class ProgramStudiViewSet(viewsets.ModelViewSet):
# queryset = ProgramStudi.objects.all().order_by('kode')
# serializer_class = ProgramStudiSerializers
# class AngkatanViewSet(viewsets.ModelViewSet):
# queryset = Angkatan.objects.all().order_by('tahun')
# serializer_class = AngatakanSerializers
# class PriodeViewSet(viewsets.ModelViewSet):
# queryset = Priode.objects.all().order_by('kode')
# serializer_class = PriodeSerializers
# class MatakuliahViewSet(viewsets.ModelViewSet):
# queryset = Matakuliah.objects.all().order_by('priode__kode', 'nama', 'sks')
# serializer_class = MatakuliahSerializers
# class GradeViewSet(viewsets.ModelViewSet):
# queryset = Grade.objects.all().order_by('-bobot')
# serializer_class = GradeSerializers
# class MahasiswaViewSet(viewsets.ModelViewSet):
# queryset = Mahasiswa.objects.all().order_by('angkatan__tahun', 'nim')
# serializer_class = MahasiswaSerializers
# class KrsViewSet(viewsets.ModelViewSet):
# queryset = KRS.objects.all().order_by('priode__kode', 'mahasiswa__angkatan__tahun', 'mahasiswa__nim')
# serializer_class = KrsSerializers
# class KrsDetailViewSet(viewsets.ModelViewSet):
# queryset = KRSDetail.objects.all().order_by('matakuliah__priode__kode', 'matakuliah__nama')
# serializer_class = KrsDetailSerializers |
from api.send_request import futblot24

# Futbol24 team-comparison page (Chonburi FC vs Nakhon Ratchasima) with
# the table/limit query parameters baked into the URL.
page_url = 'https://www.futbol24.com/teamCompare/Thailand/Chonburi-FC/vs/Thailand/Nakhon-Ratchasima/?statTALR-Table=1&' \
           'statTALR-Limit=2&statTBLR-Table=2&statTBLR-Limit=2'

response = futblot24.send_request(page_url)
print(response)
|
# A small tour of common str operations on a sample sentence.
example = "Python is classic"

# Case conversion.
lower_example = example.lower()
print("Lower case example - {}".format(lower_example))
upper_example = example.upper()
print("Upper case example - {}".format(upper_example))

# Length in characters, including spaces.
length = len(example)
print("String length - {}".format(length))

# Split on single spaces into a list of words.
words = example.split(" ")
print("Words in the example - {}".format(words))

# Slicing: [:7] -> "Python ", [10:] -> "classic", [7:10] -> "is ".
first_part = example[:7]
print("First part - {}".format(first_part))
last_part = example[10:]
print("Last part - {}".format(last_part))
middle_part = example[7:10]
print("Middle part - {}".format(middle_part))

# Concatenation with +.
example2 = " and simple"
print("concatenated string - {}".format(example + example2))
|
import numpy as np
from pylearn2.utils import serial
from copy import deepcopy
class BinaryResult(object):
    """Dummy attribute container for binary-CSP results (filled in by CSPResult)."""
    pass
class CSPResult(object):
    """Lightweight snapshot of a CSP training run's results."""

    def __init__(self, csp_trainer, parameters, training_time):
        self.multi_class = csp_trainer.multi_class
        self.templates = {}
        self.training_time = training_time
        self.parameters = deepcopy(parameters)
        # Cleaning results.
        self.rejected_chan_names = csp_trainer.rejected_chan_names
        self.rejected_trials = csp_trainer.rejected_trials
        self.clean_trials = csp_trainer.clean_trials
        # Selected binary-CSP results.
        self.binary = BinaryResult()
        self.binary.train_accuracy = csp_trainer.binary_csp.train_accuracy
        self.binary.test_accuracy = csp_trainer.binary_csp.test_accuracy
        self.binary.filterbands = csp_trainer.binary_csp.filterbands

    def get_misclasses(self):
        """Return train/test misclassification rates (1 - accuracy) as arrays."""
        results = self.multi_class
        return {
            'train': np.array([1 - acc for acc in results.train_accuracy]),
            'test': np.array([1 - acc for acc in results.test_accuracy]),
        }

    def save(self, filename):
        """Pickle this result to *filename* via pylearn2's serial helper."""
        serial.save(filename, self)


# Backwards-compatibility alias for printing/unpickling earlier results.
TrainCSPResult = CSPResult
class CSPModel(object):
    """Wrapper for persisting a trained CSP experiment. Warning: can be quite big."""

    def __init__(self, experiment):
        self.experiment = experiment

    def save(self, filename):
        """Strip the bulky continuous-data attributes, then pickle the experiment."""
        experiment = self.experiment
        if hasattr(experiment.binary_csp, 'cnt'):
            del experiment.binary_csp.cnt
        if hasattr(experiment, 'test_cnt'):
            del experiment.test_cnt
        del experiment.cnt
        serial.save(filename, experiment)
|
from genderbias.detector import Report, Issue, Flag, BiasBoundsException
from pytest import fixture, raises
report_name = "Text Analyzer"
summary = "[summary]"
flag = Flag(0, 10, Issue(report_name, "A", "B"))
positive_flag = Flag(20, 30, Issue(report_name, "C", "D", bias = Issue.positive_result))
no_summary_text = " SUMMARY: [None available]"
flag_text = " [0-10]: " + report_name + ": A (B)"
base_dict = {'name': report_name, 'summary': "", 'flags': []}
positive_flag_tuple = (20, 30, report_name, "C", "D", +1.0)
negative_flag_tuple = (0, 10, report_name, "A", "B", -1.0)
@fixture
def report():
return Report(report_name)
def test_report_str_no_flags(report):
    """A fresh report renders as its name plus the no-summary placeholder."""
    expected = "\n".join([report_name, no_summary_text])
    assert str(report) == expected
def test_report_str_with_one_flag(report):
    """A negatively-biased flag appears as its own line in the rendering."""
    report.add_flag(flag)
    expected = "\n".join([report_name, flag_text, no_summary_text])
    assert str(report) == expected
def test_report_str_no_flags_with_summary(report):
    """Setting a summary replaces the placeholder line in the rendering."""
    report.set_summary(summary)
    expected = "\n".join([report_name, " SUMMARY: " + summary])
    assert str(report) == expected
def test_report_to_dict_no_flags(report):
    """A fresh report serializes to the empty base dictionary."""
    assert report.to_dict() == base_dict
def test_report_to_dict_with_one_flag(report):
    """An added flag is serialized as a tuple in the 'flags' list."""
    report.add_flag(flag)
    expected = dict(base_dict, flags=[negative_flag_tuple])
    assert report.to_dict() == expected
def test_report_to_dict_with_summary(report):
    """set_summary is reflected in the serialized dictionary."""
    report.set_summary(summary)
    expected = dict(base_dict, summary=summary)
    assert report.to_dict() == expected
def test_report_with_positive_flags(report):
    """Positive flags stay out of the text render but appear in the dict."""
    plain_render = "\n".join([report_name, no_summary_text])
    expected_flags = []
    for _ in range(2):
        report.add_flag(positive_flag)
        expected_flags.append(positive_flag_tuple)
        assert str(report) == plain_render
        assert report.to_dict() == dict(base_dict, flags=expected_flags)
def test_report_with_mixed_flags(report):
    """Only the negative flag is rendered, but both flags are serialized."""
    report.add_flag(positive_flag)
    report.add_flag(flag)
    expected_str = "\n".join([report_name, flag_text, no_summary_text])
    assert str(report) == expected_str
    assert report.to_dict() == dict(base_dict,
                                    flags=[positive_flag_tuple, negative_flag_tuple])
# TODO: These should move to a new test_issue file or similar, in time
def test_issue_bias_bounds():
    """Issue rejects bias values even slightly outside the allowed range."""
    epsilon = 0.0000001
    for bad_bias in (Issue.positive_result + epsilon,
                     Issue.negative_result - epsilon):
        with raises(BiasBoundsException):
            Issue("", bias=bad_bias)
|
from django.contrib import admin
from .models import ExamLibItem, Paper, ExamItem, ExamResult
# Register your models here.
class ExamItemInline(admin.TabularInline):
    # Tabular inline editor for ExamItem rows on a parent admin page.
    model = ExamItem
    extra = 0  # do not show blank extra forms by default
class ExamLibItemAdmin(admin.ModelAdmin):
    # NOTE(review): ModelAdmin does not read an inner Meta class, so Django
    # ignores this block — confirm whether any options were meant to be set
    # directly on the admin class instead.
    class Meta:
        model = ExamLibItem
class PaperAdmin(admin.ModelAdmin):
    # Columns shown on the Paper change list; '__unicode__' renders the
    # model's own string representation as a column.
    list_display = ('id','name','type','__unicode__')
    # Two-pane selector widget for the many-to-many exam-item field.
    filter_horizontal = ['examlibitem']
    class Media:
        # Extra stylesheet loaded on this model's admin pages.
        css = {
            "all": ("admin/extra/css/changelists.css",)
        }
    # NOTE(review): ModelAdmin ignores an inner Meta class — confirm it was
    # not meant to hold admin options.
    class Meta:
        model = Paper
    inlines = [
        ExamItemInline
    ]
class ExamItemAdmin(admin.ModelAdmin):
    # Change-list configuration: visible columns, searchable fields,
    # sidebar filters and default ordering.
    list_display = ('paper','examlibitem','answer','score_result','user',)
    search_fields = ('examlibitem__title','score_result')
    list_filter = ('user','score_result',)
    ordering = ('user__email','score_result')
    # NOTE(review): inner Meta is ignored by ModelAdmin.
    class Meta:
        model = ExamItem
class ExamResultAdmin(admin.ModelAdmin):
    # Columns shown on the ExamResult change list.
    list_display = ('paper','score','user',)
    # Edit the result's ExamItem rows inline on the same page.
    inlines = [
        ExamItemInline
    ]
    # NOTE(review): inner Meta is ignored by ModelAdmin.
    class Meta:
        model = ExamResult
# Register the exam models with the Django admin site.
# (Fix: removed a stray trailing '|' after the last call that made the
# module a SyntaxError.)
admin.site.register(ExamLibItem, ExamLibItemAdmin)
admin.site.register(Paper, PaperAdmin)
admin.site.register(ExamItem, ExamItemAdmin)
admin.site.register(ExamResult, ExamResultAdmin)
from keras.preprocessing.image import ImageDataGenerator, DirectoryIterator
import numpy as np
class BalancedGenerator(ImageDataGenerator):
    """``ImageDataGenerator`` that corrects class imbalance by over-sampling.

    All constructor arguments are forwarded unchanged to
    ``keras.preprocessing.image.ImageDataGenerator`` (see the Keras
    documentation for their meaning).  The behavioural difference is in
    :meth:`flow_from_directory`, which randomly repeats images of the
    under-represented classes until every class contains as many samples
    as the largest one.
    """
    def __init__(self,
                 featurewise_center=False,
                 samplewise_center=False,
                 featurewise_std_normalization=False,
                 samplewise_std_normalization=False,
                 zca_whitening=False,
                 zca_epsilon=1e-6,
                 rotation_range=0.,
                 width_shift_range=0.,
                 height_shift_range=0.,
                 shear_range=0.,
                 zoom_range=0.,
                 channel_shift_range=0.,
                 fill_mode='nearest',
                 cval=0.,
                 horizontal_flip=False,
                 vertical_flip=False,
                 rescale=None,
                 preprocessing_function=None,
                 data_format=None):
        # Initialize variables and methods from base class (ImageDataGenerator)
        super(BalancedGenerator, self).__init__(featurewise_center=featurewise_center,
                                                samplewise_center=samplewise_center,
                                                featurewise_std_normalization=featurewise_std_normalization,
                                                samplewise_std_normalization=samplewise_std_normalization,
                                                zca_whitening=zca_whitening,
                                                zca_epsilon=zca_epsilon,
                                                rotation_range=rotation_range,
                                                width_shift_range=width_shift_range,
                                                height_shift_range=height_shift_range,
                                                shear_range=shear_range,
                                                zoom_range=zoom_range,
                                                channel_shift_range=channel_shift_range,
                                                fill_mode=fill_mode,
                                                cval=cval,
                                                horizontal_flip=horizontal_flip,
                                                vertical_flip=vertical_flip,
                                                rescale=rescale,
                                                preprocessing_function=preprocessing_function,
                                                data_format=data_format)
    def flow_from_directory(self, directory, target_size=(256, 256), color_mode='rgb', classes=None,
                            class_mode='categorical', batch_size=32, shuffle=True, seed=None, save_to_dir=None,
                            save_prefix='', save_format='png', follow_links=False, subset=None,
                            interpolation='nearest'):
        """Generate batches from *directory*, over-sampling minority classes.

        Builds a regular ``DirectoryIterator``, then pads its
        ``filenames``/``classes`` lists so every class reaches the size of
        the largest one — the iterator therefore "thinks" it has more
        samples (from the under-represented classes) than actually exist
        on disk.  Arguments are the standard
        ``ImageDataGenerator.flow_from_directory`` arguments.

        NOTE(review): ``subset`` is accepted but never forwarded to the
        ``DirectoryIterator`` below, so validation splitting is silently
        ignored — confirm whether it should be passed through.
        """
        # Initialize the DirectoryIterator
        it = DirectoryIterator(directory, self, target_size=target_size, color_mode=color_mode,
                               classes=classes, class_mode=class_mode, data_format=self.data_format,
                               batch_size=batch_size, shuffle=shuffle, seed=seed, save_to_dir=save_to_dir,
                               save_prefix=save_prefix, save_format=save_format, follow_links=follow_links,
                               interpolation=interpolation)
        # Define target number of images for each class to reach
        # (the size of the biggest class).
        self.target = np.bincount(it.classes).max()
        # Create lists containing the images and their respective labels,
        # sampled multiple times if necessary in order to reach the target
        # number for each class.
        new_filenames = []
        new_classes = []
        # Progress table: per-class image counts before/after over-sampling.
        print('\n ' + '-' * 31 + ' ')
        print('| | Number of images |')
        print('| Class | ------------------ |')
        print('| | Previous | Current |')
        print(' ' + '-' * 31 + ' ')
        for c in range(it.num_classes):
            new_filenames += self.balance(np.array(it.filenames)[it.classes == c], class_name=c, seed=seed)
            new_classes += [c] * self.target
        print(' ' + '-' * 31 + ' \n')
        # Replaces the DirectoryIterator's lists with the ones we created
        it.filenames = new_filenames
        it.classes = np.array(new_classes)
        del new_filenames, new_classes
        # Replaces the maximum number of samples in the DirectoryIterator
        it.n = it.samples = len(it.filenames)
        print('Total number of images after Over-Sampling:', it.n)
        return it
    def balance(self, index_slice, class_name, seed=None):
        """Return *index_slice* repeated/sampled up to ``self.target`` items.

        Whole copies of the slice are taken first; the remainder is drawn
        uniformly at random.  NOTE(review): ``np.random.seed`` mutates the
        *global* NumPy RNG state on every call — confirm callers do not
        rely on that state elsewhere.
        """
        np.random.seed(seed)
        current = len(index_slice)
        print('| {:^6} | {:^8} | {:^8} |'.format(class_name, current, self.target))
        return list(index_slice) * (self.target // current) + \
            list(index_slice[np.random.randint(current,
                                               size=(self.target % current))])
class CustomImageGenerator(ImageDataGenerator):
    """``ImageDataGenerator`` whose ``flow_from_directory`` yields two targets.

    The returned iterator is a :class:`CustomDirectoryIterator`, so each
    batch has the form ``(x, {first_name: x, second_name: y})`` — the input
    image doubles as a reconstruction target alongside the class labels,
    which suits models with an autoencoder-style output plus a
    classification output.

    All augmentation/normalization arguments are forwarded unchanged to
    ``ImageDataGenerator``; see the Keras documentation for their meaning.
    ``output_names`` optionally names the two outputs in the yielded
    target dictionary.
    """
    def __init__(self,
                 featurewise_center=False,
                 samplewise_center=False,
                 featurewise_std_normalization=False,
                 samplewise_std_normalization=False,
                 zca_whitening=False,
                 zca_epsilon=1e-6,
                 rotation_range=0.,
                 width_shift_range=0.,
                 height_shift_range=0.,
                 brightness_range=None,
                 shear_range=0.,
                 zoom_range=0.,
                 channel_shift_range=0.,
                 fill_mode='nearest',
                 cval=0.,
                 horizontal_flip=False,
                 vertical_flip=False,
                 rescale=None,
                 preprocessing_function=None,
                 data_format=None,
                 output_names=None):
        # Names for the two outputs yielded by CustomDirectoryIterator.
        self.output_names = output_names
        # NOTE(review): ``brightness_range`` is accepted above but neither
        # stored nor forwarded to the base class, so it is silently
        # ignored — confirm whether it should be passed to
        # ImageDataGenerator (supported in Keras >= 2.2.0).
        super(CustomImageGenerator, self).__init__(featurewise_center=featurewise_center,
                                                   samplewise_center=samplewise_center,
                                                   featurewise_std_normalization=featurewise_std_normalization,
                                                   samplewise_std_normalization=samplewise_std_normalization,
                                                   zca_whitening=zca_whitening,
                                                   zca_epsilon=zca_epsilon,
                                                   rotation_range=rotation_range,
                                                   width_shift_range=width_shift_range,
                                                   height_shift_range=height_shift_range,
                                                   shear_range=shear_range,
                                                   zoom_range=zoom_range,
                                                   channel_shift_range=channel_shift_range,
                                                   fill_mode=fill_mode,
                                                   cval=cval,
                                                   horizontal_flip=horizontal_flip,
                                                   vertical_flip=vertical_flip,
                                                   rescale=rescale,
                                                   preprocessing_function=preprocessing_function,
                                                   data_format=data_format)
    def flow_from_directory(self, directory,
                            target_size=(256, 256), color_mode='rgb',
                            classes=None, class_mode='categorical',
                            batch_size=32, shuffle=True, seed=None,
                            save_to_dir=None,
                            save_prefix='',
                            save_format='png',
                            follow_links=False,
                            subset=None,
                            interpolation='nearest',
                            output_names=None):
        """Same as ``ImageDataGenerator.flow_from_directory`` but returns a
        :class:`CustomDirectoryIterator`.

        The iterator yields ``(x, {name_x: x, name_y: y})`` dictionaries
        keyed by ``output_names`` (falling back to
        ``'main_output'``/``'aux_output'`` when ``output_names`` is None).
        All other arguments behave exactly as documented for the Keras
        method of the same name.
        """
        return CustomDirectoryIterator(
            directory, self,
            target_size=target_size, color_mode=color_mode,
            classes=classes, class_mode=class_mode,
            data_format=self.data_format,
            batch_size=batch_size, shuffle=shuffle, seed=seed,
            save_to_dir=save_to_dir,
            save_prefix=save_prefix,
            save_format=save_format,
            follow_links=follow_links,
            subset=subset,
            interpolation=interpolation,
            output_names=output_names)
class CustomDirectoryIterator(DirectoryIterator):
    """``DirectoryIterator`` that yields two targets per batch.

    Each batch is returned as ``(x, {first: x, second: y})`` where the keys
    come from ``output_names`` (default ``'main_output'``/``'aux_output'``):
    the input images themselves serve as a reconstruction target next to
    the usual class labels.  All other constructor arguments behave exactly
    as in ``keras.preprocessing.image.DirectoryIterator``.
    """
    def __init__(self, directory, image_data_generator,
                 target_size=(256, 256), color_mode='rgb',
                 classes=None, class_mode='categorical',
                 batch_size=32, shuffle=True, seed=None,
                 data_format=None,
                 save_to_dir=None, save_prefix='', save_format='png',
                 follow_links=False,
                 subset=None,
                 interpolation='nearest',
                 output_names=None):
        # Keys used for the target dict in _get_batches_of_transformed_samples.
        self.output_names = output_names
        # NOTE(review): ``subset`` is accepted but not forwarded to the base
        # class, so validation splitting is silently ignored — confirm
        # whether it should be passed through.
        super(CustomDirectoryIterator, self).__init__(directory, image_data_generator,
                                                      target_size=target_size, color_mode=color_mode,
                                                      classes=classes, class_mode=class_mode,
                                                      batch_size=batch_size, shuffle=shuffle, seed=seed,
                                                      data_format=data_format, save_to_dir=save_to_dir,
                                                      save_prefix=save_prefix, save_format=save_format,
                                                      follow_links=follow_links, interpolation=interpolation)
    def _get_batches_of_transformed_samples(self, index_array):
        # Build the batch as usual, then duplicate the images as a second
        # (reconstruction) target next to the labels.
        batch_x, batch_y = super(CustomDirectoryIterator, self)._get_batches_of_transformed_samples(index_array)
        if self.output_names:
            return batch_x, {self.output_names[0]: batch_x, self.output_names[1]: batch_y}
        return batch_x, {'main_output': batch_x, 'aux_output': batch_y}
def transfer_weights(pretrained_model, new_model):
    """Copy weights from *pretrained_model* into *new_model*.

    Meant for moving a HalfDenseNetFCN's weights into an
    AuxiliaryDenseNetFCN: the leading layers are copied positionally while
    their output shapes still agree; every remaining (auxiliary) layer is
    then matched by name instead.

    :param pretrained_model: model supplying the weights.
    :param new_model: model receiving the weights; must share the input shape.
    :raises ValueError: if the two models' input shapes differ.
    :return: True, if the transfer is successful.
    """
    if pretrained_model.input_shape != new_model.input_shape:
        raise ValueError('Models should have matching input shapes.')
    # Positional pass: copy while the architectures line up.
    matched = 0
    for source_layer in pretrained_model.layers:
        target_layer = new_model.layers[matched]
        if source_layer.output_shape != target_layer.output_shape:
            break
        target_layer.set_weights(source_layer.get_weights())
        matched += 1
    # Name-based pass for the remaining (auxiliary) layers.
    for source_layer in pretrained_model.layers[matched:]:
        new_model.get_layer(source_layer.name).set_weights(source_layer.get_weights())
    print('Successfully transferred weights from {} layers.'.format(len(pretrained_model.layers)))
    return True
def normalize_imagenet(img):
    """Normalize an image in place with ImageNet channel statistics.

    Subtracts the per-channel means and scales by 0.017, matching the
    preprocessing of the pretrained model from
    https://github.com/flyyufelix/cnn_finetune .  Intended to be plugged
    into an ImageDataGenerator as ``preprocessing_function``.

    :param img: rank-3 image array (height, width, 3 channels).
    :return: the same (mutated) image array.
    """
    channel_means = (123.68, 116.78, 103.94)
    for channel, mean in enumerate(channel_means):
        img[:, :, channel] = (img[:, :, channel] - mean) * 0.017
    return img
|
# coding: utf-8
"""Loan analyzer exercise: summary stats, valuation, filtering, CSV export."""
import csv
from pathlib import Path

# ---------------------------------------------------------------------------
# Part 1: Automate the Calculations.
# Compute the number of loans, their total value and the average price for a
# small list of loan costs, then print the results with descriptive messages.
# ---------------------------------------------------------------------------
loan_costs = [500, 600, 200, 1000, 450]

number_of_loans = len(loan_costs)
total_loans = sum(loan_costs)
average_loan_amount = round(total_loans / number_of_loans, 2)

for message in (
    f"The total value of the loans is ${total_loans}",
    f"The loan portfolio consists of {number_of_loans} loans",
    f"The average loan amount is ${average_loan_amount}",
):
    print(message)
"""Part 2: Analyze Loan Data.
Analyze the loan to determine the investment evaluation.
Using more detailed data on one of these loans, follow these steps to calculate a Present Value, or a "fair price" for what this loan would be worth.
1. Use get() on the dictionary of additional information to extract the
**Future Value** and **Remaining Months** on the loan.
a. Save these values as variables called `future_value` and `remaining_months`.
b. Print each variable.
@NOTE:
**Future Value**: The amount of money the borrower has to pay back upon maturity of the loan (a.k.a. "Face Value")
**Remaining Months**: The remaining maturity (in months) before the loan needs to be fully repaid.
2. Use the formula for Present Value to calculate a "fair value" of the loan. Use a minimum required return of 20% as the discount rate.
3. Write a conditional statement (an if-else statement) to decide if the present value represents the loan's fair value.
a. If the present value of the loan is greater than or equal to the cost, then print a message that says the loan is worth at least the cost to buy it.
b. Else, the present value of the loan is less than the loan cost, then print a message that says that the loan is too expensive and not worth the price.
@NOTE:
If Present Value represents the loan's fair value (given the required minimum return of 20%), does it make sense to buy the loan at its current cost?
"""
loan = {
"loan_price": 500,
"remaining_months": 9,
"repayment_interval": "bullet",
"future_value": 1000,
}
"""Extracts the Future Value and Remaining Value of the loan from dictionary
and assigns it to the variables"""
future_value=loan.get("future_value")
remaining_months=loan.get("remaining_months")
annual_discount_rate=0.2
"""Prints the Future Value and the amount of months remaining for the loan repayment"""
print("The future value of the loan is $", future_value)
print(f"It remains {remaining_months} months for the loan")
"""Calculates the Present Value of the Loan and prints it"""
present_value=round(future_value/(1+annual_discount_rate/12)**remaining_months,2)
print("The Present Value of this loan is $", present_value)
"""Compairs the Present Value of the Loan to its price to decide whether to buy it or not"""
if present_value >loan.get("loan_price"):
print("The cost of the loan is worth buying!")
elif present_value == loan.get("loan_price"):
print("The cost of the loan is worth at least considering!")
else:
print("The loan is too expensive and not worth the price!")
"""Part 3: Perform Financial Calculations.
Perform financial calculations using functions.
1. Define a new function that will be used to calculate present value.
a. This function should include parameters for `future_value`, `remaining_months`, and the `annual_discount_rate`
b. The function should return the `present_value` for the loan.
2. Use the function to calculate the present value of the new loan given below.
a. Use an `annual_discount_rate` of 0.2 for this new loan calculation.
"""
# Given the following loan data, you will need to calculate the present value for the loan
new_loan = {
"loan_price": 800,
"remaining_months": 12,
"repayment_interval": "bullet",
"future_value": 1000,
}
""" This function calculates the Present Value
requiered parameters are
1. future_value
2. anual_discount_rate
3. remaining_months
and returns present_value
"""
def pres_value(future_value,annual_discount_rate,remaining_months):
present_value =future_value/(1+annual_discount_rate/12)**remaining_months
return present_value
"""Assigns the values from the new_loan dictionary to the variables"""
future_value=new_loan.get("future_value")
remaining_months=new_loan.get("remaining_months")
annual_discount_rate=0.2
"""Calls the function pres_value to calculate the Present Value of the new loan"""
present_value=round(pres_value(future_value,annual_discount_rate,remaining_months), 2)
print(f"The present value of the new loan is $ {present_value}")
"""Part 4: Conditionally filter lists of loans.
In this section, you will use a loop to iterate through a series of loans and select only the inexpensive loans.
1. Create a new, empty list called `inexpensive_loans`.
2. Use a for loop to select each loan from a list of loans.
a. Inside the for loop, write an if-statement to determine if the loan_price is less than 500
b. If the loan_price is less than 500 then append that loan to the `inexpensive_loans` list.
3. Print the list of inexpensive_loans.
"""
loans = [
{
"loan_price": 700,
"remaining_months": 9,
"repayment_interval": "monthly",
"future_value": 1000,
},
{
"loan_price": 500,
"remaining_months": 13,
"repayment_interval": "bullet",
"future_value": 1000,
},
{
"loan_price": 200,
"remaining_months": 16,
"repayment_interval": "bullet",
"future_value": 1000,
},
{
"loan_price": 900,
"remaining_months": 16,
"repayment_interval": "bullet",
"future_value": 1000,
},
]
inexpensive_loans=[]
"""Finds the loans that cost less or equal than $ 500 in the list of dictionaries
and creates new list of dictionaries called 'inexpensive_loans'
then prints it"""
for each_loan in loans:
price_of_loan=each_loan.get("loan_price")
if price_of_loan<=500:
inexpensive_loans.append(each_loan)
print(inexpensive_loans)
"""Part 5: Save the results.
Output this list of inexpensive loans to a csv file
1. Use `with open` to open a new CSV file.
a. Create a `csvwriter` using the `csv` library.
b. Use the new csvwriter to write the header variable as the first row.
c. Use a for loop to iterate through each loan in `inexpensive_loans`.
d. Use the csvwriter to write the `loan.values()` to a row in the CSV file.
"""
"""Sets the csv output path to record the data
and defines the header in the file"""
csvpath=Path("inexpensive_loans.csv")
header=(inexpensive_loans[1].keys())
"""
Writes in the 'inexpensive_loans.csv' all inexpencive loans
"""
with open(csvpath,'w',) as csvfile:
csvwriter=csv.writer(csvfile, delimiter=",")
csvwriter.writerow(header)
for each_loan in inexpensive_loans:
csvwriter.writerow(each_loan.values())
|
# -*- coding: utf-8 -*-
# Variables: a name can be rebound to a new value at any time.
nome = "Anderson Oliveira"
print(nome)
nome = "Padawan para Jedi"
print(nome)
# Note that the variable's value can be changed whenever needed.
# (Fix: removed a stray trailing '|' that made the last line a SyntaxError.)
input("Pressione qualquer tecla para continuar")
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2020-05-19 22:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the ``selecao`` app.

    Adds ``candidato.aceitou`` and alters a number of existing ``candidato``
    CharFields (all to ``max_length=1000``, several with new verbose names).
    Generated by Django 1.11 — avoid hand-editing beyond documentation.
    """

    dependencies = [
        ('selecao', '0006_auto_20190901_2325'),
    ]

    operations = [
        migrations.AddField(
            model_name='candidato',
            name='aceitou',
            field=models.CharField(blank=True, max_length=1000),
        ),
        migrations.AlterField(
            model_name='candidato',
            name='atendimento_especial',
            field=models.CharField(blank=True, max_length=1000, verbose_name='Tem problema com internet'),
        ),
        migrations.AlterField(
            model_name='candidato',
            name='atividade',
            field=models.CharField(blank=True, max_length=1000, verbose_name='Atividade Remunerada fora da UFAL'),
        ),
        migrations.AlterField(
            model_name='candidato',
            name='bolsa',
            field=models.CharField(blank=True, max_length=1000),
        ),
        migrations.AlterField(
            model_name='candidato',
            name='cpf',
            field=models.CharField(max_length=1000),
        ),
        migrations.AlterField(
            model_name='candidato',
            name='curso',
            field=models.CharField(max_length=1000),
        ),
        migrations.AlterField(
            model_name='candidato',
            name='email',
            field=models.CharField(max_length=1000),
        ),
        migrations.AlterField(
            model_name='candidato',
            name='endereco',
            field=models.CharField(max_length=1000),
        ),
        migrations.AlterField(
            model_name='candidato',
            name='estado_civil',
            field=models.CharField(max_length=1000),
        ),
        migrations.AlterField(
            model_name='candidato',
            name='local',
            field=models.CharField(max_length=1000),
        ),
        migrations.AlterField(
            model_name='candidato',
            name='mora_com',
            field=models.CharField(max_length=1000),
        ),
        migrations.AlterField(
            model_name='candidato',
            name='naturalidade',
            field=models.CharField(max_length=1000),
        ),
        migrations.AlterField(
            model_name='candidato',
            name='necessita_atendimento_especial',
            field=models.CharField(blank=True, max_length=1000),
        ),
        migrations.AlterField(
            model_name='candidato',
            name='nome',
            field=models.CharField(max_length=1000, verbose_name='Nome Civil'),
        ),
        migrations.AlterField(
            model_name='candidato',
            name='nome_social',
            field=models.CharField(blank=True, max_length=1000, verbose_name='Nome Social'),
        ),
        migrations.AlterField(
            model_name='candidato',
            name='orientador',
            field=models.CharField(blank=True, max_length=1000),
        ),
        migrations.AlterField(
            model_name='candidato',
            name='periodo',
            field=models.CharField(max_length=1000),
        ),
        migrations.AlterField(
            model_name='candidato',
            name='projeto',
            field=models.CharField(blank=True, max_length=1000),
        ),
        migrations.AlterField(
            model_name='candidato',
            name='reserva_de_vagas',
            field=models.CharField(blank=True, max_length=1000),
        ),
        migrations.AlterField(
            model_name='candidato',
            name='rg',
            field=models.CharField(max_length=1000),
        ),
        migrations.AlterField(
            model_name='candidato',
            name='situacao_projeto',
            field=models.CharField(blank=True, max_length=1000),
        ),
        migrations.AlterField(
            model_name='candidato',
            name='telefone',
            field=models.CharField(max_length=1000),
        ),
    ]
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 4 23:13:38 2020
@author: Abhishek Hiremath
"""
import pandas as pd
def rename(full_name):
    """Map a country's full English name to its 3-letter code.

    Returns None for any country outside the five studied; pandas'
    .apply() then surfaces those rows as NaN in the LOCATION column
    (same behavior as the original if/elif chain's implicit None).
    """
    codes = {
        "United States": "USA",
        "United Kingdom": "GBR",
        "Spain": "ESP",
        "Italy": "ITA",
        "Germany": "DEU",
    }
    return codes.get(full_name)
five_country = pd.read_csv('C:/Users/Tinker Zheng/Desktop/NYU/MG8411/final/fivecountrydata.csv')
five_country['TIME']=five_country['date'].apply(lambda x: x[:7])
five_country['LOCATION'] = five_country['location'].apply(rename)
five_country = five_country.groupby(['LOCATION','TIME']).sum().reset_index()
share_price = pd.read_csv('C:/Users/Tinker Zheng/Desktop/NYU/MG8411/final/shareprice.csv')
share_price = share_price[['LOCATION','TIME','share_price_value']].groupby(['LOCATION','TIME']).sum().reset_index()
inflation = pd.read_csv('C:/Users/Tinker Zheng/Desktop/NYU/MG8411/final/inflation.csv')
inflation = inflation[['LOCATION','TIME','cpi']].groupby(['LOCATION','TIME']).sum().reset_index()
from functools import partial, reduce
dfs = [five_country, share_price, inflation]
merge = partial(pd.merge, on=['LOCATION','TIME'], how='inner')
df=reduce(merge, dfs)
df.columns
df['new_case_rate']=df['total_cases'].pct_change()
df['new_case_rate']
import numpy as np
df['new_case_rate'][np.isinf(df['new_case_rate'])] = np.nan
df['new_case_rate']
df['inflation_rate']=df['cpi'].pct_change()
df['inflation_rate']
df.isnull().sum()
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
df['new_case_rate'] = imputer.fit_transform(df[['new_case_rate']])
# death_rate: deaths per confirmed case; missing values imputed with the mean.
df['death_rate'] = df['total_deaths'] / df['total_cases']
# BUG FIX: this previously imputed from df[['new_case_rate']], silently
# replacing death_rate with the wrong column's values.
df['death_rate'] = imputer.fit_transform(df[['death_rate']])
df['death_rate']
df['inflation_rate'] = imputer.fit_transform(df[['inflation_rate']])
df['inflation_rate']
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
X,y=df[['new_case_rate','death_rate']] , df['inflation_rate']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
clf = LinearRegression()
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
r2 = r2_score(y_test, y_pred)
print('r2 score is = ' + str(r2))#r2 score is = -2.8398156875199563
from sklearn.linear_model import Lasso
from sklearn.metrics import r2_score
clf = Lasso(alpha=0.3)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
r2 = r2_score(y_test, y_pred)
print('r2 score is = ' + str(r2))# -2.539164111223717
# Ridge Regression #
from sklearn.linear_model import Ridge
from sklearn.metrics import r2_score
clf = Ridge(alpha=0.3)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
r2 = r2_score(y_test, y_pred)
print('r2 score is = ' + str(r2))#-2.839815413070732
#LogisticRegression for share price
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
import seaborn as sns
#European 19 countries' share price in 2019 is 104.92 and USA is 120.4798
bi = []
for i in range(len(df['share_price_value'])):
if df['share_price_value'][i]>=120.4798 and df['LOCATION'][i]=='USA':
bi.append(1)
elif df['share_price_value'][i]<120.4798 and df['LOCATION'][i]=='USA':
bi.append(0)
elif df['share_price_value'][i]<104.92 and df['LOCATION'][i]!='USA':
bi.append(0)
elif df['share_price_value'][i]>=104.92 and df['LOCATION'][i]!='USA':
bi.append(1)
df['binary']=bi
df['binary']
X=df[['new_case_rate','death_rate']]
y=df['binary']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
clf=LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,
intercept_scaling=1, max_iter=100, multi_class='ovr', n_jobs=1,
penalty='l2', random_state=None, solver='liblinear', tol=0.0001,
verbose=0, warm_start=False)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print(y_pred)
f1 = f1_score(y_test, y_pred)
print('f1 score is = ' + str(f1))
#confusion matrix
import sklearn.metrics as skm
tn, fp, fn, tp = skm.confusion_matrix(y_test, y_pred).ravel()
print("tn:",tn, "\nfp:",fp, "\nfn:",fn, "\ntp:",tp)
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
cm = confusion_matrix(y_pred, y_test)
labels = ['top crescent', 'bottom cresent']
df_cm = pd.DataFrame(cm,
index = labels,
columns = labels)
# plot figure
plt.figure(figsize=(5.5,4))
sns.heatmap(df_cm, cmap="GnBu", annot=True)
#add titles and labels for the axes
plt.title('Logistic Regression \nF1 Score:{0:.3f}'.format(f1_score(y_test, y_pred)))
plt.ylabel('Prediction')
plt.xlabel('Actual Class')
plt.show()
#precision-recall curve
precision_prc, recall_prc, thresholds = skm.precision_recall_curve(y_test, y_pred, pos_label=1)
prc_auc = skm.auc(recall_prc, precision_prc)
print("Area Under Curve: %0.2f" % prc_auc)
plt.plot(recall_prc, precision_prc, 'b', label='Precision-Recall curve')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Precision-Recall curve: AUC=%0.2f' % prc_auc)
plt.show()
#roc-auc curve
fpr, tpr, threshold = skm.roc_curve(y_test, y_pred, pos_label=1)
roc_auc = skm.auc(fpr, tpr)
print('Area Under Curve: %0.3f' % roc_auc)
plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.title('ROC: AUC=%0.2f' % roc_auc)
plt.show()
#logistic regression for inflation rate
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
import seaborn as sns
#world inflation rate in 2019 was 1.76%
bi = []
for i in df['inflation_rate']:
if i >= 0.00176:
bi.append(1)
else:
bi.append(0)
df['binary']=bi
X=df[['new_case_rate','death_rate']]
y=df['binary']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
clf=LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,
intercept_scaling=1, max_iter=100, multi_class='ovr', n_jobs=1,
penalty='l2', random_state=None, solver='liblinear', tol=0.0001,
verbose=0, warm_start=False)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print(y_pred)
f1 = f1_score(y_test, y_pred)
print('f1 score is = ' + str(f1))
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
#confusion matrix
import sklearn.metrics as skm
tn, fp, fn, tp = skm.confusion_matrix(y_test, y_pred).ravel()
print("tn:",tn, "\nfp:",fp, "\nfn:",fn, "\ntp:",tp)
cm = confusion_matrix(y_pred, y_test)
labels = ['top crescent', 'bottom cresent']
df_cm = pd.DataFrame(cm,
index = labels,
columns = labels)
plt.figure(figsize=(5.5,4))
sns.heatmap(df_cm, cmap="GnBu", annot=True)
plt.title('Logistic Regression \nF1 Score:{0:.3f}'.format(f1_score(y_test, y_pred)))
plt.ylabel('Prediction')
plt.xlabel('Actual Class')
plt.show()
plt.title('Logistic Regression \nF1 Score:{0:.3f}'.format(f1_score(y_test, y_pred)))
plt.ylabel('Prediction')
plt.xlabel('Actual Class')
plt.show()
#Precision-recall curve
precision_prc, recall_prc, thresholds = skm.precision_recall_curve(y_test, y_pred, pos_label=1)
prc_auc = skm.auc(recall_prc, precision_prc)
print("Area Under Curve: %0.2f" % prc_auc)
plt.plot(recall_prc, precision_prc, 'b', label='Precision-Recall curve')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Precision-Recall curve: AUC=%0.2f' % prc_auc)
plt.show()
#ROC-AUC curve
fpr, tpr, threshold = skm.roc_curve(y_test, y_pred, pos_label=1)
roc_auc = skm.auc(fpr, tpr)
print('Area Under Curve: %0.3f' % roc_auc)
plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.title('ROC: AUC=%0.2f' % roc_auc)
plt.show()
#ARMA test
import pandas as pd
import matplotlib.pyplot as plt
gdp=pd.read_csv('C:/Users/Tinker Zheng/Desktop/NYU/MG8411/final/gdp_growth_rate.csv',index_col=0)
gdp.index=pd.date_range('1980','2022',freq='Y')
from statsmodels.tsa.arima_model import ARMA
from statsmodels.tsa.stattools import adfuller
def adf_test(series):
    """Run an Augmented Dickey-Fuller test on *series* and print a
    stationarity report (test statistic, p-value, critical values)."""
    print("\n>>> Is",series.name, "stationary?")
    dftest = adfuller(series, autolag='AIC')
    print("Test statistic = {:.3f}".format(dftest[0]))
    print("P-value = {:.3f}".format(dftest[1]))
    print("Critical values :")
    # dftest[4] maps confidence labels like '5%' to critical values; the
    # series is reported stationary at a level when the test statistic is
    # below that level's critical value.
    for k, v in dftest[4].items():
        print("\t{}: {:.3f} - The data is {}stationary with {}% confidence".format(
            k.ljust(4), v, "not " if v<dftest[0] else "", 100-int(k[:-1])))
adf_test(gdp["Germany"])
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from statsmodels.tsa.stattools import acf
plt.subplots(3,1,figsize=(10,10))
ax = plt.subplot(3,1,1)
plt.grid()
plt.plot(gdp["Germany"])
plt.title("Germany gdp")
ax = plt.subplot(3,1,2)
plt.grid()
plot_acf(gdp["Germany"],ax, lags=10, zero=False)
ax = plt.subplot(3,1,3)
plt.grid()
plot_pacf(gdp["Germany"], method="ldb", zero=False, ax=ax, lags=10)
plt.show()
#got white noise so no way to predict by using ARMA
#VAR projection
from statsmodels.tsa.api import VAR
dates=gdp.index
model = VAR(endog=gdp,dates=dates)
model_fit = model.fit()
model_fit.summary()
lag_order = model_fit.k_ar
print(model_fit.forecast(gdp.values[-lag_order:],3))
model_fit.plot_forecast(3)
|
Python 3.4.3 (v3.4.3:9b73f1c3e601, Feb 24 2015, 22:44:40) [MSC v.1600 64 bit (AMD64)] on win32
Type "copyright", "credits" or "license()" for more information.
>>> Programm =("java","python","swift")
>>> if "python" in Programm:
print("Yes , 'python' is in the programm tuple")
Yes , 'python' is in the programm tuple
>>>
|
# 9. Print the Fibonacci numbers below 50.
# The sequence runs 0, 1, 1, 2, 3, 5, 8, 13, 21, ... -- each number is the
# sum of the two before it.
# Expected Output : 1 1 2 3 5 8 13 21 34
prev, curr = 0, 1
while curr < 50:
    print(curr)
    prev, curr = curr, prev + curr
|
# the problem is here:
# https://www.hackerrank.com/challenges/sherlock-and-array
#!/bin/bash/python
# NOTE(review): Python 2 code (print statements, raw_input). Under Python 3
# `map` would be lazy, breaking the len(a)/a[j] indexing below.
# For each test case: report YES iff some index splits the array so the sum
# of elements strictly left of it equals the sum strictly right of it.
T = int(raw_input())
for i in range(T):
    n = int(raw_input())
    a = map(int, raw_input().split(' '))
    sumRight, sumLeft, flag = 0, 0, True
    # sumRight starts as the total; it is reduced as the pivot advances.
    for j in a:
        sumRight += j
    for j in range(len(a)):
        sumRight -= a[j]       # exclude the pivot element itself
        if j:
            sumLeft += a[j-1]  # running sum of elements left of the pivot
        if sumRight == sumLeft:
            print "YES"
            flag = False
            break
    if flag:
        print "NO"
|
def flipWords(word):
    """Return *word* with its whitespace-separated words in reverse order."""
    return " ".join(reversed(word.split()))
print(flipWords("Hello World!"))
|
from game import Game
from main import new_game
from model.config import config
def test_new_game_creates_new_game():
    """Too slow to split into multiple tests."""
    # First game: map, player, optional stallion, XP, inventory, state and
    # messages must all be initialised.
    new_game()
    assert Game.instance.area_map is not None
    assert (Game.instance.area_map.width, Game.instance.area_map.height) != (0, 0)
    assert Game.instance.area_map.tiles != []  # map has been generated
    assert Game.instance.player is not None
    if config.data.stallion.enabled:
        assert Game.instance.stallion is not None
        assert (Game.instance.stallion.x, Game.instance.stallion.y) in (
            (dx + Game.instance.player.x, dy + Game.instance.player.y)
            for dx in range(-5, 6)  # make sure stallion is within 5 tiles from the player
            for dy in range(-5, 6)  # in fact, it could be farther, but that's highly unlikely.
        )
    # NOTE(review): level 5 appears to be the configured starting level --
    # it matches the "4 level up messages" expectation below.
    assert Game.instance.xp_system.get(Game.instance.player).level == 5
    assert Game.instance.inventory is not None
    assert Game.instance.inventory == []
    assert Game.instance.game_state is not None
    assert Game.instance.game_state == 'playing'
    assert Game.instance.game_messages is not None
    assert len(Game.instance.game_messages) == 5  # 1 welcoming message, 4 level up messages.
    # Second game: player must spawn on a walkable tile at a new position.
    old_tile = (Game.instance.player.x, Game.instance.player.y)
    assert Game.instance.area_map.tiles[Game.instance.player.x][Game.instance.player.y].is_walkable
    new_game()
    new_tile = (Game.instance.player.x, Game.instance.player.y)
    assert Game.instance.area_map.tiles[Game.instance.player.x][Game.instance.player.y].is_walkable
    assert old_tile != new_tile
|
# coding=utf-8
from typing import List
from .bot import ZaifBot
from .config import Config
class App:
    """Runs one ZaifBot per configuration, sequentially."""

    def __init__(self, configs: List[Config]) -> None:
        # Store the configurations; bots are created lazily in start().
        self.configs = configs

    def start(self) -> None:
        """Instantiate and start a bot for every configuration, in order."""
        for cfg in self.configs:
            ZaifBot(cfg).start()
|
#!/usr/bin/python3
from pathlib import Path
import urllib.request
IP_FILE = "ip-list.txt"
DATA_DIRECTORY = "data"
iplist = []
def getipFilename(ip):
    """Return the cache-file path for *ip* inside DATA_DIRECTORY."""
    return "/".join((DATA_DIRECTORY, ip))
def hasIPFile(ipfilename):
    """Return True iff *ipfilename* exists and is a regular file.

    (The original spelt out if/return True/return False and shadowed the
    builtin `file`; Path.is_file already returns the needed bool.)
    """
    return Path(ipfilename).is_file()
# NOTE: the upstream service is rate-limited -- do not call this more than
# needed or ~1000 times per day.
def fetch(ip):
    """Fetch the one-line "lat,lon" location for *ip* from ipinfo.io and
    cache it under DATA_DIRECTORY/<ip> (via getipFilename).
    """
    local_filename, headers = urllib.request.urlretrieve(
        "http://ipinfo.io/{}/loc".format(ip))
    # Context managers guarantee the handles are closed even on errors;
    # the original leaked both files if a read/write raised.
    with open(local_filename) as response_file:
        resp = response_file.readline()
    with open(getipFilename(ip), 'w') as cache_file:
        cache_file.write(resp)
# Read the list of IPs to process (one per line), then fetch any that are
# not already cached on disk.
with open(IP_FILE) as ipf:
    for line in ipf:
        # rstrip('\n') instead of line[:-1]: the slice chopped the last
        # character of the final IP when the file lacked a trailing newline.
        iplist.append(line.rstrip('\n'))
for ip in iplist:
    if hasIPFile(getipFilename(ip)):
        print("A file for {:15s} already exists".format(ip))
    else:
        print("A file for {:15s} does not exist".format(ip))
        fetch(ip)
        print("A file for {:15s} has been created".format(ip))
|
# Biweekly contest problem.
class Solution:
    def minimumCost(self, cost: 'List[int]') -> int:
        """Minimum total price when, for every two items bought, a third
        item is free -- taking items in descending price order so the
        free one is always the cheapest of each triple.

        The annotation is quoted because this snippet never imports
        typing.List; evaluating it unquoted raised NameError as soon as
        the class body executed.
        """
        prices = sorted(cost, reverse=True)
        total = 0
        for i, price in enumerate(prices):
            if i % 3 != 2:  # indices 2, 5, 8, ... are the free items
                total += price
        return total
from keras.models import Sequential, Model, load_model
from keras.layers import Dense, Embedding, Activation, merge, Input, Lambda, Reshape
from keras.layers import Conv2D, Flatten, Dropout, MaxPooling2D, UpSampling2D, GlobalAveragePooling1D, BatchNormalization
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping
from keras.preprocessing.image import ImageDataGenerator
import skimage
import numpy as np
import os
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
# --- Data loading ---------------------------------------------------------
# Collect every .jpg under img_dir (sorted for deterministic order) and load
# them as 32x32x3 arrays scaled to [0, 1].
img_dir = "images"
test_case_file = "./test_case.csv"
batch_size = 128
file_names = list()
for f in os.listdir(img_dir):
    if f.endswith(".jpg"):
        file_names.append(os.path.join(img_dir, f))
num_files = len(file_names)
file_names.sort()
train_x = list()
for f in file_names:
    # assumes every image has exactly 32*32*3 values -- reshape raises otherwise
    train_x.append(skimage.io.imread(f, as_gray=False).reshape((32,32,3)))
train_x = np.array(train_x) / 255
# --- Convolutional autoencoder --------------------------------------------
# Encoder: 64 -> 32 -> 16 filters with two 2x2 poolings, so the 'latent'
# layer is 8x8x16; the decoder mirrors it back to 32x32x3 with a sigmoid.
input_img = Input(shape=(32, 32, 3))
adam = Adam(lr=0.00002, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
x = Conv2D(64, (3, 3), activation='relu', padding='same')(input_img)
#x = MaxPooling2D((2, 2), padding='same')(x)
#x = BatchNormalization()(x)
x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
x = MaxPooling2D((2, 2), padding='same')(x)
#x = BatchNormalization()(x)
x = Conv2D(16, (3, 3), activation='relu', padding='same')(x)
encoded = MaxPooling2D((2, 2), padding='same', name='latent')(x)
x = Conv2D(16, (3, 3), activation='relu', padding='same')(encoded)
x = UpSampling2D((2, 2))(x)
#x = BatchNormalization()(x)
x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
x = UpSampling2D((2, 2))(x)
x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)
#x = BatchNormalization()(x)
#x = UpSampling2D((2, 2))(x)
decoded = Conv2D(3, (3, 3), activation='sigmoid', padding='same')(x)
autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer=adam, loss='mse')
print(autoencoder.summary())
# Inputs are also the targets (reconstruction); stop once val_loss has not
# improved for 5 epochs.
es = EarlyStopping(monitor='val_loss',
                   min_delta=0,
                   patience=5,
                   verbose=0)
autoencoder.fit(train_x,
                train_x,
                batch_size=batch_size,
                shuffle=True,
                epochs=1000,
                validation_split=0.1,
                verbose=1,
                callbacks=[es]
                )
autoencoder.save("encoder.h5")
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 14 21:59:46 2020
@author: thomas
"""
import numpy as np
import pandas as pd
import os, sys
import time as t
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from matplotlib.ticker import MaxNLocator
import pathlib
from matplotlib.colors import Normalize
import copy
norm = Normalize()
#CONSTANTS
cwd_PYTHON = os.getcwd() + '/'
RHO = 1000.0
DX = 0.025/256.0
PERIOD = 0.1
OMEGA = 2.0*np.pi/PERIOD
RADIUS_LARGE = 0.002
RADIUS_SMALL = 0.001
AMPLITUDE = 0.8*RADIUS_LARGE
AMPLITUDE_SMALL = 0.8*AMPLITUDE
maxWin = 0.03
minWin = -1.0*maxWin
Re = sys.argv[1]
config = 'single'
MU = RHO*OMEGA*AMPLITUDE_SMALL*RADIUS_SMALL/float(Re)
# constructs a filepath for the plot images of Re=$Re, config=$config, and field=$field
def plotName(cwd, Re, config, field, idx):
    """Return the output image path for (config, Re, field, idx), creating
    the destination directory if it does not yet exist."""
    out_dir = cwd + "../Figures/AVGForce/{0}/".format(config)
    pathlib.Path(out_dir).mkdir(parents=True, exist_ok=True)
    return out_dir + "{0}_{1}_{2}_{3}".format(config, Re, field, idx)
def AddDiscsToPlot(ax, pos):
    """Draw the two swimmer discs (large upper, small lower) onto *ax*,
    using the rotated centre columns of the one-row *pos* DataFrame."""
    for x_col, y_col, radius in (('aXU_rot', 'aYU_rot', RADIUS_LARGE),
                                 ('aXL_rot', 'aYL_rot', RADIUS_SMALL)):
        disc = Circle((pos.loc[0, x_col], pos.loc[0, y_col]), radius,
                      facecolor=(0.0,) * 3, linewidth=1, alpha=1.0, zorder=6)
        ax.add_patch(disc)
    return
def set_size(w, h, ax=None):
    """ w, h: width, height in inches -- resize the parent figure so the
    axes region itself ends up exactly w x h inches."""
    if not ax:
        ax = plt.gca()
    pars = ax.figure.subplotpars
    fig_w = float(w) / (pars.right - pars.left)
    fig_h = float(h) / (pars.top - pars.bottom)
    ax.figure.set_size_inches(fig_w, fig_h)
    return ax
def Rotate(xy, theta):
    """Rotate the 2-vector *xy* counter-clockwise by *theta* radians.

    See https://en.wikipedia.org/wiki/Rotation_matrix#In_two_dimensions
    """
    c, s = np.cos(theta), np.sin(theta)
    rotation = np.array([[c, -s],
                         [s, c]])
    return rotation.dot(xy)
def CalcLabAngle(pos):
    """Angle of swimmer 1's axis (lower disc -> upper disc) from the +y axis.

    Prints theta in degrees and returns 2*pi - theta, i.e. the rotation
    that maps the swimming axis back onto +y.
    """
    #Find swimming axis (normal y-axis)
    xU, xL = pos.loc[0,'xU'], pos.loc[0,'xL']
    yU, yL = pos.loc[0,'yU'], pos.loc[0,'yL']
    labX = xU - xL
    labY = yU - yL
    length = np.hypot(labX,labY)
    normX = labX/length
    normY = labY/length
    #2) Calculate Theta
    # arccos alone only covers [0, pi]; the sign of normX picks the correct
    # half-plane so theta lands in [0, 2*pi).
    if(normX <= 0.0):
        theta = np.arccos(normY)
    else:
        theta = -1.0*np.arccos(normY)+2.0*np.pi
    print('theta = ',theta*180.0/np.pi)
    return 2.0*np.pi - theta
def RotateVectorField(pos,mx,my,Ux,Uy,NX,NY):
    """Shift the mesh/field to the swimmer-pair centre of mass and rotate
    everything into swimmer 1's body frame.

    Mutates *pos* in place (shifts xU/xL/yU/yL and adds the rotated
    aX*/aY*_rot columns) and returns
    (pos, mx_rot, my_rot, Ux_rot, Uy_rot) with mesh and vectors rotated.
    """
    #Shift field by CM
    #Calculate angle of swimmer 1 from y-axis
    #Rotate field by 2pi - theta
    #Shift x and y by the CM location
    # CM is weighted 80/20 toward the upper disc.
    xCM = 0.8*pos.loc[0,'xU'] + 0.2*pos.loc[0,'xL']
    yCM = 0.8*pos.loc[0,'yU'] + 0.2*pos.loc[0,'yL']
    #Do the same for mx and my
    mx -= xCM
    my -= yCM
    #Shift pos data by xCM and yCM
    pos['xU'] -= xCM
    pos['xL'] -= xCM
    pos['yU'] -= yCM
    pos['yL'] -= yCM
    #Rotate Reference frame by swimmer 1's axis
    #Calculate Theta (Rotate by -Theta)
    theta_rotate = CalcLabAngle(pos)
    print('theta_rotate = ',theta_rotate*180.0/np.pi)
    mxy = np.array([mx.flatten(),my.flatten()])
    mxy_rot = np.zeros((2,NX*NY))
    #Do the same for the U field
    Uxy = np.array([Ux.flatten(),Uy.flatten()])
    Uxy_rot = np.zeros((2,NX*NY))
    # Rotate each mesh point and vector individually (O(NX*NY) Python loop).
    for jdx in range(NX*NY):
        mxy_rot[:,jdx] = Rotate(mxy[:,jdx],theta_rotate)
        Uxy_rot[:,jdx] = Rotate(Uxy[:,jdx],theta_rotate)
    mx_rot = mxy_rot[0,:].reshape((NX,NY))
    my_rot = mxy_rot[1,:].reshape((NX,NY))
    Ux_rot = Uxy_rot[0,:].reshape((NX,NY))
    Uy_rot = Uxy_rot[1,:].reshape((NX,NY))
    # Rotate the (already CM-shifted) disc centres too and keep them on pos.
    aU_pos = np.array([pos.loc[0,'xU'],pos.loc[0,'yU']])
    aL_pos = np.array([pos.loc[0,'xL'],pos.loc[0,'yL']])
    aU_rot = Rotate(aU_pos,theta_rotate)
    print('aU = ',aU_pos)
    print('aU_rot = ',aU_rot)
    aL_rot = Rotate(aL_pos,theta_rotate)
    pos['aXU_rot'], pos['aYU_rot'] = aU_rot[0], aU_rot[1]
    pos['aXL_rot'], pos['aYL_rot'] = aL_rot[0], aL_rot[1]
    return (pos,mx_rot,my_rot,Ux_rot,Uy_rot)
def PlotForceDensity(cwd,time,mx,my,forcex,forcey,pos,pars):
    """Plot log10 of the force-density magnitude (filled contours) with a
    unit-vector quiver overlay, rotated into swimmer 1's frame, and save
    the figure as a .png.

    pars = [Re, config, field, idx]; *time* is currently unused here.
    """
    FORCETOL = 1.0e-5
    SMALL_NUM = 1.0e-25
    Re = pars[0]
    config = pars[1]
    field = pars[2]
    print('field = ',field)
    sys.stdout.flush()
    idx = pars[3]
    fig, ax = plt.subplots(nrows=1, ncols=1,figsize=(8,8),dpi=200,num=10)
    #Force Contours
    forcemag = np.hypot(forcex,forcey)
    #Rotate by Swimmer 1 axis around CM of pair
    pos,mx_rot,my_rot,forcex_rot,forcey_rot = RotateVectorField(pos,mx,my,forcex,forcey,1022,1022)
    forcemag = np.where(forcemag == 0.0, SMALL_NUM,forcemag) #Avoid undefined log numbers
    #levels = MaxNLocator(nbins=21).tick_values(0.0, 0.5*np.amax(forcemag))
    levels = MaxNLocator(nbins=21).tick_values(-2.0, 3.0)
    ax.contourf(mx_rot,my_rot,np.log10(forcemag),cmap='YlOrRd',levels=levels,extend='both')
    #Add quiver to magnitude plot
    # Zero out direction arrows where the magnitude is below FORCETOL of the
    # maximum, so the quiver only shows meaningful directions.
    normFx, normFy = np.zeros((1022,1022)), np.zeros((1022,1022))
    print('forcemagmax = ',np.amax(forcemag))
    forcemag = np.where(forcemag/np.amax(forcemag) <= FORCETOL, SMALL_NUM,forcemag)
    normFx = np.where(forcemag == SMALL_NUM, 0.0, forcex_rot/forcemag)
    normFy = np.where(forcemag == SMALL_NUM, 0.0, forcey_rot/forcemag)
    #normFx = forcex_rot/forcemag
    #normFy = forcey_rot/forcemag
    # Sub-sample every 8th grid point to keep the quiver legible.
    space=8
    ax.quiver(mx_rot[::space,::space],my_rot[::space,::space],
              normFx[::space,::space],normFy[::space,::space],
              color='black',pivot='mid',scale=40,zorder=5,minlength=0)
    #Add Discs
    AddDiscsToPlot(ax,pos)
    axis = [-0.02,0.02,-0.02,0.02]
    ax.axis(axis)
    ax.set_aspect('equal')
    # Turn off tick labels
    ax.set_yticklabels([])
    ax.set_xticklabels([])
    fig.tight_layout()
    ax = set_size(6,6,ax)
    fig.savefig(plotName(cwd,Re,config,field,idx)+'.png')
    fig.clf()
    plt.close()
    return
def pname(cwd, Re):
    """Construct the position-data file path for Reynolds number *Re*."""
    filename = "/pd_Re{0}.txt".format(Re)
    return cwd + filename
def GetPosData(cwd,time,Re):
    """Load the position file for *Re* and return a one-row DataFrame with
    the upper/lower disc centres (xU, yU, xL, yL) at exactly *time*.

    NOTE(review): marker indices 6 and 19 appear to identify the upper and
    lower discs respectively -- confirm against the simulation output.
    """
    data = pd.read_csv(pname(cwd,Re),delimiter=' ')
    topData = data[data['idx'] == 6].copy()
    botData = data[data['idx'] == 19].copy()
    # Sort by time and realign indices so the two markers pair up row-wise.
    topData = topData.sort_values(by=['time'])
    botData = botData.sort_values(by=['time'])
    topData = topData.reset_index(drop=True)
    botData = botData.reset_index(drop=True)
    dictPos = {'xU':topData['x'],'yU':topData['y'],'xL':botData['x'],'yL':botData['y'],'time':topData['time']}
    pos = pd.DataFrame(data=dictPos)
    # Keep only the requested snapshot (exact float equality on 'time').
    pos = pos[pos['time'] == time]
    pos = pos.reset_index(drop=True)
    return pos
def GetAvgFieldData(cwd,config,Re,idx):
    """Read one period-averaged force-field CSV and return its 16 columns,
    each reshaped to an (Nx, Ny) = (1022, 1022) array.

    Returns (mx, my, fvx, fvy, fcx, fcy, fpx, fpy, fdx, fdy, fix, fiy,
    fsx, fsy, fnx, fny) in that order -- the same order callers unpack.
    """
    fieldData = pd.read_csv(cwd+'Force_%s_Re%s_%04d.csv'%(config,Re,idx),delimiter=' ')
    print(fieldData.head())
    Nx, Ny = 1022, 1022
    # One reshape per column replaces the former 16 copy-pasted
    # list -> np.array -> reshape stanzas (identical values and order).
    columns = ('mx', 'my', 'fvx', 'fvy', 'fcx', 'fcy', 'fpx', 'fpy',
               'fdx', 'fdy', 'fix', 'fiy', 'fsx', 'fsy', 'fnx', 'fny')
    return tuple(np.asarray(fieldData[col]).reshape((Nx, Ny)) for col in columns)
if __name__ == '__main__':
    #READ ALL AVG FILES IN A SIMULATION DIRECTORY
    #EXTRACT AVERAGE FIELD DATA INTO NUMPY ARRAYS
    #PLOT AVERAGED FIELD DATA
    #Paths to data and plots
    cwd_POS = cwd_PYTHON+'../PosData/'
    cwd_FORCE = cwd_PYTHON + '../ForceData/'
    # Period index of the averaged snapshot to plot.
    countPer = 100
    mx,my,f_vari_x,f_vari_y,f_conv_x,f_conv_y,f_pres_x,f_pres_y,f_diff_x,f_diff_y,f_iner_x,f_iner_y,f_stre_x,f_stre_y,f_net_x,f_net_y = GetAvgFieldData(cwd_FORCE,config,Re,countPer)
    #Extract Position and Time Data
    # assumes snapshots start at t=0.05 and are one PERIOD apart -- TODO confirm
    time = np.round(0.05 + countPer*PERIOD,2)
    #print('time = ',time)
    posData = GetPosData(cwd_POS,time,Re)
    #Plot Averaged Field Data
    # One figure per force-decomposition term; pars = [Re, config, field, idx]
    # and only pars[2] (the field label) changes between calls.
    pars = [Re,config,'force_vari',countPer]
    PlotForceDensity(cwd_PYTHON,time,mx,my,f_vari_x,f_vari_y,posData,pars)
    pars[2] = 'force_conv_2'
    PlotForceDensity(cwd_PYTHON,time,mx,my,f_conv_x,f_conv_y,posData,pars)
    pars[2] = 'force_diff'
    PlotForceDensity(cwd_PYTHON,time,mx,my,f_diff_x,f_diff_y,posData,pars)
    pars[2] = 'force_pres'
    PlotForceDensity(cwd_PYTHON,time,mx,my,f_pres_x,f_pres_y,posData,pars)
    pars[2] = 'force_stre'
    PlotForceDensity(cwd_PYTHON,time,mx,my,f_stre_x,f_stre_y,posData,pars)
    pars[2] = 'force_iner'
    PlotForceDensity(cwd_PYTHON,time,mx,my,f_iner_x,f_iner_y,posData,pars)
    pars[2] = 'force_net'
    PlotForceDensity(cwd_PYTHON,time,mx,my,f_net_x,f_net_y,posData,pars)
|
# -*- coding: utf-8 -*-
from odoo import fields, models
class SaleReport(models.Model):
    """Extends sale.report with per-salesperson contribution columns."""
    _inherit = 'sale.report'

    sales_person = fields.Many2one('res.users', 'Sales Person(s)', readonly=True)
    sale_line_id = fields.Many2one('sale.order.line', 'Sales Line', readonly=True)
    contribution_price = fields.Float(string='Total Exc. VAT (Salesperson)', readonly=True)

    def _query(self, with_clause='', fields=None, groupby='', from_clause=''):
        """Inject the sales_person_contribution join and columns into the
        report SQL.

        BUG FIX: `fields` previously defaulted to a mutable `{}` that was
        mutated below, so the shared default dict accumulated entries
        across calls. Default to None and create a fresh dict per call.
        """
        if fields is None:
            fields = {}
        fields['sale_line_id'] = ', spc.sale_line_id'
        fields['contribution_price'] = ', l.contribution_price'
        fields['sales_person'] = ', spc.user_id as sales_person'
        from_clause += ' right join sales_person_contribution spc on (spc.sale_line_id = l.id)'
        groupby += ', spc.sale_line_id,spc.user_id,l.contribution_price'
        return super(SaleReport, self)._query(with_clause, fields, groupby, from_clause)
|
# For each digit position, count the minimal rotations of a 0-9 dial
# (turnable in either direction) to get from the current to the required
# state, and print the total.
n = input()
current_state = input()
required_state = input()
no_moves = 0
for position in range(int(n)):
    low, high = sorted((int(current_state[position]),
                        int(required_state[position])))
    # direct distance vs. wrapping around past 9/0
    no_moves += min(high - low, 10 + low - high)
print(no_moves)
|
# -*- coding: utf-8 -*-
import unittest
from pycolorname.pantone.cal_print import CalPrint
class CalPrintTest(unittest.TestCase):
    """Data-integrity tests for the CalPrint Pantone color table."""
    def setUp(self):
        # refresh=True forces a re-fetch/re-parse instead of a cached copy.
        self.uut = CalPrint()
        self.uut.load(refresh=True)
    def test_data(self):
        # The full table is expected to contain exactly 992 named colors.
        self.assertEqual(len(self.uut), 992)
        self.assertIn("White (White)", self.uut)
        # We check a few random colors to be sure that things are fine.
        self.assertEqual(self.uut['PMS 245 (Pantone Rhodamine Red)'],
                         (232, 127, 201))
        self.assertEqual(self.uut['PMS 202 (Pantone Rubine Red 2X)'],
                         (140, 38, 51))
        self.assertEqual(self.uut['Pantone Cool Gray 5 (Pantone Cool Gray 5)'],
                         (186, 183, 175))
        self.assertEqual(self.uut['PMS 1345 (Pantone Warm Gray 2)'],
                         (255, 214, 145))
        self.assertEqual(self.uut['Pantone Purple (Pantone Purple)'],
                         (191, 48, 181))
        # Check that the nearest color to named colors are themselves.
        # As, delta_e for named colors with themselves should be minimum.
        for name, color in self.uut.items():
            if not name.startswith("PMS"):
                original_name, nearest_name = name.split(" (")
                nearest_name = nearest_name.replace(")", "")
                self.assertEqual(original_name, nearest_name)
|
# Collapse runs of consecutive duplicates and print the result.
a = [0, 2, 2, 3, 4, 4, 5, 5, 8, 9]
a = [1, 1, 4, 2, 2, 3, 3]  # second sample shadows the first, as before
deduped = []
for value in a:
    # keep a value only when it differs from the last one kept
    if not deduped or deduped[-1] != value:
        deduped.append(value)
print(deduped)
# Classic FizzBuzz for a single value, limited to 0 - 100 inclusive.
users_input = int(input("Enter an integer value: "))
if not 0 <= users_input <= 100:
    # Guard clause replaces the old `if x in range(...): pass / else:`
    # construct (a no-op branch plus an O(1)-but-odd range membership test).
    print("Please enter a number between 0 - 100")
    exit(1)
if users_input % 3 == 0 and users_input % 5 == 0:
    print("FizzBuzz")
elif users_input % 3 == 0:
    print("Fizz")
elif users_input % 5 == 0:
    print("Buzz")
else:
    print(users_input)
|
from django.contrib import admin
from stiltonstriders.models import *
# Register your models here.
# Expose Event in the Django admin with the default ModelAdmin options.
admin.site.register(Event)
a = 0
b = 0
c = 0
for i in range(1, 1000):
for j in range(i + 1, 1000):
k = 1000 - i - j
if (i * i + j * j == k * k) and (i + j + k == 1000):
print(i * j * k)
break |
import requests
def req_peak_data(site, start_date, end_date, url_prefix):
    """
    This function requests water peak flow data in rdb format from the
    NWIS peak water data service.

    ARGS:
    site - string site ID for the site to be charted
    start_date - starting date to chart peak flow data (currently unused;
        kept so existing call sites keep working)
    end_date - ending date to chart peak flow data
    url_prefix - config variable for nwis peak waterdata service endpoint url

    RETURNS:
    content - list of all lines in the data file, or None on failure
    """
    content = None
    url = url_prefix + '?site_no=' + site + '&agency_cd=USGS&format=rdb' + '&end_date=' + end_date
    try:
        r = requests.get(url)
    except requests.exceptions.RequestException:
        print('- Bad URL -')
    else:
        # BUG FIX: `status_code is 200` compared identity, not value -- it
        # only worked via CPython's small-int caching and warns on 3.8+.
        if r.status_code == 200:
            content = r.text.splitlines()
    return content
def req_peak_dv_data(site, date, url_prefix):
    """
    Requests data from the Daily Value NWIS water data service for
    creating the lollipop svg elements for the current year.

    ARGS:
    site - string site ID for the site to be charted
    date - date to chart lollipop flow data
    url_prefix - String constant for nwis peak waterdata service endpoint url

    RETURNS:
    content - list of all lines in the data file, or None on failure
    """
    content = None
    url = url_prefix + 'dv/?format=rdb&sites=' + site + '&startDT=' + date + '&endDT=' + date + '&siteStatus=all'
    try:
        r = requests.get(url)
        # BUG FIX: was `status_code is 200` -- identity comparison with an
        # int literal is unreliable; equality is the correct check.
        if r.status_code == 200:
            content = r.text.splitlines()
    except requests.exceptions.RequestException:
        print('- Bad URL -')
    return content
def parse_peak_data(peak_data, dv_data):
    """
    Parse peak flow water data and build the point list expected by the
    D3 charting library.

    ARGS:
    peak_data - lines of peak flow rdb data from the NWIS waterdata service.
    dv_data - lines of daily value rdb data from the NWIS waterdata service.

    RETURNS:
    list of {'label': year, 'value': peak} dicts for a specific site.
    """
    points = []
    years_used = set()
    # Peak-flow records: first record per year wins; value is column 4.
    for raw in peak_data or []:
        if not raw.startswith('USGS'):
            continue
        cells = raw.split('\t')
        year = cells[2].split('-')[0]
        if year in years_used:
            continue  # drop duplicate years
        years_used.add(year)
        if cells[4]:
            points.append({'label': year, 'value': int(cells[4])})
    # Daily-value records fill in years the peak data did not cover.
    for raw in dv_data or []:
        if not raw.startswith('USGS'):
            continue
        cells = raw.split('\t')
        year = cells[2].split('-')[0]
        # Favor peak-data values: stop entirely once a year already covered
        # by peak data shows up (matches the original `break` semantics).
        if year in years_used:
            break
        if cells[3]:
            points.append({'label': year, 'value': float(cells[3])})
    return points
from direct.distributed.DistributedObjectAI import DistributedObjectAI
from direct.directnotify import DirectNotifyGlobal
class DistributedDailyQuestSpotAI(DistributedObjectAI):
    """AI-side distributed object for a daily quest spot (currently a stub)."""
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedDailyQuestSpotAI')
    def __init__(self, air):
        # Delegate straight to the base class; no extra AI state yet.
        DistributedObjectAI.__init__(self, air)
# -*- coding: utf-8 -*-
"""
Tests of neo.io.stimfitio
"""
# needed for python 3 compatibility
from __future__ import absolute_import
import sys
import unittest
from neo.io import StimfitIO
from neo.io.stimfitio import HAS_STFIO
from neo.test.iotest.common_io_test import BaseTestIO
# Skipped on Python 3 and whenever the stfio bindings are unavailable.
@unittest.skipIf(sys.version_info[0] > 2, "not Python 3 compatible")
@unittest.skipUnless(HAS_STFIO, "requires stfio")
class TestStimfitIO(BaseTestIO, unittest.TestCase):
    """Runs the common neo IO test-suite against sample stimfit HDF5 files."""
    files_to_test = ['File_stimfit_1.h5',
                     'File_stimfit_2.h5',
                     'File_stimfit_3.h5',
                     'File_stimfit_4.h5',
                     'File_stimfit_5.h5',
                     'File_stimfit_6.h5',
                     ]
    # BaseTestIO downloads whatever is listed here before running.
    files_to_download = files_to_test
    ioclass = StimfitIO
if __name__ == "__main__":
    unittest.main()
|
# -*- coding: utf-8 -*-
"""
This file covers all the classes and functions for performing
Named Entity Recognition on text objects.
We developed an in-house NER tool based primarily on POS tags.
We compare our results against state-of-art NLP tools designed and
calibrated by Stanford University NLP Group.
There are several dependencies to this file and we will list down
each of them below:
- NLTK
- Java/Java SDK
- Standard POS Tagger
https://nlp.stanford.edu/software/tagger.shtml#Download
- Stanford NER Tagger
https://nlp.stanford.edu/software/CRF-NER.shtml#Download
- Stanford Neural-Network Based Dependency Parser
https://nlp.stanford.edu/software/lex-parser.html
- sklearn
Instructions to install each of these packages will be given later
The rationale for looking into dependency parsing is to see if
there is a way to identity entities in the parsed sentences.
Initial analysis shows promise, but it's premature to conclude anything
based on this.
@author: SankisaR
"""
import os
#from docx import Document
from sklearn.feature_extraction.text import CountVectorizer
import nltk
from nltk.tag import StanfordNERTagger
from nltk.tag.stanford import StanfordPOSTagger as POS_Tag
from nltk.parse import stanford
from nltk.parse.stanford import StanfordDependencyParser
from nltk import Tree
from nltk.draw.util import CanvasFrame
from nltk.draw import TreeWidget
import spacy
from spacy.symbols import ORTH, LEMMA, POS
import re
import en_core_web_md as spacyEn
import spacy
from spacy.symbols import nsubj, VERB, PROPN, dobj, prep, agent, nsubjpass, attr, conj, neg, aux
import os
from os import listdir
from os.path import isfile, join
# extract other files
# NOTE(review): hard-coded user-specific Windows paths below; as written this
# script only runs on the original author's machine.
os.chdir('C:/Users/sankisar/Documents/AI/Projects/Default Automation/Code')
from produce_extractions_functions import *
os.chdir('C:/Users/sankisar/Documents/AI/Projects/Default Automation/')
# javapath needs to be set if one cannot set it through environment variables
java_path = "C:/Program Files (x86)/Java/jre1.8.0_121/bin/java.exe"
os.environ['JAVAHOME'] = java_path
home = 'C:/Users/sankisar/Documents/AI/Stanford Core NLP'
""" -- POS Tagging -- """
# Stanford POS tagger: point at model + jar, then smoke-test on one sentence.
_path_to_model = home + '/stanford-postagger/models/english-bidirectional-distsim.tagger'
_path_to_jar = home + '/stanford-postagger/stanford-postagger.jar'
st = POS_Tag(_path_to_model, _path_to_jar)
text = 'Yeshivah Ohel Moshe filed for chapter 11 bankruptcy protection Bankr. E.D.N.Y. Case No. 16-43681 on August 16 2016.'
st.tag(text.split())
# POS tagging works
""" -- Named Entity Recognition -- """
# for Named Entity recognition
# NOTE: `st` is rebound here from the POS tagger to the NER tagger.
_path_to_model = home + '/stanford-ner/classifiers/english.all.3class.distsim.crf.ser.gz'
_path_to_jar = home + '/stanford-ner/stanford-ner-3.8.0.jar'
st = StanfordNERTagger(_path_to_model, _path_to_jar)
st.tag(text.split())
# NER works
""" -- Neural Network Based Dependency Parsing -- """
os.environ['STANFORD_PARSER'] = home + '/stanford-parser/stanford-parser.jar'
os.environ['STANFORD_MODELS'] = home + '/stanford-parser/stanford-parser-3.8.0-models.jar'
# we need to extract these by running following command on COMMAND PROMPT from the
# directory where the stanford-parser-3.8.0-models.jar is located
# python -mzipfile -e stanford-parser-3.8.0-models.jar models
# there is other method. Please see below link
#https://stackoverflow.com/questions/11850574/python-unpacking-a-jar-file-doesnt-work
# https://stackoverflow.com/questions/13883277/stanford-parser-and-nltk
#_path_to_model = home + "/stanford-parser/models/edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz"
_path_to_model = home + "/stanford-corenlp/models/edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz"
#_path_to_model = home + "/stanford-parser/models/edu/stanford/nlp/models/parser/nndep/english_SD.gz"
# lexical parser
parser = stanford.StanfordParser(_path_to_model)
list(parser.raw_parse(text))
# NOTE(review): raw_parse_sents expects a sequence of sentences; passing a
# plain string iterates it character-by-character -- confirm intent.
sentences = parser.raw_parse_sents(text)
# dependency parser
dep_parser=StanfordDependencyParser(_path_to_model)
# list(dep_parser.raw_parse(text))
print([parse.tree() for parse in dep_parser.raw_parse(text)])
# NOTE(review): `section` is never defined in this file -- the next line
# raises NameError at runtime; presumably it referred to a variable from an
# interactive session.
for parse in dep_parser.parse(section.split()):
    parse.tree().draw()
# neural network based parser
_path_to_model = home + "/stanford-corenlp/models/edu/stanford/nlp/models/parser/nndep/english_SD.gz"
parser = stanford.StanfordParser(_path_to_model)
list(parser.raw_parse(text))
# NOTE(review): `section` is undefined here as well (see above).
for tree in parser.parse(section.split()):
    print(tree)
    tree.draw()
#NPs = list(tree.subtrees(filter=lambda x: x.label()=='NP'))
#NNs_inside_NPs = map(lambda x: list(x.subtrees(filter=lambda x: x.label()=='NNP')), NPs)
#[noun.leaves()[0] for nouns in NNs_inside_NPs for noun in nouns]
#ROOT = 'NP'
#def getNodes(parent):
#    for node in parent:
#        if type(node) is nltk.Tree:
#            if node.label() == ROOT:
#                print(" ".join(node.leaves()))
#                return(" ".join(node.leaves()))
#            getNodes(node)
#        else:
#            print("Word:", node)
#getNodes(tree)
#NEED TO WORK ON EXTRACTING TREE LEAVES AND NODES
#tree = parser.parse(text.split())
# getNodes(tree)
###########################
# WORKING WITH SPACY
###########################
nlp = spacyEn.load()
text = 'On August 16 2016 Yeshivah Ohel Moshe filed for chapter 11 bankruptcy protection Bankr. E.D.N.Y. Case No. 16-43681 on August 16 2016.'
text = "Powell Valley Health Care Inc. provides healthcare services to the greater-Powell Wyoming community. The Company filed for Chapter 11 bankruptcy protection Bankr. D. Wyo. Case No. 16-20326 on May 16 2016. The petition was signed by Michael L. Long CFO. The case is assigned to Judge Cathleen D. Parker. The Debtor estimated assets and debts at 10 million to 50 million at the time of the filing."
doc = nlp(text, parse = True)
doc = nlp(text)
# dump token-level POS / dependency info for inspection
for word in doc:
    print(word.text, word.pos_, word.dep_, word.head.text)
root = [w for w in doc if w.head is w][0]
for word in doc:
    print(word.text, word.lemma, word.lemma_, word.tag, word.tag_, word.pos, word.pos_, word.head.text, word.dep_)
[(token, token.label_) for token in doc.ents]
# print every nominal subject whose head is a verb
for ent in doc:
    if ent.dep == nsubj and ent.head.pos == VERB:
        print(ent.text)
for possible_verb in doc:
    if possible_verb.pos == VERB:
        for possible_subject in possible_verb.children:
            print('verb: {}, subject {}:'.format(possible_verb, [item for item in possible_subject.lefts] + [possible_subject]))
# good implementation
# Collect the full subtree of each verb's subject as one entity string.
entity = []
for possible_verb in doc:
    if possible_verb.pos == VERB:
        for possible_subject in possible_verb.children:
            if possible_subject.dep == nsubj:
                for descendant in possible_subject.subtree:
                    entity.append(descendant.text)
' '.join(entity)
# The sentence root is the token that is its own head.
root = [w for w in doc if w.head is w][0]
subject = list(root.lefts)[1]
for descendant in subject.subtree:
    print(descendant)
###############################
# SOME CUSTOM FUNCTIONS
###############################
"""
Assumptions:
    - Assume that all sentences are constructed in active voice. Passive voice will be supported in
    next upgrade.
    - Coreference resolution is addressed to reasonable degree. Only upto two sentences.
General strategy:
    - receive a text paragraph.
    - break it into pieces and parse using Arc dependency structure.
    - remove prepositions after the verbs that we are interested in.
    - Look up for nsubj and dobj parts of speech.
    - if we see nsubj as Debtor or the Company, look up for main nsubj entity in
    - the preceding sentence and use that. (this is where active voices comes to play)
    - try and answer the question of what the company does based on the text!
We will use Spacy tools almost exclusively here.
"""
# use this example all the way
text = "Powell Valley Health Care Inc. provides healthcare services to the greater-Powell Wyoming community. The Company filed for Chapter 11 bankruptcy protection Bankr. D. Wyo. Case No. 16-20326 on May 16 2016. The petition was signed by Michael L. Long CFO. The case is assigned to Judge Cathleen D. Parker. The Debtor estimated assets and debts at 10 million to 50 million at the time of the filing."
# words to drop from extracted objects; pronoun-like subjects to skip when
# resolving the defaulted entity; verbs that signal a bankruptcy filing.
remove_words = ['Bankr', 'bankr']
default_verbs = ['filed', 'file','sought','files']
remove_subjects = ['The Company', 'The Debtor', 'The debtor','The company', 'She','He','she','he','It','it']
def summarize_paragraph(text, nlp = nlp,
                        remove_words = remove_words,
                        default_verbs = default_verbs,
                        remove_subjects = remove_subjects):
    """Summarize a paragraph and identify the defaulted (bankrupt) entity.

    Strips case numbers, dependency-parses each sentence with spaCy, keeps
    the subject/verb/object subtrees of qualifying verbs, and reconstructs
    a condensed paragraph from them.

    Returns a tuple:
        (defaulted_entity, recon_para, sentences, subjects, verbs, objects)
    where defaulted_entity is 'None Found' when no filing verb is matched.
    """
    # remove case numbers using regular expressions
    s = re.sub(r'Bankr.+\d\d-\d\d\d\d\d.','',text)
    s = re.sub(r'--',',',s)
    # protect hyphenated words from tokenization; restored to '-' at the end
    s = re.sub(r'-','_',s)
    # NOTE(review): '.' is unescaped, so this also matches e.g. "Inca";
    # presumably r'Inc\.' was intended -- confirm.
    s = re.sub(r'Inc.', 'Inc',s)
    doc = nlp(s, parse = True)
    sentences = []
    subjects = []
    objects = []
    verbs = []
    part_sent = []
    recon_para = ''
    for sent in doc.sents:
        # check if the sentence has verb. if not, ignore the sentence
        if len([w for w in sent if w.pos == VERB and w.text not in remove_words]) == 0:
            continue
        for possible_verb in sent:
            sent_verbs = []
            sent_subjects = []
            sent_objects = []
            if possible_verb.pos == VERB:
                # Keep the verb when it is a lowercase ROOT verb, or a
                # non-ROOT verb from the bankruptcy verb list; skip
                # capitalized ROOT tokens (likely mis-tagged proper nouns).
                if possible_verb.dep_ == 'ROOT' and possible_verb.text[0].islower():
                    sent_verbs.append(possible_verb.text)
                elif possible_verb.dep_ != 'ROOT' and possible_verb.text in default_verbs:
                    sent_verbs.append(possible_verb.text)
                elif possible_verb.dep_ == 'ROOT' and possible_verb.text[0].isupper():
                    continue
                else:
                    continue
                #sent_verbs.append(possible_verb.text)
                # Collect the subtree text of subject-like and object-like
                # children of the accepted verb.
                for possible_subject in possible_verb.children:
                    if possible_subject.dep == nsubj or possible_subject.dep == nsubjpass or possible_subject.dep == neg or possible_subject.dep == aux:
                        for descendant in possible_subject.subtree:
                            sent_subjects.append(descendant.text)
                    if possible_subject.dep == dobj or possible_subject.dep == prep or possible_subject.dep == agent or possible_subject.dep == attr:
                        for descendant in possible_subject.subtree:
                            sent_objects.append(descendant.text)
                sent_objects = [w for w in sent_objects if w not in remove_words]
                part_sent = sent_subjects + sent_verbs + sent_objects
                sentences.append(' '.join(part_sent))
                subjects.append(' '.join(sent_subjects))
                verbs.append(' '.join(sent_verbs))
                objects.append(' '.join(sent_objects))
        # remove stop words from the list of words collected
        # NOTE(review): this repeats the filter already applied inside the
        # verb branch above; harmless but redundant.
        sent_objects = [w for w in sent_objects if w not in remove_words]
        # if we see two verbs, we need to prioritize default verbs
        # if we have multiple verbs are present in the same sentence
#        subjects.append(' '.join(sent_subjects))
#        verbs.append(' '.join(sent_verbs))
#        objects.append(' '.join(sent_objects))
#        sentence = ' '.join(sent_subjects) + ' ' + ' '.join(sent_verbs) + ' ' + ' '.join(sent_objects)
#    #     reconstruct the paragraph
#        sentences.append(sentence)
    recon_para = '. '.join(sentences)
    """ at this point, we have summarized the paragraph, now,
    let's look at identifying defaulted entities """
    defaulted_entity = "None Found"
    # Find the first filing verb; prefer its subject unless it is a
    # pronoun-like placeholder, in which case fall back to the most recent
    # concrete subject seen earlier in the paragraph.
    for i, v in enumerate(verbs):
        if v in default_verbs and i == 0:
            defaulted_entity = subjects[0]
            break
        elif v in default_verbs:
            # at this point, we will return the subject from the first useful sentence in the paragraph
            if subjects[i] not in remove_subjects:
                defaulted_entity = subjects[i]
                break
            else:
                # NOTE(review): raises IndexError when every earlier subject
                # is in remove_subjects (empty list indexed with [0]).
                defaulted_entity = [ent for ent in subjects[:i] if ent not in remove_subjects][0]
                break
    else:
        defaulted_entity = 'None Found'
    # reshaping: restore the '-' characters that were masked as '_' above
    defaulted_entity = re.sub(r'_','-', defaulted_entity)
    recon_para = re.sub(r'_','-',recon_para)
    sentences = [re.sub(r'_','-',sent) for sent in sentences]
    subjects = [re.sub(r'_','-',subj) for subj in subjects]
    objects = [re.sub(r'_','-',obj) for obj in objects]
    # return output
    return defaulted_entity, recon_para, sentences, subjects, verbs, objects
# testing
# Ad-hoc inputs for exercising summarize_paragraph; each assignment below
# overwrites `text`, so only the last one before a call is actually used.
text = 'Yeshivah Ohel Moshe filed for chapter 11 bankruptcy protection Bankr. E.D.N.Y. Case No. 16-43681 on August 16 2016.'
#text = sections[0]
text = 'XXX works for Moody Analytics. He advises clients. The debtor filed for bankruptcy. XA was appointed as a judge'
text = 'Lewisville Texas-based ADPT DFW Holdings LLC Bankr. N.D. Tex. Case No. 17-31432 and its affiliates each filed separate Chapter 11 bankruptcy petitions on April 19 2017 listing 798.67 million in total assets and 453.48 million in total debts as of Sept. 30'
text = 'Lewisville Texas-based ADPT DFW Holdings LLC and its affiliates each filed separate Chapter 11 bankruptcy petitions on April 19 2017 listing 798.67 million in total assets and 453.48 million in total debts as of Sept. 30'
text = 'Advanced Biomedical Inc. dba Pathology Laboratories Services Inc. filed a Chapter 11 petition Bankr. C.D. Calif. Case No. 14-15938 on October 1 2014 and is represented by Robert Sabahat Esq. at Madison Harbor ALC in Irvine California. At the time of its filing the Debtors estimated assets was 100000 to 500000 and estimated liabilities was 1 million to 10 million. The petition was signed by Cyrus Karimi president. The Debtor did not file a list of its largest unsecured creditors when it filed the petition.'
# build a regular expression parser to remove case number etc.,
#text = ' '.join([w.replace('Bankr.','bankr').replace('N.D.','').replace('S.D.','') for w in text.split()])
#text = ' '.join([w.replace('Inc.','Incorporated')for w in text.split()])
ent, recon_para, sentences, subjects,verbs,objects = summarize_paragraph(text, nlp)
ent
# missing ones #
# Paragraphs the current heuristics fail on, kept for future debugging.
text = 'Going Ventures LLC which operates under the name Going Aire LLC filed a Chapter 11 petition Bankr. S.D. Fla. Case No. 17-12747 on March 7 2017. Carl Bradley Copeland manager signed the petition. Judge Laurel M. Isicoff is the case judge. The Debtor is represented by David R. Softness Esq. of David R. Softness P.A. At the time of filing the Debtor had total assets of 72900 and total liabilities of 1.01 million. No trustee examiner or statutory committee has been appointed in the Debtors Chapter 11 case.'
text = 'On-Call Staffing Inc. filed a Chapter 11 petition Bankr. N.D. Miss. Case No. 16-13823 on Oct. 28 2016. The Debtor is represented by J. Walter Newman IV Esq. at Newman Newman. The petition was signed by its President Lee Garner III. At the time of the filing the Debtor estimated assets at 100000 to 500000 and liabilities at 500000 to 1 million.'
text = 'R.E.S. Nation LLC filed a Chapter 11 petition Bankr. S.D. Tex. Case No. 16-34744 on Sept. 23 2016. The petition was signed by Jeffrey Nowling manager. The Debtor tapped Susan C. Matthews Esq. at Baker Donelson Bearman Caldwell Berkowitz APC. At the time of filing the Debtor estimated assets and liabilities at up to 50000.'
text = 'XXX works for Moody Analytics. He advises clients. The Debtor filed for bankruptcy. XA was appointed as a judge.'
text = "The councilmen refused the demonstrators a permit because they feared violence"
# NOTE(review): `s` is not defined at module level (it is a local inside
# summarize_paragraph) -- this line raises NameError at runtime.
doc = nlp(s, parse = True)
#sentences = [sent for sent in doc.sents]
#s = re.sub(r'Bankr.+No\.','',text)
#s = re.sub(r'Bankr.+\d\d-\d\d\d\d\d.','',text)
# story examples
text = "Powell Valley Health Care Inc. provides healthcare services to the greater-Powell Wyoming community. The Company filed for Chapter 11 bankruptcy protection Bankr. D. Wyo. Case No. 16-20326 on May 16 2016. The petition was signed by Michael L. Long CFO. The case is assigned to Judge Cathleen D. Parker. The Debtor estimated assets and debts at 10 million to 50 million at the time of the filing."
text = 'On-Call Staffing Inc. filed a Chapter 11 petition Bankr. N.D. Miss. Case No. 16-13823 on Oct. 28 2016. The Debtor is represented by J. Walter Newman IV Esq. at Newman Newman. The petition was signed by its President Lee Garner III. At the time of the filing the Debtor estimated assets at 100000 to 500000 and liabilities at 500000 to 1 million.'
text = 'Advanced Biomedical Inc. dba Pathology Laboratories Services Inc. filed a Chapter 11 petition Bankr. C.D. Calif. Case No. 14-15938 on October 1 2014 and is represented by Robert Sabahat Esq. at Madison Harbor ALC in Irvine California. At the time of its filing the Debtors estimated assets was 100000 to 500000 and estimated liabilities was 1 million to 10 million. The petition was signed by Cyrus Karimi president. The Debtor did not file a list of its largest unsecured creditors when it filed the petition.'
text = "In Houston, after the first night of a citywide curfew, many residents went outside for the first time in days to survey the wreckage. Officials have reported at least 38 deaths."
text = "Using a fleet of Cajun-style airboats, Jet Skis and fishing boats, a massive volunteer rescue effort patrolled the roads turned rivers."
text = "Thousands are applying for federal assistance, but it may be slow to arrive and require them to take on debt that could take years to pay off."
text = "After the New America Foundation praised a large fine levied on Google, the man behind the statement was fired."
text = "Pondering California’s regions, a lingering heat wave, and a look back at Ishi, the last known survivor of the Yahi tribe."
text = "Amazon’s Alexa and Microsoft’s Cortana Can Soon Talk to Each Other"
import numpy as np
def initialize(K, D):
    """Draw a random parameter set for a K-state HMM with D binary outputs.

    Returns [pi, A, B] where:
        pi - (K,) uniform initial-state distribution,
        A  - (K, K) row-stochastic transition matrix,
        B  - (K, D) row-stochastic emission matrix.
    """
    # Uniform prior over the mixture components / initial states.
    pi = np.full(K, 1 / K)
    # Transition probabilities (state -> state), rows normalized to sum to 1.
    A = np.random.rand(K, K)
    A /= A.sum(axis=1, keepdims=True)
    # Emission likelihoods (state -> observation), rows normalized.
    B = np.random.rand(K, D)
    B /= B.sum(axis=1, keepdims=True)
    return [pi, A, B]
def calculate_pb(sequence, B, K):
    """Bernoulli emission likelihoods for one binary observation sequence.

    sequence: (D, T) array of 0/1 observations.
    B:        (K, D) per-state Bernoulli "on" probabilities.
    K:        number of hidden states (kept for interface compatibility;
              it is implied by B's first dimension).

    Returns pb, a (K, T) array where pb[k, t] is the likelihood of the
    observation vector at time t under state k.  A 1e-7 additive smoothing
    keeps the logs finite when B contains exact 0s or 1s.
    """
    sequence = np.asarray(sequence)
    # log P(x_j = 1 | k) and log P(x_j = 0 | k), both shape (K, D).
    log_on = np.log(B + 1e-7)
    log_off = np.log(1 - B + 1e-7)
    # Sum over the D observation dimensions via matrix products instead of
    # the former per-(t, j) Python loops: (K, D) . (D, T) -> (K, T).
    log_pb = log_on.dot(sequence) + log_off.dot(1 - sequence)
    return np.exp(log_pb)
def calculate_cost(X, pi, A, B, gamma, chi):
    """Evaluate the EM auxiliary function Q for a Bernoulli-emission HMM.

    X:     list of sequences; X[n][0] is a (D, T) binary observation matrix.
    pi:    (K,) initial-state distribution.
    A:     (K, K) transition matrix.
    B:     (K, D) emission matrix.
    gamma: list of (K, T) per-sequence state posteriors.
    chi:   list of (T, K, K) per-sequence pairwise posteriors
           (index t = 0 is unused; transitions cover t = 1 .. T-1).

    Returns Q = prior term + transition term + emission log-likelihood,
    vectorized with numpy instead of the former quadruple Python loops.
    """
    N = len(gamma)
    log_pi = np.log(pi)
    log_A = np.log(A)
    log_B = np.log(B)
    log_not_B = np.log(1 - B)
    # E[log p(z_1)] summed over sequences.
    prior_term = sum(np.sum(gamma[n][:, 0] * log_pi) for n in range(N))
    # E[log p(z_t | z_{t-1})]: chi[n][1:] covers transitions into t = 1..T-1.
    state_transition_term = sum(np.sum(chi[n][1:] * log_A) for n in range(N))
    # E[log p(x_t | z_t)] under the Bernoulli emission model.
    log_like = 0.0
    for n in range(N):
        seq = np.asarray(X[n][0])
        # (K, D) . (D, T) -> (K, T) per-state, per-time emission log-prob.
        emission_ll = log_B.dot(seq) + log_not_B.dot(1 - seq)
        log_like += np.sum(gamma[n] * emission_ll)
    Q = prior_term + state_transition_term + log_like
    return Q
def Viterbi_decoder(pi_est, A_est, pb):
    """Viterbi decoding of the most likely state path for each sequence.

    pi_est: initial-state distribution; indexed as pi_est[0] below, so it is
            assumed to be shaped (1, K) (or a container whose first element
            is the K-vector) -- TODO confirm against the caller.
    A_est:  (K, K) transition matrix.
    pb:     (N, K, T) emission likelihoods per sequence/state/time
            (as produced by calculate_pb, stacked over N sequences).
    Returns states, an (N, T) array of decoded state indices (as floats).
    """
    K = np.shape(pb)[1]
    N = len(pb)
    T = np.shape(pb)[2]
    states = np.zeros((N,T))
    # delta[k, t, n]: best path score ending in state k at time t.
    # fi[k, t, n]: argmax backpointer to the previous state.
    delta = np.zeros((K,T,N))
    fi = np.zeros((K,T,N))
    # Forward
    for n in range(N):
        delta[:,0,n] = pi_est[0] * pb[n,:,0]
    for t in range(1,T):
        for n in range(N):
            #print(A_est*(delta[:,t-1,n].T))
            # NOTE(review): `max` shadows the builtin; also the broadcast
            # A_est * delta[:, t-1, n] scales A's columns, i.e. it treats
            # A_est[k, k_] * delta[k_] with axis=1 as the reduction over the
            # previous state -- verify this matches A's row/column
            # orientation (rows sum to 1 in initialize()).
            max = np.amax(A_est*delta[:,t-1,n],axis=1)
            #print(max)
            index_max = np.argmax(A_est*delta[:,t-1,n],axis=1)
            #print(index_max)
            delta[:,t,n] = pb[n,:,t]*max
            fi[:,t,n] = index_max #check whether to transpose or not
    # Backward
    states[:,T-1] = np.argmax(delta[:,T-1,:],axis=0)
    for n in range(N):
        # Follow the backpointers from T-1 down to 0.
        for t in range(T-2,-1,-1):
            states[n,t] = fi[int(states[n,t+1]),t+1,n]
    return states
def MAP_decoder(gamma_est):
    """Maximum a-posteriori decoding: per time step, pick the state with
    the largest posterior gamma[k, t].

    gamma_est: list of N arrays, each (K, T).
    Returns an (N, T) array of decoded state indices (as floats).
    """
    N = len(gamma_est)
    T = np.shape(gamma_est)[2]
    decoded = np.zeros((N, T))
    for n, gamma_n in enumerate(gamma_est):
        # argmax over the state axis (k) for every time step
        decoded[n, :] = np.argmax(gamma_n, axis=0)
    return decoded
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os
from textwrap import dedent
from typing import cast
from pants.backend.go.testutil import gen_module_gomodproxy
from pants.testutil.pants_integration_test import run_pants, setup_tmpdir
def test_run_binary() -> None:
    """End-to-end: `pants run` on a local go_binary forwards stdout, stderr
    and the process exit code unchanged."""
    # Doubled braces in main.go survive as literal { } -- presumably because
    # setup_tmpdir runs the file contents through str.format; confirm against
    # pants.testutil.
    sources = {
        "go.mod": dedent(
            """\
            module foo.example.com
            go 1.17
            """
        ),
        "main.go": dedent(
            """\
            package main
            import (
                "fmt"
                "os"
            )
            func main() {{
                fmt.Println("Hello world!")
                fmt.Fprintln(os.Stderr, "Hola mundo!")
                os.Exit(23)
            }}
            """
        ),
        "BUILD": dedent(
            """\
            go_mod(name='mod')
            go_package(name='pkg')
            go_binary(name='bin')
            """
        ),
    }
    with setup_tmpdir(sources) as tmpdir:
        result = run_pants(
            [
                "--backend-packages=pants.backend.experimental.go",
                "--pants-ignore=__pycache__",
                "run",
                f"{tmpdir}:bin",
            ]
        )
    # The binary's streams and exit code must propagate through `pants run`.
    assert "Hola mundo!\n" in result.stderr
    assert result.stdout == "Hello world!\n"
    assert result.exit_code == 23
def test_run_binary_third_party() -> None:
    """End-to-end: `pants run` on a go_binary whose main package comes from a
    third-party module, served from a local file:// GOPROXY fixture."""
    import_path = "pantsbuild.org/go-sample-for-test"
    version = "v0.0.1"
    # Build an in-memory Go module proxy tree containing a tiny library and
    # a main package that uses it.
    fake_gomod = gen_module_gomodproxy(
        version,
        import_path,
        (
            (
                "pkg/hello/hello.go",
                dedent(
                    """\
                    package hello
                    import "fmt"
                    func Hello() {
                        fmt.Println("Hello world!")
                    }
                    """
                ),
            ),
            (
                "cmd/hello/main.go",
                dedent(
                    """\
                    package main
                    import "pantsbuild.org/go-sample-for-test/pkg/hello"
                    func main() {
                        hello.Hello()
                    }
                    """
                ),
            ),
        ),
    )
    # The consuming project: a go_mod requiring the fake module, and a
    # go_binary pointing at the third-party main package.
    fake_gomod.update(
        {
            "BUILD": dedent(
                f"""\
                go_mod(name='mod')
                go_binary(name="bin", main=':mod#{import_path}/cmd/hello')
                """
            ),
            "go.mod": dedent(
                f"""\
                module go.example.com/foo
                go 1.16
                require (
                \t{import_path} {version}
                )
                """
            ),
        }
    )
    # The proxy's .zip is raw bytes and the .info must be written as bytes,
    # so they go through setup_tmpdir's binary-files parameter.
    raw_files = {
        f"go-mod-proxy/{import_path}/@v/{version}.zip": fake_gomod.pop(
            f"go-mod-proxy/{import_path}/@v/{version}.zip"
        ),
        f"go-mod-proxy/{import_path}/@v/{version}.info": cast(
            str, fake_gomod.pop(f"go-mod-proxy/{import_path}/@v/{version}.info")
        ).encode("utf-8"),
    }
    with setup_tmpdir(
        cast("dict[str, str]", fake_gomod), cast("dict[str, bytes]", raw_files)
    ) as tmpdir:
        # required for GOPROXY to work correctly when the go-mod-proxy
        # is in a subdir of the cwd.
        abspath = os.path.abspath(tmpdir)
        result = run_pants(
            [
                "--backend-packages=pants.backend.experimental.go",
                "--pants-ignore=__pycache__",
                "--golang-subprocess-env-vars=GOSUMDB=off",
                f"--golang-subprocess-env-vars=GOPROXY=file://{abspath}/go-mod-proxy",
                "run",
                f"//{tmpdir}:bin",
            ]
        )
    assert result.stdout == "Hello world!\n"
    assert result.exit_code == 0
|
from nio import MatrixRoom
from dors import command_hook, Jenny, HookMessage
import random
@command_hook(['pick', 'choose', 'choice'], help=".choice <something> <something else> [third choice] ... "
                                                "-- Makes a choice for you")
async def choice(bot: Jenny, room: MatrixRoom, event: HookMessage):
    """Reply with one of the user-supplied options, chosen at random.

    Requires at least two options; otherwise sends a usage message.
    """
    options = event.args
    if len(options) <= 1:
        return await bot.say("Not enough parameters. Usage: .choice <something> <something else> [third choice]")
    await bot.reply(random.choice(options))
@command_hook(['rand', 'random'], help=".random [arg1] [arg2] ... -- Picks a random number between arg1 and arg2 "
                                       "(If one argument is missing it assumes the only argument is the upper limit "
                                       "and the lower is 1, and if both arguments are missing it generates a number "
                                       "(between 1 and 10)".replace("((", "(") if False else ".random [arg1] [arg2] ... -- Picks a random number between arg1 and arg2 "
                                       "(If one argument is missing it assumes the only argument is the upper limit "
                                       "and the lower is 1, and if both arguments are missing it generates a number "
                                       "between 1 and 10)")
async def rand(bot: Jenny, room: MatrixRoom, event: HookMessage):
    """Send a random integer between the user-supplied bounds.

    No args: 1..10.  One arg: 1..arg.  Two args: arg1..arg2.
    Non-numeric arguments are treated as absent.
    """
    bottom = 1
    top = 10
    if event.args:
        # Parse up to two integer arguments.  Use None (not False) as the
        # "missing" sentinel so a literal 0 is not mistaken for absence,
        # and catch only the exceptions int()/indexing can actually raise.
        try:
            first = int(event.args[0])
        except ValueError:
            first = None
        try:
            last = int(event.args[1])
        except (IndexError, ValueError):
            last = None
        if last is None:
            if first is not None:
                top = first
        else:
            bottom = first if first is not None else 1
            top = last
    if top <= 0:
        return await bot.say("The limits must be higher than 0")
    if bottom >= top:
        return await bot.say("Lolwut?")
    # BUG FIX: previously random.randint(1, top) ignored the computed lower
    # bound, so ".random 5 9" could return values below 5.
    await bot.say("Your random integer is {0}".format(random.randint(bottom, top)))
|
# Copyright (c) 2020 Adam Souzis
# SPDX-License-Identifier: MIT
import six
from .runtime import NodeInstance
from .util import UnfurlError, Generate, toEnum
from .support import Status, NodeState, Reason
from .configurator import (
ConfigurationSpec,
getConfigSpecArgsFromImplementation,
TaskRequest,
)
from .tosca import findStandardInterface
import logging
logger = logging.getLogger("unfurl")
def isExternalTemplateCompatible(external, template):
    """Return True when *template* can be satisfied by the external
    template *external*.

    Matching is by name only for now; a name match with an incompatible
    type is treated as a configuration error and raises UnfurlError.
    """
    # for now, require template names to match
    if external.name != template.name:
        return False
    if not external.isCompatibleType(template.type):
        raise UnfurlError(
            'external template "%s" not compatible with local template'
            % template.name
        )
    return True
class Plan(object):
@staticmethod
def getPlanClassForWorkflow(workflow):
return dict(
deploy=DeployPlan,
undeploy=UndeployPlan,
stop=UndeployPlan,
run=RunNowPlan,
check=ReadOnlyPlan,
discover=ReadOnlyPlan,
).get(workflow, WorkflowPlan)
interface = "None"
    def __init__(self, root, toscaSpec, jobOptions):
        """Create a plan for *root* driven by *toscaSpec* and *jobOptions*.

        Raises UnfurlError when jobOptions names a template that does not
        exist in the spec.  self.filterTemplate restricts the plan to a
        single template when set.
        """
        self.jobOptions = jobOptions
        self.workflow = jobOptions.workflow
        self.root = root
        self.tosca = toscaSpec
        assert self.tosca
        if jobOptions.template:
            filterTemplate = self.tosca.getTemplate(jobOptions.template)
            if not filterTemplate:
                raise UnfurlError(
                    "specified template not found: %s" % jobOptions.template
                )
            self.filterTemplate = filterTemplate
        else:
            self.filterTemplate = None
    def findShadowInstance(self, template, match=isExternalTemplateCompatible):
        """Find (or lazily create) the local shadow of an external instance
        matching *template*.

        Directly imported instances are checked first; topologies imported
        with spec instance == "*" are searched as a fallback.  Returns the
        shadow instance, or None when nothing external matches.
        """
        searchAll = []
        for name, value in self.root.imports.items():
            external = value.resource
            # XXX if external is a Relationship and template isn't, get it's target template
            # if no target, create with status == unknown
            if match(external.template, template):
                if external.shadow and external.root is self.root:
                    # shadowed instance already created
                    return external
                else:
                    return self.createShadowInstance(external, name)
            if value.spec.get("instance") == "*":
                searchAll.append((name, value.resource))
        # look in the topologies where were are importing everything
        for name, root in searchAll:
            for external in root.getSelfAndDescendents():
                if match(external.template, template):
                    return self.createShadowInstance(external, name)
        return None
    def createShadowInstance(self, external, importName):
        """Create a local instance shadowing *external* and register it in
        self.root.imports.

        The shadow's name is the import name itself when it is the import's
        top-level resource, otherwise "importName:externalName".  Parents
        are shadowed recursively so the local tree mirrors the external one.
        """
        if self.root.imports[importName].resource is external:
            name = importName
        else:
            name = importName + ":" + external.name
        if external.parent and external.parent.parent:
            # assumes one-to-one correspondence instance and template
            parent = self.findShadowInstance(external.parent.template)
            if not parent:  # parent wasn't in imports, add it now
                parent = self.createShadowInstance(external.parent, importName)
        else:
            parent = self.root
        shadowInstance = external.__class__(
            name, external.attributes, parent, external.template, external
        )
        shadowInstance.shadow = external
        # Imports.__setitem__ will add or update:
        self.root.imports[name] = shadowInstance
        return shadowInstance
def findResourcesFromTemplate(self, template):
if template.abstract == "select":
# XXX also match node_filter if present
shadowInstance = self.findShadowInstance(template)
if shadowInstance:
yield shadowInstance
else:
logger.info(
"could not find external instance for template %s", template.name
)
# XXX also yield newly created parents that needed to be checked?
else:
for resource in self.findResourcesFromTemplateName(template.name):
yield resource
def findResourcesFromTemplateName(self, name):
# XXX make faster
for resource in self.root.getSelfAndDescendents():
if resource.template.name == name:
yield resource
def findParentResource(self, source):
parentTemplate = findParentTemplate(source.toscaEntityTemplate)
if not parentTemplate:
return self.root
for parent in self.findResourcesFromTemplateName(parentTemplate.name):
# XXX need to evaluate matches
return parent
raise UnfurlError(
"could not find instance of template: %s" % parentTemplate.name
)
def createResource(self, template):
parent = self.findParentResource(template)
# XXX if joboption.check: status = Status.unknown
if not parent.parent or parent.missing:
# parent is root or parent doesn't exist so this can't exist either, set to pending
status = Status.pending
else:
status = Status.unknown
# Set the initial status of new resources to status instead of defaulting to "unknown"
return NodeInstance(template.name, None, parent, template, status)
def findImplementation(self, interface, operation, template):
default = None
for iDef in template.getInterfaces():
if iDef.iname == interface or iDef.type == interface:
if iDef.name == operation:
return iDef
if iDef.name == "default":
default = iDef
return default
    def _runOperation(self, startState, op, resource, reason=None, inputs=None):
        """Generator: yield a TaskRequest for *op* on *resource*, receive
        the executed task back via send(), then yield whether anything ran.

        When *startState* is given, the resource's node state is advanced
        to it before the task runs, and bumped one step forward afterwards
        if the task succeeded without updating the state itself.
        """
        ran = False
        req = self.createTaskRequest(op, resource, reason, inputs)
        if not req.error:
            if startState is not None and (
                not resource.state or startState > resource.state
            ):
                resource.state = startState
            task = yield req
            if task:
                ran = True
                # if the state hasn't been set by the task, advance the state
                if (
                    startState is not None
                    and task.result.success
                    and resource.state == startState
                ):
                    # task succeeded but didn't update nodestate
                    resource.state = NodeState(resource.state + 1)
        yield ran
    def _executeDefaultConfigure(self, resource, reason=None, inputs=None):
        """Run the TOSCA declarative configure sequence for *resource*:
        pre_configure_target/source hooks, Standard.configure, then the
        post_configure hooks, yielding TaskRequests throughout.
        """
        # 5.8.5.4 Node-Relationship configuration sequence p. 229
        # Depending on which side (i.e., source or target) of a relationship a node is on, the orchestrator will:
        # Invoke either the pre_configure_source or pre_configure_target operation as supplied by the relationship on the node.
        targetConfigOps = resource.template.getCapabilityInterfaces()
        # test for targetConfigOps to avoid creating unnecessary instances
        if targetConfigOps:
            for capability in resource.capabilities:
                # Operation to pre-configure the target endpoint.
                for relationship in capability.relationships:
                    # we're the target, source may not have been created yet
                    # XXX if not relationship.source create the instance
                    gen = self._runOperation(
                        NodeState.configuring,
                        "Configure.pre_configure_target",
                        relationship,
                        reason,
                    )
                    req = gen.send(None)
                    if req:
                        gen.send((yield req))
        # we're the source, target has already started
        sourceConfigOps = resource.template.getRequirementInterfaces()
        if sourceConfigOps:
            if resource.template.getRequirementInterfaces():
                # Operation to pre-configure the target endpoint
                for relationship in resource.requirements:
                    gen = self._runOperation(
                        NodeState.configuring,
                        "Configure.pre_configure_source",
                        relationship,
                        reason,
                    )
                    req = gen.send(None)
                    if req:
                        gen.send((yield req))
        # The node's own configure operation runs between the pre- and
        # post-configure relationship hooks.
        gen = self._runOperation(
            NodeState.configuring, "Standard.configure", resource, reason, inputs
        )
        req = gen.send(None)
        if req:
            gen.send((yield req))
        if sourceConfigOps:
            for requirement in resource.requirements:
                gen = self._runOperation(
                    NodeState.configuring,
                    "Configure.post_configure_source",
                    requirement,
                    reason,
                )
                req = gen.send(None)
                if req:
                    gen.send((yield req))
        if targetConfigOps:
            for capability in resource.capabilities:
                # we're the target, source may not have been created yet
                # Operation to post-configure the target endpoint.
                for relationship in capability.relationships:
                    # XXX if not relationship.source create the instance
                    gen = self._runOperation(
                        NodeState.configuring,
                        "Configure.post_configure_target",
                        relationship,
                        reason,
                    )
                    req = gen.send(None)
                    if req:
                        gen.send((yield req))
    def executeDefaultDeploy(self, resource, reason=None, inputs=None):
        """Run the TOSCA declarative deploy workflow for *resource*:
        create (when missing/forced/failed-early), configure, then start,
        yielding TaskRequests and advancing node state along the way.
        """
        # 5.8.5.2 Invocation Conventions p. 228
        # 7.2 Declarative workflows p.249
        ran = False
        missing = (
            resource.status in [Status.unknown, Status.absent, Status.pending]
            and resource.state != NodeState.stopped  # stop sets Status back to pending
        )
        # if the resource doesn't exist or failed while creating:
        initialState = not resource.state or resource.state == NodeState.creating
        if (
            missing
            or self.jobOptions.force
            or (resource.status == Status.error and initialState)
        ):
            gen = self._runOperation(
                NodeState.creating, "Standard.create", resource, reason, inputs
            )
            req = gen.send(None)
            if req and gen.send((yield req)):
                ran = True
                if resource.created is None:
                    resource.created = True
        if (
            not ran and resource.state != NodeState.stopped
        ) or resource.state == NodeState.created:
            if resource.state and resource.state > NodeState.configured:
                # rerunning configuration, reset state
                assert not ran
                resource.state = NodeState.creating
            gen = Generate(self._executeDefaultConfigure(resource, reason, inputs))
            while gen():
                gen.result = yield gen.next
            ran = gen.next
            if ran and resource.created is None:
                resource.created = True
            # XXX if the resource had already existed, call target_changed
            # "Operation to notify source some property or attribute of the target changed"
            # if not missing:
            #     for requirement in requirements:
            #         call target_changed
        if resource.state in [NodeState.configured, NodeState.stopped] or (
            not ran and resource.state == NodeState.created
        ):
            # configured or if no configure operation exists then node just needs to have been created
            gen = self._runOperation(
                NodeState.starting, "Standard.start", resource, reason, inputs
            )
            req = gen.send(None)
            if req and gen.send((yield req)):
                ran = True
        # XXX these are only called when adding instances
        # add_source: Operation to notify the target node of a source node which is now available via a relationship.
        # add_target: Operation to notify source some property or attribute of the target changed
def executeDefaultUndeploy(self, resource, reason=None, inputs=None):
    """Run the built-in "undeploy"/"stop" workflow for one instance.

    Yields a TaskRequest for Standard.stop when the instance is running
    (or the workflow is "stop"), then — unless the workflow is just
    "stop" — either Standard.delete or Install.revert depending on
    whether this instance's lifecycle is managed by us.
    """
    # XXX run check before if defined?
    # XXX don't delete if dirty
    # XXX remove_target: Operation called on source when a target instance is removed
    # (but only called if add_target had been called)
    if (
        resource.state in [NodeState.starting, NodeState.started]
        or self.workflow == "stop"
    ):
        nodeState = NodeState.stopping
        op = "Standard.stop"
        gen = self._runOperation(nodeState, op, resource, reason, inputs)
        req = gen.send(None)
        if req:
            gen.send((yield req))
    if self.workflow == "stop":
        # a plain stop never deletes the instance
        return
    if resource.created or self.jobOptions.destroyunmanaged:
        nodeState = NodeState.deleting
        op = "Standard.delete"
    else:
        # we didn't create this instance: revert our changes instead of deleting
        nodeState = None
        op = "Install.revert"
    gen = self._runOperation(nodeState, op, resource, reason, inputs)
    req = gen.send(None)
    if req:
        gen.send((yield req))
    # Note: Status.absent is set in _generateConfigurations
def executeDefaultInstallOp(self, operation, resource, reason=None, inputs=None):
    """Yield a TaskRequest for the given Install interface operation
    (e.g. "check" or "discover") if an implementation exists."""
    req = self.createTaskRequest("Install." + operation, resource, reason, inputs)
    if not req.error:  # skip when no implementation was found
        yield req
def createTaskRequest(self, operation, resource, reason=None, inputs=None):
    """Create a TaskRequest for running *operation* on *resource*.

    The implementation can either be a named artifact (including a python
    configurator class) or a file path. If no implementation can be
    found, an error TaskRequest (configSpec named "#error") is returned.

    Args:
        operation: dotted operation name, e.g. "Standard.create".
        resource: the target instance.
        reason: why the task was included; when it equals the current
            workflow it also becomes the task's workflow.
        inputs: operation inputs, merged over the declared defaults.
    """
    interface, sep, action = operation.rpartition(".")
    iDef = self.findImplementation(interface, action, resource.template)
    if iDef and iDef.name != "default":
        # merge inputs
        if inputs:
            inputs = dict(iDef.inputs, **inputs)
        else:
            inputs = iDef.inputs or {}
        kw = getConfigSpecArgsFromImplementation(iDef, inputs, resource.template)
    else:
        kw = None
    if kw:
        if reason:
            name = "for %s: %s.%s" % (reason, interface, action)
            if reason == self.workflow:
                # set the task's workflow instead of using the default ("deploy")
                kw["workflow"] = reason
        else:
            name = "%s.%s" % (interface, action)
        configSpec = ConfigurationSpec(name, action, **kw)
        logger.debug(
            "creating configuration %s with %s to run for %s: %s",
            configSpec.name,
            configSpec.inputs,
            resource.name,
            reason or action,
        )
    else:
        errorMsg = (
            'unable to find an implementation for operation "%s" on node "%s"'
            % (action, resource.template.name)
        )
        configSpec = ConfigurationSpec("#error", action, className=errorMsg)
        logger.debug(errorMsg)
        reason = "error"
    return TaskRequest(configSpec, resource, reason or action)
def generateDeleteConfigurations(self, include):
    """Yield undeploy task requests for instances selected by *include*.

    Args:
        include: callable(resource) returning a truthy "reason" when the
            instance should be torn down, falsy to leave it alone.
    """
    for resource in getOperationalDependents(self.root):
        # reverse to teardown leaf nodes first
        logger.debug("checking instance for removal: %s", resource.name)
        if resource.shadow or resource.template.abstract:  # readonly resource
            continue
        # check if creation and deletion is managed externally
        if not resource.created and not self.jobOptions.destroyunmanaged:
            continue
        if isinstance(resource.created, six.string_types):
            # creation and deletion is managed by another instance
            continue
        # if resource exists (or unknown)
        if resource.status not in [Status.absent, Status.pending]:
            reason = include(resource)
            if reason:
                logger.debug("%s instance %s", reason, resource.name)
                # pruning always tears down, regardless of the current workflow
                workflow = "undeploy" if reason == Reason.prune else self.workflow
                gen = Generate(
                    self._generateConfigurations(resource, reason, workflow)
                )
                while gen():
                    gen.result = yield gen.next
def _getDefaultGenerator(self, workflow, resource, reason=None, inputs=None):
if workflow == "deploy":
return self.executeDefaultDeploy(resource, reason, inputs)
elif workflow == "undeploy" or workflow == "stop":
return self.executeDefaultUndeploy(resource, reason, inputs)
elif workflow == "check" or workflow == "discover":
return self.executeDefaultInstallOp(workflow, resource, reason, inputs)
return None
def getSuccessStatus(self, workflow):
    """Return the local Status a fully successful *workflow* run should
    leave the instance in, or None for workflows with no implied status."""
    return {
        "deploy": Status.ok,
        "stop": Status.pending,
        "undeploy": Status.absent,
    }.get(workflow)
def _generateConfigurations(self, resource, reason, workflow=None):
    """Run the given (or current) workflow on *resource*, yielding its
    task requests, then update the instance's localStatus if every task
    that ran this workflow directly on the instance succeeded.
    """
    workflow = workflow or self.workflow
    # check if this workflow has been delegated to one explicitly declared
    configGenerator = self.executeWorkflow(workflow, resource)
    if not configGenerator:
        configGenerator = self._getDefaultGenerator(workflow, resource, reason)
        if not configGenerator:
            raise UnfurlError("can not get default for workflow " + workflow)
    oldStatus = resource.localStatus
    successes = 0
    failures = 0
    successStatus = self.getSuccessStatus(workflow)
    gen = Generate(configGenerator)
    while gen():
        result = yield gen.next
        gen.result = result
        task = gen.result
        if not task:  # this was skipped (not shouldRun() or filtered step)
            continue
        # only count tasks that ran this workflow directly on this instance
        if task.configSpec.workflow == workflow and task.target is resource:
            if task.result.success:
                successes += 1
                # if task explicitly set the status use that
                if task.result.status is not None:
                    successStatus = task.result.status
            else:
                failures += 1
    # note: in ConfigTask.finished():
    # if any task failed and (maybe) modified, target.localStatus will be set to error or unknown
    # if any task succeeded and modified, target.lastStateChange will be set, but not localStatus
    # XXX we should apply ConfigTask.finished() logic here in aggregate (use aggregateStatus?):
    # e.g. if any task failed and any task modified but none explicitly set status, set error state
    # use case: configure succeeds but start fails
    if successStatus is not None and successes and not failures:
        resource.localStatus = successStatus
        if oldStatus != successStatus:
            resource._lastConfigChange = task.changeId
        if successStatus == Status.ok and resource.created is None:
            resource.created = True
def executeWorkflow(self, workflowName, resource):
    """Return a generator running the explicitly declared TOSCA workflow
    *workflowName* on *resource*, or None when it doesn't apply (not
    defined, precondition failed, or no step targets this template)."""
    workflow = self.tosca.getWorkflow(workflowName)
    if not workflow:
        return None
    if not workflow.matchPreconditions(resource):  # check precondition
        return None
    steps = [
        step
        for step in workflow.initialSteps()
        # XXX check target_relationship too
        # XXX target can be a group name too
        if resource.template.isCompatibleTarget(step.target)
    ]
    if not steps:
        return None
    try:
        # push resource._workflow_inputs
        return self.executeSteps(workflow, steps, resource)
    finally:
        pass  # pop _workflow_inputs
def executeSteps(self, workflow, steps, resource):
    """Execute the given workflow *steps* (and any follow-on steps they
    queue via on_success/on_failure) against *resource*, yielding the
    task requests they produce."""
    queue = steps[:]
    while queue:
        step = queue.pop()
        if not workflow.matchStepFilter(step.name, resource):
            logger.debug(
                "step did not match filter %s with %s", step.name, resource.name
            )
            continue
        stepGenerator = self.executeStep(step, resource)
        result = None
        try:
            while True:
                task = stepGenerator.send(result)
                if isinstance(task, list):  # more steps
                    # the step finished and named follow-on steps: queue them
                    queue.extend([workflow.getStep(stepName) for stepName in task])
                    break
                else:
                    result = yield task
        except StopIteration:
            pass
def executeStep(self, step, resource):
    """Run one workflow step's activities on *resource*.

    Yields task requests for call_operation activities (and whatever any
    inline/delegate sub-workflow yields); finally yields the step's
    on_failure or on_success step-name list depending on the last result.
    """
    logger.debug("executing step %s for %s", step.name, resource.name)
    result = None
    for activity in step.activities:
        if activity.type == "inline":
            # XXX inputs
            workflowGenerator = self.executeWorkflow(activity.inline, resource)
            if not workflowGenerator:
                continue
            gen = Generate(workflowGenerator)
            while gen():
                gen.result = yield gen.next
            if gen.result:
                result = gen.result
        elif activity.type == "call_operation":
            # XXX need to pass operation_host (see 3.6.27 Workflow step definition p188)
            # if target is a group can be value can be node_type or node template name
            # if its a node_type select nodes matching the group
            result = yield self.createTaskRequest(
                activity.call_operation,
                resource,
                "step:" + step.name,
                activity.inputs,
            )
        elif activity.type == "set_state":
            if "managed" in activity.set_state:
                # "managed"/"unmanaged" toggles whether we own the instance
                resource.created = (
                    False if activity.set_state == "unmanaged" else True
                )
            else:
                try:
                    resource.state = activity.set_state
                except KeyError:
                    # not a NodeState name: treat the value as a Status instead
                    resource.localStatus = toEnum(Status, activity.set_state)
        elif activity.type == "delegate":
            # XXX inputs
            configGenerator = self._getDefaultGenerator(
                activity.delegate, resource, activity.delegate
            )
            if not configGenerator:
                continue
            gen = Generate(configGenerator)
            while gen():
                gen.result = yield gen.next
            if gen.result:
                result = gen.result
        if not result or not result.result.success:
            yield step.on_failure
            break
    else:
        # all activities succeeded
        yield step.on_success
def _getTemplates(self):
    """Return the plan's candidate node templates, ordered ancestors-first
    (configurator templates are excluded)."""
    if self.tosca.nodeTemplates:
        candidates = [
            tpl
            for tpl in self.tosca.nodeTemplates.values()
            if not tpl.isCompatibleType(self.tosca.ConfiguratorType)
        ]
    else:
        candidates = []
    # order by ancestors
    ordered = orderTemplates(
        {tpl.name: tpl for tpl in candidates},
        self.filterTemplate and self.filterTemplate.name,
        self.interface,
    )
    return list(ordered)
def includeNotFound(self, template):
    """Whether to instantiate *template* when no existing instance
    matches it. The base plan always does; subclasses override
    (e.g. DeployPlan requires the add or force job option)."""
    return True
def _generateWorkflowConfigurations(self, instance, oldTemplate):
    """Yield the current workflow's task requests for *instance*.
    Subclasses override to add inclusion checks (see DeployPlan)."""
    configGenerator = self._generateConfigurations(instance, self.workflow)
    gen = Generate(configGenerator)
    while gen():
        gen.result = yield gen.next
def executePlan(self):
    """
    Generate candidate tasks

    yields TaskRequests

    Walks the ordered node templates, running the workflow on each
    existing instance (creating an instance first when none exists and
    the template qualifies), then optionally prunes instances whose
    template was not visited.
    """
    opts = self.jobOptions
    templates = self._getTemplates()
    logger.debug("checking for tasks for templates %s", [t.name for t in templates])
    visited = set()
    for template in templates:
        found = False
        for resource in self.findResourcesFromTemplate(template):
            found = True
            visited.add(id(resource))
            gen = Generate(self._generateWorkflowConfigurations(resource, template))
            while gen():
                gen.result = yield gen.next
        if (
            not found
            and not template.abstract
            and "dependent" not in template.directives
        ):
            include = self.includeNotFound(template)
            if include:
                resource = self.createResource(template)
                visited.add(id(resource))
                gen = Generate(self._generateWorkflowConfigurations(resource, None))
                while gen():
                    gen.result = yield gen.next
    if opts.prune:
        # tear down instances not accounted for by any template above
        test = (
            lambda resource: Reason.prune if id(resource) not in visited else False
        )
        gen = Generate(self.generateDeleteConfigurations(test))
        while gen():
            gen.result = yield gen.next
class DeployPlan(Plan):
    """Plan for the "deploy" workflow: decides which instances need work
    (new, changed, or in need of repair) and generates their tasks."""

    interface = "Standard"

    def includeNotFound(self, template):
        # only create missing instances when the user asked for it
        if self.jobOptions.add or self.jobOptions.force:
            return Reason.add
        return None

    def includeTask(self, template, resource):
        # XXX doc string woefully out of date
        """Returns whether or not the config should be included in the current job.

        Is it out of date?
        Has its configuration changed?
        Has its dependencies changed?
        Are the resources it modifies in need of repair?

        Reasons include: "force", "add", "upgrade", "update", "re-add", 'prune',
        'missing', "config changed", "failed to apply", "degraded", "error".

        Args:
            config (ConfigurationSpec): The :class:`ConfigurationSpec` candidate
            lastChange (Configuration): The :class:`Configuration` representing the last time
              the given :class:`ConfigurationSpec` was applied or `None`

        Returns:
            (str, ConfigurationSpec): Returns a pair with the reason why the task was included
              and the :class:`ConfigurationSpec` to run or `None` if it shouldn't be included.
        """
        assert template and resource
        jobOptions = self.jobOptions
        if jobOptions.add and not resource.lastConfigChange:
            # add if it's a new resource
            return Reason.add
        if jobOptions.force:
            return Reason.force
        # if the specification changed:
        oldTemplate = resource.template
        if template != oldTemplate:
            if jobOptions.upgrade:
                return Reason.upgrade
            if jobOptions.update:
                # only apply the new configuration if doesn't result in a major version change
                if True:  # XXX if isMinorDifference(template, oldTemplate)
                    return Reason.update
        reason = self.checkForRepair(resource)
        # there isn't a new config to run, see if the last applied config needs to be re-run
        if not reason and (
            jobOptions.upgrade or jobOptions.update
        ):  # note: update is true by default
            return Reason.reconfigure
        return reason

    def checkForRepair(self, instance):
        """Map the instance's current status to a repair Reason, honoring
        the repair job option ("none" | "missing" | "degraded" | "error")."""
        jobOptions = self.jobOptions
        assert instance
        if jobOptions.repair == "none":
            return None
        status = instance.status
        if status in [Status.unknown, Status.pending]:
            if jobOptions.repair == "missing":
                return Reason.missing
            elif instance.required:
                status = Status.error  # treat as error
            else:
                return None
        if status not in [Status.degraded, Status.error]:
            return None
        if jobOptions.repair == "degraded":
            assert status > Status.ok, status
            return Reason.degraded  # repair this
        elif status == Status.degraded:
            assert jobOptions.repair == "error", jobOptions.repair
            return None  # skip repairing this
        else:
            assert jobOptions.repair == "error", "repair: %s status: %s" % (
                jobOptions.repair,
                instance.status,
            )
            return Reason.error  # repair this

    def isInstanceReadOnly(self, instance):
        # shadowed (imported) and discover-only instances are never modified
        return instance.shadow or "discover" in instance.template.directives

    def _generateWorkflowConfigurations(self, instance, oldTemplate):
        """Yield deploy task requests for *instance*, first deciding
        whether it should be included and whether a check/discover pass
        should run (and possibly suffice) beforehand."""
        # if oldTemplate is not None this is an existing instance, so check if we should include
        if oldTemplate:
            reason = self.includeTask(oldTemplate, instance)
            if not reason:
                logger.debug(
                    "not including task for %s:%s", instance.name, oldTemplate.name
                )
                return
        else:  # this is newly created resource
            reason = Reason.add

        # run "check" or "discover" first when the real state is uncertain
        if instance.status == Status.unknown or instance.shadow:
            installOp = "check"
        elif "discover" in instance.template.directives:
            installOp = "discover"
        else:
            installOp = None
        if installOp:
            status = instance.status
            configGenerator = self._generateConfigurations(
                instance, installOp, installOp
            )
            if configGenerator:
                gen = Generate(configGenerator)
                while gen():
                    gen.result = yield gen.next
            if self.isInstanceReadOnly(instance):
                return  # we're done
            if instance.operational and status != instance.status:
                return  # it checked out! we're done
        configGenerator = self._generateConfigurations(instance, reason)
        gen = Generate(configGenerator)
        while gen():
            gen.result = yield gen.next
class UndeployPlan(Plan):
    """Plan for the "undeploy" (and "stop") workflow: tears down existing
    instances, optionally filtered to a single template."""

    def executePlan(self):
        """
        yields configSpec, target, reason
        """
        gen = Generate(self.generateDeleteConfigurations(self.includeForDeletion))
        while gen():
            gen.result = yield gen.next

    def includeForDeletion(self, resource):
        # skip instances that don't match the requested template filter
        if self.filterTemplate and resource.template != self.filterTemplate:
            return None
        # return value is used as "reason"
        return self.workflow
class ReadOnlyPlan(Plan):
    """Plan variant that selects operations from the TOSCA "Install"
    interface (e.g. check, discover) instead of "Standard"."""

    interface = "Install"
class WorkflowPlan(Plan):
    """Plan that runs a user-specified, explicitly declared TOSCA
    workflow (jobOptions.workflow) instead of a built-in one."""

    def executePlan(self):
        """
        yields configSpec, target, reason
        """
        workflow = self.tosca.getWorkflow(self.jobOptions.workflow)
        if not workflow:
            raise UnfurlError('workflow not found: "%s"' % self.jobOptions.workflow)
        for step in workflow.initialSteps():
            if self.filterTemplate and not self.filterTemplate.isCompatibleTarget(
                step.target
            ):
                continue
            # a step's target may name a node type or a specific template
            if self.tosca.isTypeName(step.target):
                templates = self.tosca.findMatchingTemplates(step.target)
            else:
                template = self.tosca.findTemplate(step.target)
                if not template:
                    continue
                templates = [template]

            for template in templates:
                for resource in self.findResourcesFromTemplate(template):
                    gen = self.executeSteps(workflow, [step], resource)
                    result = None
                    try:
                        while True:
                            configuration = gen.send(result)
                            result = yield configuration
                    except StopIteration:
                        pass
class RunNowPlan(Plan):
    """Plan for ad-hoc "run" jobs: execute a single operation or a raw
    command line (via shell or ansible) against one instance or root."""

    def _createConfigurator(self, args, action, inputs=None, timeout=None):
        """Build a ConfigurationSpec running the user's command line:
        via the ansible configurator when a module or host was given,
        otherwise via the shell configurator."""
        if args.get("module") or args.get("host"):
            className = "unfurl.configurators.ansible.AnsibleConfigurator"
            module = args.get("module") or "command"
            module_args = " ".join(args["cmdline"])
            params = dict(playbook=[{module: module_args}])
        else:
            className = "unfurl.configurators.shell.ShellConfigurator"
            params = dict(command=args["cmdline"])
        if inputs:
            params.update(inputs)

        return ConfigurationSpec(
            "cmdline",
            action,
            className=className,
            inputs=params,
            operation_host=args.get("host"),
            timeout=timeout,
        )

    def executePlan(self):
        """Yield TaskRequests for the requested ad-hoc operation/command."""
        instanceFilter = self.jobOptions.instance
        if instanceFilter:
            resource = self.root.findResource(instanceFilter)
            if not resource:
                # see if there's a template with the same name and create the resource
                template = self.tosca.getTemplate(instanceFilter)
                if template:
                    resource = self.createResource(template)
                else:
                    raise UnfurlError(
                        "specified instance not found: %s" % instanceFilter
                    )
            resources = [resource]
        else:
            resources = [self.root]

        # userConfig has the job options explicitly set by the user
        operation = self.jobOptions.userConfig.get("operation")
        operation_host = self.jobOptions.userConfig.get("host")
        if not operation:
            # no named operation: run the raw command line instead
            configSpec = self._createConfigurator(self.jobOptions.userConfig, "run")
        else:
            configSpec = None
            interface, sep, action = operation.rpartition(".")
            if not interface and findStandardInterface(operation):  # shortcut
                operation = findStandardInterface(operation) + "." + operation
        for resource in resources:
            if configSpec:
                yield TaskRequest(configSpec, resource, "run")
            else:
                req = self.createTaskRequest(operation, resource, "run")
                if not req.error:  # if operation was found:
                    req.configSpec.operation_host = operation_host
                    yield req
def findExplicitOperationHosts(template, interface):
    """Yield operation_host names explicitly declared in *template*'s
    interface implementations, skipping the reserved TOSCA keywords.

    NOTE(review): the *interface* parameter is currently unused — every
    interface definition on the template is scanned; confirm intent.
    """
    for iDef in template.getInterfaces():
        if isinstance(iDef.implementation, dict):
            operation_host = iDef.implementation.get("operation_host")
            if operation_host and operation_host not in [
                "localhost",
                "ORCHESTRATOR",
                "SELF",
                "HOST",
                "TARGET",
                "SOURCE",
            ]:
                yield operation_host
def orderTemplates(templates, filter=None, interface=None):
    """Yield node specs ordered so that ancestors (relationship targets)
    come before their dependents, each spec at most once.

    Args:
        templates: dict of NodeSpecs keyed by template name.
        filter: if set, only consider the template with this name.
        interface: if set, also yield any explicitly declared
            operation_host templates before the specs that use them.
    """
    # templates is dict of NodeSpecs
    seen = set()
    for source in templates.values():
        if filter and source.name != filter:
            continue
        if source in seen:
            continue
        if interface:
            # operation hosts must be deployed before the nodes that run on them
            for operation_host in findExplicitOperationHosts(source, interface):
                operationHostSpec = templates.get(operation_host)
                if operationHostSpec and operationHostSpec not in seen:
                    seen.add(operationHostSpec)
                    yield operationHostSpec
        # getAncestorTemplates yields ancestors first and source last
        for ancestor in getAncestorTemplates(source.toscaEntityTemplate):
            spec = templates.get(ancestor.name)
            # (removed a redundant nested "if spec:" re-check here)
            if spec and spec not in seen:
                seen.add(spec)
                yield spec
def getAncestorTemplates(source):
    """Depth-first yield of *source*'s relationship targets (ancestors),
    ending with *source* itself.

    note: opposite direction as NodeSpec.relationships
    """
    for relationship, _req, _reqDef in source.relationships:
        yield from getAncestorTemplates(relationship.target)
    yield source
def findParentTemplate(source):
    """Return the target of *source*'s first HostedOn relationship,
    or None if it is not hosted on anything."""
    hosts = (
        relationship.target
        for relationship, _req, _reqDef in source.relationships
        if relationship.type == "tosca.relationships.HostedOn"
    )
    return next(hosts, None)
def getOperationalDependents(resource, seen=None):
    """Depth-first yield of *resource*'s operational dependents, each
    instance at most once, with leaf nodes before their dependents
    (so they can be torn down first)."""
    seen = set() if seen is None else seen
    for dep in resource.getOperationalDependents():
        if id(dep) in seen:
            continue
        seen.add(id(dep))
        yield from getOperationalDependents(dep, seen)
        yield dep
# XXX!:
# def buildDependencyGraph():
# """
# We need to find each executed configuration that is affected by a configuration change
# and re-execute them
#
# dependencies map to inbound edges
# lastConfigChange filters ConfigTasks
# ConfigTasks.configurationResource dependencies are inbound
# keys in ConfigTasks.changes map to a outbound edges to resources
#
# Need to distinguish between runtime dependencies and configuration dependencies?
# """
#
#
# def buildConfigChangedExecutionPlan():
# """
# graph = buildDependencyGraph()
#
# We follow the edges if a resource's attributes or dependencies have changed
# First we validate that we can execute the plan by validating configurationResource in the graph
# If it validates, add to plan
#
# After executing a task, only follow the dependent outputs that changed no need to follow those dependencies
# """
#
#
# def validateNode(resource, expectedTemplateOrType):
# """
# First make sure the resource conforms to the expected template (type, properties, attributes, capabilities, requirements, etc.)
# Then for each dependency that corresponds to a required relationship, call validateNode() on those resources with the declared type or template for the relationship
# """
#
#
# def buildUpdateExecutionPlan():
# """
# Only apply updates that don't change the currently applied spec,
# Starting with the start resource compare deployed artifacts and software nodes associate with it with current template
# and if the difference is no more than a minor version bump,
# retrieve the old version of the topology that is associated with the appropriate configuredBy
# and with it try to find and queue a configurator that can apply those changes.
#
# For each resource dependency, call buildUpdateExecutionPlan().
# """
|
# 1. 入门配置
import argparse
parser = argparse.ArgumentParser(description="used for test")
parser.add_argument('--version', '-v', action='version', version='%(prog)s version: v 0.01', help='show the version')
parser.add_argument('--debug', '-d', action='store_true', help='show the version', default=False)
args = parser.parse_args()
print('=== end ===')
# please type 'python argparse1.py -v, python argparse1.py -v'
|
# Generated by Django 2.2.4 on 2019-11-27 06:42
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add an optional ``security`` ForeignKey on visitors.track_entry
    pointing at security_guards.Security (CASCADE on delete)."""

    dependencies = [
        ('security_guards', '0003_auto_20191006_1603'),
        ('visitors', '0015_track_entry_with_vehicle'),
    ]

    operations = [
        migrations.AddField(
            model_name='track_entry',
            name='security',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='security_guards.Security'),
        ),
    ]
|
from flask import Flask, render_template, redirect, url_for, jsonify, send_from_directory
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_bootstrap import Bootstrap
from flask_cors import CORS
from config import config
#from .model import Role
# Flask extension singletons, created unbound; each is attached to the
# application inside create_app() via its init_app() hook.
db = SQLAlchemy()
cors = CORS()
bootstrap = Bootstrap()
login_manager = LoginManager()
login_manager.login_view = 'main.login'  # endpoint to redirect to when login is required
def create_app(config_name):
    """Application factory: build and configure a Flask app.

    Args:
        config_name: key into the ``config`` mapping selecting which
            configuration class to load.

    Returns:
        The configured Flask app with the api blueprint mounted under
        /api and the main blueprint at the root.
    """
    app = Flask(__name__)
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)

    db.init_app(app)
    # allow cross-origin requests on the API endpoints only
    cors.init_app(app, resources={r"/api/*": {"origins": "*"}})
    bootstrap.init_app(app)
    login_manager.init_app(app)
    #Role.insert_roles()

    # attach routes and custom error pages here
    from .api import api as api_blueprint
    app.register_blueprint(api_blueprint, url_prefix='/api')
    from .main import main as main_blueprint
    app.register_blueprint(main_blueprint)

    return app
|
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import sys
def Round(a):
    # Nearest integer for non-negative a; note int() truncates toward
    # zero, so negative inputs are not rounded symmetrically.
    return int(a+.5)
def init():
    """Set up GL state: white background, magenta draw color, 3px points,
    and a 2D orthographic projection covering 600x600 units."""
    glClearColor(1.0,1.0,1.0,0.0)
    glColor(1.0,0.0,1.0)
    glPointSize(3.0)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    gluOrtho2D(0.0,600.0,0.0,600.0)
def setpixel(x,y):
    """Plot a single point at (x, y), flushing so it appears immediately."""
    glBegin(GL_POINTS)
    glVertex2i(x,y)
    glEnd()
    glFlush()
def readinput():
    """Prompt for the circle centre (xc, yc) and radius r, storing them
    as global integers.

    The int() conversion fixes a bug: under Python 3, input() returns a
    string, which would make expressions like ``1.25 - r`` in drawcir()
    raise a TypeError.
    """
    global xc,yc,r
    xc = int(input("Enter Xcoordinate of centre : "))
    yc = int(input("Enter Ycoordinate of centre : "))
    r = int(input("Enter Radius of circle : "))
def drawcir(xc,yc,r):
    """Draw a circle centered at (xc, yc) with radius r using a
    midpoint-circle style algorithm, plotting eight symmetric points per
    iteration.

    NOTE(review): the initial decision value (1.25 - r) and the update
    constants (+5, +3) computed from the pre-increment x/y differ from
    the textbook midpoint algorithm — verify accuracy for large radii.
    """
    pk=1.25-r
    x,y=0,r
    setpixel(Round(xc),Round(yc))  # also plots the centre point
    setpixel(Round(x),Round(y))
    while(x<=y):
        if pk>=0:
            # decision >= 0: step diagonally (x+1, y-1)
            pk1=pk+5+2*(x-y)
            x+=1
            y-=1
            pk=pk1
        else:
            # decision < 0: step horizontally (x+1, y unchanged)
            pk2=pk+3+2*x
            x+=1
            y=y
            pk=pk2
        # mirror the computed point into all eight octants
        setpixel(Round(xc+x),Round(yc+y))
        setpixel(Round(xc-x),Round(yc+y))
        setpixel(Round(xc+x),Round(yc-y))
        setpixel(Round(xc-x),Round(yc-y))
        setpixel(Round(xc+y),Round(yc+x))
        setpixel(Round(xc+y),Round(yc-x))
        setpixel(Round(xc-y),Round(yc+x))
        setpixel(Round(xc-y),Round(yc-x))
def display():
    """GLUT display callback: clear the window and draw the circle."""
    glClear(GL_COLOR_BUFFER_BIT)
    drawcir(xc,yc,r)
def main():
    """Create the GLUT window, read user input, and start the event loop."""
    glutInit(sys.argv)
    glutInitDisplayMode(GLUT_SINGLE|GLUT_RGB)
    glutInitWindowSize(600,600)
    glutInitWindowPosition(200,200)
    glutCreateWindow(" Mid Point Circle ")
    readinput()  # prompts on stdin before the event loop starts
    glutDisplayFunc(display)
    init()
    glutMainLoop()  # never returns

main()
|
def dominator(arr):
    """Return the value occurring in more than half of arr's elements,
    or -1 when no such dominator exists (including for an empty list).

    Uses a single Counter pass, O(n), instead of the original
    arr.count(x) inside the loop which was O(n^2).
    """
    from collections import Counter

    if not arr:
        return -1
    # the most frequent value is the only possible dominator
    value, count = Counter(arr).most_common(1)[0]
    return value if count > len(arr) / 2 else -1
'''
A zero-indexed array arr consisting of n integers is given.
The dominator of array arr is the value that occurs in more than
half of the elements of arr.
For example, consider array arr such that arr = [3,4,3,2,3,1,3,3]
The dominator of arr is 3 because it occurs in 5 out of 8 elements
of arr and 5 is more than a half of 8.
Write a function dominator(arr) that, given a zero-indexed array arr
consisting of n integers, returns the dominator of arr. The function
should return −1 if array does not have a dominator.
All values in arr will be >=0.
'''
|
"""Async and Octree config file.
Async/octree has its own little JSON config file. This is temporary
until napari has a system-wide one.
"""
import json
import logging
from pathlib import Path
from typing import Optional
from napari.settings import get_settings
from napari.utils.translations import trans
LOGGER = logging.getLogger("napari.loader")

# Default configuration used when async/octree is enabled without a
# user-supplied JSON config file (see get_octree_config below).
DEFAULT_OCTREE_CONFIG = {
    "loader_defaults": {
        "log_path": None,
        "force_synchronous": False,
        "num_workers": 10,
        "use_processes": False,
        "auto_sync_ms": 30,
        "delay_queue_ms": 100,
    },
    "octree": {
        "enabled": True,
        "tile_size": 256,
        "log_path": None,
        "loaders": {
            0: {"num_workers": 10, "delay_queue_ms": 100},
            2: {"num_workers": 10, "delay_queue_ms": 0},
        },
    },
}
def _get_async_config() -> Optional[dict]:
    """Get configuration implied by NAPARI_ASYNC.

    Returns
    -------
    Optional[dict]
        The async config to use or None if async not specified.
    """
    import copy

    async_var = get_settings().experimental.async_
    if async_var in [True, False]:
        async_var = str(int(async_var))
    # NAPARI_ASYNC can now only be "0" or "1".
    if async_var not in [None, "0", "1"]:
        raise ValueError(
            trans._(
                'NAPARI_ASYNC can only be "0" or "1"',
                deferred=True,
            )
        )
    # If NAPARI_ASYNC is "1" use defaults but with octree disabled.
    if async_var == "1":
        # deepcopy, not .copy(): a shallow copy shares the nested "octree"
        # dict, so setting enabled=False below would silently mutate
        # DEFAULT_OCTREE_CONFIG for every later caller.
        async_config = copy.deepcopy(DEFAULT_OCTREE_CONFIG)
        async_config['octree']['enabled'] = False
        return async_config
    # NAPARI_ASYNC is not enabled.
    return None
def get_octree_config() -> Optional[dict]:
    """Return the config data from the user's file or the default data.

    Returns
    -------
    Optional[dict]
        The config data we should use, or None when neither octree nor
        async is enabled.
    """
    settings = get_settings()
    octree_var = settings.experimental.octree
    if octree_var in [True, False]:
        octree_var = str(int(octree_var))

    # If NAPARI_OCTREE is not enabled, defer to NAPARI_ASYNC
    if octree_var in [None, "0"]:
        # This will return DEFAULT_ASYNC_CONFIG or None.
        return _get_async_config()

    # If NAPARI_OCTREE is "1" then use default config.
    if octree_var == "1":
        return DEFAULT_OCTREE_CONFIG

    # NAPARI_OCTREE should be a config file path
    path = Path(octree_var).expanduser()
    with path.open() as infile:
        json_config = json.load(infile)

    # Need to set this for the preferences dialog to build.
    settings.experimental.octree = True

    return json_config
|
"""
This is simple test suit. The test suit must be start with "test" or end with "test".
It must be annoted with pytest fixture.
"""
import logging
import os
from posixpath import basename
from typing import List
import glob
from time import sleep
import re
import pytest
from pathlib import Path
import inspect
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from utils.data_utils import DataUtils
from src.element_action import Element_Action
# uses the root logger - automatically will be enabled
logger = logging.getLogger(__name__)
cur_path = os.path.dirname(__file__)
@pytest.mark.usefixtures("intial_call")
@pytest.mark.usefixtures("driver_init")
class BaseTest:
    """
    Base Test class.

    Shared helpers for site tests. Each helper derives the site name
    from the calling test module's filename (test_<site>.py) via
    inspect.stack() and looks up site data through DataUtils.
    """

    @pytest.fixture
    def intial_call(self):
        # placeholder fixture; currently only sets a dummy name
        self.name = "satyanhojnroj"
        #self.site_navigator = Element_Action()
        pass

    def lunch_site(self):
        """Open the site URL mapped to the calling test module."""
        # derive the site name from the caller's filename (test_<site>.py)
        frame = inspect.stack()[1]
        module = inspect.getmodule(frame[0])
        filename = module.__file__
        site_name = os.path.basename(filename).replace(".py","").split("test_")[1]
        print(site_name)
        sleep(2)
        #Collect data with map
        self.site_data = DataUtils.site_map_data(self,site_name)
        print(self.site_data["name"])
        self.driver.get(self.site_data["name"])

    def accept_cookies(self):
        """Dismiss the cookie banner if one is present; log and continue
        otherwise."""
        frame = inspect.stack()[1]
        module = inspect.getmodule(frame[0])
        filename = module.__file__
        site_name = os.path.basename(filename).replace(".py","").split("test_")[1]
        self.navigator_data = DataUtils.data_navigator(self,site_name)
        try:
            sleep(2)
            self.driver.find_element_by_xpath(self.navigator_data["cookies"]).click()
        except:
            # NOTE(review): bare except silently swallows every error,
            # not just "element not found"
            logger.info("No cookies found")
            pass

    def open_registration(self):
        """Click through to the site's registration page."""
        frame = inspect.stack()[1]
        module = inspect.getmodule(frame[0])
        filename = module.__file__
        site_name = os.path.basename(filename).replace(".py","").split("test_")[1]
        self.navigator_data = DataUtils.data_navigator(self,site_name)
        sleep(2)
        # NOTE(review): the trailing string makes this expression a tuple;
        # it is NOT an assertion message and has no runtime effect
        self.driver.find_element_by_xpath(self.navigator_data["registration"]).click(),"No registation page found"

    def site_navigator(self,locator=None, id=False, name=False, xpath=True, link_text=False, partial_link_text=False, tag_name=False, class_name=False, css_selector=False):
        """Build an Element_Action bound to this driver and the calling
        test module's site name. Exactly one locator-strategy flag should
        be True (xpath is the default)."""
        driver = self.driver
        frame = inspect.stack()[1]
        module = inspect.getmodule(frame[0])
        filename = module.__file__
        site_name = os.path.basename(filename).replace(".py","").split("test_")[1]
        sleep(2)
        obj = Element_Action(driver,site_name=site_name,locator=locator, id=id, name=name, xpath=xpath, link_text=link_text, partial_link_text=partial_link_text, tag_name=tag_name, class_name=class_name, css_selector=css_selector)
        return obj
|
import arcpy
from arcpy import env
from arcpy.sa import *
from arcpy.sa import Con
if arcpy.CheckExtension("Spatial") == "Available":
arcpy.CheckOutExtension('Spatial') #Checkout the extension
else:
print "no spatial analyst license available" #Checkout if the extension is working
#*****************************
#Set environment and workspace
#*****************************
myworkspace="D:/Unibern/Geographie/Master/Geodatenanalyse/Projekt/MeineToolbox/ToolData" #Variable for the workspace path
arcpy.env.workspace= myworkspace #Workspace is set
arcpy.env.overwriteOutput = True #Allows to overwrite other files (e.g. to overwrite tables)
#DEM
dem=arcpy.Raster(myworkspace + "/" + "dem" + "/" + "swiss10.tif") #Reading in the DEM (10x10 m cell size)
arcpy.env.cellSize = dem #Cell size setting
arcpy.env.extent = dem #Setting the extent
arcpy.env.snapRaster = dem #Tools that honor the snap raster environment will adjust the extent of output rasters so that they match the cell alignment of the specified snap raster (ArcGIs for Desktop 2018).
#ArcGIS for Desktop (2018). Snap Raster (Environment setting) [online]. Available at: "http://desktop.arcgis.com/en/arcmap/10.3/tools/environments/snap-raster.htm" [last accessed: 01.08.2018].
#*****************************************
#Mean and standard deviation from the DEM
#*****************************************
#Focal Statistics DEM: calculating the mean for each cell from the 50x50 surrounding cells
arcpy.gp.FocalStatistics_sa(dem, myworkspace + "/" + "Results" + "/" + "tpi10x10mean.tif", "Rectangle 50 50 CELL", "MEAN", "DATA") #Focal statistics function
FocalMean=arcpy.Raster(myworkspace + "/" + "Results" + "/" + "tpi10x10mean.tif") #Reading in the newly created Raster
#Focal Stastics DEM: calculating the standard deviation for each cell from the 50x50 surrounding cells
arcpy.gp.FocalStatistics_sa(dem, myworkspace + "/" + "Results" + "/" + "tpi10x10std.tif", "Rectangle 50 50 CELL", "STD", "DATA") #Focal statistics function
tpi10x10std =arcpy.Raster(myworkspace + "/" + "Results" + "/" + "tpi10x10std.tif") #Reading in the newly created Raster
#**************
#Slope function
#**************
#Determine the slopes in degrees with the arcpy slope function:
arcpy.gp.Slope_sa(dem, myworkspace + "/" + "Results" + "/" + "slope10.tif", "DEGREE", "1") #Slope function
slope10=arcpy.Raster(myworkspace + "/" + "Results" + "/" + "slope10.tif") #Reading in the newly created slope raster
#*******************************
#TPI and landform classification
#*******************************
#Determine the TPI: difference between the actual elevation from one cell and the average elevation from the surronding cells -> DEM - mean elevation
tpi10m= dem - FocalMean #In Python you work directly with Map Algebra and not the raster calculator.
tpi10m.save(myworkspace + "/" + "Results" + "/" + "tpi10m.tif") #Saving the result
tpi10m =arcpy.Raster(myworkspace + "/" + "Results" + "/" + "tpi10m.tif") #Reading in the newly created Raster
#Dividing the standard deviation raster by 2 for later use in the classification:
#halftpi10x10std= (tpi10x10std/2) +++++ This steps creates an exit code in Python, however in the arcpy console in ArcGIS and with other IDEs this command works
#halftpi10x10std.save(myworkspace + "/" + "Results" + "/" + "hstd.tif") +++++ This steps creates an exit code in Python, however in the arcpy console in ArcGIS and with other IDEs this command works
halftpi10x10std =arcpy.Raster(myworkspace + "/" + "Results" + "/" + "hstd.tif") #Reading in the newly created Raster
#Classification with Map Algebra using conditional evaluation -> This steps creates an exit code in Python, however in the arcpy console in ArcGIS and with other IDEs this command works
#classification= Con(tpi10m < - tpi10x10std,1, Con((- halftpi10x10std > tpi10m) & (tpi10m >= - tpi10x10std),2, Con((halftpi10x10std >= tpi10m) & (tpi10m >= - halftpi10x10std) & (slope10 <= 5),3, Con((halftpi10x10std >= tpi10m) & (tpi10m >= - halftpi10x10std) & (slope10 > 5),4, Con((tpi10x10std >= tpi10m) & (tpi10m> halftpi10x10std),5, Con(tpi10m > tpi10x10std,6))))))
#classification.save(myworkspace + "/" + "Results" + "/" + "classif.tif") ++++++ This steps creates an exit code in Python, however in the arcpy console in ArcGIS and with other IDEs this command works
print "done!"
classification=arcpy.Raster(myworkspace + "/" + "Results" + "/" + "classif.tif") #Reading in the classified raster
#****************************************************************************************************
#Preparing the classified raster and the settlement shapefile for later use in the intersect function
#****************************************************************************************************
#Resample function - coarser resolution of the classified raster
arcpy.Resample_management(classification, myworkspace + "/" + "Results" + "/" + "classif200.tif", "200 200", "NEAREST") #Resampling raster to a coarser resolution (200x200m cell size), because there is an error message if the resulting shapefile exeeds the size of 2 GB, which is the case with the finer resolution.
classification200=arcpy.Raster(myworkspace + "/" + "Results" + "/" + "classif200.tif") #Reading in the resampled raster
#Convert the classified raster to a polygon for later use for the intersect function
classif200= myworkspace + "/" + "Results" + "/" + "classif200.shp" #Preparing the variable for the shapefile output
arcpy.RasterToPolygon_conversion(classification200, classif200 , "SIMPLIFY", "Value") #Raster to polygon function
#Reading in the settlement shapefile for later use in buffer function:
Settlements = myworkspace + "/" + "Siedlungen" + "/" + "CH_SiedlungenLV03.shp" #Reading in the settlement point shapefile
#******************************************************
#Erase function to exclude the lakes from the landforms
#******************************************************
#Reading in the lake shapefile for later use in the erase function:
Lakes= myworkspace + "/" + "Landcover" + "/" + "Seenmerge.shp"
#Preparing variable to save results of the erase function:
classif200land= myworkspace + "/" + "Results" + "/" + "classif200land.shp"
#Erase function: This function is used because the lakes were also classified as a landform. Since only the information of the land is of interest the erase function is used to erase all the lakes from the shapefile with the classified landforms.
arcpy.Erase_analysis(classif200, Lakes, classif200land, "")
#**********************************************
#Setting the buffers and calculating their area
#**********************************************
#Preparing the variables for the buffer function
Buffer500= myworkspace + "/" + "Landcover" + "/" + "Buffer500.shp"
Buffer1000 = myworkspace + "/" + "Landcover" + "/" + "Buffer1000.shp"
Buffer5000 = myworkspace + "/" + "Landcover" + "/" + "Buffer5000.shp"
#Setting the buffer with various radii around the settlements
arcpy.Buffer_analysis(Settlements, Buffer500, "500 Meters", "FULL", "ROUND", "NONE", "", "PLANAR")
arcpy.Buffer_analysis(Settlements, Buffer1000, "1000 Meters", "FULL", "ROUND", "NONE", "", "PLANAR")
arcpy.Buffer_analysis(Settlements, Buffer5000, "5000 Meters", "FULL", "ROUND", "NONE", "", "PLANAR")
#Buffer areas in square meters are determined
arcpy.AddGeometryAttributes_management(Buffer500, "AREA", "METERS", "SQUARE_METERS", "") #Calculation of the area of the buffers in square meters. The area is automatically saved in a newly created field "POLY_AREA"
arcpy.AddGeometryAttributes_management(Buffer1000, "AREA", "METERS", "SQUARE_METERS", "")
arcpy.AddGeometryAttributes_management(Buffer5000, "AREA", "METERS", "SQUARE_METERS", "")
#*****************************************************************
#Intersect function between the landform shapefile and the buffers
#*****************************************************************
#Intersect buffers and the landform shapefile
interland500 = myworkspace + "/" + "Landcover" + "/" + "interland500.shp" #Preparing variables for the intersect function
interland1000 = myworkspace + "/" + "Landcover" + "/" + "interland1000.shp"
interland5000 = myworkspace + "/" + "Landcover" + "/" + "interland5000.shp"
# Intersect
arcpy.Intersect_analysis([classif200land, Buffer500], interland500, "ALL", "", "INPUT") #The intersect function intersects the buffers with the classified landform shapefile
arcpy.Intersect_analysis([classif200land, Buffer1000], interland1000, "ALL", "", "INPUT")
arcpy.Intersect_analysis([classif200land, Buffer5000], interland5000, "ALL", "", "INPUT")
#************************************************************************************
#Preparing commands for the calculation of the percentages of the different landforms
#************************************************************************************
#Add Field "Percent" for later calcuations for the area percentages of the individual polygon/landform areas inside a buffer
arcpy.AddField_management(interland500, "Percent", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
arcpy.AddField_management(interland1000, "Percent", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
arcpy.AddField_management(interland5000, "Percent", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
#Add Geometry Attributes: Adds the area of the individual landform classes inside a buffer
arcpy.AddGeometryAttributes_management(interland500, "AREA", "METERS", "SQUARE_METERS", "") #The calculated area is automatically saved in a newly created field/column named "POLY_AREA"
arcpy.AddGeometryAttributes_management(interland1000, "AREA", "METERS", "SQUARE_METERS", "")
arcpy.AddGeometryAttributes_management(interland5000, "AREA", "METERS", "SQUARE_METERS", "")
#Get the area of the buffer for later calculations
AreaBuff500=arcpy.SearchCursor(Buffer500)
for Area500 in AreaBuff500:
Area500m=Area500.getValue("POLY_AREA") #The value is taken from the field "POLY_AREA" that was created in lines 118 to 120
print (Area500m) #Prints out the area in the console -> for this buffer it is: 785191,173 m2
AreaBuff1000=arcpy.SearchCursor(Buffer1000)
for Area1000 in AreaBuff1000:
Area1000m=Area1000.getValue("POLY_AREA")
print (Area1000m) #Prints out the area in the console -> for this buffer it is: 3141177 m2
AreaBuff5000=arcpy.SearchCursor(Buffer5000)
for Area5000 in AreaBuff5000:
Area5000m=Area5000.getValue("POLY_AREA")
print (Area5000m) #Prints out the area in the console -> for this buffer it is: 78537724,241 m2
#Calculate field "percentage" for the different landform classes inside the buffers
arcpy.CalculateField_management(interland500, "Percent", "[POLY_AREA] /785191.17*100", "VB", "")#The Area of the different landform classes inside the buffer is divided by the whole buffer area and multiplied by 100 to receive the percentage of coverage per polygon/landform
arcpy.CalculateField_management(interland1000, "Percent", "[POLY_AREA] /3141177*100", "VB", "")
arcpy.CalculateField_management(interland5000, "Percent", "[POLY_AREA] /78537724.241*100", "VB", "")
#Variables for table with summarized landform classes per settlement -> Sometimes in one buffer the same landform class can appear in several separated polygons.
#These variables are used later to combine these separated polygons, that the same landform only appears once per settlement in the dBase table.
SumBuffLand500 = myworkspace +"/" +"Results" + "/" + "SumBuffLand500.dbf" #These variables are later used to combine polygons with the same landform class.
SumBuffLand1000 = myworkspace +"/" +"Results" + "/" + "SumBuffLand1000.dbf"
SumBuffLand5000 = myworkspace +"/" +"Results" + "/" + "SumBuffLand5000.dbf"
#Function for summarizing the landform classes (along with their percentages of coverage in the buffer area) per settlement as stated above.
arcpy.Statistics_analysis(interland500, SumBuffLand500, "Percent SUM", "GRIDCODE;Fundort")
arcpy.Statistics_analysis(interland1000, SumBuffLand1000, "Percent SUM", "GRIDCODE;Fundort")
arcpy.Statistics_analysis(interland5000, SumBuffLand5000, "Percent SUM", "GRIDCODE;Fundort")
#Variables for the calculation of the mean surface coverage in percents per landform per buffer
LandStats500 = myworkspace + "/" + "Results" + "/" + "LandStats500.dbf"
LandStats1000 = myworkspace + "/" + "Results" + "/" + "LandStats1000.dbf"
LandStats5000 = myworkspace + "/" + "Results" + "/" + "LandStats5000.dbf"
#Calculation of the mean percentage (and the standard deviation) of land coverage of the individual landforms.
#The mean percentage is not over all settlements/buffers. If a landform e.g. only appears in 5 settlements the percentage shows what the mean coverage for those 5 settlements is and not for all 56.
arcpy.Statistics_analysis(SumBuffLand500, LandStats500, "SUM_Percen MEAN;SUM_Percen STD", "GRIDCODE")
arcpy.Statistics_analysis(SumBuffLand1000, LandStats1000, "SUM_Percen MEAN;SUM_Percen STD", "GRIDCODE")
arcpy.Statistics_analysis(SumBuffLand5000, LandStats5000, "SUM_Percen MEAN;SUM_Percen STD", "GRIDCODE")
|
#!/usr/bin/python3
import sys
if __name__ == "__main__":
contArg = len(sys.argv) - 1
print("{:d}".format(contArg), end="")
if contArg != 1:
print(" {:s}".format("arguments"), end="")
if contArg == 1:
print(" {:s}".format("argument"), end="")
if contArg == 0:
print("{:s}".format("."))
if contArg != 0:
print("{:s}".format(":"))
for i in range(1, contArg + 1):
print("{:d}: {:s}".format(i, sys.argv[i]))
|
import os
import glob
import h5py
import keras
import numpy as np
from Name import *
from PIL import Image
from keras import backend as K
from keras.utils import np_utils
from keras.models import Sequential
from keras.models import load_model
from keras.models import Model
from keras.optimizers import SGD, Adam
from keras.layers import Conv2D, MaxPooling2D, Dense, Dropout, Activation, Flatten, BatchNormalization
from keras.applications.imagenet_utils import preprocess_input
from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from utils.utils import get_random_data
def MmNet(input_shape, output_shape):
    """Build and return the small CNN classifier.

    Architecture: Conv(64)+Pool, Conv(32)+Pool+Dropout, Conv(32)+Pool+Dropout,
    then Flatten -> Dense(128) -> Dropout -> softmax over *output_shape* classes.
    A summary is printed before the model is returned.
    """
    stack = [
        # Block 1
        Conv2D(64, kernel_size=(3, 3), activation='relu', input_shape=input_shape),
        MaxPooling2D(pool_size=(2, 2)),
        # Block 2
        Conv2D(32, (3, 3), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),
        # Block 3
        Conv2D(32, (3, 3), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),
        # Classifier head
        Flatten(),
        Dense(128, activation='relu'),
        Dropout(0.5),
        Dense(output_shape, activation='softmax'),
    ]
    model = Sequential()
    for layer in stack:
        model.add(layer)
    print("-----------模型摘要----------\n")  # banner before the model summary
    model.summary()
    return model
input_shape = (100, 100, 3) # network input: 100x100 RGB images
output_shape = 2 # number of output classes
# Build the MmNet CNN defined above (the original comment called it AlexNet,
# but MmNet is a much smaller custom architecture).
model = MmNet(input_shape, output_shape)
print(model.summary())  # NOTE(review): summary() prints itself and returns None, so this also prints "None"
|
import matplotlib
matplotlib.use('Agg')
import numpy as np
import argparse
import matplotlib.pyplot as plt
from os import path, makedirs
import itertools
def load_files(authentic_file, impostor_file):
    """Load authentic and impostor match-score files.

    Each file is either a single column of scores, or a whitespace-separated
    table whose third column holds the score.

    Returns:
        Tuple (authentic_scores, impostor_scores) of float ndarrays.
    """
    def _scores(path):
        # dtype=str replaces the deprecated alias np.str, which was removed
        # in NumPy 1.24 and made this function crash on modern NumPy.
        data = np.loadtxt(path, dtype=str)
        if np.ndim(data) == 1:
            # Single-column file: every entry is itself a score.
            return data.astype(float)
        # Table format: the score lives in the third column.
        return data[:, 2].astype(float)

    return _scores(authentic_file), _scores(impostor_file)
def compute_dprime(authentic_file1, impostor_file1, l1,
                   authentic_file2, impostor_file2, l2,
                   authentic_file3, impostor_file3, l3):
    """Compute and print d-prime for up to three authentic/impostor score sets.

    The first set is always processed; sets 2 and 3 are skipped when their
    label (l2 / l3) is None.  Results are printed in order, one line per set,
    exactly as the original triplicated implementation did.
    """
    def _dprime(authentic, impostor):
        # d' = |mu_auth - mu_imp| / sqrt((var_auth + var_imp) / 2)
        return (abs(np.mean(authentic) - np.mean(impostor)) /
                np.sqrt(0.5 * (np.var(authentic) + np.var(impostor))))

    score_sets = [(authentic_file1, impostor_file1, l1),
                  (authentic_file2, impostor_file2, l2),
                  (authentic_file3, impostor_file3, l3)]
    for auth_file, imp_file, label in score_sets:
        if label is None:
            continue
        authentic_score, impostor_score = load_files(auth_file, imp_file)
        print('d-prime for {} is: {} '.format(
            label, _dprime(authentic_score, impostor_score)))
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Plot Score Histogram')
    # Three parallel authentic/impostor/label argument groups.
    for n in ('1', '2', '3'):
        parser.add_argument('-authentic' + n, '-a' + n,
                            help='Authentic ' + n + ' scores.')
        parser.add_argument('-impostor' + n, '-i' + n,
                            help='Impostor ' + n + ' scores.')
        parser.add_argument('-label' + n, '-l' + n, help='Label ' + n + '.')
    args = parser.parse_args()
    compute_dprime(args.authentic1, args.impostor1, args.label1,
                   args.authentic2, args.impostor2, args.label2,
                   args.authentic3, args.impostor3, args.label3)
|
import os
from django.conf import settings
# NOTE(review): django.conf.urls.defaults was removed in Django 1.6, and
# patterns() in Django 1.10 -- this module targets a very old Django release.
from django.conf.urls.defaults import include, patterns, url
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.generic import TemplateView
# Discover admin modules in the installed apps.
admin.autodiscover()
urlpatterns = patterns('',
    (r'^grappelli/', include('grappelli.urls')),
    (r'^admin/doc/', include('django.contrib.admindocs.urls')),
    (r'^admin/', include(admin.site.urls)),
    # Homepage
    (r'^$', TemplateView.as_view(template_name='home.html')),
)
# Static files only get served by django in DEBUG mode
if settings.DEBUG:
    # Add routing js view to path
    from js_routing.functions import STATIC_FILE
    parts = [url(r'^%sjs/%s' % (settings.STATIC_URL[1:], STATIC_FILE),
        'js_routing.views.routing_js') ]
    # Add uploads (user media) served directly by Django in development.
    parts.append(url(r'^%s(?P<path>.*)' % settings.MEDIA_URL[1:],
        'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}))
    # Add local directory to root: each file under DEV_STATIC_ROOT/local gets
    # its own URL pattern at the site root.
    local_path = os.path.join(settings.DEV_STATIC_ROOT, 'local')
    if os.path.exists(local_path):
        for fname in os.listdir(local_path):
            parts.append(url(r'^(?P<path>%s)' % fname,
                'django.views.static.serve', {'document_root': local_path}))
    urlpatterns += patterns('', *parts)
    urlpatterns += staticfiles_urlpatterns()
|
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import regularizers
from keras import backend as K
from keras.optimizers import Adam
from keras import losses
from keras.utils import np_utils, generic_utils
import numpy as np
import scipy as sp
import random
import scipy.io
from scipy.stats import mode
Experiments = 1  # number of full active-learning experiments to run
batch_size = 128
nb_classes = 10  # MNIST digit classes
#use a large number of epochs
nb_epoch = 50
# input image dimensions
img_rows, img_cols = 28, 28
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
nb_pool = 2
# convolution kernel size
nb_conv = 3
score=0
all_accuracy = 0
acquisition_iterations = 19  # pool-based acquisition rounds per experiment
#use a large number of dropout iterations
dropout_iterations = 50  # MC-dropout forward passes per acquisition round
Queries = 50  # points acquired from the pool per round
Experiments_All_Accuracy = np.zeros(shape=(acquisition_iterations+1))
# Active-learning experiment: train an initial CNN on a tiny balanced MNIST
# subset, then repeatedly acquire the most informative pool points (MC-dropout
# uncertainty weighted by a density term) and retrain.
for e in range(Experiments):
	print('Experiment Number ', e)
	# the data, shuffled and split between train and test sets
	(X_train_All, y_train_All), (X_test, y_test) = mnist.load_data()
	# Reshape to the backend's expected channel layout.
	if K.image_data_format() == 'channels_first':
		X_train_All = X_train_All.reshape(X_train_All.shape[0], 1, img_rows, img_cols)
		X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
		input_shape = (1, img_rows, img_cols)
	else:
		X_train_All = X_train_All.reshape(X_train_All.shape[0], img_rows, img_cols, 1)
		X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
		input_shape = (img_rows, img_cols, 1)
	# Shuffle, then carve out validation (10k-15k) and pool (20k-60k) slices.
	random_split = np.asarray(random.sample(range(0,X_train_All.shape[0]), X_train_All.shape[0]))
	X_train_All = X_train_All[random_split, :, :, :]
	y_train_All = y_train_All[random_split]
	X_valid = X_train_All[10000:15000, :, :, :]
	y_valid = y_train_All[10000:15000]
	X_Pool = X_train_All[20000:60000, :, :, :]
	y_Pool = y_train_All[20000:60000]
	# Subsample the pool per class: keep 1/10 of classes 0-7 but ALL of
	# classes 8 and 9 (note the //1 below) -- presumably a deliberate
	# class-imbalance setup; TODO confirm the //1 is intentional.
	pidx_0 = np.array(np.where(y_Pool==0)).T
	pidx_0 = pidx_0[0:(pidx_0.shape[0]//10),0]
	pX_0 = X_Pool[pidx_0,:,:,:]
	py_0 = y_Pool[pidx_0]
	pidx_1 = np.array(np.where(y_Pool==1)).T
	pidx_1 = pidx_1[0:(pidx_1.shape[0]//10),0]
	pX_1 = X_Pool[pidx_1,:,:,:]
	py_1 = y_Pool[pidx_1]
	pidx_2 = np.array(np.where(y_Pool==2)).T
	pidx_2 = pidx_2[0:(pidx_2.shape[0]//10),0]
	pX_2 = X_Pool[pidx_2,:,:,:]
	py_2 = y_Pool[pidx_2]
	pidx_3 = np.array(np.where(y_Pool==3)).T
	pidx_3 = pidx_3[0:(pidx_3.shape[0]//10),0]
	pX_3 = X_Pool[pidx_3,:,:,:]
	py_3 = y_Pool[pidx_3]
	pidx_4 = np.array(np.where(y_Pool==4)).T
	pidx_4 = pidx_4[0:(pidx_4.shape[0]//10),0]
	pX_4 = X_Pool[pidx_4,:,:,:]
	py_4 = y_Pool[pidx_4]
	pidx_5 = np.array(np.where(y_Pool==5)).T
	pidx_5 = pidx_5[0:(pidx_5.shape[0]//10),0]
	pX_5 = X_Pool[pidx_5,:,:,:]
	py_5 = y_Pool[pidx_5]
	pidx_6 = np.array(np.where(y_Pool==6)).T
	pidx_6 = pidx_6[0:(pidx_6.shape[0]//10),0]
	pX_6 = X_Pool[pidx_6,:,:,:]
	py_6 = y_Pool[pidx_6]
	pidx_7 = np.array(np.where(y_Pool==7)).T
	pidx_7 = pidx_7[0:(pidx_7.shape[0]//10),0]
	pX_7 = X_Pool[pidx_7,:,:,:]
	py_7 = y_Pool[pidx_7]
	pidx_8 = np.array(np.where(y_Pool==8)).T
	pidx_8 = pidx_8[0:(pidx_8.shape[0]//1),0]
	pX_8 = X_Pool[pidx_8,:,:,:]
	py_8 = y_Pool[pidx_8]
	pidx_9 = np.array(np.where(y_Pool==9)).T
	pidx_9 = pidx_9[0:(pidx_9.shape[0]//1),0]
	pX_9 = X_Pool[pidx_9,:,:,:]
	py_9 = y_Pool[pidx_9]
	# Rebuild and reshuffle the (now imbalanced) pool.
	X_Pool = np.concatenate((pX_0,pX_1,pX_2,pX_3,pX_4,pX_5,pX_6,pX_7,pX_8,pX_9),axis=0)
	y_Pool = np.concatenate((py_0,py_1,py_2,py_3,py_4,py_5,py_6,py_7,py_8,py_9),axis=0)
	random_pool_split = np.asarray(random.sample(range(0,X_Pool.shape[0]),X_Pool.shape[0]))
	X_Pool = X_Pool[random_pool_split,:,:,:]
	y_Pool = y_Pool[random_pool_split]
	X_train_All = X_train_All[0:10000, :, :, :]
	y_train_All = y_train_All[0:10000]
	#training data to have equal distribution of classes
	# Balanced initial training set: the first 5 examples of each digit class.
	idx_0 = np.array( np.where(y_train_All==0) ).T
	idx_0 = idx_0[0:5,0]
	X_0 = X_train_All[idx_0, :, :, :]
	y_0 = y_train_All[idx_0]
	idx_1 = np.array( np.where(y_train_All==1) ).T
	idx_1 = idx_1[0:5,0]
	X_1 = X_train_All[idx_1, :, :, :]
	y_1 = y_train_All[idx_1]
	idx_2 = np.array( np.where(y_train_All==2) ).T
	idx_2 = idx_2[0:5,0]
	X_2 = X_train_All[idx_2, :, :, :]
	y_2 = y_train_All[idx_2]
	idx_3 = np.array( np.where(y_train_All==3) ).T
	idx_3 = idx_3[0:5,0]
	X_3 = X_train_All[idx_3, :, :, :]
	y_3 = y_train_All[idx_3]
	idx_4 = np.array( np.where(y_train_All==4) ).T
	idx_4 = idx_4[0:5,0]
	X_4 = X_train_All[idx_4, :, :, :]
	y_4 = y_train_All[idx_4]
	idx_5 = np.array( np.where(y_train_All==5) ).T
	idx_5 = idx_5[0:5,0]
	X_5 = X_train_All[idx_5, :, :, :]
	y_5 = y_train_All[idx_5]
	idx_6 = np.array( np.where(y_train_All==6) ).T
	idx_6 = idx_6[0:5,0]
	X_6 = X_train_All[idx_6, :, :, :]
	y_6 = y_train_All[idx_6]
	idx_7 = np.array( np.where(y_train_All==7) ).T
	idx_7 = idx_7[0:5,0]
	X_7 = X_train_All[idx_7, :, :, :]
	y_7 = y_train_All[idx_7]
	idx_8 = np.array( np.where(y_train_All==8) ).T
	idx_8 = idx_8[0:5,0]
	X_8 = X_train_All[idx_8, :, :, :]
	y_8 = y_train_All[idx_8]
	idx_9 = np.array( np.where(y_train_All==9) ).T
	idx_9 = idx_9[0:5,0]
	X_9 = X_train_All[idx_9, :, :, :]
	y_9 = y_train_All[idx_9]
	X_train = np.concatenate((X_0, X_1, X_2, X_3, X_4, X_5, X_6, X_7, X_8, X_9), axis=0 )
	y_train = np.concatenate((y_0, y_1, y_2, y_3, y_4, y_5, y_6, y_7, y_8, y_9), axis=0 )
	print('X_train shape:', X_train.shape)
	print(X_train.shape[0], 'train samples')
	print('Distribution of Training Classes:', np.bincount(y_train))
	# Normalize pixel values to [0, 1] and one-hot encode the labels.
	X_train = X_train.astype('float32')
	X_test = X_test.astype('float32')
	X_valid = X_valid.astype('float32')
	X_Pool = X_Pool.astype('float32')
	X_train /= 255
	X_valid /= 255
	X_Pool /= 255
	X_test /= 255
	Y_test = np_utils.to_categorical(y_test, nb_classes)
	Y_valid = np_utils.to_categorical(y_valid, nb_classes)
	Y_Pool = np_utils.to_categorical(y_Pool, nb_classes)
	Y_train = np_utils.to_categorical(y_train, nb_classes)
	print('Training Model Without Acquisitions in Experiment', e)
	# L2 weight decay scaled inversely with the current training-set size.
	c = 3.5
	Weight_Decay = c / float(X_train.shape[0])
	model = Sequential()
	model.add(Conv2D(32, kernel_size=(3, 3),activation='relu',input_shape=input_shape))
	model.add(Conv2D(64, (3, 3), activation='relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))
	model.add(Dropout(0.25))
	model.add(Flatten())
	model.add(Dense(128, kernel_regularizer = regularizers.l2(Weight_Decay),activation='relu'))
	model.add(Dropout(0.5))
	model.add(Dense(nb_classes, activation='softmax'))
	model.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])
	hist = model.fit(X_train, Y_train, batch_size=batch_size, epochs=nb_epoch, verbose=0, validation_data=(X_valid, Y_valid))
	print('Evaluating Test Accuracy Without Acquisition')
	score = model.evaluate(X_test, Y_test, verbose=0)
	print('Test loss:', score[0])
	print('Test accuracy:', score[1])
	print('Starting Active Learning in Experiment ', e)
	# Acquisition loop: score a random pool subset, pick the top `Queries`
	# points, move them into the training set, and retrain from scratch.
	for i in range(acquisition_iterations):
		print('POOLING ITERATION', i)
		pool_subset = 2000
		pool_subset_dropout = np.asarray(random.sample(range(0,X_Pool.shape[0]), pool_subset))
		X_Pool_Dropout = X_Pool[pool_subset_dropout, :, :, :]
		y_Pool_Dropout = y_Pool[pool_subset_dropout]
		All_Dropout_Classes = np.zeros(shape=(X_Pool_Dropout.shape[0],1))
		score_All = np.zeros(shape=(X_Pool_Dropout.shape[0], nb_classes))
		# Average class probabilities over repeated stochastic forward passes.
		for d in range(dropout_iterations):
			print ('Dropout Iteration', d)
			dropout_score = model.predict(X_Pool_Dropout,batch_size=batch_size, verbose=0)
			score_All = score_All + dropout_score
		p_y_x = np.divide(score_All, dropout_iterations)
		# Uncertainty: 1 - max predicted probability (variation-ratio style).
		maxpyx = 1 - p_y_x.max(axis=1)
		# NOTE(review): the comprehension variable `i` below shadows the
		# acquisition-loop index `i`; harmless here but fragile.
		if K.image_data_format() == 'channels_first':
			normalized_x_pool = np.array([X_Pool_Dropout[i,0] / np.linalg.norm(X_Pool_Dropout[i,0]) for i in range(X_Pool_Dropout.shape[0])])
		else:
			normalized_x_pool = np.array([X_Pool_Dropout[i,:,:,0] / np.linalg.norm(X_Pool_Dropout[i,:,:,0]) for i in range(X_Pool_Dropout.shape[0])])
		# Density term: similarity of each candidate to the whole subset.
		density_x_pool = [np.sum(normalized_x_pool[i] * normalized_x_pool) for i in range(X_Pool_Dropout.shape[0])]
		a_1d = maxpyx * density_x_pool
		# Indices of the `Queries` highest-scoring candidates.
		x_pool_index = a_1d.argsort()[-Queries:][::-1]
		Pooled_X = X_Pool_Dropout[x_pool_index, :, :, :]
		Pooled_Y = y_Pool_Dropout[x_pool_index]
		# Remove the acquired points from the pool, keep the rest.
		delete_Pool_X = np.delete(X_Pool, (pool_subset_dropout), axis=0)
		delete_Pool_Y = np.delete(y_Pool, (pool_subset_dropout), axis=0)
		delete_Pool_X_Dropout = np.delete(X_Pool_Dropout, (x_pool_index), axis=0)
		delete_Pool_Y_Dropout = np.delete(y_Pool_Dropout, (x_pool_index), axis=0)
		X_Pool = np.concatenate((delete_Pool_X, delete_Pool_X_Dropout), axis=0)
		y_Pool = np.concatenate((delete_Pool_Y, delete_Pool_Y_Dropout), axis=0)
		print('Acquised Points added to training set')
		X_train = np.concatenate((X_train, Pooled_X), axis=0)
		y_train = np.concatenate((y_train, Pooled_Y), axis=0)
		Y_train = np_utils.to_categorical(y_train, nb_classes)
		# Retrain a fresh model on the enlarged training set.
		c = 3.5
		Weight_Decay = c / float(X_train.shape[0])
		model = Sequential()
		model.add(Conv2D(32, kernel_size=(3, 3),activation='relu',input_shape=input_shape))
		model.add(Conv2D(64, (3, 3), activation='relu'))
		model.add(MaxPooling2D(pool_size=(2, 2)))
		model.add(Dropout(0.25))
		model.add(Flatten())
		model.add(Dense(128, kernel_regularizer = regularizers.l2(Weight_Decay),activation='relu'))
		model.add(Dropout(0.5))
		model.add(Dense(nb_classes, activation='softmax'))
		model.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])
		hist = model.fit(X_train, Y_train, batch_size=batch_size, epochs=nb_epoch, verbose=0, validation_data=(X_valid, Y_valid))
		print('Evaluate Model Test Accuracy with pooled points')
		print('Evaluating Test Accuracy Without Acquisition')
		score = model.evaluate(X_test, Y_test, verbose=0)
		print('Test loss:', score[0])
		print('Test accuracy:', score[1])
		print('Use this trained model with pooled points for Dropout again')
|
""" Views """
import decimal
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Div, Field, Layout, Submit
from django import forms
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.db import IntegrityError
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from .models import Bid, Category, Comment, Listing, User, Watchlist
class ListingForm(forms.ModelForm):
    """ModelForm for creating a new auction Listing."""

    class Meta:
        model = Listing
        fields = ["category", "title", "description", "starting_bid", "image_url"]

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Configure crispy-forms rendering for the create-listing page.
        helper = FormHelper()
        helper.form_action = "create_listing"
        helper.help_text_inline = True
        helper.add_input(Submit("submit", "Create Listing"))
        self.helper = helper
class BidForm(forms.ModelForm):
    """ModelForm for placing a Bid on a Listing."""

    class Meta:
        model = Bid
        fields = ["amount"]

    def __init__(self, *args, **kwargs):
        current_high = kwargs.pop("high_bid", None)
        listing_id = kwargs.pop("listing_id", None)
        super().__init__(*args, **kwargs)
        if current_high:
            # The next bid must beat the current high bid by at least a cent.
            self.fields["amount"].widget.attrs["min"] = current_high + decimal.Decimal(0.01)
        # Post back to the listing's own detail URL.
        helper = FormHelper()
        helper.form_action = listing_id
        helper.help_text_inline = True
        helper.add_input(Submit("place_bid", "Place Bid"))
        self.helper = helper
class CommentForm(forms.ModelForm):
    """ModelForm for adding a Comment to a Listing."""

    class Meta:
        model = Comment
        fields = ["body"]

    def __init__(self, *args, **kwargs):
        listing_id = kwargs.pop("listing_id", None)
        super().__init__(*args, **kwargs)
        # Post back to the listing's own detail URL.
        helper = FormHelper()
        helper.form_action = listing_id
        helper.help_text_inline = True
        helper.add_input(Submit("add_comment", "Add Comment"))
        self.helper = helper
class CloseListingForm(forms.ModelForm):
    """Form the lister uses to close their own auction (no editable fields)."""

    class Meta:
        model = Listing
        fields = []

    def __init__(self, *args, **kwargs):
        listing_id = kwargs.pop("listing_id", None)
        super().__init__(*args, **kwargs)
        # Post back to the listing's own detail URL.
        helper = FormHelper()
        helper.form_action = listing_id
        helper.help_text_inline = True
        helper.add_input(Submit("close_auction", "Close Auction"))
        self.helper = helper
def index(request):
    """Home page: show every listing that is still open, newest first."""
    open_listings = Listing.objects.filter(closed=False).order_by("-created_at", "-pk")
    return render(request, "auctions/index.html", {"listings": open_listings})
@login_required
def add_listing_bid(request, listing):
    """Record a bid on a listing, then redirect back to the listing page.

    Listers may not bid on their own listings; their POST is ignored.
    Fix: the original returned None (an invalid Django view response) when
    the lister submitted a bid -- now every path returns the redirect.
    """
    if request.user.username != listing.user.username:
        form = BidForm(request.POST)
        if form.is_valid():
            bid = Bid(amount=form.cleaned_data["amount"],
                      user=request.user, listing=listing)
            bid.save()
    return HttpResponseRedirect(reverse("get_listing", args=(listing.id,)))
@login_required
def add_listing_comment(request, listing):
    """Attach a comment to a listing, then return to the listing page."""
    form = CommentForm(request.POST)
    if form.is_valid():
        new_comment = Comment(
            body=form.cleaned_data["body"], listing=listing, user=request.user
        )
        new_comment.save()
    return HttpResponseRedirect(reverse("get_listing", args=(listing.id,)))
def check_listing_on_watchlist(request, listing):
    """Return True when the listing is on the current user's watchlist.

    Anonymous users never have a watchlist, so they always get False.
    """
    if not request.user.is_authenticated:
        return False
    # .exists() asks the database for a boolean instead of fetching rows
    # just to evaluate queryset truthiness.
    return Watchlist.objects.filter(user=request.user, listing=listing).exists()
def set_listing(request, listing):
    """Handle POSTs on a listing: place a bid, add a comment, or close it."""
    if not request.user.is_authenticated:
        return HttpResponseRedirect(reverse("login"))
    posted = request.POST
    if "place_bid" in posted:
        return add_listing_bid(request, listing)
    if "add_comment" in posted:
        return add_listing_comment(request, listing)
    if "close_auction" in posted:
        listing.closed = True
        listing.save()
    # Redirect on POST so a browser refresh cannot resubmit the form.
    return HttpResponseRedirect(reverse("get_listing", args=(listing.id,)))
def get_active_listing(request, listing):
    """Build the view state for an open listing.

    Returns (close_listing_form, bid_form, bid_message) depending on whether
    the viewer is the lister, the current high bidder, or any other user.
    """
    listing_key = str(listing.id)
    viewer = request.user.username
    # The lister may close the auction but never bids on it.
    if viewer == listing.user.username:
        close_form = CloseListingForm(listing_id=listing_key)
        if not listing.highest_bid_username:
            message = "There are no bids on this listing"
        elif listing.bid_count > 1:
            message = f"There are {listing.bid_count} bids on this listing ({listing.highest_bid_username} has the highest bid)"
        else:
            message = f"There is {listing.bid_count} bid on this listing ({listing.highest_bid_username} has the highest bid)"
        return close_form, None, message
    # Everyone else gets a bid form.
    new_bid_form = BidForm(high_bid=listing.highest_bid_amount, listing_id=listing_key)
    if listing.highest_bid_username == viewer:
        return None, new_bid_form, "You currently have the highest bid on this listing"
    return None, new_bid_form, "Enter a bid"
def get_closed_listing(request, listing):
    """Return the status message shown for a closed listing."""
    # Lister's view of their own closed auction.
    if listing.user.username == request.user.username:
        if not listing.highest_bid_username:
            return "There were no bids on this listing"
        return f"{listing.highest_bid_username} won the auction"
    # Winning bidder's view.  (Fixed user-facing typo: "acution" -> "auction".)
    if listing.highest_bid_username == request.user.username:
        return "You won this auction"
    # Any other user.
    return "This auction is closed"
def get_listing(request, listing_id):
    """Listing detail page: renders bid/comment forms and handles POSTs."""
    listing = Listing.objects.get(pk=listing_id)
    if request.method == "POST":
        return set_listing(request, listing)
    bid_form = None
    bid_message = None
    close_listing_form = None
    high_bid_amount = listing.highest_bid_amount
    high_bid_user = None  # kept for the template context; never populated here
    comment_form = None
    # One watchlist lookup is enough (it already handles anonymous users);
    # the original redundantly recomputed this inside the authenticated branch.
    listing_on_watchlist = check_listing_on_watchlist(request, listing)
    # for authenticated users
    if request.user.is_authenticated:
        comment_form = CommentForm(listing_id=listing_id)
        # active listing logic
        if not listing.closed:
            close_listing_form, bid_form, bid_message = get_active_listing(
                request, listing
            )
        # closed listing logic
        else:
            bid_message = get_closed_listing(request, listing)
    else:
        bid_form = BidForm(high_bid=listing.highest_bid_amount, listing_id=listing_id)
        bid_message = "Login to place a bid on this listing"
    listing_comments = listing.comments.all()
    return render(
        request,
        "auctions/listing.html",
        {
            "listing": listing,
            "high_bid": high_bid_amount,
            "high_bid_user": high_bid_user,
            "bid_form": bid_form,
            "bid_message": bid_message,
            "close_listing_form": close_listing_form,
            "listing_comments": listing_comments,
            "comment_form": comment_form,
            "listing_on_watchlist": listing_on_watchlist,
        },
    )
def get_categories(request):
    """Show every category that currently has at least one open listing."""
    active_categories = Category.objects.filter(listings__closed=False)
    return render(request, "auctions/categories.html", {"categories": active_categories})
def get_categories_listings(request, category_id):
    """Show the open listings for one category.

    Fix: the original did `listings[0]` unconditionally, raising IndexError
    when a category had no open listings; now the title falls back to the
    category record itself.
    """
    listings = Listing.objects.filter(category__id=category_id, closed=False)
    if listings:
        category_title = listings[0].category.title
    else:
        category_title = Category.objects.get(pk=category_id).title
    return render(
        request,
        "auctions/category_listings.html",
        {"listings": listings, "category_title": category_title},
    )
@login_required
def get_watchlist(request):
    """Render the listings the current user has placed on their watchlist."""
    watched = Listing.objects.filter(watched_listings__user=request.user)
    return render(request, "auctions/watchlist.html", {"listings": watched})
@login_required
def add_to_watchlist(request, listing_id):
    """Add the listing to the current user's watchlist, then redirect back.

    get_or_create makes the endpoint idempotent: re-posting the same listing
    no longer creates a duplicate Watchlist row.
    """
    Watchlist.objects.get_or_create(user=request.user, listing_id=listing_id)
    return HttpResponseRedirect(reverse("get_listing", args=(listing_id,)))
@login_required
def remove_from_watchlist(request, listing_id):
    """Remove the listing from the current user's watchlist, then redirect.

    filter(...).delete() is a no-op when no entry exists, so a stale or
    repeated request no longer raises Watchlist.DoesNotExist (HTTP 500).
    """
    Watchlist.objects.filter(user=request.user, listing_id=listing_id).delete()
    return HttpResponseRedirect(reverse("get_listing", args=(listing_id,)))
def login_view(request):
    """Sign a user in; GET shows the form, POST attempts authentication."""
    if request.method == "POST":
        # .get() avoids a KeyError (HTTP 500) when a field is missing from
        # the POST body; authenticate() simply fails for empty credentials.
        username = request.POST.get("username", "")
        password = request.POST.get("password", "")
        user = authenticate(request, username=username, password=password)
        if user is not None:
            login(request, user)
            return HttpResponseRedirect(reverse("index"))
        return render(
            request,
            "auctions/login.html",
            {"message": "Invalid username and/or password."},
        )
    return render(request, "auctions/login.html")
def logout_view(request):
    """Sign the current user out and redirect to the index page."""
    logout(request)
    return HttpResponseRedirect(reverse("index"))
def register(request):
    """Register a new account; GET shows the form, POST creates the user."""
    if request.method == "POST":
        # .get() keeps a malformed POST from raising KeyError (HTTP 500);
        # empty strings fall through to the normal validation paths.
        username = request.POST.get("username", "")
        email = request.POST.get("email", "")
        password = request.POST.get("password", "")
        confirmation = request.POST.get("confirmation", "")
        if password != confirmation:
            return render(
                request, "auctions/register.html", {"message": "Passwords must match."}
            )
        # Attempt to create the new user; a duplicate username violates the
        # unique constraint and surfaces as IntegrityError.
        try:
            user = User.objects.create_user(username, email, password)
            user.save()
        except IntegrityError:
            return render(
                request,
                "auctions/register.html",
                {"message": "Username already taken."},
            )
        login(request, user)
        return HttpResponseRedirect(reverse("index"))
    return render(request, "auctions/register.html")
@login_required
def create_listing(request):
    """ Create an auction listing """
    if request.method == "POST":
        # NOTE(review): raw POST fields are used directly instead of being
        # validated through ListingForm (which the GET branch renders). A
        # missing field raises KeyError and an unknown category pk raises
        # Category.DoesNotExist -- both surface as HTTP 500. Consider
        # validating with ListingForm(request.POST) here.
        category = Category.objects.get(pk=request.POST["category"])
        title = request.POST["title"]
        description = request.POST["description"]
        image_url = request.POST["image_url"]
        starting_bid = request.POST["starting_bid"]
        user = request.user
        listing = Listing(
            title=title,
            description=description,
            category=category,
            user=user,
            image_url=image_url,
            starting_bid=starting_bid,
        )
        listing.save()
        return HttpResponseRedirect(reverse("index"))
    return render(request, "auctions/create_listing.html", {"form": ListingForm()})
|
__author__ = 'iceke'
class SparkData(object):
    """Plain data container describing one Spark application's state.

    State lives in name-mangled "private" attributes exposed through
    Java-style get_*/set_* accessors; the accessors are kept as-is because
    callers depend on their names.
    """
    def __init__(self, property):
        # NOTE(review): `property` shadows the builtin of the same name;
        # kept for interface compatibility.
        self.__property = property
        self.__app_id = ''
        self.__app_name = ''
        # NOTE(review): "__stage__num" (double underscore in the middle)
        # looks like a typo for "__stage_num", but it is used consistently
        # by the accessors below, so it is left unchanged.
        self.__stage__num = 0
        self.__total_time = ''
        self.__status = 'Finished'
        self.__finished_stages = [] # only running spark need these variable
        self.__running_stages = []
        self.__failed_stages = []
    def object2dict(self, obj):
        # Convert an arbitrary object to a dict: class/module tags plus its
        # __dict__. For SparkData instances the keys keep Python's
        # name-mangled form (e.g. "_SparkData__status").
        d = {}
        d['__class__'] = obj.__class__.__name__
        d['__module__'] = obj.__module__
        d.update(obj.__dict__)
        return d
    # Trivial accessor pairs for every field follow.
    def set_property(self, property):
        self.__property = property
    def get_property(self):
        return self.__property
    def set_app_name(self, app_name):
        self.__app_name = app_name
    def get_app_name(self):
        return self.__app_name
    def set_stage_num(self, stage_num):
        self.__stage__num = stage_num
    def get_stage_num(self):
        return self.__stage__num
    def set_total_time(self, total_time):
        self.__total_time = total_time
    def get_total_time(self):
        return self.__total_time
    def set_app_id(self, app_id):
        self.__app_id = app_id
    def get_app_id(self):
        return self.__app_id
    def set_status(self, status):
        self.__status = status
    def get_status(self):
        return self.__status
    def set_running_stages(self, running_stages):
        self.__running_stages = running_stages
    def get_running_stages(self):
        return self.__running_stages
    def set_finished_stages(self, finished_stages):
        self.__finished_stages = finished_stages
    def get_finished_stages(self):
        return self.__finished_stages
    def set_failed_stages(self, failed_stages):
        self.__failed_stages = failed_stages
    def get_failed_stages(self):
        return self.__failed_stages
|
from kivy.gesture import GestureDatabase
from kivy.uix.boxlayout import BoxLayout
from kivy.gesture import Gesture
# 生成的手势字符串
gesture_strings = {
'top_to_bottom_line':'eNptl2tQlFUYxxcvqGQBooYoQWW6lindL2a7lbXdJURbBRQW3lwEYZ+9KKgHQQ26WCJhiIZShlommRRh4YjD4EzNVBRdjKjB8UszlVnTB0c/2Nl3nmfmP2fcOQMvv/2dPc/5v897gOqRhSXFayrnrLRC4UjQivPwd3LMqqEYRSOc3tEOh2NtcVHYTyM97qmXo69hb6yGfqt4pT9Mozzuklb9o2OEN15/5U9aEQiWF0UKwzTa444NDZ7srp7nHaPfDoWD5SVWiGLzaMwVl15kC3E0VhcwTlGc014rUF5cFg7RVXk03nnFaZlRIY6u1rOuURTv9MboWRWU4HFtbupPavraYYNKSvS4fulomdrRkhzx+aP2BEVJTr/2tg45pw3NqPZr42df3RRfnRgTFU0yjZ/aOpvaOuvZmKzoWtP4YU/v4j29mWwkK5piGgPRwvoT2EhRNNU0vq0f0qOfjWmKUk3jm6SBtKSBdDauU5RmG+H95yfu/8sRNb6kAyl0QFZJV3S9aZy6EDv5QqwYNyi60Tbc/zoS/vndNnqrrKQqS4zpim6KGicCqwIJq0K2cbwrmNoVFGOGopmm0dmXMbkvQwynolm2UVNZn1i53d7L4axTqVmnHGzcrOgW09jROHtS42wxZiu61TBOVJwNJJ8lMeYommsa+7LHJmWPlcQyFN1mGkfP5U44lyvG7YruMI3jOa7UHJfs5U5Fd5nGyYZLiQ2XpIPuVnSPafS1ntnRemYnG/cqus80vkjZ1JyySYz7Fc0zja+621u62w+y8YCi+abRv3umHqfZeFCRyzS++3O+HtJjbkUPmcb3y/9o1IONhxU9Yho/Xpy+5OJ0LxsLFD1qGqc7/4sONh5T5DGNwVDfslBfLhuPK3rCNH5dmq7HEBtPKnrKNH6rOaTHMBtPK3rGNIYXLKzXg41nFS00jTOlg9tLBxsiPss7PnpeFQYtq4yPn8w8es7pcZe67FPQ497YHz33YmopC2FPFDpqaRHCvfZFLWUjrGdzMcJqhksQBhg+r2G+fa1hJkMvQjfDpQjTGS5DmMAwB6GDYS7AqvMM8xAOM1yOsJ/hCoQ9nFI+wiNsFiDcxin5EEoghQglkCKEEoiFUAJ5AeFcXmglQknJj1BSKkYoKa0CqCSlEoQSSClC6ZDVCCWQMoStDMsRSiABhPkMCaHsPYgwg2EIYRoHEoau23Ceb1wEodz3NRqm8/QNUvxahFJ8BUIpvhKh3Lh1GtqXMRpKnesRyu3YAHC9JK8QSshVCKWkjfqPlmFefb30UjXChRxIDUIpaRNCKWmzhj280Lq/efoWhFLSiwilpFqEklIdQknpJYTStC8jlP58BWClrP4qwo1c51aEstBrCGWbrwOskPbehlCaoR6h7Gg7Qjc3WAPCeC7pDYBr5RY3IpQd7UAoeb6JUFZvQiiPzE6E8sg0I5SQdyGUkHcDXCO/Ed5CKId/C0IpaQ9CKWkvQhdPb0Uot+NtgBE5ad9BKIHsQyg9/y5C2XsbQjkq9yOU1Q8ADMvJcBChrP4eQtn7+wgzufhDCCX5DwCG5Fw6jPAE19mOUBb6EKFs8wjA4GWe/hFCae+jCKXrOhDKNj9GKCF/glAerk75v0mfIUE5WD5FKGdyF0JJ/piG1fKZ0oqfWRFfgXecvg6Xl1rBgrJCiz73uI41R1+7vKP0G2UFqy3q9kZ9Oh7xzfkf8O5LNA==',
'left_to_right_line':'eNpt13mczGUcB/DZHJt1LQkldgmNIyGENvu4ekjH5liDXXsZO8va3WdnxlE9GZJORykisR0owqbDUdqVI0mldSbHEHKEVV4dXpU+87xm9qXnY/7g5z2f7/N5fs/M67c2UClrbM74Se2y3V6fv9AdI8N/K0erKSpKqxucrioOh2NCziifR1WSwltyNfRyVQV63DnZHp+qLEWsw7xctfFHeKW0gsL8Uf4sn6oiRXT/OsF+a/9yReNtr68wf6zbq6qmqujrVg8ygRh1IzZQTasYp+kqyM/J83lV9VRVw3ndsaRQIEbVxFQtrWo7XVGYmqhiZcnBGkmpgxacNDBJ1ZGJgdA/c0v9mZ5Quq5WNzk9yB2osrfbquQDHkrU0+pmk9h7dXj9anV3hBLeLWnnepUEwon6WjUwiT1t19WecbFFKDGia+bUs3Md4URDrW4xibKWCedOr2jIiVu1amQSu+I2be+dXZ0Tt2nV2CR2dpzRP6Hv35xoolWcSWxvMS/q0sYTnIjXqqlJbC6+EBh4+U9ONNPqdpMorb7cm7KyGIkSV9GGZZ3LIonmWrUwibX/eFzNBzg50VKrO0xi9bY/5jTxl3PCqVUrk1hszsPBidZatQklEgc3GHxqfcp1Em21utMkivbMwceRxIl2Wt1lEsXTuhRsrRLkRHutOpjEukXN8Ok25ERHre42idKUaNxNc0500qqzSWzx/FurV6METnTR6h6T2H546azgieus0VWrbiax8+zC/W1yBSe6a3WvSeyqFzqQkZxI0Oo+kyib2mdNsEd3TvTQKtEkdp/vNDm6aa1QwtvEf3nIzMg3WWjV0yT2zep+pfW2ZE700qq3SRzIOOxYcuYKJ/po1dckDuL938934cT9WkmTOJS3W5YX1uVEP636m8SRmgs79PQP5MQDWg0wiWBM6Iu6mhMPavWQSRzr2WxYQVxjTjys1SMmcbx16Kt+8X8Jt6tG6HmVVeh254UfP0mp6lGnFMkB87iTQiWZi+lqILDUxkFSDHXYOBgobBwCpDWTgbTmUClctKYLSGsOA9Kaw4G05ggphtGaKUAaTwXS+EgphtN4GpDG04E0niHFCBrPBJo7igKKMGYBAzaOApba6JYixWHjaGCi+el1DWYDJ9voAZZEsH0Yc4BXbRwjRWrFbUZwLLDUxlwpRgobxwFpPA9I4/lSpNF4AZDGFZDGC6VId9joBcbb6ANSkR+YbuN4YJGNE4DUPhEYtHGSFBnU/hiQ2h8HUvsTQLp3DaT2J4HUPlmKTDqQAJC2NAVIW5oKpC09BaQDmQak9qelyKL26UAqegZIRc8C6TafA1LR81KMoqIXgHSbLwKpfQaQ2mcC6eRnAWlLs4G0pZekcNOWXgZS+xwgFb0CrCiKD+OrUoyOt3EuUNg4D5hu42vAIhvnA4M2LpAim4peB9L4QiCNvyGFh7a0CBiwcTGQxoukyKHxN4E0/haQTultKcbQ5t8B0oEsAdKaS4G0pWVSjKU13wXSmu8B6ZSWS5Fb8aCO4Aogrfk+sOLpHcGVQFpzFbDETq6WYhytWQyk8/wASPe+BkhFHwLpQD6SIo+KPgam27f5CbDIxrXAoI3rpMiPt3E9MM2+zQ1A2vynwKN28jMpCmLt5EZgexs/B9KnWQKkolJgsY2bgLts/ALXDhs3V1xfg1sq/ldwDW7FNbVvwzW1f4lrat+O63Ibv5KikNp3AKn9ayC17wTSN+QbILV/i9+i6d6/A9LJ7wLSyX8PpKIyYEVRbBh3S+GLtXEPMMnGvcAiG/cBgzbul8Lf3sYDwICNPwCLbTwIpH3+KMV4h42HgFR0GFhg4xEgbf4osNzGoBQT4m08BqQDOQ6cbeNPwFIbTwCp6KQUE6noFJCKfgbS5k8D6ejOAOnjOAuk9nNSTKLz/AVIX4bzwLjIkyGCF4DCTl4E0ubLgWn2+CW3PzPDVQ3Xvvxcd2FGXpZb/SoT188PvRa4Ku
ONvIxxbvWb+T1GXfZntvsP+SXO8A==',
'right_to_left_line': 'eNpt2HlYVFUYx/HBBdFUyKWsXCYzHZcIc2/jWsYlM51EbdTQARwdXIAzzCioRwYFQlMbM1PMFCWLTJM0dw3CjTQNd9RMNDVNS6xUSqN+c5rh8Xlfzx8sH7537rnn3LnzPLhrxo2Ln5gaOsaW7HQ5bPV033dhaJ8uAqSoYbLUNhgMk+JHOe2ipq65NIN3WALxxW6LH2N3ilq6ZjT8r8H44nulkUmOxFGuOKeorWuBUWt29jhnt9TBn5OdjsRxtmQRGC3q3PfUUSqoJ4IwgbpS1DOpcyUlxic4k8UD0aK+6b6Hmb1BPdEARzWUIthkCcBRKSJEDz/dK828u3WkglTxoB4+oiVGqMEVa/fWjaRobLKjO9luwovrQ7raUSTPx1joL5pI0VQVJ5oO75Rb1cZbpC3DWOEvHpLiYVUc65Lat7C55i0yj2GccPuKZlI8oorDnuP5zVOX8OJRKR5TxaHwzCH7Tf140VyKFqo4OOzSTEenFF60lKKVKvZ1L7b1DzLzwijF46rY46haPquPhxetpXhCFcX9T8nRRjsv2kjxpCq2Dx6/bkalh69HWynaqWLD3JVXd3QuQlE4sBFGoL8wSdFeFatmb4u4csTAiw5SdFTF/EqcJkO9RpIHY45/Hp2keMpbFHo6Htwklmm8CJXiaVWszHNeiYhWxdQlGDn+IkyKzqpYN7T33Q4bjbx4RoouqtiK9XJUWXnRVYpuqijutqpOy1H3eY3uUvRQxe6cBmPT97p50VOKXqrY12Le9cB+Qbx4VornVHHAfXNg/FrvvhRmDMKI8q/Y81K8oIrSw95F3cWLF6UIV8XhC0sPWO6U8UKTorcqjt4qODuuwWVevCTFy6o4EVR8Q7a6y4s+UryiirKtzrzQQ6t5ESGFropTdzdeFI0reBEpxauqONOw5PaMtgZe9JXiNVWcNZ6uu6BnE170k+J1VZTHR0TnOc/zor8UA1RxzjFg9Gp5zVvMGoZhKXLF2iz1vc+rOIfNluB7/JijxRsmXcsMUY87XXOavd8DssRAYBDFKKCB4iBdy6ikOBhY4Uer+iFLDAFepvgmsIyiBVhKcSiwhOIw4GaKw4EFFN8CLqIYDfRQHAHMpjgS6KZoBSb5MfFf78gSMUArxVighWIc0ExxFDCSog2oURwNDPOj2zelMUAjRTuwGcV4YAjFsUADxXG6NqOS4nhgBcUJwHKKCcASionAXIpJwEUUBdBD0QHMppgMTKHoBNopuoBWihOBFoqTgGaKKUCNYiqwJ8XJQLZHU4AmilOBbOMkkG3cNCDbuDRgEEU3kO1muq5NZxs3Hcg2bgawjGIGsJRiJrCIYhZwM8W3gQUUs4H5FGcC2c0wC8huhneA7GaYDWQ3wxwguxnmAtnN8C4wkqIHyPZ9HpDt+3tAtsXzgWw33wey3Vyga+nsHfcBkG3cQmA5fTIsApZRzAGWUlwMLKH4IbCI4hLgZoofAQsoLgXmU1wGzKWYC2SP3+VA9qRdAUyhnzJ5wOrHr+YrPwbaKa4EWil+ArRQ/BQYSTEfqFH8DNiT4iqgieLnwGYUVwOr9z3Mh2uABopf6Jq7kuJa4GWKBcBSil8CiyiuAxZQXA/MpfgV0ENxAzCb4kagleImoIXiZqCZ4hagRnErMIziNqCR4nZg9SIbfbgDGETxa11LU++jgHuwEFhKyyJgAcVvgB6KxcCR6q6+B3cCw2i5CxhCcbeuTauguAdYTue5F8imVALMpeW3wCRa7gNqFPcDg+nkv9M1yaZ0AMjOfhDopvg90EqxFMgW5BAwhE7+sK5NZWc/Aiyk8zwKrL5pg30PlmPAJIrHgWEUT+jalHKKZcAi/5T8eBLopngKaKSHn9a1yaUUfwAm0cPPANnhP+paagUtzwKLaFkO9FA8B2TXfh6oUfxJ11LYtV8AsslfBLJFvgR0U/wZaKV4Gcgu84quTaqg+AuQnf0qkF3mNSC7zF+BZoq/Adm1X
weyKVUADRRv6NpENs/fgWw7/gAWUPwTyCZ/E8iW7haQLd1tILtpK4EhFP/SNRdbur+B1Rtn8L1l7gDdFO8CrRT/qf4H2T1YBTRSxFvSZSDoMASoX8upB8BtrtgYS1386kwcb3PEJMTZHDW8efiWHO9YbKmFvyXETLA5aoLVR4ijFn5yxYb+B3SGITo==',
}
# Load each exported gesture string into a GestureDatabase so touch paths
# can later be matched against the known gestures by name.
gestures = GestureDatabase()
for name, gesture_string in gesture_strings.items():
    gesture = gestures.str_to_gesture(gesture_string)
    gesture.name = name
    gestures.add_gesture(gesture)
class GestureBox(BoxLayout):
    """BoxLayout that records touch paths and dispatches on_<gesture> events.

    One custom 'on_<name>' event type is registered per key of
    gesture_strings, so handlers can be bound to recognized gestures.
    """
    def __init__(self, **kwargs):
        # Register the custom event types before the superclass initializes
        # its event machinery.
        for name in gesture_strings:
            self.register_event_type('on_{}'.format(name))
        super(GestureBox, self).__init__(**kwargs)
    # Default (no-op) handlers: Kivy requires a default handler for every
    # registered event type.
    # NOTE(review): gesture_strings has no 'bottom_to_top_line' entry, so
    # on_bottom_to_top_line is never registered nor dispatched here.
    def on_left_to_right_line(self):
        pass
    def on_right_to_left_line(self):
        pass
    def on_bottom_to_top_line(self):
        pass
    def on_top_to_bottom_line(self):
        pass
    def on_touch_down(self, touch):
        # Start a fresh path for this touch.
        touch.ud['gesture_path'] = [(touch.x, touch.y)]
        super(GestureBox, self).on_touch_down(touch)
    def on_touch_move(self, touch):
        try:
            # touch.ud['line'].points += [touch.x, touch.y]
            # Extend the recorded path with the new position.
            touch.ud['gesture_path'].append((touch.x, touch.y))
            super(GestureBox, self).on_touch_move(touch)
        except (KeyError) as e:
            print('KeyError has excepted')
    def on_touch_up(self, touch):
        # Check whether the recorded path matches a known gesture.
        if 'gesture_path' in touch.ud:
            # Build a gesture from the recorded stroke.
            gesture = Gesture()
            # Add the recorded movement coordinates.
            gesture.add_stroke(touch.ud['gesture_path'])
            # Normalize the gesture's size.
            gesture.normalize()
            # Match against the database; minscore is the sensitivity.
            match = gestures.find(gesture, minscore=0.5)
            if match:
                print("{} happened".format(match[1].name))
                self.dispatch('on_{}'.format(match[1].name))
        super(GestureBox, self).on_touch_up(touch)
|
# port of https://phab.hepforge.org/source/fastjetsvn/browse/contrib/contribs/ConstituentSubtractor/tags/1.4.4/example_event_wide.cc
# to python w/ heppy
import fastjet as fj
import fjcontrib
from pyjetty.mputils import MPBase
class CEventSubtractor(MPBase):
    """Event-wide constituent subtraction via fjcontrib.ConstituentSubtractor.

    Port of the ConstituentSubtractor example_event_wide.cc configuration;
    the background density rho is estimated on a grid over |eta| < max_eta.
    """

    def __init__(self, **kwargs):
        # Defaults mirror the fastjet-contrib example; any value can be
        # overridden through keyword arguments (handled by MPBase).
        self.configure_from_args(max_eta=4,
                                 bge_rho_grid_size=0.2,
                                 max_distance=0.3,
                                 alpha=1,
                                 ghost_area=0.01,
                                 distance_type=fjcontrib.ConstituentSubtractor.deltaR,
                                 CBS=1.0,
                                 CSS=1.0,
                                 max_pt_correct=5.)
        super(CEventSubtractor, self).__init__(**kwargs)
        # Grid-median background estimator. The same max_eta cut must be used
        # here and inside the subtractor so that background estimation and
        # subtraction see the same particle population.
        self.bge_rho = fj.GridMedianBackgroundEstimator(self.max_eta, self.bge_rho_grid_size)
        self.subtractor = fjcontrib.ConstituentSubtractor()
        # Particle-ghost distance definition: "deltaR" or "angle".
        self.subtractor.set_distance_type(self.distance_type)
        # Maximal allowed distance between a particle and a ghost.
        self.subtractor.set_max_distance(self.max_distance)
        # Exponent of particle pt in the distance measure; larger alpha
        # favours subtracting low-pt particles first.
        self.subtractor.set_alpha(self.alpha)
        # Ghost density: smaller is more accurate but slower.
        self.subtractor.set_ghost_area(self.ghost_area)
        # Drop zero-pt/zero-mass particles from the output (recommended:
        # fewer particles makes any subsequent clustering faster).
        self.subtractor.set_remove_particles_with_zero_pt_and_mass(True)
        # Maximal eta cut applied during the subtraction itself.
        self.subtractor.set_max_eta(self.max_eta)
        # Estimator used to derive rho.
        self.subtractor.set_background_estimator(self.bge_rho)
        # Only particles with pt < max_pt_correct are corrected; harder
        # particles are copied through unchanged.
        self.sel_max_pt = fj.SelectorPtMax(self.max_pt_correct)
        self.subtractor.set_particle_selector(self.sel_max_pt)
        self.subtractor.initialize()

    def process_event(self, full_event):
        """Estimate rho from full_event and return the subtracted event."""
        self.bge_rho.set_particles(full_event)
        self.corrected_event = self.subtractor.subtract_event(full_event)
        return self.corrected_event

    def set_event_particles(self, full_event):
        """Feed the particle list used for the background (rho) estimate."""
        self.bge_rho.set_particles(full_event)

    def process_jet(self, jet):
        """Return the constituent-subtracted version of a single jet."""
        self.corrected_jet = self.subtractor.result(jet)
        return self.corrected_jet
class CSubtractorJetByJet(MPBase):
    """Jet-by-jet constituent subtraction with a grid-median rho estimate."""

    def __init__(self, **kwargs):
        # Defaults can be overridden through keyword arguments (MPBase).
        self.configure_from_args(max_eta=4,
                                 bge_rho_grid_size=0.2)
        super(CSubtractorJetByJet, self).__init__(**kwargs)
        # Background estimator handed straight to the subtractor.
        self.bge_rho = fj.GridMedianBackgroundEstimator(self.max_eta, self.bge_rho_grid_size)
        self.subtractor = fjcontrib.ConstituentSubtractor(self.bge_rho)

    def set_event_particles(self, full_event):
        """Feed the particle list used for the background (rho) estimate."""
        self.bge_rho.set_particles(full_event)

    def process_jet(self, jet):
        """Return the constituent-subtracted version of a single jet."""
        self.corrected_jet = self.subtractor.result(jet)
        return self.corrected_jet

    def process_jets(self, jets):
        """Subtract every jet; keep only results that retain constituents."""
        subtracted = (self.subtractor.result(j) for j in jets)
        self.corrected_jets = [cj for cj in subtracted if cj.has_constituents()]
        return self.corrected_jets
|
import os

import torch
from torch.utils.data import DataLoader
from tqdm import tqdm

from dataset import audio_dataset
from utils import reconstruction_plot, attention_plot, create_folder
def get_dataloader(data_path, yaml_path, args, cuda):
    """Build the train/val datasets and dataloaders for one validation fold.

    Returns a dict with keys 'train_dataset', 'val_dataset',
    'train_dataloader' and 'val_dataloader'. pin_memory follows the cuda
    flag so host-to-device copies stay fast when a GPU is used.
    """
    tr_set = audio_dataset(data_path, yaml_path, args.val_fold, train=True)
    va_set = audio_dataset(data_path, yaml_path, args.val_fold, train=False)
    tr_loader = DataLoader(tr_set, args.batch_size, shuffle=True,
                           num_workers=args.num_workers, pin_memory=cuda)
    va_loader = DataLoader(va_set, args.batch_size, shuffle=False,
                           num_workers=args.num_workers, pin_memory=cuda)
    return {
        'train_dataset': tr_set,
        'val_dataset': va_set,
        'train_dataloader': tr_loader,
        'val_dataloader': va_loader,
    }
def train_epoch(data_container, model, optimizer, criterian_SED, criterian_AT, scheduler, args, cuda, MTL):
    """Run one training epoch.

    Returns (running mean loss, per-batch predictions, per-batch targets).
    `scheduler` is accepted for signature symmetry but not used here.
    """
    model.train()
    epoch_loss = 0.0
    preds = []
    targets = []
    # Normalizer for the running mean: batches per epoch.
    n_batches = len(data_container['train_dataset']) / args.batch_size
    for x, y, _ in data_container['train_dataloader']:
        if cuda:
            x = x.cuda()
            y = y.cuda()
        optimizer.zero_grad()
        out_dict = model(x)
        y_pred = out_dict['y_pred']
        # SED loss, plus the weighted reconstruction loss when the
        # multi-task (MTL) branch is enabled.
        loss = criterian_SED(y_pred, y)
        if MTL:
            loss = loss + args.alpha * criterian_AT(out_dict['x_rec'], x)
        loss.backward()
        optimizer.step()
        epoch_loss += loss.data / n_batches
        preds.append(y_pred.detach().cpu().numpy())
        targets.append(y.detach().cpu().numpy())
    return epoch_loss, preds, targets
def eval_epoch(data_container, model, optimizer, criterian_SED, criterian_AT, scheduler, args, cuda, MTL):
    """Run one validation epoch.

    Returns (running mean loss, per-batch predictions, per-batch targets).
    `optimizer` and `scheduler` are unused but kept in the signature for
    symmetry with train_epoch().
    """
    val_loss = 0.0
    y_pred_val = []
    y_true_val = []
    model.eval()
    # Normalizer for the running mean: batches per epoch.
    n_batches = len(data_container['val_dataset']) / args.batch_size
    # Bug fix: evaluation needs no autograd graph -- without no_grad() every
    # forward pass retained activations for a backward that never happens,
    # wasting memory and time.
    with torch.no_grad():
        for x, y, _ in data_container['val_dataloader']:
            if cuda:
                x = x.cuda()
                y = y.cuda()
            out_dict = model(x)
            y_pred = out_dict['y_pred']
            loss = criterian_SED(y_pred, y) + (args.alpha * criterian_AT(out_dict['x_rec'], x) if MTL else 0.0)
            val_loss += loss.data / n_batches
            y_pred_val.append(y_pred.detach().cpu().numpy())
            y_true_val.append(y.detach().cpu().numpy())
    return val_loss, y_pred_val, y_true_val
def visualise_epoch(data_container, model, args, cuda, base_path):
    """Dump reconstruction and attention plots for every validation sample.

    Plots are written below <base_path>/ae_vis and <base_path>/dualatt_vis,
    one file per audio clip (named after the clip).

    Fixes: the original kept a per-batch counter `i` whose comment claimed it
    counted samples, and materialized unused `y_pred`/`class_wise_input`
    arrays; all three were dead code and are removed.
    """
    base_path_ae = os.path.join(base_path, 'ae_vis')
    base_path_dualatt = os.path.join(base_path, 'dualatt_vis')
    create_folder(base_path_ae)
    create_folder(base_path_dualatt)
    model.eval()
    for x, _, audio_names in tqdm(data_container['val_dataloader']):
        if cuda:
            x = x.cuda()
        out_dict = model(x)
        x_rec = out_dict['x_rec'].cpu().detach().numpy()
        mel_attw = out_dict['mel_attw'].cpu().detach().numpy()
        time_attw = out_dict['time_attw'].cpu().detach().numpy()
        mel_x = out_dict['mel_x'].cpu().detach().numpy()
        time_x = out_dict['time_x'].cpu().detach().numpy()
        x = x.cpu().detach().numpy()
        # j indexes samples inside the current batch.
        for j in range(x.shape[0]):
            reconstruction_plot(x[j], x_rec[j], args, audio_names[j], base_path_ae)
            attention_plot(mel_x[j], mel_attw[j], time_x[j], time_attw[j], args, audio_names[j], base_path_dualatt)
from django.contrib.auth import get_user_model
from rest_framework import serializers
from apps.auction.models import Lot, Bet
User = get_user_model()
class LotSerializer(serializers.ModelSerializer):
    """Serializer for auction lots.

    update() only reacts to the `status` field: moving a lot to CLOSED
    triggers Lot.close(); no other submitted fields are persisted here.
    """

    class Meta:
        model = Lot
        fields = [
            'id', 'starting_price', 'pet', 'seller', 'status',
        ]

    def update(self, instance, validated_data):
        # Close the lot when (and only when) the requested status is CLOSED;
        # absent status falls back to the current one, making this a no-op.
        requested_status = validated_data.get('status', instance.status)
        if requested_status == instance.Status.CLOSED:
            instance.close()
        return instance
class BetSerializer(serializers.ModelSerializer):
    """Serializer for bets placed on auction lots."""
    class Meta:
        model = Bet
        fields = [
            'id', 'bid', 'buyer', 'lot',
        ]
|
from read_only_guard import ReadOnlyGuard
from pathlib import Path
from crypto import Crypto
from datetime import datetime
import shutil
class Diary:
    """An encrypted diary file with read-only protection and dirty tracking.

    Persistence goes through Crypto.instance; the read-only and dirty flags
    are delegated to a ReadOnlyGuard.
    """

    def __init__(self, filename, readonly):
        self._filename = filename
        self._readonly_guard = ReadOnlyGuard(readonly=readonly)
        self._contents = ""

    def save(self):
        """Encrypt and write the contents, backing up any existing file first.

        The previous version is copied to <dir>/.dmlbackups/<name>-<stamp>.bak.
        NOTE(review): the timestamp has minute granularity, so two saves
        within the same minute overwrite a single backup file.
        """
        path = Path(self._filename)
        if path.exists():
            tempdir = path.parent / ".dmlbackups"
            # exist_ok=True replaces the racy exists()/mkdir() pair of the
            # original with one atomic call.
            tempdir.mkdir(exist_ok=True)
            nowstr = datetime.now().strftime("%Y-%m-%d--%H-%M")
            bakfile = tempdir / (path.name + "-" + nowstr + ".bak")
            shutil.copyfile(path, bakfile)
        Crypto.instance.write_file(path, self._contents)
        self._readonly_guard.clear_dirty()

    def is_readonly(self):
        """Return True when the diary refuses content changes."""
        return self._readonly_guard.is_readonly()

    def set_readonly(self, val):
        """Toggle read-only mode; returns the guard's result."""
        return self._readonly_guard.set_readonly(val)

    def is_dirty(self):
        """Return True when contents changed since the last save/load."""
        return self._readonly_guard.is_dirty()

    def mark_dirty(self):
        """Flag the diary as having unsaved changes.

        NOTE(review): pokes the guard's private _dirty attribute directly;
        if ReadOnlyGuard has a public mutator it should be used instead.
        """
        self._readonly_guard._dirty = True

    def get_contents(self):
        """Return the in-memory diary text."""
        return self._contents

    def set_contents(self, val):
        """Replace the diary text; the guard raises when read-only."""
        self._readonly_guard.assert_not_readonly()
        self._contents = val

    def get_filename(self):
        """Return only the file's base name (no directory part)."""
        return Path(self._filename).name

    def load(self):
        """Decrypt the file into memory and clear the dirty flag."""
        self._contents = Crypto.instance.read_file(Path(self._filename))
        self._readonly_guard.clear_dirty()
|
# encoding: utf-8
from abc import abstractmethod, ABCMeta
from configparser import ConfigParser
from src.utils import singleton
import logging.config
import logging
import os
import sys
# Make the project root importable regardless of the working directory.
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
# Configure logging from file; keep loggers created before this call alive.
logging.config.fileConfig(fname='log.config', disable_existing_loggers=False)
class BaseConfig(metaclass=ABCMeta):
    """Abstract base for config sections: name listing plus a load hook."""

    def get_member_vars(self):
        """Return the names of this section's option attributes."""
        return self.__dict__.keys()

    @abstractmethod
    def load(self, conf, section):
        """Populate attributes from the given ConfigParser section."""
        pass
@singleton
class ServerConfig(BaseConfig):
    """[server] section: bind address and port of the FAQ service."""

    def __init__(self):
        self.ip = '0.0.0.0'
        self.port = '5000'

    def load(self, conf, section):
        """Overwrite defaults with any options present in the section."""
        for name in self.get_member_vars():
            if name in conf.options(section):
                setattr(self, name, conf.get(section, name))
@singleton
class EsConfig(BaseConfig):
    """[elastic_search] section: ES endpoint and index file name."""

    def __init__(self):
        self.ip = '127.0.0.1'
        self.port = '9200'
        self.es_file = 'faq_vec.index'

    def load(self, conf, section):
        """Overwrite defaults with any options present in the section."""
        for name in self.get_member_vars():
            if name in conf.options(section):
                setattr(self, name, conf.get(section, name))
@singleton
class AnnoyConfig(BaseConfig):
    """[annoy_search] section: index file and embedding dimension."""

    def __init__(self):
        self.annoy_file = 'test.annoy'
        self.vec_dim = 100

    def load(self, conf, section):
        """Overwrite defaults; vec_dim is parsed as an int."""
        for name in self.get_member_vars():
            if name not in conf.options(section):
                continue
            if name == 'annoy_file':
                setattr(self, name, conf.get(section, name))
            else:
                setattr(self, name, conf.getint(section, name))
# @singleton
# class TfidfTransformerConfig(BaseConfig):
# def __init__(self):
# self.model_file = 'tfidftransformer.pkl'
# self.max_feature = 256
# self.feature_dims = 100
#
# def load(self, conf, section):
# for item in self.get_member_vars():
# if item in conf.options(section):
# if item == 'model_file':
# self.__dict__[item] = conf.get(section, item)
# else:
# self.__dict__[item] = conf.getint(section, item)
@singleton
class SkipConfig(BaseConfig):
    """[skip_embedding] section: skip-thought model, dictionary and dim."""

    def __init__(self):
        self.model_file = './sentence_embedding/saved_models/skip-best'
        self.dict_file = './sentence_embedding/data/faq.txt.pkl'
        self.vec_dim = 120

    def load(self, conf, section):
        """Overwrite defaults; file paths stay strings, the rest are ints."""
        for name in self.get_member_vars():
            if name not in conf.options(section):
                continue
            if name in ('model_file', 'dict_file'):
                setattr(self, name, conf.get(section, name))
            else:
                setattr(self, name, conf.getint(section, name))
@singleton
class LightGBMConfig(BaseConfig):
    """[lightgbm] section: ranking-model pickle file."""

    def __init__(self):
        self.model_file = 'lightgbm_Model.pkl'

    def load(self, conf, section):
        """Overwrite defaults; model_file stays a string, the rest are ints."""
        for name in self.get_member_vars():
            if name not in conf.options(section):
                continue
            if name == 'model_file':
                setattr(self, name, conf.get(section, name))
            else:
                setattr(self, name, conf.getint(section, name))
@singleton
class TermRetrievalConfig(BaseConfig):
    """[term_retrieval] section: candidate count and score threshold."""

    def __init__(self):
        self.top_n = 20
        self.threshold = 2.0

    def load(self, conf, section):
        """Overwrite defaults; threshold is a float, the rest are ints.

        Bug fix: like every sibling section class, only read options that
        actually appear in the file -- the original called conf.get*()
        unconditionally and raised NoOptionError when one was omitted.
        """
        for item in self.get_member_vars():
            if item not in conf.options(section):
                continue
            if item == 'threshold':
                self.__dict__[item] = conf.getfloat(section, item)
            else:
                self.__dict__[item] = conf.getint(section, item)
@singleton
class SemanticRetrievalConfig(BaseConfig):
    """[semantic_retrieval] section: number of semantic candidates."""

    def __init__(self):
        self.top_n = 20

    def load(self, conf, section):
        """Overwrite defaults with any options present, parsed as ints."""
        for name in self.get_member_vars():
            if name in conf.options(section):
                setattr(self, name, conf.getint(section, name))
@singleton
class RankConfig(BaseConfig):
    """[rank] section: final answer count and acceptance threshold."""

    def __init__(self):
        self.top_n = 5
        self.threshold = 0.5

    def load(self, conf, section):
        """Overwrite defaults; threshold is a float, the rest are ints.

        Bug fix: like every sibling section class, only read options that
        actually appear in the file -- the original called conf.get*()
        unconditionally and raised NoOptionError when one was omitted.
        """
        for item in self.get_member_vars():
            if item not in conf.options(section):
                continue
            if item == 'threshold':
                self.__dict__[item] = conf.getfloat(section, item)
            else:
                self.__dict__[item] = conf.getint(section, item)
@singleton
class AbcnnConfig(BaseConfig):
    """[abcnn] section: matching-model checkpoint file."""

    def __init__(self):
        self.model_file = 'abcnn2.ckpt'

    def load(self, conf, section):
        """Overwrite defaults; model_file stays a string, the rest are ints."""
        for name in self.get_member_vars():
            if name not in conf.options(section):
                continue
            if name == 'model_file':
                setattr(self, name, conf.get(section, name))
            else:
                setattr(self, name, conf.getint(section, name))
@singleton
class XgboostConfig(BaseConfig):
    """[xgboost] section: ranking-model pickle file."""

    def __init__(self):
        self.model_file = 'Xgboost_train_Model_abcnn_zi.pkl'

    def load(self, conf, section):
        """Overwrite defaults; model_file stays a string, the rest are ints."""
        for name in self.get_member_vars():
            if name not in conf.options(section):
                continue
            if name == 'model_file':
                setattr(self, name, conf.get(section, name))
            else:
                setattr(self, name, conf.getint(section, name))
@singleton
class FaqConfig:
    """Top-level FAQ-service configuration.

    Owns one singleton sub-config per INI section and proxies load/save/
    add/set/remove onto the underlying ConfigParser.
    """
    def __init__(self, config_file):
        self.logger = logging.getLogger('Config')
        self.conf = ConfigParser()
        self.config_file = config_file
        self.conf.read(self.config_file)
        # One sub-config per INI section; each attribute name must match its
        # section name for load() below to dispatch correctly.
        self.server = ServerConfig()
        self.elastic_search = EsConfig()
        self.annoy_search = AnnoyConfig()
        self.skip_embedding = SkipConfig()
        # self.tfidf_transformer = TfidfTransformerConfig()
        self.term_retrieval = TermRetrievalConfig()
        self.semantic_retrieval = SemanticRetrievalConfig()
        self.rank = RankConfig()
        self.lightgbm = LightGBMConfig()
        self.abcnn = AbcnnConfig()
        self.xgboost = XgboostConfig()
    def get_member_vars(self):
        # All instance attribute names: sections plus bookkeeping fields.
        return vars(self).keys()
    def load(self):
        """Dispatch each INI section to its sub-config's load()."""
        sections = self.conf.sections()
        for item in self.get_member_vars():
            # Skip the bookkeeping attributes that are not config sections.
            if item in ['logger', 'conf', 'config_file']:
                continue
            if item in sections:
                self.__dict__[item].load(self.conf, item)
        self.logger.info('load faq config SUCCESS !')
    def save(self):
        """Write the in-memory ConfigParser state back to config_file."""
        with open(self.config_file, 'w', encoding='utf-8') as f:
            self.conf.write(f)
        self.logger.info('save faq config SUCCESS !')
    def add(self, section, option, value):
        """Set option=value, creating the section first when needed."""
        if section not in self.conf.sections():
            self.conf.add_section(section)
        self.conf.set(section, option, value)
    def set(self, section, option, value):
        """Set option=value in an existing section; log an error otherwise."""
        if section not in self.conf.sections():
            self.logger.error('ERROR: NO ' + section + ' in sections !')
        else:
            self.conf.set(section, option, value)
    def remove(self, section, option=None):
        """Remove a whole section, or a single option when one is given."""
        if section not in self.conf.sections():
            self.logger.error('ERROR: NO ' + section + ' in sections !')
        else:
            if option is None:
                self.conf.remove_section(section)
            else:
                self.conf.remove_option(section, option)
    def check_annoy_search(self):
        """Sanity-check that the annoy index dim matches the embedding dim.

        NOTE(review): raises IndexError, an odd exception type for a config
        mismatch (ValueError would fit better); callers may depend on it.
        """
        if self.annoy_search.vec_dim != self.skip_embedding.vec_dim:
            raise IndexError(
                'dim of annoy vector DOES NOT match dim of embedding model !')
def init_faq_config(config_file):
    """Create a FaqConfig from config_file, load it, and return it."""
    logger = logging.getLogger('init_faq_config')
    config = FaqConfig(config_file)
    config.load()
    logger.info('init faq config SUCCESS !')
    return config
if __name__ == '__main__':
    # Manual smoke test: load the config, tweak one option, write it back.
    cf = FaqConfig('faq.config')
    cf.load()
    cf.set('rank', 'top_n', '5')
    cf.save()
|
import logging
from sleekxmpp import ClientXMPP
from inbetween import pull_data
# from sleekxmpp.exceptions import IqError, IqTimeout
# noinspection PyMethodMayBeStatic
# noinspection PyMethodMayBeStatic
class EchoBot(ClientXMPP):
    """XMPP bot that answers each incoming chat message via pull_data()."""

    def __init__(self, jid, password):
        ClientXMPP.__init__(self, jid, password)
        self.add_event_handler("session_start", self.session_start)
        self.add_event_handler("message", self.message)
        # If you wanted more functionality, here's how to register plugins:
        # self.register_plugin('xep_0030')  # Service Discovery
        # self.register_plugin('xep_0199')  # XMPP Ping
        # For an OpenFire server a different SSL version is needed:
        # import ssl
        # self.ssl_version = ssl.PROTOCOL_SSLv3

    def session_start(self, event):
        """Announce presence and fetch the roster once the session is up.

        get_roster()/send_presence() use Iq stanzas, which may raise IqError
        or IqTimeout; those are left to propagate here.
        """
        self.send_presence(pstatus="Send me a message", pnick="Admin Bot")
        self.get_roster()

    def message(self, msg):
        """Reply to chat messages with pull_data(); errors are echoed back.

        Bug fix: the original used `except ValueError, e:` -- Python-2-only
        syntax that is a SyntaxError on Python 3. Both exception types are
        now handled with one `as`-form tuple clause (valid on 2.6+ and 3.x),
        preserving the identical reply-the-error behavior of both branches.
        """
        if msg['type'] in ('chat', 'normal'):
            try:
                msg.reply("{}".format(pull_data(msg["body"]))).send()
            except (ValueError, AttributeError) as e:
                msg.reply("{}".format(e)).send()
if __name__ == '__main__':
    # Ideally use optparse or argparse to get JID,
    # password, and log level.
    logging.basicConfig(level=logging.DEBUG, format='%(levelname)-8s %(message)s')
    # Alternative accounts used during development:
    # xmpp = EchoBot('admin@192.168.56.151/virtualbox', 'password')
    # xmpp = EchoBot('admin@tigase', 'tigase')
    # xmpp = EchoBot('ichux@192.168.56.152/virtualbox', 'passw0rd')
    # xmpp = EchoBot('moderator@tigase', 'passw0rd')
    xmpp = EchoBot('bot@192.168.56.151/virtualbox', 'tellnoone')
    xmpp.connect()
    # block=True keeps the process alive handling XMPP events.
    xmpp.process(block=True)
|
'''
MIT License
Copyright (c) 2017 Sterin, Farrugia, Gripon.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
'''
This code implements the GRU RNN model in theano.
'''
import numpy as np
import random
import theano
import theano.tensor as T
import collections as c
import copy
class GRU:
    """Gated Recurrent Unit (GRU) recurrent network built with theano.

    Maps an input sequence (one row per time step) to an output sequence
    through a single GRU hidden layer with a linear readout:

        z_t = sigmoid(W_z x_t + U_z s_{t-1} + b1)          # update gate
        r_t = sigmoid(W_r x_t + U_r s_{t-1} + b2)          # reset gate
        h_t = tanh(W_hp x_t + U_hp (s_{t-1} * r_t) + b3)   # candidate
        s_t = (1 - z_t) * h_t + z_t * s_{t-1}
        y_t = W_y s_t                                      # linear output

    Compiled callables (created in __theano_build__):
        model(x, s0)             -> (y, s, z, r)
        get_error(x, t, s0)      -> scalar sum-of-squares error
        bptt(x, t, s0)           -> gradients of the error w.r.t. params
        train_step(s0, x, t, lr) -> (y, s, error), applying one SGD step
    """

    def __init__(self, n_i, n_h, n_o):
        """Create a GRU with n_i inputs, n_h hidden units, n_o outputs."""
        self.n_i = n_i
        self.n_h = n_h
        self.n_o = n_o
        self.rand_init_params()
        # NOTE(review): this count covers only the weight matrices; the
        # three bias vectors (3 * n_h entries) are not included -- confirm
        # whether that is intentional before relying on this number.
        self.n_parameters = 3*self.n_h*self.n_i+3*self.n_h*self.n_h + self.n_o*self.n_h
        # Deep-copy the numpy initialisers so reset() can restore them
        # even after theano has updated the shared values in place.
        self.W_z = theano.shared(copy.deepcopy(self.init_W_z), name='W_z')
        self.U_z = theano.shared(copy.deepcopy(self.init_U_z), name='U_z')
        self.W_r = theano.shared(copy.deepcopy(self.init_W_r), name='W_r')
        self.U_r = theano.shared(copy.deepcopy(self.init_U_r), name='U_r')
        self.W_hp = theano.shared(copy.deepcopy(self.init_W_hp), name='W_h')
        # NOTE(review): the recurrent candidate weight is initialised from
        # ``init_W_up`` (see rand_init_params) -- the attribute name is
        # inconsistent but kept for backward compatibility.
        self.U_hp = theano.shared(copy.deepcopy(self.init_W_up), name='U_h')
        self.W_y = theano.shared(copy.deepcopy(self.init_W_y), name='W_y')
        self.b1 = theano.shared(np.zeros(self.n_h), name='b1')
        self.b2 = theano.shared(np.zeros(self.n_h), name='b2')
        self.b3 = theano.shared(np.zeros(self.n_h), name='b3')
        self.params = [self.W_z,self.U_z,self.W_r,self.U_r, self.W_hp, self.U_hp, self.W_y, self.b1, self.b2, self.b3]
        self.__theano_build__()

    def rand_init_params(self):
        """Draw fresh standard-normal initial values for all weight matrices."""
        self.init_W_z = np.random.randn(self.n_h,self.n_i)
        self.init_U_z = np.random.randn(self.n_h,self.n_h)
        self.init_W_r = np.random.randn(self.n_h,self.n_i)
        self.init_U_r = np.random.randn(self.n_h,self.n_h)
        self.init_W_hp = np.random.randn(self.n_h,self.n_i)
        # Recurrent weight for the candidate state; stored under the
        # historical (misleading) name init_W_up.
        self.init_W_up = np.random.randn(self.n_h,self.n_h)
        self.init_W_y = np.random.randn(self.n_o,self.n_h)

    def __theano_build__(self):
        """Compile the symbolic graph and the model/training functions."""
        params = self.params
        # First dim is time
        x = T.matrix()
        # target
        t = T.matrix()
        # initial hidden state
        s0 = T.vector()
        def step(x_t, s_tm1, W_z, U_z, W_r, U_r, W_h, U_h, W_y, b1, b2, b3):
            # One GRU time step: output, new state, and both gate values.
            z = T.nnet.sigmoid(W_z.dot(x_t)+U_z.dot(s_tm1)+b1)
            r = T.nnet.sigmoid(W_r.dot(x_t)+U_r.dot(s_tm1)+b2)
            h = T.tanh(W_h.dot(x_t)+U_h.dot(s_tm1*r)+b3)
            s_t = (1-z)*h + z*s_tm1
            y_t = W_y.dot(s_t)
            return y_t, s_t,z,r
        [y,s,z,r], _ = theano.scan(step,
                        sequences=x,
                        non_sequences=params,
                        outputs_info=[None, s0, None, None])
        # Sum-of-squares error over the whole output sequence.
        error = ((y - t) ** 2).sum()
        grads = T.grad(error, params)
        self.model = theano.function([x, s0], (y,s,z,r))
        self.get_error = theano.function([x, t, s0], error)
        self.bptt = theano.function([x, t, s0], grads)
        lr = T.scalar()
        # Plain SGD update: p <- p - lr * grad(p).
        chgt = {}
        for i in range(len(params)):
            chgt[params[i]] = params[i]-lr*grads[i]
        self.train_step = theano.function([s0, x, t, lr],
                            (y, s, error),
                            updates=c.OrderedDict(chgt))

    def reset(self, random_init=True):
        """Restore all parameters to initial values.

        With random_init=True (default), new random initial weights are
        drawn first; otherwise the previously stored initialisers are
        restored. Biases are always reset to zero.
        """
        if random_init:
            self.rand_init_params()
        self.W_z.set_value(self.init_W_z)
        self.U_z.set_value(self.init_U_z)
        self.W_r.set_value(self.init_W_r)
        self.U_r.set_value(self.init_U_r)
        self.W_hp.set_value(self.init_W_hp)
        self.U_hp.set_value(self.init_W_up)
        self.W_y.set_value(self.init_W_y)
        # Bug fix: the original rebound self.b1/b2/b3 to brand-new shared
        # variables, which the already-compiled theano functions never see,
        # so the biases were silently never reset (and self.params went
        # stale). Update the existing shared variables in place instead.
        self.b1.set_value(np.zeros(self.n_h))
        self.b2.set_value(np.zeros(self.n_h))
        self.b3.set_value(np.zeros(self.n_h))
|
#-*- coding: utf-8 -*-
import unittest
from litefs import TreeCache
class TestTreeCache(unittest.TestCase):
    """Unit tests for litefs.TreeCache basic put/delete behaviour."""

    def setUp(self):
        # A fresh cache instance before every test.
        self.cache = TreeCache(clean_period=60, expiration_time=3600)

    def test_put(self):
        """Inserting N distinct keys yields a cache of size N."""
        entries = {
            'k_int': 1,
            'k_str': 'hello',
            'k_float': .5
        }
        fresh = TreeCache(clean_period=60, expiration_time=3600)
        for key, value in entries.items():
            fresh.put(key, value)
        self.assertEqual(len(fresh), len(entries))

    def test_delete(self):
        """Deleting an existing key shrinks the cache by exactly one."""
        self.cache.put('delete_key', 'delete me')
        before = len(self.cache)
        self.cache.delete('delete_key')
        self.assertEqual(before, len(self.cache) + 1)
if __name__ == '__main__':
    # Run the test suite when this file is executed directly.
    unittest.main()
|
"""Packaging script for the gsheetapi sample project."""
from setuptools import setup, find_packages

# The long description and licence text are read from files shipped
# alongside this script.
with open('README.rst') as fh:
    readme_text = fh.read()

with open('LICENSE') as fh:
    license_text = fh.read()

setup(
    name='gsheetapi',
    version='0.1.0',
    description='Sample Stuff for Implement Google Sheet API',
    long_description=readme_text,
    author='Fathur Rohman',
    author_email='kgfathur@gmail.com',
    url='https://github.com/kgfathur/gsheetapi',
    license=license_text,
    packages=find_packages(exclude=('tests', 'docs'))
)
from urllib.request import urlopen as uRep
from bs4 import BeautifulSoup as soup
# Category root to start crawling from.
start_url='https://www.mytek.tn/3-informatique'
# Collected product hrefs, appended to by Article().
links=[]
# Pagination bounds: crawl stops once current_page reaches max_page.
max_page=1
current_page=0
def Article(url):
    """Scrape product links from one mytek.tn category page, recursing
    through pagination until ``current_page`` reaches ``max_page``.

    Each product href found is appended to the module-level ``links``
    list and echoed to stdout.
    """
    # Bug fix: ``current_page`` is assigned below, so without a ``global``
    # declaration Python treats it as a local and the read in the
    # ``if current_page < max_page`` test raises UnboundLocalError on the
    # very first call.
    global links
    global current_page
    uClient = uRep(url)
    page_html = uClient.read()
    uClient.close()
    page_soup = soup(page_html,"html.parser")
    containers = page_soup.findAll("li",{"class":"ajax_block_product"})
    for container in containers:
        title_container = container.find("a",{"class" : "product_img_link"})
        href=title_container.get('href')
        # Keep only product pages; drop search-result and pagination URLs.
        if ('search' not in href and 'page' not in href):
            links.append(href)
            print("saved",href)
    if (current_page < max_page):
        # First anchor inside the pagination widget; assumed to be a
        # fragment relative to start_url -- TODO confirm against the site.
        newlink = page_soup.find('ul',{'class':'pagination'}).find('a').get('href')
        if (newlink):
            current_page=current_page+1
            Article(start_url+newlink)
Article(start_url)
|
from hashlib import sha224

# (user_id, username, password) records; the plaintext password in each
# record is replaced in place by a truncated digest below.
users = ([1, 'bob', 'secret'],[2, 'alice', 'sekrit'], [3, 'eve', 'secret'])

for user in users:
    # Bug fix / Py3 compatibility: hashlib digests operate on bytes, so
    # the password must be encoded first (the original Python-2 code
    # passed a str directly). Keep only the first 8 hex characters.
    # NOTE(review): a truncated, unsalted SHA-224 is NOT a safe way to
    # store passwords -- acceptable for a demo only.
    user[2] = sha224(user[2].encode('utf-8')).hexdigest()[:8]

# Py3 compatibility: the original used the Python-2 print statement.
print(users)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.