from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from actions.api_v1.api import api_router
app = FastAPI(
title="FLIP API",
description="This API provides a REST interface to FLIP CLI.",
)
origins = [
"http://localhost",
"http://localhost:3000",
"http://127.0.0.1",
"http://127.0.0.1:3000"
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.include_router(api_router)
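# Run sketch (assumption: this module is saved as main.py and uvicorn is installed):
#   uvicorn main:app --reload
# With the CORS middleware above, browser requests from the listed localhost origins are accepted.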
|
import os
import json
import requests
from jinja2 import Template
def get_events():
"""Get the latest events from the API."""
response = requests.get('https://polisen.se/H4S-2018-handelser.json')
response.encoding = 'utf-8-sig'
data = json.loads(response.text)
return data[:50]
def convert_event(event):
"""Convert an event to Markdown format."""
with open('./scripts/template.j2') as handle:
template = Template(handle.read())
string = template.render(**event)
return string
def process_events(data_dir):
"""Process all events."""
events = get_events()
for event in events:
event_md = convert_event(event)
out_path = os.path.join(data_dir, f"{event['id']}.md")
with open(out_path, 'w') as handle:
handle.write(event_md)
def main():
process_events('./content/content/post')
if __name__ == '__main__':
main()
|
class Vegetation:
def __init__(self, x, y, emoji):
self.x = x
self.y = y
self.life_force = 10
self.emoji = emoji
@property
def coord(self):
return self.x, self.y
def move(self):
self.life_force = 0.9 * self.life_force
def eaten(self):
self.life_force = 0
class Grass(Vegetation):
count = 0
def __init__(self, x, y):
Vegetation.__init__(self, x, y, emoji="🌱")
Grass.count += 1
def __del__(self):
Grass.count -= 1
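# Minimal usage sketch (hypothetical coordinates) showing the class counter and life-force decay:
if __name__ == '__main__':
    patch = Grass(2, 3)
    print(Grass.count)       # 1
    patch.move()             # life force decays by 10% per move
    print(patch.life_force)  # 9.0
    patch.eaten()
    print(patch.life_force)  # 0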
|
import json
import pyowm
from flask import Flask
from flask import request
from flask import make_response
# Flask app should start in global layout
app = Flask(__name__)
@app.route('/', methods=['POST'])
def webhook():
req = request.get_json()
    print(json.dumps(req, indent=4))  # log the incoming request
res = processRequest(req)
res = json.dumps(res, indent=4)
r = make_response(res)
r.headers['Content-Type'] = 'application/json'
return r
def processRequest(req):
if req.get("result").get("action") != "citynameAction":
return {}
result = req.get("result")
parameters = result.get("parameters")
city = parameters.get("geo-city")
owm = pyowm.OWM('7521c2b2765cf016a55b005b107ea3a7') # You MUST provide a valid API key
observation = owm.weather_at_place(city)
w = observation.get_weather()
max_temp = str(w.get_temperature('celsius')['temp_max'])
min_temp = str(w.get_temperature('celsius')['temp_min'])
current_temp = str(w.get_temperature('celsius')['temp'])
status = w.get_status()
icon = w.get_weather_icon_name()
wind_speed = str(w.get_wind()['speed'])
humidity = str(w.get_humidity())
report = 'Status: ' + status + '\n' + 'Max: ' + max_temp + '\n' + 'Min: ' + min_temp + '\n' \
+ 'Current: ' + current_temp + '\n' + 'Wind speed: ' + wind_speed + '\n' + 'Humidity: ' + humidity + '%'
title = city + " Weather Report"
print("Weather report ", report)
facebook_message = {
"attachment": {
"type": "template",
"payload": {
"template_type": "generic",
"elements": [
{
"title": title,
"image_url": "http://openweathermap.org/img/w/" +icon + ".png",
"subtitle": report
}
]
}
}
}
return {
"speech": report,
"displayText": report,
"data": {"facebook": facebook_message},
# "contextOut": [],
"source": "apiai-weather-webhook-sample"
}
if __name__ == '__main__':
    app.run()
|
import json
import os
import numpy as np
from constants import data_pool_path
class FileWriter:
def __init__(self, filename, content):
self.fname = filename
self.content = content
def writeFile(self):
try:
with open(self.fname, 'w+') as f:
f.write(self.content)
except IOError:
            print('Failed to open the file!')
class JSONWriter(FileWriter):
def writeFile(self):
        if self.fname.endswith('.json'):  # more robust than splitting on the first dot
try:
with open(self.fname, 'w+') as f:
json.dump(self.content, f)
except IOError:
                print('Failed to open the file!')
class NpyWriter(FileWriter):
def writeFile(self):
        if self.fname.endswith('.npy'):
try:
np.save(self.fname, self.content)
except IOError:
                print('Failed to open the file!')
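# Usage sketch (hypothetical file names and content); each writer only writes when the
# extension matches its format:
if __name__ == '__main__':
    JSONWriter('example.json', {'a': 1}).writeFile()
    NpyWriter('example.npy', np.zeros((2, 2))).writeFile()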
|
import discord
from discord.ext import commands
import getanime as ga
from pages import EmbedPaginator
class Anime(commands.Cog):
def __init__(self, client):
self.client = client
# Events
@commands.Cog.listener()
async def on_ready(self):
print('The Anime cog was successfully loaded!')
# Commands
@commands.command()
@commands.max_concurrency(5, per=commands.BucketType.default, wait=False)
async def search(self, ctx, *, query):
results = ga.search_anilist(query)
paginator = EmbedPaginator(ctx)
embeds = []
for result in results:
url = result['link']
embed = discord.Embed(color=0x00ff00)
embed.title = result['title']
embed.url = url
embed.description = result['desc']
embed.set_thumbnail(url=result['img'])
embed.set_author(name='Anilist', url='http://anilist.co', icon_url='https://i.imgur.com/Ak72T73.png')
embed.add_field(name='Total Eps:', value=result['totalEpisodes'])
embed.add_field(name='Stats:', value=result['status'])
genre = result['genres']
genre_links = []
for a in genre:
url = f'[{a}]' + f'(https://anilist.co/search/anime/{a})'.replace(' ', '%20')
genre_links.append(url)
genre = ', '.join(genre_links)
embed.add_field(name='Genre:', value=genre)
embed.add_field(name='AniList ID:', value=result['id'], inline=False)
embeds.append(embed)
if len(embeds) == 1:
            await ctx.send(embed=embeds[0])  # single result, no need for the paginator
else:
await paginator.run(embeds)
def setup(client):
client.add_cog(Anime(client))
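# Loading sketch (assumption: this file lives at cogs/anime.py in the bot project):
#   client.load_extension('cogs.anime')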
|
import tkinter as tk
from tkinter import filedialog
import os
import sys
import cv2  # opencv itself
import common #some useful opencv functions
import numpy as np # matrix manipulations
import imutils
from matplotlib import pyplot as plt # this lets you draw inline pictures in the notebooks
import requests
from flask import Flask, jsonify, flash, send_file, request,url_for,redirect,abort
app = Flask(__name__)
@app.route('/pan')
def pan_matchinger():
    return send_file('pan_matchinger.jpg', mimetype='image/jpeg')
@app.route('/adhar')
def adhar_matchinger():
    return send_file('adhar_matchinger.jpg', mimetype='image/jpeg')
# @app.route('/message')
# def rr():
# return 'Message Prnted'
@app.route('/')
def temp():
return '''
<!doctype html>
<title>Face Matching</title>
<h2>Select file(s) to upload</h2>
    <h3>Steps to follow:<br>1) Upload Aadhaar<br>2) Upload PAN<br>3) Upload haarcascade file<br>4) Capture photo of user</h3>
<form method="post" action="/">
<input type="submit" value="Click Here to Start Matching!" name="action1"/>
</form>
'''
@app.route('/', methods=['POST'])
def index():
if request.method == 'POST':
if request.form.get('action1') == 'Click Here to Start Matching!':
root=tk.Tk()
root.withdraw()
            filePath1 = filedialog.askopenfilename()  # Aadhaar image
            filePath2 = filedialog.askopenfilename()  # PAN image
            filePath3 = filedialog.askopenfilename()  # haarcascade XML file
rootF=os.getcwd()
print(rootF)
image_extraction_adhar(filePath1, filePath3)
image_extraction_pan(filePath2, filePath3)
cam = cv2.VideoCapture(0)
cv2.namedWindow("test")
img_counter = 0
while True:
ret, frame = cam.read()
if not ret:
print("failed to grab frame")
break
cv2.imshow("test", frame)
k = cv2.waitKey(1)
if k%256 == 27:
# ESC pressed
print("Escape hit, closing...")
break
elif k%256 == 32:
# SPACE pressed
img_name = "opencv_frame_{}.png".format(img_counter)
cv2.imwrite(img_name, frame)
print("{} written!".format(img_name))
img_counter += 1
cam.release()
cv2.destroyAllWindows()
revognizer_with_adhar(rootF)
revognizer_with_pan(rootF)
return adhar_matchinger()
# elif request.form.get('action2') == 'VALUE2':
# return redirect('localhost:5000/message')
else:
abort(401)
def image_extraction_adhar(filePath1, filePath3):
#the following are to do with this interactive notebook code
#matplotlib inline
from matplotlib import pyplot as plt # this lets you draw inline pictures in the notebooks
import pylab # this allows you to control figure size
image_adhar=cv2.imread(filePath1)
i=0
while True:
# Get user supplied values
imagePath = sys.argv[0]
# Load the Haar Cascade
#cascPath =r'E:\yash\haarcascade_frontalface_default.xml'
cascPath=filePath3
# Create the Haar Cascade
faceCascade = cv2.CascadeClassifier(cascPath)
# Read the Image
#image = cv2.imread('/content/inverted coloured.jpeg')
# Convert to Gray-Scale
gray = cv2.cvtColor(image_adhar, cv2.COLOR_BGR2GRAY)
# Detect Faces in the Image
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(25, 25)
)
#i=0
if(len(faces)>0):
#print(i)
#base_image = cv2.imread('/content/colored adar.jpeg')
imag=imutils.rotate(image_adhar,angle=i)
grey = cv2.cvtColor(imag, cv2.COLOR_BGR2GRAY)
plt.imshow(cv2.cvtColor(imag, cv2.COLOR_BGR2RGB))
# test_image = cv2.imread('/content/inverted coloured.jpeg')
# im=imutils.rotate(test_image,angle=i)
face_cascade = cv2.CascadeClassifier(filePath3)
faces = face_cascade.detectMultiScale(grey, 1.3, 5)
for (x,y,w,h) in faces:
cv2.rectangle(imag,(x,y),(x+w,y+h),(255,0,0),2)
face_crop=image_adhar[y:y+h,x:x+w]
cv2.imwrite('facecrop_adhar.jpg',face_crop)
plt.imshow(cv2.cvtColor(imag, cv2.COLOR_BGR2RGB))
break
else:
# print(i)
# i+=1
# image=imutils.rotate(image,angle=i)
# #cv2.imwrite('/contents/inverted coloured.jpeg',img)
# #i+=1
print("Check the orientation")
break
print("Found {0} faces!".format(len(faces)))
def image_extraction_pan(filePath2, filePath3):
#the following are to do with this interactive notebook code
#matplotlib inline
# this allows you to control figure size
image_pan=cv2.imread(filePath2)
i=0
while True:
# Get user supplied values
imagePath = sys.argv[0]
# Load the Haar Cascade
cascPath = filePath3
# Create the Haar Cascade
faceCascade = cv2.CascadeClassifier(cascPath)
# Read the Image
#image = cv2.imread('/content/inverted coloured.jpeg')
# Convert to Gray-Scale
gray = cv2.cvtColor(image_pan, cv2.COLOR_BGR2GRAY)
# Detect Faces in the Image
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(25, 25)
)
#i=0
if(len(faces)>0):
#print(i)
#base_image = cv2.imread('/content/colored adar.jpeg')
imag=imutils.rotate(image_pan,angle=i)
grey = cv2.cvtColor(imag, cv2.COLOR_BGR2GRAY)
plt.imshow(cv2.cvtColor(imag, cv2.COLOR_BGR2RGB))
# test_image = cv2.imread('/content/inverted coloured.jpeg')
# im=imutils.rotate(test_image,angle=i)
face_cascade = cv2.CascadeClassifier(filePath3)
faces = face_cascade.detectMultiScale(grey, 1.3, 5)
for (x,y,w,h) in faces:
cv2.rectangle(imag,(x,y),(x+w,y+h),(255,0,0),2)
face_crop=image_pan[y:y+h,x:x+w]
cv2.imwrite('facecrop_paaaaan.jpg',face_crop)
plt.imshow(cv2.cvtColor(imag, cv2.COLOR_BGR2RGB))
break
else:
# print(i)
# i+=1
# image=imutils.rotate(image,angle=i)
# #cv2.imwrite('/contents/inverted coloured.jpeg',img)
# #i+=1
print("Check the orientation")
break
print("Found {0} faces!".format(len(faces)))
def revognizer_with_adhar(rootF):
from PIL import Image, ImageDraw
from IPython.display import display
# The program we will be finding faces on the example below
pil_im = Image.open(r'{0}\opencv_frame_0.png'.format(rootF))
display(pil_im)
import face_recognition
import numpy as np
from PIL import Image, ImageDraw
from IPython.display import display
# This is an example of running face recognition on a single image
# and drawing a box around each person that was identified.
# Load a sample picture and learn how to recognize it.
yash_image = face_recognition.load_image_file(r"{0}\facecrop_adhar.jpg".format(rootF))
yash_face_encoding = face_recognition.face_encodings(yash_image)[0]
# # Load a second sample picture and learn how to recognize it.
# sachin_image = face_recognition.load_image_file("/content/image13.jpeg")
# sachin_face_encoding = face_recognition.face_encodings(sachin_image)[0]
# Create arrays of known face encodings and their names
known_face_encodings = [
yash_face_encoding
#,
# sachin_face_encoding
]
known_face_names = [
"Matching with adhar"
# ,
# "Sachin"
]
print('Learned encoding for', len(known_face_encodings), 'images.')
# Load an image with an unknown face
unknown_image = face_recognition.load_image_file(r"{0}\opencv_frame_0.png".format(rootF))
# Find all the faces and face encodings in the unknown image
face_locations = face_recognition.face_locations(unknown_image)
face_encodings = face_recognition.face_encodings(unknown_image, face_locations)
# Convert the image to a PIL-format image so that we can draw on top of it with the Pillow library
# See http://pillow.readthedocs.io/ for more about PIL/Pillow
pil_image = Image.fromarray(unknown_image)
# Create a Pillow ImageDraw Draw instance to draw with
draw = ImageDraw.Draw(pil_image)
# Loop through each face found in the unknown image
for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
# See if the face is a match for the known face(s)
matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
name = "Unmatched with adhar"
# Or instead, use the known face with the smallest distance to the new face
face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
best_match_index = np.argmin(face_distances)
if matches[best_match_index]:
name = known_face_names[best_match_index]
# Draw a box around the face using the Pillow module
draw.rectangle(((left, top), (right, bottom)), outline=(0, 0, 255))
# Draw a label with a name below the face
text_width, text_height = draw.textsize(name)
draw.rectangle(((left, bottom - text_height - 10), (right, bottom)), fill=(0, 0, 255), outline=(0, 0, 255))
draw.text((left + 6, bottom - text_height - 5), name, fill=(255, 255, 255, 255))
# Remove the drawing library from memory as per the Pillow docs
del draw
# Display the resulting image
display(pil_image)
pil_image = pil_image.save("adhar_matchinger.jpg")
def revognizer_with_pan(rootF):
from PIL import Image, ImageDraw
from IPython.display import display
# The program we will be finding faces on the example below
pil_im = Image.open(r'{0}\opencv_frame_0.png'.format(rootF))
display(pil_im)
import face_recognition
import numpy as np
from PIL import Image, ImageDraw
from IPython.display import display
# This is an example of running face recognition on a single image
# and drawing a box around each person that was identified.
# Load a sample picture and learn how to recognize it.
yash_image = face_recognition.load_image_file(r"{0}\facecrop_paaaaan.jpg".format(rootF))
yash_face_encoding = face_recognition.face_encodings(yash_image)[0]
# # Load a second sample picture and learn how to recognize it.
# sachin_image = face_recognition.load_image_file("/content/image13.jpeg")
# sachin_face_encoding = face_recognition.face_encodings(sachin_image)[0]
# Create arrays of known face encodings and their names
known_face_encodings = [
yash_face_encoding
#,
# sachin_face_encoding
]
known_face_names = [
"Matching with pan"
# ,
# "Sachin"
]
print('Learned encoding for', len(known_face_encodings), 'images.')
# Load an image with an unknown face
unknown_image = face_recognition.load_image_file(r"{0}\opencv_frame_0.png".format(rootF))
# Find all the faces and face encodings in the unknown image
face_locations = face_recognition.face_locations(unknown_image)
face_encodings = face_recognition.face_encodings(unknown_image, face_locations)
# Convert the image to a PIL-format image so that we can draw on top of it with the Pillow library
# See http://pillow.readthedocs.io/ for more about PIL/Pillow
pil_image = Image.fromarray(unknown_image)
# Create a Pillow ImageDraw Draw instance to draw with
draw = ImageDraw.Draw(pil_image)
# Loop through each face found in the unknown image
for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
# See if the face is a match for the known face(s)
matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
name = "Unmatched with pan"
# Or instead, use the known face with the smallest distance to the new face
face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
best_match_index = np.argmin(face_distances)
if matches[best_match_index]:
name = known_face_names[best_match_index]
# Draw a box around the face using the Pillow module
draw.rectangle(((left, top), (right, bottom)), outline=(0, 0, 255))
# Draw a label with a name below the face
text_width, text_height = draw.textsize(name)
draw.rectangle(((left, bottom - text_height - 10), (right, bottom)), fill=(0, 0, 255), outline=(0, 0, 255))
draw.text((left + 6, bottom - text_height - 5), name, fill=(255, 255, 255, 255))
# Remove the drawing library from memory as per the Pillow docs
del draw
# Display the resulting image
display(pil_image)
pil_image = pil_image.save("pan_matchinger.jpg")
if __name__=="__main__":
app.run(host='127.0.0.1', port=8080, debug=True)
|
#!/usr/bin/env python3
import argparse
import json
import sys
import langdet
parser = argparse.ArgumentParser(description='Create language detection models')
parser.add_argument('filename', type=str,
help='Text file to process')
parser.add_argument('--ngramSize', type=int, default=3,
help='character ngram size')
parser.add_argument('--maxNgrams', type=int, default=sys.maxsize,
help='maximum number of ngrams in model')
parser.add_argument('--threshNgrams', type=int, default=0,
help='minimum frequency of ngram in model')
parser.add_argument('--percentNgrams', type=float, default=0.0,
help='minimum percentage ngrams must reach in model')
args = parser.parse_args()
# make config
config = {}
if args.ngramSize > 0:
config = {
'ngramSize': args.ngramSize,
'maxValues': args.maxNgrams,
'freqThresh': args.threshNgrams,
'percentThresh': args.percentNgrams,
}
model = None
with open(args.filename, 'r') as infile:
model = langdet.train(infile, config)
if model is not None:
with open(args.filename + '-model.json', 'w') as outfile:
json.dump(model, outfile)
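# Example invocation (assumption: this script is saved as train_model.py; corpus_en.txt is a
# placeholder corpus file). The model is written next to the input as <filename>-model.json:
#   python3 train_model.py corpus_en.txt --ngramSize 3 --threshNgrams 2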
|
#!/usr/bin/env python3
import requests
def get_content(url):
paste_info = {
'site': 'bpaste',
'url': url,
}
# NOTE: http://stackoverflow.com/a/18579484/3716299
base_url = "https://bpaste.net/raw/%s"
paste_id = url.split('/')[-1]
content_url = base_url % paste_id
response = requests.get(content_url)
if response.status_code != 200:
return
paste_info['ext'] = ""
paste_info['orig_filename'] = paste_id
paste_info['content'] = response.content
return [paste_info]
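# Usage sketch (hypothetical paste URL): get_content returns a one-element list of paste
# metadata, or None when the raw paste could not be fetched.
#   pastes = get_content('https://bpaste.net/show/abc123')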
|
def wc(file_):
"""Takes an absolute file path/name, calculates the number of
lines/words/chars, and returns a string of these numbers + file, e.g.:
3 12 60 /tmp/somefile
(both tabs and spaces are allowed as separator)"""
with open(file_) as f:
text = f.read()
filename = f.name
    lines = len(text.splitlines())
    words = len(text.split())  # split() handles both tabs and spaces as separators
    chars = len(text)
    return f'{lines}\t{words}\t{chars}\t{filename}'
if __name__ == '__main__':
# make it work from cli like original unix wc
import sys
    print(wc(sys.argv[1]))
|
# -*- coding: utf-8 -*-
import os
import re
import sounddevice as sd
from utils import read_wav, write_wav
import numpy as np
from threading import Thread
import argparse
class DeviceNotFoundError(Exception):
pass
def record_target(file_path, length, fs, channels=2, append=False):
"""Records audio and writes it to a file.
Args:
file_path: Path to output file
length: Audio recording length in samples
fs: Sampling rate
channels: Number of channels in the recording
append: Add track(s) to an existing file? Silence will be added to end of each track to make all equal in
length
Returns:
None
"""
recording = sd.rec(length, samplerate=fs, channels=channels, blocking=True)
recording = np.transpose(recording)
max_gain = 20 * np.log10(np.max(np.abs(recording)))
if append and os.path.isfile(file_path):
# Adding to existing file, read the file
_fs, data = read_wav(file_path, expand=True)
# Zero pad shorter to the length of the longer
if recording.shape[1] > data.shape[1]:
n = recording.shape[1] - data.shape[1]
data = np.pad(data, [(0, 0), (0, n)])
elif data.shape[1] > recording.shape[1]:
            recording = np.pad(recording, [(0, 0), (0, data.shape[1] - recording.shape[1])])  # pad the recording, not the existing data
# Add recording to the end of the existing data
recording = np.vstack([data, recording])
write_wav(file_path, fs, recording)
print(f'Headroom: {-1.0*max_gain:.1f} dB')
def get_host_api_names():
"""Gets names of available host APIs in a list"""
return [hostapi['name'] for hostapi in sd.query_hostapis()]
def get_device(device_name, kind, host_api=None, min_channels=1):
"""Finds device with name, kind and host API
Args:
device_name: Device name
kind: Device type. "input" or "output"
host_api: Host API name
min_channels: Minimum number of channels in the device
Returns:
Device, None if no device was found which satisfies the parameters
"""
if device_name is None:
raise TypeError('Device name is required and cannot be None')
if kind is None:
raise TypeError('Kind is required and cannot be None')
# Available host APIs
host_api_names = get_host_api_names()
for i in range(len(host_api_names)):
host_api_names[i] = host_api_names[i].replace('Windows ', '')
if host_api is not None:
host_api = host_api.replace('Windows ', '')
# Host API check pattern
host_api_pattern = f'({"|".join([re.escape(name) for name in host_api_names])})$'
# Find with the given name
device = None
if re.search(host_api_pattern, device_name):
# Host API in the name, this should return only one device
device = sd.query_devices(device_name, kind=kind)
if device[f'max_{kind}_channels'] < min_channels:
# Channel count not satisfied
            raise DeviceNotFoundError(f'Found {kind} device "{device["name"]} {host_api_names[device["hostapi"]]}" '
                                      f'but minimum number of channels is not satisfied.')
elif not re.search(host_api_pattern, device_name) and host_api is not None:
# Host API not specified in the name but host API is given as parameter
try:
# This should give one or zero devices
device = sd.query_devices(f'{device_name} {host_api}', kind=kind)
except ValueError:
# Zero devices
raise DeviceNotFoundError(f'No device found with name "{device_name}" and host API "{host_api}". ')
if device[f'max_{kind}_channels'] < min_channels:
# Channel count not satisfied
raise DeviceNotFoundError(f'Found {kind} device "{device["name"]} {host_api_names[device["hostapi"]]}" '
f'but minimum number of channels is not satisfied.')
else:
# Host API not in the name and host API is not given as parameter
host_api_preference = [x for x in ['DirectSound', 'MME', 'WASAPI'] if x in host_api_names]
for host_api_name in host_api_preference:
# Looping in the order of preference
try:
device = sd.query_devices(f'{device_name} {host_api_name}', kind=kind)
if device[f'max_{kind}_channels'] >= min_channels:
break
else:
device = None
except ValueError:
pass
if device is None:
raise DeviceNotFoundError('Could not find any device which satisfies minimum channel count.')
return device
def get_devices(input_device=None, output_device=None, host_api=None, min_channels=1):
"""Finds input and output devices
Args:
input_device: Input device name. System default is used if not given.
output_device: Output device name. System default is used if not given.
host_api: Host API name
min_channels: Minimum number of output channels that the output device needs to support
Returns:
- Input device object
- Output device object
"""
# Find devices
devices = sd.query_devices()
# Select input device
if input_device is None:
# Not given, use default
input_device = devices[sd.default.device[0]]['name']
input_device = get_device(input_device, 'input', host_api=host_api)
# Select output device
if output_device is None:
# Not given, use default
output_device = devices[sd.default.device[1]]['name']
output_device = get_device(output_device, 'output', host_api=host_api, min_channels=min_channels)
return input_device, output_device
def set_default_devices(input_device, output_device):
"""Sets sounddevice default devices
Args:
input_device: Input device object
output_device: Output device object
Returns:
- Input device name and host API as string
- Output device name and host API as string
"""
host_api_names = get_host_api_names()
input_device_str = f'{input_device["name"]} {host_api_names[input_device["hostapi"]]}'
output_device_str = f'{output_device["name"]} {host_api_names[output_device["hostapi"]]}'
sd.default.device = (input_device_str, output_device_str)
return input_device_str, output_device_str
def play_and_record(
play=None,
record=None,
input_device=None,
output_device=None,
host_api=None,
channels=2,
append=False):
"""Plays one file and records another at the same time
Args:
play: File path to playback file
record: File path to output recording file
input_device: Number of the input device as seen by sounddevice
output_device: Number of the output device as seen by sounddevice
host_api: Host API name
channels: Number of output channels
append: Add track(s) to an existing file? Silence will be added to end of each track to make all equal in
length
Returns:
None
"""
# Create output directory
out_dir, out_file = os.path.split(os.path.abspath(record))
os.makedirs(out_dir, exist_ok=True)
# Read playback file
fs, data = read_wav(play)
n_channels = data.shape[0]
# Find and set devices as default
input_device, output_device = get_devices(
input_device=input_device,
output_device=output_device,
host_api=host_api,
min_channels=n_channels
)
input_device_str, output_device_str = set_default_devices(input_device, output_device)
print(f'Input device: "{input_device_str}"')
print(f'Output device: "{output_device_str}"')
recorder = Thread(
target=record_target,
args=(record, data.shape[1], fs),
kwargs={'channels': channels, 'append': append}
)
recorder.start()
sd.play(np.transpose(data), samplerate=fs, blocking=True)
def create_cli():
"""Create command line interface
Returns:
Parsed CLI arguments
"""
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--play', type=str, required=True, help='File path to WAV file to play.')
arg_parser.add_argument('--record', type=str, required=True,
                            help='File path to write the recording. This must have ".wav" extension and be either '
'"headphones.wav" or any combination of supported speaker names separated by commas '
'eg. FL,FC,FR.wav to be recognized by Impulcifer as a recording file. It\'s '
'convenient to point the file path directly to the recording directory such as '
'"data\\my_hrir\\FL,FR.wav".')
arg_parser.add_argument('--input_device', type=str, default=argparse.SUPPRESS,
                            help='Name or number of the input device. Use "python -m sounddevice" to '
'find out which devices are available. It\'s possible to add host API at the end of '
'the input device name separated by space to specify which host API to use. For '
'example: "Zoom H1n DirectSound".')
arg_parser.add_argument('--output_device', type=str, default=argparse.SUPPRESS,
                            help='Name or number of the output device. Use "python -m sounddevice" to '
'find out which devices are available. It\'s possible to add host API at the end of '
'the output device name separated by space to specify which host API to use. For '
'example: "Zoom H1n WASAPI"')
arg_parser.add_argument('--host_api', type=str, default=argparse.SUPPRESS,
help='Host API name to prefer for input and output devices. Supported options on Windows '
'are: "MME", "DirectSound" and "WASAPI". This is used when input and '
'output devices have not been specified (using system defaults) or if they have no '
'host API specified.')
arg_parser.add_argument('--channels', type=int, default=2, help='Number of output channels.')
arg_parser.add_argument('--append', action='store_true',
help='Add track(s) to existing file? Silence will be added to the end of all tracks to '
                                 'make them equal in length.')
args = vars(arg_parser.parse_args())
return args
if __name__ == '__main__':
play_and_record(**create_cli())
|
"""
Game screen where we show the game.
"""
from threading import Thread
from enum import Enum
import pygame
from map import Map
from .screen import Screen
from consts import Screens, MAP_FOLDER, IMAGE_FOLDER
from config import CONFIG
from player import Player
from bot import Bot
from gui import Button, ButtonState
from game_utils import save_game, load_save_game
class GameSubScreen(Enum):
START_MENU = 0
IN_GAME_MENU = 1
GAME = 2
class GameScreen(Screen):
def __init__(self):
self.subscreen = GameSubScreen.START_MENU
self.map = Map(MAP_FOLDER + CONFIG.CURRENT_LEVEL + '.tcm') # Stores the current map
self.player = Player(len(self.map.layers[4].tiles[0]), 32)
self.bot = Bot(len(self.map.layers[4].tiles[0]) + 32, 32)
self.in_game_menu_bg = None
# In game menu elements
button_margin = 50
self.in_game_resume_button = Button(96, CONFIG.WINDOW_HEIGHT - 96 - 3 * button_margin, label='RESUME')
self.in_game_save_button = Button(96, CONFIG.WINDOW_HEIGHT - 96 - 2 * button_margin, label='SAVE GAME')
self.in_game_exit_button = Button(96, CONFIG.WINDOW_HEIGHT - 96 - button_margin, label='EXIT')
if CONFIG.SAVE_GAME != '':
load_save_game(self)
CONFIG.SAVE_GAME = ''
def display_start_menu(self, screen):
self.subscreen = GameSubScreen.GAME
return Screens.GAME
def display_in_game_menu(self, screen):
screen.blit(self.in_game_menu_bg, (0, 0))
self.in_game_resume_button.display(screen)
self.in_game_save_button.display(screen)
self.in_game_exit_button.display(screen)
if self.in_game_exit_button.state == ButtonState.RELEASED:
return Screens.MAIN_MENU
elif self.in_game_resume_button.state == ButtonState.RELEASED:
self.subscreen = GameSubScreen.GAME
return Screens.GAME
elif self.in_game_save_button.state == ButtonState.RELEASED:
save_game(self)
return Screens.GAME
return Screens.GAME
def display_game(self, screen):
self.map.draw(screen, self.player, 0, 5)
self.bot.display(screen, self.map, self.player)
self.player.display(screen, self.map)
self.map.draw(screen, self.player, 5, 8)
        if pygame.key.get_pressed()[pygame.K_ESCAPE]:
# Save game screenshot as a background
pygame.image.save(screen, IMAGE_FOLDER + 'screenshot.png')
self.in_game_menu_bg = pygame.image.load(IMAGE_FOLDER + 'screenshot.png')
self.in_game_menu_bg.set_alpha(150)
self.subscreen = GameSubScreen.IN_GAME_MENU
return Screens.GAME
return Screens.GAME
def display(self, screen):
if self.subscreen == GameSubScreen.GAME:
return self.display_game(screen)
elif self.subscreen == GameSubScreen.START_MENU:
return self.display_start_menu(screen)
else:
return self.display_in_game_menu(screen)
return Screens.GAME
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'SorterLauncher.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(500, 550)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(Dialog.sizePolicy().hasHeightForWidth())
Dialog.setSizePolicy(sizePolicy)
Dialog.setMinimumSize(QtCore.QSize(500, 550))
Dialog.setMaximumSize(QtCore.QSize(16777215, 16777215))
Dialog.setBaseSize(QtCore.QSize(0, 0))
Dialog.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/icon/pokemaster.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
Dialog.setWindowIcon(icon)
Dialog.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedKingdom))
Dialog.setSizeGripEnabled(False)
self.gridLayout_3 = QtWidgets.QGridLayout(Dialog)
self.gridLayout_3.setSizeConstraint(QtWidgets.QLayout.SetNoConstraint)
self.gridLayout_3.setObjectName("gridLayout_3")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.btnLoadSettings = QtWidgets.QPushButton(Dialog)
self.btnLoadSettings.setObjectName("btnLoadSettings")
self.horizontalLayout.addWidget(self.btnLoadSettings)
self.btnSaveSettings = QtWidgets.QPushButton(Dialog)
self.btnSaveSettings.setObjectName("btnSaveSettings")
self.horizontalLayout.addWidget(self.btnSaveSettings)
spacerItem = QtWidgets.QSpacerItem(158, 17, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.btnSortFiles = QtWidgets.QPushButton(Dialog)
self.btnSortFiles.setObjectName("btnSortFiles")
self.horizontalLayout.addWidget(self.btnSortFiles)
self.gridLayout_3.addLayout(self.horizontalLayout, 1, 0, 1, 1)
self.tabWidget = QtWidgets.QTabWidget(Dialog)
self.tabWidget.setObjectName("tabWidget")
self.tabMainOptions = QtWidgets.QWidget()
self.tabMainOptions.setAccessibleName("")
self.tabMainOptions.setObjectName("tabMainOptions")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.tabMainOptions)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.groupBox = QtWidgets.QGroupBox(self.tabMainOptions)
self.groupBox.setObjectName("groupBox")
self.gridLayout_2 = QtWidgets.QGridLayout(self.groupBox)
self.gridLayout_2.setObjectName("gridLayout_2")
self.btnAddPath = QtWidgets.QPushButton(self.groupBox)
self.btnAddPath.setObjectName("btnAddPath")
self.gridLayout_2.addWidget(self.btnAddPath, 0, 1, 1, 1)
self.chkTraverseSubdirectories = QtWidgets.QCheckBox(self.groupBox)
self.chkTraverseSubdirectories.setChecked(True)
self.chkTraverseSubdirectories.setObjectName("chkTraverseSubdirectories")
self.gridLayout_2.addWidget(self.chkTraverseSubdirectories, 3, 0, 1, 1)
self.lstInputPaths = QtWidgets.QListWidget(self.groupBox)
self.lstInputPaths.setObjectName("lstInputPaths")
self.gridLayout_2.addWidget(self.lstInputPaths, 0, 0, 3, 1)
self.btnRemovePaths = QtWidgets.QPushButton(self.groupBox)
self.btnRemovePaths.setObjectName("btnRemovePaths")
self.gridLayout_2.addWidget(self.btnRemovePaths, 1, 1, 1, 1)
self.verticalLayout_2.addWidget(self.groupBox)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label = QtWidgets.QLabel(self.tabMainOptions)
self.label.setObjectName("label")
self.horizontalLayout_2.addWidget(self.label)
self.txtOutputPath = QtWidgets.QLineEdit(self.tabMainOptions)
self.txtOutputPath.setObjectName("txtOutputPath")
self.horizontalLayout_2.addWidget(self.txtOutputPath)
self.btnBrowseOutputPath = QtWidgets.QToolButton(self.tabMainOptions)
self.btnBrowseOutputPath.setObjectName("btnBrowseOutputPath")
self.horizontalLayout_2.addWidget(self.btnBrowseOutputPath)
self.verticalLayout_2.addLayout(self.horizontalLayout_2)
self.groupBox_2 = QtWidgets.QGroupBox(self.tabMainOptions)
self.groupBox_2.setObjectName("groupBox_2")
self.horizontalLayout_5 = QtWidgets.QHBoxLayout(self.groupBox_2)
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.cmbOutputPathStructure = QtWidgets.QComboBox(self.groupBox_2)
self.cmbOutputPathStructure.setEditable(False)
self.cmbOutputPathStructure.setCurrentText("")
self.cmbOutputPathStructure.setObjectName("cmbOutputPathStructure")
self.gridLayout.addWidget(self.cmbOutputPathStructure, 0, 0, 1, 4)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem1, 1, 3, 1, 1)
self.btnRemovePattern = QtWidgets.QPushButton(self.groupBox_2)
self.btnRemovePattern.setObjectName("btnRemovePattern")
self.gridLayout.addWidget(self.btnRemovePattern, 1, 2, 1, 1)
self.btnAddPattern = QtWidgets.QPushButton(self.groupBox_2)
self.btnAddPattern.setObjectName("btnAddPattern")
self.gridLayout.addWidget(self.btnAddPattern, 1, 0, 1, 1)
self.btnEditPattern = QtWidgets.QPushButton(self.groupBox_2)
self.btnEditPattern.setObjectName("btnEditPattern")
self.gridLayout.addWidget(self.btnEditPattern, 1, 1, 1, 1)
self.horizontalLayout_5.addLayout(self.gridLayout)
self.verticalLayout_2.addWidget(self.groupBox_2)
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.chkMaxFilesPerFolder = QtWidgets.QCheckBox(self.tabMainOptions)
self.chkMaxFilesPerFolder.setObjectName("chkMaxFilesPerFolder")
self.horizontalLayout_6.addWidget(self.chkMaxFilesPerFolder)
self.txtMaxFilesPerFolder = QtWidgets.QSpinBox(self.tabMainOptions)
self.txtMaxFilesPerFolder.setEnabled(False)
self.txtMaxFilesPerFolder.setFocusPolicy(QtCore.Qt.StrongFocus)
self.txtMaxFilesPerFolder.setMaximum(999999999)
self.txtMaxFilesPerFolder.setProperty("value", 255)
self.txtMaxFilesPerFolder.setObjectName("txtMaxFilesPerFolder")
self.horizontalLayout_6.addWidget(self.txtMaxFilesPerFolder)
self.verticalLayout_2.addLayout(self.horizontalLayout_6)
self.horizontalLayout_7 = QtWidgets.QHBoxLayout()
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.label_7 = QtWidgets.QLabel(self.tabMainOptions)
self.label_7.setObjectName("label_7")
self.horizontalLayout_7.addWidget(self.label_7)
self.txtBundleKeyLength = QtWidgets.QSpinBox(self.tabMainOptions)
self.txtBundleKeyLength.setEnabled(False)
self.txtBundleKeyLength.setFocusPolicy(QtCore.Qt.StrongFocus)
self.txtBundleKeyLength.setMinimum(3)
self.txtBundleKeyLength.setMaximum(50)
self.txtBundleKeyLength.setProperty("value", 3)
self.txtBundleKeyLength.setObjectName("txtBundleKeyLength")
self.horizontalLayout_7.addWidget(self.txtBundleKeyLength)
self.verticalLayout_2.addLayout(self.horizontalLayout_7)
self.chkCamelCase = QtWidgets.QCheckBox(self.tabMainOptions)
self.chkCamelCase.setFocusPolicy(QtCore.Qt.StrongFocus)
self.chkCamelCase.setObjectName("chkCamelCase")
self.verticalLayout_2.addWidget(self.chkCamelCase)
self.chkShortFilenames = QtWidgets.QCheckBox(self.tabMainOptions)
self.chkShortFilenames.setFocusPolicy(QtCore.Qt.StrongFocus)
self.chkShortFilenames.setObjectName("chkShortFilenames")
self.verticalLayout_2.addWidget(self.chkShortFilenames)
self.chkPlacePokFilesIntoPOKESSubfolders = QtWidgets.QCheckBox(self.tabMainOptions)
self.chkPlacePokFilesIntoPOKESSubfolders.setChecked(True)
self.chkPlacePokFilesIntoPOKESSubfolders.setObjectName("chkPlacePokFilesIntoPOKESSubfolders")
self.verticalLayout_2.addWidget(self.chkPlacePokFilesIntoPOKESSubfolders)
self.verticalLayout_2.setStretch(0, 2)
self.tabWidget.addTab(self.tabMainOptions, "")
self.tabFileFiltering = QtWidgets.QWidget()
self.tabFileFiltering.setObjectName("tabFileFiltering")
self.verticalLayout = QtWidgets.QVBoxLayout(self.tabFileFiltering)
self.verticalLayout.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
self.verticalLayout.setSpacing(7)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout_9 = QtWidgets.QHBoxLayout()
self.horizontalLayout_9.setSizeConstraint(QtWidgets.QLayout.SetMinimumSize)
self.horizontalLayout_9.setObjectName("horizontalLayout_9")
self.chkIncludeExtensions = QtWidgets.QCheckBox(self.tabFileFiltering)
self.chkIncludeExtensions.setObjectName("chkIncludeExtensions")
self.horizontalLayout_9.addWidget(self.chkIncludeExtensions)
self.txtIncludeExtensions = QtWidgets.QLineEdit(self.tabFileFiltering)
self.txtIncludeExtensions.setInputMask("")
self.txtIncludeExtensions.setObjectName("txtIncludeExtensions")
self.horizontalLayout_9.addWidget(self.txtIncludeExtensions)
self.verticalLayout.addLayout(self.horizontalLayout_9)
self.horizontalLayout_10 = QtWidgets.QHBoxLayout()
self.horizontalLayout_10.setSizeConstraint(QtWidgets.QLayout.SetMinimumSize)
self.horizontalLayout_10.setObjectName("horizontalLayout_10")
self.chkExcludeExtensions = QtWidgets.QCheckBox(self.tabFileFiltering)
self.chkExcludeExtensions.setObjectName("chkExcludeExtensions")
self.horizontalLayout_10.addWidget(self.chkExcludeExtensions)
self.txtExcludeExtensions = QtWidgets.QLineEdit(self.tabFileFiltering)
self.txtExcludeExtensions.setInputMask("")
self.txtExcludeExtensions.setObjectName("txtExcludeExtensions")
self.horizontalLayout_10.addWidget(self.txtExcludeExtensions)
self.verticalLayout.addLayout(self.horizontalLayout_10)
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setSizeConstraint(QtWidgets.QLayout.SetMinimumSize)
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.label_3 = QtWidgets.QLabel(self.tabFileFiltering)
self.label_3.setObjectName("label_3")
self.horizontalLayout_4.addWidget(self.label_3)
self.txtLanguages = QtWidgets.QLineEdit(self.tabFileFiltering)
self.txtLanguages.setInputMask("")
self.txtLanguages.setObjectName("txtLanguages")
self.horizontalLayout_4.addWidget(self.txtLanguages)
self.verticalLayout.addLayout(self.horizontalLayout_4)
self.horizontalLayout_8 = QtWidgets.QHBoxLayout()
self.horizontalLayout_8.setSizeConstraint(QtWidgets.QLayout.SetMinimumSize)
self.horizontalLayout_8.setObjectName("horizontalLayout_8")
self.label_4 = QtWidgets.QLabel(self.tabFileFiltering)
self.label_4.setObjectName("label_4")
self.horizontalLayout_8.addWidget(self.label_4)
self.txtMaxArchiveSize = QtWidgets.QSpinBox(self.tabFileFiltering)
self.txtMaxArchiveSize.setMinimum(1)
self.txtMaxArchiveSize.setMaximum(10000)
self.txtMaxArchiveSize.setObjectName("txtMaxArchiveSize")
self.horizontalLayout_8.addWidget(self.txtMaxArchiveSize)
self.label_5 = QtWidgets.QLabel(self.tabFileFiltering)
self.label_5.setObjectName("label_5")
self.horizontalLayout_8.addWidget(self.label_5)
self.verticalLayout.addLayout(self.horizontalLayout_8)
self.chkIncludeAlternate = QtWidgets.QCheckBox(self.tabFileFiltering)
self.chkIncludeAlternate.setChecked(False)
self.chkIncludeAlternate.setObjectName("chkIncludeAlternate")
self.verticalLayout.addWidget(self.chkIncludeAlternate)
self.chkIncludeDemos = QtWidgets.QCheckBox(self.tabFileFiltering)
self.chkIncludeDemos.setChecked(True)
self.chkIncludeDemos.setObjectName("chkIncludeDemos")
self.verticalLayout.addWidget(self.chkIncludeDemos)
self.chkIncludeRereleases = QtWidgets.QCheckBox(self.tabFileFiltering)
self.chkIncludeRereleases.setChecked(True)
self.chkIncludeRereleases.setObjectName("chkIncludeRereleases")
self.verticalLayout.addWidget(self.chkIncludeRereleases)
self.chkIncludeAlternateFileFormats = QtWidgets.QCheckBox(self.tabFileFiltering)
self.chkIncludeAlternateFileFormats.setChecked(True)
self.chkIncludeAlternateFileFormats.setObjectName("chkIncludeAlternateFileFormats")
self.verticalLayout.addWidget(self.chkIncludeAlternateFileFormats)
self.chkIncludeHacked = QtWidgets.QCheckBox(self.tabFileFiltering)
self.chkIncludeHacked.setChecked(True)
self.chkIncludeHacked.setObjectName("chkIncludeHacked")
self.verticalLayout.addWidget(self.chkIncludeHacked)
self.chkIncludeXRated = QtWidgets.QCheckBox(self.tabFileFiltering)
self.chkIncludeXRated.setChecked(True)
self.chkIncludeXRated.setObjectName("chkIncludeXRated")
self.verticalLayout.addWidget(self.chkIncludeXRated)
self.chkIncludeSupplementaryFiles = QtWidgets.QCheckBox(self.tabFileFiltering)
self.chkIncludeSupplementaryFiles.setChecked(False)
self.chkIncludeSupplementaryFiles.setObjectName("chkIncludeSupplementaryFiles")
self.verticalLayout.addWidget(self.chkIncludeSupplementaryFiles)
self.chkIncludeUnknownFiles = QtWidgets.QCheckBox(self.tabFileFiltering)
self.chkIncludeUnknownFiles.setChecked(True)
self.chkIncludeUnknownFiles.setTristate(False)
self.chkIncludeUnknownFiles.setObjectName("chkIncludeUnknownFiles")
self.verticalLayout.addWidget(self.chkIncludeUnknownFiles)
self.chkSeparateUnknownFiles = QtWidgets.QCheckBox(self.tabFileFiltering)
self.chkSeparateUnknownFiles.setChecked(True)
self.chkSeparateUnknownFiles.setTristate(False)
self.chkSeparateUnknownFiles.setObjectName("chkSeparateUnknownFiles")
self.verticalLayout.addWidget(self.chkSeparateUnknownFiles)
self.chkRetainFoldersForUnknownFiles = QtWidgets.QCheckBox(self.tabFileFiltering)
self.chkRetainFoldersForUnknownFiles.setChecked(False)
self.chkRetainFoldersForUnknownFiles.setTristate(False)
self.chkRetainFoldersForUnknownFiles.setObjectName("chkRetainFoldersForUnknownFiles")
self.verticalLayout.addWidget(self.chkRetainFoldersForUnknownFiles)
self.chkDeleteSourceFiles = QtWidgets.QCheckBox(self.tabFileFiltering)
self.chkDeleteSourceFiles.setChecked(False)
self.chkDeleteSourceFiles.setTristate(False)
self.chkDeleteSourceFiles.setObjectName("chkDeleteSourceFiles")
self.verticalLayout.addWidget(self.chkDeleteSourceFiles)
self.label_6 = QtWidgets.QLabel(self.tabFileFiltering)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_6.sizePolicy().hasHeightForWidth())
self.label_6.setSizePolicy(sizePolicy)
self.label_6.setText("")
self.label_6.setObjectName("label_6")
self.verticalLayout.addWidget(self.label_6)
self.verticalLayout.setStretch(4, 1)
self.verticalLayout.setStretch(5, 1)
self.verticalLayout.setStretch(6, 1)
self.verticalLayout.setStretch(7, 1)
self.verticalLayout.setStretch(8, 1)
self.verticalLayout.setStretch(9, 1)
self.chkIncludeAlternate.raise_()
self.chkIncludeDemos.raise_()
self.chkIncludeAlternateFileFormats.raise_()
self.chkIncludeHacked.raise_()
self.chkIncludeRereleases.raise_()
self.chkIncludeXRated.raise_()
self.chkIncludeSupplementaryFiles.raise_()
self.chkIncludeUnknownFiles.raise_()
self.chkSeparateUnknownFiles.raise_()
self.chkRetainFoldersForUnknownFiles.raise_()
self.chkDeleteSourceFiles.raise_()
self.label_6.raise_()
self.tabWidget.addTab(self.tabFileFiltering, "")
self.tabAbout = QtWidgets.QWidget()
self.tabAbout.setObjectName("tabAbout")
self.textBrowser = QtWidgets.QTextBrowser(self.tabAbout)
self.textBrowser.setGeometry(QtCore.QRect(20, 10, 441, 331))
self.textBrowser.setStyleSheet("background-color: transparent;")
self.textBrowser.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByKeyboard|QtCore.Qt.LinksAccessibleByMouse|QtCore.Qt.TextBrowserInteraction|QtCore.Qt.TextSelectableByKeyboard|QtCore.Qt.TextSelectableByMouse)
self.textBrowser.setOpenExternalLinks(True)
self.textBrowser.setOpenLinks(True)
self.textBrowser.setObjectName("textBrowser")
self.btnReadme = QtWidgets.QPushButton(self.tabAbout)
self.btnReadme.setGeometry(QtCore.QRect(20, 350, 241, 28))
self.btnReadme.setObjectName("btnReadme")
self.btnFacebook = QtWidgets.QPushButton(self.tabAbout)
self.btnFacebook.setGeometry(QtCore.QRect(20, 390, 241, 28))
self.btnFacebook.setObjectName("btnFacebook")
self.btnSourceForge = QtWidgets.QPushButton(self.tabAbout)
self.btnSourceForge.setGeometry(QtCore.QRect(20, 430, 241, 28))
self.btnSourceForge.setObjectName("btnSourceForge")
self.tabWidget.addTab(self.tabAbout, "")
self.gridLayout_3.addWidget(self.tabWidget, 0, 0, 1, 1)
self.retranslateUi(Dialog)
self.tabWidget.setCurrentIndex(1)
QtCore.QMetaObject.connectSlotsByName(Dialog)
Dialog.setTabOrder(self.tabWidget, self.lstInputPaths)
Dialog.setTabOrder(self.lstInputPaths, self.btnAddPath)
Dialog.setTabOrder(self.btnAddPath, self.btnRemovePaths)
Dialog.setTabOrder(self.btnRemovePaths, self.chkTraverseSubdirectories)
Dialog.setTabOrder(self.chkTraverseSubdirectories, self.txtOutputPath)
Dialog.setTabOrder(self.txtOutputPath, self.btnBrowseOutputPath)
Dialog.setTabOrder(self.btnBrowseOutputPath, self.cmbOutputPathStructure)
Dialog.setTabOrder(self.cmbOutputPathStructure, self.btnAddPattern)
Dialog.setTabOrder(self.btnAddPattern, self.btnEditPattern)
Dialog.setTabOrder(self.btnEditPattern, self.btnRemovePattern)
Dialog.setTabOrder(self.btnRemovePattern, self.chkMaxFilesPerFolder)
Dialog.setTabOrder(self.chkMaxFilesPerFolder, self.txtMaxFilesPerFolder)
Dialog.setTabOrder(self.txtMaxFilesPerFolder, self.chkCamelCase)
Dialog.setTabOrder(self.chkCamelCase, self.chkShortFilenames)
Dialog.setTabOrder(self.chkShortFilenames, self.chkPlacePokFilesIntoPOKESSubfolders)
Dialog.setTabOrder(self.chkPlacePokFilesIntoPOKESSubfolders, self.btnLoadSettings)
Dialog.setTabOrder(self.btnLoadSettings, self.btnSaveSettings)
Dialog.setTabOrder(self.btnSaveSettings, self.btnSortFiles)
Dialog.setTabOrder(self.btnSortFiles, self.txtLanguages)
Dialog.setTabOrder(self.txtLanguages, self.chkIncludeAlternate)
Dialog.setTabOrder(self.chkIncludeAlternate, self.chkIncludeDemos)
Dialog.setTabOrder(self.chkIncludeDemos, self.chkIncludeRereleases)
Dialog.setTabOrder(self.chkIncludeRereleases, self.chkIncludeAlternateFileFormats)
Dialog.setTabOrder(self.chkIncludeAlternateFileFormats, self.chkIncludeHacked)
Dialog.setTabOrder(self.chkIncludeHacked, self.chkIncludeXRated)
Dialog.setTabOrder(self.chkIncludeXRated, self.chkIncludeSupplementaryFiles)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "ZX Pokemaster"))
self.btnLoadSettings.setText(_translate("Dialog", "Load Settings..."))
self.btnSaveSettings.setText(_translate("Dialog", "Save Settings..."))
self.btnSortFiles.setText(_translate("Dialog", "Sort files"))
self.groupBox.setTitle(_translate("Dialog", "Input paths"))
self.btnAddPath.setText(_translate("Dialog", "Add path..."))
self.chkTraverseSubdirectories.setText(_translate("Dialog", "Traverse subdirectories"))
self.btnRemovePaths.setText(_translate("Dialog", "Remove path"))
self.label.setText(_translate("Dialog", "Output path:"))
self.btnBrowseOutputPath.setText(_translate("Dialog", "..."))
self.groupBox_2.setTitle(_translate("Dialog", "Output path structure pattern"))
self.btnRemovePattern.setText(_translate("Dialog", "Remove pattern"))
self.btnAddPattern.setText(_translate("Dialog", "Add pattern..."))
self.btnEditPattern.setText(_translate("Dialog", "Edit pattern..."))
self.chkMaxFilesPerFolder.setText(_translate("Dialog", "Max files per folder:"))
self.label_7.setText(_translate("Dialog", "Number of characters for keys:"))
        self.txtBundleKeyLength.setToolTip(_translate("Dialog", "If \"Max files per folder\" is selected and the files are sorted into subfolders, the subfolder names will look like \"tuj-spa\" if this value=3 or \"tujad-space\" if this value=5"))
self.chkCamelCase.setText(_translate("Dialog", "CamelCaseInsteadOfSpaces"))
self.chkShortFilenames.setText(_translate("Dialog", "Use 8.3 naming scheme"))
self.chkPlacePokFilesIntoPOKESSubfolders.setText(_translate("Dialog", "Place .POK files into POKES subfolders"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabMainOptions), _translate("Dialog", "Main options"))
self.chkIncludeExtensions.setText(_translate("Dialog", "Include only these:"))
self.txtIncludeExtensions.setPlaceholderText(_translate("Dialog", "tap,z80,sna,dsk,trd,tzx,img,mgt,rom,scl,slt,szx"))
self.chkExcludeExtensions.setText(_translate("Dialog", "Exclude these:"))
self.txtExcludeExtensions.setPlaceholderText(_translate("Dialog", "tap,z80,sna,dsk,trd,tzx,img,mgt,rom,scl,slt,szx"))
self.label_3.setText(_translate("Dialog", "Languages:"))
self.txtLanguages.setPlaceholderText(_translate("Dialog", "en,es,ru,pl,cz,fr,de,nl,hu,cr,pl,sr,sl,sv,no"))
self.label_4.setText(_translate("Dialog", "Max archive size to look into:"))
self.label_5.setText(_translate("Dialog", "MB"))
self.chkIncludeAlternate.setText(_translate("Dialog", "Include alternate files (marked [a] in TOSEC)"))
self.chkIncludeDemos.setText(_translate("Dialog", "Include demos (non-full versions of games)"))
self.chkIncludeRereleases.setText(_translate("Dialog", "Include re-releases"))
self.chkIncludeAlternateFileFormats.setText(_translate("Dialog", "Include alternate file formats (see formats preference order)"))
self.chkIncludeHacked.setText(_translate("Dialog", "Include files marked as cracked, hacked or modded"))
self.chkIncludeXRated.setToolTip(_translate("Dialog", "18+ games: may contain nudity, pornographic images, extremely obscene language or hate speech"))
self.chkIncludeXRated.setText(_translate("Dialog", "Include x-rated games"))
        self.chkIncludeSupplementaryFiles.setToolTip(_translate("Dialog", "If a ZX Spectrum file has got other files with the same name in the same folder or in a subfolder, include those as well. May be handy for maps, manuals etc."))
self.chkIncludeSupplementaryFiles.setText(_translate("Dialog", "Include supplementary files (Warning: may be slow!)"))
self.chkIncludeUnknownFiles.setText(_translate("Dialog", "Include unknown files"))
self.chkSeparateUnknownFiles.setText(_translate("Dialog", "Put unknown files into \"Unknown\" folder"))
self.chkRetainFoldersForUnknownFiles.setText(_translate("Dialog", "Retain relative folder structure for unknown files"))
self.chkDeleteSourceFiles.setText(_translate("Dialog", "Delete source files after sorting (USE AT YOUR OWN RISK!)"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabFileFiltering), _translate("Dialog", "File filtering"))
self.textBrowser.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:7.8pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">Brought to you by Elia Iliashenko aka Lady Eklipse</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">with the sencere feelings to the ZX Spectrum community.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:8pt;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">For those willing to support ZX Pokemaster, please consider donating:</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">BitCoin: </span><a href=\"bitcoin:1KLBSzFYBpmwwkiG9VhXV6Hfd6YVsnF9D9\"><span style=\" font-size:8pt; text-decoration: underline; color:#0000ff;\">1KLBSzFYBpmwwkiG9VhXV6Hfd6YVsnF9D9</span></a></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">LiteCoin: </span><a href=\"litecoin:LPuLp1dfdZXVdcFdL3ahQCaQwmghxkFuJh\"><span style=\" font-size:8pt; text-decoration: underline; color:#0000ff;\">LPuLp1dfdZXVdcFdL3ahQCaQwmghxkFuJh</span></a></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:8pt;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">Special thanks to:</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">Anna Soloviova for Facebook group logo and moral support;</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">Gerard Sweeney for </span><a href=\"http://www.the-tipshop.co.uk\"><span style=\" font-size:8pt; text-decoration: underline; color:#0000ff;\">www.the-tipshop.co.uk</span></a><span style=\" font-size:8pt;\">;</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">Einar Saukas for ZXDB;</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">Peter Jones for </span><a href=\"https://spectrumcomputing.co.uk\"><span style=\" font-size:8pt; text-decoration: underline; color:#0000ff;\">spectrumcomputing.co.uk</span></a><span style=\" font-size:8pt;\">;</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">D. Kampschulte aka Der Eratosthenes for beta testing;</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">blinkydoos for beta testing;</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">The </span><a href=\"https://www.tosecdev.org\"><span style=\" font-size:8pt; text-decoration: underline; color:#0000ff;\">TOSEC</span></a><span style=\" font-size:8pt;\"> team, especially:</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">gorski, maddog, dziuber, duncantwain, mictlantecuhtle, panda and others.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:8pt;\"><br /></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:8pt;\"><br /></p></body></html>"))
self.btnReadme.setText(_translate("Dialog", "View README"))
self.btnFacebook.setText(_translate("Dialog", "Visit Facebook tech support group"))
self.btnSourceForge.setText(_translate("Dialog", "Check for new version on SourceForge"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabAbout), _translate("Dialog", "Help"))
import res_rc
|
from django.apps import AppConfig
class ComunicadosConfig(AppConfig):
name = 'comunicados'
|
# Part of a temporary placeholder for a CN object
import socketserver, socket
class MYTCPHandler(socketserver.BaseRequestHandler):
def handle(self):
self.data = self.request.recv(1024).strip()
print("{}:{} wrote: ".format(self.client_address[0],self.client_address[1]))
print(self.data)
print(self.request.getsockname())
self.request.sendall(self.data.upper())
myIP = socket.gethostbyname(socket.gethostname())
print(myIP)
sserver = socketserver.TCPServer((myIP,0), MYTCPHandler )
print(sserver.socket.getsockname())
sserver.serve_forever() |
# -*- coding:utf-8 -*-
# This file is adapted from the torchvision library at
# https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
# 2020.6.29-Changed for Modular-NAS search space.
# Huawei Technologies Co., Ltd. <linyunfeng5@huawei.com>
# Copyright 2020 Huawei Technologies Co., Ltd.
"""ResNet architectures."""
from functools import partial
import torch.nn as nn
from modnas.registry.construct import DefaultSlotTraversalConstructor
from modnas.registry.construct import register as register_constructor
from modnas.registry.arch_space import register
from ..ops import Identity
from ..slot import Slot
def conv3x3(in_planes, out_planes, stride=1, groups=1):
"""Return 3x3 convolution with padding."""
return Slot(_chn_in=in_planes, _chn_out=out_planes, _stride=stride, groups=groups)
def conv1x1(in_planes, out_planes, stride=1):
"""Return 1x1 convolution."""
return nn.Sequential(nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(out_planes))
class BasicBlock(nn.Module):
"""Basic Block class."""
expansion = 1
chn_init = 16
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, base_width=None, norm_layer=None):
super(BasicBlock, self).__init__()
del base_width
self.conv1 = conv3x3(inplanes, planes, stride, groups)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=False)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
"""Compute network output."""
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
"""Bottleneck block class."""
expansion = 4
chn_init = 16
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, base_width=None, norm_layer=None):
super(Bottleneck, self).__init__()
width = int(planes * (1. * base_width / self.chn_init)) * groups
self.conv1 = conv1x1(inplanes, width)
self.conv2 = conv3x3(width, width, stride, groups)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.relu = nn.ReLU(inplace=False)
self.downsample = downsample
self.stride = stride
def forward(self, x):
"""Compute network output."""
identity = x
out = self.conv1(x)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
"""ResNet architecture class."""
def __init__(self,
chn_in,
chn,
block,
layers,
n_classes,
zero_init_residual=False,
groups=1,
width_per_group=None,
use_bn=False,
expansion=None):
super(ResNet, self).__init__()
if use_bn:
norm_layer = nn.BatchNorm2d
else:
norm_layer = Identity
self.use_bn = use_bn
if expansion is not None:
block.expansion = expansion
block.chn_init = chn
self.chn = chn
self.groups = groups
self.base_width = chn // groups if width_per_group is None else width_per_group
self.conv1 = self.get_stem(chn_in, chn, nn.BatchNorm2d)
self.layers = nn.Sequential(*[
self._make_layer(block, (2**i) * chn, layers[i], stride=(1 if i == 0 else 2), norm_layer=norm_layer)
for i in range(len(layers))
])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(self.chn, n_classes)
self.zero_init_residual = zero_init_residual
def _make_layer(self, block, planes, blocks, stride=1, norm_layer=None):
downsample = None
if stride != 1 or self.chn != planes * block.expansion:
downsample = nn.Sequential(conv1x1(
self.chn,
planes * block.expansion,
stride,
), )
layers = []
layers.append(block(self.chn, planes, stride, downsample, self.groups, self.base_width, norm_layer=norm_layer))
self.chn = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.chn, planes, 1, None, self.groups, self.base_width, norm_layer=norm_layer))
return nn.Sequential(*layers)
def forward(self, x):
"""Compute network output."""
x = self.conv1(x)
x = self.layers(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
@register_constructor
class ResNetPredefinedConstructor(DefaultSlotTraversalConstructor):
"""ResNet original network constructor."""
def __init__(self, use_bn=False):
super().__init__()
self.use_bn = use_bn
def convert(self, slot):
"""Convert slot to module."""
return nn.Sequential(
nn.Conv2d(slot.chn_in, slot.chn_out, 3, stride=slot.stride, padding=1, bias=False, **slot.kwargs),
nn.BatchNorm2d(slot.chn_out) if self.use_bn else Identity(),
)
class ImageNetResNet(ResNet):
"""ResNet for ImageNet dataset."""
def get_stem(self, chn_in, chn, norm_layer):
"""Return stem layers."""
return nn.Sequential(
nn.Conv2d(chn_in, chn, kernel_size=7, stride=2, padding=3, bias=False),
norm_layer(chn),
nn.ReLU(inplace=False),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
)
class CIFARResNet(ResNet):
"""ResNet for CIFAR dataset."""
def get_stem(self, chn_in, chn, norm_layer):
"""Return stem layers."""
return nn.Sequential(
nn.Conv2d(chn_in, chn, kernel_size=3, stride=1, padding=1, bias=False),
norm_layer(chn),
nn.ReLU(inplace=False),
)
def resnet10(resnet_cls, **kwargs):
"""Construct a ResNet-10 model."""
return resnet_cls(block=BasicBlock, layers=[1, 1, 1, 1], **kwargs)
def resnet18(resnet_cls, **kwargs):
"""Construct a ResNet-18 model."""
return resnet_cls(block=BasicBlock, layers=[2, 2, 2, 2], **kwargs)
def resnet32(resnet_cls, **kwargs):
"""Construct a ResNet-32 model."""
return resnet_cls(block=BasicBlock, layers=[5, 5, 5], **kwargs)
def resnet34(resnet_cls, **kwargs):
"""Construct a ResNet-34 model."""
return resnet_cls(block=BasicBlock, layers=[3, 4, 6, 3], **kwargs)
def resnet50(resnet_cls, **kwargs):
"""Construct a ResNet-50 model."""
return resnet_cls(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs)
def resnet56(resnet_cls, **kwargs):
"""Construct a ResNet-56 model."""
return resnet_cls(block=BasicBlock, layers=[9, 9, 9], **kwargs)
def resnet101(resnet_cls, **kwargs):
"""Construct a ResNet-101 model."""
return resnet_cls(block=Bottleneck, layers=[3, 4, 23, 3], **kwargs)
def resnet110(resnet_cls, **kwargs):
"""Construct a ResNet-110 model."""
return resnet_cls(block=BasicBlock, layers=[18, 18, 18], **kwargs)
def resnet152(resnet_cls, **kwargs):
"""Construct a ResNet-152 model."""
return resnet_cls(block=Bottleneck, layers=[3, 8, 36, 3], **kwargs)
def resnext50_32x4d(resnet_cls, **kwargs):
"""Construct a ResNeXt-50 32x4d model."""
return resnet_cls(block=Bottleneck, layers=[3, 4, 6, 3], groups=32, width_per_group=4, **kwargs)
def resnext101_32x8d(resnet_cls, **kwargs):
"""Construct a ResNeXt-50 32x8d model."""
return resnet_cls(block=Bottleneck, layers=[3, 4, 23, 3], groups=32, width_per_group=8, **kwargs)
def resnet(resnet_cls, bottleneck=False, **kwargs):
"""Construct a ResNet model."""
block = Bottleneck if bottleneck else BasicBlock
return resnet_cls(block=block, **kwargs)
for net_cls in [CIFARResNet, ImageNetResNet]:
name = 'CIFAR-' if net_cls == CIFARResNet else 'ImageNet-'
register(partial(resnet10, net_cls), name + 'ResNet-10')
register(partial(resnet18, net_cls), name + 'ResNet-18')
register(partial(resnet32, net_cls), name + 'ResNet-32')
register(partial(resnet34, net_cls), name + 'ResNet-34')
register(partial(resnet50, net_cls), name + 'ResNet-50')
register(partial(resnet56, net_cls), name + 'ResNet-56')
register(partial(resnet101, net_cls), name + 'ResNet-101')
register(partial(resnet152, net_cls), name + 'ResNet-152')
register(partial(resnext50_32x4d, net_cls), name + 'ResNeXt-50')
register(partial(resnext101_32x8d, net_cls), name + 'ResNeXt-101')
register(partial(resnet, net_cls), name + 'ResNet')
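# Hedged usage sketch (not part of the original file): building a CIFAR ResNet-18
# directly from the factories above. The argument values (3 input channels, 16 stem
# channels, 10 classes) are illustrative assumptions. Note that conv3x3 emits modnas
# Slot placeholders, so a slot constructor such as ResNetPredefinedConstructor still
# has to convert the slots into concrete convolutions before forward() can run.
if __name__ == '__main__':
    _demo_model = resnet18(CIFARResNet, chn_in=3, chn=16, n_classes=10, use_bn=True)
    print(_demo_model)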
|
# Generated by Django 2.2.11 on 2020-03-18 20:11
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('stock_wallet', '0008_auto_20200318_2104'),
]
operations = [
migrations.RenameModel(
old_name='Record',
new_name='PurchasedShare',
),
]
|
# Print N reverse
# https://www.acmicpc.net/problem/2742
print('\n'.join(map(str, range(int(input()), 0, -1))))
|
import collections

class Solution:
    def frequencySort(self, s: str) -> str:
        # Sort characters by (frequency, character), most frequent first.
counter = collections.Counter(s)
return ''.join(sorted(list(s), key=lambda x: (counter[x], x), reverse=True))
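# Hedged example (not part of the original snippet): Solution().frequencySort("tree")
# returns "eetr" with this key, since 'e' occurs twice and the tie between 't' and
# 'r' is broken by the reverse character order.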
|
def saveColors(cube, positions, clusters, orientation):
# White red green
if orientation == 'wr':
# White
for i, facelet in enumerate(clusters[0]):
x = 2 - positions[1][i]
y = 2 - positions[0][i]
if cube[0][x][y] == None:
cube[0][x][y] = [facelet[4]]
else:
cube[0][x][y].append(facelet[4])
# Red
for i, facelet in enumerate(clusters[1]):
x = positions[2][i]
y = positions[3][i]
if cube[3][x][y] == None:
cube[3][x][y] = [facelet[4]]
else:
cube[3][x][y].append(facelet[4])
# Green
for i, facelet in enumerate(clusters[2]):
x = positions[4][i]
y = 2 - positions[5][i]
if cube[2][x][y] == None:
cube[2][x][y] = [facelet[4]]
else:
cube[2][x][y].append(facelet[4])
##########################
# White blue red
if orientation == 'wb':
# White
for i, facelet in enumerate(clusters[0]):
x = positions[0][i]
y = 2 - positions[1][i]
if cube[0][x][y] == None:
cube[0][x][y] = [facelet[4]]
else:
cube[0][x][y].append(facelet[4])
# Blue
for i, facelet in enumerate(clusters[1]):
x = positions[2][i]
y = positions[3][i]
if cube[4][x][y] == None:
cube[4][x][y] = [facelet[4]]
else:
cube[4][x][y].append(facelet[4])
# Red
for i, facelet in enumerate(clusters[2]):
x = positions[4][i]
y = 2 - positions[5][i]
if cube[3][x][y] == None:
cube[3][x][y] = [facelet[4]]
else:
cube[3][x][y].append(facelet[4])
##########################
# White orange blue
if orientation == 'wo':
# White
for i, facelet in enumerate(clusters[0]):
x = positions[1][i]
y = positions[0][i]
if cube[0][x][y] == None:
cube[0][x][y] = [facelet[4]]
else:
cube[0][x][y].append(facelet[4])
# Orange
for i, facelet in enumerate(clusters[1]):
x = positions[2][i]
y = positions[3][i]
if cube[1][x][y] == None:
cube[1][x][y] = [facelet[4]]
else:
cube[1][x][y].append(facelet[4])
# Blue
for i, facelet in enumerate(clusters[2]):
x = positions[4][i]
y = 2 - positions[5][i]
if cube[4][x][y] == None:
cube[4][x][y] = [facelet[4]]
else:
cube[4][x][y].append(facelet[4])
##########################
# White green orange
if orientation == 'wg':
# White
for i, facelet in enumerate(clusters[0]):
x = 2 - positions[0][i]
y = positions[1][i]
if cube[0][x][y] == None:
cube[0][x][y] = [facelet[4]]
else:
cube[0][x][y].append(facelet[4])
# Green
for i, facelet in enumerate(clusters[1]):
x = positions[2][i]
y = positions[3][i]
if cube[2][x][y] == None:
cube[2][x][y] = [facelet[4]]
else:
cube[2][x][y].append(facelet[4])
# Orange
for i, facelet in enumerate(clusters[2]):
x = positions[4][i]
y = 2 - positions[5][i]
if cube[1][x][y] == None:
cube[1][x][y] = [facelet[4]]
else:
cube[1][x][y].append(facelet[4])
##########################
# Yellow red blue
if orientation == 'yr':
# Yellow
for i, facelet in enumerate(clusters[0]):
x = 2 - positions[1][i]
y = 2 - positions[0][i]
if cube[5][x][y] == None:
cube[5][x][y] = [facelet[4]]
else:
cube[5][x][y].append(facelet[4])
# Red
for i, facelet in enumerate(clusters[1]):
x = 2 - positions[2][i]
y = 2 - positions[3][i]
if cube[3][x][y] == None:
cube[3][x][y] = [facelet[4]]
else:
cube[3][x][y].append(facelet[4])
# Blue
for i, facelet in enumerate(clusters[2]):
x = 2 - positions[4][i]
y = positions[5][i]
if cube[4][x][y] == None:
cube[4][x][y] = [facelet[4]]
else:
cube[4][x][y].append(facelet[4])
##########################
# Yellow green red
if orientation == 'yg':
# Yellow
for i, facelet in enumerate(clusters[0]):
x = positions[0][i]
y = 2 - positions[1][i]
if cube[5][x][y] == None:
cube[5][x][y] = [facelet[4]]
else:
cube[5][x][y].append(facelet[4])
# Green
for i, facelet in enumerate(clusters[1]):
x = 2 - positions[2][i]
y = 2 - positions[3][i]
if cube[2][x][y] == None:
cube[2][x][y] = [facelet[4]]
else:
cube[2][x][y].append(facelet[4])
# Red
for i, facelet in enumerate(clusters[2]):
x = 2 - positions[4][i]
y = positions[5][i]
if cube[3][x][y] == None:
cube[3][x][y] = [facelet[4]]
else:
cube[3][x][y].append(facelet[4])
##########################
# Yellow orange green
if orientation == 'yo':
# Yellow
for i, facelet in enumerate(clusters[0]):
x = positions[1][i]
y = positions[0][i]
if cube[5][x][y] == None:
cube[5][x][y] = [facelet[4]]
else:
cube[5][x][y].append(facelet[4])
# Orange
for i, facelet in enumerate(clusters[1]):
x = 2 - positions[2][i]
y = 2 - positions[3][i]
if cube[1][x][y] == None:
cube[1][x][y] = [facelet[4]]
else:
cube[1][x][y].append(facelet[4])
# Green
for i, facelet in enumerate(clusters[2]):
x = 2 - positions[4][i]
y = positions[5][i]
if cube[2][x][y] == None:
cube[2][x][y] = [facelet[4]]
else:
cube[2][x][y].append(facelet[4])
##########################
# Yellow blue orange
if orientation == 'yb':
# Yellow
for i, facelet in enumerate(clusters[0]):
x = 2 - positions[0][i]
y = positions[1][i]
if cube[5][x][y] == None:
cube[5][x][y] = [facelet[4]]
else:
cube[5][x][y].append(facelet[4])
# Blue
for i, facelet in enumerate(clusters[1]):
x = 2 - positions[2][i]
y = 2 - positions[3][i]
if cube[4][x][y] == None:
cube[4][x][y] = [facelet[4]]
else:
cube[4][x][y].append(facelet[4])
# Orange
for i, facelet in enumerate(clusters[2]):
x = 2 - positions[4][i]
y = positions[5][i]
if cube[1][x][y] == None:
cube[1][x][y] = [facelet[4]]
else:
cube[1][x][y].append(facelet[4])
|
#!/usr/bin/env python3
#
# Copyright (C) 2021 Intel Corporation. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
import sys, os
import logging
import subprocess # nosec
import lxml.etree
import argparse
from importlib import import_module
from cpuparser import parse_cpuid, get_online_cpu_ids
script_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(script_dir))
def check_deps():
# Check that the required tools are installed on the system
    BIN_LIST = ['cpuid', 'rdmsr', 'lspci', 'dmidecode', 'blkid', 'stty']
cpuid_min_ver = 20170122
for execute in BIN_LIST:
res = subprocess.Popen("which {}".format(execute),
shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=True)
line = res.stdout.readline().decode('ascii')
if not line:
logging.warning("'{}' cannot be found, please install it!".format(execute))
sys.exit(1)
if execute == 'cpuid':
res = subprocess.Popen("cpuid -v",
shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=True)
line = res.stdout.readline().decode('ascii')
version = line.split()[2]
if int(version) < cpuid_min_ver:
logging.warning("This tool requires CPUID version >= {}".format(cpuid_min_ver))
sys.exit(1)
def native_check():
cpu_ids = get_online_cpu_ids()
cpu_id = cpu_ids.pop(0)
leaf_1 = parse_cpuid(1, 0, cpu_id)
if leaf_1.hypervisor != 0:
logging.warning(f"Board inspector is running inside a Virtual Machine (VM). Running ACRN inside a VM is only" \
"supported under KVM/QEMU. Unexpected results may occur when deviating from that combination.")
def main(board_name, board_xml, args):
# Check that the dependencies are met
check_deps()
# Check if this is native os
native_check()
try:
# First invoke the legacy board parser to create the board XML ...
legacy_parser = os.path.join(script_dir, "legacy", "board_parser.py")
env = { "PYTHONPATH": script_dir, "PATH": os.environ["PATH"] }
subprocess.run([sys.executable, legacy_parser, args.board_name, "--out", board_xml], check=True, env=env)
# ... then load the created board XML and append it with additional data by invoking the extractors.
board_etree = lxml.etree.parse(board_xml)
root_node = board_etree.getroot()
# Clear the whitespaces between adjacent children under the root node
root_node.text = None
for elem in root_node:
elem.tail = None
# Create nodes for each kind of resource
root_node.append(lxml.etree.Element("processors"))
root_node.append(lxml.etree.Element("caches"))
root_node.append(lxml.etree.Element("memory"))
root_node.append(lxml.etree.Element("devices"))
extractors_path = os.path.join(script_dir, "extractors")
extractors = [f for f in os.listdir(extractors_path) if f[:2].isdigit()]
for extractor in sorted(extractors):
module_name = os.path.splitext(extractor)[0]
module = import_module(f"extractors.{module_name}")
if args.basic and getattr(module, "advanced", False):
continue
module.extract(args, board_etree)
# Finally overwrite the output with the updated XML
board_etree.write(board_xml, pretty_print=True)
print("{} saved successfully!".format(board_xml))
except subprocess.CalledProcessError as e:
print(e)
sys.exit(1)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("board_name", help="the name of the board that runs the ACRN hypervisor")
parser.add_argument("--out", help="the name of board info file")
parser.add_argument("--basic", action="store_true", default=False, help="do not extract advanced information such as ACPI namespace")
parser.add_argument("--loglevel", default="warning", help="choose log level, e.g. info, warning or error")
parser.add_argument("--check-device-status", action="store_true", default=False, help="filter out devices whose _STA object evaluates to 0")
args = parser.parse_args()
try:
logging.basicConfig(level=args.loglevel.upper())
except ValueError:
print(f"{args.loglevel} is not a valid log level")
print(f"Valid log levels (non case-sensitive): critical, error, warning, info, debug")
sys.exit(1)
board_xml = args.out if args.out else f"{args.board_name}.xml"
main(args.board_name, board_xml, args)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'lib'))
from splunklib.searchcommands import \
dispatch, StreamingCommand, Configuration, Option, validators
from splunklib.six import PY3, text_type
if PY3:
from email.parser import Parser
from email import policy
else:
from email.header import decode_header, make_header
def decode_mime(encoded_field, unescape_folding=True):
    if unescape_folding and (r'\t' in encoded_field or r'\r\n' in encoded_field):
encoded_field = re.sub(r'(?!<\\)\\r\\n( |\\t)+', ' ', encoded_field) # Unfold escaped CRLF
encoded_field = re.sub(r'(?!<\\)\\r\\n$', '', encoded_field) # Trim trailing CRLF
encoded_field = re.sub(r'(?!<\\)\\t', '\t', encoded_field) # Unescape tab characters
# Decode the field and convert to Unicode
if '=?' in encoded_field:
if PY3:
decoded = Parser(policy=policy.default).parsestr('MIMEDecode: {}'.format(encoded_field)).get('MIMEDecode')
return text_type(decoded)
else:
return text_type(make_header(decode_header(encoded_field)))
return encoded_field
class OutputModes(validators.Validator):
"""Validate modes for output location"""
def __call__(self, value):
if value is None:
return None
valid_modes = ('replace', 'append')
if value.lower() not in valid_modes:
raise ValueError('Mode option must be {}'.format(' or '.join(valid_modes)))
return value.lower()
def format(self, value):
return None if value is None else value.lower()
@Configuration()
class MIMECommand(StreamingCommand):
"""
Decode MIME Encoded content
Use Python 3 if Language specification in Encoded Words is needed
See RFC 2184 for details (https://tools.ietf.org/html/rfc2184)
| mimedecode field=<field> [mode=(replace|append)]
"""
field = Option(name='field', require=True)
mode = Option(name='mode', require=False, default='replace', validate=OutputModes())
suppress_error = Option(name='suppress_error', require=False, default=False, validate=validators.Boolean())
def stream(self, records):
if self.mode == 'append':
dest_field = 'mimedecode'
else:
dest_field = self.field
for record in records:
if self.field not in record:
yield record
continue
field_data = record[self.field]
# Does not support multivalued fields, join into one string
if isinstance(field_data, list):
field_data = ' '.join(field_data)
try:
decoded = decode_mime(field_data)
if '\x00' in decoded:
decoded = decoded.replace('\x00', '\\x00')
record[dest_field] = decoded
except Exception as e:
if not self.suppress_error:
raise e
yield record
dispatch(MIMECommand, module_name=__name__)
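# Hedged example (not part of the original file): decode_mime() handles RFC 2047
# encoded words, so on Python 3 a call like the one below would yield the decoded
# Unicode text; the sample header value is illustrative.
#
#   decode_mime('=?utf-8?B?SGVsbG8gV29ybGQ=?=')   # -> 'Hello World'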
|
# Copyright 2017 the GPflow authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import tensorflow as tf
import numpy as np
from numpy.testing import assert_array_equal
import gpflow
from gpflow.test_util import GPflowTestCase
class TestTensorConverter(GPflowTestCase):
def test_failures(self):
values = ['', 'test', 1., None, object()]
for v in values:
with self.assertRaises(ValueError, msg='Raises at "{}"'.format(v)):
gpflow.core.tensor_converter.TensorConverter.tensor_mode(v)
p = gpflow.core.parentable.Parentable()
p._parent = p
with self.assertRaises(gpflow.GPflowError):
gpflow.core.tensor_converter.TensorConverter.tensor_mode(p)
|
import torch
from torch import optim
from ..models.epic.feature_model import feature_model
def load_model(num_classes, input_frame_length):
#class_counts = (num_classes,352)
class_counts = num_classes
segment_count = input_frame_length
model = feature_model(input_frame_length*10*7, num_layers = 10, num_units = 1024, num_classes = num_classes)
return model
def load_pretrained(model):
return None
def get_optim_policies(model):
return model.parameters() # no policies
ddp_find_unused_parameters = False
|
#!/usr/bin/env python
import click
import youtube
from .description import description
@click.group()
def channel():
'Channel commands'
@channel.command(help='Get your channel stats')
def stats():
me = youtube.get_me()
print()
print('Subscribers: ' + youtube.colors.green + youtube.commify(me['statistics']['subscriberCount']) + youtube.colors.end)
print('Views: ' + youtube.colors.green + youtube.commify(me['statistics']['viewCount']) + youtube.colors.end)
print('Videos: ' + youtube.colors.green + youtube.commify(me['statistics']['videoCount']) + youtube.colors.end)
print()
channel.add_command(description) |
def _prepare_sequence(sequence):
    # Normalize a whitespace-separated sequence: map rare/confusable symbols to
    # canonical ones and replace URL-like tokens with the '_html_' placeholder.
    symbol_map = {
'x': 'х',
'X': 'X',
'y': 'у',
'—': '-',
'“': '«',
'‘': '«',
'”': '»',
'’': '»',
'😆': '😄',
'😊': '😄',
'😑': '😄',
'😔': '😄',
'😉': '😄',
'❗': '😄',
'🤔': '😄',
'😅': '😄',
'⚓': '😄',
'ε': 'α',
'ζ': 'α',
'η': 'α',
'μ': 'α',
'δ': 'α',
'λ': 'α',
'ν': 'α',
'β': 'α',
'γ': 'α',
'と': '尋',
'の': '尋',
'神': '尋',
'隠': '尋',
'し': '尋',
'è': 'e',
'ĕ': 'e',
'ç': 'c',
'ҫ': 'c',
'ё': 'е',
'Ё': 'Е',
u'ú': 'u',
u'Î': 'I',
u'Ç': 'C',
u'Ҫ': 'C',
'£': '$',
'₽': '$',
'ӑ': 'a',
'Ă': 'A',
}
result = []
for token in sequence.split():
for key, value in symbol_map.items():
token = token.replace(key, value)
for keyword in ['www', 'http']:
if keyword in token:
token = '_html_'
result.append(token)
return ' '.join(result)
|
# base class
class Base(object):
def __init__(self, x):
self.x = x
class Derived(Base):
def __init__(self, x, y):
# sending the x value up to base class
super(Derived, self).__init__(x)
self.y = y
def printD(self):
print(self.x + " " + self.y)
d = Derived("hi", "there")
d.printD() # prints => hi there
|
from django.urls import path
from . import views
app_name = 'polls'
# urlpatterns = [
# path('',views.index,name='index'),#/polls/
# path('<int:question_id>/',views.detail,name='detail'),#/polls/5/
# path('<int:question_id>/results/',views.results,name='results'),#/polls/5/results/
# path('<int:question_id>/vote',views.vote,name='vote'),#/polls/5/vote/
# ]
urlpatterns = [
path('', views.IndexView.as_view(), name='index'),
path('<int:pk>/', views.DetailView.as_view(), name='detail'),
path('<int:pk>/results/', views.ResultsView.as_view(), name='results'),
path('<int:question_id>/vote/', views.vote, name='vote'),
]
#note: question_id (or anything captured in <> in the URL) is passed to the view as an argument; see the sketch below
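# Hedged illustration (not part of the original file): the <int:question_id>
# converter above means the captured integer is passed to the view as a keyword
# argument, so the matching function-based view has a signature along the lines of:
#
#   def vote(request, question_id):
#       ...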
|
# Copyright 2020-2021 Dynatrace LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from typing import List
from loguru import logger
from mapz import Mapz
class QueryParser:
"""SQL file parser.
    Parses annotated SQL queries from the provided text sources. This class should not
generally be used by itself. :class:`~dbload.context.Context` already
invokes this parser during its initialization phase.
QueryParser provides a single static method :meth:`~.QueryParser.parse`
that performs the parsing. There is no need to create the QueryParser
object itself.
Examples:
Use parser::
from dbload import QueryParser
parsed = QueryParser.parse(["./queries.sql"])
"""
@staticmethod
def parse(sources: List[str] = []) -> Mapz:
"""Parse text sources with annotated SQL queries.
Args:
sources (List[str]): List of text strings with
annotated SQL queries to parse.
Parser reads each string line by line (split by ``\\n`` symbol)
and looks for annotated SQL queries in it. It does not verify
the validity of SQL syntax.
Parser understands annotation comments in SQL file that start
with ``"--"`` comment identifier and contain ``name:`` tag in them.
Returns:
Mapz: Dictionary of parsed queries.
Raises:
SqlQueriesFileEmptyError: when provided text file with annotated
SQL queries is empty.
"""
parsed = Mapz()
for source in sources:
QueryParser._parse_queries(source, parsed)
return parsed
@staticmethod
def _parse_queries(source: str, parsed: Mapz) -> None:
name_regex = re.compile(r".*name:\s*([\w]+)")
option_regex = re.compile(r"option:\s*([\w]+)")
# re.findall(
# r"scenario:\s*([\w-]+)(?:\[([-\d]+)\])?",
# "--name:disi, scenario: sample[1], scenario: teardown[-90], scenario: name",
# )
# >>> [('sample', '1'), ('teardown', '-90'), ('name', '')]
scenario_regex = re.compile(r"scenario:\s*([\w-]+)(?:\[([-\d]+)\])?")
# After reading whole file, process the lines
# one by one, assembling queries one by one
current_query_name = None
# current_query_kind = None
current_query_content = ""
collected = Mapz()
lines = source.split("\n")
for line in lines:
line = line.strip("\n").replace("\t", " ").replace("\r", "")
if "--" in line:
# Detect start of the new query
nm = name_regex.match(line)
if nm:
                    # If a query is already being tracked, store its accumulated content first
if current_query_name:
collected[
current_query_name
].text = current_query_content
current_query_content = ""
# Detect if there are any options specified in the query
options = option_regex.findall(line)
                    # Detect if the query explicitly wants to be called
# within a certain scenario
scenarios = scenario_regex.findall(line)
scenarios = [
(n, int(order) if order else 0)
for n, order in scenarios
]
# Start new context for tracking the new query
current_query_name = nm.group(1)
current_query_content = ""
collected[current_query_name] = Mapz(
# kind=current_query_kind,
options=options,
scenarios=scenarios,
text="",
)
else:
current_query_content += line
# Add last "unseparrated" query to list
collected[current_query_name].text = current_query_content
# Remove queries without `text`.
# Happens when you comment out the whole half of the queries file
# in IDE and `-- name:` comment gets parsed but the text contains
# nothing due to it being fully commented out
for k, v in collected.items():
if v.text:
parsed[k] = v
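# Hedged usage sketch (not part of the original file): parsing a small annotated
# SQL string with the "-- name:" / "option:" / "scenario:" tags described above.
# The query names and tag values are illustrative.
if __name__ == "__main__":
    _sample_sql = (
        "-- name: create_table, option: ddl\n"
        "CREATE TABLE users (id INT);\n"
        "-- name: count_users, scenario: sample[1]\n"
        "SELECT COUNT(*) FROM users;\n"
    )
    _parsed = QueryParser.parse([_sample_sql])
    print(list(_parsed.keys()))  # expected: ['create_table', 'count_users']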
|
from flask import render_template
from app import auth
from app import app
from flask import render_template,flash,url_for,redirect,request
from forms import RegistrationForm,LoginForm
from flask_login import login_required
# @auth.route('auth/login')
# def login():
# return render_template('home.html')
# @auth.route("/auth/login", methods=['GET','POST'])
# @login_required
# def login():
# form = LoginForm()
# if form.validate_on_submit():
# return redirect(url_for('main.home'))
# return render_template('auth/login.html' , form = form)
# @auth.route("/auth/register" ,methods=['GET','POST'])
# def Register():
# form = RegistrationForm()
# if form.validate_on_submit():
# flash(f'Account created for {form.username.data}','success')
# return redirect(url_for('main.home'))
# return render_template('auth/register.html', form = form) |
''' 015 Write a program that asks how many km a rented car was driven and for how many days it was
rented. Compute the price to pay, knowing that the car costs R$ 60 per day plus R$ 0.15 per km driven '''
d = int(input('How many days was it rented? '))
t = d * 60  # R$ 60 per day
km = float(input('How many km were driven? '))
kmt = km * 0.15
print(f'The total to pay will be: R$ {t + kmt:.2f}')
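# Worked example (not part of the original exercise): 3 days and 100 km driven
# gives 3 * 60 + 100 * 0.15 = 180 + 15 = R$ 195.00.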
|
import random
import string
import pytest
import quanguru.classes.base as qbase #pylint: disable=import-error
def randString(N):
return str(''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(N)))
strings = [randString(random.randint(1, 10)) for _ in range(random.randint(4, 10))]
@pytest.mark.parametrize("cls", [qbase.qBase])
def test_auxDictAndObj(cls):
# create internal and external instances and add items to aux dictionary
for i in range(2):
inst1 = cls(_internal=bool(i))
inst2 = cls(_internal=bool(i))
# 1) test the auxiliary dictionary
# verify that the dictionaries are the same
assert inst1.aux is inst2.aux
assert inst1.aux is cls._auxiliary
# add items
inst1.aux[strings[0]] = strings[1]
inst2.aux[strings[2]] = strings[3]
# verify that the dictionaries are still the same
assert inst1.aux is inst2.aux
assert inst1.aux is cls._auxiliary
        # verify that the cross calls work (duh! the same dictionary)
assert inst2.aux[strings[0]] == strings[1]
assert inst1.aux[strings[2]] == strings[3]
# test the setter, which updates the dict
inst1.aux = {1:1.1, 2:2.2}
# verify that the dictionaries are still the same
assert inst1.aux is inst2.aux
assert inst1.aux is cls._auxiliary
        # verify that the cross calls work (duh! the same dictionary)
assert inst2.aux[1] == 1.1
assert inst2.aux[2] == 2.2
# 2) test the auxiliary object
# verify that the objects are the same
assert inst1.auxObj is inst2.auxObj
assert inst1.auxObj is cls._auxiliaryObj
# add attributes
setattr(inst1.auxObj, strings[0], strings[1])
setattr(inst2.auxObj, strings[2], strings[3])
# verify that the objects are still the same
assert inst1.auxObj is inst2.auxObj
assert inst1.auxObj is cls._auxiliaryObj
        # verify that the cross calls work (duh! the same object)
        assert getattr(inst2.auxObj, strings[0]) == strings[1]
        assert getattr(inst1.auxObj, strings[2]) == strings[3]
@pytest.mark.parametrize("cls", [qbase.qBase])
def test_superSysProperty(cls):
# create internal and external instances and add items to aux dictionary
for i in range(2):
inst1 = cls(_internal=bool(i))
inst2 = cls(_internal=bool(i))
# make sure default None is returned by the getter
assert inst1.superSys is None
assert inst2.superSys is None
# set-get and assert
inst1.superSys = inst2
inst2.superSys = inst2
assert inst1.superSys is inst2
assert inst2.superSys is inst2
# change by the setter and assert by the getter
inst1.superSys = inst1
inst2.superSys = inst1
assert inst1.superSys is inst1
assert inst2.superSys is inst1
def assertRemoved(remOb, mainOb):
assert remOb not in mainOb.subSys.values()
with pytest.raises(KeyError):
mainOb.subSys[remOb.name]
with pytest.raises(KeyError):
mainOb.subSys[remOb.alias[0]]
@pytest.mark.parametrize("cls", [qbase.qBase])
def test_subSysAddRemoveResetMethods(cls):
# create internal and external instances and add items to aux dictionary
for i in range(2):
# create a single object and a list object to be used for subSys
inst1 = cls(_internal=bool(i))
insts = [cls(_internal=bool(i)) for x in range(8)]
# create bunch of strings to be used as alias
strings2 = [randString(random.randint(1, 10)) for _ in range(12)]
# add the first element and assert that its in the subSys
inst1.addSubSys(insts[0])
assert insts[0] in inst1.subSys.values()
# assert that you can get it by its name
assert inst1.subSys[insts[0].name] is insts[0]
# add alias and assert that you can get it by any of the alias
insts[0].alias = strings2[0:1]
assert all([inst1.subSys[key] is insts[0] for key in strings2[0:1]])
# add a subSys by giving the class (instead of an instance)
newIns = inst1.addSubSys(cls)
assert newIns not in [inst1, *insts]
# assert that you can get it by its name
assert inst1.subSys[newIns.name] is newIns
# add alias and assert that you can get it by any of the alias
newIns.alias = strings2[2:3]
assert all([inst1.subSys[key] is newIns for key in strings2[2:3]])
# add a subSys by its name
retIns = inst1.addSubSys(insts[1].name)
assert retIns is insts[1]
assert insts[1] in inst1.subSys.values()
assert inst1.subSys[insts[1].name] is retIns
insts[1].alias = strings2[4:5]
assert all([inst1.subSys[key] is retIns for key in strings2[4:5]])
        # add a subSys by its alias
assert insts[2] not in inst1.subSys.values()
insts[2].alias = strings2[6:7]
retIns = inst1.addSubSys(strings2[6])
assert retIns is insts[2]
assert inst1.subSys[insts[2].name] is retIns
assert inst1.subSys[insts[2].alias[0]] is retIns
# add a subSys by its name.name
retIns = inst1.addSubSys(insts[3].name.name)
assert retIns is insts[3]
assert insts[3] in inst1.subSys.values()
assert inst1.subSys[insts[3].name] is retIns
insts[3].alias = strings2[8:9]
assert all([inst1.subSys[key] is retIns for key in strings2[8:9]])
# add a list of subSys
insts[4].alias = strings2[10]
inst1.addSubSys([insts[4].alias[0], insts[5].name.name])
assert insts[4] in inst1.subSys.values()
assert insts[5] in inst1.subSys.values()
# add a tuple of subSys
insts[6].alias = strings2[11]
inst1.addSubSys((insts[6].alias[0], insts[7].name.name))
assert insts[6] in inst1.subSys.values()
assert insts[7] in inst1.subSys.values()
# remove with the object
inst1.removeSubSys(insts[0])
assertRemoved(insts[0],inst1)
# remove with name
inst1.removeSubSys(insts[1].name)
assertRemoved(insts[1],inst1)
# remove with alias
inst1.removeSubSys(insts[2].alias[0])
assertRemoved(insts[2],inst1)
# remove with name.name
inst1.removeSubSys(insts[3].name.name)
assertRemoved(insts[3],inst1)
# remove a list of subSys
inst1.removeSubSys([insts[4].name, insts[6]])
assertRemoved(insts[4],inst1)
assertRemoved(insts[6],inst1)
# resetSubSys subSys
assert len(inst1.subSys.values()) > 0
inst1.resetSubSys()
assert len(inst1.subSys.values()) == 0
|
from google.protobuf.json_format import MessageToDict
from grpc.messages import data_pb2
import config
from authentication.auth import ProtectedResource
from flask import request, abort
import os
from werkzeug.utils import secure_filename
from filters.data_filter import DataFilter
from utils.data_reader import get_data
from filters.time_period_grouper import group_segment_data_by_time_period
from authentication import auth
from orders.segment_order import sort_data_by_order_type
class Data(ProtectedResource):
def get(self, filename):
try:
page = int(request.args.get('page')) if request.args.get('page') else 0
per_page = int(request.args.get('per_page')) if request.args.get('per_page') else config.per_page
except ValueError:
abort(400, 'page and per_page values should be numbers')
data_filter = DataFilter(request.args)
filename = secure_filename(filename)
data, countries, devices = get_data(os.path.join(config.UPLOAD_FOLDER, filename))
time_period = request.args.get('time_period') if request.args.get('time_period') else config.time_period
grouped_data = group_segment_data_by_time_period(data, time_period)
filtered_data = data_filter.filter(grouped_data)
order_type = request.args.get('order_by') if request.args.get('order_by') else config.order_by
ordered_data = sort_data_by_order_type(list(filtered_data.values()), order_type)
response = data_pb2.SegmentedTimelineDataResponse()
response.data.extend(ordered_data[(page * per_page):(page * per_page + per_page)])
response.countries.extend(countries)
response.devices.extend(devices)
return MessageToDict(response)
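# Hedged usage note (not part of the original file): the handler above reads its
# behaviour from query parameters, so a request against whatever route this resource
# is registered on might look like:
#
#   GET /<data-route>/<filename>?page=0&per_page=50&time_period=<period>&order_by=<order>&<filter params>
#
# page/per_page control pagination, time_period the grouping, order_by the sort
# order, and the remaining args are consumed by DataFilter; the route path and
# concrete values are assumptions.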
|
__all__ = ['baz', 'foo', 'Bar']
def foo():
pass
class Bar:
pass
def baz():
pass
def qux():
pass
class Quux:
pass
def foobar():
pass
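# Hedged illustration (not part of the original file): with the __all__ list above,
# a wildcard import of this module only binds the listed names:
#
#   from this_module import *   # assumed module name; brings in baz, foo and Bar
#   # qux, Quux and foobar remain reachable only via an explicit import.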
|
import random
from django.views.generic.base import TemplateView
from honest_ab.api import get_experiment_bin
from honest_ab.binning_functions.base import HONEST_AB_COOKIE_KEY, HONEST_AB_SKIP_TYPE
class FakeObject(object):
def __init__(self, pk=None):
self.pk = pk if pk else random.randint(1, 1000000)
class BasicView(TemplateView):
template_name = 'tested_app/index.html'
def get_context_data(self, **kwargs):
"""
Sample context data.
"""
context = get_experiment_bin(FakeObject(), 'city', self.request)
# Use the results to set more context:
if context[HONEST_AB_COOKIE_KEY]['city'] == HONEST_AB_SKIP_TYPE:
# This means no test was set up.
context['skip'] = True
elif context[HONEST_AB_COOKIE_KEY]['city'] == '0':
# Toronto!
context['baseball'] = 'Blue Jays'
context['hockey'] = 'Maple Leafs'
context['basketball'] = 'Raptors'
elif context[HONEST_AB_COOKIE_KEY]['city'] == '1':
# Seattle!
context['baseball'] = 'Mariners'
context['hockey'] = None
context['basketball'] = 'Supersonics (well, they were...)'
return context
|
class Pet:
def __init__(self, name, age):
self.name = name
self.age = age
def show(self):
print(f'Hello! I am {self.name} and I am {self.age} years old!')
def speak(self):
        print("I don't know what to say!")
class Cat(Pet):
def __init__(self, name, age, color):
super().__init__(name, age)
self.color = color
def speak(self):
print('Meow')
def show(self):
print(f'Hello! I am {self.name} and I am {self.age} years old and I am {self.color}')
class Dog(Pet):
def speak(self):
print('Bark')
p = Pet('Tim', 19)
p.speak()
c = Cat('Bill', 34, 'Brown')
c.show()
d = Dog('Jill', 25)
d.speak()
|
import mss
import mss.tools
from window import quicktime_values
with mss.mss() as sct:
#1334x750
    # left=x, top=y of the image origin, quadrant 4 of the plane
monitor = {"top": 400, "left": 800, "width": 375, "height": 200}
import time
import cv2
import mss
import numpy
with mss.mss() as sct:
# Part of the screen to capture
#quicktime_values = [xSize, ySize, xPos, yPos]
quicktime_values = quicktime_values().rstrip().split(', ')
quicktime_values = list(map(int, quicktime_values))
monitor = {"top": quicktime_values[3],
"left": quicktime_values[2],
"width": quicktime_values[0],
"height": quicktime_values[1]}
while "Screen capturing":
last_time = time.time()
# Get raw pixels from the screen, save it to a Numpy array
img = numpy.array(sct.grab(monitor))
# Display the picture
cv2.imshow("OpenCV/Numpy normal", cv2.resize(img, (quicktime_values[0]//2, quicktime_values[1]//2)) )
#if img[10, 20]
print(quicktime_values)
# Press "q" to quit
if cv2.waitKey(25) & 0xFF == ord("q"):
cv2.destroyAllWindows()
break
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('chipyapp', '0003_auto_20151104_0221'),
]
operations = [
migrations.RenameField(
model_name='datatype',
old_name='cdv',
new_name='name',
),
migrations.RemoveField(
model_name='datatype',
name='hsd',
),
migrations.RemoveField(
model_name='datatype',
name='video',
),
migrations.RemoveField(
model_name='datatype',
name='xhs',
),
]
|
import can
can0 = can.ThreadSafeBus(channel = 'vcan0', bustype = 'socketcan_ctypes')
while(True):
val = input('Button: ')
msg = can.Message(arbitration_id=0x3C4, data=[], is_extended_id=False)
if val == 'vol-up':
msg = can.Message(arbitration_id=0x3C4, data=[0x80, 0x00], is_extended_id=False)
elif val == 'vol-down':
msg = can.Message(arbitration_id=0x3C4, data=[0x40, 0x00], is_extended_id=False)
elif val == 'right':
msg = can.Message(arbitration_id=0x3C4, data=[0x10, 0x00], is_extended_id=False)
elif val == 'left':
msg = can.Message(arbitration_id=0x3C4, data=[0x08, 0x00], is_extended_id=False)
elif val == 'down':
msg = can.Message(arbitration_id=0x3C4, data=[0x00, 0x01], is_extended_id=False)
elif val == 'up':
msg = can.Message(arbitration_id=0x3C4, data=[0x00, 0x02], is_extended_id=False)
elif val == 'src':
msg = can.Message(arbitration_id=0x3C4, data=[0x04, 0x00], is_extended_id=False)
elif val == 'win':
msg = can.Message(arbitration_id=0x3C4, data=[0x00, 0x40], is_extended_id=False)
elif val == 'mute':
msg = can.Message(arbitration_id=0x3C4, data=[0x20, 0x00], is_extended_id=False)
elif val == 'menu':
msg = can.Message(arbitration_id=0x3C4, data=[0x00, 0x80], is_extended_id=False)
else:
print('Unknown button')
continue
can0.send(msg) |
#
# Git-Fork Repositories plugin
# More info at https://github.com/fran-f/keypirinha-git-fork
#
# pylint: disable=C, import-error, relative-beyond-top-level
import keypirinha as kp
import keypirinha_util as kpu
from .lib.ForkWrapper import ForkWrapper
class Repositories(kp.Plugin):
"""
Add catalog items for all the repositories known to Git Fork.
"""
ACTION_FORK = {
'name': 'fork.open',
'label': 'Open in Fork',
        'short_desc': 'Open the repository in a new Fork window'
}
ACTION_SHELL = {
'name': 'fork.shell',
'label': 'Open shell',
'short_desc': 'Open a shell in the repository root'
}
ACTION_EXPLORER = {
'name': 'fork.explorer',
'label': 'Open location',
'short_desc': 'Open the repository root in Explorer'
}
fork = None
default_icon = None
repository_prefix = None
def on_start(self):
self._load_settings()
self._set_up()
actions = [self.ACTION_FORK, self.ACTION_SHELL, self.ACTION_EXPLORER]
self.set_actions(
kp.ItemCategory.REFERENCE,
[self.create_action(**action) for action in actions]
)
def on_events(self, flags):
if flags & kp.Events.PACKCONFIG:
self._clean_up()
self._load_settings()
self._set_up()
def on_catalog(self):
if not self.fork:
return
self.set_catalog([
self._item_for_repository(r) for r in self.fork.repositories()
])
def on_execute(self, item, action):
if action is None or action.name() == self.ACTION_FORK['name']:
self.fork.openrepository(item.target())
return
if action.name() == self.ACTION_SHELL['name']:
self.fork.openshelltool(item.target())
return
if action.name() == self.ACTION_EXPLORER['name']:
kpu.explore_file(item.target())
return
def on_suggest(self, user_input, items_chain):
pass
def _load_settings(self):
"""
Load the configuration file and extract settings to local variables.
"""
settings = PluginSettings(self.load_settings())
self.fork = ForkWrapper(settings.forkdir())
self.repository_prefix = settings.repositoryprefix()
def _set_up(self):
"""
Initialise the plugin based on the extracted configuration.
"""
self.default_icon = self.load_icon(self.fork.icon())
self.set_default_icon(self.default_icon)
def _clean_up(self):
"""
Clean up any resources, to start anew with fresh configuration.
"""
if self.default_icon:
self.default_icon.free()
self.default_icon = None
def _item_for_repository(self, repository):
"""
Return a catalog item for a repository.
"""
return self.create_item(
category = kp.ItemCategory.REFERENCE,
label = self.repository_prefix + repository.get("Name"),
short_desc = "Open repository in %s" % repository.get("Path"),
target = repository.get("Path"),
args_hint = kp.ItemArgsHint.FORBIDDEN,
hit_hint = kp.ItemHitHint.IGNORE
)
class PluginSettings:
def __init__(self, settings):
self._settings = settings
def forkdir(self):
return self._settings.get(
key = "install_dir",
section = "fork",
fallback = ForkWrapper.defaultdir(),
unquote = True
)
def repositoryprefix(self):
return self._settings.get(
key = "repository_prefix",
section = "items",
fallback = "Fork: ",
unquote = True
)
|
import time
import math
from numba import jit, njit
from contextlib import contextmanager
@njit(cache=True)
def square(x):
return x ** 2
@jit(nopython=True, cache=True)
def pythagorean_theorem(x, y):
return math.sqrt(square(x) + square(y))
def pythagorus(x, y):
return math.sqrt(x**2 + y**2)
@contextmanager
def timing(description: str) -> None:
start = time.perf_counter()
yield
end = time.perf_counter()
print(f"{description}: {end-start}")
def main():
with timing("Python"):
pythagorus(5, 5)
with timing("Numba "):
pythagorean_theorem(5, 5)
with timing("Numba "):
pythagorean_theorem(5, 5)
if __name__=="__main__":
main()
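# Note (not part of the original file): the first timed Numba call includes JIT
# compilation (at least on a cold cache, even with cache=True), which is why
# pythagorean_theorem is timed twice above; the second measurement reflects the
# cost of the already-compiled function.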
|
from django.urls import path
from .views import *
urlpatterns=[
path('product_detail/<int:product_id>/',ProductDetailView.as_view(),name='product_detail'),
path('score/<int:product_id>/',RatePostView.as_view(),name='score')
] |
import os
import pandas as pd
from activitysim.core import inject
from activitysim.core import pipeline
from activitysim.core.config import setting
from .utils import setup_working_dir
def test_example():
setup_working_dir('example')
# importing asimtbm also registers injectibles
import asimtbm
asimtbm.config_logger()
trace_od = inject.get_injectable('trace_od')
assert trace_od == {'o': 3, 'd': 32}
    output_files = os.listdir(os.path.join(os.getcwd(), 'output'))
    output_files.remove('.gitignore')
    assert not output_files
models = setting('models')
expected_models = [
'destination_choice',
'balance_trips',
'write_data_dictionary',
'write_tables',
]
assert models == expected_models
pipeline.run(models)
# tables will no longer be available after pipeline is closed
pipeline.close_pipeline()
output_files = os.listdir(os.path.join(os.getcwd(), 'output'))
final_output_files = [
'final_od_table.csv',
'final_trips.csv',
'final_zone_summary.csv',
]
trace_output_files = [
'trace.segment_od_hbwh.csv',
'trace.segment_od_hbwl.csv',
'trace.segment_od_hbwm.csv',
]
other_output_files = [
'asimtbm.log',
'data_dict.txt',
'pipeline.h5',
'trace.trips_unbalanced.csv',
'trace.trips_balanced.csv',
]
for file in final_output_files + trace_output_files + other_output_files:
assert file in output_files
expected_trace_header = [
'orig', 'dest', 'impedance', 'dest_park_cost', 'size',
'no_size', 'utils', 'sum_utils', 'probs',
]
for file in trace_output_files:
trace_df = pd.read_csv(os.path.join(os.getcwd(), 'output', file))
assert trace_df.shape == (1, len(expected_trace_header))
assert sorted(trace_df.columns.values) == sorted(expected_trace_header)
assert trace_df.orig[0] == trace_od['o']
assert trace_df.dest[0] == trace_od['d']
|
import pytest
from easytxt import parse_text
from tests.factory import features_samples, sentences_samples
from tests.parsers import test_table
features_test_text = "- color: Black - material: Aluminium"
test_text_sentences = "first sentence! - second sentence. Third"
test_text_sentences_v2 = (
"Features: <ul><li>* FaceTime HD camera </li>"
"<li>* Multi-touch <b>trackpad</b>. </li></ul>"
)
test_text_sentences_v3 = "First txt. Second txt. 3 Txt. FOUR txt."
@pytest.mark.parametrize("test_data, result", sentences_samples.english)
def test_parse_text_to_sentences(test_data, result):
assert parse_text(test_data).sentences == result
@pytest.mark.parametrize("test_data, result", sentences_samples.english)
def test_parse_text_to_text(test_data, result):
assert parse_text(test_data).text == " ".join(result)
@pytest.mark.parametrize("test_data, result", features_samples.english)
def test_parse_text_to_features(test_data, result):
assert parse_text(test_data).features == result
def test_parse_text_to_features_dict():
expected_results = {"Color": "Black", "Material": "Aluminium"}
assert parse_text(features_test_text).features_dict == expected_results
@pytest.mark.parametrize(
"feature, test_data, result",
[
("color", features_test_text, "Black"),
("Col", features_test_text, "Black"),
("Size", features_test_text, None),
],
)
def test_parse_text_to_feature(feature, test_data, result):
assert parse_text(test_data).feature(feature) == result
@pytest.mark.parametrize(
"test_data, feature, result",
[
(features_test_text, "color", "Black"),
(features_test_text, "col", None),
(features_test_text, "Size", None),
],
)
def test_parse_text_to_feature_exact(test_data, feature, result):
assert parse_text(test_data).feature_exact(feature) == result
@pytest.mark.parametrize(
"test_data, result",
[
("Some sentence - color: Black", ["Some sentence.", ("Color", "Black")]),
],
)
def test_parse_text_to_raw_features(test_data, result):
assert parse_text(test_data).raw_features == result
@pytest.mark.parametrize(
"test_data, feature_split_keys, result",
[
(
"Color: Black. Material; Aluminium",
[":", ";"],
[("Color", "Black"), ("Material", "Aluminium")],
),
],
)
def test_parse_text_feature_split_keys(test_data, feature_split_keys, result):
tp = parse_text(test_data, feature_split_keys=feature_split_keys)
assert tp.features == result
@pytest.mark.parametrize(
"test_data, allow, result",
[
(test_text_sentences, ["first"], ["First sentence!"]),
(test_text_sentences, ["first", "third"], ["First sentence!", "Third."]),
],
)
def test_parse_text_allow(test_data, allow, result):
tp = parse_text(test_data, allow=allow)
assert list(tp) == result
@pytest.mark.parametrize(
"test_data, callow, result",
[
(test_text_sentences, ["First", "third"], ["First sentence!"]),
],
)
def test_parse_text_case_sensitive_allow(test_data, callow, result):
tp = parse_text(test_data, callow=callow)
assert list(tp) == result
@pytest.mark.parametrize(
"test_data, from_allow, result",
[
(test_text_sentences_v3, ["second"], "Second txt. 3 Txt. FOUR txt."),
],
)
def test_parse_text_from_allow(test_data, from_allow, result):
tp = parse_text(test_data, from_allow=from_allow)
assert str(tp) == result
@pytest.mark.parametrize(
"test_data, from_callow, result",
[
(test_text_sentences_v3, ["Second"], "Second txt. 3 Txt. FOUR txt."),
# Test case with a wrong case
(test_text_sentences_v3, ["second"], ""),
],
)
def test_parse_text_from_callow(test_data, from_callow, result):
tp = parse_text(test_data, from_callow=from_callow)
assert str(tp) == result
@pytest.mark.parametrize(
"test_data, to_allow, result",
[
(test_text_sentences_v3, ["four"], "First txt. Second txt. 3 Txt."),
],
)
def test_parse_text_to_allow(test_data, to_allow, result):
tp = parse_text(test_data, to_allow=to_allow)
assert str(tp) == result
@pytest.mark.parametrize(
"test_data, to_callow, result",
[
(test_text_sentences_v3, ["FOUR"], "First txt. Second txt. 3 Txt."),
# Test case with a wrong case
(test_text_sentences_v3, ["four"], "First txt. Second txt. 3 Txt. FOUR txt."),
],
)
def test_parse_text_to_callow(test_data, to_callow, result):
    tp = parse_text(test_data, to_callow=to_callow)
assert str(tp) == result
@pytest.mark.parametrize(
"test_data, deny, result",
[
(test_text_sentences, ["second", "third"], ["First sentence!"]),
(test_text_sentences, ["secon"], ["First sentence!", "Third."]),
],
)
def test_parse_text_deny(test_data, deny, result):
tp = parse_text(test_data, deny=deny)
assert tp.sentences == result
@pytest.mark.parametrize(
"test_data, cdeny, result",
[
(test_text_sentences, ["first", "Second", "Thir"], ["First sentence!"]),
],
)
def test_parse_text_case_sensitive_deny(test_data, cdeny, result):
tp = parse_text(test_data, cdeny=cdeny)
assert tp.sentences == result
def test_parse_text_capitalize_false():
tp = parse_text(test_text_sentences, capitalize=False)
assert tp.text == "first sentence! second sentence. Third."
def test_parse_text_title_true():
tp = parse_text(test_text_sentences, title=True)
assert tp.text == "First Sentence! Second Sentence. Third."
def test_parse_text_lowercase_true():
tp = parse_text(test_text_sentences, lowercase=True)
assert tp.text == "first sentence! second sentence. third."
def test_parse_text_uppercase_true():
tp = parse_text(test_text_sentences, uppercase=True)
assert tp.text == "FIRST SENTENCE! SECOND SENTENCE. THIRD."
def test_parse_text_sentence_separator():
tp = parse_text(test_text_sentences, sentence_separator=" | ")
assert tp.text == "First sentence! | Second sentence. | Third."
def test_parse_text_replace_keys():
tp = parse_text(
test_text_sentences,
replace_keys=[("third", "Third sentence"), ("ence!", "ence?")],
)
assert tp.text == "First sentence? Second sentence. Third sentence."
def test_parse_text_remove_keys():
tp = parse_text(test_text_sentences, remove_keys=["sentence", "!"])
assert tp.text == "First. Second. Third."
def test_parse_text_css_query():
tp = parse_text(test_text_sentences_v2, css_query="ul")
expected_sentences = ["FaceTime HD camera.", "Multi-touch trackpad."]
assert tp.sentences == expected_sentences
tp = parse_text(test_text_sentences_v2, css_query="ul li:eq(0)")
assert tp.sentences == ["FaceTime HD camera."]
def test_parse_text_exclude_css():
tp = parse_text(test_text_sentences_v2, exclude_css="ul")
assert tp.sentences == ["Features:"]
tp = parse_text(test_text_sentences_v2, css_query="ul", exclude_css="li:last")
assert tp.sentences == ["FaceTime HD camera."]
def test_parser_text_merge_sentences_default():
tp = parse_text(test_text_sentences_v2)
expected_sentences = ["Features: FaceTime HD camera.", "Multi-touch trackpad."]
assert tp.sentences == expected_sentences
def test_parser_text_merge_sentences_false():
tp = parse_text(test_text_sentences_v2, merge_sentences=False)
expected_sentences = ["Features:", "FaceTime HD camera.", "Multi-touch trackpad."]
assert tp.sentences == expected_sentences
def test_parser_text_split_inline_breaks_false():
test_text = "- notebook - ultrabook"
tp = parse_text(test_text, split_inline_breaks=False)
assert tp.sentences == ["- notebook - ultrabook."]
# Default without custom inline_breaks
tp = parse_text(test_text)
assert tp.sentences == ["Notebook.", "Ultrabook."]
def test_parser_custom_inline_breaks():
test_text = "notebook > ultrabook"
tp = parse_text(test_text, inline_breaks=[">"])
assert tp.sentences == ["Notebook.", "Ultrabook."]
# Default without custom inline_breaks
tp = parse_text(test_text)
assert tp.sentences == ["Notebook > ultrabook."]
def test_parse_text_stop_key():
test_text = "* First feature * second feature?"
# First lets test default stop key '.'
tp = parse_text(test_text)
assert tp.sentences == ["First feature.", "Second feature?"]
# Lets test custom stop key '!'
tp = parse_text(test_text, stop_key="!")
assert tp.sentences == ["First feature!", "Second feature?"]
def test_parse_text_stop_keys_split():
test_text = "First sentence: center sentence? Last sentence!"
# Lets test default stop split keys
tp = parse_text(test_text)
expected_result = ["First sentence: center sentence?", "Last sentence!"]
assert tp.sentences == expected_result
# Lets test custom stop split keys
tp = parse_text(test_text, stop_keys_split=[":", "?"], stop_keys_ignore=[";"])
expected_result = ["First sentence:", "Center sentence?", "Last sentence!"]
assert tp.sentences == expected_result
def test_parse_text_replace_keys_raw_text():
# Lets test default result with badly structured text
test_text = "Easybook pro 15 Color: Gray Material: Aluminium"
pt = parse_text(test_text)
assert pt.sentences == ["Easybook pro 15 Color: Gray Material: Aluminium."]
replace_keys = [("Color:", ". Color:"), ("material:", ". Material:")]
pt = parse_text(test_text, replace_keys_raw_text=replace_keys)
assert pt.sentences == ["Easybook pro 15.", "Color: Gray.", "Material: Aluminium."]
def test_parse_text_remove_keys_raw_text():
test_text = "Easybook pro 15. Color: Gray"
pt = parse_text(test_text, remove_keys_raw_text=[". color:"])
assert pt.sentences == ["Easybook pro 15 Gray."]
def test_parse_text_html_table():
tp = parse_text(test_table.table_without_header_v3)
expected_results = ["Type: Easybook Pro.", "Operating system: etOS."]
assert tp.sentences == expected_results
tp = parse_text(test_table.table_with_header)
expected_results = ["Height/Width/Depth: 10/12/5.", "Height/Width/Depth: 2/3/5."]
assert tp.sentences == expected_results
tp = parse_text(test_table.table_without_header_v2)
assert tp.sentences == ["Height: 2; 4.", "Width: 3; 8."]
    # Check that text with no html table yields an empty result
    tp = parse_text(test_text_sentences)
    assert tp == ""
def test_parse_text_text_num_to_numeric():
test_text = "First Sentence. Two thousand and three has it. " "Three Sentences."
expected_results = ["1 Sentence.", "2003 has it.", "3 Sentences."]
tp = parse_text(test_text, text_num_to_numeric=True)
assert list(tp.sentences) == expected_results
def test_parse_text_iter():
test_text = "* First feature * second feature?"
assert list(parse_text(test_text)) == ["First feature.", "Second feature?"]
def test_parse_text_str():
test_text = "* First feature * second feature?"
assert str(parse_text(test_text)) == "First feature. Second feature?"
def test_parse_text_len():
test_text = "* First feature * second feature?"
assert len(parse_text(test_text)) == 2
def test_parse_text_add():
test_text = "* First feature * second feature?"
tp = parse_text(test_text) + "hello World"
assert str(tp) == "First feature. Second feature? Hello World."
assert list(tp) == ["First feature.", "Second feature?", "Hello World."]
tp = parse_text(test_text) + ["hello", "World!"]
assert str(tp) == "First feature. Second feature? Hello. World!"
def test_parse_text_radd():
test_text = "* First feature * second feature?"
tp = "hello World" + parse_text(test_text)
assert str(tp) == "Hello World. First feature. Second feature?"
tp = ["hello", "World!"] + parse_text(test_text)
assert str(tp) == "Hello. World! First feature. Second feature?"
|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 18 14:05:10 2013
@author: Jeff Hendricks
Web Page Experiments Solutions
"""
import scipy as sp
import scipy.stats  # required for sp.stats.mstats.mquantiles below
import bandit_solution as bs
from matplotlib import pyplot as plt
#Problem 1
#simulates one day of web page experiments. Returns the weights used that day
#and also returns priors (or posteriors really), being the state of each arm
#i.e. the number of successes and failures for each arm (+1)
def simulate_day(pvec, priors):
priors = priors.copy()
n = pvec.size
datasize = 100
    for k in range(2):
data = bs.sim_data(priors, datasize)
prob = bs.prob_opt(data)
weights = bs.get_weights(prob, 50)
        for i in range(n):
            for j in range(int(weights[i])):
result = pull(pvec[i])
if result == 0:
priors[i,1] += 1
else:
priors[i,0] += 1
return priors,weights
#Simulates a webpage visit resulting in a conversion or not
def pull(p):
return sp.random.binomial(1, p, size=None)
#Problem 2
#this function is given in the lab. It computes a measure of "value remaining"
# in the experiment as described in the Google Analytics page
def val_remaining(data,prob):
champ_ind = sp.argmax(prob)
thetaM = sp.amax(data,1)
valrem = (thetaM - data[:,champ_ind])/data[:,champ_ind]
pvr = sp.stats.mstats.mquantiles(valrem, .95)
return valrem, pvr
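#A hedged reading of the two return values (not part of the original comments): `valrem` holds,
#per posterior sample, the fractional conversion-rate improvement the best-looking arm could
#still offer over the current champion, and `pvr` is its 95th percentile. simulate_convergence
#below stops, after a 14-day minimum, once that remaining value drops below 1% of the
#champion's conversion rate or one arm reaches 95% probability of being optimal.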
#Simulates web page testing until the winning variation is found
def simulate_convergence(pvec):
n = pvec.size
priors = sp.ones((n,2))
datasize = 100
delta = 0
p_tol = .95
champ_cvr = 0
pvr = 1
days = 0
weights = []
while ((delta < p_tol) and (champ_cvr/100. < pvr)) or days < 14:
days += 1
priors, weights1 = simulate_day(pvec, priors)
weights.append(weights1)
data = bs.sim_data(priors, datasize)
prob = bs.prob_opt(data)
delta = prob.max()
valrem_dist, pvr = val_remaining(data, prob)
champ_ind = sp.argmax(prob)
champ_cvr = priors[champ_ind,0]/float(priors[champ_ind,0] + priors[champ_ind,1])
return priors,weights,champ_ind,days
#Problem 3
#Create plots for 2-arm convergence
if __name__ == "__main__":
pvec = sp.array([.04,.05])
priors, weights, champ_ind, days = simulate_convergence(pvec)
weights = sp.array(weights)/float(50)
plt.figure()
plt.plot(weights)
plt.plot([0,days],[.95,.95],'--k')
plt.plot([0,days],[.05,.05],'--k')
dayvec1 = sp.zeros(200)
champ1 = sp.zeros(200)
#run 200 simulations of 2-arm case
    for i in range(200):
print(i)
priors, weights, champ_ind, days = simulate_convergence(pvec)
dayvec1[i] = days
champ1[i] = champ_ind
plt.figure()
hist, bins = sp.histogram(dayvec1,bins = 12)
width = (bins[1]-bins[0])
center = (bins[:-1]+bins[1:])/2
plt.bar(center, hist, align = 'center', width = width,color = 'g')
#Create plots for 6-arm convergence
pvec = sp.array([.04,.02,.03,.035,.045,.05])
priors, weights, champ_ind, days = simulate_convergence(pvec)
weights = sp.array(weights)/float(50)
plt.figure()
plt.plot(weights)
plt.plot([0,days],[.95,.95],'--k')
plt.plot([0,days],[.05,.05],'--k')
dayvec2 = sp.zeros(100)
champ2 = sp.zeros(100)
#run 100 simulations of 6-arm case
    for i in range(100):
print(i)
priors, weights, champ_ind, days = simulate_convergence(pvec)
dayvec2[i] = days
champ2[i] = champ_ind
plt.figure()
hist, bins = sp.histogram(dayvec2,bins = 12)
width = (bins[1]-bins[0])
center = (bins[:-1]+bins[1:])/2
plt.bar(center, hist, align = 'center', width = width,color = 'g')
plt.show()
|
'''
molecool.io package
configure access to subpackage functions
'''
from .pdb import open_pdb
from .xyz import open_xyz, write_xyz
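# Illustrative usage of the re-exported helpers (hypothetical file names; the argument
# order is assumed from the function names, check pdb.py/xyz.py for the real signatures):
#   from molecool import io
#   symbols, coordinates = io.open_xyz("water.xyz")
#   io.write_xyz("water_copy.xyz", symbols, coordinates)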
|
"""
Work with references in the database
"""
import asyncio
import datetime
import logging
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import pymongo
from aiohttp import ClientConnectorError
from aiohttp.web import Request
from motor.motor_asyncio import AsyncIOMotorClientSession
from semver import VersionInfo
from sqlalchemy.ext.asyncio.engine import AsyncEngine
import virtool.db.utils
import virtool.errors
import virtool.github
import virtool.history.db
import virtool.tasks.pg
import virtool.utils
from virtool.db.transforms import apply_transforms
from virtool.http.utils import download_file
from virtool.otus.db import join
from virtool.otus.utils import verify
from virtool.pg.utils import get_row
from virtool.references.utils import (
RIGHTS,
check_will_change,
get_owner_user,
load_reference_file,
)
from virtool.settings.db import Settings
from virtool.types import App
from virtool.uploads.models import Upload
from virtool.users.db import AttachUserTransform, extend_user
PROJECTION = [
"_id",
"remotes_from",
"cloned_from",
"created_at",
"data_type",
"imported_from",
"installed",
"internal_control",
"latest_build",
"name",
"organism",
"release",
"remotes_from",
"task",
"unbuilt_count",
"updates",
"updating",
"user",
"users",
"groups",
]
async def processor(db, document: dict) -> dict:
"""
Process a reference document to a form that can be dispatched or returned in a list.
    Use `attach_computed` for complete representations of the reference.
:param db: the application database client
:param document: the document to process
:return: the processed document
"""
try:
ref_id = document.pop("_id")
except KeyError:
ref_id = document["id"]
latest_build, otu_count, unbuilt_count = await asyncio.gather(
get_latest_build(db, ref_id),
get_otu_count(db, ref_id),
get_unbuilt_count(db, ref_id),
)
document.update(
{
"latest_build": latest_build,
"otu_count": otu_count,
"unbuilt_change_count": unbuilt_count,
}
)
try:
document["installed"] = document.pop("updates")[-1]
except (KeyError, IndexError):
pass
document["id"] = ref_id
return document
async def attach_computed(db, document: dict) -> dict:
"""
Get all computed data for the specified reference and attach it to the passed
``document``.
:param db: the application database client
    :param document: the document to attach computed data to
:return: the updated document
"""
ref_id = document["_id"]
try:
internal_control_id = document["internal_control"]["id"]
except (KeyError, TypeError):
internal_control_id = None
(
contributors,
internal_control,
latest_build,
otu_count,
users,
unbuilt_count,
) = await asyncio.gather(
get_contributors(db, ref_id),
get_internal_control(db, internal_control_id, ref_id),
get_latest_build(db, ref_id),
get_otu_count(db, ref_id),
get_reference_users(db, document),
get_unbuilt_count(db, ref_id),
)
processed = virtool.utils.base_processor(
{
**document,
"contributors": contributors,
"internal_control": internal_control or None,
"latest_build": latest_build,
"otu_count": otu_count,
"unbuilt_change_count": unbuilt_count,
"users": users,
}
)
return await apply_transforms(processed, [AttachUserTransform(db)])
async def get_reference_users(db, document: Dict[str, Any]) -> List[Dict[str, Any]]:
"""
Get a detailed list of users that have access to the specified reference.
:param db: the application database client
:param document: the reference document
:return: a list of user data dictionaries
"""
if not document.get("users"):
return []
return await asyncio.gather(*[extend_user(db, user) for user in document["users"]])
async def add_group_or_user(db, ref_id: str, field: str, data: dict) -> Optional[dict]:
document = await db.references.find_one({"_id": ref_id}, [field])
if not document:
return None
subdocument_id = data.get("group_id") or data["user_id"]
if (
field == "groups"
and await db.groups.count_documents({"_id": subdocument_id}) == 0
):
raise virtool.errors.DatabaseError("group does not exist")
if (
field == "users"
and await db.users.count_documents({"_id": subdocument_id}) == 0
):
raise virtool.errors.DatabaseError("user does not exist")
if subdocument_id in [s["id"] for s in document[field]]:
raise virtool.errors.DatabaseError(field[:-1] + " already exists")
rights = {key: data.get(key, False) for key in RIGHTS}
subdocument = {
"id": subdocument_id,
"created_at": virtool.utils.timestamp(),
**rights,
}
await db.references.update_one({"_id": ref_id}, {"$push": {field: subdocument}})
return subdocument
async def check_right(req: Request, reference: dict, right: str) -> bool:
if req["client"].administrator:
return True
user_id = req["client"].user_id
try:
groups = reference["groups"]
users = reference["users"]
except (KeyError, TypeError):
reference = await req.app["db"].references.find_one(
reference, ["groups", "users"]
)
groups = reference["groups"]
users = reference["users"]
for user in users:
if user["id"] == user_id:
if user[right]:
return True
break
for group in groups:
if group[right] and group["id"] in req["client"].groups:
return True
return False
async def check_source_type(db, ref_id: str, source_type: str) -> bool:
"""
Check if the provided `source_type` is valid based on the current reference source
type configuration.
:param db: the application database client
:param ref_id: the reference context
:param source_type: the source type to check
:return: source type is valid
"""
document = await db.references.find_one(
ref_id, ["restrict_source_types", "source_types"]
)
restrict_source_types = document.get("restrict_source_types", False)
source_types = document.get("source_types", list())
if source_type == "unknown":
return True
# Return `False` when source_types are restricted and source_type is not allowed.
if source_type and restrict_source_types:
return source_type in source_types
# Return `True` when:
# - source_type is empty string (unknown)
# - source_types are not restricted
# - source_type is an allowed source_type
return True
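# For example (hypothetical values): with restrict_source_types=True and
# source_types=["isolate"], "isolate" and "unknown" are accepted while "culture" is
# rejected; with restrict_source_types=False any source type is accepted.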
def compose_base_find_query(user_id: str, administrator: bool, groups: list) -> dict:
"""
Compose a query for filtering reference search results based on user read rights.
:param user_id: the id of the user requesting the search
:param administrator: the administrator flag of the user requesting the search
    :param groups: the ids of the groups the requesting user is a member of
:return: a valid MongoDB query
"""
if administrator:
return dict()
is_user_member = {"users.id": user_id}
is_group_member = {"groups.id": {"$in": groups}}
is_owner = {"user.id": user_id}
return {"$or": [is_group_member, is_user_member, is_owner]}
async def delete_group_or_user(
db, ref_id: str, subdocument_id: str, field: str
) -> Optional[str]:
"""
Delete an existing group or user as decided by the `field` argument.
:param db: the application database client
:param ref_id: the id of the reference to modify
:param subdocument_id: the id of the group or user to delete
:param field: the field to modify: 'group' or 'user'
:return: the id of the removed subdocument
"""
document = await db.references.find_one(
{"_id": ref_id, field + ".id": subdocument_id}, [field]
)
if document is None:
return None
# Retain only the subdocuments that don't match the passed `subdocument_id`.
filtered = [s for s in document[field] if s["id"] != subdocument_id]
await db.references.update_one({"_id": ref_id}, {"$set": {field: filtered}})
return subdocument_id
async def edit_group_or_user(
db, ref_id: str, subdocument_id: str, field: str, data: dict
) -> Optional[dict]:
"""
Edit an existing group or user as decided by the `field` argument.
Returns `None` if the reference, group, or user does not exist.
:param db: the application database client
:param ref_id: the id of the reference to modify
:param subdocument_id: the id of the group or user to modify
:param field: the field to modify: 'group' or 'user'
:param data: the data to update the group or user with
:return: the modified subdocument
"""
document = await db.references.find_one(
{"_id": ref_id, field + ".id": subdocument_id}, [field]
)
if document is None:
return None
for subdocument in document[field]:
if subdocument["id"] == subdocument_id:
rights = {key: data.get(key, subdocument[key]) for key in RIGHTS}
subdocument.update(rights)
await db.references.update_one(
{"_id": ref_id}, {"$set": {field: document[field]}}
)
return subdocument
async def fetch_and_update_release(
app, ref_id: str, ignore_errors: bool = False
) -> dict:
"""
Get the latest release for the GitHub repository identified by the passed `slug`.
If a release is found, update the reference identified by the passed `ref_id` and
return the release.
Exceptions can be ignored during the GitHub request. Error information will still
be written to the reference document.
:param app: the application object
:param ref_id: the id of the reference to update
:param ignore_errors: ignore exceptions raised during GitHub request
:return: the latest release
"""
db = app["db"]
retrieved_at = virtool.utils.timestamp()
document = await db.references.find_one(
ref_id, ["installed", "release", "remotes_from"]
)
release = document.get("release")
etag = virtool.github.get_etag(release)
# Variables that will be used when trying to fetch release from GitHub.
errors = list()
updated = None
try:
updated = await virtool.github.get_release(
app["config"], app["client"], document["remotes_from"]["slug"], etag
)
if updated:
updated = virtool.github.format_release(updated)
except (ClientConnectorError, virtool.errors.GitHubError) as err:
if "ClientConnectorError" in str(err):
errors = ["Could not reach GitHub"]
if "404" in str(err):
errors = ["GitHub repository or release does not exist"]
if errors and not ignore_errors:
raise
if updated:
release = updated
if release:
installed = document["installed"]
release["newer"] = bool(
installed
and VersionInfo.parse(release["name"].lstrip("v"))
> VersionInfo.parse(installed["name"].lstrip("v"))
)
release["retrieved_at"] = retrieved_at
await db.references.update_one(
{"_id": ref_id}, {"$set": {"errors": errors, "release": release}}
)
return release
async def get_contributors(db, ref_id: str) -> Optional[List[dict]]:
"""
    Return a list of contributors and their contribution count for a specific ref.
:param db: the application database client
:param ref_id: the id of the ref to get contributors for
:return: a list of contributors to the ref
"""
return await virtool.history.db.get_contributors(db, {"reference.id": ref_id})
async def get_internal_control(
db, internal_control_id: Optional[str], ref_id: str
) -> Optional[dict]:
"""
    Return a minimal dict describing the ref internal control given an `internal_control_id`.
:param db: the application database client
:param internal_control_id: the id of the otu to create a minimal dict for
:param ref_id: the id of the reference to look for the control OTU in
:return: a minimal dict describing the ref internal control
"""
if internal_control_id is None:
return None
name = await virtool.db.utils.get_one_field(
db.otus, "name", {"_id": internal_control_id, "reference.id": ref_id}
)
if name is None:
return None
return {"id": internal_control_id, "name": name}
async def get_latest_build(db, ref_id: str) -> Optional[dict]:
"""
Return the latest index build for the ref.
:param db: the application database client
:param ref_id: the id of the ref to get the latest build for
:return: a subset of fields for the latest build
"""
latest_build = await db.indexes.find_one(
{"reference.id": ref_id, "ready": True},
projection=["created_at", "version", "user", "has_json"],
sort=[("version", pymongo.DESCENDING)],
)
if latest_build is None:
return None
return await apply_transforms(
virtool.utils.base_processor(latest_build), [AttachUserTransform(db)]
)
async def get_official_installed(db) -> bool:
"""
Return a boolean indicating whether the official plant virus reference is installed.
:param db:
:return: official reference install status
"""
return (
await db.references.count_documents(
{"remotes_from.slug": "virtool/ref-plant-viruses"}
)
> 0
)
async def get_manifest(db, ref_id: str) -> dict:
"""
Generate a dict of otu document version numbers keyed by the document id.
This is used to make sure only changes made at the time the index rebuild was
started are included in the build.
:param db: the application database client
:param ref_id: the id of the reference to get the current index for
:return: a manifest of otu ids and versions
"""
manifest = dict()
async for document in db.otus.find({"reference.id": ref_id}, ["version"]):
manifest[document["_id"]] = document["version"]
return manifest
async def get_otu_count(db, ref_id: str) -> int:
"""
Get the number of OTUs associated with the given `ref_id`.
:param db: the application database client
:param ref_id: the id of the reference to get the current index for
:return: the OTU count
"""
return await db.otus.count_documents({"reference.id": ref_id})
async def get_unbuilt_count(db, ref_id: str) -> int:
"""
Return a count of unbuilt history changes associated with a given `ref_id`.
:param db: the application database client
:param ref_id: the id of the ref to count unbuilt changes for
:return: the number of unbuilt changes
"""
return await db.history.count_documents(
{"reference.id": ref_id, "index.id": "unbuilt"}
)
async def create_clone(
db, settings: Settings, name: str, clone_from: str, description: str, user_id: str
) -> dict:
source = await db.references.find_one(clone_from)
name = name or "Clone of " + source["name"]
document = await create_document(
db,
settings,
name,
source["organism"],
description,
source["data_type"],
created_at=virtool.utils.timestamp(),
user_id=user_id,
)
document["cloned_from"] = {"id": clone_from, "name": source["name"]}
return document
async def create_document(
db,
settings: Settings,
name: str,
organism: Optional[str],
description: str,
data_type: Optional[str],
created_at=None,
ref_id: Optional[str] = None,
user_id: Optional[str] = None,
users=None,
):
if ref_id and await db.references.count_documents({"_id": ref_id}):
raise virtool.errors.DatabaseError("ref_id already exists")
ref_id = ref_id or await virtool.db.utils.get_new_id(db.otus)
user = None
if user_id:
user = {"id": user_id}
if not users:
users = [get_owner_user(user_id)]
document = {
"_id": ref_id,
"created_at": created_at or virtool.utils.timestamp(),
"data_type": data_type,
"description": description,
"name": name,
"organism": organism,
"internal_control": None,
"restrict_source_types": False,
"source_types": settings.default_source_types,
"groups": list(),
"users": users,
"user": user,
}
if data_type == "barcode":
document["targets"] = list()
return document
async def create_import(
db,
pg: AsyncEngine,
settings: Settings,
name: str,
description: str,
import_from: str,
user_id: str,
) -> dict:
"""
Import a previously exported Virtool reference.
:param db: the application database client
:param pg: PostgreSQL database object
:param settings: the application settings object
:param name: the name for the new reference
:param description: a description for the new reference
:param import_from: the uploaded file to import from
:param user_id: the id of the creating user
:return: a reference document
"""
created_at = virtool.utils.timestamp()
document = await create_document(
db,
settings,
name or "Unnamed Import",
None,
description,
None,
created_at=created_at,
user_id=user_id,
)
upload = await get_row(pg, Upload, ("name_on_disk", import_from))
document["imported_from"] = upload.to_dict()
return document
async def create_remote(
db, settings: Settings, release: dict, remote_from: str, user_id: str
) -> dict:
"""
Create a remote reference document in the database.
:param db: the application database object
:param settings: the application settings
:param release: the latest release for the remote reference
:param remote_from: information about the remote (errors, GitHub slug)
:param user_id: the id of the requesting user
:return: the new reference document
"""
created_at = virtool.utils.timestamp()
document = await create_document(
db,
settings,
"Plant Viruses",
None,
"The official plant virus reference from the Virtool developers",
None,
created_at=created_at,
user_id=user_id,
)
return {
**document,
# Connection information for the GitHub remote repo.
"remotes_from": {"errors": [], "slug": remote_from},
# The latest available release on GitHub.
"release": dict(release, retrieved_at=created_at),
# The update history for the reference. We put the release being installed as
# the first history item.
"updates": [
virtool.github.create_update_subdocument(
release, False, user_id, created_at
)
],
"installed": None,
}
async def download_and_parse_release(
app, url: str, task_id: int, progress_handler: callable
):
pg = app["pg"]
with virtool.utils.get_temp_dir() as tempdir:
download_path = Path(tempdir) / "reference.tar.gz"
await download_file(app, url, download_path, progress_handler)
await virtool.tasks.pg.update(pg, task_id, step="unpack")
return await app["run_in_thread"](load_reference_file, download_path)
async def edit(db, ref_id: str, data: dict) -> dict:
"""
    Edit an existing reference using the passed update `data`.
:param db: the application database object
:param ref_id: the id of the reference to update
:param data: update data from the HTTP request
:return: the updated reference document
"""
document = await db.references.find_one(ref_id)
if document["data_type"] != "barcode":
data.pop("targets", None)
document = await db.references.find_one_and_update({"_id": ref_id}, {"$set": data})
document = await attach_computed(db, document)
if "name" in data:
await db.analyses.update_many(
{"reference.id": ref_id}, {"$set": {"reference.name": document["name"]}}
)
return document
async def insert_change(
app,
otu_id: str,
verb: str,
user_id: str,
old: Optional[dict] = None,
session: Optional[AsyncIOMotorClientSession] = None,
):
"""
Insert a history document for the OTU identified by `otu_id` and the passed `verb`.
:param app: the application object
:param otu_id: the ID of the OTU the change is for
    :param verb: the change verb (e.g. remove, insert)
:param user_id: the ID of the requesting user
:param old: the old joined OTU document
:param session: a Mongo session
"""
db = app["db"]
joined = await join(db, otu_id, session=session)
name = joined["name"]
e = "" if verb[-1] == "e" else "e"
description = f"{verb.capitalize()}{e}d {name}"
if abbreviation := joined.get("abbreviation"):
description = f"{description} ({abbreviation})"
await virtool.history.db.add(
app, verb, old, joined, description, user_id, silent=True, session=session
)
async def insert_joined_otu(
db,
otu: dict,
created_at: datetime.datetime,
ref_id: str,
user_id: str,
session: Optional[AsyncIOMotorClientSession] = None,
) -> str:
issues = verify(otu)
document = await db.otus.insert_one(
{
"abbreviation": otu["abbreviation"],
"created_at": created_at,
"imported": True,
"isolates": [
{
key: isolate[key]
for key in ("id", "default", "source_type", "source_name")
}
for isolate in otu["isolates"]
],
"issues": issues,
"lower_name": otu["name"].lower(),
"last_indexed_version": None,
"name": otu["name"],
"reference": {"id": ref_id},
"remote": {"id": otu["_id"]},
"schema": otu.get("schema", []),
"user": {"id": user_id},
"verified": issues is None,
"version": 0,
},
silent=True,
session=session,
)
sequences = []
for isolate in otu["isolates"]:
for sequence in isolate.pop("sequences"):
try:
remote_sequence_id = sequence["remote"]["id"]
sequence.pop("_id")
except KeyError:
remote_sequence_id = sequence.pop("_id")
sequences.append(
{
**sequence,
"accession": sequence["accession"],
"isolate_id": isolate["id"],
"otu_id": document["_id"],
"segment": sequence.get("segment", ""),
"reference": {"id": ref_id},
"remote": {"id": remote_sequence_id},
}
)
for sequence in sequences:
await db.sequences.insert_one(sequence)
return document["_id"]
async def refresh_remotes(app: App):
db = app["db"]
try:
logging.debug("Started reference refresher")
while True:
for ref_id in await db.references.distinct(
"_id", {"remotes_from": {"$exists": True}}
):
await fetch_and_update_release(app, ref_id, ignore_errors=True)
await asyncio.sleep(600)
except asyncio.CancelledError:
pass
logging.debug("Stopped reference refresher")
async def update(
req: Request,
created_at: datetime.datetime,
task_id: int,
ref_id: str,
release: dict,
user_id: str,
) -> tuple:
db = req.app["db"]
update_subdocument = virtool.github.create_update_subdocument(
release, False, user_id, created_at
)
await db.references.update_one(
{"_id": ref_id},
{
"$push": {"updates": update_subdocument},
"$set": {"task": {"id": task_id}, "updating": True},
},
)
return release, update_subdocument
async def update_joined_otu(
db, otu: dict, created_at: datetime.datetime, ref_id: str, user_id: str
) -> Union[dict, str, None]:
remote_id = otu["_id"]
old = await join(db, {"reference.id": ref_id, "remote.id": remote_id})
if old:
if not check_will_change(old, otu):
return None
sequence_updates = list()
for isolate in otu["isolates"]:
for sequence in isolate.pop("sequences"):
sequence_updates.append(
{
"accession": sequence["accession"],
"definition": sequence["definition"],
"host": sequence["host"],
"segment": sequence.get("segment", ""),
"sequence": sequence["sequence"],
"otu_id": old["_id"],
"isolate_id": isolate["id"],
"reference": {"id": ref_id},
"remote": {"id": sequence["_id"]},
}
)
await db.otus.update_one(
{"_id": old["_id"]},
{
"$inc": {"version": 1},
"$set": {
"abbreviation": otu["abbreviation"],
"name": otu["name"],
"lower_name": otu["name"].lower(),
"isolates": otu["isolates"],
"schema": otu.get("schema", list()),
},
},
)
for sequence_update in sequence_updates:
remote_sequence_id = sequence_update["remote"]["id"]
update_result = await db.sequences.update_one(
{"reference.id": ref_id, "remote.id": remote_sequence_id},
{"$set": sequence_update},
)
if not update_result.matched_count:
await db.sequences.insert_one(sequence_update)
return old
return await insert_joined_otu(db, otu, created_at, ref_id, user_id)
|
from collections import OrderedDict
import torch.nn as nn
class SmallCNN(nn.Module):
def __init__(self, drop=0.5):
super(SmallCNN, self).__init__()
self.num_channels = 1
self.num_labels = 10
activ = nn.ReLU(True)
self.feature_extractor = nn.Sequential(OrderedDict([
('conv1', nn.Conv2d(self.num_channels, 32, 3)),
('relu1', activ),
('conv2', nn.Conv2d(32, 32, 3)),
('relu2', activ),
('maxpool1', nn.MaxPool2d(2, 2)),
('conv3', nn.Conv2d(32, 64, 3)),
('relu3', activ),
('conv4', nn.Conv2d(64, 64, 3)),
('relu4', activ),
('maxpool2', nn.MaxPool2d(2, 2)),
]))
self.classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(64 * 4 * 4, 200)),
('relu1', activ),
('drop', nn.Dropout(drop)),
('fc2', nn.Linear(200, 200)),
('relu2', activ),
('fc3', nn.Linear(200, self.num_labels)),
]))
for m in self.modules():
            if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
nn.init.constant_(self.classifier.fc3.weight, 0)
nn.init.constant_(self.classifier.fc3.bias, 0)
def forward(self, input):
features = self.feature_extractor(input)
logits = self.classifier(features.view(-1, 64 * 4 * 4))
return logits |
import os
import sys
import json
import argparse
from jupyter_client.kernelspec import install_kernel_spec
from IPython.utils.tempdir import TemporaryDirectory
kernel_json = {"argv": [sys.executable, "-m", "cqljupyter", "-f", "{connection_file}"],
"display_name": "CQL",
"language": "CQL",
"codemirror_mode": "sql",
"env" : {}
}
def install_my_kernel_spec(user=True):
with TemporaryDirectory() as td:
os.chmod(td, 0o755) # Starts off as 700, not user readable
with open(os.path.join(td, 'kernel.json'), 'w') as f:
json.dump(kernel_json, f, sort_keys=True)
# TODO: Copy resources once they're specified
install_kernel_spec(td, 'CQL', user=user, replace=True)
def _is_root():
try:
return os.geteuid() == 0
except AttributeError:
return False # assume not an admin on non-Unix platforms
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument('host', nargs='?')
parser.add_argument('port', nargs='?')
parser.add_argument('-u', type=str)
parser.add_argument('-p', type=str)
parser.add_argument('--ssl', action='store_true')
args = parser.parse_args()
kernel_json['env']['CASSANDRA_SSL']=str(args.ssl)
if (args.host):
kernel_json['env']['CASSANDRA_HOSTNAME']=args.host
if (args.port):
kernel_json['env']['CASSANDRA_PORT']=args.port
if (args.u):
kernel_json['env']['CASSANDRA_USER']=args.u
if (args.p):
kernel_json['env']['CASSANDRA_PWD']=args.p
user = args.u if args.u else not _is_root()
    print('Installing IPython kernel spec to connect to Cassandra', kernel_json['env'])
install_my_kernel_spec(user=user)
if __name__ == '__main__':
main(argv=sys.argv)
|
from argparse import ArgumentParser
import math, sys, os, random, time
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import torch
from torch.optim import Adam
from torch.nn.functional import binary_cross_entropy, relu
from torch.nn import Conv2d, ConvTranspose2d, MaxPool2d, Linear, Sequential, ReLU, Sigmoid, Upsample
from torch.autograd import Variable
from torch.utils.data import TensorDataset, DataLoader
import torchvision
from torchvision import transforms
from tensorboardX import SummaryWriter
from scipy.misc import imresize
import tqdm
import wget
import skvideo.io
import util, models
WIDTH, HEIGHT = 320, 256
def anneal(step, total, k=1.0, anneal_function='logistic'):
if anneal_function == 'logistic':
return float(1 / (1 + np.exp(-k * (step - total / 2))))
elif anneal_function == 'linear':
return min(1, step / (total*1.5))
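# Rough shape of the schedule above (illustrative numbers, not from the original source):
# with total=100 and k=1.0 the logistic variant gives ~0.0 at step 0, exactly 0.5 at
# step 50 and ~1.0 at step 100, while the linear variant reaches 100/150 ~= 0.67 at
# step 100 and saturates at 1.0 from step 150 onward.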
def go(options):
## Admin
# Tensorboard output
tbw = SummaryWriter(log_dir=options.tb_dir)
# Set random or det. seed
if options.seed < 0:
seed = random.randint(0, 1000000)
else:
seed = options.seed
np.random.seed(seed)
print('random seed: ', seed)
## Load the data
transform = transforms.Compose([transforms.ToTensor()])
data = torchvision.datasets.ImageFolder(options.data_dir, transform=transform)
dataloader = torch.utils.data.DataLoader(data,
batch_size=options.batch_size,
shuffle=True,
num_workers=2)
## Build the model
OUTCN = 64
PIXCN = 60
LAYERS = 5
encoder = models.ImEncoder(in_size=(HEIGHT, WIDTH), zsize=options.latent_size)
decoder = models.ImDecoder(in_size=(HEIGHT, WIDTH), zsize=options.latent_size, out_channels=OUTCN)
pixcnn = models.LGated(
input_size=(3, HEIGHT, WIDTH), conditional_channels=OUTCN,
channels=PIXCN, num_layers=LAYERS)
mods = [encoder, decoder, pixcnn]
if torch.cuda.is_available():
for m in mods:
m.cuda()
## Training loop
params = []
for m in mods:
params.extend(m.parameters())
optimizer = Adam(params, lr=options.lr)
### Fit model
instances_seen = 0
# Test images to plot
images = torch.from_numpy(np.load(options.sample_file)['images']).permute(0, 3, 1, 2)
for e in range(options.epochs):
print('epoch {}'.format(e))
for i, (batch, _) in enumerate(tqdm.tqdm(dataloader)):
if torch.cuda.is_available():
batch = batch.cuda()
batch = Variable(batch)
optimizer.zero_grad()
#- forward pass
b, c, h, w = batch.size()
zcomb = encoder(batch)
zmean, zlsig = zcomb[:, :options.latent_size], zcomb[:, options.latent_size:]
kl_loss = util.kl_loss(zmean, zlsig)
zsample = util.sample(zmean, zlsig)
cond = decoder(zsample)
            rec = pixcnn(batch, cond)  # feed the image batch, not the built-in input()
rec_loss = binary_cross_entropy(rec, batch, reduce=False).view(b, -1).sum(dim=1)
#- backward pass
loss = (rec_loss + kl_loss).mean()
loss.backward()
optimizer.step()
instances_seen += batch.size(0)
tbw.add_scalar('score/kl', float(kl_loss.mean()), instances_seen)
tbw.add_scalar('score/rec', float(rec_loss.mean()), instances_seen)
tbw.add_scalar('score/loss', float(loss), instances_seen)
## Plot the latent space
if options.sample_file is not None and e % options.out_every == 0:
if options.model_dir is not None:
torch.save(encoder.state_dict(),
options.model_dir + '/encoder.{}.{:.4}.model'.format(e, float(loss)))
torch.save(decoder.state_dict(),
options.model_dir + '/decoder.{}.{:.4}.model'.format(e, float(loss)))
print('Plotting latent space.')
l = images.size(0)
b = options.batch_size
out_batches = []
for fr in range(0, l, b):
to = min(fr + b, l)
batch = images[fr:to]
if torch.cuda.is_available():
batch = batch.cuda()
batch = Variable(batch)
out = encoder(batch.float()).data[:, :options.latent_size]
out_batches.append(out)
latents = torch.cat(out_batches, dim=0)
print('-- Computed latent vectors.')
rng = float(torch.max(latents[:, 0]) - torch.min(latents[:, 0]))
print('-- L', latents[:10, :])
print('-- range', rng)
n_test = latents.shape[0]
util.plot(latents.cpu().numpy(), images.permute(0, 2, 3, 1).numpy(), size=rng / math.sqrt(n_test),
filename='score.{:04}.pdf'.format(e), invert=True)
print('-- finished plot')
print('Plotting interpolations.')
def enc(inp):
return encoder(inp)[:, :options.latent_size]
util.interpolate(images, enc, decoder, name='interpolate.{}'.format(e))
if __name__ == "__main__":
## Parse the command line options
parser = ArgumentParser()
parser.add_argument("-e", "--epochs",
dest="epochs",
help="Number of 'epochs'.",
default=50, type=int)
parser.add_argument("-o", "--out-every",
dest="out_every",
help="How many epochs to wait before producing output.",
default=1, type=int)
parser.add_argument("-L", "--latent-size",
dest="latent_size",
help="Size of the latent representation",
default=256, type=int)
parser.add_argument("-l", "--learn-rate",
dest="lr",
help="Learning rate",
default=0.0001, type=float)
parser.add_argument("-b", "--batch-size",
dest="batch_size",
help="Batch size",
default=32, type=int)
parser.add_argument("-d", "--depth",
dest="depth",
help="The number of resnet blocks to add to the basic model.",
default=0, type=int)
parser.add_argument("-D", "--data-directory",
dest="data_dir",
help="Data directory",
default='./data', type=str)
parser.add_argument("-T", "--tb-directory",
dest="tb_dir",
help="Tensorboard directory",
default='./runs/score', type=str)
parser.add_argument("-M", "--model-dir",
dest="model_dir",
help="Where to save the model (if None, the model will not be saved). The model will be overwritten every video batch.",
default=None, type=str)
parser.add_argument("-S", "--sample-file",
dest="sample_file",
help="Saved numpy array with random frames",
default='./sample.npz', type=str)
parser.add_argument("-r", "--random-seed",
dest="seed",
help="RNG seed. Negative for random. Chosen seed will be printed to sysout",
default=1, type=int)
parser.add_argument("--res",
dest="use_res",
help="Whether to use residual connections.",
action="store_true")
parser.add_argument("--bn",
dest="use_bn",
help="Whether to us batch normalization.",
action="store_true")
options = parser.parse_args()
print('OPTIONS', options)
go(options) |
import modcolmesh
from modcolmesh import coltype
import modmesh
from modmesh import D3DDECLTYPE, D3DDECLUSAGE
import modVec3
from modVec3 import Vec3
class ColBox(modcolmesh.ColMesh):
def __init__(self):
modcolmesh.ColMesh.__init__(self)
self._create_header()
self._create_geoms()
def _create_header(self):
self.u1 = 0
self.version = 10
def _create_geoms(self):
self.geomnum = 1
self.geoms = [modcolmesh.bf2colgeom() for i in range(self.geomnum)]
for geom in self.geoms:
self.__create_geom(geom)
def __create_geom(self, geom):
geom.subgeomnum = 1
geom.subgeoms = [modcolmesh.bf2colsubgeom() for i in range(geom.subgeomnum)]
for subgeom in geom.subgeoms:
self.__create_subgeom(subgeom)
def __create_subgeom(self, subgeom):
subgeom.lodnum = len(coltype)
for collisiontype in coltype:
subgeom.lods.append(self.__create_lod(modcolmesh.bf2collod(), collisiontype))
def __create_lod(self, lod, collisiontype):
lod.coltype = collisiontype
# some unknown
lod.u7 = 49
self.__create_faces(lod)
self.__create_vertices(lod)
self.__create_bounds(lod)
self.__create_unknowns(lod)
return lod
def __create_faces(self, lod):
data = [
            # seems to be vertex indices plus a material index
# v1, v2, v3, m
(0, 1, 2, 0),
(2, 3, 0, 0),
(1, 4, 5, 0),
(5, 2, 1, 0),
(4, 6, 7, 0),
(7, 5, 4, 0),
(6, 0, 3, 0),
(3, 7, 6, 0),
(4, 1, 0, 0),
(0, 6, 4, 0),
(5, 7, 3, 0),
(3, 2, 5, 0),
# secondary faces for 2nd box
(8, 9, 10, 0),
(10, 11, 8, 0),
(9, 12, 13, 0),
(13, 10, 9, 0),
(12, 14, 15, 0),
(15, 13, 12, 0),
(14, 8, 11, 0),
(11, 15, 14, 0),
(12, 9, 8, 0),
(8, 14, 12, 0),
(13, 15, 11, 0),
(11, 10, 13, 0),
]
lod.faces = [modcolmesh.bf2colface(*values) for values in data]
lod.facenum = len(lod.faces)
def __create_face(self, face):
face.v1 = 0
face.v2 = 0
face.v3 = 0
face.material = 0
def __create_bounds(self, lod):
min = (-0.5, 0.0, -0.5)
max = (0.5, 1.0, 0.5)
lod.min = Vec3(*min)
lod.max = Vec3(*max)
bmin = (-0.5, 0.0, -0.5)
bmax = (0.5, 1.0, 0.5)
lod.bmin = Vec3(*bmin)
lod.bmax = Vec3(*bmax)
def __create_vertices(self, lod):
data = [
(-0.5, 1.0, -0.5),
(-0.5, 1.0, 0.5),
(-0.5, 0.0, 0.5),
(-0.5, 0.0, -0.5),
(0.5, 1.0, 0.5),
(0.5, 0.0, 0.5),
(0.5, 1.0, -0.5),
(0.5, 0.0, -0.5),
# 2nd box
(-2.5, 1.0, -0.5),
(-2.5, 1.0, 0.5),
(-2.5, 0.0, 0.5),
(-2.5, 0.0, -0.5),
(-1.5, 1.0, 0.5),
(-1.5, 0.0, 0.5),
(-1.5, 1.0, -0.5),
(-1.5, 0.0, -0.5),
]
lod.vertices = [Vec3(*values) for values in data]
lod.vertnum = len(lod.vertices)
lod.vertids = [0 for i in range(lod.vertnum)]
def __create_unknowns(self, lod):
ydata = [
#(-0.5, 4, 2, 0, 1),
#(0.5, 8, 512, 2, 2),
#(0.0, 13, 1538, 4, 6),
# u1, u2, u3, u4, u5
(-0.5, 0, 0, 0, 0),
(0.5, 0, 0, 0, 0),
(0.0, 0, 0, 0, 0),
]
lod.ydata = [modcolmesh.ystruct(*values) for values in ydata]
lod.ynum = len(lod.ydata)
#lod.zdata = [0, 1, 4, 5, 10, 11, 6, 7, 8, 9, 2, 3]
lod.zdata = [0 for i in range(0)]
lod.znum = len(lod.zdata)
#lod.adata = [8, 3, 1, 11, 6, 0, 8, 5, 3, 11, 0, 2, 9, 7, 5, 10, 2, 4, 9, 1, 7, 10, 4, 6, 2, 0, 9, 6, 4, 8, 5, 7, 11, 1, 3, 10]
lod.adata = [0 for i in range(0)]
lod.anum = len(lod.adata)
class Box(modmesh.VisMesh):
def __init__(self):
modmesh.VisMesh.__init__(self)
self._create_header()
self._create_u1_bfp4f_version()
self._create_geomnum()
self._create_geom_table()
self._create_vertformat()
self._create_vertattrib_table()
self._create_vertstride()
self._create_vertices()
self._create_index()
self._create_u2()
self._create_nodes()
self._create_materials()
def _create_header(self):
self.head = modmesh.bf2head()
self.head.u1 = 0
self.head.version = 11
self.head.u3 = 0
self.head.u4 = 0
self.head.u5 = 0
def _create_u1_bfp4f_version(self):
self.u1 = 0
def _create_geomnum(self):
self.geomnum = 1
def _create_geom_table(self):
self.geoms = [modmesh.bf2geom() for i in range(self.geomnum)]
self.geoms[0].lodnum = 1
for geom in self.geoms:
for i in range(geom.lodnum):
geom.lods = [modmesh.bf2lod() for i in range(geom.lodnum)]
def _create_vertformat(self):
self.vertformat = 4
def _create_vertattrib_table(self):
USED = 0
UNUSED = 255
# flag, offset(bytes), type, usage
dumb_array = [
(USED, D3DDECLTYPE.FLOAT3, D3DDECLUSAGE.POSITION),
(USED, D3DDECLTYPE.FLOAT3, D3DDECLUSAGE.NORMAL),
(USED, D3DDECLTYPE.D3DCOLOR, D3DDECLUSAGE.BLENDINDICES),
(USED, D3DDECLTYPE.FLOAT2, D3DDECLUSAGE.UV1),
(USED, D3DDECLTYPE.FLOAT3, D3DDECLUSAGE.TANGENT),
#(UNUSED, D3DDECLTYPE.UNUSED, D3DDECLUSAGE.POSITION), # dice exporter junk
]
self.vertattribnum = len(dumb_array)
self.vertattrib = [modmesh.vertattrib() for i in range(self.vertattribnum)]
vertstride = 0
for i in range(self.vertattribnum):
self.vertattrib[i].flag = dumb_array[i][0]
self.vertattrib[i].offset = vertstride
self.vertattrib[i].vartype = dumb_array[i][1]
self.vertattrib[i].usage = dumb_array[i][2]
vertstride += len(self.vertattrib[i].vartype)*self.vertformat
def _create_vertstride(self):
self.vertstride = sum([len(modmesh.D3DDECLTYPE(attrib.vartype))*self.vertformat for attrib in self.vertattrib])
def _create_vertices(self):
# x/y/z
# left-to-right/bottom-to-top/back-to-front
positions = [
(0.5, 1.0, -0.5),
(0.5, 0.0, -0.5),
(-0.5, 0.0, -0.5),
(-0.5, 1.0, -0.5),
(0.5, 1.0, 0.5),
(-0.5, 1.0, 0.5),
(-0.5, -0.0, 0.5),
(0.5, -0.0, 0.5),
(0.5, 1.0, -0.5),
(0.5, 1.0, 0.5),
(0.5, -0.0, 0.5),
(0.5, -0.0, -0.5),
(0.5, -0.0, -0.5),
(0.5, -0.0, 0.5),
(-0.5, -0.0, 0.5),
(-0.5, -0.0, -0.5),
(-0.5, -0.0, -0.5),
(-0.5, -0.0, 0.5),
(-0.5, 1.0, 0.5),
(-0.5, 1.0, -0.5),
(0.5, 1.0, 0.5),
(0.5, 1.0, -0.5),
(-0.5, 1.0, -0.5),
(-0.5, 1.0, 0.5),
]
normals = [
(0.0, 0.0, -1.0),
(0.0, 0.0, -1.0),
(0.0, 0.0, -1.0),
(0.0, 0.0, -1.0),
(0.0, -0.0, 1.0),
(0.0, -0.0, 1.0),
(0.0, -0.0, 1.0),
(0.0, -0.0, 1.0),
(1.0, -0.0, 0.0),
(1.0, -0.0, 0.0),
(1.0, -0.0, 0.0),
(1.0, -0.0, 0.0),
(-0.0, -1.0, -0.0),
(-0.0, -1.0, -0.0),
(-0.0, -1.0, -0.0),
(-0.0, -1.0, -0.0),
(-1.0, 0.0, -0.0),
(-1.0, 0.0, -0.0),
(-1.0, 0.0, -0.0),
(-1.0, 0.0, -0.0),
(0.0, 1.0, 0.0),
(0.0, 1.0, 0.0),
(0.0, 1.0, 0.0),
(0.0, 1.0, 0.0),
]
        blend_indices = [(0.0,)] * len(positions)  # one blend index per vertex
uv1 = [
(0.0, 1.0),
(1.0, 1.0),
(1.0, 0.0),
(0.0, 0.0),
(0.0, 0.5),
(1.0, 0.5),
(1.0, 1.5),
(0.0, 1.5),
(0.5, 0.5),
(0.5, 1.0),
(0.0, 1.0),
(0.0, 0.5),
(0.5, 0.0),
(0.5, 0.5),
(0.0, 0.5),
(0.0, 0.0),
(1.0, 0.5),
(1.0, 0.0),
(0.5, 0.0),
(0.5, 0.5),
(0.5, 1.0),
(0.5, 0.5),
(1.0, 0.5),
(1.0, 1.0),
]
tangents = [
(1.0, 0.0, 0.0),
(1.0, 0.0, 0.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 0.0),
(0.5, 0.0, 0.0),
(0.5, 0.0, 0.0),
(1.5, 0.0, 0.0),
(1.5, 0.0, 0.0),
(0.5, 0.0, 0.0),
(1.0, 0.0, 0.0),
(1.0, 0.0, 0.0),
(0.5, 0.0, 0.0),
(0.0, 0.0, 0.0),
(0.5, 0.0, 0.0),
(0.5, 0.0, 0.0),
(0.0, 0.0, 0.0),
(0.5, 0.0, 0.0),
(0.0, 0.0, 0.0),
(0.0, 0.0, 0.0),
(0.5, 0.0, 0.0),
(1.0, 0.0, 0.0),
(0.5, 0.0, 0.0),
(0.5, 0.0, 0.0),
(1.0, 0.0, 0.0),
]
verts = []
for i in range(len(positions)):
verts.extend(positions[i])
verts.extend(normals[i])
verts.extend(blend_indices[i])
verts.extend(uv1[i])
verts.extend(tangents[i])
self.vertices = tuple(verts)
self.vertnum = len(positions)
def _create_index(self):
self.index = [
22,
23,
20,
20,
21,
22,
18,
19,
16,
16,
17,
18,
14,
15,
12,
12,
13,
14,
10,
11,
8,
8,
9,
10,
6,
7,
4,
4,
5,
6,
2,
3,
0,
0,
1,
2,
]
self.indexnum = len(self.index)
def _create_u2(self):
self.u2 = 8
def _create_nodes(self):
for geom in self.geoms:
for lod in geom.lods:
lod.version = 11
lod.min = (-1.0, -1.0, -1.0)
lod.max = (1.0, 1.0, 1.0)
#lod.pivot = None # already assigned to None for new modmesh
lod.nodenum = 1
lod.nodes = [
1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
                    0.0, 0.0, 0.0, 1.0]  # 4x4 identity node transform; exact meaning unclear
lod.polycount = 0
def _create_materials(self):
for geom in self.geoms:
for lod in geom.lods:
lod.matnum = 1
lod.materials = [modmesh.bf2mat() for i in range(lod.matnum)]
for material in lod.materials:
material.alphamode = 0
material.fxfile = b'StaticMesh.fx'
material.technique = b'Base'
material.mapnum = 1
material.maps = []
for i in range(material.mapnum):
material.maps.insert(i, b'default.dds') # replace with texture path
material.vstart = 0
material.istart = 0
material.inum = 36
material.vnum = 24
material.u4 = 0
material.u5 = 0
material.mmin = (-1.0, -1.0, -1.0)
material.mmax = (1.0, 1.0, 1.0)
lod.polycount = lod.polycount + material.inum / 3
|
from datetime import datetime, timedelta
from os import PathLike
from pathlib import Path
from typing import List
from .timelog_io import TimelogIO
import pandas as pd
class TextIO(TimelogIO):
def __init__(self, act_folder: Path):
self.activity_folder = act_folder
self.act_list_path = self.activity_folder.joinpath('list.txt')
def record_time(self, activity: str, user: str, timestamp: datetime, backdated=False):
if not self.activity_folder.exists():
self.activity_folder.mkdir()
if not self.act_list_path.exists():
open(self.act_list_path, mode='w').close()
with open(self.act_list_path, mode='r+') as act_list:
if (activity + '\n') not in act_list:
raise ValueError('Unknown activity {}'.format(activity))
with open(self.activity_folder.joinpath(timestamp.strftime('%Y-%m') + '.csv'), mode='a') as file:
file.write(','.join([activity, str(timestamp), '\n']))
    def get_timestamps(self, earliest: datetime, latest: datetime) -> pd.DataFrame:
all = pd.DataFrame(columns=['activity', 'time'])
for csv_path in self.activity_folder.glob('*.csv'):
file_date = datetime.strptime(csv_path.stem, '%Y-%m')
if earliest <= file_date < latest:
with open(csv_path) as file:
df = pd.read_csv(
file, names=['activity', 'time'], usecols=[0, 1])
df['time'] = pd.to_datetime(df['time'])
df.drop(df[df['time'] > latest].index, inplace=True)
df.drop(df[df['time'] < earliest].index, inplace=True)
all = all.append(df, ignore_index=True)
return all
def new_activity(self, activity: str, parent: str, is_alias: bool):
raise NotImplementedError()
|
import simulation
import datetime
import sys
from simulation.common.helpers import timeit
"""
Description: Given a constant driving speed, find the range at the speed
before the battery runs out [speed -> distance]
"""
# TODO: make it so that previous values are logged into a numpy array and maybe written to an npz file
# TODO: wrap this in a function definition
# TODO: create simulation result class?
# TODO: create simulation history class?
# TODO: run this function on a fixed time scale
@timeit
def max_distance_from_speed(speed):
# Time parameters
tick = 60
# Simulation constants
incident_sunlight = 1000
initial_battery_charge = 0.90
battery_charge = initial_battery_charge
lvs_power_loss = 0
max_speed = 50
# Inputs
    # speed_kmh = int(input("Enter a speed (km/h): "))
    speed_kmh = speed
    if not 0 < speed_kmh <= max_speed:
        # The speed is fixed for this run, so fail fast instead of looping forever.
        raise ValueError(f"Input value out of correct range. Must be between 0km/h and {max_speed}km/h.")
distance_travelled = 0
basic_array = simulation.BasicArray()
basic_array.set_produced_energy(0)
basic_battery = simulation.BasicBattery(initial_battery_charge)
basic_lvs = simulation.BasicLVS(lvs_power_loss * tick)
basic_motor = simulation.BasicMotor()
time = tick
while True:
# Energy transfers (which will be replaced)
basic_array.update(tick)
produced_energy = basic_array.get_produced_energy()
basic_lvs.update(tick)
lvs_consumed_energy = basic_lvs.get_consumed_energy()
basic_motor.update(tick)
basic_motor.calculate_power_in(speed_kmh, 0)
motor_consumed_energy = basic_motor.get_consumed_energy()
basic_battery.charge(produced_energy)
try:
basic_battery.discharge(lvs_consumed_energy)
basic_battery.discharge(motor_consumed_energy)
basic_battery.update(tick)
except simulation.BatteryEmptyError:
break
else:
distance_travelled += speed_kmh * (tick / 3600)
if battery_charge == basic_battery.get_state_of_charge() and basic_battery.is_empty() is not True:
print(f"Battery charge equilibrium reached at speed {speed_kmh}km/h. "
f"Maximum traversable distance is infinite.")
sys.exit(1)
finally:
battery_charge = basic_battery.get_state_of_charge()
if time % 60 == 0:
print(f"Time: {time} sec / {str(datetime.timedelta(seconds=time))}")
print(f"Car speed: {round(speed_kmh, 2)}km/h")
print(f"Distance travelled: {round(distance_travelled, 3)}km")
print(f"Battery SOC: {round(battery_charge * 100, 3)}%\n")
time += tick
print(f"Speed: {speed_kmh}km/h \n"
f"Maximum distance traversable: {round(distance_travelled, 2)}km \n"
f"Time taken: {str(datetime.timedelta(seconds=time))} \n")
max_distance_from_speed(23)
|
class Color:
"""
Representation of an element on the game board.
"""
def __init__(self, name):
"""
Create a new board item.
:param name: Hexadecimal representation of the color, e.g. 'FFFFFF'.
"""
self.name = name
def is_empty(self):
"""
A Color item on a board is not empty.
:return: False, always.
"""
return False
def __eq__(self, other):
return isinstance(other, Color) and self.name == other.name
def __repr__(self):
return str(self.name)
def __str__(self):
return repr(self)
    def __lt__(self, other):
        # Python 3 removed cmp/__cmp__; order colors by name instead.
        return self.name < other.name
def __hash__(self):
return hash(self.name)
def __len__(self):
return len(self.name)
class EmptyColor(Color):
"""
Representation of an empty element on the game board (e.g. a space that has already been
cleared).
"""
def __init__(self):
"""
Create an empty board item.
"""
Color.__init__(self, 'EMPTY')
def is_empty(self):
"""
An EmptyColor is defined as empty.
:return: True, always.
"""
return True
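# Small illustration of the equality and emptiness semantics documented above
# (hypothetical hex value, not part of the original module):
if __name__ == '__main__':
    red = Color('FF0000')
    assert red == Color('FF0000') and not red.is_empty()
    assert EmptyColor().is_empty() and len(red) == 6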
|
from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
class CustomUser(AbstractUser):
class Meta:
verbose_name_plural = "CustomUser"
|
"""Main module."""
def ingredients(count):
"""Prints ingredients for making `count` arepas.
F-string of original code"""
print(f'{count*0.1} cups arepa flour')
print(f'{count*0.1} cups cheese')
print(f'{count*0.025} cups water')
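# A minimal usage sketch (the count of 10 is a hypothetical example value):
if __name__ == '__main__':
    # For 10 arepas this prints 1.0 cups arepa flour, 1.0 cups cheese and 0.25 cups water.
    ingredients(10)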
|
#from leagueRules import Stats, Points
import logging
from stats import Stats
class Player():
"""
This class is intended to be used as an abstract class for all player subclasses (e.g. Quarter-
back). The primary purpose of this class is to organize common implementation details for
    player subclasses.
Args:
positional args:
name (str): player's name
key word args:
ID (str): unique identification number for each player (for database use)
position (str): player's position (default = None)
team (str): player's real-world team (default = None)
number (int): player's jersey number (default = None)
status (str): player's status (e.g. healthy/questionable/etc.) (default = "Healthy")
Attributes:
_name (str): where player's name is stored, accessed/modified using the @property
and @[attribute].setter decorators
_ID (str): where the player's unique identification number is stored,
accessed/modified using the @property & @[attribute].setter decorators
_position (str): where player's position is stored, accessed/modified using the @property
and @[attribute].setter decorators
_team (str): where player's team is stored, accessed/modified using the @property
and @[attribute].setter decorators
_number (int): where player's jersey number is stored, accessed/modified using the
@property and @[attribute].setter decorators
_status (str): where the player's status is stored, accessed/modified using the @property
and @[attribute].setter decorators
_rank (int): where the player's rank is stored, accessed/modified using the @property
and @[attribute].setter decorators
_points (int): where the player's current fantasy points are stored, accessed/modified
using the @property and @[attribute].setter decorators
_points_history (dict): where the player's fantasy points are stored for previous weeks,
accessed/modified using @property & @[att].setter decorators
_stats (dict): where the player's current stats are stored, sub classes will add
additional keys to this dictionary depending on player position,
accessed/modified using @property & @[attribute].setter decorators
_stat_history (list): where the player's stats for previous weeks are stored as a list
of stats dictionaries, accessed/modified using @property,
@[attribute].setter, and @[attribute].deleter decorators
Methods:
In addition to getter & setter methods (and occasional deleter methods), this class
implements the following methods
touchdown(self): None
fumble(self): None
"""
def __init__(self, name, sport=None, position=None, position_type="Offense", team=None, number=None, status="Healthy"):
self._name = name
self._sport = sport
self._position = position
self._position_type = position_type
self._team = team
self._number = number
self._status = status
self._rank = None
self._points = None
self._point_history = {None: None}
self.stats = Stats(self, self._sport)
self._stat_history = [None]
@property
    def name(self):
        return self._name
    @name.setter
    def name(self, value):
        self._name = value
@property
def ID(self):
return self._ID
@ID.setter
def ID(self, ID):
self._ID = ID
@property
def position(self):
return self._position
@position.setter
    def position(self, value):
self._position = value
@property
def team(self):
return self._team
@team.setter
def team(self, value):
self._team = value
@property
def number(self):
return self._number
@number.setter
def number(self, value):
self._number = value
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
@property
def rank(self):
return self._rank
@rank.setter
def rank(self, value):
self._rank = value
@property
def points(self):
return self._points
@points.setter
def points(self, value):
self._points = value
@property
def point_history(self):
""":obj: 'dict':
setter accepts a key as arguments,
sets the :obj: 'dict' [key] value to 'value'
raises KeyError if key is not present
"""
return self._point_history
@point_history.setter
def point_history(self, key, value):
        if not 1 <= int(key) <= 17:
raise KeyError("Cannot set points for week '{}.' Please set the points for week in\
range: 1-17.".format(key))
else:
self._point_history[key] = value
@point_history.deleter
def point_history(self, key):
try:
del self._point_history[key]
except KeyError as ke:
            logging.error(ke)
raise KeyError("Cannot delete point history for '{},' reason: not found".format(key))
def get_stat(self, stat=None):
if stat == None:
return self.stats.__dict__
else:
try:
return stat, self.stats.__dict__[stat]
except KeyError as ke:
                logging.error(ke)
raise KeyError("stat '{}' does not exist for player type '{}'".format(stat, type(self)))
@property
def stat_history(self):
""":obj: 'list' of :obj: 'dict':
setter accepts an index and a value as arguments,
sets the :obj: 'dict' at list[index] to the 'value'
raises IndexError if index is out of range
"""
return self._stat_history
@stat_history.setter
def stat_history(self, index, value):
        if not 1 <= int(index) <= 17:
            logging.error("Index error in stat_history.setter: %s %s %s", self, index, value)
raise IndexError("Cannot set stats for week '{}.' Please set the stats for week in\
range: 1-17.".format(index))
else:
self._stat_history[index] = value
@stat_history.deleter
def stat_history(self, index):
try:
del self._stat_history[index]
except IndexError as ie:
            logging.error("Error in stat_history.deleter for index %s: %s", index, ie)
            raise IndexError("Stat history for '{}' not found".format(index))
def touchdown(self):
"""Used to increment touchdown stat by 1 and points by number specified in league rules
returns None
"""
self._stats["TD"] += 1
self._points += leagueRules.Points.touchdown()
def fumble(self):
"""Used to increment fumbles stat by 1 and points by number specified in league rules
returns None
"""
self._stats["Fumbles"] += 1
self._points += leagueRules.Points.fumble()
class OffensivePlayer(Player):
def __init__(self, name, position, team, number):
super().__init__(name, position=position, team=team, number=number)
def rush(self, yards=0, attempt=1):
self._stats["rush_attempts"] += attempt
self._stats["rush_yards"] += yards
self._points += leagueRules.Points.rush(yards, attempt)
def _pass(self, yards=0, attempt=1, complete=False, intercepted=False, touchdown=False):
self._stats["pass_attempts"] += attempt
if complete:
self._stats["completions"] += 1
self._stats["pass_yards"] += yards
self._points += leagueRules.Points._pass(yards, attempt)
if intercepted:
self._stats["interceptions"] += 1
self._points += leagueRules.Points._pass_int()
if touchdown:
self._stats["pass_TD"] += 1
self._points += leagueRules.Points._pass_TD()
def reception(self, yards=0, attempt=1, complete=False):
self._stats["rec_attempts"] += attempt
if complete:
self._stats["receptions"] += 1
self._stats["rec_yards"] += yards
self._points += leagueRules.Points._pass(yards, attempt)
def offensive_safety(self, number=1):
self._stats["safety"] += number
self._points += leagueRules.Points.offensive_safety()
class Quarterback(OffensivePlayer):
def __init__(self, name, team, number):
super().__init__(name, position="QB", team=team, number=number)
class Runningback(OffensivePlayer):
def __init__(self, name, team, number):
super().__init__(name, position="RB", team=team, number=number)
|
import argparse
import os
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import grad
import torchvision
from torchvision import datasets, transforms
from torchvision.utils import save_image
import torchvision.models as models
import inversefed
from utils.dataloader import DataLoader
from utils.stackeddata import StackedData
# inverting gradients algorithm from https://github.com/JonasGeiping/invertinggradients
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
parser = argparse.ArgumentParser(description='Adversarial attack from gradient leakage')
parser.add_argument('--model', type=str, help='model to perform adversarial attack')
parser.add_argument('--data', type=str, help='dataset used')
parser.add_argument('--stack_size', default=4, type=int, help='size use to stack images')
parser.add_argument('-l','--target_idx', nargs='+', help='list of data indices to reconstruct')
parser.add_argument('--save', type=str2bool, nargs='?', const=False, default=True, help='save')
parser.add_argument('--gpu', type=str2bool, nargs='?', const=False, default=True, help='use gpu')
args = parser.parse_args()
model_name = args.model
data = args.data
stack_size = args.stack_size
save_output = args.save
if args.target_idx is not None:
target_idx = [int(i) for i in args.target_idx]
else:
target_idx = args.target_idx
device = 'cpu'
if args.gpu:
device = 'cuda'
print("Running on %s" % device)
def val_model(dataset, model, criterion):
# evaluate trained model, record wrongly predicted index
model.eval()
# record wrong pred index
index_ls = []
with torch.no_grad():
val_loss, val_corrects = 0, 0
for batch_idx, (inputs, labels) in enumerate(dataset):
inputs = inputs.unsqueeze(dim=0).to(device)
labels = torch.as_tensor([labels]).to(device)
outputs = model(inputs)
loss = criterion(outputs, labels)
_, preds = torch.max(outputs, 1)
val_loss += loss.item() * inputs.size(0) # weight by the batch size
val_corrects += torch.sum(preds == labels.data)
if (preds != labels.data):
index_ls.append(batch_idx)
total_loss = val_loss / len(dataset)
total_acc = val_corrects.double() / len(dataset)
print('{} Loss: {:.4f} Acc: {:.4f}'.format('val', total_loss, total_acc))
return index_ls
dataloader = DataLoader(data, device)
dataset, data_shape, classes, (dm, ds) = dataloader.get_data_info()
model = models.resnet18(pretrained=True) # use pretrained model from torchvision
model.fc = nn.Linear(512, len(classes)) # reinitialize model output: https://pytorch.org/tutorials/beginner/finetuning_torchvision_models_tutorial.html
model = model.to(device)
model.eval()
criterion = nn.CrossEntropyLoss()
stack_data = StackedData(stack_size=stack_size, model_name=model_name, dataset_name=data, dataset=dataset, save_output=save_output, device=device)
if target_idx is None:
wrong_pred_idx = val_model(dataset, model, criterion)
else:
if not isinstance(target_idx, list):
wrong_pred_idx = [target_idx]
else:
wrong_pred_idx = target_idx
stacked_data_d = stack_data.create_stacked_data(wrong_pred_idx)
for i in range(len(stacked_data_d['gt_img'])):
gt_img, gt_label, img_idx = stacked_data_d['gt_img'][i], stacked_data_d['gt_label'][i], stacked_data_d['img_index'][i]
stack_pred = model(gt_img)
target_loss = criterion(stack_pred, gt_label)
input_grad = grad(target_loss, model.parameters())
input_grad = [g.detach() for g in input_grad]  # avoid shadowing torch.autograd.grad
# default configuration from inversefed
config = dict(signed=True,
boxed=False,
cost_fn='sim',
indices='def',
norm='none',
weights='equal',
lr=0.1,
optim='adam',
restarts=1,
max_iterations=1200,
total_variation=0.1,
init='randn',
filter='none',
lr_decay=True,
scoring_choice='loss')
rec_machine = inversefed.GradientReconstructor(model, (dm, ds), config, num_images=gt_img.shape[0])
results = rec_machine.reconstruct(input_grad, gt_label, gt_img ,img_shape=data_shape)
output_img, stats = results
rec_pred = model(output_img)
print('Predictions for reconstructed images: ', [classes[l] for l in torch.max(rec_pred, axis=1)[1]])
stack_data.grid_plot(img_idx, output_img, rec_pred, dm, ds) |
"""
TO PRODUCE CANDIDATES FROM CDIPS LCS
----------
Merges steps 2-5 of "HOWTO.md". Goes from cdips-pipeline light curves to
period-finding results.
After running, you need to manually tune the SNR distribution for which you
consider objects, in `do_initial_period_finding`.
USAGE:
python -u lc_thru_periodfinding.py &> logs/s9_to_pf.log &
"""
import os, shutil
from glob import glob
import multiprocessing as mp
import trex_lc_to_mast_lc as tlml
import get_cdips_lc_stats as get_cdips_lc_stats
from how_many_cdips_stars_on_silicon import how_many_cdips_stars_on_silicon
from do_initial_period_finding import do_initial_period_finding
def main():
##########################################
sector = 15
outdir = '/nfs/phtess2/ar0/TESS/PROJ/lbouma/CDIPS_LCS/'
overwrite = 0
cams = [1,2,3,4]
ccds = [1,2,3,4]
OC_MG_CAT_ver = 0.6
cdipsvnum = 1
##########################################
nworkers = mp.cpu_count()
lcpaths = glob(os.path.join(outdir, 'sector-{}'.format(sector),
'cam?_ccd?', 'hlsp*.fits'))
# turn cdips-pipeline light curves to HLSP light curves
if len(lcpaths) == 0 or overwrite:
tlml.trex_lc_to_mast_lc(sectors=[sector], cams=cams, ccds=ccds,
make_symlinks=1, reformat_lcs=1,
OC_MG_CAT_ver=OC_MG_CAT_ver,
cdipsvnum=cdipsvnum, outdir=outdir)
else:
print("found {} HLSP LCs; won't reformat".format(len(lcpaths)))
# get stats, make the supp data file, print out the metadata, and move
# allnan light curves
statsfile = os.path.join(
'/nfs/phtess2/ar0/TESS/PROJ/lbouma/cdips/results/cdips_lc_stats',
'sector-{}'.format(sector),
'cdips_lc_statistics.txt'
)
suppstatsfile = statsfile.replace('cdips_lc_statistics',
'supplemented_cdips_lc_statistics')
if not os.path.exists(suppstatsfile) and not overwrite:
get_cdips_lc_stats.main(sector, OC_MG_CAT_ver, cdipsvnum, overwrite)
else:
print('found {}'.format(suppstatsfile))
# see how many LCs were expected
outpath = (
'/nfs/phtess2/ar0/TESS/PROJ/lbouma/cdips/results/star_catalog/'+
'how_many_cdips_stars_on_silicon_sector{}.txt'.
format(sector)
)
if not os.path.exists(outpath) and not overwrite:
how_many_cdips_stars_on_silicon(sector=sector, ver=OC_MG_CAT_ver)
else:
print('found {}'.format(outpath))
# run initial TLS and LS
do_initial_period_finding(
sectornum=sector, nworkers=nworkers, maxworkertasks=1000,
outdir='/nfs/phtess2/ar0/TESS/PROJ/lbouma/cdips/results/cdips_lc_periodfinding',
OC_MG_CAT_ver=OC_MG_CAT_ver
)
msg = (
"""
After running, you need to manually tune the SNR distribution for which
you consider objects, in `do_initial_period_finding`.
"""
)
print(msg)
if __name__=="__main__":
main()
|
import os
import argparse
import logging
import shutil
import torch
import torch.nn as nn
import numpy as np
from transformers import AutoTokenizer
from transformers.optimization import AdamW
from tensorboardX import SummaryWriter
from src.models.albert_for_linreg.utils import load_and_cache_dataset, load_coeffs
from src.models.albert_for_linreg.model import LinRegModel
from src.models.albert_for_linreg.train import train
from src.models.albert_for_linreg.eval import evaluate
logging.basicConfig(level=logging.INFO)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_architecture', choices=['comment', 'comment_post'], required=True
)
parser.add_argument(
'--base_model', default='albert-base-v2'
)
parser.add_argument(
'--comment_model', default='albert-base-v2', type=str)
parser.add_argument(
'--post_model', default='albert-base-v2', type=str)
parser.add_argument(
"--batch_size", default=20, type=int, help="Batch size per GPU/CPU for evaluation."
)
parser.add_argument("--learning_rate", default=5e-5, type=float)
parser.add_argument(
"--sequence_length", default=128, type=int, help="Sequence length for language model."
)
parser.add_argument(
"--epochs", default=1, type=int, help="Total number of training epochs to perform."
)
parser.add_argument("--logging_steps", type=int, default=20, help="Log every X updates steps.")
parser.add_argument("--evaluate_during_training_steps", type=int, default=0,
help="Evaluate on dev set every X steps during training.")
parser.add_argument("--gradient_accumulation_steps", type=int, default=1,
help="Backprop loss every X steps.")
parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
parser.add_argument("--do_eval", action="store_true", help="Whether to run evaluation.")
parser.add_argument(
"--cache_dir",
default="",
type=str,
help="Optional directory to store the pre-trained models downloaded from s3 (instead of the default one)",
)
parser.add_argument(
"--data_dir",
default="",
required=True,
type=str
)
parser.add_argument(
"--output_dir",
default="",
required=True,
type=str
)
parser.add_argument(
"--output_postfix",
default=None,
type=str
)
parser.add_argument(
"--linear_regression_coefs",
default=None,
type=str
)
args = parser.parse_args()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
args.n_gpu = torch.cuda.device_count()
args.device = device
args.output_dir = os.path.join(args.output_dir, args.model_architecture)
if args.output_postfix:
args.output_dir = os.path.join(args.output_dir, args.output_postfix)
if os.path.exists(args.output_dir):
shutil.rmtree(args.output_dir)
tb_path = os.path.join(args.output_dir, 'tb')
tb_writer = SummaryWriter(tb_path)
if not os.path.isdir(args.output_dir):
os.makedirs(args.output_dir)
tokenizer = AutoTokenizer.from_pretrained(args.base_model,
cache_dir=args.cache_dir if args.cache_dir else None,
use_fast=True)
do_post = bool(args.model_architecture=='comment_post')
train_dataset = load_and_cache_dataset(args, tokenizer, 'train', post=do_post)
dev_dataset = load_and_cache_dataset(args, tokenizer, 'dev', post=do_post)
test_dataset = load_and_cache_dataset(args, tokenizer, 'test', post=do_post)
model = LinRegModel(load_coeffs(args), args).to(args.device)
if args.n_gpu > 1:
model = nn.DataParallel(model)
if args.do_train:
optimizer = AdamW(model.parameters(), lr=args.learning_rate)
model = train(args, model, train_dataset, dev_dataset, optimizer, tb_writer)
if args.do_eval:
evaluate(args, model, test_dataset, tb_writer)
torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))
model_to_save = model.module if hasattr(model, 'module') else model
torch.save(model_to_save, os.path.join(args.output_dir, 'model.pt'))
tb_writer.close()
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
__url = 'https://www.podnapisi.net'
def build_search_requests(core, service_name, meta):
params = {
'keywords': meta.title if meta.is_movie else meta.tvshow,
'language': core.utils.get_lang_ids(meta.languages, core.kodi.xbmc.ISO_639_1),
}
if meta.is_tvshow:
params['seasons'] = meta.season
params['episodes'] = meta.episode
params['movie_type'] = ['tv-series', 'mini-series']
if meta.tvshow_year_thread:
meta.tvshow_year_thread.join()
if meta.tvshow_year:
params['year'] = meta.tvshow_year
else:
params['movie_type'] = 'movie'
params['year'] = meta.year
request = {
'method': 'GET',
'url': '%s/subtitles/search/advanced' % __url,
'headers': {
'Accept': 'application/json'
},
'params': params
}
return [request]
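# Illustrative movie request built above (values are examples, not real API output):
# [{'method': 'GET',
#   'url': 'https://www.podnapisi.net/subtitles/search/advanced',
#   'headers': {'Accept': 'application/json'},
#   'params': {'keywords': 'Inception', 'language': ['en'], 'movie_type': 'movie', 'year': 2010}}]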
def parse_search_response(core, service_name, meta, response):
try:
results = core.json.loads(response.text)
except Exception as exc:
core.logger.error('%s - %s' % (service_name, exc))
return []
service = core.services[service_name]
lang_ids = core.utils.get_lang_ids(meta.languages, core.kodi.xbmc.ISO_639_1)
def map_result(result):
name = ''
last_similarity = -1
for release_name in result['custom_releases']:
similarity = core.difflib.SequenceMatcher(None, release_name, meta.filename_without_ext).ratio()
if similarity > last_similarity:
last_similarity = similarity
name = release_name
if name == '':
name = '%s %s' % (meta.title, meta.year)
if meta.is_tvshow:
name = '%s S%sE%s' % (meta.tvshow, meta.season.zfill(2), meta.episode.zfill(2))
name = '%s.srt' % name
lang_code = result['language']
lang = meta.languages[lang_ids.index(lang_code)]
return {
'service_name': service_name,
'service': service.display_name,
'lang': lang,
'name': name,
'rating': 0,
'lang_code': lang_code,
'sync': 'true' if meta.filename_without_ext in result['custom_releases'] else 'false',
'impaired': 'true' if 'hearing_impaired' in result['flags'] else 'false',
'color': 'orange',
'action_args': {
'url': '%s%s' % (__url, result['download']),
'lang': lang,
'filename': name
}
}
return list(map(map_result, results['data']))
def build_download_request(core, service_name, args):
request = {
'method': 'GET',
'url': args['url']
}
return request
|
import re
from decimal import *
import psycopg2 as pg2
stop_words = {
" unsigned ": " ",
" UNSIGNED ": " ",
" datetime ": " timestamp ",
" DATETIME ": " TIMESTAMP "
}
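# Illustrative substitution (assumed MySQL-style input): a DDL fragment like
# "price INT UNSIGNED , created DATETIME )" becomes
# "price INT , created TIMESTAMP )" after the re.sub loop in create()/alter().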
def delete(commit_settings, tablename, value_dict):
com_db = pg2.connect(
host = commit_settings["HOST"],
port = commit_settings["PORT"],
user = commit_settings["USER"],
password = commit_settings["PASS"],
dbname = commit_settings['DB'])
commit_cursor = com_db.cursor()
lod_query = "DELETE FROM {} WHERE ".format(tablename)
for x in value_dict:
if lod_query.strip().endswith('WHERE') == False:
lod_query += " and {} = \'{}\'".format(x, value_dict[x])
else:
lod_query += " {} = \'{}\'".format(x, value_dict[x])
lod_query = lod_query.strip()
commit_cursor.execute(lod_query)
com_db.commit()
com_db.close()
def insert(commit_settings, tablename, value_dict):
com_db = pg2.connect(
host = commit_settings["HOST"],
port = commit_settings["PORT"],
user = commit_settings["USER"],
password = commit_settings["PASS"],
dbname = commit_settings['DB'])
commit_cursor = com_db.cursor()
col_names = []
col_values = []
for x in value_dict:
col_names.append(x)
if value_dict[x] is None or value_dict[x] == 'None':
col_names.remove(x)
else:
col_values.append(str(value_dict[x]))
lod_query = "INSERT INTO {} ({}) VALUES({}".format(tablename, str(col_names)[1:-1].replace("'" ,"")
,'%s, ' *len(col_values))
lod_query = lod_query.strip()[:-1 ] +')'
commit_cursor.execute(lod_query, col_values)
com_db.commit()
com_db.close()
def update(commit_settings, tablename, before_values, after_values):
com_db = pg2.connect(
host = commit_settings["HOST"],
port = commit_settings["PORT"],
user = commit_settings["USER"],
password = commit_settings["PASS"],
dbname = commit_settings['DB'])
diff_dict = {x :before_values[x] for x in before_values if before_values[x] != after_values[x]}
cond_dict = {x :before_values[x] for x in before_values if before_values[x] == after_values[x]}
commit_cursor = com_db.cursor()
lod_query = "UPDATE {} SET".format(tablename)
for x in diff_dict:
lod_query += " {} = \'{}\', ".format(x, diff_dict[x])
lod_query = lod_query.strip()[:-1] + " WHERE"
for x in cond_dict:
if lod_query.strip().endswith('WHERE') == False:
lod_query += " and {} = \'{}\'".format(x, cond_dict[x])
else:
lod_query += " {} = \'{}\'".format(x, cond_dict[x])
lod_query = lod_query.strip()
commit_cursor.execute(lod_query)
com_db.commit()
com_db.close()
def create(commit_settings, query):
com_db = pg2.connect(
host = commit_settings["HOST"],
port = commit_settings["PORT"],
user = commit_settings["USER"],
password = commit_settings["PASS"],
dbname = commit_settings['DB'])
for each_stop_word in stop_words:
query = re.sub(each_stop_word, stop_words[each_stop_word], query)
commit_cursor = com_db.cursor()
commit_cursor.execute(query)
com_db.commit()
com_db.close()
def alter(commit_settings, query):
com_db = pg2.connect(
host = commit_settings["HOST"],
port = commit_settings["PORT"],
user = commit_settings["USER"],
password = commit_settings["PASS"],
dbname = commit_settings['DB'])
for each_stop_word in stop_words:
query = re.sub(each_stop_word, stop_words[each_stop_word], query)
commit_cursor = com_db.cursor()
commit_cursor.execute(query)
com_db.commit()
com_db.close()
|
class Solution:
def reverseWords(self, s: str) -> str:
"""
1. Init array
2. Append each word to the array
3. Reverse each individual word in the array
4. Use join method and return array
"""
# Splits string into array by whitespace
arr = s.split()
new_arr = list()
# Reverses each word in array
for word in arr:
new_arr.append(word[::-1])
# Joins string elements of array into one string
return ' '.join(new_arr) |
from copy import deepcopy
from struct import unpack
from .gripcontrol import is_python3, websocket_control_message
from .websocketevent import WebSocketEvent
class WebSocketContext(object):
def __init__(self, id, meta, in_events, grip_prefix=''):
self.id = id
self.in_events = in_events
self.read_index = 0
self.accepted = False
self.close_code = None
self.closed = False
self.out_close_code = None
self.out_events = []
self.orig_meta = meta
self.meta = deepcopy(meta)
self.grip_prefix = grip_prefix
def is_opening(self):
return (self.in_events and self.in_events[0].type == 'OPEN')
def accept(self):
self.accepted = True
def close(self, code=None):
self.closed = True
if code is not None:
self.out_close_code = code
else:
self.out_close_code = 0
def can_recv(self):
for n in range(self.read_index, len(self.in_events)):
if self.in_events[n].type in ('TEXT', 'BINARY', 'CLOSE', 'DISCONNECT'):
return True
return False
def recv(self):
e = None
while e is None and self.read_index < len(self.in_events):
if self.in_events[self.read_index].type in ('TEXT', 'BINARY', 'CLOSE', 'DISCONNECT'):
e = self.in_events[self.read_index]
elif self.in_events[self.read_index].type == 'PING':
self.out_events.append(WebSocketEvent('PONG'))
self.read_index += 1
if e is None:
raise IndexError('read from empty buffer')
if e.type == 'TEXT':
if e.content:
return e.content.decode('utf-8')
else:
if is_python3:
return ''
else:
return u''
elif e.type == 'BINARY':
if e.content:
return e.content
else:
if is_python3:
return b''
else:
return ''
elif e.type == 'CLOSE':
if e.content and len(e.content) == 2:
self.close_code = unpack('>H', e.content)[0]
return None
else: # DISCONNECT
raise IOError('client disconnected unexpectedly')
def send(self, message):
if is_python3:
if isinstance(message, str):
message = message.encode('utf-8')
content = b'm:' + message
else:
if isinstance(message, unicode):
message = message.encode('utf-8')
content = 'm:' + message
self.out_events.append(WebSocketEvent('TEXT', content))
def send_binary(self, message):
if is_python3:
if isinstance(message, str):
message = message.encode('utf-8')
content = b'm:' + message
else:
if isinstance(message, unicode):
message = message.encode('utf-8')
content = 'm:' + message
self.out_events.append(WebSocketEvent('BINARY', content))
def send_control(self, message):
if is_python3:
if isinstance(message, str):
message = message.encode('utf-8')
content = b'c:' + message
else:
if isinstance(message, unicode):
message = message.encode('utf-8')
content = 'c:' + message
self.out_events.append(WebSocketEvent('TEXT', content))
def subscribe(self, channel):
self.send_control(websocket_control_message(
'subscribe', {'channel': self.grip_prefix + channel}))
def unsubscribe(self, channel):
self.send_control(websocket_control_message(
'unsubscribe', {'channel': self.grip_prefix + channel}))
def detach(self):
self.send_control(websocket_control_message('detach'))
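# Usage sketch (assumption: `events` is a list of WebSocketEvent objects parsed
# from a GRIP WebSocket-over-HTTP request body; names are illustrative):
#   ctx = WebSocketContext(connection_id, {}, events)
#   if ctx.is_opening():
#       ctx.accept()
#       ctx.subscribe('room-1')
#   while ctx.can_recv():
#       msg = ctx.recv()      # returns None for a close event
#       if msg is not None:
#           ctx.send('echo: ' + msg)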
|
# code -> {"name": "creaturename",
# "desc": "description",}
CREATURES = {}
SOLO_CREATURES = {}
GROUP_CREATURES = {}
with open("creatures.csv") as f:
lines = f.readlines()
for line in lines:
if line.strip() == "":
continue
parts = line.strip().split(",")
code = int(''.join(parts[1:5]))
if parts[6].strip() == "group":
GROUP_CREATURES[code] = {"name": parts[0], "desc": parts[5]}
else:
SOLO_CREATURES[code] = {"name": parts[0], "desc": parts[5]}
CREATURES[code] = {"name": parts[0], "desc": parts[5]}
|
class Exception_base(Exception):
def __init__(self, q_list=None, error=None, img=None):
# store arguments as-is (any of them may be None); send_exception relies on self.error existing
self.q_list = q_list
self.error = error
self.img = img
self.send_exception()
def send_exception(self):
if self.q_list is not None:
disc = self.q_list.get("2main_thread")
res = {
"bot_comment": self.error
}
if self.img:
res["filename"] = self.img
disc.put(res)
class TokenInvalidTellonym(Exception_base):
def __init__(self, q_list=None, error=None):
super().__init__(q_list=q_list, error="tellonym token is invalid")
class TokenInvalidQuestionmi(Exception_base):
def __init__(self, q_list=None, error=None):
super().__init__(q_list=q_list, error="questionmi token is invalid")
class TokenReadImpossible(Exception_base):
def __init__(self, q_list=None, error=None):
super().__init__(q_list=q_list, error="token read impossible")
class ConnectionTimeout(Exception_base):
def __init__(self, q_list=None, error=None):
super().__init__(q_list=q_list, error="tellonym connection timeout")
class CaptchaRequired(Exception_base):
def __init__(self, q_list=None, error=None):
super().__init__(q_list=q_list, error="tellonym conneciotn timeout")
class Post_ratio(Exception_base):
def __init__(self, q_list=None, error=None, img=None):
super().__init__(q_list=q_list, error="post ratio", img=img)
|
# -*- coding: utf-8 -*-
"""
@time : 2020/05/10 19:32
@author : 姚明伟
"""
from flask import Blueprint
# Create the blueprint object
index_blu = Blueprint('index', __name__)
from .views import * |
from ast import literal_eval
dirs = {
' ': set(),
'│': {0, 2},
'─': {1, 3},
'┌': {1, 2},
'┐': {2, 3},
'└': {0, 1},
'┘': {0, 3},
}
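# Each box-drawing character maps to the set of sides it connects,
# using the encoding 0=up, 1=right, 2=down, 3=left (see advance() below).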
def all_grids(side):
grids = [[]]
for row in range(side):
grids = [grid + [[]] for grid in grids]
for col in range(side):
next_grids = []
for working_grid in grids:
# 0 is up, 1 is right, etc.
allowed_chars = set(' │─┌┐└┘')
# up allowed and required
if row > 0:
if 2 in dirs[working_grid[row-1][col]]:
allowed_chars -= set(' ┌┐')
if row == side - 1:
allowed_chars -= set('─')
# Already had a crossing
elif (working_grid[row-1][col] == '─' and row > 1
and 2 in dirs[working_grid[row-2][col]]):
allowed_chars -= set(' ─┌┐')
else:
allowed_chars -= set('│└┘')
else:
allowed_chars -= set('│└┘')
# left allowed and required
if col > 0:
if 1 in dirs[working_grid[row][col-1]]:
allowed_chars -= set(' ┌└')
if col == side - 1:
allowed_chars -= set('│')
elif (working_grid[row][col-1] == '│' and col > 1
and 1 in dirs[working_grid[row][col-2]]):
allowed_chars -= set(' │┌└')
else:
allowed_chars -= set('─┐┘')
else:
allowed_chars -= set('─┐┘')
# down not allowed
if row == side - 1:
allowed_chars -= set('│┌┐')
# right not allowed
if col == side - 1:
allowed_chars -= set('─└┌')
for char in sorted(allowed_chars):
new_grid = working_grid[:]
new_grid[row] = working_grid[row] + [char]
next_grids.append(new_grid)
#print(row, col, len(next_grids))
grids = next_grids
return grids
def pretty(grid):
return '\n'.join(''.join(row) for row in grid)
def advance(pos, dir):
if dir == 0:
return pos[0]-1, pos[1]
if dir == 1:
return pos[0], pos[1]+1
if dir == 2:
return pos[0]+1, pos[1]
if dir == 3:
return pos[0], pos[1]-1
assert False
def notate(grid):
side = len(grid)
assert side == len(grid[0])
color = [[None for _ in range(side)] for _ in range(side)]
endpoints = []
pos = None
dir = None
for row in range(side):
for col in range(side):
if grid[row][col] != ' ':
pos = row, col
dir = (sorted(dirs[grid[row][col]])[0] + 2) % 4
break
if pos is not None:
break
if pos is None:
return None
start_pos = pos
start_dir = dir
while pos != start_pos or color[pos[0]][pos[1]] is None:
char = grid[pos[0]][pos[1]]
# Crossing, end of color
reverse_dir = (dir + 2) % 4
if reverse_dir not in dirs[char]:
endpoints.append(pos)
# dir doesn't change
dir = dir
else:
color[pos[0]][pos[1]] = len(endpoints)
new_dirs = dirs[char] - {reverse_dir}
dir = new_dirs.pop()
assert not new_dirs
pos = advance(pos, dir)
#print(pos, dir)
#print('\n'.join(''.join(str(cell) if cell is not None else '.' for cell in row) for row in color))
for row in range(side):
for col in range(side):
if grid[row][col] != ' ' and color[row][col] is None:
# Not a single loop
return None
return tuple(color[row][col] % len(endpoints) for row, col in endpoints)
def find_all(max_side):
for side in range(1, max_side + 1):
seen = set()
grids = all_grids(side)
for grid in grids:
note = notate(grid)
if note not in seen:
seen.add(note)
print(pretty(grid))
print(note)
print(len(seen))
def find(goal_note):
side = 1
while True:
grids = all_grids(side)
for grid in grids:
note = notate(grid)
if note == goal_note:
return grid
side += 1
print(pretty(find(literal_eval(input()))))
|
class Edge:
def __init__(self, arc, source, dest):
self.arc = arc
self.source = source
self.dest = dest
self.direction = arc.direction
# initialize without path (adjusted later); FUTURE WORK: multiple paths per edge
self.paths = [] # list of paths(=list of nodes); initially path-dr equally split among all paths
self.flows = [] # list of flows passing the edge
# automatically add edge to source and dest instance
self.source.edges_out[dest] = self
self.dest.edges_in[source] = self
def __str__(self):
if self.direction == "forward":
return "{}->{}:{}".format(self.source, self.dest, self.flows)
else:
return "{}<-{}:{}".format(self.dest, self.source, self.flows)
def __repr__(self):
if self.direction == "forward":
return "{}->{}:{}".format(self.source, self.dest, self.flows)
else:
return "{}<-{}:{}".format(self.dest, self.source, self.flows)
# source and dest identify any edge (there can be at most 1 edge between any source and dest)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.source == other.source and self.dest == other.dest
return NotImplemented
def __ne__(self, other):
if isinstance(other, self.__class__):
return not self.__eq__(other)
return NotImplemented
def __hash__(self):
return hash((self.source, self.dest))
def print(self):
print("Edge from {} to {} ({}) with flows {}".format(self.source, self.dest, self.direction, self.flows))
for path in self.paths:
print("\tNodes on path: ", *path, sep=" ")
# total data rate along the edge of all current flows
def flow_dr(self):
return sum(f.dr[self] for f in self.flows)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class AverageAttention(nn.Module):
def __init__(self, vec_dim, d_k=64):
super().__init__()
self.vec_dim = vec_dim
self.d_k = d_k
self.w_q = nn.Linear(vec_dim, d_k)
self.w_k = nn.Linear(vec_dim, d_k)
self.w_v = nn.Linear(vec_dim, d_k)
def attention(self, q, k, v):
# Average Attention
scores = torch.matmul(q, k.transpose(-1, -2)
).mean(1) / np.sqrt(self.d_k)
scores = F.softmax(scores, dim=-1)
z = torch.matmul(scores, v)
return (z, scores)
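# Shape sketch (assuming x is (seq_len, vec_dim), as in the __main__ demo below):
#   q, k, v: (seq_len, d_k); q @ k.T: (seq_len, seq_len); .mean(1) averages over
#   keys to give scores of shape (seq_len,), which are softmaxed and mix v
#   into z of shape (d_k,).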
def forward(self, x):
# Calculate query, key, and value matrices
q = self.w_q(x)
k = self.w_k(x)
v = self.w_v(x)
z, scores = self.attention(q, k, v)
return z, scores
class LinearAttention(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(1600, 1024)
self.fc2 = nn.Linear(1024, 512)
self.fc3 = nn.Linear(512, 1)
def forward(self, x):
x = self.fc1(x)
x = F.relu(x, inplace=True)
x = self.fc2(x)
x = F.relu(x, inplace=True)
x = self.fc3(x)
scores = F.relu(x, inplace=True)
return scores
if __name__ == '__main__':
att = AverageAttention(4)
x = torch.Tensor([[1, 2, 3, 4], [5, 6, 7, 8]])
z, score = att(x)
# print(z)
# print(z.shape)
print(score)
print(score.shape)
|
# -*- coding: utf-8 -*-
from django.db import models
from django.utils.text import slugify
from django.utils.translation import gettext_lazy as _
class Category(models.Model):
name = models.CharField(_("Name"), max_length=255)
order = models.IntegerField(_("Display order"))
description = models.CharField(_("Description"), max_length=1023, blank=True, null=True)
shown = models.BooleanField(default=False, verbose_name='Show DESC?')
class Meta:
verbose_name = _('category')
verbose_name_plural = _('categories')
ordering = ('order',)
def __str__(self):
return self.name
def slugify(self):
return slugify(str(self))
|
#!/usr/bin/env python
# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
"""
Used to handle tasks: insert the task info into database and update it
"""
from functest.api.database.db import DB_SESSION
from functest.api.database.v1.models import Tasks
class TasksHandler(object):
""" Tasks Handler Class """
def insert(self, kwargs): # pylint: disable=no-self-use
""" To insert the task info into database """
task = Tasks(**kwargs)
DB_SESSION.add(task) # pylint: disable=maybe-no-member
DB_SESSION.commit() # pylint: disable=maybe-no-member
return task
def get_task_by_taskid(self, task_id): # pylint: disable=no-self-use
""" Obtain the task by task id """
# pylint: disable=maybe-no-member
task = Tasks.query.filter_by(task_id=task_id).first()
if not task:
raise ValueError
return task
def update_attr(self, task_id, attr):
""" Update the required attributes of the task """
task = self.get_task_by_taskid(task_id)
for key, value in attr.items():
setattr(task, key, value)
DB_SESSION.commit() # pylint: disable=maybe-no-member
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-21 15:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('repositories', '0002_auto_20160911_1601'),
]
operations = [
migrations.AddField(
model_name='gitlabrepository',
name='ci_token',
field=models.CharField(default='', help_text='CI token used to clone and checkout the repository', max_length=128, verbose_name='CI token for the repository'),
preserve_default=False,
),
migrations.AddField(
model_name='gitlabrepository',
name='project_userspace',
field=models.CharField(default='', max_length=128, verbose_name='Project userspace'),
preserve_default=False,
),
migrations.AlterField(
model_name='gitlabrepository',
name='project_name',
field=models.CharField(max_length=128, verbose_name='Project name'),
),
]
|
from django.contrib import admin
from django.contrib.auth import admin as auth_admin
from django.contrib.auth import get_user_model
from django.utils.translation import gettext_lazy as _
from afriproperty.users.forms import UserChangeForm, UserCreationForm
from afriproperty.utils.export_as_csv import ExportCsvMixin
from .models import AgentProfile, Banks, LoginHistory, Testimonial, UserNotification
User = get_user_model()
admin.site.register(Banks)
admin.site.register(LoginHistory)
admin.site.register(Testimonial)
admin.site.register(UserNotification)
@admin.register(AgentProfile)
class AgentProfileAdmin(admin.ModelAdmin, ExportCsvMixin):
model = AgentProfile
list_per_page = 250
empty_value_display = '-empty-'
search_fields = ["__str__"]
list_display = [
"__str__",
'company_name',
"company_address",
"bvn",
'verified',
'is_blocked'
]
list_editable = [
'is_blocked',
]
actions = [
"export_as_csv",
]
@admin.register(User)
class UserAdmin(auth_admin.UserAdmin, ExportCsvMixin):
form = UserChangeForm
add_form = UserCreationForm
fieldsets = (
(_("Authentication Info"), {"fields": ("username", "email", "password")}),
(_("Personal info"), {"fields": ("account_type", "first_name", "last_name", "phone_number", "postcode")}),
(_("Social info"), {"fields": ("facebook", "linkedin", "instagram")}),
(
_("Permissions"),
{
"fields": (
"accept_terms",
"has_testified",
"is_active",
"is_staff",
"is_superuser",
"groups",
"user_permissions",
),
},
),
(_("Important dates"), {"fields": ("last_login", "date_joined")}),
)
list_display = ["username", "account_type", "first_name", "last_name", "phone_number", "accept_terms", "has_testified", "is_active", "is_superuser"]
list_editable = ["account_type", "has_testified", "is_active"]
search_fields = ["first_name", "last_name", "phone_number"]
actions = [
"export_as_csv",
]
|
#!/usr/bin/env python
"""This script reads struct from C/C++ header file and output query
Author ykk
Date June 2009
"""
import sys
import getopt
import pylibopenflow.cheader as cheader
import pylibopenflow.c2py as c2py
def usage():
"""Display usage
"""
print "Usage "+sys.argv[0]+" <options> header_files... struct_name\n"+\
"Options:\n"+\
"-h/--help\n\tPrint this usage guide\n"+\
"-c/--cstruct\n\tPrint C struct\n"+\
"-n/--name\n\tPrint names of struct\n"+\
"-s/--size\n\tPrint size of struct\n"+\
""
#Parse options and arguments
try:
opts, args = getopt.getopt(sys.argv[1:], "hcsn",
["help","cstruct","size","names"])
except getopt.GetoptError:
usage()
sys.exit(2)
#Check there is at least 1 input file and struct name
if (len(args) < 2):
usage()
sys.exit(2)
#Parse options
##Print C struct
printc = False
##Print names
printname = False
##Print size
printsize = False
for opt,arg in opts:
if (opt in ("-h","--help")):
usage()
sys.exit(0)
elif (opt in ("-s","--size")):
printsize = True
elif (opt in ("-c","--cstruct")):
printc = True
elif (opt in ("-n","--names")):
printname = True
else:
print "Unhandled option :"+opt
sys.exit(1)
headerfile = cheader.cheaderfile(args[:-1])
cstruct = headerfile.structs[args[-1].strip()]
cs2p = c2py.cstruct2py()
pattern = cs2p.get_pattern(cstruct)
#Print C struct
if (printc):
print(cstruct)
#Print pattern
print "Python pattern = "+pattern
#Print name
if (printname):
print(cstruct.get_names())
#Print size
if (printsize):
print "Size = "+str(cs2p.get_size(pattern))
|
import unittest
from onadata.libs.data import statistics as stats
class TestStatistics(unittest.TestCase):
def test_get_mean(self):
values = [1, 2, 3, 2, 5, 5]
result = stats.get_mean(values)
self.assertEqual(result, 3)
def test_get_min_max_range(self):
values = [1, 2, 3, 2, 5, 5]
result = stats.get_min_max_range(values)
self.assertEqual(result, (1, 5, 4,))
def test_get_median(self):
values = [1, 2, 3, 2, 5, 5]
result = stats.get_median(values)
self.assertEqual(result, 2.5)
|
from django.contrib.gis.db.models import Manager, PointField
from django.core.exceptions import ValidationError
from django.db import models
from django.urls import reverse
from django.utils.translation import gettext as _
from oscar.apps.address.abstract_models import AbstractAddress
from oscar.core.utils import slugify
from stores.managers import StoreManager
from stores.utils import get_geodetic_srid
# Re-use Oscar's address model
class StoreAddress(AbstractAddress):
store = models.OneToOneField(
'stores.Store',
models.CASCADE,
verbose_name=_("Store"),
related_name="address"
)
class Meta:
abstract = True
app_label = 'stores'
@property
def street(self):
"""
Summary of the 3 line fields
"""
return "\n".join(filter(bool, [self.line1, self.line2, self.line3]))
class StoreGroup(models.Model):
name = models.CharField(_('Name'), max_length=100, unique=True)
slug = models.SlugField(_('Slug'), max_length=100, unique=True)
class Meta:
abstract = True
app_label = 'stores'
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.name)
super().save(*args, **kwargs)
def __str__(self):
return self.name
class Store(models.Model):
name = models.CharField(_('Name'), max_length=100)
slug = models.SlugField(_('Slug'), max_length=100, null=True)
# Contact details
manager_name = models.CharField(
_('Manager name'), max_length=200, blank=True, null=True)
phone = models.CharField(_('Phone'), max_length=64, blank=True, null=True)
email = models.CharField(_('Email'), max_length=100, blank=True, null=True)
reference = models.CharField(
_("Reference"),
max_length=32,
unique=True,
null=True,
blank=True,
help_text=_("A reference number that uniquely identifies this store"))
image = models.ImageField(
_("Image"),
upload_to="uploads/store-images",
blank=True, null=True)
description = models.CharField(
_("Description"),
max_length=2000,
blank=True, null=True)
location = PointField(
_("Location"),
srid=get_geodetic_srid(),
)
group = models.ForeignKey(
'stores.StoreGroup',
models.PROTECT,
related_name='stores',
verbose_name=_("Group"),
null=True,
blank=True
)
is_pickup_store = models.BooleanField(_("Is pickup store"), default=True)
is_active = models.BooleanField(_("Is active"), default=True)
objects = StoreManager()
class Meta:
abstract = True
ordering = ('name',)
app_label = 'stores'
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.name)
super().save(*args, **kwargs)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('stores:detail', kwargs={'dummyslug': self.slug,
'pk': self.pk})
@property
def has_contact_details(self):
return any([self.manager_name, self.phone, self.email])
class OpeningPeriod(models.Model):
PERIOD_FORMAT = _("%(start)s - %(end)s")
(MONDAY, TUESDAY, WEDNESDAY, THURSDAY,
FRIDAY, SATURDAY, SUNDAY, PUBLIC_HOLIDAYS) = range(1, 9)
WEEK_DAYS = {
MONDAY: _("Monday"),
TUESDAY: _("Tuesday"),
WEDNESDAY: _("Wednesday"),
THURSDAY: _("Thursday"),
FRIDAY: _("Friday"),
SATURDAY: _("Saturday"),
SUNDAY: _("Sunday"),
PUBLIC_HOLIDAYS: _("Public Holidays")
}
store = models.ForeignKey('stores.Store', models.CASCADE, verbose_name=_("Store"),
related_name='opening_periods')
weekday_choices = [(k, v) for k, v in WEEK_DAYS.items()]
weekday = models.PositiveIntegerField(
_("Weekday"),
choices=weekday_choices)
start = models.TimeField(
_("Start"),
null=True,
blank=True,
help_text=_("Leaving start and end time empty is displayed as 'Closed'"))
end = models.TimeField(
_("End"),
null=True,
blank=True,
help_text=_("Leaving start and end time empty is displayed as 'Closed'"))
def __str__(self):
return "%s: %s to %s" % (self.weekday, self.start, self.end)
class Meta:
abstract = True
ordering = ['weekday']
verbose_name = _("Opening period")
verbose_name_plural = _("Opening periods")
app_label = 'stores'
def clean(self):
if self.start and self.end and self.end <= self.start:
raise ValidationError(_("Start must be before end"))
class StoreStock(models.Model):
store = models.ForeignKey(
'stores.Store',
models.CASCADE,
verbose_name=_("Store"),
related_name='stock'
)
product = models.ForeignKey(
'catalogue.Product',
models.CASCADE,
verbose_name=_("Product"),
related_name="store_stock"
)
# Stock level information
num_in_stock = models.PositiveIntegerField(
_("Number in stock"),
default=0,
blank=True,
null=True)
# The amount of stock allocated in store but not fed back to the master
num_allocated = models.IntegerField(
_("Number allocated"),
default=0,
blank=True,
null=True)
location = models.CharField(
_("In store location"),
max_length=50,
blank=True,
null=True)
# Date information
date_created = models.DateTimeField(
_("Date created"),
auto_now_add=True)
date_updated = models.DateTimeField(
_("Date updated"),
auto_now=True,
db_index=True)
class Meta:
abstract = True
verbose_name = _("Store stock record")
verbose_name_plural = _("Store stock records")
unique_together = ("store", "product")
app_label = 'stores'
objects = Manager()
def __str__(self):
if self.store and self.product:
return "%s @ %s" % (self.product.title, self.store.name)
return "Store Stock"
@property
def is_available_to_buy(self):
return self.num_in_stock > self.num_allocated
|
#!/usr/bin/env python
# -*- coding:UTF-8 -*-
# @email : spirit_az@foxmail.com
import sys
__author__ = 'ChenLiang.Miao'
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ #
import os
import pathlib
import pyfastcopy
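# pyfastcopy is imported for its side effect: importing it patches shutil.copyfile
# to use a faster copy path (assumption based on the library's stated purpose);
# it is not referenced directly below.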
import shutil
def copy_file(path_read, path_write):
names = os.listdir(path_read)
for name in names:
path_read_new = path_read + "\\" + name
path_write_new = path_write + "\\" + name
if os.path.isdir(path_read_new):
if not os.path.exists(path_write_new):
os.mkdir(path_write_new)
copy_file(path_read_new, path_write_new)
else:
shutil.copyfile(path_read_new, path_write_new)
if __name__ == '__main__':
py_folder = pathlib.Path(sys.executable).parent
nuitka = py_folder.joinpath("Scripts", "nuitka.bat")
source_folder = pathlib.Path.cwd()
follow_import = source_folder.joinpath("source")
# Build the standalone executable
exe_path = source_folder.joinpath("launcher.py")
cmd = ""
cmd += "\n"
cmd += str(nuitka) + " "
cmd += "--standalone "
cmd += "--windows-disable-console "
cmd += "--mingw64 "
cmd += "--show-memory "
cmd += "--show-progress "
cmd += "--plugin-enable=qt-plugins "
cmd += "--follow-import-to=source "
cmd += "--windows-icon-from-ico={} ".format(
"source/icons/Launcher_exe_icon.ico")
cmd += "--output-dir=out "
cmd += "{exe}".format(exe=exe_path)
os.system(cmd)
# Copy icon images
icon_folder = source_folder.joinpath("source", "icons")
to_icon_folder = source_folder.joinpath("out", "launcher.dist", "source")
copy_file(str(icon_folder), str(to_icon_folder))
# Copy config (.ini) files
for each in follow_import.glob("**/*.ini"):
shutil.copy(str(each), str(to_icon_folder))
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: slurp
version_added: historical
short_description: Slurps a file from remote nodes
description:
- This module works like M(ansible.builtin.fetch). It is used for fetching a base64-
encoded blob containing the data in a remote file.
- This module is also supported for Windows targets.
options:
src:
description:
- The file on the remote system to fetch. This I(must) be a file, not a directory.
type: path
required: true
aliases: [ path ]
extends_documentation_fragment:
- action_common_attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
platform:
platforms: posix, windows
notes:
- This module returns an 'in memory' base64 encoded version of the file, take
into account that this will require at least twice the RAM as the original file size.
seealso:
- module: ansible.builtin.fetch
author:
- Ansible Core Team
- Michael DeHaan (@mpdehaan)
'''
EXAMPLES = r'''
- name: Find out what the remote machine's mounts are
ansible.builtin.slurp:
src: /proc/mounts
register: mounts
- name: Print returned information
ansible.builtin.debug:
msg: "{{ mounts['content'] | b64decode }}"
# From the commandline, find the pid of the remote machine's sshd
# $ ansible host -m slurp -a 'src=/var/run/sshd.pid'
# host | SUCCESS => {
# "changed": false,
# "content": "MjE3OQo=",
# "encoding": "base64",
# "source": "/var/run/sshd.pid"
# }
# $ echo MjE3OQo= | base64 -d
# 2179
'''
RETURN = r'''
content:
description: Encoded file content
returned: success
type: str
sample: "MjE3OQo="
encoding:
description: Type of encoding used for file
returned: success
type: str
sample: "base64"
source:
description: Actual path of file slurped
returned: success
type: str
sample: "/var/run/sshd.pid"
'''
import base64
import errno
import os
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native
def main():
module = AnsibleModule(
argument_spec=dict(
src=dict(type='path', required=True, aliases=['path']),
),
supports_check_mode=True,
)
source = module.params['src']
try:
with open(source, 'rb') as source_fh:
source_content = source_fh.read()
except (IOError, OSError) as e:
if e.errno == errno.ENOENT:
msg = "file not found: %s" % source
elif e.errno == errno.EACCES:
msg = "file is not readable: %s" % source
elif e.errno == errno.EISDIR:
msg = "source is a directory and must be a file: %s" % source
else:
msg = "unable to slurp file: %s" % to_native(e, errors='surrogate_then_replace')
module.fail_json(msg)
data = base64.b64encode(source_content)
module.exit_json(content=data, source=source, encoding='base64')
if __name__ == '__main__':
main()
|
# coding: utf-8
import sqlite3
from photos_app import logger
def fetch_namedtuples(constructor, cursor: sqlite3.Cursor, boolean_columns=..., custom_columns=...):
results = []
first = True
columns = None
for row in cursor:
if first:
columns = row.keys()
values = dict(zip(columns, row))
if boolean_columns is not ...:
for col in boolean_columns:
values[col] = bool(values[col])
if custom_columns is not ...:
for col, func in custom_columns.items():
values[col] = func(values[col])
results.append(constructor(**values))
return results
def execute(db: sqlite3.Connection, query: str, params=...) -> sqlite3.Cursor:
logger.debug('DB query:\n%s\nparams: %s', query, params if params is not ... else '-')
if params is ...:
return db.execute(query)
else:
return db.execute(query, params)
|
hrs = int(input("Enter Hours: "))
rate = float(input("Enter Rate: "))
pay = hrs*rate
print(f"Pay: {pay}")
|
# Generated by Django 3.0.8 on 2020-11-30 17:45
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0005_application_application_location'),
]
operations = [
migrations.AddField(
model_name='application',
name='last_filed',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AlterField(
model_name='user',
name='first_name',
field=models.CharField(blank=True, max_length=30, verbose_name='first name'),
),
]
|
#!/usr/bin/env python3
import networkx as nx
import random
def sequence(n):
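# Returns a random composition of n: a list of positive integers summing to n,
# used below (with padding) as in-/out-degree sequences for the configuration model.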
a, m = [], random.randint(0, n)
while n > m > 0:
a.append(m)
n -= m
m = random.randint(0, n)
if n: a += [n]
return a
def two_sequence(size):
sequences = [sequence(size)]
second_sequence = sequence(size)
while len(second_sequence) != len(sequences[0]):
second_sequence = sequence(size)
sequences.append(second_sequence)
return sequences
def generate_graph(factor):
sequences = two_sequence(factor)
ins = [0] + sequences[0] + [1]
outs = [1] + sequences[1] + [0]
graph = nx.DiGraph(nx.directed_configuration_model(ins, outs))
while not nx.is_connected(nx.Graph(graph)):
graph = nx.DiGraph(nx.directed_configuration_model(ins, outs))
graph.remove_edges_from(graph.selfloop_edges())
return graph
def graph_with_size(size):
factor = random.randint(0, 200)
graph = generate_graph(factor)
while graph.number_of_edges() != size:
factor = random.randint(0, 200)
graph = generate_graph(factor)
return graph
for size in range(40, 51):
nx.write_graphml(graph_with_size(size), "scalability_test_t{0}.graphml".format(size))
|
from django import http
def cors_middleware(get_response):
"""
If CORS preflight header, then create an empty body response (200 OK) and return it
"""
def middleware(request):
if (
request.method == "OPTIONS"
and "HTTP_ACCESS_CONTROL_REQUEST_METHOD" in request.META
):
response = http.HttpResponse()
response["Content-Length"] = "0"
return response
return get_response(request)
return middleware
|
import os
import unittest
from merger import Merger
class MergerTest(unittest.TestCase):
def test_run(self):
merger = Merger()
actual = merger.run(os.path.join('test', 'data', 'cfg.csv'))
self.assertIsNotNone(actual)
self.assertEqual(3, actual.shape[1])
|
import datetime
import os
import shutil
import sqlite3
import tempfile
import time
from django.test import SimpleTestCase
from matrixstore.build.init_db import SCHEMA_SQL, generate_dates, import_dates
from matrixstore.build.generate_filename import generate_filename
class TestGenerateFilename(SimpleTestCase):
@classmethod
def setUpClass(cls):
cls.tempdir = tempfile.mkdtemp()
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tempdir)
def test_generate_filename(self):
filename = os.path.join(self.tempdir, "test.sqlite")
connection = sqlite3.connect(filename)
connection.executescript(SCHEMA_SQL)
import_dates(connection, generate_dates("2018-10-01"))
connection.commit()
connection.close()
last_modified = time.mktime(
datetime.datetime(2018, 12, 6, 15, 5, 3).timetuple()
)
os.utime(filename, (last_modified, last_modified))
new_filename = generate_filename(filename)
self.assertRegex(
new_filename, "matrixstore_2018-10_2018-12-06--15-05_[a-f0-9]{16}\.sqlite"
)
|
import smtplib, ssl
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import json
def notify_user(origin, destination, date, dic, user_list):
for user in user_list:
with open('emails_to_ping.json') as json_file:
user_info = json.load(json_file)
receiver_email = user_info[user]['email']
name = user_info[user]['name']
port = 465 # For SSL
smtp_server = "smtp.gmail.com"
sender_email = "tgvmaximator@gmail.com" # Enter your address
password = 'amsterdam86'
message = MIMEMultipart("alternative")
message["Subject"] = "Bonjour M. {}".format(name)
message["From"] = sender_email
message["To"] = receiver_email
# Create the plain-text and HTML version of your message
text = """\
Bonjour M. {0},
Un nouveau train TGVMax est disponible pour votre trajet de {1} a {2} le {3}. Il part à {4}.
Si cet horaire vous correspond vous pouvez aller manger nos couilles en salade.
Cordialement,
L'équipe TGVMaximator""".format(name, origin, destination, date.strftime("%Y-%m-%d"), dic['departure_time'].strftime("%Hh%M"))
html = """\
<html>
<body>
<p>Bonjour M. {0},<br>
Un nouveau train TGVMax est disponible pour votre trajet de {1} a {2} le {3}. Il part à {4}.
Si cet horaire vous correspond vous pouvez aller manger mes couilles en salade.<br>
Rendez-vous sur <a href="https://www.oui.sncf/bons-plans/tgvmax#!/">Oui.sncf</a>
pour vous faire une vinaigrette afin de mieux apprécier le plat de chibre.<br>
<br>
Cordialement,<br>
L'équipe TGVMaximator
</p>
</body>
</html>
""".format(name,origin, destination, date.strftime("%Y-%m-%d"), dic['departure_time'].strftime("%Hh%M"))
# Turn these into plain/html MIMEText objects
part1 = MIMEText(text, "plain")
part2 = MIMEText(html, "html")
# Add HTML/plain-text parts to MIMEMultipart message
# The email client will try to render the last part first
message.attach(part1)
message.attach(part2)
# Create secure connection with server and send email
context = ssl.create_default_context()
with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:
server.login(sender_email, password)
server.sendmail(
sender_email, receiver_email, message.as_string()
)
print("Sent email to {}".format(name)) |
# Copyright 2021-present Kensho Technologies, LLC.
import logging
from typing import List, Optional
BPE_CHAR = "▁" # character used for token boundary if BPE is used
UNK_BPE_CHAR = "▁⁇▁" # representation of unknown character in BPE
logger = logging.getLogger(__name__)
def _get_ctc_index(label_list: List[str]) -> int:
"""Get index of ctc blank character in alphabet."""
return len(label_list) - 1 if label_list[-1] == "" else -1
def _normalize_alphabet(label_list: List[str], ctc_token_idx: Optional[int] = None) -> List[str]:
"""Normalize alphabet for non-bpe decoder."""
if any([len(c) > 1 for c in label_list]):
raise ValueError("For non-bpe alphabet only length 1 entries and blank token are allowed.")
if ctc_token_idx is None:
ctc_token_idx = _get_ctc_index(label_list)
clean_labels = label_list[:]
# check for space token
if " " not in clean_labels:
raise ValueError("Space token ' ' missing from vocabulary.")
# specify ctc blank token
if ctc_token_idx == -1:
clean_labels.append("")
else:
clean_labels[ctc_token_idx] = ""
return clean_labels
def _convert_bpe_format(token: str) -> str:
"""Convert token from ## type bpe format to ▁ type."""
if token[:2] == "##":
return token[2:]
elif token == BPE_CHAR:
return token
elif token == "": # nosec
return token
else:
return BPE_CHAR + token
def _normalize_bpe_alphabet(
label_list: List[str],
unk_token_idx: Optional[int] = None,
ctc_token_idx: Optional[int] = None,
) -> List[str]:
"""Normalize alphabet for bpe decoder."""
if ctc_token_idx is None:
ctc_token_idx = _get_ctc_index(label_list)
# create copy
clean_labels = label_list[:]
# there are two common formats for BPE vocabulary
# 1) where ▁ indicates a space (this is the main format we use)
if any([s[:1] == BPE_CHAR and len(s) > 1 for s in clean_labels]):
# verify unk token and make sure it is consistently represented as ▁⁇▁
if unk_token_idx is None and clean_labels[0] in ("<unk>", UNK_BPE_CHAR):
unk_token_idx = 0
elif unk_token_idx is None:
raise ValueError(
"First token in vocab for BPE should be '▁⁇▁' or specify unk_token_idx."
)
clean_labels[unk_token_idx] = UNK_BPE_CHAR
# 2) where ## indicates continuation of a token (note: also contains the single token: ▁)
elif any([s[:2] == "##" for s in clean_labels]):
# convert to standard format 1)
clean_labels = [_convert_bpe_format(c) for c in clean_labels]
# add unk token if needed
if clean_labels[0] in ("<unk>", UNK_BPE_CHAR):
clean_labels[0] = UNK_BPE_CHAR
else:
clean_labels = [UNK_BPE_CHAR] + clean_labels
ctc_token_idx += 1
else:
raise ValueError(
"Unknown BPE format for vocabulary. Supported formats are 1) ▁ for indicating a"
" space and 2) ## for continuation of a word."
)
# specify ctc blank token
if ctc_token_idx == -1:
clean_labels.append("")
else:
clean_labels[ctc_token_idx] = ""
return clean_labels
class Alphabet:
def __init__(self, labels: List[str], is_bpe: bool) -> None:
"""Init."""
self._labels = labels
self._is_bpe = is_bpe
@property
def is_bpe(self) -> bool:
"""Whether the alphabet is a bytepair encoded one."""
return self._is_bpe
@property
def labels(self) -> List[str]:
"""Deep copy of the labels."""
return self._labels[:] # this is a copy
@classmethod
def build_alphabet(
cls, label_list: List[str], ctc_token_idx: Optional[int] = None
) -> "Alphabet":
"""Make a non-BPE alphabet."""
formatted_alphabet_list = _normalize_alphabet(label_list, ctc_token_idx)
return cls(formatted_alphabet_list, False)
@classmethod
def build_bpe_alphabet(
cls,
label_list: List[str],
unk_token_idx: Optional[int] = None,
ctc_token_idx: Optional[int] = None,
) -> "Alphabet":
"""Make a BPE alphabet."""
formatted_label_list = _normalize_bpe_alphabet(label_list, unk_token_idx, ctc_token_idx)
return cls(formatted_label_list, True)
|
import json
from dataclasses import dataclass
from typing import Dict, Any, Optional, List
from allennlp.predictors import Predictor
VALID_ATTACKERS = ("hotflip", "input_reduction")
VALID_INTERPRETERS = ("simple_gradient", "smooth_gradient", "integrated_gradient")
@dataclass(frozen=True)
class Model:
"""
Class capturing the options we support per model.
"""
id: str
"""
A unique name to identify each demo.
"""
archive_file: str
"""
The path to the model's archive_file.
"""
pretrained_model_id: Optional[str] = None
"""
The ID of a pretrained model to use from `allennlp_models.pretrained`.
"""
predictor_name: Optional[str] = None
"""
Optional predictor name to override the default predictor associated with the archive.
This is ignored if `pretrained_model_id` is given.
"""
overrides: Optional[Dict[str, Any]] = None
"""
Optional parameter overrides to pass through when loading the archive.
This is ignored if `pretrained_model_id` is given.
"""
attackers: Optional[List[str]] = None
"""
List of valid attackers to use.
"""
interpreters: Optional[List[str]] = None
"""
List of valid interpreters to use.
"""
use_old_load_method: bool = False
"""
Some models that run on older versions need to be loaded differently.
"""
@classmethod
def from_file(cls, path: str) -> "Model":
with open(path, "r") as fh:
raw = json.load(fh)
if "pretrained_model_id" in raw:
from allennlp_models.pretrained import get_pretrained_models
model_card = get_pretrained_models()[raw["pretrained_model_id"]]
raw["archive_file"] = model_card.archive_file
raw["predictor_name"] = model_card.registered_predictor_name
out = cls(**raw)
# Do some validation.
for attacker in out.attackers or []:
assert attacker in VALID_ATTACKERS, f"invalid attacker {attacker}"
for interpreter in out.interpreters or []:
assert interpreter in VALID_INTERPRETERS, f"invalid interpreter {interpreter}"
if out.use_old_load_method:
assert out.pretrained_model_id is None
return out
def load_predictor(self) -> Predictor:
if self.pretrained_model_id is not None:
from allennlp_models.pretrained import load_predictor
return load_predictor(self.pretrained_model_id, overrides=self.overrides)
assert self.archive_file is not None
if self.use_old_load_method:
from allennlp.models.archival import load_archive
# Older versions require overrides to be passed as a JSON string.
o = json.dumps(self.overrides) if self.overrides is not None else None
archive = load_archive(self.archive_file, overrides=o)
return Predictor.from_archive(archive, self.predictor_name)
return Predictor.from_path(
self.archive_file, predictor_name=self.predictor_name, overrides=self.overrides
)
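# Hedged usage sketch (addition): loading a demo Model from a JSON config and turning
# it into a Predictor. The file name and its contents are illustrative assumptions;
# any field of the dataclass above may appear in the JSON.
def _example_load_model() -> Predictor:
    # model.json might look like (hypothetical):
    # {"id": "bidaf", "archive_file": "https://example.org/bidaf.tar.gz",
    #  "attackers": ["hotflip"], "interpreters": ["simple_gradient"]}
    model = Model.from_file("model.json")
    return model.load_predictor()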
|
# optimization utilities
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize as opt
import inspect
def plot_loss_trace(losses, loss_min=None, ax=None):
'''Plot loss vs number of function evals.
losses is a list of floats'''
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(losses[1:], '-')
if loss_min is not None:
ax.axhline(loss_min, color='r')  # xmin/xmax are axes fractions, so span the full width
# Make sure horizontal line is visible by changing yscale
ylim = ax.get_ylim()
ax.set_ylim([0.9*loss_min, 1.1*ylim[1]])
return ax
class OptimLogger(object):
'''Class to create a stateful callback function for optimizers,
of the form callback(params).
This calls eval_fun(params) whenever iter is a multiple of eval_freq,
which can be used to evaluate validation performance.'''
def __init__(self, eval_fun=None, eval_freq=0, store_freq=0, print_freq=0):
self.param_trace = []
self.eval_trace = []
self.iter_trace = []
self.eval_freq = eval_freq
self.store_freq = store_freq
self.print_freq = print_freq
self.eval_fun = eval_fun
self.iter = 0
def callback(self, params):
if (self.eval_freq > 0) and (self.iter % self.eval_freq == 0):
obj = self.eval_fun(params)
self.eval_trace.append(obj)
self.iter_trace.append(self.iter)
if self.print_freq > 0:
print "iteration {}, objective {:2.3f}".format(self.iter, obj)
if (self.store_freq > 0) and (self.iter % self.store_freq == 0):
self.param_trace.append(np.copy(params))
self.iter += 1
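# Hedged usage sketch (addition): wiring an OptimLogger into an optimizer callback.
# The quadratic objective and gradient below are made up purely for illustration.
def _example_optim_logger():
    obj = lambda params: float(np.sum(params ** 2))   # hypothetical validation objective
    grad = lambda params: 2 * params
    logger = OptimLogger(eval_fun=obj, eval_freq=10, store_freq=10, print_freq=0)
    x, val = sgd(obj, grad, np.ones(3), max_iters=50, callback=logger.callback)
    return logger.iter_trace, logger.eval_trace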
# Shuffle rows (for SGD)
def shuffle_data(X, y):
N = y.shape[0]
perm = np.arange(N)
np.random.shuffle(perm)
return X[perm], y[perm]
######
# Learning rate functions
def const_lr(lr=0.001):
fn = lambda t: lr  # Python 3: tuple-parameter lambdas are invalid syntax
return fn
#https://www.tensorflow.org/versions/r0.7/api_docs/python/train.html#exponential_decay
#decayed_learning_rate = learning_rate *
# decay_rate ^ (global_step / decay_steps)
def lr_exp_decay(t, base_lr=0.001, decay_rate=0.9, decay_steps=100, staircase=True):
'''To emulate a fixed learning rate, set decay_rate=0, decay_steps=inf'''
if staircase:
exponent = t // decay_steps # integer (floor) division
else:
exponent = t / np.float(decay_steps)
return base_lr * np.power(decay_rate, exponent)
# http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDRegressor.html
# eta = eta0 / pow(t, power_t) [default]
def lr_inv_scaling(t, base_lr=0.001, power_t=0.25):
return base_lr / np.power(t+1, power_t)
#http://leon.bottou.org/projects/sgd
def lr_bottou(t, base_lr=0.001, power_t=0.75, lam=1):
return base_lr / np.power(1 + lam*base_lr*t, power_t)
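# Worked values for the schedules above (illustrative, using the stated defaults):
# lr_exp_decay(200, base_lr=0.001, decay_rate=0.9, decay_steps=100, staircase=True)
#   -> 0.001 * 0.9 ** (200 // 100) = 0.001 * 0.81 = 0.00081
# lr_inv_scaling(99, base_lr=0.001, power_t=0.25)
#   -> 0.001 / 100 ** 0.25 ≈ 0.000316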
def plot_lr_trace():
lr_trace = []
for iter in range(500):
lr = lr_exp_decay(iter, 0.01, 0, np.inf, True)
lr_trace.append(lr)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(lr_trace)
plt.show()
def grid_search_1d(eval_fun, param_list):
#scores = np.apply_along_axis(eval_fun, 0, param_list)
scores = [eval_fun(p) for p in param_list]
istar = np.nanargmin(scores)
return param_list[istar], scores
def lr_tuner(obj_fun, search_method, optimizer, grad_fun, x0, max_iters, lrs=[1e-4,1]):
def lr_eval_fun(lr):
params, score = optimizer(obj_fun, grad_fun, x0, max_iters, None, const_lr(lr))
print('lr eval fun using {} gives {}'.format(lr, score))
return score
if search_method == 'grid':
if len(lrs) == 2:
lrs = np.logspace(np.log10(lrs[0]), np.log10(lrs[1]), 5)
lr, scores = grid_search_1d(lr_eval_fun, lrs)
else:
res = opt.minimize_scalar(lr_eval_fun, bounds=[lrs[0], lrs[1]], method='bounded', \
options = {'maxiter': 10, 'xatol': 1e-2, 'disp': False})
scores = []
lrs = []
lr = res.x
return lr, lrs, scores
######
# Minibatch functions
# From https://github.com/HIPS/neural-fingerprint/blob/2003a28d5ae4a78d99fdc06db8671b994f88c5a6/neuralfingerprint/util.py#L126-L138
def get_ith_minibatch_ixs(i, num_datapoints, batch_size):
num_minibatches = num_datapoints // batch_size + ((num_datapoints % batch_size) > 0)
i = i % num_minibatches
start = i * batch_size
stop = start + batch_size
return slice(start, stop)
def build_batched_grad(grad, batch_size, inputs, targets):
'''Grad has signature (weights, inputs, targets, N).
Returns batched_grad with signature (weights, iter), applied to a
minibatch. We pass in the overall dataset size, N, to act as
scaling factor.'''
N = inputs.shape[0]
def batched_grad(weights, i):
cur_idxs = get_ith_minibatch_ixs(i, len(targets), batch_size)
return grad(weights, inputs[cur_idxs], targets[cur_idxs], N)
return batched_grad
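# Hedged usage sketch (addition): pairing build_batched_grad with sgd. The toy data and
# least-squares gradient are illustrative assumptions, not part of the original module.
def _example_batched_grad():
    rng = np.random.RandomState(0)
    X, y = rng.randn(100, 3), rng.randn(100)
    def grad(weights, inputs, targets, N):
        # gradient of 0.5 * ||inputs @ weights - targets||^2, rescaled to the full dataset size
        return inputs.T.dot(inputs.dot(weights) - targets) * (N / float(len(targets)))
    batched = build_batched_grad(grad, batch_size=10, inputs=X, targets=y)
    obj = lambda w: 0.5 * np.sum((X.dot(w) - y) ** 2)
    w, val = sgd(obj, batched, np.zeros(3), max_iters=100, lr_fun=const_lr(0.001))
    return w, val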
######
# Modified from https://github.com/HIPS/autograd/blob/master/examples/optimizers.py
def maybe_add_iter_arg_to_fun(fun):
'''This modifies a batch function to work in the online setting,
by accepting (but ignoring) an iteration argument.'''
if len(inspect.getfullargspec(fun)[0]) == 1:  # getfullargspec replaces the removed getargspec
return lambda params, iter: fun(params)
else:
return fun
def autosgd(tuning_fun, tuning_method, obj_fun, grad_fun, x0, max_iters=100,
callback=None, mass=0.9, update='regular'):
lr, lrs, scores = lr_tuner(tuning_fun, tuning_method, sgd, grad_fun, x0, max_iters)
print('auto_sgd picked {} from {} with scores {}'.format(lr, lrs, scores))
lr_fun = lambda iter: lr_exp_decay(iter, lr)
x, val = sgd(obj_fun, grad_fun, x0, max_iters, callback, lr_fun, mass, update)
return x, val, lr
def sgd(obj_fun, grad_fun, x0, max_iters=100, callback=None,
lr_fun=const_lr(0.01), mass=0.9, update='regular', avgdecay=0.99):
'''Stochastic gradient descent with momentum.
See eg http://caffe.berkeleyvision.org/tutorial/solver.html'''
x = np.copy(x0)
xavg = x
if callback is not None: callback(x)
velocity = np.zeros(len(x))
grad_fun = maybe_add_iter_arg_to_fun(grad_fun)
for i in range(max_iters):
lr = lr_fun(i)
if update == 'regular': # standard momentum
g = grad_fun(x, i)
velocity = mass * velocity - lr * g
elif update == 'nesterov': # nesterov accelerated gradient
gg = grad_fun(x + mass * velocity, i)
velocity = mass * velocity - lr * gg
elif update == 'convex': # convex combination (autograd code)
g = grad_fun(x, i)
velocity = lr * (mass * velocity - (1.0 - mass) * g)
else:
raise ValueError('unknown update {}'.format(update))
x = x + velocity
if callback is not None: callback(x)
# Polyak iterate averaging (uses the otherwise-unused avgdecay argument)
xavg = (1 - avgdecay) * x + avgdecay * xavg
val = obj_fun(x)
val_avg = obj_fun(xavg)
print('sgd: val {:0.4g}, val_avg {:0.4g}'.format(val, val_avg))
if val < val_avg:
return x, val
else:
return xavg, val_avg
def autoadam(tuning_fun, tuning_method, obj_fun, grad_fun, x0, max_iters=100,
callback=None, b1=0.9, b2=0.999, eps=10**-8):
lr, lrs, scores = lr_tuner(tuning_fun, tuning_method, adam, grad_fun, x0, max_iters)
print('auto_adam picked {} from {} with scores {}'.format(lr, lrs, scores))
lr_fun = lambda iter: lr_exp_decay(iter, lr)
x, val = adam(obj_fun, grad_fun, x0, max_iters, callback, lr_fun, b1, b2, eps)
return x, val, lr
def adam(obj_fun, grad_fun, x0, max_iters=100, callback=None,
lr_fun=const_lr(0.01), b1=0.9, b2=0.999, eps=10**-8, avgdecay=0.99):
"""Adam as described in http://arxiv.org/pdf/1412.6980.pdf.
It's basically RMSprop with momentum and some correction terms."""
x = np.copy(x0)
xavg = x
if callback is not None: callback(x)
m = np.zeros(len(x))
v = np.zeros(len(x))
grad_fun = maybe_add_iter_arg_to_fun(grad_fun)
for i in range(max_iters):
g = grad_fun(x, i)
m = (1 - b1) * g + b1 * m # First moment estimate.
v = (1 - b2) * (g**2) + b2 * v # Second moment estimate.
mhat = m / (1 - b1**(i + 1)) # Bias correction.
vhat = v / (1 - b2**(i + 1))
step_size = lr_fun(i)
x -= step_size*mhat/(np.sqrt(vhat) + eps)
if callback: callback(x)
# Polyak iterate averaging
xavg = (1-avgdecay)*x + avgdecay*xavg
val = obj_fun(x)
val_avg = obj_fun(xavg)
print('adam: val {:0.4g}, val_avg {:0.4g}'.format(val, val_avg))
if val < val_avg:
return x, val
else:
return xavg, val_avg
def rmsprop(obj_fun, grad_fun, x0, max_iters=100, callback=None,
lr_fun=const_lr(0.01), gamma=0.9, eps = 10**-8):
"""Root mean squared prop: See Adagrad paper for details."""
x = np.copy(x0)
avg_sq_grad = np.ones(len(x))
grad_fun = maybe_add_iter_arg_to_fun(grad_fun)
if callback is not None: callback(x)
for i in range(max_iters):
g = grad_fun(x, i)
avg_sq_grad = avg_sq_grad * gamma + g**2 * (1 - gamma)
step_size = lr_fun(i)
x -= step_size * g/(np.sqrt(avg_sq_grad) + eps)
if callback: callback(x)
val = obj_fun(x)
return x, val
def bfgs(obj_fun, grad_fun, params, max_iters=100, callback_fun=None):
'''This wraps scipy.minimize. So callback has the signature
callback(params).'''
result = opt.minimize(obj_fun, params, method='BFGS', jac=grad_fun,
callback=callback_fun, options = {'maxiter':max_iters, 'disp':True, 'gtol': 1e-3})
return result.x, result.fun
"""
# Modified from
# https://github.com/HIPS/neural-fingerprint/blob/2003a28d5ae4a78d99fdc06db8671b994f88c5a6/neuralfingerprint/optimizers.py
def bfgs_hips(obj_and_grad, x, num_iters=100, callback=None):
def epoch_counter():
epoch = 0
while True:
yield epoch
epoch += 1
ec = epoch_counter()
wrapped_callback=None
if callback:
def wrapped_callback(params):
res = obj_and_grad(params)
grad = res[1]
callback(params, next(ec), grad)
res = opt.minimize(fun=obj_and_grad, x0=x, jac =True, callback=wrapped_callback,
method = 'BFGS', options = {'maxiter':num_iters, 'disp':True, 'gtol': 1e-3})
return res.x
"""
def branin(x):
"""Branin function.
This function is widely used to evaluate nonconvex optimization methods.
It is typically evaluated over the range -5 <= x1 <= 10, 0 <= x2 <= 15.
This function has 3 global minima, at
x_global_min = [np.pi, 2.275];
x_global_min = [-np.pi, 12.275]
x_global_min = [9.42478, 2.475]
The objective function at these points has value 0.397887
Args:
x: N*2 array of points (N = num. points to evaluate)
Returns:
f: N*1 function values at each x
df: N*2 gradient vector at each x
"""
a = 1
b = 5.1 / (4 * np.pi**2)
c = 5. / np.pi
r = 6.
s = 10.
t = 1. / (8 * np.pi)
x1 = x[:, 0]
x2 = x[:, 1]
z = x2 - b * np.square(x1) + c * x1 - r
f = a * np.square(z) + s * (1 - t) * np.cos(x1) + 10
df0 = 2 * a * z * (-2 * b * x1 + c) - s * (1 - t) * np.sin(x1)  # elementwise per-point gradient
df1 = 2 * a * z
df = np.array([df0, df1]).T  # shape (N, 2), matching the docstring
return f, df
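# Hedged sanity check (addition): evaluating branin at one of the listed global minima
# should give approximately 0.397887 with a near-zero gradient.
def _example_branin_minimum():
    x = np.array([[np.pi, 2.275]])
    f, df = branin(x)
    return f[0], df  # f[0] ≈ 0.397887, df ≈ [[0, 0]]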
######
def main():
plot_lr_trace()
if __name__ == "__main__":
main()
|
'''
Copyright 2013 Cosnita Radu Viorel
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
.. codeauthor:: Radu Viorel Cosnita <radu.cosnita@gmail.com>
.. py:module:: fantastico.mvc.controller_registrator
'''
from fantastico.mvc.controller_decorators import Controller
from fantastico.routing_engine.routing_loaders import RouteLoader
from fantastico.settings import SettingsFacade
from fantastico.utils import instantiator
import importlib
import inspect
import os
class ControllerRouteLoader(RouteLoader):
'''This class provides a route loader that is capable of scanning the disk and registering only the routes that
contain a controller decorator in them. This happens when **Fantastico** servers starts. In standard configuration
it ignores tests subfolder as well as test_* / itest_* modules.'''
@property
def scanned_folders(self):
'''This property returns the currently scanned folder from where mvc routes are collected.'''
return self._folders
def __init__(self, settings_facade=SettingsFacade, scanned_folder=None, ignore_prefix=None):
super(ControllerRouteLoader, self).__init__(settings_facade)
self._scanned_folder = scanned_folder or instantiator.get_class_abslocation(self._settings_facade.get_config().__class__)
custom_packages = self._settings_facade.get("mvc_additional_paths")
folders = self._get_custom_packages_filelist(custom_packages)
folders.add(self._scanned_folder)
self._folders = list(folders)
self._ignore_prefix = ignore_prefix or ["__init__", "__pycache__", "tests", "test", "itest"]
def _get_custom_packages_filelist(self, custom_packages):
'''This method returns all filenames where the given custom packages reside on disk.'''
folders = set()
for custom_package in custom_packages:
package = importlib.import_module(custom_package)
filename = inspect.getabsfile(package)
folders.add(filename[:filename.rfind(os.path.sep)])
return folders
def _is_ignored_file(self, filename):
'''This method determines if a filename is ignored or not.'''
for prefix in self._ignore_prefix:
if filename.startswith(prefix):
return True
return False
def _register_from_folder(self, folder):
'''This method is used for registering all modules that contains Controller from a given location.'''
file_matcher = lambda folder, filename: not self._is_ignored_file(filename)
instantiator.import_modules_from_folder(folder, file_matcher, self._settings_facade)
def load_routes(self):
'''This method is used for loading all routes that are mapped through
:py:class:`fantastico.mvc.controller_decorators.Controller` decorator.'''
for scanned_folder in self._folders:
self._register_from_folder(scanned_folder)
controller_routes = Controller.get_registered_routes()
routes = {}
for controller in controller_routes:
contr_routes = controller.url
for route in contr_routes:
route_config = routes.get(route)
if not route_config:
route_config = {}
routes[route] = route_config
route_config["http_verbs"] = route_config.get("http_verbs", {})
for method in controller.method:
route_config["http_verbs"][method] = controller.fn_handler.full_name
return routes
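# Hedged illustration (addition): the dictionary returned by load_routes maps each URL
# to its supported HTTP verbs and handler names, roughly of the shape
# {"/some/url": {"http_verbs": {"GET": "<controller.fn_handler.full_name>"}}}.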
|
from rest_framework import serializers
from .models import Crop, CropDetail, Buyer, Seller, User, Order
class CropSerializer(serializers.ModelSerializer):
class Meta:
model = Crop
#fields = ('category')
fields = '__all__'
class CropDetailSerializer(serializers.ModelSerializer):
class Meta:
model = CropDetail
fields = '__all__'
depth = 1
class UserSerializer(serializers.ModelSerializer):
password = serializers.CharField(write_only=True, required=True)
is_active = serializers.BooleanField(required=False)
is_admin = serializers.BooleanField(required=False)
class Meta:
model = User
fields = ('id', 'email', 'category', 'name', 'gender', 'identificationNumber', 'address',
'phone', 'date_of_birth', 'is_active', 'is_admin', 'password')
read_only_fields = ('date_created', 'date_modified')
def create(self, validated_data):
return User.objects.create_user(**validated_data)
class SellerSerializer(UserSerializer):
class Meta(UserSerializer.Meta):
model = Seller
fields = UserSerializer.Meta.fields
def create(self, validated_data):
return Seller.objects.create_user(**validated_data)
class BuyerSerializer(UserSerializer):
class Meta(UserSerializer.Meta):
model = Buyer
fields = UserSerializer.Meta.fields
def create(self, validated_data):
return Buyer.objects.create_user(**validated_data)
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights
# Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the
# License.
import typing
from abc import ABCMeta, abstractmethod
if typing.TYPE_CHECKING:
from typing import Dict, Any
from ask_sdk_core.view_resolvers.template_content import TemplateContent
from ask_sdk_model import Response
class AbstractTemplateRenderer(object):
"""Render interface for template rendering and response conversion."""
__metaclass__ = ABCMeta
@abstractmethod
def render(self, template_content, data_map, **kwargs):
# type: (TemplateContent, Dict, Any) -> Response
"""Template Renderer is used to render the template content tmp
loaded from the Loader along with the response object tmp map to
generate a skill :py:class:`ask_sdk_model.response.Response` output.
:param template_content: Template Content tmp
:type template_content: :py:class:`ask_sdk_core.view_resolvers.TemplateContent`
:param data_map: Map of template content slot values
:type data_map: Dict[str, object]
:param **kwargs: Optional arguments that renderer takes.
:return: Skill Response output
:rtype: :py:class:`ask_sdk_model.response.Response`
"""
pass
|
from bcrypt import hashpw
from eve.auth import BasicAuth
from flask import request, current_app as app
class BCryptAuthUser(BasicAuth):
def check_auth(self, email, password, allowed_roles, resource, method):
users = app.data.driver.db['user']
auth_user = users.find_one({'email': email})
if not auth_user:
return False
# bcrypt operates on bytes; this assumes the hash is stored as a UTF-8 string
stored_hash = auth_user['password'].encode('utf-8')
return hashpw(password.encode('utf-8'), stored_hash) == stored_hash
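# Hedged usage sketch (addition): wiring this class into an Eve application; the
# settings module name is an assumption about the rest of this project.
# from eve import Eve
# app = Eve(auth=BCryptAuthUser, settings='settings.py')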
|
#!/usr/bin/env python
import rospy
from beginner_tutorials.msg import QR
from geometry_msgs.msg import PoseWithCovarianceStamped
from geometry_msgs.msg import PoseArray
from geometry_msgs.msg import Pose
import copy
victim_pose_array = PoseArray()
victim_pose_array.header.frame_id = "map"
current_pose = Pose()
def callback_QR(data):
global victim_pose_array, current_pose
pose_length = len(victim_pose_array.poses)
print("entered callback")
if (pose_length == 0 or ((abs(victim_pose_array.poses[-1].position.x-current_pose.position.x) > 0.30) or (abs(victim_pose_array.poses[-1].position.y-current_pose.position.y) > 0.30))):
victim_pose = copy.deepcopy(current_pose)
victim_pose_array.poses.append(victim_pose)
print("Added element @ X: {}\tY: {}\n" .format(victim_pose.position.x, victim_pose.position.y))
def callback_POSE(data):
# this is the current AMCL pose of the robot
global current_pose
# update current pose
current_pose.position.x = data.pose.pose.position.x
current_pose.position.y = data.pose.pose.position.y
current_pose.orientation.x = data.pose.pose.orientation.x
current_pose.orientation.y = data.pose.pose.orientation.y
current_pose.orientation.z = data.pose.pose.orientation.z
current_pose.orientation.w = data.pose.pose.orientation.w
if __name__ == '__main__':
# start node
rospy.init_node('QR_listener')
# publish @ 1Hz
rate = rospy.Rate(1)
# initialise subscriber
rospy.Subscriber("QR_chatter", QR, callback_QR)
pose_sub = rospy.Subscriber("amcl_pose", PoseWithCovarianceStamped, callback_POSE)
# initialise publisher
array_publisher = rospy.Publisher('victim_pose_array', PoseArray, queue_size=5)
try:
while not rospy.is_shutdown():
# publish the pose_array
array_publisher.publish(victim_pose_array)
rate.sleep()
except Exception as e:
print(e)
|
import logging
from pathlib import Path
import grpc
from pb import resources_pb2, resources_pb2_grpc
logger = logging.getLogger(__name__)
resources_folder = Path(__file__).parent / ".." / ".." / ".." / "resources"
class Resources(resources_pb2_grpc.ResourcesServicer):
def GetTermsOfService(self, request, context):
with open(resources_folder / "terms_of_service.md", "r") as f:
return resources_pb2.GetTermsOfServiceRes(terms_of_service=f.read())
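# Hedged usage sketch (addition): registering the servicer on a gRPC server. The port
# and thread-pool size are illustrative assumptions.
def _example_serve():
    from concurrent import futures
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    resources_pb2_grpc.add_ResourcesServicer_to_server(Resources(), server)
    server.add_insecure_port("[::]:50051")
    server.start()
    server.wait_for_termination()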
|
import os
from flask import Flask,render_template,url_for,redirect
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate ## pip install Flask-Migrate
from flask_login import LoginManager
login_manager= LoginManager()
app= Flask(__name__)
app.config['SECRET_KEY']= 'mykeyritesh'
################################################
############## database configuration #########
basedir= os.path.abspath(os.path.dirname(__file__))
app.config['SQLALCHEMY_DATABASE_URI']= 'sqlite:///'+os.path.join(basedir,'data.sqlite')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS']= False
db=SQLAlchemy(app)
Migrate(app,db)
login_manager.init_app(app)
login_manager.login_view='login'
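# Hedged sketch (addition): flask_login also needs a user_loader before protected views
# will work; the User model referenced below is an assumption about the rest of this project.
# @login_manager.user_loader
# def load_user(user_id):
#     return User.query.get(int(user_id))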
|
# -----------------------------------------------------------------------------
# IMPORTS
# Standard library imports
from __future__ import print_function
import unittest
from itertools import product
# Related third party imports
import numpy
# Local application/library specific imports
from py_vollib.ref_python.black_scholes_merton.greeks.analytical import delta
from py_vollib.ref_python.black_scholes_merton.greeks.analytical import gamma
from py_vollib.ref_python.black_scholes_merton.greeks.analytical import theta
from py_vollib.ref_python.black_scholes_merton.greeks.analytical import vega
from py_vollib.ref_python.black_scholes_merton.greeks.analytical import rho
from py_vollib.ref_python.black_scholes_merton.greeks.numerical import delta as ndelta
from py_vollib.ref_python.black_scholes_merton.greeks.numerical import gamma as ngamma
from py_vollib.ref_python.black_scholes_merton.greeks.numerical import theta as ntheta
from py_vollib.ref_python.black_scholes_merton.greeks.numerical import vega as nvega
from py_vollib.ref_python.black_scholes_merton.greeks.numerical import rho as nrho
class TestRefPythonBSMGreeks(unittest.TestCase):
def setUp(self):
self.epsilon = 0.001
self.flags = ['c', 'p']
self.S = 100
self.Ks = numpy.linspace(20, 200, 10)
self.ts = numpy.linspace(0.01, 2, 10)
self.rs = numpy.linspace(0, 0.2, 10)
self.sigmas = numpy.linspace(0.1, 0.5, 10)
self.q = 0
self.arg_combinations = list(product(self.flags, [self.S], self.Ks, self.ts, self.rs, self.sigmas, [self.q]))
def diff_mean(self, left, right):
left_arr = numpy.array(left)
right_arr = numpy.array(right)
abs_diff = numpy.abs(left_arr - right_arr)
return numpy.mean(abs_diff)
def test_theta(self):
thetas = [theta(flag, S, K, t, r, sigma, q) for flag, S, K, t, r, sigma, q in self.arg_combinations]
nthetas = [ntheta(flag, S, K, t, r, sigma, q) for flag, S, K, t, r, sigma, q in self.arg_combinations]
self.assertTrue(self.diff_mean(thetas, nthetas) < self.epsilon)
def test_delta(self):
deltas = [delta(flag, S, K, t, r, sigma, q) for flag, S, K, t, r, sigma, q in self.arg_combinations]
ndeltas = [ndelta(flag, S, K, t, r, sigma, q) for flag, S, K, t, r, sigma, q in self.arg_combinations]
self.assertTrue(self.diff_mean(deltas, ndeltas) < self.epsilon)
def test_gamma(self):
gammas = [gamma(flag, S, K, t, r, sigma, q) for flag, S, K, t, r, sigma, q in self.arg_combinations]
ngammas = [ngamma(flag, S, K, t, r, sigma, q) for flag, S, K, t, r, sigma, q in self.arg_combinations]
self.assertTrue(self.diff_mean(gammas, ngammas) < self.epsilon)
def test_vega(self):
vegas = [vega(flag, S, K, t, r, sigma, q) for flag, S, K, t, r, sigma, q in self.arg_combinations]
nvegas = [nvega(flag, S, K, t, r, sigma, q) for flag, S, K, t, r, sigma, q in self.arg_combinations]
self.assertTrue(self.diff_mean(vegas, nvegas) < self.epsilon)
def test_rho(self):
rhos = [rho(flag, S, K, t, r, sigma, q) for flag, S, K, t, r, sigma, q in self.arg_combinations]
nrhos = [nrho(flag, S, K, t, r, sigma, q) for flag, S, K, t, r, sigma, q in self.arg_combinations]
self.assertTrue(self.diff_mean(rhos, nrhos) < self.epsilon)
if __name__ == '__main__':
unittest.main()
|
# Copyright (C) 2016-2017 Dmitry Marakasov <amdmi3@amdmi3.ru>
#
# This file is part of repology
#
# repology is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# repology is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with repology. If not, see <http://www.gnu.org/licenses/>.
import datetime
import json
import psycopg2
from repology.package import Package
class Query:
def __init__(self, query=None, *args):
self.parts = [query] if query else []
self.args = list(args)
def GetQuery(self):
return ' '.join(filter(None.__ne__, self.parts))
def GetArgs(self):
return self.args
def Append(self, other, *args):
if isinstance(other, str):
self.parts += [other]
self.args += list(args)
else:
self.parts.append(other.GetQuery())
self.args += other.GetArgs()
return self
def __bool__(self):
return not not self.parts
class AndQuery(Query):
def __init__(self, query=None, *args):
Query.__init__(self, query, *args)
def GetQuery(self):
if not self.parts:
return None
return ' AND '.join(map(lambda x: '(' + x + ')', filter(None.__ne__, self.parts)))
class OrQuery(Query):
def __init__(self, query=None, *args):
Query.__init__(self, query, *args)
def GetQuery(self):
if not self.parts:
return None
return ' OR '.join(map(lambda x: '(' + x + ')', filter(None.__ne__, self.parts)))
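# Hedged usage sketch (addition): composing WHERE fragments with AndQuery; the column
# names and values are placeholders, not tied to any real table below.
def _example_and_query():
    where = AndQuery()
    where.Append('effname >= %s', 'foo')
    where.Append('repo = %s', 'freebsd')
    return where.GetQuery(), where.GetArgs()
    # -> ('(effname >= %s) AND (repo = %s)', ['foo', 'freebsd'])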
class MetapackageRequest:
def __init__(self):
# effname filtering
self.namecond = None
self.namebound = None
self.nameorder = None
self.name_substring = None
self.effname_sorting = None
self.limit = None
# maintainer (maintainer_metapackages)
self.maintainer = None
self.maintainer_outdated = False
# num families (metapackage_repocounts)
self.morefamilies = None
self.lessfamilies = None
# repos (repo_metapackages)
self.repos = None
self.repos_outdated = False
# not repos (repo_metapackages + having)
self.repo_not = None
def NameStarting(self, name):
if self.namecond:
raise RuntimeError('duplicate effname condition')
self.namecond = '>='
self.namebound = name
self.nameorder = 'ASC'
def NameAfter(self, name):
if self.namecond:
raise RuntimeError('duplicate effname condition')
self.namecond = '>'
self.namebound = name
self.nameorder = 'ASC'
def NameBefore(self, name):
if self.namecond:
raise RuntimeError('duplicate effname condition')
self.namecond = '<'
self.namebound = name
self.nameorder = 'DESC'
def NameSubstring(self, substring):
if self.name_substring:
raise RuntimeError('duplicate effname substring condition')
self.name_substring = substring
def Maintainer(self, maintainer):
if self.maintainer:
raise RuntimeError('duplicate maintainer condition')
self.maintainer = maintainer
def OutdatedForMaintainer(self, maintainer):
if self.maintainer:
raise RuntimeError('duplicate maintainer condition')
self.maintainer = maintainer
self.maintainer_outdated = True
def InRepo(self, repo):
if self.repos and repo not in self.repos:
raise RuntimeError('duplicate repository condition')
self.repos = set((repo,))
def InAnyRepo(self, repos):
if self.repos:
for currentrepo in self.repos:
if currentrepo not in repos:
raise RuntimeError('duplicate repository condition')
else:
self.repos = set(repos)
def OutdatedInRepo(self, repo):
if self.repos and repo not in self.repos:
raise RuntimeError('duplicate repository condition')
self.repos = set((repo,))
self.repos_outdated = True
def NotInRepo(self, repo):
if self.repo_not:
raise RuntimeError('duplicate not-in-repository condition')
self.repo_not = repo
def MoreFamilies(self, num):
if self.morefamilies:
raise RuntimeError('duplicate more families condition')
self.morefamilies = num
def LessFamilies(self, num):
if self.lessfamilies:
raise RuntimeError('duplicate less families condition')
self.lessfamilies = num
def Limit(self, limit):
if self.limit:
raise RuntimeError('duplicate limit')
self.limit = limit
def GetQuery(self):
tables = set()
where = AndQuery()
having = AndQuery()
# table joins and conditions
if self.maintainer:
tables.add('maintainer_metapackages')
if self.maintainer_outdated:
where.Append('maintainer_metapackages.maintainer = %s AND maintainer_metapackages.num_packages_outdated > 0 AND maintainer_metapackages.num_packages_newest = 0', self.maintainer)
else:
where.Append('maintainer_metapackages.maintainer = %s', self.maintainer)
if self.morefamilies:
tables.add('metapackage_repocounts')
where.Append('metapackage_repocounts.num_families >= %s', self.morefamilies)
if self.lessfamilies:
tables.add('metapackage_repocounts')
where.Append('metapackage_repocounts.num_families <= %s', self.lessfamilies)
if self.repos:
tables.add('repo_metapackages')
if self.repos_outdated:
where.Append('repo_metapackages.repo in (' + ','.join(['%s'] * len(self.repos)) + ') AND repo_metapackages.num_outdated > 0 AND repo_metapackages.num_newest = 0', *self.repos)
else:
where.Append('repo_metapackages.repo in (' + ','.join(['%s'] * len(self.repos)) + ')', *self.repos)
if self.repo_not:
tables.add('repo_metapackages as repo_metapackages1')
having.Append('count(nullif(repo_metapackages1.repo = %s, false)) = 0', self.repo_not)
# effname conditions
if self.namecond and self.namebound:
where.Append('effname ' + self.namecond + ' %s', self.namebound)
if self.name_substring:
where.Append('effname LIKE %s', '%' + self.name_substring + '%')
# construct query
query = Query('SELECT DISTINCT effname FROM')
query.Append(tables.pop() if tables else 'repo_metapackages')
for table in tables:
query.Append('INNER JOIN ' + table + ' USING(effname)')
if where:
query.Append('WHERE').Append(where)
if having:
query.Append('GROUP BY effname HAVING').Append(having)
if self.nameorder:
query.Append('ORDER BY effname ' + self.nameorder)
else:
query.Append('ORDER BY effname ASC')
if self.limit:
query.Append('LIMIT %s', self.limit)
return (query.GetQuery(), query.GetArgs())
class Database:
def __init__(self, dsn, readonly=True, autocommit=False):
self.db = psycopg2.connect(dsn)
self.db.set_session(readonly=readonly, autocommit=autocommit)
self.cursor = self.db.cursor()
def CreateSchema(self):
self.cursor.execute('DROP TABLE IF EXISTS packages CASCADE')
self.cursor.execute('DROP TABLE IF EXISTS repositories CASCADE')
self.cursor.execute('DROP TABLE IF EXISTS repositories_history CASCADE')
self.cursor.execute('DROP TABLE IF EXISTS statistics CASCADE')
self.cursor.execute('DROP TABLE IF EXISTS statistics_history CASCADE')
self.cursor.execute('DROP TABLE IF EXISTS totals_history CASCADE')
self.cursor.execute('DROP TABLE IF EXISTS links CASCADE')
self.cursor.execute('DROP TABLE IF EXISTS problems CASCADE')
self.cursor.execute("""
CREATE TABLE packages (
repo text not null,
family text not null,
subrepo text,
name text not null,
effname text not null,
version text not null,
origversion text,
effversion text,
versionclass smallint,
maintainers text[],
category text,
comment text,
homepage text,
licenses text[],
downloads text[],
ignorepackage bool not null,
shadow bool not null,
ignoreversion bool not null,
extrafields jsonb not null
)
""")
self.cursor.execute("""
CREATE INDEX ON packages(effname)
""")
# repositories
self.cursor.execute("""
CREATE TABLE repositories (
name text not null primary key,
num_packages integer not null default 0,
num_packages_newest integer not null default 0,
num_packages_outdated integer not null default 0,
num_packages_ignored integer not null default 0,
num_metapackages integer not null default 0,
num_metapackages_unique integer not null default 0,
num_metapackages_newest integer not null default 0,
num_metapackages_outdated integer not null default 0,
last_update timestamp with time zone,
num_problems integer not null default 0,
num_maintainers integer not null default 0
)
""")
# repository_history
self.cursor.execute("""
CREATE TABLE repositories_history (
ts timestamp with time zone not null primary key,
snapshot jsonb not null
)
""")
# statistics
self.cursor.execute("""
CREATE TABLE statistics (
num_packages integer not null default 0,
num_metapackages integer not null default 0,
num_problems integer not null default 0,
num_maintainers integer not null default 0
)
""")
self.cursor.execute("""
INSERT INTO statistics VALUES(DEFAULT)
""")
# statistics_history
self.cursor.execute("""
CREATE TABLE statistics_history (
ts timestamp with time zone not null primary key,
snapshot jsonb not null
)
""")
# repo_metapackages
self.cursor.execute("""
CREATE MATERIALIZED VIEW repo_metapackages
AS
SELECT
repo,
effname,
count(nullif(versionclass=1, false)) AS num_newest,
count(nullif(versionclass=2, false)) AS num_outdated,
count(nullif(versionclass=3, false)) AS num_ignored
FROM packages
WHERE effname IN (
SELECT
effname
FROM packages
GROUP BY effname
HAVING count(nullif(shadow, true)) > 0
)
GROUP BY effname,repo
WITH DATA
""")
self.cursor.execute("""
CREATE UNIQUE INDEX ON repo_metapackages(repo, effname)
""")
self.cursor.execute("""
CREATE INDEX ON repo_metapackages(effname)
""")
self.cursor.execute("""
CREATE INDEX repo_metapackages_effname_trgm ON repo_metapackages USING gin (effname gin_trgm_ops);
""")
# maintainer_metapackages
self.cursor.execute("""
CREATE MATERIALIZED VIEW maintainer_metapackages
AS
SELECT
unnest(maintainers) as maintainer,
effname,
count(1) AS num_packages,
count(nullif(versionclass = 1, false)) AS num_packages_newest,
count(nullif(versionclass = 2, false)) AS num_packages_outdated,
count(nullif(versionclass = 3, false)) AS num_packages_ignored
FROM packages
GROUP BY maintainer, effname
WITH DATA
""")
self.cursor.execute("""
CREATE UNIQUE INDEX ON maintainer_metapackages(maintainer, effname)
""")
self.cursor.execute("""
CREATE INDEX ON maintainer_metapackages(effname)
""")
# maintainers
self.cursor.execute("""
CREATE MATERIALIZED VIEW maintainers AS
SELECT
unnest(maintainers) AS maintainer,
count(1) AS num_packages,
count(DISTINCT effname) AS num_metapackages,
count(nullif(versionclass = 1, false)) AS num_packages_newest,
count(nullif(versionclass = 2, false)) AS num_packages_outdated,
count(nullif(versionclass = 3, false)) AS num_packages_ignored
FROM packages
GROUP BY maintainer
ORDER BY maintainer
WITH DATA
""")
self.cursor.execute("""
CREATE UNIQUE INDEX ON maintainers(maintainer)
""")
# repo counts
self.cursor.execute("""
CREATE MATERIALIZED VIEW metapackage_repocounts AS
SELECT
effname,
count(DISTINCT repo) AS num_repos,
count(DISTINCT family) AS num_families,
bool_and(shadow) AS shadow_only
FROM packages
GROUP BY effname
ORDER BY effname
WITH DATA
""")
self.cursor.execute('CREATE UNIQUE INDEX ON metapackage_repocounts(effname)')
self.cursor.execute('CREATE INDEX ON metapackage_repocounts(num_repos)')
self.cursor.execute('CREATE INDEX ON metapackage_repocounts(num_families)')
self.cursor.execute('CREATE INDEX ON metapackage_repocounts(shadow_only, num_families)')
# links for link checker
self.cursor.execute("""
CREATE TABLE links (
url text not null primary key,
first_extracted timestamp with time zone not null,
last_extracted timestamp with time zone not null,
last_checked timestamp with time zone,
last_success timestamp with time zone,
last_failure timestamp with time zone,
status smallint,
redirect smallint,
size bigint,
location text
)
""")
# problems
self.cursor.execute("""
CREATE TABLE problems (
repo text not null,
name text not null,
effname text not null,
maintainer text,
problem text not null
)
""")
self.cursor.execute('CREATE INDEX ON problems(effname)')
self.cursor.execute('CREATE INDEX ON problems(repo, effname)')
self.cursor.execute('CREATE INDEX ON problems(maintainer)')
# reports
self.cursor.execute("""
CREATE TABLE IF NOT EXISTS reports (
created timestamp with time zone not null,
effname text not null,
need_verignore boolean not null,
need_split boolean not null,
need_merge boolean not null,
comment text,
reply text,
expires timestamp with time zone
)
""")
self.cursor.execute('CREATE INDEX ON reports(effname)')
# url_relations
self.cursor.execute("""
CREATE MATERIALIZED VIEW url_relations AS
SELECT DISTINCT
effname,
regexp_replace(regexp_replace(homepage, '/?([#?].*)?$', ''), '^https?://(www\\.)?', '') as url
FROM packages
WHERE homepage ~ '^https?://'
WITH DATA
""")
self.cursor.execute('CREATE UNIQUE INDEX ON url_relations(effname, url)') # we only need url here because we need unique index for concurrent refresh
self.cursor.execute('CREATE INDEX ON url_relations(url)')
def Clear(self):
self.cursor.execute("""DELETE FROM packages""")
self.cursor.execute("""
UPDATE repositories
SET
num_packages = 0,
num_packages_newest = 0,
num_packages_outdated = 0,
num_packages_ignored = 0,
num_metapackages = 0,
num_metapackages_unique = 0,
num_metapackages_newest = 0,
num_metapackages_outdated = 0,
num_problems = 0,
num_maintainers = 0
""")
self.cursor.execute("""DELETE FROM problems""")
self.cursor.execute("""
UPDATE statistics
SET
num_packages = 0,
num_metapackages = 0,
num_problems = 0,
num_maintainers = 0
""")
def AddPackages(self, packages):
self.cursor.executemany(
"""
INSERT INTO packages(
repo,
family,
subrepo,
name,
effname,
version,
origversion,
effversion,
versionclass,
maintainers,
category,
comment,
homepage,
licenses,
downloads,
ignorepackage,
shadow,
ignoreversion,
extrafields
) VALUES (
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s
)
""",
[
(
package.repo,
package.family,
package.subrepo,
package.name,
package.effname,
package.version,
package.origversion,
package.effversion,
package.versionclass,
package.maintainers,
package.category,
package.comment,
package.homepage,
package.licenses,
package.downloads,
package.ignore,
package.shadow,
package.ignoreversion,
json.dumps(package.extrafields),
) for package in packages
]
)
def MarkRepositoriesUpdated(self, reponames):
self.cursor.executemany(
"""
INSERT
INTO repositories (
name,
last_update
) VALUES (
%s,
now()
)
ON CONFLICT (name)
DO UPDATE SET
last_update = now()
""",
[[name] for name in reponames]
)
def UpdateViews(self):
self.cursor.execute("""REFRESH MATERIALIZED VIEW CONCURRENTLY repo_metapackages""")
self.cursor.execute("""REFRESH MATERIALIZED VIEW CONCURRENTLY maintainer_metapackages""")
self.cursor.execute("""REFRESH MATERIALIZED VIEW CONCURRENTLY maintainers""")
self.cursor.execute("""REFRESH MATERIALIZED VIEW CONCURRENTLY metapackage_repocounts""")
self.cursor.execute("""REFRESH MATERIALIZED VIEW CONCURRENTLY url_relations""")
# package stats
self.cursor.execute("""
INSERT
INTO repositories (
name,
num_packages,
num_packages_newest,
num_packages_outdated,
num_packages_ignored
) SELECT
repo,
sum(num_packages),
sum(num_packages_newest),
sum(num_packages_outdated),
sum(num_packages_ignored)
FROM(
SELECT
repo,
count(*) as num_packages,
count(nullif(versionclass=1, false)) as num_packages_newest,
count(nullif(versionclass=2, false)) as num_packages_outdated,
count(nullif(versionclass=3, false)) as num_packages_ignored
FROM packages
GROUP BY repo, effname
) AS TEMP
GROUP BY repo
ON CONFLICT (name)
DO UPDATE SET
num_packages = EXCLUDED.num_packages,
num_packages_newest = EXCLUDED.num_packages_newest,
num_packages_outdated = EXCLUDED.num_packages_outdated,
num_packages_ignored = EXCLUDED.num_packages_ignored
""")
self.cursor.execute("""
INSERT
INTO repositories (
name,
num_maintainers
) SELECT
repo,
count(DISTINCT maintainer)
FROM (
SELECT
repo,
unnest(maintainers) as maintainer
FROM packages
) AS temp
GROUP BY repo
ON CONFLICT (name)
DO UPDATE SET
num_maintainers = EXCLUDED.num_maintainers
""")
# metapackage stats
self.cursor.execute("""
INSERT
INTO repositories (
name,
num_metapackages,
num_metapackages_unique,
num_metapackages_newest,
num_metapackages_outdated
) SELECT
repo,
count(*),
count(nullif(unique_only, false)),
count(nullif(NOT unique_only and num_packages_newest>0, false)),
count(nullif(NOT unique_only and num_packages_newest=0, false))
FROM(
SELECT
repo,
TRUE as unique_only,
count(*) as num_packages,
count(nullif(versionclass=1, false)) as num_packages_newest
FROM packages
WHERE effname IN (
SELECT
effname
FROM metapackage_repocounts
WHERE NOT shadow_only AND num_families = 1
)
GROUP BY repo, effname
UNION ALL
SELECT
repo,
FALSE as unique_only,
count(*) as num_packages,
count(nullif(versionclass=1, false)) as num_packages_newest
FROM packages
WHERE effname IN (
SELECT
effname
FROM metapackage_repocounts
WHERE NOT shadow_only AND num_families > 1
)
GROUP BY repo, effname
) AS TEMP
GROUP BY repo
ON CONFLICT (name)
DO UPDATE SET
num_metapackages = EXCLUDED.num_metapackages,
num_metapackages_unique = EXCLUDED.num_metapackages_unique,
num_metapackages_newest = EXCLUDED.num_metapackages_newest,
num_metapackages_outdated = EXCLUDED.num_metapackages_outdated
""")
# problems
self.cursor.execute("""
INSERT
INTO problems (
repo,
name,
effname,
maintainer,
problem
)
SELECT DISTINCT
packages.repo,
packages.name,
packages.effname,
case when packages.maintainers = '{}' then null else unnest(packages.maintainers) end,
'Homepage link "' ||
links.url ||
'" is dead (' ||
CASE
WHEN links.status=-1 THEN 'connect timeout'
WHEN links.status=-2 THEN 'too many redirects'
WHEN links.status=-4 THEN 'cannot connect'
WHEN links.status=-5 THEN 'invalid url'
WHEN links.status=-6 THEN 'DNS problem'
ELSE 'HTTP error ' || links.status
END ||
') for more than a month.'
FROM packages
INNER JOIN links ON (packages.homepage = links.url)
WHERE
(links.status IN (-1, -2, -4, -5, -6, 400, 404) OR links.status >= 500) AND
(
(links.last_success IS NULL AND links.first_extracted < now() - INTERVAL '30' DAY) OR
links.last_success < now() - INTERVAL '30' DAY
)
""")
self.cursor.execute("""
INSERT
INTO problems (
repo,
name,
effname,
maintainer,
problem
)
SELECT DISTINCT
packages.repo,
packages.name,
packages.effname,
case when packages.maintainers = '{}' then null else unnest(packages.maintainers) end,
'Homepage link "' ||
links.url ||
'" is a permanent redirect to "' ||
links.location ||
'" and should be updated'
FROM packages
INNER JOIN links ON (packages.homepage = links.url)
WHERE
(
links.redirect = 301 AND
replace(links.url, 'http://', 'https://') = links.location
)
""")
self.cursor.execute("""
INSERT
INTO problems(repo, name, effname, maintainer, problem)
SELECT DISTINCT
repo,
name,
effname,
case when maintainers = '{}' then null else unnest(maintainers) end,
'Homepage link "' || homepage || '" points to Google Code which was discontinued. The link should be updated (probably along with download URLs). If this link is still alive, it may point to a new project homepage.'
FROM packages
WHERE
homepage SIMILAR TO 'https?://([^/]+.)?googlecode.com(/%)?' OR
homepage SIMILAR TO 'https?://code.google.com(/%)?'
""")
self.cursor.execute("""
INSERT
INTO problems(repo, name, effname, maintainer, problem)
SELECT DISTINCT
repo,
name,
effname,
case when maintainers = '{}' then null else unnest(maintainers) end,
'Homepage link "' || homepage || '" points to codeplex which was discontinued. The link should be updated (probably along with download URLs).'
FROM packages
WHERE
homepage SIMILAR TO 'https?://([^/]+.)?codeplex.com(/%)?'
""")
self.cursor.execute("""
INSERT
INTO problems(repo, name, effname, maintainer, problem)
SELECT DISTINCT
repo,
name,
effname,
case when maintainers = '{}' then null else unnest(maintainers) end,
'Homepage link "' || homepage || '" points to Gna which was discontinued. The link should be updated (probably along with download URLs).'
FROM packages
WHERE
homepage SIMILAR TO 'https?://([^/]+.)?gna.org(/%)?'
""")
self.cursor.execute("""
INSERT
INTO repositories (
name,
num_problems
) SELECT
repo,
count(distinct effname)
FROM problems
GROUP BY repo
ON CONFLICT (name)
DO UPDATE SET
num_problems = EXCLUDED.num_problems
""")
# statistics
self.cursor.execute("""
UPDATE statistics
SET
num_packages = (SELECT count(*) FROM packages),
num_metapackages = (SELECT count(*) FROM metapackage_repocounts WHERE NOT shadow_only),
num_problems = (SELECT count(*) FROM problems),
num_maintainers = (SELECT count(*) FROM maintainers)
""")
# cleanup expired reports
self.cursor.execute('DELETE FROM reports WHERE now() >= expires')
# cleanup stale links
self.cursor.execute('DELETE FROM links WHERE last_extracted < now() - INTERVAL \'1\' MONTH')
def Commit(self):
self.db.commit()
def GetMetapackage(self, names):
self.cursor.execute(
"""
SELECT
repo,
family,
subrepo,
name,
effname,
version,
origversion,
effversion,
versionclass,
maintainers,
category,
comment,
homepage,
licenses,
downloads,
ignorepackage,
shadow,
ignoreversion,
extrafields
FROM packages
WHERE effname {}
""".format('= ANY (%s)' if isinstance(names, list) else '= %s'),
(names,)
)
return [
Package(
repo=row[0],
family=row[1],
subrepo=row[2],
name=row[3],
effname=row[4],
version=row[5],
origversion=row[6],
effversion=row[7],
versionclass=row[8],
maintainers=row[9],
category=row[10],
comment=row[11],
homepage=row[12],
licenses=row[13],
downloads=row[14],
ignore=row[15],
shadow=row[16],
ignoreversion=row[17],
extrafields=row[18],
) for row in self.cursor.fetchall()
]
def GetMetapackages(self, *filters, limit=500):
req = MetapackageRequest()
for f in filters:
if f:
f.ApplyToRequest(req)
req.Limit(limit)
query, args = req.GetQuery()
self.cursor.execute(
"""
SELECT
repo,
family,
subrepo,
name,
effname,
version,
origversion,
effversion,
versionclass,
maintainers,
category,
comment,
homepage,
licenses,
downloads,
ignorepackage,
shadow,
ignoreversion,
extrafields
FROM packages WHERE effname IN (
{}
)
""".format(query),
args
)
return [
Package(
repo=row[0],
family=row[1],
subrepo=row[2],
name=row[3],
effname=row[4],
version=row[5],
origversion=row[6],
effversion=row[7],
versionclass=row[8],
maintainers=row[9],
category=row[10],
comment=row[11],
homepage=row[12],
licenses=row[13],
downloads=row[14],
ignore=row[15],
shadow=row[16],
ignoreversion=row[17],
extrafields=row[18],
) for row in self.cursor.fetchall()
]
def GetPackagesCount(self):
self.cursor.execute("""SELECT num_packages FROM statistics LIMIT 1""")
return self.cursor.fetchall()[0][0]
def GetMetapackagesCount(self):
self.cursor.execute("""SELECT num_metapackages FROM statistics LIMIT 1""")
return self.cursor.fetchall()[0][0]
def GetMaintainersCount(self):
self.cursor.execute("""SELECT num_maintainers FROM statistics LIMIT 1""")
return self.cursor.fetchall()[0][0]
def GetMaintainersRange(self):
# should use min/max here, but these are slower on pgsql 9.6
self.cursor.execute('SELECT maintainer FROM maintainers ORDER BY maintainer LIMIT 1')
min_ = self.cursor.fetchall()[0][0]
self.cursor.execute('SELECT maintainer FROM maintainers ORDER BY maintainer DESC LIMIT 1')
max_ = self.cursor.fetchall()[0][0]
return (min_, max_)
def GetMaintainers(self, bound=None, reverse=False, search=None, limit=500):
where = []
order = 'maintainer'
query = """
SELECT
maintainer,
num_packages,
num_packages_outdated
FROM maintainers
"""
args = []
if bound:
if reverse:
where.append('maintainer <= %s')
order = 'maintainer DESC'
args.append(bound)
else:
where.append('maintainer >= %s')
args.append(bound)
if search:
where.append('maintainer LIKE %s')
args.append('%' + search + '%')
if where:
query += ' WHERE ' + ' AND '.join(where)
if order:
query += ' ORDER BY ' + order
if limit:
query += ' LIMIT %s'
args.append(limit)
self.cursor.execute(query, args)
return sorted([
{
'maintainer': row[0],
'num_packages': row[1],
'num_packages_outdated': row[2]
} for row in self.cursor.fetchall()
], key=lambda m: m['maintainer'])
def GetMaintainerInformation(self, maintainer):
self.cursor.execute(
"""
SELECT
num_packages,
num_packages_newest,
num_packages_outdated,
num_packages_ignored,
num_metapackages
FROM maintainers
WHERE maintainer = %s
""",
(maintainer,)
)
rows = self.cursor.fetchall()
if not rows:
return None
return {
'num_packages': rows[0][0],
'num_packages_newest': rows[0][1],
'num_packages_outdated': rows[0][2],
'num_packages_ignored': rows[0][3],
'num_metapackages': rows[0][4],
}
def GetMaintainerMetapackages(self, maintainer, limit=1000):
self.cursor.execute(
"""
SELECT
effname
FROM maintainer_metapackages
WHERE maintainer = %s
ORDER BY effname
LIMIT %s
""",
(maintainer, limit)
)
return [row[0] for row in self.cursor.fetchall()]
def GetMaintainerSimilarMaintainers(self, maintainer, limit=100):
# this obscure request needs some clarification
#
# what we calculate as score here is actually Jaccard index
# (see wikipedia) for two sets (of metapackages maintained by
# two maintainers)
#
# let M = set of metapackages for maintainer passed to this function
# let C = set of metapackages for other maintainer we test for similarity
#
# score = |M⋂C| / |M⋃C| = |M⋂C| / (|M| + |C| - |M⋂C|)
#
# - count(*) is number of common metapackages for both maintainers, e.g. |M⋂C|
# - min(num_metapackages) is number of metapackages for candidate maintainer |C|
# we use min because we use GROUP BY and just need a group operation; since we
# group by maintainer and join by maintainer, num_metapackages is the same
# in all records, and we may pick min, max, avg, whatever
# - sub-select just gets |M|
# - the divisor is |M⋃C| = |M| + |C| - |M⋂C|
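# worked example with illustrative numbers: if the passed maintainer has |M| = 10
# metapackages, a candidate has |C| = 8, and they share |M⋂C| = 4, the score is
# 100 * 4 / (8 - 4 + 10) = 400 / 14 ≈ 28.6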
self.cursor.execute(
"""
SELECT
maintainer,
count(*) AS count,
100.0 * count(*) / (
min(num_metapackages) -
count(*) +
(
SELECT num_metapackages
FROM maintainers
WHERE maintainer=%s
)
) AS score
FROM maintainer_metapackages
INNER JOIN maintainers USING(maintainer)
WHERE
maintainer != %s AND
effname IN (
SELECT
effname
FROM maintainer_metapackages
WHERE maintainer=%s
)
GROUP BY maintainer
ORDER BY score DESC
LIMIT %s
""",
(maintainer, maintainer, maintainer, limit)
)
return [
{
'maintainer': row[0],
'count': row[1],
'match': row[2],
} for row in self.cursor.fetchall()
]
def GetRepositories(self):
self.cursor.execute("""
SELECT
name,
num_packages,
num_packages_newest,
num_packages_outdated,
num_packages_ignored,
num_metapackages,
num_metapackages_unique,
num_metapackages_newest,
num_metapackages_outdated,
last_update at time zone 'UTC',
now() - last_update,
num_problems,
num_maintainers
FROM repositories
""")
return [
{
'name': row[0],
'num_packages': row[1],
'num_packages_newest': row[2],
'num_packages_outdated': row[3],
'num_packages_ignored': row[4],
'num_metapackages': row[5],
'num_metapackages_unique': row[6],
'num_metapackages_newest': row[7],
'num_metapackages_outdated': row[8],
'last_update_utc': row[9],
'since_last_update': row[10],
'num_problems': row[11],
'num_maintainers': row[12],
} for row in self.cursor.fetchall()
]
def GetRepository(self, repo):
# XXX: remove duplication with GetRepositories()
self.cursor.execute(
"""
SELECT
num_packages,
num_packages_newest,
num_packages_outdated,
num_packages_ignored,
num_metapackages,
num_metapackages_unique,
num_metapackages_newest,
num_metapackages_outdated,
last_update at time zone 'UTC',
now() - last_update,
num_problems,
num_maintainers
FROM repositories
WHERE name = %s
""",
(repo,)
)
rows = self.cursor.fetchall()
if rows:
row = rows[0]
return {
'num_packages': row[0],
'num_packages_newest': row[1],
'num_packages_outdated': row[2],
'num_packages_ignored': row[3],
'num_metapackages': row[4],
'num_metapackages_unique': row[5],
'num_metapackages_newest': row[6],
'num_metapackages_outdated': row[7],
'last_update_utc': row[8],
'since_last_update': row[9],
'num_problems': row[10],
'num_maintainers': row[11],
}
else:
return {
'num_packages': 0,
'num_packages_newest': 0,
'num_packages_outdated': 0,
'num_packages_ignored': 0,
'num_metapackages': 0,
'num_metapackages_unique': 0,
'num_metapackages_newest': 0,
'num_metapackages_outdated': 0,
'last_update_utc': None,
'since_last_update': None,
'num_problems': 0,
'num_maintainers': 0,
}
def GetRepositoriesHistoryAgo(self, seconds=60 * 60 * 24):
self.cursor.execute("""
SELECT
ts,
now() - ts,
snapshot
FROM repositories_history
WHERE ts IN (
SELECT
ts
FROM repositories_history
WHERE ts < now() - INTERVAL %s
ORDER BY ts DESC
LIMIT 1
)
""", (datetime.timedelta(seconds=seconds),)
)
row = self.cursor.fetchall()[0]
return {
'timestamp': row[0],
'timedelta': row[1],
**row[2]
}
def GetRepositoriesHistoryPeriod(self, seconds=60 * 60 * 24, repo=None):
repopath = ''
repoargs = ()
if repo:
repopath = '#>%s'
repoargs = ('{' + repo + '}', )
self.cursor.execute("""
SELECT
ts,
now() - ts,
snapshot{}
FROM repositories_history
WHERE ts >= now() - INTERVAL %s
ORDER BY ts
""".format(repopath),
repoargs + (datetime.timedelta(seconds=seconds),)
)
return [
{
'timestamp': row[0],
'timedelta': row[1],
'snapshot': row[2]
}
for row in self.cursor.fetchall()
]
def GetStatisticsHistoryPeriod(self, seconds=60 * 60 * 24):
self.cursor.execute("""
SELECT
ts,
now() - ts,
snapshot
FROM statistics_history
WHERE ts >= now() - INTERVAL %s
ORDER BY ts
""", (datetime.timedelta(seconds=seconds),)
)
return [
{
'timestamp': row[0],
'timedelta': row[1],
'snapshot': row[2]
}
for row in self.cursor.fetchall()
]
def Query(self, query, *args):
self.cursor.execute(query, args)
return self.cursor.fetchall()
def SnapshotHistory(self):
self.cursor.execute(
"""
INSERT
INTO repositories_history(
ts,
snapshot
)
SELECT
now(),
jsonb_object_agg(snapshot.name, to_jsonb(snapshot) - 'name')
FROM (
SELECT
name,
num_metapackages,
num_metapackages_unique,
num_metapackages_newest,
num_metapackages_outdated,
num_problems,
num_maintainers
FROM repositories
) AS snapshot
"""
)
self.cursor.execute(
"""
INSERT
INTO statistics_history(
ts,
snapshot
)
SELECT
now(),
to_jsonb(snapshot)
FROM (
SELECT
*
FROM statistics
) AS snapshot
"""
)
def ExtractLinks(self):
self.cursor.execute(
"""
INSERT
INTO links(
url,
first_extracted,
last_extracted
) SELECT
unnest(downloads),
now(),
now()
FROM packages
UNION
SELECT
homepage,
now(),
now()
FROM packages
WHERE homepage IS NOT NULL AND repo NOT IN('cpan', 'pypi', 'rubygems', 'hackage', 'cran')
ON CONFLICT (url)
DO UPDATE SET
last_extracted = now()
"""
)
def GetLinksForCheck(self, after=None, prefix=None, recheck_age=None, limit=None, unchecked_only=False, checked_only=False, failed_only=False, succeeded_only=False):
conditions = []
args = []
# reduce the noise while linkchecker code doesn't support other schemas
conditions.append('(url LIKE %s OR url LIKE %s)')
args.append('http://%')
args.append('https://%')
if after is not None:
conditions.append('url > %s')
args.append(after)
if prefix is not None:
conditions.append('url LIKE %s')
args.append(prefix + '%')
if recheck_age is not None:
conditions.append('(last_checked IS NULL OR last_checked <= now() - INTERVAL %s)')
args.append(datetime.timedelta(seconds=recheck_age))
if unchecked_only:
conditions.append('last_checked IS NULL')
if checked_only:
conditions.append('last_checked IS NOT NULL')
if failed_only:
conditions.append('status != 200')
if succeeded_only:
conditions.append('status = 200')
conditions_expr = ''
limit_expr = ''
if conditions:
conditions_expr = 'WHERE ' + ' AND '.join(conditions)
if limit:
limit_expr = 'LIMIT %s'
args.append(limit)
self.cursor.execute(
"""
SELECT
url
FROM links
{}
ORDER BY url
{}
""".format(conditions_expr, limit_expr),
args
)
return [row[0] for row in self.cursor.fetchall()]
linkcheck_status_timeout = -1
linkcheck_status_too_many_redirects = -2
linkcheck_status_unknown_error = -3
linkcheck_status_cannot_connect = -4
linkcheck_status_invalid_url = -5
linkcheck_status_dns_error = -6
def UpdateLinkStatus(self, url, status, redirect=None, size=None, location=None):
success = status == 200
self.cursor.execute(
"""
UPDATE links
SET
last_checked = now(),
last_success = CASE WHEN %s THEN now() ELSE last_success END,
last_failure = CASE WHEN %s THEN now() ELSE last_failure END,
status = %s,
redirect = %s,
size = %s,
location = %s
WHERE url = %s
""",
(
success,
not success,
status,
redirect,
size,
location,
url
)
)
def GetMetapackageLinkStatuses(self, name):
self.cursor.execute(
"""
SELECT
url,
last_checked,
last_success,
last_failure,
status,
redirect,
size,
location
FROM links
WHERE url in (
-- this additional wrap seem to fix query planner somehow
-- to use index scan on links instead of seq scan, which
-- makes the query 100x faster; XXX: recheck with postgres 10
-- or report this?
SELECT DISTINCT url from (
SELECT
unnest(downloads) as url
FROM packages
WHERE effname = %s
UNION
SELECT
homepage as url
FROM packages
WHERE homepage IS NOT NULL and effname = %s
) AS tmp
)
""",
(name, name)
)
return {
row[0]: {
'last_checked': row[1],
'last_success': row[2],
'last_failure': row[3],
'status': row[4],
'redirect': row[5],
'size': row[6],
'location': row[7]
}
for row in self.cursor.fetchall()
}
def GetProblemsCount(self, repo=None, effname=None, maintainer=None):
where_expr = ''
args = []
conditions = []
if repo:
conditions.append('repo = %s')
args.append(repo)
if effname:
conditions.append('effname = %s')
args.append(effname)
if maintainer:
conditions.append('maintainer = %s')
args.append(maintainer)
if conditions:
where_expr = 'WHERE ' + ' AND '.join(conditions)
self.cursor.execute(
"""
SELECT count(*)
FROM problems
{}
""".format(where_expr),
args
)
return self.cursor.fetchall()[0][0]
def GetProblems(self, repo=None, effname=None, maintainer=None, limit=None):
# XXX: eliminate duplication with GetProblemsCount()
where_expr = ''
limit_expr = ''
args = []
conditions = []
if repo:
conditions.append('repo = %s')
args.append(repo)
if effname:
conditions.append('effname = %s')
args.append(effname)
if maintainer:
conditions.append('maintainer = %s')
args.append(maintainer)
if conditions:
where_expr = 'WHERE ' + ' AND '.join(conditions)
if limit:
limit_expr = 'LIMIT %s'
args.append(limit)
self.cursor.execute(
"""
SELECT
repo,
name,
effname,
maintainer,
problem
FROM problems
{}
ORDER BY repo, effname, maintainer
{}
""".format(where_expr, limit_expr),
args
)
return [
{
'repo': row[0],
'name': row[1],
'effname': row[2],
'maintainer': row[3],
'problem': row[4],
}
for row in self.cursor.fetchall()
]
def AddReport(self, effname, need_verignore, need_split, need_merge, comment):
self.cursor.execute(
"""
INSERT
INTO reports (
created,
effname,
need_verignore,
need_split,
need_merge,
comment
) VALUES (
now(),
%s,
%s,
%s,
%s,
%s
)
""",
(
effname,
need_verignore,
need_split,
need_merge,
comment
)
)
def GetReportsCount(self, effname):
self.cursor.execute('SELECT count(*) FROM reports WHERE effname = %s', (effname, ))
return self.cursor.fetchall()[0][0]
def GetReports(self, effname):
self.cursor.execute(
"""
SELECT
now() - created,
effname,
need_verignore,
need_split,
need_merge,
comment,
reply,
CASE WHEN expires IS NULL THEN NULL WHEN expires > now() THEN expires - now() ELSE interval '0' END
FROM reports
WHERE effname = %s
ORDER BY created DESC
""",
(effname, )
)
return [
{
'created_ago': row[0],
'effname': row[1],
'need_verignore': row[2],
'need_split': row[3],
'need_merge': row[4],
'comment': row[5],
'reply': row[6],
'expires': row[7],
}
for row in self.cursor.fetchall()
]
def GetRelatedMetapackages(self, name, limit=500):
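# Walk url_relations transitively: start from the given metapackage's urls and
# repeatedly pull in any metapackage sharing a url, yielding the connected
# component of related metapackages (capped at `limit` names).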
self.cursor.execute(
"""
WITH RECURSIVE r AS (
SELECT
effname,
url
FROM url_relations
WHERE effname=%s
UNION
SELECT
url_relations.effname,
url_relations.url
FROM url_relations
JOIN r ON
url_relations.effname = r.effname OR url_relations.url = r.url
) SELECT DISTINCT effname FROM r ORDER BY effname LIMIT %s
""",
(name, limit)
)
return [
row[0] for row in self.cursor.fetchall()
]
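# --- Hedged usage sketch (not part of the original module) ---
# A minimal illustration of how the link-check methods above might be driven
# by a checker loop. `database` is an instance of the class defined above;
# `check_url` is a hypothetical callable returning
# (status, redirect, size, location) and is not defined in this module.
def _example_recheck_loop(database, check_url, recheck_age=7 * 24 * 3600, batch=1000):
    """Illustrative only: fetch stale http(s) links and write back their statuses."""
    for url in database.GetLinksForCheck(recheck_age=recheck_age, limit=batch):
        try:
            status, redirect, size, location = check_url(url)
        except Exception:
            # fall back to the synthetic "unknown error" status defined above
            status = database.linkcheck_status_unknown_error
            redirect, size, location = None, None, None
        database.UpdateLinkStatus(url, status, redirect=redirect, size=size, location=location)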
|
import zipper
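# Example driver script for the accompanying "zipper" module: exercises its
# ShapefileZipper class to package shapefiles (and their sidecar files) into
# .zip archives. All paths below are illustrative Windows paths.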
def zip_shapefile_simple():
try:
### Inputs
input_shapefile = "C:\\Temp\\Shapefiles\\test.shp" # path of the single shapefile to zip
shape_zipper = zipper.ShapefileZipper() # Create Instance of Class
# By passing only an input file, an individual <shapefile name>.zip will be created.
# An existing zip file will be deleted if it matches the name of the new .zip to be created.
results = shape_zipper.zip_shapefile(input_shapefile)
# returns a string path to the .zip that was created
if results:
print("SUCCESS! " + str(results))
else:
print("WARNING: SHAPEFILE NOT ZIPPED: " + input_shapefile)
except Exception as e:
print("ERROR: BIG FAIL! " + str(e))
def zip_shapefile_output_zip():
try:
### Inputs
input_shapefile = "C:\\Temp\\Shapefiles\\test.shp" # path of the single shapefile to zip
output_zip = "C:\\TEMP\\shapefiles\\myZip1.zip"
shape_zipper = zipper.ShapefileZipper() # Create Instance of Class
# An existing zip file will be deleted if it matches the name of the new .zip to be created.
results = shape_zipper.zip_shapefile(input_shapefile, output_zip)
# returns a string path to the .zip that was created
if results:
print("SUCCESS! " + str(results))
else:
print("WARNING: SHAPEFILE NOT ZIPPED: " + input_shapefile)
except Exception as e:
print("ERROR: BIG FAIL! " + str(e))
def zip_shapefile_output_zip_append():
try:
### Inputs
input_shapefile = "C:\\Temp\\Shapefiles\\test.shp" # path of the single shapefile to zip
output_zip = "C:\\TEMP\\shapefiles\\myZip2.zip"
file_mode = 'a' # append mode: the shapefile will be appended to the existing output_zip (myZip2.zip)
# Warning: using append mode, it is possible to append duplicate files into the .zip.
shape_zipper = zipper.ShapefileZipper() # Create Instance of Class
results = shape_zipper.zip_shapefile(input_shapefile, output_zip, file_mode)
# returns a string path to the .zip that was created
if results:
print("SUCCESS! " + str(results))
else:
print("WARNING: SHAPEFILE NOT ZIPPED: " + input_shapefile)
except Exception as e:
print("ERROR: BIG FAIL! " + str(e))
def zip_dir_simple():
try:
### Inputs
input_dir = "C:\\TEMP\\Shapefiles" # this dir will be searched for shapefiles
shape_zipper = zipper.ShapefileZipper() # Create Instance of Class
# By passing only an input directory, an individual <shapefile name>.zip is created for each shapefile found.
# Existing zip files will be deleted if they match the name of the new .zip to be created.
results = shape_zipper.zip_shapefile_directory(input_dir)
# returns a list of zips that were created, or None if nothing run
if results:
print("SUCCESS!" + str(results))
else:
print("NO SHAPEFILES ZIPPED, MAYBE THERE AREN'T ANY WITHIN " + input_dir)
except Exception as e:
print("BIG FAIL! " + str(e))
def zip_dir_output_zip():
try:
### Inputs
input_dir = "C:\\TEMP\\Shapefiles" # this dir will be searched for shapefiles
output_zip = "C:\\TEMP\\Shapefiles\\myZipDir.zip" # Any shapefiles located within the input directory will be appended into a single zip
shape_zipper = zipper.ShapefileZipper() # Create Instance of Class
# Existing zip files will be deleted if they match the name of the new .zip to be created.
results = shape_zipper.zip_shapefile_directory(input_dir, output_zip)
# returns a list of zips that were created
if results:
print("SUCCESS!" + str(results))
else:
print("NO SHAPEFILES ZIPPED, MAYBE THERE AREN'T ANY WITHIN " + input_dir)
except Exception as e:
print("BIG FAIL! " + str(e))
def zip_dir_output_zip_append():
try:
### Inputs
input_dir = "C:\\TEMP\\Shapefiles" # this dir will be searched for shapefiles
output_zip = "C:\\TEMP\\Shapefiles\\myZipDir2.zip" # Any shapefiles located within the input directory will be appended into a single zip
file_mode = 'a' # append mode: any shapefiles found will be appended to the existing output_zip (myZipDir2.zip)
# Warning: using append mode, it is possible to append duplicate files into the .zip.
shape_zipper = zipper.ShapefileZipper() # Create Instance of Class
# In append mode the existing .zip is kept and newly found shapefiles are added to it.
results = shape_zipper.zip_shapefile_directory(input_dir, output_zip, file_mode)
# returns a list of zips that were created
if results:
print("SUCCESS!" + str(results))
else:
print("NO SHAPEFILES ZIPPED, MAYBE THERE AREN'T ANY WITHIN " + input_dir)
except Exception as e:
print("BIG FAIL! " + str(e))
# call example functions for directories
# print("Call zip_dir_simple()")
zip_dir_simple()
print("Call zip_dir_output_zip()")
zip_dir_output_zip()
print("zip_dir_output_zip_append()")
zip_dir_output_zip_append()
# call example functions for single shapefiles
print("zip_shapefile_simple()")
zip_shapefile_simple()
print("zip_shapefile_output_zip()")
zip_shapefile_output_zip()
print("zip_shapefile_output_zip_append()")
zip_shapefile_output_zip_append()
|