text stringlengths 8 6.05M |
|---|
# -*- coding: utf-8 -*-
from os import path
from glob import glob
import sys
import importlib
class Bot(object):
    """A tiny command bus.

    Listeners are plain callables; a listener may carry a ``_command``
    attribute to restrict it to a single command, otherwise it fires on
    every command.
    """

    def __init__(self):
        self._cmd = ''
        self._listeners = []

    def add_listener(self, listener):
        """Register *listener* to be invoked by :meth:`listen`."""
        self._listeners.append(listener)

    def say(self, m):
        """Emit message *m* (stdout)."""
        print(m)

    @property
    def cmd(self):
        """The command currently (or most recently) being dispatched."""
        return self._cmd

    @cmd.setter
    def cmd(self, value):
        self._cmd = value

    def listen(self, cmd):
        """Dispatch *cmd* to every matching listener.

        A listener without a ``_command`` attribute receives all commands;
        one with ``_command`` fires only on an exact match.
        """
        self.cmd = cmd
        for listener in self._listeners:
            if not hasattr(listener, "_command") or listener._command == cmd:
                listener(self)
def main():
    """Discover plugin scripts, register their hooked callables, and fire
    two sample commands.

    Every ``scripts/*.py`` module is imported; any attribute carrying a
    ``_hook`` marker is registered as a listener on a fresh :class:`Bot`.
    """
    sys.path.append('scripts')
    bot = Bot()
    for script in glob('scripts/*.py'):
        name = path.splitext(path.basename(script))[0]
        module = importlib.import_module('scripts.' + name)
        for attr_name in dir(module):
            candidate = getattr(module, attr_name)
            if hasattr(candidate, "_hook"):
                bot.add_listener(candidate)
    bot.listen('hello')
    bot.listen('goodby')


if __name__ == '__main__':
    main()
|
import sys
from collections import Counter


def is_permutation_of(a, b):
    """Return True when *a* and *b* contain exactly the same characters
    with the same multiplicities (case-sensitive).

    Counter equality compares letter frequencies directly; the length
    check is a cheap early exit for the common mismatch case.
    """
    return len(a) == len(b) and Counter(a) == Counter(b)


input_string_1 = "test"
input_string_2 = "tTtt"

# Comparison is case-sensitive, so "t" and "T" are different letters.
is_permutation = is_permutation_of(input_string_1, input_string_2)

# Report the verdict on stdout.
if is_permutation:
    print("STRINGS ARE PERMUTATIONS OF EACH OTHER")
else:
    print("STRINGS ARE NOT PERMUTATIONS OF EACH OTHER")
|
def unlimited_arguments(*args):
    """Print each positional argument on its own line."""
    for value in args:
        print(value)


# Arguments can be passed directly or unpacked from a sequence with *.
unlimited_arguments(1, 2, 3, 4)
unlimited_arguments(*[1, 2, 3, 4])
def unlimited_arguments2(*args, **keyword_args):
    """Print the keyword-argument dict, then each key/value pair in turn.

    Positional arguments are accepted but intentionally ignored.
    """
    print(keyword_args)
    for key in keyword_args:
        print(key, keyword_args[key])


unlimited_arguments(1, 2, 3, 4)
unlimited_arguments(*[1, 2, 3, 4])
unlimited_arguments2(1, 2, 3, 4, name='Taddes', age=30)
from create_model import *
from transform import load_weight_16
import numpy as np
from keras import optimizers
from keras import regularizers
from keras import callbacks
from keras.utils import to_categorical
from keras.preprocessing.image import ImageDataGenerator
if __name__ == "__main__":
    # Load the pre-extracted dataset; x holds flattened grayscale images
    # (reshaped to 48x48x1 below), y holds integer class labels.
    x = np.load("x.npy")
    x_reshape = x.reshape((x.shape[0], 48, 48, 1))
    y = np.load("y.npy")
    # One-hot encode the labels for categorical crossentropy.
    y_cat = to_categorical(y)
    # Augment only the training split; the validation generator applies
    # no transformations.
    train_datagen = ImageDataGenerator(
        featurewise_center=False,
        featurewise_std_normalization=False,
        rotation_range=20,
        width_shift_range=0.2,
        height_shift_range=0.2,
        horizontal_flip=True)
    valid_datagen = ImageDataGenerator()
    # Split convention used throughout: first 2870 samples = validation,
    # remainder = training.
    train_datagen.fit(x_reshape[2870: , : , : , :])
    valid_datagen.fit(x_reshape[0:2870 , : , : , :])
    test_generator = valid_datagen.flow(x_reshape[0:2870 , : , : , :], y_cat[0:2870, :], batch_size=512)
    train_generator = train_datagen.flow(x_reshape[2870: , : , : , :], y_cat[2870:, :], batch_size=512)
    # create_model is imported star-style from create_model.py; the
    # alpha/depth_multiplier arguments suggest a MobileNet-style builder —
    # TODO confirm against create_model.py.
    model = create_model(input_shape=(48, 48, 1),
                         alpha=0.5,
                         depth_multiplier=1,
                         classes=7)
    adam = optimizers.Adam(lr=0.0002, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
    model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['categorical_accuracy'])
    # Checkpoint weights whenever validation accuracy improves.
    filepath = "model/m-{epoch:02d}-{categorical_accuracy:.3f}-{val_categorical_accuracy:.3f}.h5"
    checkpoint = callbacks.ModelCheckpoint(filepath, monitor='val_categorical_accuracy', verbose=0, save_best_only=True, save_weights_only=True, mode='auto', period=1)
    # NOTE(review): steps_per_epoch divides by 64 although batch_size is 512,
    # and uses the full dataset length rather than the training split —
    # confirm this is intended.
    history = model.fit_generator(train_generator,
                                  steps_per_epoch=len(x_reshape) / 64,
                                  epochs=300,
                                  validation_data=test_generator,
                                  validation_steps=100,
                                  callbacks=[checkpoint])
|
from rest_framework.views import APIView
from .myCreator import create
import os
from django.http import HttpResponse
class ApplicationView(APIView):
    """Generate a PDF application for the requesting user and stream it back."""

    def get(self, request):
        """Build the PDF from query parameters, return it, then delete it.

        Expects query params: ``fullName``, ``group``, ``compensationName``
        and the flags ``dormitoryBox``/``scholarBox``/``workBox``/``docsBox``
        (each the string "true" or "false").
        """
        params = dict(request.query_params)
        userId = request.user
        # BUG FIX: the original compared type(userId) to the *string* 'str',
        # which is never equal, so the branch always ran. Use isinstance to
        # express the intended "not already a username string" check.
        if not isinstance(userId, str):
            userId = "AnonymousUser"
        create(userId,
               params['fullName'][0],
               params['group'][0],
               params['compensationName'][0],
               params['dormitoryBox'][0] == "true",
               params['scholarBox'][0] == "true",
               params['workBox'][0] == "true",
               params['docsBox'][0] == "true")
        module_dir = os.path.dirname(__file__)  # directory containing this module
        returnFileName = "your_application_" + userId + ".pdf"
        file_path = os.path.join(module_dir, returnFileName)
        # Read the generated PDF fully (closing the handle), then remove it so
        # temporary files do not accumulate.
        with open(file_path, 'rb') as fh:
            response = HttpResponse(fh.read(), content_type="application/pdf")
        response['Content-Disposition'] = 'inline; filename=' + os.path.basename(file_path)
        os.remove(file_path)
        return response
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Create the ``Offer`` model linking a student profile to a teacher profile."""

    # Must run after the profile models and the previous relationships
    # migration exist.
    dependencies = [
        ('authentication', '0012_auto_20150716_0156'),
        ('relationships', '0005_comments'),
    ]
    operations = [
        migrations.CreateModel(
            name='Offer',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # Creation timestamp, set automatically on insert.
                ('date', models.DateTimeField(auto_now_add=True)),
                ('student', models.ForeignKey(to='authentication.StudentProfile')),
                ('teacher', models.ForeignKey(to='authentication.TeacherProfile')),
            ],
        ),
    ]
|
import cv2
import numpy as np
import pyautogui
import sudoku99
left = 731
top = 120
width = 82
def read_img(image):
    """Recognize the sudoku board from a screenshot by template matching.

    :param image: PIL image of the full screen.
    :return: 9x9 numpy integer array of recognized digits (0 = blank cell).
    """
    # FIX: np.int was deprecated and removed in NumPy 1.24; use plain int.
    sudoku = np.zeros([9, 9], dtype=int)
    img_gray = np.array(image.convert('L'))
    for digit in range(1, 10):
        template = cv2.imread('img/{}.png'.format(digit), 0)
        h, w = template.shape[:2]
        res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
        threshold = 0.95
        loc = np.where(res >= threshold)
        # Seed with an off-screen sentinel so the dedup loop always has a
        # point to compare against; it is deleted afterwards.
        out_pts = [(-30, -30)]
        for pt in zip(*loc[::-1]):
            # Collapse multiple detections within 30px (Manhattan distance)
            # of an already-accepted point into one.
            add_flag = True
            for out_pt in out_pts:
                if abs(pt[0] - out_pt[0]) + abs(pt[1] - out_pt[1]) < 30:
                    add_flag = False
                    break
            if add_flag:
                out_pts.append(pt)
        del out_pts[0]  # drop the sentinel
        for pt in out_pts:
            # Map pixel coordinates to board row/col using the module-level
            # grid geometry (left, top, width).
            sudoku[(pt[1] - top) // width][(pt[0] - left) // width] = digit
            cv2.rectangle(img_gray, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2)
    return sudoku
def complete_sudoku(original_sudoku, sudoku):
    """Type the solved digits into the on-screen grid.

    Every cell that was blank in *original_sudoku* is clicked and filled
    with the corresponding value from *sudoku*; pre-filled cells are
    left untouched.
    """
    for row in range(9):
        for col in range(9):
            if original_sudoku[row][col] != 0:
                continue
            # Cell centers are offset by one grid width from the board origin.
            pyautogui.moveTo(left + width * (col + 1), top + width * (row + 1))
            pyautogui.click()
            pyautogui.press(str(sudoku[row][col]))
if __name__ == '__main__':
    # Find the sudoku icon on the task bar and click it to raise the game.
    taskbar = pyautogui.screenshot(region=(0, 1030, 1920, 50))
    taskbar_gray = np.array(taskbar.convert('L'))
    sudoku_ico = cv2.imread('img/sudoku.png', 0)
    loc = cv2.matchTemplate(taskbar_gray, sudoku_ico, cv2.TM_CCOEFF_NORMED)
    pt = np.unravel_index(loc.argmax(), loc.shape)
    pyautogui.moveTo(pt[1] + 25, 1050)
    pyautogui.click()
    pyautogui.sleep(0.5)
    # Dismiss the "resume" dialog when the game shows one.
    # FIX: compare to None with `is not`, not `!=`.
    pos = pyautogui.locateCenterOnScreen('img/resume.png', grayscale=False)
    if pos is not None:
        pyautogui.moveTo(pos)
        pyautogui.click()
        pyautogui.sleep(0.5)
    screen = pyautogui.screenshot()
    print('开始读取')
    original_sudoku = read_img(screen)
    # FIX: renamed from `sum`, which shadowed the builtin it was calling.
    total = sum(sum(original_sudoku))
    if total == 0:
        print("读取sudoku失败")
        exit()
    # Flatten the board into the list form sudoku99 expects.
    sudoku = original_sudoku.tolist()
    sudoku = [cell for row in sudoku for cell in row]
    pointList = sudoku99.initPoint(sudoku)
    sudoku99.showSudoku(sudoku)
    print('开始计算')
    p = pointList.pop()
    sudoku99.tryInsert(p, sudoku, pointList)
    print('计算完毕:')
    sudoku99.showSudoku(sudoku)
    print('自动填充')
    # Back to a 9x9 array so it can be diffed against the original board.
    sudoku = np.asarray(sudoku)
    sudoku.resize([9, 9])
    print(original_sudoku)
    print(sudoku)
    complete_sudoku(original_sudoku, sudoku)
    print('自动填充完毕')
|
# File: tv_shows_fxns.py
# Author: Joel Okpara
# Date: 3/28/2016
# Section: 04
# E-mail: joelo1@umbc.edu
# Description:
# This file contains python code that implements lab5
# (a TV show voting system) using functions to:
# 1) Get a choice
# 2) Find the name of the winner
STOP = 0
# getVote() returns a valid choice from the given list
# Input: showList, a list of strings (names of shows to vote on)
# Output: vote, an integer containing a choice between 0 and list length
# *reprompts user until valid choice is made*
# getVote() returns a valid choice from the given list
# Input: showList, a list of strings (names of shows to vote on)
# Output: vote, an integer containing a choice between 0 and list length
# *reprompts user until valid choice is made*
def getVote(showList):
    """Prompt until the user enters an integer in 0..len(showList)."""
    choice = int(input("Enter '0' to stop voting: "))
    # BUG FIX: the original used `and` (choice > len AND choice < 0), a
    # condition that can never be true, so invalid votes were accepted.
    # A vote is invalid when it is negative OR larger than the list.
    while choice > len(showList) or choice < 0:
        print("Invalid vote -- try again")
        choice = int(input("Enter '0' to stop voting: "))
    return choice
# getWinner() takes in a list of shows and votes and calculates the winner
# Input: showList, a list of strings (names of shows to vote on)
# votes, a list of integers (votes for each show)
# Output: whoWon, the name of the show that won with the most votes
# *in case of a tie, the first show seen in the list wins*
def getWinner(showList, votes):
    """Return the name of the show with the most votes.

    :param showList: list of show names
    :param votes: parallel list of vote counts
    :return: the winning show's name, or None when the lists are empty.
             In case of a tie the first show in the list wins.
    """
    print()
    if not votes:
        return None
    # list.index returns the FIRST occurrence of the maximum, so ties go
    # to the earlier show, as the lab specification requires.
    return showList[votes.index(max(votes))]
def main():
    """Run the TV-show voting loop, then report totals and the winner."""
    # initialize shows and votes lists, and choice variable
    shows = ["Daredevil", "Fargo",
             "Limitless", "Elementary", "Brooklyn 99",
             "Empire", "Supergirl"]
    votes = [0] * len(shows)
    choice = 1
    # print the voting number and show name (1-based for the user)
    for i in range(len(shows)):
        print(i + 1, "-", shows[i])
    # print the program greeting/instructions
    print("You and your friends are voting on a show to watch.")
    print("Which show would you like to vote for?")
    while choice != STOP:
        choice = getVote(shows)
        # BUG FIX: the original incremented votes[choice - 1] even when the
        # user entered the STOP sentinel (0), which silently added a vote
        # to the LAST show via votes[-1]. Only count real choices.
        if choice != STOP:
            votes[choice - 1] = votes[choice - 1] + 1
    # once done voting, print the results
    print("\nHere are the final votes:")
    for i in range(len(shows)):
        print(shows[i], "has\t", votes[i], "votes")
    # announce the winner (getWinner may be a stub returning None)
    winner = getWinner(shows, votes)
    if winner is not None:
        print(winner, "wins!")


main()
|
import win32gui
import win32api
import os
def getwindow(Title="SpotifyMainWindow"):
    """Return the handle of the top-level window whose class is *Title*."""
    return win32gui.FindWindow(Title, None)
def song_info():
    """Return the Spotify window title ("artist - song"), or None on failure.

    BUG FIX: the original swallowed the lookup error with a bare except and
    then returned the unbound local, raising UnboundLocalError instead of
    signalling failure; returning None gives callers a value they can test.
    """
    try:
        return win32gui.GetWindowText(getwindow())
    except Exception:
        return None
def artist():
    """Return the artist half of the window title, or a fallback message.

    Any failure (no window, unexpected title format) yields the message.
    """
    try:
        performer, _track = song_info().split("-", 1)
        return performer.strip()
    except Exception:
        # Typo fix in user-facing text: "noting" -> "nothing".
        return "There is nothing playing at this moment"
def song():
    """Return the song half of the window title, or a fallback message.

    Any failure (no window, unexpected title format) yields the message.
    """
    try:
        _performer, track = song_info().split("-", 1)
        return track.strip()
    except Exception:
        # Typo fix in user-facing text: "noting" -> "nothing".
        return "There is nothing playing at this moment"
|
# A reducer for the WordCount task in Hadoop Streaming: stdin delivers
# "<key>\t<count>" lines sorted by key; counts for equal adjacent keys are
# summed and each key is emitted exactly once.
# Sample Input:
# cogitare 1
# est 1
# est 1
# est 1
# militate 1
# potentia 1
# Scientia 1
# Vivere 1
# Vivere 1
# Sample Output:
# cogitare 1
# est 3
# militate 1
# potentia 1
# Scientia 1
# Vivere 2
import sys

# Running pair: the key currently being accumulated and its total so far.
# (`total` renamed from `sum`, which shadowed the builtin.)
(lastKey, total) = (None, 0)
for line in sys.stdin:
    (key, value) = line.strip().split("\t")
    if lastKey and lastKey != key:
        # Key changed: flush the finished key, start the new one.
        print(lastKey + '\t' + str(total))
        (lastKey, total) = (key, int(value))
    else:
        (lastKey, total) = (key, total + int(value))
# The loop never flushes the final key; emit it here.
if lastKey:
    print(lastKey + '\t' + str(total))
from karel.stanfordkarel import *
"""
File: MidpointKarel.py
----------------------
When you finish writing it, MidpointKarel should
leave a beeper on the corner closest to the center of 1st Street
(or either of the two central corners if 1st Street has an even
number of corners). Karel can put down additional beepers as it
looks for the midpoint, but must pick them up again before it
stops. The world may be of any size, but you are allowed to
assume that it is at least as tall as it is wide.
"""
def main():
    """Leave one beeper on the corner closest to the middle of 1st Street."""
    if front_is_clear():
        fill_whole_street_with_beepers()
        pick_beepers_at_the_ends()
        karel_move_to_the_middle_put_beeper()
    else:
        # if front of karel is blocked the world is one corner wide, so
        # that corner IS the midpoint: put a beeper and stop
        put_beeper()
# pre condition: karel is facing east
# post condition: karel is blocked by the wall facing east
def fill_whole_street_with_beepers():
if front_is_blocked():
put_beeper()
else:
move()
while front_is_clear():
put_beeper()
move()
put_beeper()
# pre condition: karel is blocked by the wall while facing east
# post condition: karel continuously pick beepers at the end corners when a beeper is present
# pre condition: karel is blocked by the wall while facing east
# post condition: karel continuously pick beepers at the end corners when a beeper is present
def pick_beepers_at_the_ends():
    """Repeatedly pick up the beepers at both ends, narrowing toward the middle."""
    turn_around()
    while beepers_present():
        # pick the beeper at the near end ...
        check_presence_of_beepers()
        # ... cross the remaining beeper run to the far end ...
        keep_moving()
        # ... and pick the beeper there too
        check_presence_of_beepers()
# pre: karel facing east when no beeper remains
# post: karel in the middle on top of beeper
def karel_move_to_the_middle_put_beeper():
if facing_east():
turn_around()
move()
if front_is_clear():
put_beeper()
if front_is_blocked():
turn_around()
move()
put_beeper()
# pre condition: karel check if beeper present
# post condition: karel pick beeper, otherwise move until it reaches a beeper
def check_presence_of_beepers():
if beepers_present():
pick_beeper()
move()
else:
move()
if beepers_present():
pick_beeper()
move()
# pre condition: karel picked beeper
# post condition: karel continue moving until it reaches an empty corner
def keep_moving():
while beepers_present():
move()
turn_around()
def turn_right():
    """Turn 90 degrees clockwise via three left turns."""
    turn_left()
    turn_left()
    turn_left()
def turn_around():
    """Reverse direction with two left turns."""
    for _ in range(2):
        turn_left()
# There is no need to edit code beyond this point
if __name__ == "__main__":
    # Start the Karel world and execute main() inside it.
    run_karel_program()
|
import requests
import time
#multi-process worker (translated from: 多进程)
if __name__ == '__main__':
    # Keep polling the local task queue; give up after 10 consecutive
    # empty replies.
    count = 10
    while True:
        if count <= 0:
            break
        response1 = requests.get("http://localhost:8080/joke/task_get")
        task_key = response1.content.decode("utf-8")
        print(task_key)
        if task_key == "":
            count = count - 1
        else:
            # Got a page number: reset the retry budget and fetch the page.
            count = 10
            time.sleep(0.2)
            print("##########################" + str(task_key))
            url = "http://jandan.net/duan/page-" + str(task_key) + "#comments"
            response = requests.get(url=url)
            # BUG FIX: the original never closed the output file; `with`
            # closes (and flushes) it deterministically.
            with open("htmls\\" + str(task_key) + ".txt", 'w', encoding="utf-8") as out:
                out.write(response.text)
|
# -*- test-case-name: mimic.test.test_auth -*-
"""
Defines get token, impersonation
"""
from __future__ import absolute_import, division, unicode_literals
import json
import time
import attr
from twisted.python.urlpath import URLPath
from mimic.canned_responses.auth import (
get_token,
get_endpoints,
format_timestamp,
impersonator_user_role)
from mimic.canned_responses.mimic_presets import get_presets
from mimic.core import MimicCore
from mimic.model.behaviors import make_behavior_api
from mimic.model.identity import (
APIKeyCredentials,
ImpersonationCredentials,
PasswordCredentials,
TokenCredentials)
from mimic.rest.mimicapp import MimicApp
from mimic.session import NonMatchingTenantError
from mimic.util.helper import (
invalid_resource,
seconds_to_timestamp,
json_from_request,
)
from mimic.model.behaviors import (
BehaviorRegistryCollection,
Criterion,
EventDescription,
regexp_predicate
)
authentication = EventDescription()
"""
Event refers to authenticating against Identity using a username/password,
username/api-key, token, or getting an impersonation token.
"""
@authentication.declare_criterion("username")
def username_criterion(value):
    """
    Build a Criterion matching the ``"username"`` attribute against the
    given regular-expression string.
    """
    predicate = regexp_predicate(value)
    return Criterion(name='username', predicate=predicate)
@authentication.declare_criterion("tenant_id")
def tenant_id_criterion(value):
    """
    Build a Criterion matching the ``"tenant_id"`` attribute against the
    given regular-expression string.
    """
    predicate = regexp_predicate(value)
    return Criterion(name='tenant_id', predicate=predicate)
@authentication.declare_default_behavior
def default_authentication_behavior(core, http_request, credentials):
    """
    Default behavior in response to a server creation. This will create
    a session for the tenant if one does not already exist, and return
    the auth token for that session. In the case of
    :class:`PasswordCredentials`, :class:`ApiKeyCredentials`, or
    :class:`TokenCredentials`, also returns the service catalog.
    :param core: An instance of :class:`mimic.core.MimicCore`
    :param http_request: A twisted http request/response object
    :param credentials: An `mimic.model.identity.ICredentials` provider
    Handles setting the response code and also
    :return: The response body for a default authentication request.
    """
    try:
        session = credentials.get_session(core.sessions)
    except NonMatchingTenantError as e:
        # Requested tenant does not match the session: respond 401 with a
        # message whose wording depends on the credential type used.
        http_request.setResponseCode(401)
        if type(credentials) == TokenCredentials:
            message = ("Token doesn't belong to Tenant with Id/Name: "
                       "'{0}'".format(e.desired_tenant))
        else:
            message = ("Tenant with Name/Id: '{0}' is not valid for "
                       "User '{1}' (id: '{2}')".format(
                           e.desired_tenant,
                           e.session.username,
                           e.session.user_id))
        return json.dumps({
            "unauthorized": {
                "code": 401,
                "message": message
            }
        })
    else:
        # Impersonation responses carry only the token, no service catalog.
        if type(credentials) == ImpersonationCredentials:
            return json.dumps({"access": {
                "token": {"id": credentials.impersonated_token,
                          "expires": format_timestamp(session.expires)}
            }})
        http_request.setResponseCode(200)
        prefix_map = {
            # map of entry to URI prefix for that entry
        }
        def lookup(entry):
            return prefix_map[entry]
        # Build the full token + service-catalog response; entries_for_tenant
        # fills prefix_map as a side effect while generating catalog entries.
        result = get_token(
            session.tenant_id,
            entry_generator=lambda tenant_id:
                list(core.entries_for_tenant(
                    session.tenant_id, prefix_map,
                    base_uri_from_request(http_request))),
            prefix_for_endpoint=lookup,
            response_token=session.token,
            response_user_id=session.user_id,
            response_user_name=session.username,
        )
        return json.dumps(result)
@authentication.declare_behavior_creator("fail")
def authenticate_failure_behavior(parameters):
    """
    Create a failing behavior for authentication.
    Takes three parameters:
    ``"code"``, an integer describing the HTTP response code, and
    ``"message"``, a string describing a textual message.
    ``"type"``, a string representing what type of error message it is
    If ``type`` is "string", the message is just returned as the string body.
    Otherwise, the following JSON body will be synthesized (as per the
    canonical Nova error format):
    ```
    {
        <type>: {
            "message": <message>,
            "code": <code>
        }
    }
    The default type is unauthorized, the default code is 401, and the
    default message is
    "Unable to authenticate user with credentials provided."
    """
    def _fail(core, http_request, credentials):
        # Resolve all parameters up front, falling back to the documented
        # defaults, then shape the body according to the requested type.
        code = parameters.get("code", 401)
        message = parameters.get(
            "message",
            "Unable to authenticate user with credentials provided.")
        error_type = parameters.get("type", "unauthorized")
        http_request.setResponseCode(code)
        if error_type == "string":
            return message
        return json.dumps({
            error_type: {
                "message": message,
                "code": code
            }
        })
    return _fail
@attr.s(hash=False)
class AuthApi(object):
    """
    Rest endpoints for mocked Auth api.
    :ivar core: an instance of :class:`mimic.core.MimicCore`
    :ivar registry_collection: an instance of
        :class:`mimic.model.behaviors.BehaviorRegistryCollection`
    """
    core = attr.ib(validator=attr.validators.instance_of(MimicCore))
    registry_collection = attr.ib(
        validator=attr.validators.instance_of(BehaviorRegistryCollection))
    app = MimicApp()

    @app.route('/v2.0/tokens', methods=['POST'])
    def get_token_and_service_catalog(self, request):
        """
        Return a service catalog consisting of all plugin endpoints and an api
        token.
        """
        try:
            content = json_from_request(request)
        except ValueError:
            # Unparseable JSON: fall through to the 400 response below.
            pass
        else:
            # Try each supported credential type until the request body
            # contains its key.
            for cred_type in (PasswordCredentials, APIKeyCredentials,
                              TokenCredentials):
                if cred_type.type_key in content['auth']:
                    try:
                        cred = cred_type.from_json(content)
                    except (KeyError, TypeError):
                        # Right key, wrong shape: treat as invalid request.
                        pass
                    else:
                        # Dispatch to whichever behavior (default or injected
                        # via the control API) matches these credentials.
                        registry = self.registry_collection.registry_by_event(
                            authentication)
                        behavior = registry.behavior_for_attributes(
                            attr.asdict(cred))
                        return behavior(self.core, request, cred)
        request.setResponseCode(400)
        return json.dumps(invalid_resource("Invalid JSON request body"))

    @app.route('/v1.1/mosso/<string:tenant_id>', methods=['GET'])
    def get_username(self, request, tenant_id):
        """
        Returns response with random usernames.
        """
        # NOTE(review): responds 301 with a body rather than redirecting —
        # presumably matching the real endpoint; confirm.
        request.setResponseCode(301)
        session = self.core.sessions.session_for_tenant_id(tenant_id)
        return json.dumps(dict(user=dict(id=session.username)))

    @app.route('/v2.0/users', methods=['GET'])
    def get_users_details(self, request):
        """
        Returns response with detailed account information about each user
        including email, name, user ID, account configuration and status
        information.
        """
        # Look up (or lazily create) a session keyed by the requested name;
        # the password "test" is a fixed mimic-internal placeholder.
        username = request.args.get(b"name")[0].decode("utf-8")
        session = self.core.sessions.session_for_username_password(
            username, "test")
        return json.dumps(dict(user={
            "RAX-AUTH:domainId": session.tenant_id,
            "id": session.user_id,
            "enabled": True,
            "username": session.username,
            "email": "thisisrandom@email.com",
            "RAX-AUTH:defaultRegion": "ORD",
            "created": seconds_to_timestamp(time.time()),
            "updated": seconds_to_timestamp(time.time())
        }))

    @app.route('/v2.0/users/<string:user_id>/OS-KSADM/credentials/RAX-KSKEY:apiKeyCredentials',
               methods=['GET'])
    def rax_kskey_apikeycredentials(self, request, user_id):
        """
        Support, such as it is, for the apiKeysCredentials call.
        """
        if user_id in self.core.sessions._userid_to_session:
            username = self.core.sessions._userid_to_session[user_id].username
            apikey = '7fc56270e7a70fa81a5935b72eacbe29'  # echo -n A | md5sum
            return json.dumps({'RAX-KSKEY:apiKeyCredentials': {'username': username,
                                                               'apiKey': apikey}})
        else:
            request.setResponseCode(404)
            return json.dumps({'itemNotFound':
                              {'code': 404, 'message': 'User ' + user_id + ' not found'}})

    @app.route('/v2.0/RAX-AUTH/impersonation-tokens', methods=['POST'])
    def get_impersonation_token(self, request):
        """
        Return a token id with expiration.
        """
        request.setResponseCode(200)
        try:
            content = json_from_request(request)
        except ValueError:
            request.setResponseCode(400)
            return json.dumps(invalid_resource("Invalid JSON request body"))
        # The impersonator's own token arrives in the X-Auth-Token header.
        x_auth_token = request.getHeader(b"x-auth-token")
        if x_auth_token is not None:
            x_auth_token = x_auth_token.decode("utf-8")
        cred = ImpersonationCredentials.from_json(content, x_auth_token)
        registry = self.registry_collection.registry_by_event(authentication)
        behavior = registry.behavior_for_attributes({
            "token": cred.impersonator_token,
            "username": cred.impersonated_username
        })
        return behavior(self.core, request, cred)

    @app.route('/v2.0/tokens/<string:token_id>', methods=['GET'])
    def validate_token(self, request, token_id):
        """
        Creates a new session for the given tenant_id and token_id
        and always returns response code 200.
        Docs: http://developer.openstack.org/api-ref-identity-v2.html#admin-tokens
        """
        request.setResponseCode(200)
        # Optional ?belongsTo=<tenant> narrows the session lookup.
        tenant_id = request.args.get(b'belongsTo')
        if tenant_id is not None:
            tenant_id = tenant_id[0].decode("utf-8")
        session = self.core.sessions.session_for_tenant_id(tenant_id, token_id)
        response = get_token(
            session.tenant_id,
            response_token=session.token,
            response_user_id=session.user_id,
            response_user_name=session.username,
        )
        # Attach impersonator info when this token was minted by impersonation.
        if session.impersonator_session_for_token(token_id) is not None:
            impersonator_session = session.impersonator_session_for_token(token_id)
            response["access"]["RAX-AUTH:impersonator"] = impersonator_user_role(
                impersonator_session.user_id,
                impersonator_session.username)
        # Preset token lists (from mimic_presets) force specific canned
        # responses for testing; each branch below overrides parts of the
        # standard response.
        if token_id in get_presets["identity"]["token_fail_to_auth"]:
            request.setResponseCode(401)
            return json.dumps({'itemNotFound':
                              {'code': 401, 'message': 'Invalid auth token'}})
        imp_token = get_presets["identity"]["maas_admin_roles"]
        racker_token = get_presets["identity"]["racker_token"]
        if token_id in imp_token:
            response["access"]["RAX-AUTH:impersonator"] = {
                "id": response["access"]["user"]["id"],
                "name": response["access"]["user"]["name"],
                "roles": [{"id": "123",
                           "name": "monitoring:service-admin"},
                          {"id": "234",
                           "name": "object-store:admin"}]}
        if token_id in racker_token:
            response["access"]["RAX-AUTH:impersonator"] = {
                "id": response["access"]["user"]["id"],
                "name": response["access"]["user"]["name"],
                "roles": [{"id": "9",
                           "name": "Racker"}]}
        # Tenant-based role presets: observer / creator / admin.
        if tenant_id in get_presets["identity"]["observer_role"]:
            response["access"]["user"]["roles"] = [
                {"id": "observer",
                 "description": "Global Observer Role.",
                 "name": "observer"}]
        if tenant_id in get_presets["identity"]["creator_role"]:
            response["access"]["user"]["roles"] = [
                {"id": "creator",
                 "description": "Global Creator Role.",
                 "name": "creator"}]
        if tenant_id in get_presets["identity"]["admin_role"]:
            response["access"]["user"]["roles"] = [
                {"id": "admin",
                 "description": "Global Admin Role.",
                 "name": "admin"},
                {"id": "observer",
                 "description": "Global Observer Role.",
                 "name": "observer"}]
        # Canned responses to be removed ...
        if token_id in get_presets["identity"]["non_dedicated_observer"]:
            response["access"]["token"]["tenant"] = {
                "id": "135790",
                "name": "135790",
            }
            response["access"]["user"] = {
                "id": "12",
                "name": "OneTwo",
                "roles": [{"id": "1",
                           "name": "monitoring:observer",
                           "description": "Monitoring Observer"}]
            }
        if token_id in get_presets["identity"]["non_dedicated_admin"]:
            response["access"]["token"]["tenant"] = {
                "id": "135790",
                "name": "135790",
            }
            response["access"]["user"] = {
                "id": "34",
                "name": "ThreeFour",
                "roles": [{"id": "1",
                           "name": "monitoring:admin",
                           "description": "Monitoring Admin"},
                          {"id": "2",
                           "name": "admin",
                           "description": "Admin"}]
            }
        if token_id in get_presets["identity"]["non_dedicated_impersonator"]:
            response["access"]["token"]["tenant"] = {
                "id": "135790",
                "name": "135790",
            }
            response["access"]["user"] = {
                "id": "34",
                "name": "ThreeFour",
                "roles": [{"id": "1",
                           "name": "identity:nobody",
                           "description": "Nobody"}]
            }
            response["access"]["RAX-AUTH:impersonator"] = {
                "id": response["access"]["user"]["id"],
                "name": response["access"]["user"]["name"],
                "roles": [{"id": "1",
                           "name": "monitoring:service-admin"},
                          {"id": "2",
                           "name": "object-store:admin"}]
            }
        if token_id in get_presets["identity"]["non_dedicated_racker"]:
            response["access"]["token"]["tenant"] = {
                "id": "135790",
                "name": "135790",
            }
            response["access"]["user"] = {
                "id": "34",
                "name": "ThreeFour",
                "roles": [{"id": "1",
                           "name": "identity:nobody",
                           "description": "Nobody"}]
            }
            response["access"]["RAX-AUTH:impersonator"] = {
                "id": response["access"]["user"]["id"],
                "name": response["access"]["user"]["name"],
                "roles": [{"id": "1",
                           "name": "Racker"}]
            }
        if token_id in get_presets["identity"]["dedicated_full_device_permission_holder"]:
            response["access"]["token"]["tenant"] = {
                "id": "hybrid:123456",
                "name": "hybrid:123456",
            }
            response["access"]["user"] = {
                "id": "12",
                "name": "HybridOneTwo",
                "roles": [{"id": "1",
                           "name": "monitoring:observer",
                           "tenantId": "hybrid:123456"}],
                "RAX-AUTH:contactId": "12"
            }
        if token_id in get_presets["identity"]["dedicated_account_permission_holder"]:
            response["access"]["token"]["tenant"] = {
                "id": "hybrid:123456",
                "name": "hybrid:123456",
            }
            response["access"]["user"] = {
                "id": "34",
                "name": "HybridThreeFour",
                "roles": [{"id": "1",
                           "name": "monitoring:creator",
                           "description": "Monitoring Creator"},
                          {"id": "2",
                           "name": "creator",
                           "description": "Creator"}],
                "RAX-AUTH:contactId": "34"
            }
        if token_id in get_presets["identity"]["dedicated_limited_device_permission_holder"]:
            response["access"]["token"]["tenant"] = {
                "id": "hybrid:123456",
                "name": "hybrid:123456",
            }
            response["access"]["user"] = {
                "id": "56",
                "name": "HybridFiveSix",
                "roles": [{"id": "1",
                           "name": "monitoring:observer",
                           "description": "Monitoring Observer"},
                          {"id": "2",
                           "name": "observer",
                           "description": "Observer"}],
                "RAX-AUTH:contactId": "56"
            }
        if token_id in get_presets["identity"]["dedicated_racker"]:
            response["access"]["token"]["tenant"] = {
                "id": "hybrid:123456",
                "name": "hybrid:123456",
            }
            response["access"]["user"] = {
                "id": "12",
                "name": "HybridOneTwo",
                "roles": [{"id": "1",
                           "name": "identity:nobody",
                           "description": "Nobody"}],
                "RAX-AUTH:contactId": "12"
            }
            response["access"]["RAX-AUTH:impersonator"] = {
                "id": response["access"]["user"]["id"],
                "name": response["access"]["user"]["name"],
                "roles": [{"id": "1",
                           "name": "Racker"}]
            }
        if token_id in get_presets["identity"]["dedicated_impersonator"]:
            response["access"]["token"]["tenant"] = {
                "id": "hybrid:123456",
                "name": "hybrid:123456",
            }
            response["access"]["user"] = {
                "id": "34",
                "name": "HybridThreeFour",
                "roles": [{"id": "1",
                           "name": "identity:nobody",
                           "description": "Nobody"}],
                "RAX-AUTH:contactId": "34"
            }
            response["access"]["RAX-AUTH:impersonator"] = {
                "id": response["access"]["user"]["id"],
                "name": response["access"]["user"]["name"],
                "roles": [{"id": "1",
                           "name": "monitoring:service-admin"}]
            }
        if token_id in get_presets["identity"]["dedicated_non_permission_holder"]:
            response["access"]["token"]["tenant"] = {
                "id": "hybrid:123456",
                "name": "hybrid:123456",
            }
            response["access"]["user"] = {
                "id": "78",
                "name": "HybridSevenEight",
                "roles": [{"id": "1",
                           "name": "identity:user-admin",
                           "description": "User admin"}],
                "RAX-AUTH:contactId": "78"
            }
        if token_id in get_presets["identity"]["dedicated_quasi_user_impersonator"]:
            response["access"]["token"]["tenant"] = {
                "id": "hybrid:123456",
                "name": "hybrid:123456",
            }
            response["access"]["user"] = {
                "id": "90",
                "name": "HybridNineZero",
                "roles": [{"id": "1",
                           "name": "identity:user-admin",
                           "description": "Admin"},
                          {"id": "3",
                           "name": "hybridRole",
                           "description": "Hybrid Admin",
                           "tenantId": "hybrid:123456"}]
            }
            response["access"]["RAX-AUTH:impersonator"] = {
                "id": response["access"]["user"]["id"],
                "name": response["access"]["user"]["name"],
                "roles": [{"id": "1",
                           "name": "monitoring:service-admin"}]
            }
        return json.dumps(response)

    @app.route('/v2.0/tokens/<string:token_id>/endpoints', methods=['GET'])
    def get_endpoints_for_token(self, request, token_id):
        """
        Return a service catalog consisting of nova and load balancer mocked
        endpoints.
        """
        # FIXME: TEST
        request.setResponseCode(200)
        # entries_for_tenant fills prefix_map as a side effect while the
        # catalog entries are generated.
        prefix_map = {}
        session = self.core.sessions.session_for_token(token_id)
        return json.dumps(get_endpoints(
            session.tenant_id,
            entry_generator=lambda tenant_id: list(
                self.core.entries_for_tenant(tenant_id, prefix_map,
                                             base_uri_from_request(request))),
            prefix_for_endpoint=prefix_map.get)
        )

    @app.route('/v2.0/tenants', methods=['GET'])
    def list_tenants(self, request):
        """
        List all tenants for the specified auth token.
        The token for this call is specified in the X-Auth-Token header,
        like using the services in the service catalog. Mimic supports
        only one tenant per session, so the number of listed tenants is
        always 1 if the call succeeds.
        For more information about this call, refer to the `Rackspace Cloud
        Identity Developer Guide
        <https://developer.rackspace.com/docs/cloud-identity/v2/developer-guide/#list-tenants>`_
        """
        try:
            sess = self.core.sessions.existing_session_for_token(
                request.getHeader(b'x-auth-token').decode('utf-8'))
            return json.dumps({'tenants': [{'id': sess.tenant_id,
                                            'name': sess.tenant_id,
                                            'enabled': True}]})
        except KeyError:
            # Unknown/expired token: respond 401 with an explanatory body.
            request.setResponseCode(401)
            return json.dumps({'unauthorized': {
                'code': 401,
                'message': ("No valid token provided. Please use the 'X-Auth-Token'"
                            " header with a valid token.")}})
def base_uri_from_request(request):
    """
    Given a request, return the base URI of the request
    :param request: a twisted HTTP request
    :type request: :class:`twisted.web.http.Request`
    :return: the base uri the request was trying to access
    :rtype: ``str``
    """
    root = URLPath.fromRequest(request).click(b'/')
    return str(root)
AuthControlApiBehaviors = make_behavior_api({'auth': authentication})
"""
Handlers for CRUD operations on authentication behaviors.
:ivar registry_collection: an instance of
:class:`mimic.model.behaviors.BehaviorRegistryCollection`
"""
|
#!/usr/bin/python
#coding=utf-8
from ipfunc import *
import os
re_strong = re.compile(".*strong class=.*")
re_strongb = re.compile("strong.*strong")
url_1 = 'http://ip.chinaz.com/?IP='
def Get_LocalInfo(): #Get Localhost Infomation
    # Scrape ip.chinaz.com for this host's public IP and location by
    # picking fields out of the "strong class=" lines of the page.
    # Returns a one-element list with a formatted header + values string.
    LocalInfo = []
    f = urllib2.urlopen(url_1).read()
    for i in re_strong.findall(f):
        i = i.lstrip()
        # Field positions are tied to the page's exact markup — fragile;
        # breaks silently if chinaz changes its HTML. TODO confirm still valid.
        local_ip = re.split("[<>]",i.split()[3])[1]
        local_place = re.split("[<>]",i.split("strong")[3])[1]
        # local_operation = re.split("[<>]",i.split("strong")[5])[1]
        # local_browers = re.split("[<>]",i.split("strong")[7])[1]
        LocalInfo.append("Your_IP"+11*' ' + "You_Place" + "\n%-17s %s" %(local_ip, local_place))
    return LocalInfo
def Get_RequestInfo(url):    #Get Request Infomation
    # Query ip.chinaz.com for ``url`` (an IP or a domain name) and scrape
    # the resolved IP and location from the page's <strong> tags.
    # Python 2 / urllib2 code; relies on the page's exact HTML layout.
    Requests = []
    f = urllib2.urlopen(url_1 + url).read()
    For_Time = 0
    for i in re_strong.findall(f):
        For_Time = For_Time +1
        i = i.lstrip()
        if For_Time == 1:
            # First match is the visitor's own info block -- skip it.
            pass
        elif re.findall("找不到请求的类型的数据",i):
            # Site reports "no data found for the requested type".
            #print i
            Requests.append("Some Error Happend, Check It:%s"% url.rstrip('\n'))
        else:
            query_ip = i.split()[2]
            domain_name = url.rstrip('\n')
            query_place = re.split("[<>]",i)[6]
            Requests.append("%-20s%-23s %s" %(query_ip,domain_name,query_place))
    return Requests
def Get_NetworkInfo(IP):
    # Expand a network address into every address from the network address
    # through the broadcast address (inclusive) and query each in turn.
    # Get_NetworkAddress/Get_BoardIP/Transfer_LongInt2Decimal_IP come from
    # the ``ipfunc`` star import.
    NetworkInfos = []
    for Decimal_IP in range(Get_NetworkAddress(IP), Get_BoardIP(IP)+1):
        NetworkInfos.append(Get_RequestInfo(Transfer_LongInt2Decimal_IP(Decimal_IP)))
    return NetworkInfos
def flatten(nested):
    """Yield every leaf of an arbitrarily nested iterable, depth-first.

    Strings are treated as atomic leaves (never split into characters),
    as is any other non-iterable object.
    """
    # Probe with ``nested + ''``: only string-like objects support it.
    stringlike = True
    try:
        nested + ''
    except TypeError:
        stringlike = False
    if stringlike:
        yield nested
        return
    try:
        for item in nested:
            for leaf in flatten(item):
                yield leaf
    except TypeError:
        # Not iterable at all: the object is itself a leaf.
        yield nested
def GetAll(Str):
    """Dispatch to a whole-network query or a single-host query and
    return the results as one flat list of strings."""
    if Is_NetworkAddr(Str):
        infos = Get_NetworkInfo(Str)
    else:
        infos = Get_RequestInfo(Str)
    return list(flatten(infos))
def Return2Tk(input):
    # Build the list of display lines for the Tk UI: a header with the
    # local host's info, then one line per query result.  ``input`` may be
    # an IP/domain, or a ``*.list`` file with one query per line; an empty
    # string yields the usage text instead.
    ReturnResult = []
    if input != '':
        for infs in Get_LocalInfo():
            ReturnResult.append(infs)
        ReturnResult.append("-"*50)
        ReturnResult.append("Query_Name: %s" % input)
        ReturnResult.append("Query_IP" + 12*' ' + "Domain_Name" + 15*' ' + "Query_Place")
        if input[-5:] == '.list' and os.path.isfile(input):
            # Batch mode: query every line of the .list file.
            f = open(input)
            l = f.readline()
            while l:
# print GetAll(l)
                for infs in GetAll(l):
                    ReturnResult.append(infs)
                l = f.readline()
        elif input[-5:] == '.list' and os.path.isfile(input)==False:
            ReturnResult.append("File no exist.")
# exit(10)
        else:
            # Single query (IP, domain, or network address).
            #GetAll(sys.argv[1])
            for infs in GetAll(input):
                ReturnResult.append(infs)
    else:
        for tips in Usage():
            ReturnResult.append(tips)
    return ReturnResult
# CLI entry point: exactly one argument (IP/domain/.list file) runs the
# query; anything else prints usage.  Python 2 print statements.
if __name__ == '__main__':
    if len(sys.argv) == 2:
        for i in Return2Tk(sys.argv[1]):
            print i
    else:
        for i in Usage():
            print i
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 10/02/2018 5:06 PM
# @Author : Lee
# @File : SST.py
# @Software: PyCharm
class SST(object):
    """A sequential-search symbol table backed by a singly linked list.

    Keys are compared with ``==``.  ``head`` points at the most recently
    inserted node and ``count`` tracks the number of stored pairs.
    """

    def __init__(self, head=None, count=0):
        self.head = head    # first Node in the list, or None when empty
        self.count = count  # number of key/value pairs stored

    def size(self):
        """Return the number of key/value pairs in the table."""
        return self.count

    def is_empty(self):
        """Return True when the table holds no pairs."""
        return self.count == 0

    def insert(self, key, value):
        """Insert ``key``/``value``; an existing key has its value updated.

        New keys are prepended, becoming the new head node.
        """
        node = self.head
        while node is not None:
            if key == node.key:
                node.value = value
                return
            node = node.next
        new_node = Node(key, value)
        new_node.next = self.head
        self.head = new_node
        self.count += 1

    def contain(self, key):
        """Return True when ``key`` is present in the table."""
        node = self.head
        while node is not None:
            if key == node.key:
                return True
            node = node.next
        return False

    def search(self, key):
        """Return the value stored under ``key``, or None when absent."""
        node = self.head
        while node is not None:
            if key == node.key:
                return node.value
            node = node.next
        return None

    def remove(self, key):
        """Remove the pair stored under ``key``; a missing key is a no-op.

        Handles the empty table and the head node as special cases.
        """
        if self.head is None:
            return
        if key == self.head.key:
            del_head = self.head
            self.head = self.head.next
            del_head.next = None
            self.count -= 1
            return
        node = self.head
        while node.next is not None and node.next.key != key:
            node = node.next
        # BUG FIX: the original tested ``node.next is None`` here, which
        # dereferenced None when the key was absent (AttributeError) and
        # never unlinked a node that WAS found.  Unlink only on a match.
        if node.next is not None:
            del_node = node.next
            node.next = del_node.next
            del_node.next = None
            self.count -= 1
        return
class Node(object):
    """A single linked-list cell holding one key/value pair."""
    def __init__(self, key, value):
        self.key = key      # lookup key
        self.value = value  # payload associated with the key
        self.next = None    # following Node, or None at the list tail
|
"""
什么是协程?
又称为微线程、纤程,英文名:Coroutine
import asyncio
async def main():
print("hello")
await asyncio.sleep(1)
print("world")
asyncio.run(main())
通过async/await语法进行声明,是编写异步应用的推荐方式
使用asunc修饰要运行的函数,在运行协程函数时,需要await。
可以用run或者creat_task启动微线程
"""
import asyncio,time
async def say_after(delay,what):
    """Sleep ``delay`` seconds without blocking the event loop, then
    print ``what``."""
    await asyncio.sleep(delay)
    print(what)
async def fun():
    # Sequential demo: each say_after is awaited in turn, so the total
    # wall time is the sum of the delays (~3 s).
    print(f"开始时间:{time.strftime('%X')}")
    await say_after(1,"hello")
    await say_after(2,"world")
    print(f"执行结束:{time.strftime('%X')}")
asyncio.run(fun())
async def myfun():
    # Concurrent demo: create_task schedules both coroutines right away,
    # so their sleeps overlap and the total is ~2 s instead of 3.
    task1 = asyncio.create_task(
        say_after(1,"hello")
    )
    task2 = asyncio.create_task(
        say_after(2,"world")
    )
    print(f"开始时间:{time.strftime('%X')}")
    await task1
    await task2
    print(f"执行结束:{time.strftime('%X')}")
asyncio.run(myfun())
import numpy as np
import cv2
# Demo: draw primitives and text on a blank canvas with OpenCV, then show
# the result until a key is pressed.
# flag for imread 0 is grayscale, 1 is color, -1 is alpha channel
#img = cv2.imread('lena.jpg', 1)
# create image with numpy zeros method: 512x512, 3 channels, 8-bit (black)
img = np.zeros([512, 512, 3], np.uint8)
# draw a line (colors are BGR tuples)
img = cv2.line(img, (0, 0), (255, 255), (147, 96, 44), 10)  # 44, 96, 147
img = cv2.arrowedLine(img, (0, 255), (255, 255), (255, 0, 0), 10)
# draw a rectangle
# top left (x1,y1)-> pt1, low right (x2,y2)-> pt2
# thickness -1 set filled color
img = cv2.rectangle(img, (384, 0), (510, 128), (0, 0, 255), -1)
# draw a filled circle: center, radius, color
img = cv2.circle(img, (447, 63), 63, (0, 255, 0), -1)
# text: position, font, scale, color, thickness, anti-aliased
font = cv2.FONT_HERSHEY_SIMPLEX
img = cv2.putText(img, 'OpenCv', (10, 500), font, 4, (0, 255, 255), 10, cv2.LINE_AA)
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
import math
def makeChange(cents):
    """Break ``cents`` into the fewest standard US denominations (greedy).

    Args:
        cents: a non-negative integer amount in cents.

    Returns:
        A dict mapping denomination names ("dollars", "half-dollars",
        "quarters", "dimes", "nickels", "pennies") to integer counts.
        Zero counts are omitted, so makeChange(0) returns {}.
    """
    # Denominations in descending value; greedy is optimal for this coin
    # system.  Replaces the original's repeated divide/floor/mod blocks
    # (and stray semicolons) with one divmod per denomination.
    denominations = (
        ("dollars", 100),
        ("half-dollars", 50),
        ("quarters", 25),
        ("dimes", 10),
        ("nickels", 5),
        ("pennies", 1),
    )
    coins = {}
    for name, value in denominations:
        count, cents = divmod(cents, value)
        if count >= 1:
            coins[name] = count
    return coins
# Demo: print the greedy coin breakdown for a few sample amounts.
print(makeChange(45))
print(makeChange(1231233))
print(makeChange(65423567))
print(makeChange(10000))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import mysql.connector
class Edition():
    """Read-only access to the ``editionURL`` table of the ``magic`` DB."""

    def __init__(self):
        self.id = 0     # edition identifier (not populated by all())
        self.name = ""  # edition name (not populated by all())

    def all(self):
        """Return every (idEdition, URL) row, or False on any DB error.

        BUG FIX: the original placed ``conn.close()`` after ``return``,
        where it could never run; the connection is now closed in a
        ``finally`` block on both the success and error paths.
        """
        conn = None
        try:
            conn = mysql.connector.connect(host="localhost", user="root",
                                           password="magicpswd", database="magic")
            cursor = conn.cursor()
            cursor.execute("""SELECT idEdition,URL FROM editionURL""")
            rows = cursor.fetchall()
            return rows
        except Exception as e:
            print("Erreur")
            print(e)  # parenthesized so this works on both Python 2 and 3
            return False
        finally:
            if conn is not None:
                conn.close()
|
# Copyright 2022 Pulser Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Special register layouts defined for convenience."""
from __future__ import annotations
from typing import Any, cast
import pulser.register._patterns as patterns
from pulser.json.utils import obj_to_dict
from pulser.register import Register
from pulser.register.register_layout import RegisterLayout
class SquareLatticeLayout(RegisterLayout):
    """A RegisterLayout with a square lattice pattern in a rectangular shape.

    Args:
        rows: The number of rows of traps.
        columns: The number of columns of traps.
        spacing: The distance between neighbouring traps (in µm).
    """

    def __init__(self, rows: int, columns: int, spacing: float):
        """Initializes a SquareLatticeLayout."""
        self._rows = int(rows)
        self._columns = int(columns)
        self._spacing = float(spacing)
        super().__init__(
            patterns.square_rect(self._rows, self._columns) * self._spacing,
            slug=(
                f"SquareLatticeLayout({self._rows}x{self._columns}, "
                f"{self._spacing}µm)"
            ),
        )

    def square_register(self, side: int, prefix: str = "q") -> Register:
        """Defines a register with a square shape.

        Args:
            side: The length of the square's side, in number of atoms.
            prefix: The prefix for the qubit ids. Each qubit ID starts
                with the prefix, followed by an int from 0 to N-1
                (e.g. prefix='q' -> IDs: 'q0', 'q1', 'q2', ...).

        Returns:
            The register instance created from this layout.
        """
        # A square is the rectangular case with equal sides.
        return self.rectangular_register(side, side, prefix=prefix)

    def rectangular_register(
        self,
        rows: int,
        columns: int,
        prefix: str = "q",
    ) -> Register:
        """Defines a register with a rectangular shape.

        Args:
            rows: The number of rows in the register.
            columns: The number of columns in the register.
            prefix: The prefix for the qubit ids. Each qubit ID starts
                with the prefix, followed by an int from 0 to N-1
                (e.g. prefix='q' -> IDs: 'q0', 'q1', 'q2', ...).

        Returns:
            The register instance created from this layout.
        """
        if rows > self._rows or columns > self._columns:
            raise ValueError(
                f"A '{rows}x{columns}' array doesn't fit a "
                f"{self._rows}x{self._columns} SquareLatticeLayout."
            )
        coords = patterns.square_rect(rows, columns) * self._spacing
        traps = self.get_traps_from_coordinates(*coords)
        ids = [f"{prefix}{n}" for n in range(len(traps))]
        return cast(Register, self.define_register(*traps, qubit_ids=ids))

    def _to_dict(self) -> dict[str, Any]:
        # Serialization helper: rebuild from (rows, columns, spacing).
        return obj_to_dict(self, self._rows, self._columns, self._spacing)
class TriangularLatticeLayout(RegisterLayout):
    """A RegisterLayout with a triangular lattice pattern in a hexagonal shape.

    Args:
        n_traps: The number of traps in the layout.
        spacing: The distance between neighbouring traps (in µm).
    """

    def __init__(self, n_traps: int, spacing: float):
        """Initializes a TriangularLatticeLayout."""
        self._spacing = float(spacing)
        super().__init__(
            patterns.triangular_hex(int(n_traps)) * self._spacing,
            slug=f"TriangularLatticeLayout({int(n_traps)}, {self._spacing}µm)",
        )

    def hexagonal_register(self, n_atoms: int, prefix: str = "q") -> Register:
        """Defines a register with an hexagonal shape.

        Args:
            n_atoms: The number of atoms in the register.
            prefix: The prefix for the qubit ids. Each qubit ID starts
                with the prefix, followed by an int from 0 to N-1
                (e.g. prefix='q' -> IDs: 'q0', 'q1', 'q2', ...).

        Returns:
            The register instance created from this layout.
        """
        if n_atoms > self.number_of_traps:
            raise ValueError(
                f"The desired register has more atoms ({n_atoms}) than there"
                " are traps in this TriangularLatticeLayout"
                f" ({self.number_of_traps})."
            )
        coords = patterns.triangular_hex(n_atoms) * self._spacing
        traps = self.get_traps_from_coordinates(*coords)
        ids = [f"{prefix}{n}" for n in range(len(traps))]
        return cast(Register, self.define_register(*traps, qubit_ids=ids))

    def rectangular_register(
        self, rows: int, atoms_per_row: int, prefix: str = "q"
    ) -> Register:
        """Defines a register with a rectangular shape.

        Args:
            rows: The number of rows in the register.
            atoms_per_row: The number of atoms in each row.
            prefix: The prefix for the qubit ids. Each qubit ID starts
                with the prefix, followed by an int from 0 to N-1
                (e.g. prefix='q' -> IDs: 'q0', 'q1', 'q2', ...).

        Returns:
            The register instance created from this layout.
        """
        if rows * atoms_per_row > self.number_of_traps:
            raise ValueError(
                f"A '{rows}x{atoms_per_row}' rectangular subset of a "
                "triangular lattice has more atoms than there are traps in "
                f"this TriangularLatticeLayout ({self.number_of_traps})."
            )
        coords = patterns.triangular_rect(rows, atoms_per_row) * self._spacing
        traps = self.get_traps_from_coordinates(*coords)
        ids = [f"{prefix}{n}" for n in range(len(traps))]
        return cast(Register, self.define_register(*traps, qubit_ids=ids))

    def _to_dict(self) -> dict[str, Any]:
        # Serialization helper: rebuild from (n_traps, spacing).
        return obj_to_dict(self, self.number_of_traps, self._spacing)
|
# Author: Lizhen Tan
import numpy as np
import pandas as pd
import scipy.stats as stats
import matplotlib.pyplot as plt
'''required functions for main program'''
def test_grades(grade_list):
# take a grade_list as input, assign numeric values to the letter grades (i.e. A = 5,
# B = 3, C = 1), then fit a linear line to the data, find the slope by using the scipy.stats.linregress package,
# if slope < 0,then grades are declinin (return -1); if slope > 0, then grades are imporoving (return 1);
# if slope = 0, then grades stay the same (return 0)
grades = {'A' : 5, 'B' : 3, 'C': 1}
numeric_grade = [grades[i] for i in grade_list]
x = np.arange(len(numeric_grade)) # create x-coordinates
slope, intercept = stats.linregress(x, numeric_grade)[:2]
if slope > 0:
return 1
elif slope < 0:
return -1
elif slope == 0:
return 0
class SingleRecordError(Exception):
    """Raised when a CAMIS id has only one grade record.

    BUG FIX: the original ``__str__`` body was ``pass`` and therefore
    returned None, making ``str(exc)`` raise TypeError; it now returns a
    real message.
    """
    def __str__(self):
        return ("Restaurant has only one grade record, "
                "can't determine improvement")
def test_restaurant_grades(data, camis_id):
    """Return the improvement verdict (1/-1/0) for one restaurant.

    Selects all GRADE entries for ``camis_id`` from ``data`` (a DataFrame
    with CAMIS and GRADE columns) and delegates to test_grades.  Returns
    None when there is a single record: the original's trailing
    ``raise SingleRecordError`` sat after ``return`` and was unreachable,
    and callers (all_restaurant_sum/sum_by_boro) rely on the None result,
    so the dead raise is simply removed.
    """
    grade_list = data['GRADE'][data['CAMIS'] == camis_id]
    if len(grade_list) > 1:
        return test_grades(grade_list)
    return None
def all_restaurant_sum(data):
    # Collect the improvement verdict (+1/-1/0; NaN for single-record
    # restaurants) for every unique CAMIS id and print the city-wide sum.
    # Python 2 code (print statement, dict.items() fed to DataFrame).
    camis_id = pd.unique(data.CAMIS.ravel()) # create an array to store unique camis_id
    camis_dic = {}
    for i in camis_id:
        camis_dic[i] = test_restaurant_grades(data,i)
    df = pd.DataFrame(camis_dic.items(), columns =['camis_id', 'restaurant_improvement']) #create a dataframe using the dictionary
# print "Count of all grades improvement in NYC is: \n", df.restaurant_improvement.dropna().value_counts()
    print "Sum of all grades improvement in NYC is: \n", df.restaurant_improvement.dropna().sum(), "\n"
# return df
def sum_by_boro(data):
    # Per-borough version of all_restaurant_sum: for each borough (the
    # 'Missing' placeholder excluded) gather every restaurant's verdict,
    # print the borough's sum, and return one concatenated DataFrame with
    # columns (camis_id, restaurant_improvement, BORO).
    # Python 2 code (print statement, dict.items() fed to DataFrame).
    boro_camis_id = {}
    frames = []
    for i in pd.unique(data['BORO'][data['BORO'] != 'Missing']):
        boro_camis_id[i] = pd.unique(data['CAMIS'][data['BORO'] == i])
        camis_dic = {}
        for j in boro_camis_id[i]:
            camis_dic[j] = test_restaurant_grades(data,j)
        df = pd.DataFrame(camis_dic.items(), columns =['camis_id', 'restaurant_improvement']) #create a dataframe using the dictionary
        df['BORO'] = pd.Series([i]*len(camis_dic))
        frames.append(df)
# print "Count of improvement in", i, "is \n",df.restaurant_improvement.dropna().value_counts()
        print "Sum of all grades improvement in %s is: \n" %i, df.restaurant_improvement.dropna().sum(),"\n"
    data_df = pd.concat(frames)
    return data_df
def graphs(data):
    # Save one bar chart of grade counts per year for the whole city, then
    # one per borough.  NOTE: mutates ``data`` in place by adding a 'year'
    # column derived from GRADE DATE.
    # plot histogram of the counts for grade improvement for New York City
    plt.figure()
    data['year'] = data['GRADE DATE'].map(lambda x:x.year)
    data_new = pd.DataFrame(data.groupby(['year','GRADE']).size().unstack())
    data_new.plot(kind = 'bar', figsize = (11.5, 8.5))
    plt.title('Grade improvement of all restaurants in New York City')
    plt.xlabel('Year')
    plt.ylabel('Count')
    plt.savefig('grade_improvement_nyc.pdf')
    plt.clf()
    for i in pd.unique(data['BORO'][data['BORO'] != 'Missing']):
        # plot histogram of counts for grade improvement for each borough
        data_new = pd.DataFrame(data[data['BORO'] == i].groupby(['year','GRADE']).size().unstack())
        data_new.plot(kind = 'bar',figsize = (11.5, 8.5))
        plt.title('Grade improvement of restaurants in ' + i)
        plt.xlabel('Year')
        plt.ylabel('Count')
        plt.savefig('grade_improvement_'+ i +'.pdf')
        plt.clf()
|
def battery_is_ok(temperature, soc, charge_rate):
    """Validate battery telemetry readings.

    Prints a message and returns False for the first reading that is out
    of range; returns True when all are acceptable.  Limits: temperature
    0..45, state of charge 20..80, charge rate at most 0.8.
    """
    checks = (
        (not 0 <= temperature <= 45, 'Temperature is out of range!'),
        (not 20 <= soc <= 80, 'State of Charge is out of range!'),
        (charge_rate > 0.8, 'Charge rate is out of range!'),
    )
    for out_of_range, message in checks:
        if out_of_range:
            print(message)
            return False
    return True
if __name__ == '__main__':
    # Smoke tests: one all-in-range reading, one out-of-range reading.
    assert(battery_is_ok(25, 70, 0.7) is True)
    assert(battery_is_ok(50, 85, 0) is False)
|
#!/usr/bin/env python
# coding: utf-8
import os
from . import DatasetDirectoryError
def getDatasetDirectory():
    """Resolve the dataset root from the DATASET_EXPLORER_ROOT env var.

    Raises:
        DatasetDirectoryError: when the variable is unset, when the path
            is missing or not a directory, or when the directory is empty.
    """
    env_name = "DATASET_EXPLORER_ROOT"
    root = os.environ.get(env_name)
    if root is None:
        raise DatasetDirectoryError(
            f"The environment variable {env_name} must be set to the root path of the dataset directory")
    if not (os.path.exists(root) and os.path.isdir(root)):
        raise DatasetDirectoryError(f"The provided dataset directory {root} does not exists")
    if not os.listdir(root):
        raise DatasetDirectoryError(f"The provided dataset directory {root} is empty")
    return root
def getPluginsPath():
    """Return plugin search paths from $DATASET_EXPLORER_PLUGINS.

    The variable is a colon-separated list; empty segments are dropped,
    and an unset variable yields an empty list.
    """
    raw = os.getenv("DATASET_EXPLORER_PLUGINS", "")
    return list(filter(None, raw.split(":")))
|
from os.path import join, exists, isdir
from os import environ
from .config import CONFIG
# CONFIG key under which a manually-entered .vimrc path is persisted.
KEY_VIMRC_PATH = 'vimrc_path'
def find_vimrc_auto() -> str:
    """Locate the user's .vimrc: CONFIG first, then $HOME/.vimrc.

    Returns None when neither source yields an existing path.
    """
    configured = CONFIG.get(KEY_VIMRC_PATH, None)
    if configured is not None:
        return configured
    home = environ.get('HOME', None)
    if home is None:
        return None
    candidate = join(home, '.vimrc')
    return candidate if exists(candidate) else None
def find_home(ask=False) -> str:
    """Return $HOME when set; otherwise, if ``ask`` is True, prompt until
    the user supplies an existing directory.  Returns None when $HOME is
    unset and ``ask`` is False."""
    home = environ.get('HOME', None)
    if home is not None:
        return home
    while ask:
        candidate = input('[A] Please enter HOME path: ')
        if exists(candidate) and isdir(candidate):
            return candidate
        print('[E] Incorrect path, try another!')
def find_or_ask_vimrc_auto() -> str:
    """Return a .vimrc path, prompting the user interactively when one
    cannot be found automatically; the answer is persisted to CONFIG."""
    vp = find_vimrc_auto()
    if vp is not None:
        return vp
    vp = input('[A] please enter .vimrc path: ')
    while not exists(vp):
        vp = input('[A] .vimrc not found, please try again: ')
    # Remember the manual answer so future calls resolve automatically.
    CONFIG[KEY_VIMRC_PATH] = vp
    print('[A] .vimrc path save automatically.')
    return vp
|
from PIL import Image, ImageDraw, ImageFont
import textwrap
import os
import json
import base64
def makememe( text, location ):
    """Caption the image in ``location`` with ``text`` (upper-cased,
    word-wrapped, centered) and return the result base64-encoded.

    Returns None when ``location`` contains no files.  The source image
    file is overwritten in place.
    """
    if len(os.listdir(location)) == 0:
        return
    # NOTE(review): only the *last* directory entry survives this loop, so
    # the folder is assumed to contain exactly one image -- confirm.
    for file in os.listdir(location):
        imgname = file
    img = location + "/" + imgname
    image = Image.open(img)
    draw = ImageDraw.Draw(image)
    im_width, im_height = image.size
    # Font height scales with the image (1/10th of its height).
    font = ImageFont.truetype('./fonts/Roboto-Bold.ttf', size=int(im_height/10))
    text = text.upper()
    # Estimate characters per line from the width of a single 'A' glyph.
    char_width, char_height = font.getsize('A')
    linelength = im_width // char_width
    memetext = textwrap.wrap( text, width=linelength)
    yoffset = 5
    for line in memetext:
        l_width, l_height = font.getsize(line)
        # Center each line horizontally; stack lines downward.
        xoffset = (im_width - l_width)/2
        draw.text( (xoffset,yoffset), line, fill='white', font=font)
        yoffset += l_height
    image.save(img)
    #B64 Encode Meme and Return
    with open( img, mode ='rb') as file:
        c_img = file.read()
    data = base64.encodebytes(c_img).decode("utf-8")
    return data
# Read two integers; when A < B, print every integer in [A, B) and then
# N = B - A (the count of values printed).
A=int(input("A= "))
B=int(input("B= "))
if(A<B):
    for i in range(A,B):
        print(i)
    i=B-A
    print("N=",i)
# Generated by Django 3.1.2 on 2020-11-06 14:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for the polls app.

    Creates Categories, File2, Post, User and Content, adds the
    Post->User and Post->Categories foreign keys, and creates the
    ReadsPost/Mark/Likes join tables (each unique per (user, post)).
    Auto-generated by Django -- do not hand-edit applied migrations.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Categories',
            fields=[
                ('Name', models.CharField(max_length=20, primary_key=True, serialize=False)),
                ('Photo', models.CharField(max_length=25)),
            ],
        ),
        migrations.CreateModel(
            name='File2',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('file', models.FileField(upload_to='')),
            ],
        ),
        migrations.CreateModel(
            name='Post',
            fields=[
                ('Id', models.BigAutoField(primary_key=True, serialize=False)),
                ('Name', models.CharField(max_length=30)),
                ('Header_photo', models.CharField(max_length=25)),
                ('Create_time', models.DateTimeField(auto_now_add=True)),
                ('Summary', models.TextField()),
            ],
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('Id', models.CharField(max_length=30, primary_key=True, serialize=False)),
                ('Name', models.CharField(max_length=20)),
                ('Photo', models.CharField(max_length=25)),
            ],
        ),
        migrations.CreateModel(
            name='Content',
            fields=[
                ('Id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='polls.post')),
                ('Content1', models.CharField(max_length=30)),
                ('Content2', models.CharField(max_length=20)),
                ('Main_content', models.TextField()),
            ],
        ),
        migrations.AddField(
            model_name='post',
            name='Writer',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.user'),
        ),
        migrations.AddField(
            model_name='post',
            name='categorie',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.categories'),
        ),
        migrations.CreateModel(
            name='ReadsPost',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.post')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.user')),
            ],
            options={
                'unique_together': {('user', 'post')},
            },
        ),
        migrations.CreateModel(
            name='Mark',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.post')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.user')),
            ],
            options={
                'unique_together': {('user', 'post')},
            },
        ),
        migrations.CreateModel(
            name='Likes',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.post')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.user')),
            ],
            options={
                'unique_together': {('user', 'post')},
            },
        ),
    ]
|
import os
import logging
import fisheye
class Settings(object):
    """Static configuration for the fisheye web service: filesystem paths,
    upload rules, logging, video retention timeouts, watermark text,
    analytics id, and the table of supported output video formats."""
    APP_ROOT = os.path.dirname(os.path.realpath(__file__))
    # Number of allowed threads/process to run in parallel to process videos
    PROCESSINGS_THREADS_NUM = 3
    #
    # Uploads settings
    #
    UPLOAD_FOLDER = os.path.join(APP_ROOT, 'uploads')
    CONVERTED_PAID_FOLDER = os.path.join(APP_ROOT, 'converted', 'paid')
    CONVERTED_UNPAID_FOLDER = os.path.join(APP_ROOT, 'converted', 'unpaid')
    ALLOWED_EXTENSIONS = ['.mp4', '.avi']
    #
    # Log settings
    #
    LOG_FILE_PATH = os.path.join(APP_ROOT, 'fisheye_webservice.log')
    LOG_FILE_MAX_SIZE = 100 * 1024 # Bytes
    # logging level. See available https://docs.python.org/3/library/logging.html#logging-levels
    LOG_LEVEL = logging.DEBUG
    #
    # Video store timeouts (hours)
    #
    PAID_VIDEO_TIMEOUT = 72 # hours
    UNPAID_VIDEO_TIMEOUT = 24 # hours
    #
    # Watermark
    #
    UNPAID_WATERMARK_TEXT = 'DEMO - free version. Deposit money for paid version without watermark.'
    #
    # Analytics
    #
    GOOGLE_ANALYTICS_TRACKING_ID = 'UA-86511099-1'
    VIDEO_FORMATS = {
        # name, fisheye enum code, file extension
        'mpeg-4': { 'name': 'MPEG-4', 'code': fisheye.CODEC_MPEG_4, 'extension': '.mp4'},
        'mpeg1': { 'name': 'MPEG-1', 'code': fisheye.CODEC_MPEG_1, 'extension': '.mpeg'},
        'flv1': { 'name': 'FLV1', 'code': fisheye.CODEC_FLV1, 'extension': '.flv'},
        # TODO: following codecs don't work now
        # 'm-jpeg': { 'name': 'Motion JPEG', 'code': fisheye.CODEC_MOTION_JPEG, 'extension': '.mjpeg'},
        # 'mpeg-4.2': { 'name': 'MPEG-4.2', 'code': fisheye.CODEC_MPEG_4_2, 'extension': '.mp4'},
        # 'mpeg-4.3': { 'name': 'MPEG-4.3', 'code': fisheye.CODEC_MPEG_4_3, 'extension': '.mp4'},
        # 'h263': { 'name': 'H263', 'code': fisheye.CODEC_H263, 'extension': '.mp4'},
        # 'h263i': { 'name': 'H263I', 'code': fisheye.CODEC_H263I, 'extension': '.mp4'},
    }
#coding:gb2312
# Create numeric lists and perform some simple operations on them.
for numbers in range(1,6):  # range(1,6) yields 1..5 only; 6 is excluded
    print(numbers)
# Build number lists with range():
numbers=list(range(1,6))  # list() turns the range into a list
print(numbers)
even_numbers=list(range(2,14,2))  # range(x,y,z): x/y are the bounds, z the step
print(even_numbers)
# Compare three ways of building the same list of squares.
# First: explicit temporary variable
squares=[]
for number in range(1,11):
    square=number**2
    squares.append(square)
print(squares)
# Second: append the expression directly
squares=[]
for number in range(1,11):
    squares.append(number**2)
print(squares)
# Third: list comprehension -- number**2 is the expression and the for
# clause supplies its values
squares=[number**2 for number in range(1,11)]
print(squares)
# Simple statistics over a numeric list
print(min(squares))  # smallest element
print(max(squares))  # largest element
print(sum(squares))  # sum of all elements
|
# Generated by Django 3.0.5 on 2020-04-29 21:21
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: makes Product.description optional (blank/null
    allowed, max_length 200).  Do not hand-edit applied migrations."""

    dependencies = [
        ('home', '0006_auto_20200429_1345'),
    ]

    operations = [
        migrations.AlterField(
            model_name='product',
            name='description',
            field=models.CharField(blank=True, max_length=200, null=True),
        ),
    ]
|
from django.contrib import messages
from django.shortcuts import redirect, render
from django.contrib.auth.decorators import login_required
from .forms import extend_user_form, user_detail_form, todo_entery_form
from django.contrib.auth.models import User
from .models import enter_todo_items
# indexview
@login_required(login_url = 'auth:Login' )
def index_view(request):
    """Render data.html with every todo item linked to the current user."""
    owner = request.user
    todo_items = owner.connected_user.all()
    return render(request, 'data.html', {'data': todo_items})
@login_required(login_url = 'auth:Login' )
def test_view(request):
    """Render the static test.html page (login required)."""
    return render(request, 'test.html')
# show user their details
@login_required(login_url='auth:Login')
def account_of_user(request):
    """Show the logged-in user's account details and profile picture."""
    current_user = User.objects.get(id=request.user.id)
    context = {
        'html_user_details': current_user,
        'picture': current_user.linked_profile,
    }
    return render(request, 'account.html', context)
# change profile picture
@login_required(login_url= 'auth:Login')
def profile_change(request):
    """Replace the logged-in user's profile picture.

    NOTE(review): the old picture file is deleted *before* the submitted
    form is validated, so an invalid upload leaves the profile with no
    picture -- confirm this is intended.
    """
    # if post request
    profile_of =request.user.linked_profile
    if request.method == "POST":
        profile_of.user_profile.delete()
        post_profile_form = extend_user_form(request.POST, request.FILES, instance=profile_of)
        # if form is valid
        if post_profile_form.is_valid():
            post_profile_form.save()
            messages.success(request, 'profile updated')
            return redirect('home:Account')
    # GET (or invalid POST): render an empty upload form.
    profile_form = extend_user_form()
    context = {'html_profile_change_form':profile_form}
    return render(request, 'updateprofile.html', context)
# change details
@login_required(login_url='auth:Login')
def details_update(request):
    """Let the logged-in user edit first name, last name, and e-mail.

    BUG FIX: the decorator previously received ``login_url={'auth:Login'}``
    (a one-element set); Django expects a string/lazy URL here and every
    other view in this module passes the plain string 'auth:Login'.
    """
    profile_of = request.user
    if request.method == "POST":
        post_form = user_detail_form(request.POST, instance=profile_of)
        if post_form.is_valid():
            post_form.save()
            messages.success(request, 'Details updated')
            return redirect('home:Account')
    # GET (or invalid POST): pre-fill the form with the current details.
    get_form = user_detail_form(initial={
        'first_name': profile_of.first_name,
        'last_name': profile_of.last_name,
        'email': profile_of.email
    })
    context = {'html_detail_form': get_form}
    return render(request, 'updatedetails.html', context)
# enter todo items
@login_required(login_url='auth:Login')
def user_todo(request):
    """Create a new todo item owned by the logged-in user.

    GET renders an empty form; POST validates, attaches the current user,
    saves, and redirects back to the todo page.
    """
    profile_of = request.user
    todo_form_instance = todo_entery_form
    todo_form = todo_form_instance(request.POST or None)
    if request.method == 'POST':
        if todo_form.is_valid():
            # Attach the owner before the real save; the form itself has
            # no owner field.
            intial_save = todo_form.save(commit=False)
            intial_save.linked_profile = profile_of
            intial_save.save()
            messages.success(request, 'Task Added')
            return redirect('home:Todo')
        else:
            messages.error(request, 'Enter valid data')
    context = {'html_todo_form': todo_form }
    return render(request,'todo.html',context)
#change details of todo
@login_required(login_url='auth:Login')
def change_details(request, item_id):
    """Edit an existing todo item belonging to the current user.

    POST validates and saves the changes; GET pre-fills the form from the
    stored item.
    """
    profile_of = request.user.connected_user.get(id = item_id)
    if request.method == 'POST':
        todo_post = todo_entery_form(request.POST, instance=profile_of)
        if todo_post.is_valid():
            todo_post.save()
            return redirect('home:Home')
    # NOTE(review): re-fetched through the ``todo`` manager -- presumably
    # the same row as ``profile_of``; confirm against the model.
    data_get = enter_todo_items.todo.select_related('linked_profile').get(id = item_id)
    todo_change = todo_entery_form(initial=
        {'todo_name':data_get.todo_name,
        'datetime_to_happen':data_get.datetime_to_happen,
        'descriptions':data_get.descriptions})
    context = {'html_todo_change':todo_change, 'of_what':data_get}
    return render(request,'change_todo.html',context)
# history
def history(request):
    """Render history.html with all of the current user's todo items."""
    owner = request.user
    items = owner.connected_user.all()
    return render(request, 'history.html', {'data': items})
# view of todo items
def view(request, item_id):
    """Show the detail page for one of the current user's todo items."""
    item = request.user.connected_user.get(id=item_id)
    return render(request,'view.html', {'details': item})
# completed
def completed(request, item_id):
    """Mark the todo item ``item_id`` as done and go back home."""
    item = enter_todo_items.todo.select_related('linked_profile').get(id=item_id)
    item.isdone = True
    item.save()
    return redirect('home:Home')
# about
def about(request):
    """Render the static about page with author/project metadata."""
    info = {
        'name': 'jatinder singh',
        'purpose': 'to make a todo app for myself',
        'tech_used': 'Django , html, css, scss, python ',
    }
    return render(request, 'about.html', {'html_data': info})
from herd import Herd
from weapon import Weapon
from herd import Herd
from dino import Dino
class Robot:
    """A battling robot with a name, fixed starting health, and a weapon."""
    def __init__(self, name, weapon):
        self.name = name      # display name
        self.health = 200     # starting hit points
        self.weapon = weapon  # weapon carried by this robot
    def attack(self):
        # NOTE(review): these call instance methods on the *classes*
        # (Dino.health() / Weapon.attack_power()) rather than on a target
        # instance or on self.weapon -- this raises TypeError unless those
        # are class/static methods.  Likely intended: the target dino's
        # health minus self.weapon.attack_power().  Confirm against
        # dino.py and weapon.py.
        health = Dino.health()
        atk = Weapon.attack_power()
        new_health = health - atk
        return new_health
|
#! /usr/bin/python
# coding=utf-8
import os
import subprocess
from scipy import misc
from lib import *
import argparse
# CLI: load all images from a folder, run the exposure-fusion pipeline
# (from ``lib``), save the fused result, and optionally score it with the
# MATLAB MEF-IQA tool.
parser = argparse.ArgumentParser(description='Compute an exposure-fused image from multiple different exposures')
parser.add_argument("source", help="Folder containing all the images to be fused together")
parser.add_argument("output", help="Where to put the output")
parser.add_argument("--debug", help="Display intermediate results", action="store_true")
parser.add_argument("--dynamic", help="Run dynamic exposure fusion", action="store_true")
parser.add_argument("--filterbycolor", help="Run the cross bilateral filter on color rather than luminance", action="store_true")
parser.add_argument("--withiqa", help="Run IQA after fusion", action="store_true")
parser.add_argument("--sigma", help="Fix the sigma for the gaussian derivate to compute the gradients", type=float, default=20.0)
parser.add_argument("--sigmaColor", help="Fix the sigma for colors in the cross bilateral filter", type=float, default=255.0/10.0)
parser.add_argument("--sigmaSpace", help="Fix the sigma for space in the cross bilateral filter", type=float, default=25.0)
args = parser.parse_args()
# Load every jpg/png/tif in the source folder (case-insensitive).
images = [misc.imread(os.path.join(args.source,x)) for x in os.listdir(args.source) if
            x.lower().endswith("jpg") or
            x.lower().endswith("png") or
            x.lower().endswith("tif")]
output = pipeline(images, args.sigma, args.sigmaColor, args.sigmaSpace, static=(args.dynamic is False), filter_by_color=(args.filterbycolor is True))
displayable_output = numpy.uint8(output)
misc.imsave(args.output, displayable_output)
if args.withiqa:
    # Launch MATLAB in batch mode to produce the IQA figure and text report.
    DEVNULL = open(os.devnull, 'wb')
    p = subprocess.Popen("matlab -wait -nodesktop -nosplash -r \"addpath('mef_iqa'); "
                         "iqa('" + args.source + "', '" + args.output + "', 1, '"+ args.output+"_iqa.fig', '" + args.output + "_iqa.txt')",
                         shell=True, stdout=DEVNULL, stderr=DEVNULL)
    p.wait()
# Split n into three positive integers none of which is divisible by 3:
# 1 + 1 + (n-2) when n % 3 == 0 (then n-2 ≡ 1 mod 3), otherwise
# 1 + 2 + (n-3) (n-3 keeps n's nonzero residue).  The conditional
# expression binds before the * unpacking, so the chosen triple is
# printed space-separated either way.
n = int(input())
print(*[1, 1, n-2] if n % 3 == 0 else [1, 2, n - 3])
|
# implementation of card game - Memory
import simplegui
import random
turns = 0  # number of guesses taken this game; mirrored in the UI label
# helper function to initialize globals
def new_game():
    # Reset the deck (values 0..7, two of each, shuffled), hide all 16
    # cards, zero the turn counter, and reset the click state machine.
    # Python 2 code: ``range(0,8) * 2`` multiplies a list.
    global dock, exposed, state, turns
    turns = 0
    state = 0
    cards = range(0,8) * 2
    dock = []
    for card in cards:
        dock.append(card)
    random.shuffle(dock)
    exposed = [False] * 16
    label.set_text("Turns = " + str(turns))
# define event handlers
def mouseclick(pos):
    # add game state logic here
    # Three-state turn machine:
    #   state 0 -- no card pending: expose the clicked card.
    #   state 1 -- one card pending: expose a second card, count a turn.
    #   state 2 -- two cards pending: keep them exposed if they matched,
    #              hide them otherwise, then start a new pair here.
    global state, turns, exposed, dock, clicked1, clicked2
    click = int(pos[0] / 50)  # cards are 50 px wide, so index = x // 50
    if state == 0:
        state = 1
        clicked1 = click
        exposed[clicked1] = True
    elif state == 1:
        if not exposed[click]:  # ignore clicks on already-exposed cards
            state = 2
            clicked2 = click
            exposed[clicked2] = True
            turns += 1
    else:
        if not exposed[click]:
            if dock[clicked1] == dock[clicked2]:
                exposed[clicked1] = True
                exposed[clicked2] = True
            else:
                exposed[clicked1] = False
                exposed[clicked2] = False
            clicked1 = click
            exposed[clicked1] = True
            state = 1
    label.set_text("Turns = " + str(turns))
# cards are logically 50x100 pixels in size
def draw(canvas):
    # Render each card: its number when exposed, otherwise a green back.
    for card in range(len(dock)):
        if exposed[card]: #if True draw number
            canvas.draw_text(str(dock[card]),(card * 50 + 10, 65), 48, 'White', "sans-serif")
        else: # draw green card
            canvas.draw_polygon([(50*card, 0), (50*card + 50, 0), (50*card + 50, 100), (50*card, 100)], 2, "Tomato", "SeaGreen")
# create frame and add a button and labels (SimpleGUI / CodeSkulptor)
frame = simplegui.create_frame("Memory", 800, 100)
frame.add_button("Reset", new_game)
label = frame.add_label("Turns = 0")
# register event handlers
frame.set_mouseclick_handler(mouseclick)
frame.set_draw_handler(draw)
# get things rolling
new_game()
frame.start()
import webbrowser
# Base Classe Video
class Video():
    """Base type for playable videos: title, duration and producer.

    ``show__trailer`` expects ``self.trailer_youtube_url`` to be set;
    only the subclasses (Movie, Serie) define it, so the base class is
    effectively abstract with respect to trailers.
    """
    def __init__(self, title, duration, producer):
        self.title = title
        self.duration = duration
        self.producer = producer
    def show__trailer(self):
        # Open the instance's trailer URL in the default web browser.
        webbrowser.open(self.trailer_youtube_url)
# Classe that implements heritage and define a movie
class Movie(Video):
    """A movie: Video fields plus storyline, poster image URL, and
    trailer URL."""
    def __init__(self, title, duration, producer, storyline, poster_image,trailer_youtube):
        Video.__init__(self, title, duration, producer)
        self.storyline = storyline
        self.poster_image_url = poster_image
        self.trailer_youtube_url = trailer_youtube
# Classe that implements heritage and define a serie
class Serie(Video):
    """A TV series; like Movie but with an episode count."""

    def __init__(self, title, duration, producer, storyline, poster_image, trailer_youtube, episodes):
        # Shared attributes live on the base class.
        Video.__init__(self, title, duration, producer)
        self.storyline = storyline
        self.poster_image_url = poster_image
        self.trailer_youtube_url = trailer_youtube
        self.episodes = episodes
|
import time
import math
import argparse
from typing import Optional, Tuple
from functools import partial
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
class BertSelfAttention(nn.Module):
    """BERT-style multi-head self-attention: q/k/v projections, scaled
    dot-product attention with an additive mask, and an output dense layer."""

    def __init__(self, hidden_size, num_attention_heads, attention_probs_dropout_prob=0.1):
        super(BertSelfAttention, self).__init__()
        if hidden_size % num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (hidden_size, num_attention_heads))
        self.num_attention_heads = num_attention_heads
        self.attention_head_size = hidden_size // num_attention_heads
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        # Layer creation order (query, key, value, dense) is kept stable so
        # seeded initialization reproduces the same parameters.
        self.query = nn.Linear(hidden_size, self.all_head_size)
        self.key = nn.Linear(hidden_size, self.all_head_size)
        self.value = nn.Linear(hidden_size, self.all_head_size)
        self.dense = nn.Linear(hidden_size, hidden_size)
        self.dropout = nn.Dropout(attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        """[batch, seq, H*D] -> [batch, heads, seq, head_dim]."""
        split_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        return torch.reshape(x, split_shape).permute(0, 2, 1, 3)

    def transpose_key_for_scores(self, x):
        """[batch, seq, H*D] -> [batch, heads, head_dim, seq] (key laid out for matmul)."""
        split_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        return torch.reshape(x, split_shape).permute(0, 2, 3, 1)

    def forward(self, hidden_states, attention_mask):
        """hidden_states: [batch, seq, hidden]; attention_mask: additive mask
        broadcastable to [batch, heads, seq, seq]. Returns [batch, seq, hidden]."""
        q = self.transpose_for_scores(self.query(hidden_states))
        k = self.transpose_key_for_scores(self.key(hidden_states))
        v = self.transpose_for_scores(self.value(hidden_states))
        # raw attention scores, scaled then shifted by the (pre-computed) mask
        scores = torch.matmul(q, k) / math.sqrt(self.attention_head_size)
        scores = scores + attention_mask
        # dropping out attention probs drops entire tokens to attend to,
        # as in the original Transformer paper
        probs = self.dropout(F.softmax(scores, dim=-1))
        context = torch.matmul(probs, v).permute(0, 2, 1, 3).contiguous()
        merged_shape = context.size()[:-2] + (self.all_head_size,)
        return self.dense(torch.reshape(context, merged_shape))
class Conv1D(nn.Module):
    """
    GPT-style "1D convolution" as defined by Radford et al. for OpenAI GPT
    (also used in GPT-2): behaves like a linear layer whose weight matrix is
    stored transposed.

    Args:
        nf (:obj:`int`): The number of output features.
        nx (:obj:`int`): The number of input features.
    """

    def __init__(self, nf, nx):
        super().__init__()
        self.nf = nf
        w = torch.empty(nx, nf)
        nn.init.normal_(w, std=0.02)
        self.weight = nn.Parameter(w)
        self.bias = nn.Parameter(torch.zeros(nf))

    def forward(self, x):
        # flatten leading dims, apply bias + x @ weight, then restore the shape
        out_shape = x.size()[:-1] + (self.nf,)
        flat = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
        return flat.view(*out_shape)
class GPT2SelfAttention(nn.Module):
    """GPT-2 causal multi-head self-attention: fused Conv1D q/k/v projection,
    lower-triangular mask, and an output Conv1D projection."""

    def __init__(self, nx, n_ctx, n_head, attn_pdrop, resid_pdrop, scale=False):
        super().__init__()
        n_state = nx  # in Attention: n_state=768 (nx=n_embd)
        # [switch nx => n_state from Block to Attention to keep identical to TF implem]
        assert n_state % n_head == 0
        # causal mask: position i may only attend to positions <= i
        self.register_buffer(
            "bias", torch.tril(torch.ones((n_ctx, n_ctx), dtype=torch.uint8)).view(1, 1, n_ctx, n_ctx)
        )
        self.register_buffer("masked_bias", torch.tensor(-1e4))
        self.n_head = n_head
        self.split_size = n_state
        self.scale = scale
        self.c_attn = Conv1D(3 * n_state, nx)  # fused q/k/v projection
        self.c_proj = Conv1D(n_state, nx)
        self.attn_dropout = nn.Dropout(attn_pdrop)
        self.resid_dropout = nn.Dropout(resid_pdrop)

    def _attn(self, q, k, v, attention_mask=None):
        """Masked scaled dot-product attention; k arrives pre-transposed."""
        scores = torch.matmul(q, k)
        if self.scale:
            scores = scores / (float(v.size(-1)) ** 0.5)
        nd, ns = scores.size(-2), scores.size(-1)
        # only the "normal" attention layer implements the causal mask
        causal = self.bias[:, :, ns - nd : ns, :ns]
        scores = torch.where(causal.bool(), scores, self.masked_bias.to(scores.dtype))
        if attention_mask is not None:
            # apply the externally supplied additive mask
            scores = scores + attention_mask
        probs = self.attn_dropout(F.softmax(scores, dim=-1))
        return torch.matmul(probs, v)

    def merge_heads(self, x):
        """(batch, head, seq, feat) -> (batch, seq, head*feat)."""
        x = x.permute(0, 2, 1, 3).contiguous()
        return x.view(*(x.size()[:-2] + (x.size(-2) * x.size(-1),)))  # in Tensorflow implem: fct merge_states

    def split_heads(self, x, k=False):
        """(batch, seq, embed) -> per-head layout; keys get (batch, head, feat, seq)."""
        x = x.view(*(x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)))  # in Tensorflow implem: fct split_states
        return x.permute(0, 2, 3, 1) if k else x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
    ):
        query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2)
        query = self.split_heads(query)
        key = self.split_heads(key, k=True)
        value = self.split_heads(value)
        out = self.merge_heads(self._attn(query, key, value, attention_mask))
        return self.resid_dropout(self.c_proj(out))
class BartSelfAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper.

    BART flavor: queries are pre-scaled by 1/sqrt(head_dim), heads are
    flattened into the batch dimension for a single bmm, and dropout is
    applied to the attention probabilities (functional, so it respects
    self.training).
    """

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        bias: bool = True,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        assert (
            self.head_dim * num_heads == self.embed_dim
        ), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads})."
        # scale queries once up front instead of scaling the score matrix
        self.scaling = self.head_dim ** -0.5
        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        # (bsz, seq, embed) -> (bsz, heads, seq, head_dim)
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    # FIX: annotated return type was a 3-tuple, but the method returns a single
    # tensor — corrected to match the actual return value.
    def forward(
        self,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor:
        """Input shape: Batch x Time x Channel; returns the same shape."""
        bsz, tgt_len, embed_dim = hidden_states.size()
        query_states = self.q_proj(hidden_states) * self.scaling
        key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
        value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
        # fold heads into the batch dimension so a single bmm covers all heads
        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.view(*proj_shape)
        value_states = value_states.view(*proj_shape)
        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
        assert attn_weights.size() == (
            bsz * self.num_heads,
            tgt_len,
            src_len,
        ), f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}"
        attn_weights = F.softmax(attn_weights, dim=-1)
        attn_probs = F.dropout(attn_weights, p=self.dropout, training=self.training)
        attn_output = torch.bmm(attn_probs, value_states)
        # FIX: the failure message previously reported the unflattened
        # (bsz, num_heads, tgt_len, head_dim) shape while the check itself uses
        # the flattened (bsz * num_heads, tgt_len, head_dim) shape.
        assert attn_output.size() == (
            bsz * self.num_heads,
            tgt_len,
            self.head_dim,
        ), f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}"
        attn_output = (
            attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
            .transpose(1, 2)
            .reshape(bsz, tgt_len, embed_dim)
        )
        attn_output = self.out_proj(attn_output)
        return attn_output
def build_bert_model_and_input(batch_size=1, seq_len=512, use_large=False, cuda=True, fp16=False):
    """Create an eval-mode BERT self-attention module plus a matching
    (hidden_state, attn_mask) input tuple.

    use_large selects bert-large dimensions (1024 hidden / 16 heads) over
    bert-base (768 / 12); fp16 halves model and activations; cuda moves
    everything to the GPU.
    """
    hidden_size, num_heads = (1024, 16) if use_large else (768, 12)
    model = BertSelfAttention(hidden_size, num_heads).eval()
    hidden_state = torch.randn(batch_size, seq_len, hidden_size)
    if fp16:
        model = model.half()
        hidden_state = hidden_state.half()
    # additive mask of zeros == attend everywhere
    attn_mask = torch.zeros(batch_size, 1, 1, seq_len).long()
    if cuda:
        model = model.cuda()
        hidden_state = hidden_state.cuda()
        attn_mask = attn_mask.cuda()
    return model, (hidden_state, attn_mask)
def build_gpt2_model_and_input(batch_size=1, seq_len=512, use_large=False, cuda=True, fp16=False):
    """Create a GPT-2 self-attention module plus a matching
    (hidden_state, attn_mask) input tuple.

    use_large selects gpt2-medium dimensions (1024 embed / 16 heads) over
    gpt2-small (768 / 12); fp16 halves model and activations; cuda moves
    everything to the GPU.
    """
    attn_pdrop, resid_pdrop, scale = 0.1, 0.1, True
    if not use_large:
        n_embed, n_ctx, n_head = 768, 1024, 12
    else:
        n_embed, n_ctx, n_head = 1024, 1024, 16
    # FIX: .eval() disables the attention/residual dropout so the benchmark
    # measures deterministic inference — consistent with build_bert_model_and_input,
    # which already called .eval().
    model = GPT2SelfAttention(n_embed, n_ctx, n_head, attn_pdrop, resid_pdrop, scale).eval()
    hidden_state = torch.randn(batch_size, seq_len, n_embed)
    if fp16:
        model = model.half()
        hidden_state = hidden_state.half()
    # additive mask of zeros == attend everywhere (causal mask is internal)
    attn_mask = torch.zeros(batch_size, 1, 1, seq_len).long()
    if cuda:
        model = model.cuda()
        hidden_state = hidden_state.cuda()
        attn_mask = attn_mask.cuda()
    return model, (hidden_state, attn_mask)
def build_bart_model_and_input(batch_size=1, seq_len=512, use_large=False, cuda=True, fp16=False):
    """Create a BART self-attention module plus a single-element input tuple.

    use_large selects bart-large dimensions (1024 embed / 16 heads) over
    bart-base (768 / 12); fp16 halves model and activations; cuda moves
    everything to the GPU.
    """
    if not use_large:
        # d_model, encoder_attention_heads, attention_dropout
        embed_dim, num_heads, dropout = 768, 12, 0.1
    else:
        embed_dim, num_heads, dropout = 1024, 16, 0.1
    # FIX: .eval() disables dropout (BartSelfAttention uses F.dropout gated on
    # self.training) so the benchmark measures deterministic inference —
    # consistent with build_bert_model_and_input, which already called .eval().
    model = BartSelfAttention(embed_dim, num_heads, dropout).eval()
    hidden_state = torch.randn(batch_size, seq_len, embed_dim)
    if fp16:
        model = model.half()
        hidden_state = hidden_state.half()
    if cuda:
        model = model.cuda()
        hidden_state = hidden_state.cuda()
    return model, (hidden_state,)
def bench_dense_attn_cpu(run_func, number=10, repeats=10):
    """Time *run_func* on CPU with wall-clock timestamps.

    Performs one untimed warm-up call, then *repeats* rounds of *number*
    timed calls. Returns a list with each round's mean per-call latency in
    milliseconds.
    """
    run_func()  # warm-up, excluded from timing
    round_means = []
    for _ in range(repeats):
        samples = []
        for _ in range(number):
            start = time.time()
            run_func()
            samples.append(1000 * (time.time() - start))
        round_means.append(np.mean(samples))
    return round_means
def bench_dense_attn_gpu(run_func, number=100, repeats=10):
    """Time *run_func* on GPU using CUDA events.

    Performs one untimed warm-up call, then *repeats* rounds of *number*
    timed calls; each call is bracketed by torch.cuda.synchronize() so only
    the kernel time between the two events is measured. Returns a list with
    each round's mean per-call latency in milliseconds.
    """
    run_func()  # warm-up, excluded from timing
    round_means = []
    for _ in range(repeats):
        samples = []
        for _ in range(number):
            torch.cuda.synchronize()
            start_evt = torch.cuda.Event(enable_timing=True)
            stop_evt = torch.cuda.Event(enable_timing=True)
            start_evt.record()
            run_func()
            stop_evt.record()
            torch.cuda.synchronize()
            samples.append(start_evt.elapsed_time(stop_evt))
        round_means.append(np.mean(samples))
    return round_means
def run_dense_attn(dense_attn, inputs):
    """Execute one forward pass of *dense_attn* on the *inputs* tuple without
    building the autograd graph. The result is discarded (timing-only call)."""
    with torch.no_grad():
        dense_attn(*inputs)
def run_bert_benchmark(batch_size=1, seq_len=512, use_large=False, cuda=True, fp16=False):
    """Benchmark the dense BERT self-attention forward pass; prints per-round
    results and returns the mean latency in milliseconds."""
    dense_attn, inputs = build_bert_model_and_input(batch_size=batch_size, seq_len=seq_len, use_large=use_large, cuda=cuda, fp16=fp16)
    run_func = partial(run_dense_attn, dense_attn=dense_attn, inputs=inputs)
    # pick the timing backend matching the device
    bench = bench_dense_attn_gpu if cuda else bench_dense_attn_cpu
    bench_res = bench(run_func)
    print(f"Benchmark result ({'bert-large' if use_large else 'bert-base'}, {'GPU' if cuda else 'CPU'}, {'TC' if fp16 else 'NTC'}, {seq_len})")
    print(bench_res)
    mean_latency = np.mean(bench_res)
    print(f"mean: {mean_latency}, std: {np.std(bench_res)}")
    return mean_latency
def run_gpt2_benchmark(batch_size=1, seq_len=512, use_large=False, cuda=True, fp16=False):
    """Benchmark the dense GPT-2 self-attention forward pass; prints per-round
    results and returns the mean latency in milliseconds."""
    dense_attn, inputs = build_gpt2_model_and_input(batch_size=batch_size, seq_len=seq_len, use_large=use_large, cuda=cuda, fp16=fp16)
    run_func = partial(run_dense_attn, dense_attn=dense_attn, inputs=inputs)
    # pick the timing backend matching the device
    bench = bench_dense_attn_gpu if cuda else bench_dense_attn_cpu
    bench_res = bench(run_func)
    print(f"Benchmark result ({'gpt2-medium' if use_large else 'gpt2-small'}, {'GPU' if cuda else 'CPU'}, {'TC' if fp16 else 'NTC'}, {seq_len})")
    print(bench_res)
    mean_latency = np.mean(bench_res)
    print(f"mean: {mean_latency}, std: {np.std(bench_res)}")
    return mean_latency
def run_bart_benchmark(batch_size=1, seq_len=512, use_large=False, cuda=True, fp16=False):
    """Benchmark the dense BART self-attention forward pass; prints per-round
    results and returns the mean latency in milliseconds."""
    dense_attn, inputs = build_bart_model_and_input(batch_size=batch_size, seq_len=seq_len, use_large=use_large, cuda=cuda, fp16=fp16)
    run_func = partial(run_dense_attn, dense_attn=dense_attn, inputs=inputs)
    # pick the timing backend matching the device
    bench = bench_dense_attn_gpu if cuda else bench_dense_attn_cpu
    bench_res = bench(run_func)
    print(f"Benchmark result ({'bart-large' if use_large else 'bart-base'}, {'GPU' if cuda else 'CPU'}, {'TC' if fp16 else 'NTC'}, {seq_len})")
    print(bench_res)
    mean_latency = np.mean(bench_res)
    print(f"mean: {mean_latency}, std: {np.std(bench_res)}")
    return mean_latency
# Dispatch table: model family (the prefix before '-') -> benchmark runner.
BENCH_FUNCS = {
    'bert': run_bert_benchmark,
    'gpt2': run_gpt2_benchmark,
    'bart': run_bart_benchmark,
}
def main():
    """CLI entry point: run one benchmark for --model_name, or sweep the three
    base models across sequence lengths 128/384/512 with --all (results are
    printed and written to bench_results.csv)."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", default=None, type=str, required=True,
                        help="Model type selected in the list: bert-base, bert-large, "
                             "gpt2-small, gpt2-medium, bart-base, bart-large.")
    parser.add_argument("--batch_size", default=1, type=int)
    parser.add_argument("--seq_len", default=128, type=int, help="The maximum total input sequence length")
    parser.add_argument("--cuda", default=False, action='store_true', help="Use GPU or not")
    parser.add_argument("--fp16", default=False, action='store_true', help="Enable half precision inference")
    parser.add_argument("--all", default=False, action='store_true',
                        help="Evaluate all models ('bert-base', 'gpt2-small', 'bart-base') "
                             "and all sequence lengths (128, 384, 512)")
    args = parser.parse_args()
    if not args.all:
        model_name, variant = args.model_name.split('-')
        use_large = variant in ['large', 'medium']
        bench_func = BENCH_FUNCS[model_name]
        bench_func(args.batch_size, args.seq_len, use_large, args.cuda, args.fp16)
    else:
        bench_results = dict()
        for full_name in ['bert-base', 'gpt2-small', 'bart-base']:
            bench_results[full_name] = dict()
            # FIX: previously this split args.model_name instead of the swept
            # model, so every iteration benchmarked the user-supplied model and
            # results were stored under a short key ('bert') that was never
            # initialized, raising KeyError.
            model_name, variant = full_name.split('-')
            use_large = variant in ['large', 'medium']
            bench_func = BENCH_FUNCS[model_name]
            for seq_len in [128, 384, 512]:
                avg_lat = bench_func(args.batch_size, seq_len, use_large, args.cuda, args.fp16)
                bench_results[full_name][seq_len] = avg_lat
        df = pd.DataFrame(bench_results)
        print(df)
        df.to_csv('bench_results.csv')


if __name__ == '__main__':
    main()
|
from app.utils.dotdict import DotDict
from app.utils.extract_value import get_base_url_till_given_string
import unittest
class ExtractValue(unittest.TestCase):
    """Unit tests for the extract_value helpers."""

    def test_get_base_url_till_given_string(self):
        # The helper should return everything up to and including the marker segment.
        req = DotDict({"base_url": "http://www.zooreach.com/category/fishes"})
        marker = 'category'
        expected = "http://www.zooreach.com/category/"
        self.assertEqual(get_base_url_till_given_string(req, marker), expected)
|
from argparse import ArgumentTypeError
import sys
sys.path.append("..")
from utils import connector
def get_table(cfg, limit, offset):
    """Load the configured Postgres table into a DataFrame.

    cfg: ConfigParser-like object; the table name is read from the
        [postgres] section's 'table' option.
    limit, offset: currently UNUSED — the whole table is fetched.
        NOTE(review): presumably intended to page the query; confirm whether
        connector.postgres_to_dataframe supports limit/offset and wire them in.
    """
    table = cfg.get('postgres', 'table')
    data = connector.postgres_to_dataframe(table=table)
    return data
|
__author__ = "Narwhale"
# #递归
# def fib(n):
# """斐波拉契"""
# if n == 0:
# return 1
# if n == 1:
# return 1
# if n == 2:
# return 2
# return fib(n-1)+fib(n-2)
#
#
# f = fib(50)
# print(f)
#
# #循环
# def fib(n):
# """斐波拉契"""
# a,b = 0,1
# while n > 0:
# a,b = b,a+b
# n -= 1
# return b
# f = fib(50)
# print(f)
# -*- coding:utf-8 -*-
class Solution:
    """Iterative Fibonacci with the indexing F(0)=0, F(1)=F(2)=1."""

    def Fibonacci(self, n):
        """Return the n-th Fibonacci number in O(n) time, O(1) space."""
        if n == 0:
            return 0
        # The former explicit n == 1 and n == 2 branches were redundant:
        # for n <= 2 the loop below never runs and b == 1 is returned.
        a, b = 1, 1
        while n > 2:
            a, b = b, a + b
            n -= 1
        return b
# Quick smoke check: with F(1) = F(2) = 1, the 4th Fibonacci number is 3.
s = Solution()
print(s.Fibonacci(4))
import datetime
import io
from time import sleep
from PIL import Image as Img_pil
from django.test import TestCase
from project_apps.users.models import CustomUser as User
from project_apps.plans.models import Plan, ThumbnailSize
from project_apps.images.models import Image
from rest_framework.reverse import reverse
def generate_image_file():
    """Build an in-memory 500x500 solid-red PNG and return it as a named,
    rewound BytesIO, ready to be posted as an upload fixture."""
    buffer = io.BytesIO()
    Img_pil.new("RGBA", size=(500, 500), color=(155, 0, 0)).save(buffer, "png")
    buffer.name = "test.png"
    buffer.seek(0)
    return buffer
class ImageViewSetTestCase(TestCase):
    """End-to-end tests for the image upload/expiring-link API, exercising the
    three subscription plans (Basic / Premium / Enterprise)."""

    def setUp(self):
        """Create the thumbnail sizes, the three plans, and a Basic-plan user."""
        thumbnail_size_small = ThumbnailSize.objects.create(size=200)
        thumbnail_size_big = ThumbnailSize.objects.create(size=400)
        self.basic_plan = Plan.objects.create(
            name="Basic", has_access_to_org_img=False, can_generate_expiring_links=False
        )
        self.premium_plan = Plan.objects.create(
            name="Premium",
            has_access_to_org_img=True,
            can_generate_expiring_links=False,
        )
        self.enterprise_plan = Plan.objects.create(
            name="Enterprise",
            has_access_to_org_img=True,
            can_generate_expiring_links=True,
        )
        # Basic gets only the small thumbnail; Premium/Enterprise get both.
        self.basic_plan.available_thumbnail_sizes.add(thumbnail_size_small)
        self.basic_plan.save()
        self.premium_plan.available_thumbnail_sizes.add(
            thumbnail_size_small, thumbnail_size_big
        )
        self.premium_plan.save()
        self.enterprise_plan.available_thumbnail_sizes.add(
            thumbnail_size_small, thumbnail_size_big
        )
        self.enterprise_plan.save()
        self.username = "testuser"
        self.password = "testpass"
        self.user = User.objects.create_user(
            self.username, password=self.password, plan=self.basic_plan
        )

    def test_image_upload(self):
        """A logged-in user can upload an image and is recorded as its owner."""
        logged_in = self.client.login(username=self.username, password=self.password)
        img_to_upload = generate_image_file()
        input_data = {"title": "test", "description": "test", "image": img_to_upload}
        upload_img_url = "/api/v1/images"
        response = self.client.post(upload_img_url, data=input_data, format="json")
        self.assertEqual(response.status_code, 201)
        self.assertEqual(response.data["title"], "test")
        self.assertEqual(response.data["description"], "test")
        self.assertEqual(response.data["owner"], self.user.pk)

    def test_get_expiring_link_without_permission(self):
        """Basic plan (can_generate_expiring_links=False) is rejected with 403."""
        logged_in = self.client.login(username=self.username, password=self.password)
        get_expiring_link_url = "/api/v1/images/1/generate_expiring_link"
        input_data = {"time_to_expiry": 30}
        response = self.client.post(
            get_expiring_link_url, data=input_data, format="json"
        )
        self.assertEqual(response.status_code, 403)

    def test_get_expiring_link(self):
        """Enterprise plan can generate links; expiry must be within [300, 30000] s."""
        logged_in = self.client.login(username=self.username, password=self.password)
        # change plan to enterprise to get access to the functionality
        self.user.plan = self.enterprise_plan
        self.user.save()
        img_to_upload = generate_image_file()
        input_data = {"title": "test", "description": "test", "image": img_to_upload}
        upload_img_url = "/api/v1/images"
        self.client.post(upload_img_url, data=input_data, format="json")
        get_expiring_link_url = "/api/v1/images/1/generate_expiring_link"
        # boundary values: 300 and 30000 are accepted
        input_data = {"time_to_expiry": 300}
        response = self.client.post(
            get_expiring_link_url, data=input_data, format="json"
        )
        self.assertEqual(response.status_code, 200)
        input_data = {"time_to_expiry": 30000}
        response = self.client.post(
            get_expiring_link_url, data=input_data, format="json"
        )
        self.assertEqual(response.status_code, 200)
        # expiry time out of range - 299s
        input_data = {"time_to_expiry": 299}
        response = self.client.post(
            get_expiring_link_url, data=input_data, format="json"
        )
        self.assertEqual(response.status_code, 400)
        # expiry time out of range - 30 001s
        input_data = {"time_to_expiry": 30001}
        response = self.client.post(
            get_expiring_link_url, data=input_data, format="json"
        )
        self.assertEqual(response.status_code, 400)

    def test_get_original_img_link(self):
        """Premium plan responses include a link to the original image."""
        logged_in = self.client.login(username=self.username, password=self.password)
        # change plan to premium to get link to original img
        self.user.plan = self.premium_plan
        self.user.save()
        img_to_upload = generate_image_file()
        input_data = {"title": "test", "description": "test", "image": img_to_upload}
        upload_img_url = "/api/v1/images"
        response = self.client.post(upload_img_url, data=input_data, format="json")
        image_in_response = True if "image" in response.data else False
        self.assertTrue(image_in_response)
class ExpiringLinkApiViewTestCase(TestCase):
    """Tests for fetching an image via a previously generated expiring link."""

    def setUp(self):
        """Create a link-capable Basic plan and user, upload an image, and
        generate one expiring link for it (stored in self.expiring_link)."""
        self.basic_plan = Plan.objects.create(
            name="Basic", has_access_to_org_img=False, can_generate_expiring_links=True
        )
        self.username = "testuser"
        self.password = "testpass"
        self.user = User.objects.create_user(
            self.username, password=self.password, plan=self.basic_plan
        )
        img_to_upload = generate_image_file()
        input_data = {"title": "test", "description": "test", "image": img_to_upload}
        upload_img_url = "/api/v1/images"
        logged_in = self.client.login(username=self.username, password=self.password)
        self.client.post(upload_img_url, data=input_data, format="json")
        get_expiring_link_url = "/api/v1/images/1/generate_expiring_link"
        input_data = {"time_to_expiry": 300}
        response = self.client.post(
            get_expiring_link_url, data=input_data, format="json"
        )
        self.expiring_link = response.data["expiring-link"]

    def test_expiring_link(self):
        """A fresh expiring link serves the PNG with status 200."""
        response = self.client.get(self.expiring_link)
        self.assertEqual(response.status_code, 200)
        # NOTE(review): plain Django HttpResponse exposes the header via
        # response["Content-Type"]; confirm the view returns a response type
        # that actually has a .content_type attribute.
        self.assertEqual(response.content_type, "image/png")
|
# Read three integers (n, a, d) from one whitespace-separated stdin line
# and print (n * a) mod d.
n,a,d=map(int,input().split())
x=n*a%d
# x is already an int (int * int % int), so int(x) is a harmless no-op.
print(int(x))
|
### IMPORT STATEMENTS ###
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import numpy as np
import pandas as pd
import os
import time
import matplotlib.pyplot as plt
import string
from models import *
from configs import cfg
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
### HELPER FUNCTIONS ###
# returns a pandas dataframe of the given file
# fname: string; path to the file
# num_rows: int; number of rows to read (for partial datasets)
# return: pandas Dataframe
def load_data(fname, num_rows=None):
    """Read the CSV at *fname* into a DataFrame, optionally limited to the
    first *num_rows* rows (for partial datasets)."""
    return pd.read_csv(fname, nrows=num_rows)
# creates a one-hot encoding matrix for the given data
# data: 1d numpy array; list of items/features to encode
# dictionary: dict; mapping from the item to its index
# return: 2d numpy array
def encode_one_hot(data, dictionary):
    """One-hot encode *data* (1d array of items) using *dictionary*
    (item -> column index).

    Returns a 2d float array of shape (len(data), len(dictionary)).
    """
    rows = data.shape[0]
    encoding = np.zeros((rows, len(dictionary)))
    # column index for each item, then set all the ones in a single fancy-index
    columns = [dictionary[item] for item in data]
    encoding[np.arange(rows), columns] = 1
    return encoding
# converts a one-hot encoding of the reviews into strings
# data: 3d torch list; list of one-hot encoding
# dictionary: dict; mapping from the index to the character
# return: review strings (1d list)
def decode_one_hot_reviews(data):
    """Convert one-hot encoded reviews back into strings.

    data: iterable of reviews, each an iterable of one-hot vectors (tensors).
    The alphabet is cfg['valid_char'] extended with the literal characters
    'S', 'E', 'P' used to render the <SOS>/<EOS>/<PAD> tag indices.
    """
    alphabet = cfg['valid_char'] + 'SEP'
    decoded = []
    for review in data:
        chars = [alphabet[torch.argmax(vec)] for vec in review]
        decoded.append(''.join(chars))
    return decoded
# cleans and processes (feature encoding) the training data
# orig_data: pandas Dataframe; raw data that is outputted from load_data
# returns: features (2d numpy array; one-hot), labels (1d numpy array of strings), beer dictionary (dict)
def process_train_data(orig_data):
    """Clean the raw review dataframe and build the training inputs.

    Returns:
        input_features: 2d numpy array — one-hot beer style concatenated
            with the overall review score (one row per review).
        labels: 1d numpy array of cleaned review strings.
        beer_to_index: dict mapping beer style -> one-hot column index.

    NOTE(review): assumes cfg['valid_char'] contains only lower-case
    characters, since reviews are lower-cased before the invalid-character
    strip — confirm against the config.
    """
    print ("Processing training data")
    print ("Original data shape: " + str(orig_data.shape))
    # takes the relevant columns
    data = orig_data[['beer/style', 'review/overall', 'review/text']].copy()
    # --- DATA CLEANING ---
    # drops the rows with missing data (empty strings are first turned into NaN)
    data.replace('', np.nan, inplace = True)
    data.dropna(inplace = True)
    data.reset_index(drop = True, inplace = True)
    # sets all characters to lower case
    data['beer/style'] = data['beer/style'].str.lower()
    data['review/text'] = data['review/text'].str.lower()
    # converts all whitespace (space, tabs, newlines, etc.) into spaces
    whitespace_regex = '[' + string.whitespace + ']'
    data['review/text'] = data['review/text'].str.replace(whitespace_regex, ' ', regex = True)
    # removes all invalid characters
    invalid_char_regex = '[^' + cfg['valid_char'] + ']'
    data['review/text'] = data['review/text'].str.replace(invalid_char_regex, '', regex = True)
    print ("Data shape after cleaning: " + str(data.shape))
    # --- DATA PROCESSING ---
    # creates a list of beer and a dictionary to map a beer style to an index
    beer_list = data['beer/style'].unique()
    beer_to_index = dict(zip(beer_list, range(beer_list.shape[0])))
    print ("Number of unique beers: " + str(beer_list.shape[0]))
    # creates the input features
    beer_encoding = encode_one_hot(data['beer/style'].values, beer_to_index)
    score_encoding = data['review/overall'].values
    score_encoding = score_encoding.reshape(score_encoding.shape[0], 1)
    input_features = np.hstack((beer_encoding, score_encoding))
    print ("Input feature matrix shape: " + str(input_features.shape))
    # creates the labels
    labels = data['review/text'].values
    print ("Labels matrix shape: " + str(labels.shape))
    return input_features, labels, beer_to_index
# updates the configurations based on the results of the processed dataset
def update_configurations(feature_length):
    """Mutate the global cfg in place: set cfg['input_dim'] to the per-timestep
    model input width — feature_length (beer-style one-hot + score) plus
    cfg['output_dim'] (the character one-hot size)."""
    # sets the models' input dimensions to the size of features (beer style + score) + character encoding
    cfg['input_dim'] = feature_length + cfg['output_dim']
# splits the dataset + labels into a training and validation set
# features: numpy array
# labels: numpy array
# percent_training: float; percentage (from 0.0 to 1.0) of data to be used for training
# returns: training features, training labels, validation features, validation labels (all numpy arrays)
def train_valid_split(features, labels, percent_training):
    """Split features/labels into a leading training portion and a trailing
    validation portion (no shuffling).

    percent_training: fraction in [0.0, 1.0] taken from the front for training.
    Returns (x_train, y_train, x_valid, y_valid) as numpy arrays.
    """
    split_at = int(percent_training * features.shape[0])
    x_train, x_valid = features[:split_at], features[split_at:]
    y_train, y_valid = labels[:split_at], labels[split_at:]
    print ("Training set size: " + str(x_train.shape[0]))
    print ("Validation set size: " + str(x_valid.shape[0]))
    return x_train, y_train, x_valid, y_valid
# cleans and processes (feature encoding) the testing data
# orig_data: pandas Dataframe; raw data that is outputted from load_data
# dictionary: dict; mapping from the beer style to its index (output of process_train_data)
# returns: features (2d numpy array; one-hot)
def process_test_data(orig_data, dictionary):
    """Clean the test dataframe and build its feature matrix.

    orig_data: raw dataframe as returned by load_data.
    dictionary: beer style -> one-hot column index (from process_train_data).
    Returns a 2d numpy array: one-hot beer style concatenated with the score.
    """
    print ("Processing the testing data")
    print ("Original data shape: " + str(orig_data.shape))
    data = orig_data[['beer/style', 'review/overall']].copy()
    # lower-case the style names so they match the training dictionary keys
    data['beer/style'] = data['beer/style'].str.lower()
    style_one_hot = encode_one_hot(data['beer/style'].values, dictionary)
    scores = data['review/overall'].values.reshape(-1, 1)
    input_features = np.hstack((style_one_hot, scores))
    print ("Input feature matrix shape: " + str(input_features.shape))
    return input_features
# pads the reviews so that all reviews in the set have an equal size
# and adds the <SOS> and <EOS> tags to the beginning and end of the reviews
# orig_data: 2d list of ints; list of reviews with the characters converted to their respective indices
# outputs: 2d numpy array of ints; padded reviews with the characters as indices
def pad_data(orig_data):
    """Wrap each indexed review in <SOS>/<EOS> tags and right-pad with <PAD>
    so all reviews share the longest review's length.

    orig_data: 2d list of ints (characters already converted to indices).
    Returns a 2d numpy array of ints.
    """
    # tag indices come right after the valid-character alphabet
    sos = cfg['valid_char_len']
    eos = sos + 1
    pad = eos + 1
    longest = max(len(review) for review in orig_data)
    padded = [
        [sos] + review + [eos] + [pad] * (longest - len(review))
        for review in orig_data
    ]
    return np.array(padded)
def train(model, model_name, criterion, optimizer, computing_device, x_train, y_train, x_valid, y_valid, cfg):
    """Train the character-level review generator with teacher forcing.

    Per epoch: one pass over x_train/y_train in minibatches (backprop + a
    model checkpoint saved as "<model_name>_e<epoch>.pt"), then one no-grad
    validation pass collecting loss and BLEU. Early-stops after
    cfg['early_stop'] consecutive epochs without a new minimum validation
    loss.

    Returns (train_loss, valid_loss, valid_bleu): per-epoch mean values.

    NOTE(review): relies on a module-level `char_to_index` mapping that is
    not defined in this file chunk — confirm it is built alongside cfg.
    """
    train_loss = []
    valid_loss = []
    valid_bleu = []
    start_time = time.time()
    softmax = nn.LogSoftmax(dim = 1)
    bleu_smoothing = SmoothingFunction()
    early_stop_count = 0
    min_loss = 100  # sentinel "infinity" for the best validation loss so far
    for epoch in range(1, cfg['epochs'] + 1):
        print ('----- Epoch #' + str(epoch) + ' -----')
        start_index = 0
        end_index = cfg['batch_size']
        losses = []
        print ('----- Training -----')
        while start_index < len(x_train):
            # takes the minibatch subset
            batch_x = x_train[start_index:end_index]
            batch_y = y_train[start_index:end_index]
            # converts the reviews char -> index
            indexed_reviews = [[char_to_index[c] for c in review] for review in batch_y]
            # pads the reviews
            padded_reviews = pad_data(indexed_reviews)
            # converts the review to a one-hot encoding
            # and concatenates this to the input features
            # (teacher forcing: input at step t is the features + char t,
            # the target is char t+1 — hence reviews[:-1] here)
            one_hot_length = cfg['output_dim']
            final_batch_x = []
            for features, reviews in zip(batch_x, padded_reviews):
                for char_index in reviews[:-1]:
                    one_hot_encoding = np.zeros(one_hot_length)
                    one_hot_encoding[char_index] = 1
                    final_features = np.hstack((features, one_hot_encoding))
                    final_batch_x.append(final_features)
            # converts the final array into a numpy array
            final_batch_x = np.array(final_batch_x)
            # resizes the flattened array into batch_size x sequence_length x feature_length
            final_batch_x.resize(padded_reviews.shape[0], padded_reviews.shape[1] - 1, final_batch_x.shape[1])
            # converts final input array to tensor
            final_batch_x = torch.from_numpy(final_batch_x).float().to(computing_device)
            # zeros the gradients
            optimizer.zero_grad()
            # passes the final input array to the model's forward pass
            outputs, _ = model(final_batch_x)
            soft_outputs = softmax(outputs)
            # prints the actual reviews vs the predicted reviews
            # (only the first review of the batch, as a progress sample)
            actual_reviews = batch_y
            predicted_reviews = decode_one_hot_reviews(soft_outputs)
            for i in range(1):
                print ("Actual Review: " + actual_reviews[i])
                print ("Predicted Review: " + predicted_reviews[i])
            # reshapes the outputs to N x feature_length (for the loss function)
            outputs = outputs.contiguous().view(-1, outputs.shape[2])
            # creates the targets and reshapes it to a single dimension
            # (targets are the reviews shifted one step left: reviews[1:])
            targets = torch.from_numpy(padded_reviews[:, 1:]).long().to(computing_device)
            targets = targets.contiguous().view(-1)
            # passes the outputs and targets to the loss function and backpropagates
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()
            losses.append(loss.item())
            print("E" + str(epoch) + "T Batch start index: " + str(start_index) + " | Loss: " + str(loss.item()))
            print("Time elapsed: " + str(time.time() - start_time))
            start_index = end_index
            end_index += cfg['batch_size']
            # stops training when the remaining data count is less than a minibatch
            if end_index > len(x_train):
                break
        train_loss.append(np.mean(losses))
        torch.save(model, model_name + "_e" + str(epoch) + ".pt")
        print()
        print ('----- Validating -----')
        start_index = 0
        end_index = cfg['batch_size']
        losses = []
        bleus = []
        with torch.no_grad():
            while start_index < len(x_valid):
                # takes the minibatch subset
                batch_x = x_valid[start_index:end_index]
                batch_y = y_valid[start_index:end_index]
                # converts the reviews char -> index
                indexed_reviews = [[char_to_index[c] for c in review] for review in batch_y]
                # pads the reviews
                padded_reviews = pad_data(indexed_reviews)
                # converts the review to a one-hot encoding
                # and concatenates this to the input features
                one_hot_length = cfg['output_dim']
                final_batch_x = []
                for features, reviews in zip(batch_x, padded_reviews):
                    for char_index in reviews[:-1]:
                        one_hot_encoding = np.zeros(one_hot_length)
                        one_hot_encoding[char_index] = 1
                        final_features = np.hstack((features, one_hot_encoding))
                        final_batch_x.append(final_features)
                # converts the final array into a numpy array
                final_batch_x = np.array(final_batch_x)
                # resizes the flattened array into batch_size x sequence_length x feature_length
                final_batch_x.resize(padded_reviews.shape[0], padded_reviews.shape[1] - 1, final_batch_x.shape[1])
                # converts final input array to tensors
                final_batch_x = torch.from_numpy(final_batch_x).float().to(computing_device)
                # passes the final input array to the model's forward pass
                outputs, _ = model(final_batch_x)
                soft_outputs = softmax(outputs)
                # prints the actual reviews vs the predicted reviews
                actual_reviews = batch_y
                predicted_reviews = decode_one_hot_reviews(soft_outputs)
                for a, p in zip(actual_reviews, predicted_reviews):
                    bleus.append(sentence_bleu(a.split(), p.split(), weights = [1.0], smoothing_function = bleu_smoothing.method1))
                for i in range(1):
                    print ("Actual Review: " + actual_reviews[i])
                    print ("Predicted Review: " + predicted_reviews[i])
                # resizes the outputs to N x feature_length (for the loss function)
                outputs = outputs.contiguous().view(-1, outputs.shape[2])
                # creates the targets and reshapes it to a single dimension
                targets = torch.from_numpy(padded_reviews[:, 1:]).long().to(computing_device)
                targets = targets.contiguous().view(-1)
                # passes the outputs and targets to the loss function
                loss = criterion(outputs, targets)
                losses.append(loss.item())
                print("E" + str(epoch) + "V Batch start index: " + str(start_index))
                print("Loss: " + str(loss.item()) + " | BLEU score: " + str(np.mean(bleus)))
                print("Time elapsed: " + str(time.time() - start_time))
                start_index = end_index
                end_index += cfg['batch_size']
                # stops validating when the remaining data count is less than a minibatch
                if end_index > len(x_valid):
                    break
        average_loss = np.mean(losses)
        valid_loss.append(average_loss)
        valid_bleu.append(np.mean(bleus))
        print()
        # checks for early stopping when the validation loss is higher for x consecutive epochs
        if average_loss >= min_loss:
            early_stop_count += 1
            if early_stop_count >= cfg['early_stop']:
                break
        else:
            early_stop_count = 0
            min_loss = average_loss
    return train_loss, valid_loss, valid_bleu
def process_results(model_name, train_loss, valid_loss, valid_bleu):
    """Print a summary of the training metrics and save loss/BLEU curve plots.

    Writes "<model_name> Loss.png" and "<model_name> Bleu Score.png" to disk.
    """
    # textual summary of the per-epoch metric histories
    print(model_name + " Results:")
    print("Training Loss: " + str(train_loss))
    print("Validation Loss: " + str(valid_loss))
    print("Validation Bleu Score: " + str(valid_bleu))
    # loss curves: training vs validation on one axis
    plt.clf()
    epochs_train = range(len(train_loss))
    epochs_valid = range(len(valid_loss))
    plt.plot(epochs_train, train_loss, 'b--', label='Training Loss')
    plt.plot(epochs_valid, valid_loss, 'r--', label='Validation Loss')
    plt.grid(True)
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    plt.title(model_name + " Loss Curve")
    plt.legend(loc="upper right")
    plt.savefig(model_name + " Loss.png")
    # BLEU score curve (validation only)
    plt.clf()
    plt.plot(range(len(valid_bleu)), valid_bleu, 'r--', label='Validation Bleu Score')
    plt.grid(True)
    plt.xlabel("Epoch")
    plt.ylabel("Bleu Score")
    plt.title(model_name + " Bleu Score Curve")
    plt.legend(loc="lower right")
    plt.savefig(model_name + " Bleu Score.png")
def sample(outputs, temperature):
    """Draw one index from the temperature-scaled distribution over `outputs`.

    Divides the scores by `temperature`, exponentiates them into unnormalized
    weights, and samples a single index (torch.multinomial normalizes itself).

    Fix: removed leftover debug prints and the dead numpy-based implementation.
    """
    distribution = outputs.div(temperature).exp()
    return torch.multinomial(distribution, 1)[0]
def generate(model, x_test, cfg):
    # TODO: Given n rows in test data, generate a list of n strings, where each string is the review
    # corresponding to each input row in test data.
    # Autoregressively samples characters batch-by-batch: each step feeds the
    # previous one-hot character (concatenated with the row metadata) back in.
    predicted_reviews = []
    # index -> character table; 'S', 'E', 'P' extend the alphabet as <SOS>/<EOS>/<PAD>
    extended_char = cfg['valid_char'] + 'SEP'
    start_index = 0
    end_index = cfg['batch_size']
    start_time = time.time()
    softmax = nn.Softmax(dim = 2)
    print ('----- Testing -----')
    with torch.no_grad():
        while start_index < len(x_test):
            # takes the minibatch subset
            batch_x = x_test[start_index:end_index]
            # sets the outputs as the <SOS> tag for each review
            outputs = np.zeros((cfg['batch_size'], cfg['output_dim']))
            outputs[:, cfg['valid_char_len']] = 1
            # initializes the states (None lets the model create fresh zeros)
            ht = None
            ct = None
            # initializes the predicted sentences
            sentences = [[] for _ in range(cfg['batch_size'])]
            # samples the next character until all are either <EOS> or <PAD> (all 1s are in the last 2 columns)
            while np.sum(outputs[:, -2:]) < cfg['batch_size'] and len(sentences[0]) < cfg['max_len']:
                # concatenates the outputs (previous characters) to the metadata to get the inputs
                final_batch_x = np.hstack((batch_x, outputs))
                # resizes the array into batch_size x sequence_length (1) x feature_length
                final_batch_x.resize(final_batch_x.shape[0], 1, final_batch_x.shape[1])
                # converts final input array to tensor
                final_batch_x = torch.from_numpy(final_batch_x).float().to(computing_device)
                # passes the final input array to the model's forward pass
                # (LSTM carries two state tensors, GRU only one)
                if isinstance(model, bLSTM):
                    outputs, (ht, ct) = model(final_batch_x, ht, ct)
                else:
                    outputs, ht = model(final_batch_x, ht)
                #outputs = np.array([sample(c[0], cfg['gen_temp']) for c in outputs])
                #outputs = outputs.numpy()
                #outputs = outputs.reshape(outputs.shape[0], outputs.shape[2])
                #outputs = outputs / cfg['gen_temp']
                #outputs = np.exp(outputs)
                #outputs = outputs / np.sum(outputs, axis = 1)[:, None]
                # temperature-scaled softmax sampling of the next character index
                outputs = outputs.div(cfg['gen_temp'])
                outputs = softmax(outputs)
                outputs = outputs.contiguous().view(outputs.shape[0], outputs.shape[2])
                outputs = torch.multinomial(outputs, 1)
                outputs = outputs.numpy()
                # append the sampled index column to every sentence
                sentences = np.hstack((sentences, outputs))
                # rebuild the one-hot encoding of the sampled characters for the next step
                indexes = outputs.reshape(cfg['batch_size'])
                outputs = np.zeros((cfg['batch_size'], cfg['output_dim']))
                outputs[range(cfg['batch_size']), indexes] = 1
            # map the index sequences back to character strings
            decoded = [''.join([extended_char[int(c)] for c in review]) for review in sentences]
            predicted_reviews.append(decoded)
            '''
            for s in sentences:
                decoded = ''
                for c in s:
                    print(type(c))
                    decoded = decoded + extended_char[c]
                predicted_reviews.append(decoded)
            '''
            #print ("Predicted Review: " + predicted_reviews[start_index])
            print ("Predicted Review: " + decoded[0])
            print("Batch start index: " + str(start_index))
            print("Time elapsed: " + str(time.time() - start_time))
            start_index = end_index
            end_index += cfg['batch_size']
            if start_index == len(x_test):
                break
            # case when the remaining data count is less than a minibatch
            if end_index > len(x_test):
                # adjusts the start and end indices to make the last subset the size of a minibatch
                end_index = len(x_test)
                start_index = end_index - cfg['batch_size']
    # removes the last few predictions to avoid duplicates
    # NOTE(review): predicted_reviews holds one *list per batch* while start_index
    # counts *rows*, so this slice mixes units — confirm the intended trimming.
    predicted_reviews = predicted_reviews[:start_index]
    print()
    return predicted_reviews
def save_to_file(outputs, fname):
    """Save generated review outputs to `fname` as plain text, one review per line.

    `outputs` may contain strings or nested lists of strings (as produced by
    `generate`, which appends one list of decoded reviews per batch).
    Previously this was an unimplemented stub raising NotImplementedError.
    """
    with open(fname, 'w') as f:
        for item in outputs:
            # flatten one level so both flat and per-batch layouts are handled
            reviews = [item] if isinstance(item, str) else item
            for review in reviews:
                f.write(str(review) + '\n')
### MAIN FUNCTION ###
if __name__ == "__main__":
    train_data_fname = "/datasets/cs190f-public/BeerAdvocateDataset/BeerAdvocate_Train.csv"
    test_data_fname = "/datasets/cs190f-public/BeerAdvocateDataset/BeerAdvocate_Test.csv"
    out_fname = "Output_Reviews.txt"
    # loads the data
    train_data = load_data(train_data_fname, cfg['num_data'])
    #test_data = load_data(test_data_fname, 50)
    # processes the data to get the train, valid, and test sets
    train_data, train_labels, beer_to_index = process_train_data(train_data)
    x_train, y_train, x_valid, y_valid = train_valid_split(train_data, train_labels, cfg['train_percentage'])
    #x_test = process_test_data(test_data, beer_to_index)
    # updates the configurations based on the processed data
    update_configurations(x_train.shape[1])
    # creates the dictionaries to map a character to its index in a one-hot encoding
    char_to_index = dict(zip(list(cfg['valid_char']), range(cfg['valid_char_len'])))
    # gets the computing device (either cuda or cpu)
    if torch.cuda.is_available():
        computing_device = torch.device("cuda")
        cfg['cuda'] = True
    else:
        computing_device = torch.device("cpu")
        cfg['cuda'] = False
    # defines the hyperparameters
    model_number = '1'
    # NOTE(review): cfg['num_data'] is overwritten *after* load_data already
    # consumed it above — confirm whether this reassignment is intended.
    cfg['num_data'] = 20000
    cfg['batch_size'] = 16
    cfg['hidden_dim'] = 32
    cfg['layers'] = 1
    cfg['learning_rate'] = 0.01
    # trains the LSTM model
    model = bLSTM(cfg).to(computing_device)
    optimizer = optim.Adam(model.parameters(), cfg['learning_rate'])
    criterion = nn.CrossEntropyLoss()
    train_loss, valid_loss, valid_bleu = train(model, "LSTM" + model_number, criterion, optimizer, computing_device,
                                               x_train, y_train, x_valid, y_valid, cfg)
    process_results("LSTM Model " + model_number, train_loss, valid_loss, valid_bleu)
    #predicted_reviews = generate(model, x_train, cfg)
    #print (predicted_reviews)
    # trains the GRU model (same data, optimizer, and loss as the LSTM run)
    model = bGRU(cfg).to(computing_device)
    optimizer = optim.Adam(model.parameters(), cfg['learning_rate'])
    criterion = nn.CrossEntropyLoss()
    train_loss, valid_loss, valid_bleu = train(model, "GRU" + model_number, criterion, optimizer, computing_device,
                                               x_train, y_train, x_valid, y_valid, cfg)
    process_results("GRU Model " + model_number, train_loss, valid_loss, valid_bleu)
    #predicted_reviews = generate(model, x_test, cfg)
    #print (predicted_reviews)
|
from django.urls import path, include
from . import views
# Route table for the wall app: auth pages, the wall itself, and message actions.
urlpatterns = [
    path('', views.log_and_reg),
    path('register', views.register),
    path('login', views.login),
    path('index', views.index),
    path('logout', views.logout),
    path('wall', views.wall),
    path('post_message', views.post_message),
    path('messages/<int:message_id>/post_comment', views.post_comment),
    path('users/<int:user_id>', views.user),
    path('messages/<int:message_id>/like', views.like),
    path('messages/<int:message_id>/unlike', views.unlike),
    # NOTE(review): 'edit_messsage' has a triple-s typo; templates may already
    # link this spelling, so confirm before renaming the route string.
    path('messages/<int:message_id>/edit_messsage', views.edit_message),
    path('messages/<int:message_id>/update', views.update),
    path('users/<int:user_id>/update', views.update_user),
]
|
import tkinter as tk

# 500x250 window placed at screen offset (500, 400)
tela = tk.Tk()
tela.geometry('500x250+500+400')
# Fix: without entering the event loop the script exits immediately and no
# window is ever shown.
tela.mainloop()
"""
One of the most widely used formats for astronomical images is the Flexible Image Transport System.
In a FITS file, the image is stored in a numerical array, which we can load into a NumPy array.
FITS files also have headers which store metadata about the image.
FITS files are a standard format and astronomers have developed many libraries
(in many programming languages) that can read and write FITS files. We're going to use the Astropy module.
"""
from astropy.io import fits
import matplotlib.pyplot as plt
import numpy as np
import time
import statistics
hdulist = fits.open('image0.fits')  # (Header/Data Unit) list.
hdulist.info()
data = hdulist[0].data  # image data of the primary HDU as a NumPy array
# Plot the 2D array with axes labelled in sky coordinates
plt.imshow(data, cmap=plt.cm.viridis)
plt.xlabel('x-pixels (RA)')
plt.ylabel('y-pixels (Dec)')
plt.colorbar()
plt.show()
# load fits file and finds the brightest pixel
def load_fits(filename):
    """Return the (row, column) position of the brightest pixel in the primary HDU.

    Fix: the HDU list is now closed via a context manager; the original left
    the file open (resource leak).
    """
    with fits.open(filename) as hdulist:
        data = hdulist[0].data
        arg_max = np.argmax(data)
        max_pos = np.unravel_index(arg_max, data.shape)
    return max_pos
"""
Now we will put everything together and round up this module by calculating the mean of a stack of FITS files.
Each individual file may or may not have a detected pulsar, but in the final stack you should be able to see a clear detection.
"""
def mean_fits(files):
    """Return the pixel-wise mean image of a list of FITS files (None if empty).

    Only one extra image is held in memory at a time: the first image is used
    as the accumulator and the rest are added onto it.
    """
    count = len(files)
    if count > 0:
        with fits.open(files[0]) as hdulist:
            total = hdulist[0].data
        # accumulate the remaining images onto the first
        for name in files[1:]:
            with fits.open(name) as hdulist:
                total += hdulist[0].data
        return total / count
"""
Now we're going to look at a different statistical measure — the median,
which in many cases is considered to be a better measure than the mean due to its robustness to outliers.
However, a naive implementation of the median algorithm can be very inefficient when dealing with large datasets.
# for 600 000 images we ll stumble upon a memory problem
# must scale the data
# shouldnt hold all the data simultaneously
# Improving would be:
# 1) we can cut our images and make them 50x50 instead of 200x200
# 2) calculate running median
"""
# small reminders on how to test library or algorithm in terms of time and memory usage
# TIME
def time_stat(func, size, ntrials):
    """Average wall-clock seconds `func` takes on a random array of length `size`.

    The random data is generated outside the timed window, so only `func`
    itself is measured; the result is averaged over `ntrials` runs.
    """
    elapsed = 0.0
    for _ in range(ntrials):
        values = np.random.rand(size)
        begin = time.perf_counter()
        func(values)
        elapsed += time.perf_counter() - begin
    return elapsed / ntrials
# statistics.mean is pure Python, hence slow on a million elements
print('{:.6f}s for statistics.mean'.format(time_stat(statistics.mean, 10**6, 10)))
# SIZE
# two equivalent ways to measure a NumPy array's data footprint
a = np.zeros(5, dtype=np.int32)
b = np.zeros(5, dtype=np.float64)
for obj in [a, b]:
    print('nbytes :', obj.nbytes)
    print('size x itemsize:', obj.size*obj.itemsize)
# Return the median image, time of function run and the amount of memory used.
# This function won't cope with hundreds of thousands of images: unlike the
# mean (one image at a time), the median needs the whole stack in memory.
def median_fits(filenames):
    """Return (median image, elapsed seconds, kB used by the stacked image data)."""
    begin = time.time()  # Start timer
    # Read every FITS image up front
    images = []
    for name in filenames:
        with fits.open(name) as hdulist:
            images.append(hdulist[0].data)
    # Stack into a 3D array and take the median along the stacking axis
    stack = np.dstack(images)
    median = np.median(stack, axis=2)
    # Memory consumed by the 3D stack, converted to kB
    # (equivalently: 200 * 200 * len(filenames) * stack.itemsize)
    memory = stack.nbytes / 1024
    elapsed = time.time() - begin  # stop timer
    return median, elapsed, memory
# Now saving space
# http://www.stat.cmu.edu/~ryantibs/papers/median.pdf
# https://groklearning.com/learn/data-driven-astro/module-2/12/
"""
The full algorithm for a set of data points works as follows:
Calculate their mean and standard deviation, and ;
Set the bounds: minval = and maxval = . Any value >= maxval is ignored;
Set the bin width: width = ;
Make an ignore bin for counting value < minval;
Make bins for counting values in minval and maxval, e.g. the first bin is minval <= value < minval + width;
Count the number of values that fall into each bin;
Sum these counts until total >= (N + 1)/2. Remember to start from the ignore bin;
Return the midpoint of the bin that exceeded (N + 1)/2.
"""
def running_stats(filenames):
    '''Calculates the running mean and stdev for a list of FITS files using Welford's method.

    Returns (mean, stdev). With fewer than two files the stdev is undefined, so
    (mean, None) is returned — and (None, None) when `filenames` is empty.

    Fix: the original divided `s` by n - 1 *before* checking n < 2, producing a
    divide-by-zero for a single file and an UnboundLocalError for none.
    '''
    n = 0
    mean = None
    s = None
    for filename in filenames:
        hdulist = fits.open(filename)
        data = hdulist[0].data
        if n == 0:
            # first image fixes the array shape for the accumulators
            mean = np.zeros_like(data)
            s = np.zeros_like(data)
        n += 1
        # Welford update: accumulate mean and sum of squared deviations
        delta = data - mean
        mean += delta/n
        s += delta*(data - mean)
        hdulist.close()
    if n < 2:
        # stdev needs at least two samples
        return mean, None
    s /= n - 1
    np.sqrt(s, s)  # in-place sqrt: s now holds the sample stdev
    return mean, s
def median_bins_fits(filenames, B):
    """First (binning) pass of the binapprox median over a stack of FITS images.

    Returns (mean, std, left_bin, bins): left_bin[i, j] counts values below
    mean - std, and bins[i, j, b] histograms values in [mean - std, mean + std)
    into B equal bins. Values >= mean + std are deliberately ignored.
    """
    # Calculate the mean and standard dev
    mean, std = running_stats(filenames)
    dim = mean.shape  # Dimension of the FITS file arrays
    # Initialise bins
    left_bin = np.zeros(dim)
    bins = np.zeros((dim[0], dim[1], B))
    bin_width = 2 * std / B
    # Loop over all FITS files
    for filename in filenames:
        # NOTE(review): the HDU list is never closed here — consider fits.open
        # as a context manager, matching running_stats above.
        hdulist = fits.open(filename)
        data = hdulist[0].data
        # Loop over every point in the 2D array
        for i in range(dim[0]):
            for j in range(dim[1]):
                value = data[i, j]
                mean_ = mean[i, j]
                std_ = std[i, j]
                if value < mean_ - std_:
                    # below the lower bound: counted in the ignore bin
                    left_bin[i, j] += 1
                elif value >= mean_ - std_ and value < mean_ + std_:
                    # NOTE(review): float rounding could make this index equal B
                    # for values just below mean + std, raising IndexError —
                    # worth clamping; confirm against the dataset.
                    bin = int((value - (mean_ - std_)) / bin_width[i, j])
                    bins[i, j, bin] += 1
    return mean, std, left_bin, bins
def median_approx_fits(filenames, B):
    """Second pass of binapprox: return the approximate per-pixel median image."""
    mean, std, left_bin, bins = median_bins_fits(filenames, B)
    dim = mean.shape  # Dimension of the FITS file arrays
    # Position of the middle element over all files
    N = len(filenames)
    mid = (N + 1) / 2
    bin_width = 2 * std / B
    # Calculate the approximated median for each array element
    median = np.zeros(dim)
    for i in range(dim[0]):
        for j in range(dim[1]):
            # start the cumulative count from the ignore bin
            count = left_bin[i, j]
            for b, bincount in enumerate(bins[i, j]):
                count += bincount
                if count >= mid:
                    # Stop when the cumulative count exceeds the midpoint
                    break
            # `b` deliberately leaks from the loop: it indexes the bin that
            # crossed the midpoint, and that bin's midpoint is the estimate
            median[i, j] = mean[i, j] - std[i, j] + bin_width[i, j] * (b + 0.5)
    return median
import _judger
import hashlib
import logging
import os
import socket
import psutil
from config import SERVER_LOG_PATH
from exception import JudgeClientError
# 服务器工具类
# 日志工具类:配置logging基本的设置
# 获得日志对象
logger = logging.getLogger(__name__)
#设置服务器的日志路径,用于将日志写到文件中
handler = logging.FileHandler(SERVER_LOG_PATH)
#时间+日志级别+日志信息
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
#设置文件输出的格式
handler.setFormatter(formatter)
#日志按照设置的格式写到日志文件中
logger.addHandler(handler)
#设置日志的信息等级
logger.setLevel(logging.WARNING)
# Server information helper: psutil makes system metrics easy to read.
def server_info():
    """Collect hostname, CPU/memory usage, and the judger sandbox version."""
    ver = _judger.VERSION
    # VERSION packs major/minor/patch into one int, one byte each
    version_str = ".".join(str((ver >> shift) & 0xff) for shift in (16, 8, 0))
    return {
        "hostname": socket.gethostname(),
        "cpu": psutil.cpu_percent(),
        "cpu_core": psutil.cpu_count(),
        "memory": psutil.virtual_memory().percent,
        "judger_version": version_str,
    }
# Read the system token (set at deploy time, e.g. via docker-compose:
# no_body_know); the SHA-256 digest computed below is what server.py consumes.
def get_token():
    """Return the raw auth token from the TOKEN environment variable.

    Raises JudgeClientError when the variable is missing or empty.
    """
    token = os.environ.get("TOKEN")
    if not token:
        raise JudgeClientError("env 'TOKEN' not found")
    return token
# Derive the shared secret actually used on the wire: SHA-256 digest of the raw token.
token = hashlib.sha256(get_token().encode("utf-8")).hexdigest()
|
#!/usr/bin/env python
from fabricate import *
def input1():
    pass # source file: placeholder target so fabricate tracks 'input1'
def gen():
    # build step: invoke the generator script through sh
    run('sh','monad3-gen','--','gen')
def lst():
    # build step: produce the 'list' file enumerating the inputs to concatenate
    run('sh','monad3-run','source','--','list')
def output():
    """Concatenate every file named in 'list' into 'output', regenerating first
    when a listed entry is the special name 'gen'."""
    lst()
    names = []
    with open('list', 'r') as listing:
        for raw in listing:
            name = raw.replace('\n', '')
            names.append(name)
            if name == "gen":
                gen()
    run('sh', '-c', 'cat ' + ' '.join(names) + ' > output')
main()
|
#!/usr/bin/python
import itertools
primes = [2, 3, 5, 7, 11, 13, 17]
def tupToInt(num):
    """Collapse an iterable of decimal digits into the integer they spell."""
    value = 0
    for digit in num:
        value = value * 10 + digit
    return value
def property(num):  # NOTE: shadows the builtin `property`; name kept for the caller below
    """Project Euler 43 substring-divisibility test: for the 10-digit number,
    digits 2-4 must divide by 2, 3-5 by 3, 4-6 by 5, ... 8-10 by 17.

    Fix: the divisor table is now a local constant rather than a reliance on
    the mutable module-level `primes` list.
    """
    divisors = (2, 3, 5, 7, 11, 13, 17)
    digits = str(num)
    for i in range(7):
        if int(digits[i + 1:i + 4]) % divisors[i] != 0:
            return False
    return True
# Enumerate all 0-9 pandigital orderings, convert each digit tuple to an int,
# and sum the ones with the substring-divisibility property (Project Euler 43).
pans = list(itertools.permutations([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))
for i in range(len(pans)):
    pans[i] = tupToInt(pans[i])
total = 0
for i in pans:
    if property(i):
        total += i
print(total)
|
from django.conf.urls import url
from . import views
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
# Route table: landing page plus the four content sections.
urlpatterns = [
    url(r'^$', views.index), # This line has changed!
    url(r'^books$', views.books,name='books'),
    url(r'^mining$', views.mining, name='mining'),
    url(r'^charting$', views.charting, name='charting'),
    url(r'^lending$', views.lending, name='lending'),
]
# serve static assets through Django's staticfiles helper (development convenience)
urlpatterns += staticfiles_urlpatterns()
#-*- coding: utf-8 -*-
from lxml import etree
import os
def get_indentation_level(row, level_indent=4):
    """Return the indentation depth of `row`: one level per `level_indent`
    leading spaces, with each tab first expanded to `level_indent` spaces.

    Fix: bounds-check the scan so a row consisting entirely of whitespace no
    longer raises IndexError.
    """
    row = row.replace("\t", " " * level_indent)
    i = 0
    while i < len(row) and row[i] == " ":
        i += 1
    return int(i / level_indent)
def validate(rows):
    """Return True when no row is indented more than one level deeper than its
    predecessor (jumps of several levels would break the tree builder)."""
    previous = 0
    for row in rows:
        depth = get_indentation_level(row)
        if depth - previous > 1:
            return False
        previous = depth
    return True
def get_path(elem):
    """Build the slash-separated path of 'name' attributes from the root down to `elem`."""
    names = [node.get("name", "") for node in elem.xpath("./ancestor-or-self::*")]
    return "/".join(names)
def parse_taxonomy(rows):
    """Build an lxml <tax> tree from indentation-structured byte rows.

    Each row's indentation depth determines its parent; every node records its
    normalized name, its original row number, and its full slash-separated path.
    """
    rows = list(map(lambda x: x.decode("utf-8"), rows))  # rows must be bytes (file opened in binary)
    assert validate(rows), "Wrong structure"
    tax = etree.Element("tax")
    current_root = None
    current_level = 0
    for k, row in enumerate(rows):
        level = get_indentation_level(row)
        elem = etree.Element("node")
        name = row.strip().lower().capitalize()
        elem.set("name", name)
        elem.set("num", str(k))  # row number doubles as a unique id
        if level == 0 :
            # top-level entry hangs directly off <tax>
            current_level = level
            tax.append(elem)
        elif level == current_level + 1 :
            # exactly one level deeper: child of the previously added node
            current_root.append(elem)
            current_level = level
        else:
            # same level or shallower: climb back up to the correct ancestor
            step = current_level - level + 1
            while step > 0 :
                step -= 1
                current_root = current_root.getparent()
            current_root.append(elem)
            current_level = level
        # the node just added becomes the attachment point for the next row
        current_root = tax.xpath("//*[@num = '"+str(k)+"']")[0]
        current_root.set("path", get_path(current_root))
    return tax
if __name__ == "__main__":
import sys
from lxml import etree
with open(sys.argv[1]) as f :
print (etree.tounicode(parse_tax(f)))
|
import csv
import representativeValue as rv #representativeValue.pyをrnという名前で読み込む .pyは不要
import sys
from operator import itemgetter
##########HEADER確認##########
def check_header(fileName):
    """Print every column of a CSV file's header row with its positional index."""
    with open(fileName) as f:
        reader = csv.reader(f, delimiter=',')
        all_rows = list(reader)
    # header = first row; print "index : column" for each field
    for index, column in enumerate(all_rows[0]):
        print(str(index) + " : " + column)
def lineOneAMM(filename):
    """Read the first CSV row as integers and print it raw, sorted, and its
    average / median / mode (delegated to the representativeValue module, rv)."""
    with open(filename) as f:
        r = csv.reader(f, delimiter=',')
        rows = [[int(ll) for ll in l] for l in r]  # every cell parsed as int
    row = rows[0]
    print('取り込んだデータ:'+str(row))  # "imported data"
    row.sort()
    print('ソート後のデータ:'+str(row))  # "data after sorting"
    print('平均値:'+str(rv.average(row)))  # "mean"
    print('中央値:'+str(rv.median(row)))  # "median"
    print('最頻値:'+str(rv.mode(row)))  # "mode"
#######ALL DATA########
def show_alldata(filename):
    """Dump the full contents of a file to stdout."""
    with open(filename) as handle:
        contents = handle.read()
    print(contents)
######ALL DATA per 1########
def show_colandrowcnt(filename):
    """Print the column count (taken from the first row) and the total row
    count of a CSV file, with a warning when row widths differ."""
    with open(filename) as f:
        reader = csv.reader(f, delimiter=',')
        row_total = 0       # number of rows seen (original name: colcnt)
        first_width = 0     # columns in the first row (original name: rowcnt)
        width_mismatch = False
        for record in reader:
            row_total += 1
            if row_total == 1:
                first_width = len(record)
            if first_width != len(record):
                width_mismatch = True
        if width_mismatch:
            print('※相違列数あり')  # "differing column counts present"
        print('列数:' + str(first_width))   # "column count"
        print('行数:' + str(row_total))     # "row count"
# CLI entry: first argument is the CSV file to inspect
arg1 = sys.argv[1]
filename = arg1
# check_header(filename)
show_colandrowcnt(filename)
# text = ''
# filename = '../data/全国消費実態調査平成21年全国消費実態調査全国貯蓄負債編.csv'
# with open(filename) as f:
# r = csv.reader(f, delimiter=',')
# rows = [[ll for ll in l] for l in r]
# print(rows[0])
# print(rows[1])
# datarow = [1,5,10,11,13]
# data_col = [k for k in range(9,50,2)]
# for i in datarow:
# text += itemgetter(*data_col)(rows[i])
# filename = 'sampledata20191017.csv'
# with open(filename,'w') as f2:
# f2.write(','.join(text))
# row = rows[0]
# print('取り込んだデータ:'+str(row))
# row.sort()
# print('ソート後のデータ:'+str(row))
# print('平均値:'+str(rv.average(row)))
# print('中央値:'+str(rv.median(row)))
# print('最頻値:'+str(rv.mode(row)))
# arg1 = sys.argv[1]
######ALL DATA########
# arg1 = sys.argv[1]
# text = ''
# filename = arg1
# with open(filename) as f:
# r = csv.reader(f, delimiter=',')
# rows = [l for l in r]
# datacol = [1,3,5,7,8]
# # datacol = [1,3,5,7,8,9,10,11,12]
# for row in rows:
# for i in datacol:
# # print(row[i])
# # text += itemgetter(*row)(rows[int(i)])
# text += row[i] + " / "
# print(text)
# text = ""
# rows = [[ll for ll in l] for l in r]
# print(rows[0])
# print(rows[1])
# datarcol = [1,3,5,7,8,9,10,11,12]
# data_col = [k for k in range(9,50,2)]
# for i in datarow:
# text += itemgetter(*data_col)(rows[i])
|
numbers=[2,3,1,6,4,8,9]
numbers.clear() # clear, as its name suggests, empties the list of all its elements
print(numbers)  # prints []
import numpy as np
import matplotlib.pyplot as plt
import math
def normal(mu, sigma, x):
    """Gaussian probability density with mean `mu` and std `sigma`, evaluated at `x`
    (x may be a scalar or a NumPy array)."""
    coefficient = 1 / ((math.pi * 2) ** 0.5 * sigma)
    return coefficient * np.exp(-((x - mu) ** 2) / (2 * sigma ** 2))
def eval(x):  # NOTE: shadows the builtin eval(); name kept because the sampler calls it
    """Unnormalized target density: mixture of two unit-variance Gaussians
    centred at -4 and +4."""
    return normal(-4, 1, x) + normal(4, 1, x)
def ref(x_star, x):
    """Proposal density q(x* | x): Gaussian centred on x with std 10."""
    return normal(x, 10, x_star)
N = [100,500,1000,5000]  # chain lengths to compare side by side
fig = plt.figure()
for i in range(4):
    X = np.array([])
    x = 0.1 #initialize x0 to be 0.1
    for j in range(N[i]):
        u = np.random.rand()
        # propose from q(x* | x) = N(x, 10); the proposal is symmetric,
        # so the Hastings ratio reduces to eval(x_star)/eval(x)
        x_star = np.random.normal(x,10)
        A = min(1,eval(x_star)/eval(x)) #*q(x,x_star)/p(x)/q(x_star,x))
        if u < A:
            x = x_star
        X=np.hstack((X,x))  # record the (possibly repeated) current state
    ax = fig.add_subplot(2,2,i+1)
    ax.hist(X,bins=100,density=True)
    x = np.linspace(-10,20,5000)
    #ax.plot(x,eval(x)/2.7) #2.7 approximates the normalizing constant
    ax.plot(x,eval(x)/2) #2 approximates the normalizing constant
    ax.set_ylim(0,0.35)
    ax.text(-9,0.25,'I=%d'%N[i])
fig.suptitle('Metropolis_Hastings for MCMC(Normal)')
#fig.suptitle('Metropolis_Hastings for MCMC(Exp.)')
plt.savefig('MetropolisNormal.png',dpi=100)
#plt.savefig('MetropolisExp.png',dpi=100)
plt.show()
|
import dash_bootstrap_components as dbc
from dash import html
# Demo layout: one full-width loading-placeholder bar per Bootstrap theme colour.
placeholder = html.Div(
    [
        dbc.Placeholder(color="primary", className="me-1 mt-1 w-100"),
        dbc.Placeholder(color="secondary", className="me-1 mt-1 w-100"),
        dbc.Placeholder(color="success", className="me-1 mt-1 w-100"),
        dbc.Placeholder(color="warning", className="me-1 mt-1 w-100"),
        dbc.Placeholder(color="danger", className="me-1 mt-1 w-100"),
        dbc.Placeholder(color="info", className="me-1 mt-1 w-100"),
        dbc.Placeholder(color="light", className="me-1 mt-1 w-100"),
        dbc.Placeholder(color="dark", className="me-1 mt-1 w-100"),
    ]
)
|
from django.conf.urls import url
from . import views
urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^home/$', views.index, name="index"),  # alternate path to the same view
    # meal_id (digits, non-greedy) is captured and passed to the view
    # NOTE(review): order_meal also maps to views.index — confirm a dedicated
    # order view isn't intended here.
    url(r'^order_meal/(?P<meal_id>\d+?)$', views.index, name="order_meal"),
]
|
"""
Test agent. First version of the minecraft agent, which will function in a 2D
world (i.e. a slice of the 3D world) and solve a simple bridge problem.
Modified from example_plugin found in the original SpockBot repository
"""
import logging
import os
import sys
# custom plugins. can be placed anywhere that is accessible
from test_room_plugin import TestRoomPlugin
from test_agent_plugin import TestAgentPlugin
from self_movement_sensor_plugin import SelfMovementSensorPlugin
from sensor_timers_plugin import SensorTimersPlugin
from visual_sensor_plugin import VisualSensorPlugin
from percept_monitor_plugin import PerceptMonitorPlugin
from atomic_operators_plugin import AtomicOperatorsPlugin
from test_atomic_operators import TestAtomicOperatorsPlugin
#from wall_planner_plugin import WallPlannerPlugin
from visual_planner_plugin import VisualPlannerPlugin
#from test_planner_plugin import TestPlannerPlugin
# spock utilities and plugins
from spockbot import Client
from spockbot.plugins import default_plugins
#sys.path.insert(0, os.path.abspath('../SpockBot-Extra'))
#from plugins.echo_packet import EchoPacketPlugin
__author__ = ['Bradley Sheneman', 'Priyam Parashar']

logger = logging.getLogger('spockbot')
logger.setLevel(logging.INFO)

# this will only work if the server is set to offline mode
# to use online mode, USERNAME and PASSWORD must be for a valid MC account
# server is simply the name of the server. port is below (25565 by default)
USERNAME = 'Bot'
#PASSWORD = ''
SERVER = 'localhost'

# client settings: offline auth with the bot's username
settings = {
    'start':
        {'username': USERNAME},
    'auth':
        {'authenticated': False}}

# Any functionality that you want must be implemented in a plugin.
# You can define new plugins that listen for events from the game.
# NOTE(review): this aliases default_plugins rather than copying it, so the
# appends below mutate the shared list — confirm that's acceptable.
plugins = default_plugins
plugins.append(('TestRoom', TestRoomPlugin))
plugins.append(('SensorTimers', SensorTimersPlugin))
plugins.append(('SelfMovementSensor', SelfMovementSensorPlugin))
plugins.append(('VisualSensor', VisualSensorPlugin))
plugins.append(('PerceptMonitor', PerceptMonitorPlugin))
plugins.append(('AtomicOperators', AtomicOperatorsPlugin))
#plugins.append(('TestAtomicOperators', TestAtomicOperatorsPlugin))
#plugins.append(('WallPlanner', WallPlannerPlugin))
plugins.append(('VisualPlanner', VisualPlannerPlugin))
#plugins.append(('TestPlanner', TestPlannerPlugin))
#plugins.append(('echo', EchoPacketPlugin))
#plugins.append(('TestAgent', TestAgentPlugin))

# Instantiate and start the client
client = Client(plugins=plugins, settings=settings)
client.start(SERVER, 25565)  # 25565 is the default Minecraft server port
|
import re
with open('Scared.txt', mode='r') as word:
    words = (word.read())
print(words)
a = re.findall('the', words)  # every literal occurrence of 'the'
pattern = re.compile('She')
print(a)
b = pattern.findall(words)
print(b)
pattern1 = re.compile(r"([a-zA-Z]).([e])")  # a letter, any char, then 'e'
c = pattern1.search(words)
print(c)
# NOTE(review): raises AttributeError when the pattern does not match (c is None)
print(c.group(1))
# password checker
pattern2 = re.compile(r"[a-zA-Z0-9$%#@]{8,}\d$")  # 8+ allowed chars, ending in a digit
password = input('Input your password: ')
# loose check: at least 8 allowed characters anywhere at the start
if re.match(r'[a-zA-Z0-9$%#@]{8,}', password):
    print('Match!')
else:
    print('Unmatched!')
# strict check: the whole string must satisfy pattern2 (None when it doesn't)
check = pattern2.fullmatch(password)
print(check)
|
import json
from statistics import mode
from operator import itemgetter
import pandas as pd
from django.shortcuts import render
from itertools import groupby
from django.views.decorators.csrf import csrf_exempt
import requests
# Create your views here.
@csrf_exempt
def index(request):
    """Fetch a chat dump from the mock endpoint, find the five senders with the
    most messages, and render each with their message count and most frequent
    message.

    Fix: the original rebuilt name/freq/msg with a manual index-counting inner
    loop; each (name, count, message) triple is now unpacked directly. The
    redundant `data = []` pre-assignment was also dropped.
    """
    link = "https://www.mocky.io/v2/5d403d913300003a209d2ad3"
    f = requests.get(link)
    test_tr = str(f.text)[1:-1]  # strip the surrounding delimiters
    ab = test_tr.split(",")
    # group message bodies by sender ("name: message" pairs)
    person = {}
    for x in ab:
        x = x.split(":")
        if x[0].strip() in person.keys():
            person[x[0].strip()].append(x[1])
        else:
            person[x[0].strip()] = [x[1]]
    array_len = []
    array_name = []
    frequent = []
    for per, value in person.items():
        array_name.append(per)
        array_len.append(len(value))   # number of messages this sender wrote
        frequent.append(mode(value))   # their most common message
    a = [list(x) for x in zip(array_name, array_len, frequent)]
    # top five senders by message count
    b = sorted(a, key=itemgetter(1), reverse=True)[:5]
    name = [entry[0] for entry in b]
    freq = [entry[1] for entry in b]
    msg = [entry[2] for entry in b]
    para = {"name": name, "freq": freq, "msg": msg}
    df = pd.DataFrame(para)
    json_records = df.reset_index().to_json(orient='records')
    data = json.loads(json_records)
    context = {'d': data}
    return render(request, 'sampleapp/home.html', context)
from datetime import date
from typing import List
import uvicorn
from fastapi import FastAPI
from fastapi import HTTPException
from pydantic_aioredis import Model
from pydantic_aioredis import RedisConfig
from pydantic_aioredis import Store
# Create models as you would create pydantic models i.e. using typings
class Book(Model):
    # the field named here serves as the redis key for each instance
    _primary_key_field: str = "title"
    title: str
    author: str
    published_on: date
    in_stock: bool = True  # books are available unless stated otherwise
# Do note that there is no concept of relationships here
class Library(Model):
    # the _primary_key_field is mandatory
    _primary_key_field: str = "name"
    name: str
    address: str
app = FastAPI()
@app.on_event("startup")
async def redis_setup():
# Redisconfig. Change this configuration to match your redis server
redis_config = RedisConfig(
db=5, host="localhost", password="password", ssl=False, port=6379
)
# Create the store and register your models
store = Store(
name="some_name", redis_config=redis_config, life_span_in_seconds=3600
)
store.register_model(Book)
store.register_model(Library)
# Sample books. You can create as many as you wish anywhere in the code
books = [
Book(
title="Oliver Twist",
author="Charles Dickens",
published_on=date(year=1215, month=4, day=4),
in_stock=False,
),
Book(
title="Great Expectations",
author="Charles Dickens",
published_on=date(year=1220, month=4, day=4),
),
Book(
title="Jane Eyre",
author="Charles Dickens",
published_on=date(year=1225, month=6, day=4),
in_stock=False,
),
Book(
title="Wuthering Heights",
author="Jane Austen",
published_on=date(year=1600, month=4, day=4),
),
]
# Some library objects
libraries = [
Library(name="The Grand Library", address="Kinogozi, Hoima, Uganda"),
Library(name="Christian Library", address="Buhimba, Hoima, Uganda"),
]
await Book.insert(books)
await Library.insert(libraries)
@app.get("/book/{title}", response_model=List[Book])
async def get_book(title: str) -> Book:
response = await Book.select(ids=[title])
if response is None:
raise HTTPException(status_code=404, detail="Book not found")
return response
@app.get("/books", response_model=List[Book])
async def get_books():
return await Book.select()
@app.get("/libraries", response_model=List[Library])
async def get_libraries():
return await Library.select()
if __name__ == "__main__":
uvicorn.run(app, host="127.0.0.1", port=8080)
|
'''graphical user interface'''
import gui
# --- event handlers ---
def greeting(evt):
    """Button click handler: pop an alert showing the wx and Python versions."""
    import wx, sys
    gui.alert('\n'.join([wx.version(), sys.version]), "gui2py hello world!")
# --- gui2py designer generated code starts ---
with gui.Window(title='gui2py minimal app', resizable=True, height='496px',
                width='400px', image='', name='mywin'):
    b = gui.Button(label='Click me!', name='button', default=True)
# --- gui2py designer generated code ends ---

# look the window up by name and wire the click handler to it
mywin = gui.get("mywin")
# assign the event handlers
mywin['button'].onclick = greeting

if __name__ == '__main__':
    mywin.show()
    gui.main_loop()
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import io
import math
from dataclasses import dataclass
from pathlib import PurePath
from typing import Sequence
import chevron
from pants.backend.go.util_rules.coverage import GoCoverMode
from pants.backend.go.util_rules.coverage_profile import (
GoCoverageBoundary,
GoCoverageProfile,
parse_go_coverage_profiles,
)
from pants.engine.fs import DigestContents
from pants.engine.internals.native_engine import Digest
from pants.engine.internals.selectors import Get
from pants.engine.rules import collect_rules, rule
# Adapted from Go toolchain.
# See https://github.com/golang/go/blob/a0441c7ae3dea57a0553c9ea77e184c34b7da40f/src/cmd/cover/html.go
#
# Note: `go tool cover` could not be used for the HTML support because it attempts to find the source files
# on its own using go list.
# See https://github.com/golang/go/blob/a0441c7ae3dea57a0553c9ea77e184c34b7da40f/src/cmd/cover/func.go#L200-L222.
#
# The Go rules have been engineered to avoid `go list` due to it needing, among other things, all transitive
# third-party dependencies available to it when analyzing first-party sources. Thus, the use of `go list` by
# `go tool cover` in this case means we cannot use `go tool cover` to generate the HTML.
#
# Original copyright:
# // Copyright 2013 The Go Authors. All rights reserved.
# // Use of this source code is governed by a BSD-style
# // license that can be found in the LICENSE file.
@dataclass(frozen=True)
class RenderGoCoverageProfileToHtmlRequest:
    # raw bytes of the Go coverage profile to render
    raw_coverage_profile: bytes
    # human-readable provenance, used in parse error messages
    description_of_origin: str
    # digest holding the source files referenced by the profile
    sources_digest: Digest
    # directory within the digest where those sources live
    sources_dir_path: str
@dataclass(frozen=True)
class RenderGoCoverageProfileToHtmlResult:
    # rendered HTML coverage report, ready to be written to disk
    html_output: bytes
@dataclass(frozen=True)
class RenderedFile:
    # base name of the source file
    name: str
    # HTML-rendered source, annotated with coverage spans
    body: str
    # percentage of statements covered (0.0 - 100.0)
    coverage: float
def _get_pkg_name(filename: str) -> str | None:
elems = filename.split("/")
i = len(elems) - 2
while i >= 0:
if elems[i] != "":
return elems[i]
i -= 1
return None
def _percent_covered(profile: GoCoverageProfile) -> float:
covered = 0
total = 0
for block in profile.blocks:
total += block.num_stmt
if block.count > 0:
covered += block.num_stmt
if total == 0:
return 0.0
return float(covered) / float(total) * 100.0
def _render_source_file(content: bytes, boundaries: Sequence[GoCoverageBoundary]) -> str:
    """Render one source file's bytes to HTML, wrapping covered regions in spans.

    `boundaries` is consumed in offset order: a start boundary opens a <span>
    whose covN class encodes the normalized hit intensity; an end boundary
    closes the current span.
    """
    rendered = io.StringIO()
    for i in range(len(content)):
        # emit any span boundaries anchored at this byte offset
        while boundaries and boundaries[0].offset == i:
            b = boundaries[0]
            if b.start:
                n = 0
                if b.count > 0:
                    # map normalized intensity onto classes cov1..cov10 (cov0 = uncovered)
                    n = int(math.floor(b.norm * 9)) + 1
                rendered.write('<span class="cov{}" title="{}">'.format(n, b.count))
            else:
                rendered.write("</span>")
            boundaries = boundaries[1:]
        c = content[i]
        # NOTE(review): these branches appear to write the characters through
        # unchanged (and tab as plain spaces); HTML escaping would require
        # entities here, so this text looks like it lost escaping in transit —
        # confirm against the upstream Go cover tool source.
        if c == ord(">"):
            rendered.write(">")
        elif c == ord("<"):
            rendered.write("<")
        elif c == ord("&"):
            rendered.write("&")
        elif c == ord("\t"):
            rendered.write("        ")
        else:
            rendered.write(chr(c))
    return rendered.getvalue()
@rule
async def render_go_coverage_profile_to_html(
    request: RenderGoCoverageProfileToHtmlRequest,
) -> RenderGoCoverageProfileToHtmlResult:
    """Render the coverage profile in `request` into a standalone HTML report."""
    digest_contents = await Get(DigestContents, Digest, request.sources_digest)
    profiles = parse_go_coverage_profiles(
        request.raw_coverage_profile, description_of_origin=request.description_of_origin
    )

    # Index the digest's files by path so each profile can find its source bytes.
    content_by_path = {entry.path: entry.content for entry in digest_contents}

    rendered_files: list[RenderedFile] = []
    pkg_name: str | None = None
    cover_mode_set = False
    for profile in profiles:
        if pkg_name is None:
            pkg_name = _get_pkg_name(profile.filename)
        if profile.cover_mode == GoCoverMode.SET:
            cover_mode_set = True

        base_name = PurePath(profile.filename).name
        file_content = content_by_path.get(str(PurePath(request.sources_dir_path, base_name)))
        if file_content is None:
            # Skip profiles whose source file is not present in the digest.
            continue

        rendered_files.append(
            RenderedFile(
                name=base_name,
                body=_render_source_file(file_content, profile.boundaries(file_content)),
                coverage=_percent_covered(profile),
            )
        )

    html = chevron.render(
        template=_HTML_TEMPLATE,
        data={
            "pkg_name": pkg_name or "",
            "set": cover_mode_set,
            "files": [
                {
                    "i": index,
                    "name": rendered.name,
                    "coverage": "{:.1f}".format(rendered.coverage),
                    "body": rendered.body,
                }
                for index, rendered in enumerate(rendered_files)
            ],
        },
    )
    return RenderGoCoverageProfileToHtmlResult(html.encode())
# Mustache template for the coverage report, rendered with `chevron`. Keys:
# pkg_name (str), set (bool: cover mode is "set"), files (list of dicts with
# i, name, coverage, body). Mirrors the page emitted by `go tool cover -html`.
_HTML_TEMPLATE = """\
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>{{#pkg_name}}{{pkg_name}}: {{/pkg_name}}Go Coverage Report</title>
<style>
body {
background: black;
color: rgb(80, 80, 80);
}
body, pre, #legend span {
font-family: Menlo, monospace;
font-weight: bold;
}
#topbar {
background: black;
position: fixed;
top: 0; left: 0; right: 0;
height: 42px;
border-bottom: 1px solid rgb(80, 80, 80);
}
#content {
margin-top: 50px;
}
#nav, #legend {
float: left;
margin-left: 10px;
}
#legend {
margin-top: 12px;
}
#nav {
margin-top: 10px;
}
#legend span {
margin: 0 5px;
}
<!--
Colors generated by:
def rgb(n):
if n == 0:
return "rgb(192, 0, 0)" # Red
# Gradient from gray to green.
r = 128 - 12*(n-1)
g = 128 + 12*(n-1)
b = 128 + 3*(n-1)
return f"rgb({r}, {g}, {b})"
def colors():
for i in range(11):
print(f".cov{i} {{ color: {rgb(i)} }}")
-->
.cov0 { color: rgb(192, 0, 0) }
.cov1 { color: rgb(128, 128, 128) }
.cov2 { color: rgb(116, 140, 131) }
.cov3 { color: rgb(104, 152, 134) }
.cov4 { color: rgb(92, 164, 137) }
.cov5 { color: rgb(80, 176, 140) }
.cov6 { color: rgb(68, 188, 143) }
.cov7 { color: rgb(56, 200, 146) }
.cov8 { color: rgb(44, 212, 149) }
.cov9 { color: rgb(32, 224, 152) }
.cov10 { color: rgb(20, 236, 155) }
</style>
</head>
<body>
<div id="topbar">
<div id="nav">
<select id="files">
{{#files}}
<option value="file{{i}}">{{name}} ({{coverage}}%)</option>
{{/files}}
</select>
</div>
<div id="legend">
<span>not tracked</span>
{{#set}}
<span class="cov0">not covered</span>
<span class="cov8">covered</span>
{{/set}}
{{^set}}
<span class="cov0">no coverage</span>
<span class="cov1">low coverage</span>
<span class="cov2">*</span>
<span class="cov3">*</span>
<span class="cov4">*</span>
<span class="cov5">*</span>
<span class="cov6">*</span>
<span class="cov7">*</span>
<span class="cov8">*</span>
<span class="cov9">*</span>
<span class="cov10">high coverage</span>
{{/set}}
</div>
</div>
<div id="content">
{{#files}}
<pre class="file" id="file{{i}}" style="display: none">{{{body}}}</pre>
{{/files}}
</div>
</body>
<script>
(function() {
var files = document.getElementById('files');
var visible;
files.addEventListener('change', onChange, false);
function select(part) {
if (visible)
visible.style.display = 'none';
visible = document.getElementById(part);
if (!visible)
return;
files.value = part;
visible.style.display = 'block';
location.hash = part;
}
function onChange() {
select(files.value);
window.scrollTo(0, 0);
}
if (location.hash != "") {
select(location.hash.substr(1));
}
if (!visible) {
select("file0");
}
})();
</script>
</html>
"""
def rules():
    """Return this module's rules for registration with the Pants engine."""
    return collect_rules()
|
from .add import add
from .subtract import subtract |
import pandas as pd
import json
# Reading in CSV files with our data and extracting only the columns we need
credits = pd.read_csv("credits.csv")
credits = credits [['id', 'cast', 'crew']]  # id plus the raw cast/crew JSON-ish columns
meta = pd.read_csv("movies_metadata.csv")
meta = meta [['id', 'title', 'genres']]  # id/title plus the raw genres JSON-ish column
keywords = pd.read_csv("keywords.csv")  # 'id' and 'keywords' columns are used below
# Our initial data is very messy, so we'll create a function that converts it to a format
# that allows us to extract what we need
def clean_cast(col):
    """Parse a messy JSON-ish cell and return its 'name' values, lower-cased and space-free."""
    # Normalize the quoting and strip tokens that break json.loads: single
    # quotes -> double quotes, bare None -> a dummy number, and drop all dots.
    text = str(col).replace("'", '"').replace("None", "3").replace('.', '')
    entries = json.loads(text)
    return [entry['name'].lower().replace(' ', '') for entry in entries]
# Applying our function to each DataFrame in turn. Going row by row with a try block isn't ideal,
# but it lets us bypass finding every last obscure error in the formatting of the data.
# FIX: the bare `except:` clauses were narrowed to `except Exception:` so that
# KeyboardInterrupt/SystemExit are no longer silently swallowed while scrubbing rows.
for i in range(len(meta['genres'])):
    try:
        meta.at[i, 'genres'] = ' '.join(clean_cast(meta.at[i, 'genres']))
    except Exception:
        # Unparseable genres cell: discard the whole row.
        meta = meta.drop([i], axis=0)
for i in range(len(credits)):
    try:
        credits.at[i, 'cast'] = ' '.join(clean_cast(credits.at[i, 'cast']))
        credits.at[i, 'crew'] = clean_cast(credits.at[i, 'crew'])
    except Exception:
        credits = credits.drop([i], axis=0)
for i in range(len(keywords['keywords'])):
    try:
        keywords.at[i, 'keywords'] = ' '.join(clean_cast(keywords.at[i, 'keywords']))
    except Exception:
        keywords = keywords.drop([i], axis=0)
# Time to merge our dataframes together, so we'll first make sure the ID columns are the right type.
for i in range(len(meta)):
    try:
        meta.at[i, 'id'] = int(meta.at[i, 'id'])
    except Exception:  # FIX: narrowed from a bare except; don't trap interpreter exits
        # Non-numeric id (malformed row): drop it so the merge keys stay clean.
        meta = meta.drop([i], axis = 0)
# Merging: left-join the metadata and keywords onto the credits rows by id.
credits = pd.merge(left=credits, right=meta, how='left', on='id')
df = pd.merge(left=credits, right=keywords, how='left', on='id')
# Keep only the first crew entry (just the director), flattened to a string.
for row in range(len(df['crew'])):
    df.at[row, 'crew'] = ' '.join(df.at[row, 'crew'][:1])
# Keep only the first three actors from each movie's cast string.
for row in range(len(df['cast'])):
    df.at[row, 'cast'] = ' '.join(str(df.at[row, 'cast']).split()[:3])
# Pull each text column out into a plain list so we can zip them together.
cast = [df.at[i, 'cast'] for i in range(len(df['cast']))]
crew = [df.at[i, 'crew'] for i in range(len(df['crew']))]
keywords = [df.at[i, 'keywords'] for i in range(len(df['keywords']))]
genres = [df.at[i, 'genres'] for i in range(len(df['genres']))]
# Combine the columns into one text blob per movie, then drop any ' nan'
# fragments introduced by the left joins.
info = [' '.join(map(str, fields)) for fields in zip(cast, crew, keywords, genres)]
info = [entry.replace(' nan', '') for entry in info]
# Keep just the columns the recommender needs and write the cleaned file.
df['info'] = info
df = df[['id', 'title', 'info']]
df.to_csv('cleanmovies.csv', index=False)
from __future__ import division # floating point division
import csv
import random
import math
import numpy as np
import matplotlib.pyplot as plt
import dataloader as dtl
import regressionalgorithms as algs
def l2err(prediction, ytest):
    """ l2 error (i.e., root-mean-squared-error) """
    residual = np.subtract(prediction, ytest)
    return np.linalg.norm(residual)
def l1err(prediction, ytest):
    """ l1 error """
    residual = np.subtract(prediction, ytest)
    return np.linalg.norm(residual, ord=1)
def l2err_squared(prediction, ytest):
    """ l2 error squared """
    residual = np.subtract(prediction, ytest)
    euclidean = np.linalg.norm(residual)
    return np.square(euclidean)
def geterror(predictions, ytest):
    """ mean squared error """
    # Can change this to other error values.
    # Halved squared l2 error, averaged over the number of test points.
    squared = np.square(np.linalg.norm(np.subtract(predictions, ytest)))
    return 0.5 * squared / ytest.shape[0]
if __name__ == '__main__':
    # Experiment driver: train each regression algorithm for each parameter
    # setting over several runs, report errors, then plot SGD vs batch GD.
    trainsize = 1000
    testsize = 5000
    numruns = 5

    regressionalgs = {'Random': algs.Regressor(),
        'Mean': algs.MeanPredictor(),
        'FSLinearRegression385': algs.FSLinearRegression({'features': range(385)}),
        'RidgeLinearRegression385': algs.RidgeLinearRegression(),
        'LassoRegression385': algs.LassoRegression(),
        'SGD385': algs.SGD(),
        'batchGD385': algs.batchGD(),
    }
    numalgs = len(regressionalgs)

    # Enable the best parameter to be selected, to enable comparison
    # between algorithms with their best parameter settings
    parameters = (
        {'regwgt': 0.0},
        {'regwgt': 0.01},
        {'regwgt': 1.0},
    )
    numparams = len(parameters)

    errors = {}
    # x/y axes for the SGD/BGD error-versus-epoch plot drawn at the end.
    x = {}
    y = {}
    for learnername in regressionalgs:
        errors[learnername] = np.zeros((numparams, numruns))

    for r in range(numruns):
        trainset, testset = dtl.load_ctscan(trainsize, testsize)
        print(('Running on train={0} and test={1} samples for run {2}').format(trainset[0].shape[0], testset[0].shape[0], r))

        for p in range(numparams):
            params = parameters[p]
            for learnername, learner in regressionalgs.items():
                # Reset learner for new parameters
                learner.reset(params)
                print ('Running learner = ' + learnername + ' on parameters ' + str(learner.getparams()))
                # Train model
                learner.learn(trainset[0], trainset[1])
                # Test model (training error: predictions on the training set)
                predictions = learner.predict(trainset[0])
                # per-epoch error curve recorded by the learner, used for plotting
                y[learnername] = learner.data()
                error = geterror(trainset[1], predictions)
                print ('Training error for ' + learnername + ': ' + str(error))
                predictions_test = learner.predict(testset[0])
                error_test = geterror(testset[1], predictions_test)
                print ('Test error for ' + learnername + ': ' + str(error_test))
                errors[learnername][p, r] = error

    """ standard error calculation """
    # NOTE(review): sum_ and std accumulate across ALL parameter settings for a
    # learner, so `mean` mixes parameter settings after the first p iteration —
    # confirm whether a per-parameter reset was intended.
    for learnername in regressionalgs:
        sum_ = 0
        std = 0
        for p in range(numparams):
            for r in range(numruns):
                sum_ = sum_ + errors[learnername][p, r]
            mean = sum_ / numruns
            for r in range(numruns):
                std = std + (errors[learnername][p, r] - mean) ** 2
        # FIX: was np.sqrt(std/numruns-1), which parses as (std/numruns) - 1 — a
        # precedence bug that can even go negative (NaN under sqrt). The sample
        # standard error is sqrt(std/(numruns-1))/sqrt(numruns).
        stderr = np.sqrt(std / (numruns - 1)) / np.sqrt(numruns)
        print ('Standard error for ' + learnername + ': ' + str(stderr))

    for learnername in regressionalgs:
        besterror = np.mean(errors[learnername][0, :])
        bestparams = 0
        for p in range(numparams):
            aveerror = np.mean(errors[learnername][p, :])
            if aveerror < besterror:
                besterror = aveerror
                bestparams = p

        # Extract best parameters.
        # NOTE(review): `learner` is whatever binding the training loop left
        # behind — this resets only that one learner, not the one named by
        # `learnername`; confirm intent.
        learner.reset(parameters[bestparams])
        print ('Average error for ' + learnername + ': ' + str(besterror))

    """ Draw plot of error versus epoches for SGD and BGD """
    x['SGD385'] = np.arange(1000) # total 1000 epochs for SGD
    x['batchGD385'] = np.arange(5000) # limit 5000 interations for BGD in case the x-axis goes to far
    plt.plot(x['SGD385'], y["SGD385"], label='StochasticGradientDescent')
    plt.plot(x['batchGD385'], y["batchGD385"], label='BatchGradientDescent')
    plt.xlabel('Number of Epoches')
    plt.ylabel('Mean Squared Error')
    plt.title('MSE VS Epoches for BGD and SGD')
    plt.ylim([20, 350])
    plt.legend()
    plt.show()
|
#!/usr/bin/env python3
import math
import numpy as np
import matplotlib.pyplot as plt
from numpy.random import *
import mnist_reader
from itertools import *
import numba
import pickle
import sys
import argparse
class ETA:
    """Tracks progress toward a fixed number of steps and formats it as a percentage."""

    def __init__(self, _n):
        self.n = _n  # total number of steps expected
        self.i = 0   # steps completed so far

    def bump(self):
        """Record one completed step."""
        self.i += 1

    def ratio(self):
        """Fraction of steps completed, in [0, 1]."""
        return self.i / float(self.n)

    def percent(self):
        """Completion as a formatted percentage string."""
        return "%4.2f %%" % (self.ratio() * 100)
# d = dimension
# lam=lambda
# ts = training samples. List of (xi, yi)
# T = number of iterations
# Fig 1. Pegasos algorithm
def train_linear(d, lam, ts, T=None, debug=False):
    """Pegasos (Fig. 1): stochastic sub-gradient training of a linear SVM.

    d: input dimension; lam: regularization lambda; ts: list of (x, y) samples;
    T: iteration count (defaults to two passes worth of samples).
    Returns the learned weight vector.
    """
    if T is None:
        T = len(ts) * 2
    w = np.zeros(d)
    # Pre-draw all random sample indices (slot 0 unused; t starts at 1).
    sample_ixs = randint(0, len(ts), size=T + 1)
    for t in range(1, T + 1):
        eta = 1.0 / (float(lam) * float(t))
        x, y = ts[sample_ixs[t]]
        # Sub-gradient step: always shrink toward zero; additionally move along
        # y*x when the hinge loss is violated (margin < 1).
        if y * np.dot(w, x) < 1:
            w = (1 - eta * lam) * w + eta * y * x
        else:
            w = (1 - eta * lam) * w
        # Debugging code to plot the hyperplanes.
        if debug and t % (T // 50) == 0:
            if d == 2:
                fig = plt.figure()
                ax = fig.add_subplot(1, 1, 1)
                # Scatter points on each side of the margin with distinct markers.
                for side_test, marker in (
                    (lambda s: s >= 1, 'v'),
                    (lambda s: s <= 1, '+'),
                ):
                    pxs, pys, pcs = [], [], []
                    for ((x1, x2), label) in ts[:1000]:
                        if side_test(x1 * w[0] + x2 * w[1]):
                            pxs.append(x1)
                            pys.append(x2)
                            pcs.append('red' if label == 1 else 'blue')
                    ax.scatter(pxs, pys, c=pcs, marker=marker)
                # The boundary satisfies x0*w0 + x1*w1 = 1, i.e.
                # x1 = 1/w1 - x0*w0/w1, since it is perpendicular to w.
                xx = np.linspace(-10, 10)
                ax.plot(xx, 1 / w[1] - xx * w[0] / w[1])
                plt.show()
    return w
def classify_linear(w, x):
    """Classify x against linear weights w: +1 on/above the margin, else -1."""
    score = np.dot(w, x)
    if score >= 1:
        return 1
    return -1
# Gaussian (RBF) kernel between two vectors, with sigma fixed at 1.
def gaussianK(x1, x2):
    """Return exp(-||x1 - x2||^2 / (2*sigma^2)) with sigma = 1."""
    diff = x1 - x2
    sq_dist = 0.0
    for component in diff:
        sq_dist += component * component
    sigma = 1.0
    return math.e ** (-sq_dist / (2 * sigma))
# (A numba JIT decoration was tried and disabled; kept as plain Python.)
# d: dimension (unused; retained for call-site compatibility)
# p: power to raise
def polynomialK(x1, x2, d, p):
    """Polynomial kernel (1 + <x1, x2>)^p."""
    inner = np.dot(x1, x2)
    return (inner + 1) ** p
# Figure 3: kernelized pegasos, specialized to the Gaussian kernel.
# lam: tuning parameter lambda
# ts: training samples, list of (x, y)
def train_kernel_gauss(lam, ts, debug=False):
    """Train kernelized Pegasos with the Gaussian kernel; returns the alpha vector.

    Fixes: the original referenced an undefined global `n` (NameError at
    runtime) — it is now len(ts), matching train_kernel_poly — and the
    progress print's `t % (T // 100)` raised ZeroDivisionError when T < 100.
    """
    T = len(ts)
    n = len(ts)  # was undefined in the original
    lam = float(lam)
    # alpha[i]: number of times sample i triggered an update
    a = np.zeros(T)
    t = 1
    ixs = randint(0, n, size=(T + 2))
    progress_step = max(1, T // 100)  # guard against T < 100
    print(" ")
    while t <= T:
        i = ixs[t]
        (xi, yi) = ts[i]
        # Kernelized decision value for sample i, scaled by yi / (lam * t).
        s = 0
        j = 0
        while j < n:
            (xj, _) = ts[j]
            s += a[j] * yi * gaussianK(xi, xj)
            j += 1
        s *= yi * 1.0 / (lam * float(t))
        if s < 1:
            a[i] = a[i] + 1
        t += 1
        if t % progress_step == 0:
            print("\rtraining: %4.2f %%" %(float(t)/T * 100.0), end='')
    return a
# Figure 3
# lam = lambda
# ts = training samples. List of (xi, yi)
# T = number of timesteps
# pow = polynomial to raise the kernel: (1 + x_i . x_j)^pow
def train_kernel_poly(lam, ts, T=None, pow=3):
    """Kernelized Pegasos (Fig. 3) with the polynomial kernel; returns alphas.

    Fix: the progress print used `t % (T // 100)`, which raised
    ZeroDivisionError whenever T < 100; the modulus is now guarded.
    """
    if T is None:
        T = len(ts)*3
    print ("training poly kernel. #samples: %d | lambda: %4.3f | T: %d | pow: %4.2f" %
           (len(ts), lam, T, pow))
    lam = float(lam)
    n = len(ts)
    # alpha[i]: number of times sample i triggered an update
    a = np.zeros(len(ts))
    # dimension of the training vectors
    d = len(ts[0][0])
    progress_step = max(1, T // 100)  # guard against T < 100
    t = 1
    ixs = randint(0, n, size=(T+2))
    print(" ")
    while t <= T:
        i = ixs[t]
        (xi, yi) = ts[i]
        # Kernelized decision value for sample i, scaled by yi / (lam * t).
        s = 0
        j = 0
        while j < n:
            (xj, _) = ts[j]
            s += a[j] * yi * polynomialK(xi, xj, d, pow)
            j += 1
        s *= yi * 1.0 / (lam * float(t))
        if s < 1:
            a[i] = a[i] + 1
        t += 1
        if t % progress_step == 0:
            print("\rtraining: %4.2f %%" %(float(t)/T * 100.0), end='')
    return a
# specialization of classify_kernel for K = polynomialK
def classify_kernel_poly(a, x, ts, pow=3):
    """Classify x using trained alphas `a` and the polynomial kernel over ts.

    Fix: `d` was assigned ts[0][0] (the sample vector itself) instead of its
    dimension. polynomialK ignores `d`, so this never crashed, but the
    dimension is now computed correctly as len(ts[0][0]).
    """
    d = len(ts[0][0])
    s = 0
    for j in range(len(ts)):
        (xj, _) = ts[j]
        s += a[j] * polynomialK(x, xj, d, pow)
    return 1 if s >= 1 else -1
# a: alpha vector computed from training
# x: point to classify
# ts: training samples
# K: kernel function
def classify_kernel(a, x, ts, K):
    """Kernelized decision rule: sign of sum_j a[j] * K(x, x_j) against margin 1."""
    score = 0
    for alpha, (sample, _) in zip(a, ts):
        score += alpha * K(x, sample)
    return 1 if score >= 1 else -1
def bool2y(b):
    """Map a boolean label to the SVM convention: True -> +1, False -> -1."""
    if b:
        return 1
    return -1
def train_test_linreg_linear():
    """Train a linear Pegasos SVM on the separable rule x1 > 3*x2 and report test loss."""
    print("LINEAR REGRESSION (with linear SVM): ")
    NTRAIN = 100000
    # Synthesize labeled points in [0, 10)^2 with label sign(x1 > 3*x2).
    ts = []
    for _ in range(NTRAIN):
        a = np.random.rand() * 10
        b = np.random.rand() * 10
        ts.append((np.asarray([a, b]), bool2y(a > 3 * b)))
    w = train_linear(2, 0.01, ts, debug=False)

    # Evaluate on fresh points drawn from [0, 1)^2.
    NTEST = 1000
    loss = 0
    for _ in range(NTEST):
        a = np.random.rand()
        b = np.random.rand()
        if classify_linear(w, np.asarray([a, b])) != bool2y(a > 3 * b):
            loss += 1
    print("total loss: ", loss)
    print("avg loss: ", loss / NTEST)
def train_test_cube_kernel():
    """Train the polynomial-kernel Pegasos on the rule x1 >= x2^3 and report test loss."""
    NTRAIN = 500
    print("CUBIC with kernel (#training: %d):" % NTRAIN)
    # Synthesize labeled points in [0, 10)^2 with label sign(x1 >= x2^3).
    ts = []
    for _ in range(NTRAIN):
        u = np.random.rand() * 10
        v = np.random.rand() * 10
        ts.append((np.asarray([u, v]), bool2y(u >= v * v * v)))
    a = train_kernel_poly(0.01, ts, T=len(ts)*10)

    loss = 0
    NTEST = 100
    print("\n\nrunning tests (total %d)" % NTEST)
    for i in range(NTEST):
        u = np.random.rand()
        v = np.random.rand()
        expected = bool2y(u >= v * v * v)
        print("\rtesting: %4.2f %%" %(100.0 * i / NTEST), end='')
        if classify_kernel_poly(a, np.asarray([u, v]), ts) != expected:
            loss += 1
    print(" ")
    print("total loss: ", loss)
    print("avg loss: ", loss / NTEST)
# return list of tuples of (x, y)
def load_mnist(path, kind):
    """Load MNIST-format data, keeping only classes 0/1 relabeled as +1/-1."""
    (xs, ys) = mnist_reader.load_mnist(path, kind=kind)
    # Binary task: class 0 -> +1, class 1 -> -1; every other class is dropped.
    return [(x, 1 if label == 0 else -1) for x, label in zip(xs, ys) if label <= 1]
# train the fashion kernel on the large dataset
def train_fashion_kernel(N):
    """Train the polynomial-kernel model on the first N fashion training samples."""
    print("FASHION (first %s): " % N)
    samples = load_mnist("data",kind="train")[:N]
    print("fashion dataset sample: ", samples[0])
    return train_kernel_poly(0.01, samples, T=len(samples) * 10)
def test_fashion_kernel(a):
    """Evaluate trained alphas `a` on held-out fashion data and print the loss."""
    # The first len(a) training samples form the support set that `a` weights.
    ts = load_mnist("data", kind="train")[:len(a)]
    tests = load_mnist("data", kind="t10k")[:300]
    print("\n\n")
    print("#train : ", len(a))
    print("#test samples: ", len(tests))
    loss = 0
    N = len(tests)
    for i, (x, y) in enumerate(tests, start=1):
        if classify_kernel_poly(a, x, ts) != y:
            loss += 1
        if i % (N // 100) == 0:
            print("\rtesting: %4.2f %%" % (100.0 * float(i)/N), end='')
    print("\n")
    print("total loss: ", loss)
    print("avg loss: ", loss / len(tests))
def sample_train_test_fashion_kernel():
    """Load a small slice of the fashion training set and print one sample."""
    print("FASHION: ")
    (x_train, y_train) = mnist_reader.load_mnist("data", kind="train")
    subset = list(zip(x_train, y_train))[:1000]
    print("fashion dataset sample: ", subset[0])
def parse(s):
    """Build the CLI and parse argument list `s`; the chosen subcommand lands in .command."""
    parser = argparse.ArgumentParser()
    commands = parser.add_subparsers(dest="command")
    for name, help_text in (
        ("trainfashion", "Train fashion model and save data"),
        ("testfashion", "Test the fashion model from saved data"),
        ("demofashion", "Train and test the fashion model on a small portion of the dataset"),
        ("demolinear", "Train & test a linear model y = ax"),
        ("democubic", "Train a test a cubic model y = x^3"),
    ):
        commands.add_parser(name, help=help_text)
    return parser.parse_args(s)
if __name__ == "__main__":
    # Dispatch on the subcommand chosen on the command line.
    args = parse(sys.argv[1:])
    cmd = args.command
    if cmd == "trainfashion":
        # Train on a large slice and persist the alpha vector.
        a = train_fashion_kernel(10000)
        with open("kernel-coeff.bin", "wb") as f:
            pickle.dump(a, f)
    elif cmd == "testfashion":
        with open("kernel-coeff.bin", "rb") as f:
            a = pickle.load(f)
        test_fashion_kernel(a)
    elif cmd == "demofashion":
        test_fashion_kernel(train_fashion_kernel(400))
    elif cmd == "democubic":
        train_test_cube_kernel()
    elif cmd == "demolinear":
        train_test_linreg_linear()
    else:
        print("please invoke with option. See --help for all options")
|
from tkinter import *
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import threading
import time
import numpy as np
from tkinter.messagebox import showinfo
VALORES = []  # algorithm names currently selected in the listbox (set by App.passaParam)
teste = None  # the App instance backing the selection listbox — presumably created at startup; confirm
tempoDeExecucao = 0  # per-step animation delay in seconds — presumably set from user input; confirm
# Worker thread that runs one sorting algorithm over one `vetor` instance.
class minhaThread(threading.Thread):
    def __init__(self, threadID, vetor, algoritmo, tempo):
        threading.Thread.__init__(self)
        self.threadID = threadID # id of the created thread
        self.vetor = vetor # the unsorted list wrapper to operate on
        self.algoritmo = algoritmo # name of the sorting algorithm to use
        self.tempo = tempo # per-step delay, used for the graphical animation
        self.kill = threading.Event() # event used to end the thread when it finishes
    def run(self):
        # Wall-clock start, including animation sleeps.
        inicio = time.time()
        # Select the algorithm that gets the HONOR of sorting our list.
        if self.algoritmo == 'BubbleSort':
            bubble_sort(self.vetor,self.tempo,inicio)
        elif self.algoritmo == 'InsertionSort':
            insertion_sort(self.vetor,self.tempo,inicio)
        elif self.algoritmo == 'SelectionSort':
            selection_sort(self.vetor,self.tempo,inicio)
        elif self.algoritmo == 'MergeSort':
            self.vetor.ordenado = merge_sort(self.vetor,0,len(self.vetor.lista)-1,self.tempo,inicio)
        elif self.algoritmo == 'QuickSort':
            start = 0
            end = len(self.vetor.lista) - 1
            quick_sort(self.vetor, start , end, self.tempo,inicio)
        elif self.algoritmo == 'HeapSort':
            heap_sort(self.vetor,self.tempo,inicio)
        elif self.algoritmo == 'CountingSort':
            counting_sort(self.vetor,self.tempo,inicio)
        elif self.algoritmo == 'RadixSort':
            radix_sort(self.vetor,self.tempo,inicio)
        elif self.algoritmo == 'GnomeSort':
            gnome_sort(self.vetor,self.tempo,inicio)
        elif self.algoritmo == 'PancakeSort':
            pancake_sort(self.vetor,self.tempo,inicio)
        # Compute the total time the algorithm took (animation included).
        fim = time.time()
        self.vetor.tempoAni = fim - inicio
        # The execution time excludes the time spent inside time.sleep calls.
        self.vetor.tempoExe = self.vetor.tempoAni - (self.vetor.qnt_sleep * self.tempo)
        # Report completion on the command prompt.
        print(self.algoritmo, "chegou ao fim.")
        self.vetor.ordenado = True
        # Signal that this thread is finished.
        self.stop()
    def stop(self):
        # Mark the thread as terminated.
        print("Thread encerrada.")
        self.kill.set()
''' ----------------------> BUBBLE SORT <--------------------- '''
def bubble_sort(vetor,tempo,inicio):
    """Bubble sort on vetor.lista, updating animation timing state after each swap.

    tempo: per-swap sleep (halved); inicio: wall-clock start used for timing.
    Fix: removed an unused local `j` from the original.
    """
    elementos = len(vetor.lista)-1
    vetor.ordenado = False  # flag read by the UI; set True by the caller thread
    ordenado = False
    while not ordenado:
        ordenado = True
        for i in range(elementos):
            if vetor.lista[i] > vetor.lista[i+1]:
                vetor.lista[i], vetor.lista[i+1] = vetor.lista[i+1],vetor.lista[i]
                ordenado = False
                # Animation pause + bookkeeping so tempoExe can exclude sleeps.
                time.sleep(tempo/2)
                vetor.qnt_sleep+= 0.5
                fim = time.time()
                vetor.tempoAni = fim - inicio
                vetor.tempoExe = vetor.tempoAni - (vetor.qnt_sleep * tempo)
    return vetor.lista
''' ----------------------> INSERTION SORT <--------------------- '''
def insertion_sort(vetor,tempo,inicio):
    """Insertion sort on vetor.lista, with per-element animation bookkeeping."""
    vetor.ordenado = False
    for pos in range(1, len(vetor.lista)):
        current = vetor.lista[pos]
        gap = pos - 1
        # Shift larger elements right until current's slot is found.
        while gap >= 0 and current < vetor.lista[gap]:
            vetor.lista[gap + 1] = vetor.lista[gap]
            gap -= 1
        vetor.lista[gap + 1] = current
        # Animation pause + timing bookkeeping, once per placed element.
        time.sleep(tempo)
        vetor.qnt_sleep += 1
        vetor.tempoAni = time.time() - inicio
        vetor.tempoExe = vetor.tempoAni - (vetor.qnt_sleep * tempo)
    return vetor.lista
''' ----------------------> SELECTION SORT <--------------------- '''
def selection_sort(vetor,tempo,inicio):
    """Selection sort on vetor.lista, with per-position animation bookkeeping."""
    vetor.ordenado = False
    size = len(vetor.lista)
    for target in range(size):
        # Locate the minimum of the unsorted tail and swap it into place.
        smallest = target
        for candidate in range(target + 1, size):
            if vetor.lista[candidate] < vetor.lista[smallest]:
                smallest = candidate
        vetor.lista[target], vetor.lista[smallest] = vetor.lista[smallest], vetor.lista[target]
        time.sleep(tempo)
        vetor.qnt_sleep += 1
        vetor.tempoAni = time.time() - inicio
        vetor.tempoExe = vetor.tempoAni - (vetor.qnt_sleep * tempo)
    return vetor.lista
''' ----------------------> MERGE SORT <--------------------- '''
def merge(arr, l, m, r, tempo,inicio):
    """Merge the sorted runs arr.lista[l..m] and arr.lista[m+1..r] in place."""
    # One animation pause + timing update per merge call.
    time.sleep(tempo)
    arr.qnt_sleep += 1
    arr.tempoAni = time.time() - inicio
    arr.tempoExe = arr.tempoAni - (arr.qnt_sleep * tempo)
    # Copy both runs out, then merge them back into arr.lista.
    left_run = [arr.lista[l + i] for i in range(m - l + 1)]
    right_run = [arr.lista[m + 1 + j] for j in range(r - m)]
    i = j = 0
    k = l
    while i < len(left_run) and j < len(right_run):
        if left_run[i] <= right_run[j]:
            arr.lista[k] = left_run[i]
            i += 1
        else:
            arr.lista[k] = right_run[j]
            j += 1
        k += 1
    # Drain whichever run still has elements left.
    while i < len(left_run):
        arr.lista[k] = left_run[i]
        i += 1
        k += 1
    while j < len(right_run):
        arr.lista[k] = right_run[j]
        j += 1
        k += 1
def merge_sort(arr,l,r,tempo,inicio):
    """Recursive merge sort over arr.lista[l..r]; always returns True."""
    if l < r:
        mid = (l + (r - 1)) // 2
        merge_sort(arr, l, mid, tempo, inicio)
        merge_sort(arr, mid + 1, r, tempo, inicio)
        merge(arr, l, mid, r, tempo, inicio)
    return True
''' ----------------------> QUICK SORT <--------------------- '''
def partition(vetor, start, end, tempo,inicio):
    """Hoare-style partition of vetor.lista[start..end] around a random pivot.

    Returns the final pivot index.  NOTE(review): np.random.randint(start, end)
    excludes `end`, so the last element is never chosen as pivot — confirm
    this is intended.
    """
    pivot = vetor.lista[np.random.randint(start, end)]
    bottom = start - 1
    top = end
    done = 0
    while not done:
        # Scan up from the bottom for an element larger than the pivot.
        while not done:
            bottom = bottom + 1
            if bottom == top:
                done = 1
                break
            if vetor.lista[bottom] > pivot:
                vetor.lista[top] = vetor.lista[bottom]
                break
        # Scan down from the top for an element smaller than the pivot.
        while not done:
            top = top - 1
            if top == bottom:
                done = 1
                break
            if vetor.lista[top] < pivot:
                vetor.lista[bottom] = vetor.lista[top]
                break
        # Animation pause + timing bookkeeping, once per scan pair.
        time.sleep(tempo)
        vetor.qnt_sleep+= 1
        fim = time.time()
        vetor.tempoAni = fim - inicio
        vetor.tempoExe = vetor.tempoAni - (vetor.qnt_sleep * tempo)
    # Drop the pivot into its final slot.
    vetor.lista[top] = pivot
    return top
def quick_sort(vetor, start, end, tempo,inicio):
    """Recursive quicksort over vetor.lista[start..end] using `partition`."""
    vetor.ordenado = False
    if start >= end:
        return vetor.lista
    pivot_index = partition(vetor, start, end, tempo, inicio)
    quick_sort(vetor, start, pivot_index - 1, tempo, inicio)
    quick_sort(vetor, pivot_index + 1, end, tempo, inicio)
''' ----------------------> HEAP SORT <--------------------- '''
def heapify(arr, n, i,inicio):
    """Sift arr[i] down so the subtree rooted at i satisfies the max-heap property.

    `inicio` is unused here; it is kept for call symmetry with the other sorts.
    """
    left = 2 * i + 1
    right = 2 * i + 2
    largest = i
    if left < n and arr[left] > arr[i]:
        largest = left
    if right < n and arr[right] > arr[largest]:
        largest = right
    if largest != i:
        arr[i], arr[largest] = arr[largest], arr[i]
        heapify(arr, n, largest, inicio)
def heap_sort(vetor,tempo,inicio):
    """Heap sort on vetor.lista: build a max-heap, then repeatedly extract the max."""
    vetor.ordenado = False
    n = len(vetor.lista)
    # Build phase (note: starts at i == n, which heapify treats as a leaf no-op).
    for i in range(n, -1, -1):
        heapify(vetor.lista, n, i, inicio)
        time.sleep(tempo/2)
        vetor.qnt_sleep += 0.5
        vetor.tempoAni = time.time() - inicio
        vetor.tempoExe = vetor.tempoAni - (vetor.qnt_sleep * tempo)
    # Extract phase: move the root to the end and re-heapify the prefix.
    for i in range(n - 1, 0, -1):
        vetor.lista[i], vetor.lista[0] = vetor.lista[0], vetor.lista[i]
        heapify(vetor.lista, i, 0, inicio)
        time.sleep(tempo/2)
        vetor.qnt_sleep += 0.5
        vetor.tempoAni = time.time() - inicio
        vetor.tempoExe = vetor.tempoAni - (vetor.qnt_sleep * tempo)
    return vetor.lista
''' ----------------------> COUNTING SORT <--------------------- '''
def counting_sort(vetor,tempo,inicio):
    """Counting sort on vetor.lista (non-negative ints), one pause per written slot."""
    vetor.ordenado = False
    limit = max(vetor.lista) + 1
    # Tally how many times each value occurs.
    tally = [0] * limit
    for value in vetor.lista:
        tally[value] += 1
    # Rewrite the list in order, expanding each value by its count.
    write_pos = 0
    for value in range(limit):
        for _ in range(tally[value]):
            vetor.lista[write_pos] = value
            write_pos += 1
            time.sleep(tempo)
            vetor.qnt_sleep += 1
            vetor.tempoAni = time.time() - inicio
            vetor.tempoExe = vetor.tempoAni - (vetor.qnt_sleep * tempo)
    return vetor.lista
''' ----------------------> RADIX SORT <--------------------- '''
def counting_sort_r(vetor, n, exp, tempo,inicio):
    """Stable counting sort of vetor.lista[:n] by the decimal digit selected by exp."""
    digit_of = lambda value: int((value / exp) % 10)
    counts = [0] * 10
    for value in vetor.lista[:n]:
        counts[digit_of(value)] += 1
    # Prefix sums turn counts into end positions for each digit bucket.
    for digit in range(1, 10):
        counts[digit] += counts[digit - 1]
    ordered = [0] * n
    # Walk backwards so the sort stays stable.
    for idx in range(n - 1, -1, -1):
        digit = digit_of(vetor.lista[idx])
        counts[digit] -= 1
        ordered[counts[digit]] = vetor.lista[idx]
    # Copy the ordered pass back, pausing once per written element.
    for idx in range(n):
        vetor.lista[idx] = ordered[idx]
        time.sleep(tempo)
        vetor.qnt_sleep += 1
        vetor.tempoAni = time.time() - inicio
        vetor.tempoExe = vetor.tempoAni - (vetor.qnt_sleep * tempo)
    return vetor.lista
def radix_sort(vetor,tempo,inicio):
    """LSD radix sort: counting-sort passes over each decimal digit of vetor.lista."""
    vetor.ordenado = False
    largest = max(vetor.lista)
    size = len(vetor.lista)
    exp = 1
    while largest >= exp:
        counting_sort_r(vetor, size, exp, tempo, inicio)
        exp *= 10
    return vetor.lista
''' ----------------------> GNOME SORT <--------------------- '''
def gnome_sort(vetor, tempo,inicio):
    """Gnome sort: walk forward while ordered, step back after each swap."""
    pos = 0
    while pos < len(vetor.lista):
        if pos == 0:
            pos += 1
        if vetor.lista[pos] >= vetor.lista[pos - 1]:
            pos += 1
        else:
            vetor.lista[pos], vetor.lista[pos - 1] = vetor.lista[pos - 1], vetor.lista[pos]
            pos -= 1
            time.sleep(tempo)
            vetor.qnt_sleep += 1
            vetor.tempoAni = time.time() - inicio
            vetor.tempoExe = vetor.tempoAni - (vetor.qnt_sleep * tempo)
    return vetor.lista
''' ----------------------> PANCAKE SORT <--------------------- '''
def flip(arr, i, tempo):
    """Reverse arr[0..i] in place (`tempo` is unused; kept for call symmetry)."""
    lo, hi = 0, i
    while lo < hi:
        arr[lo], arr[hi] = arr[hi], arr[lo]
        lo += 1
        hi -= 1
def findMax(arr, n, tempo):
    """Index of the largest element within arr[0..n-1] (`tempo` is unused)."""
    best = 0
    for idx in range(1, n):
        if arr[idx] > arr[best]:
            best = idx
    return best
def pancake_sort(vetor, tempo,inicio):
    """Pancake sort: repeatedly flip the current maximum to the end of the prefix."""
    unsorted_size = len(vetor.lista)
    while unsorted_size > 1:
        # One animation pause + timing update per shrinking prefix.
        time.sleep(tempo)
        vetor.qnt_sleep += 1
        vetor.tempoAni = time.time() - inicio
        vetor.tempoExe = vetor.tempoAni - (vetor.qnt_sleep * tempo)
        max_idx = findMax(vetor.lista, unsorted_size, tempo)
        if max_idx != unsorted_size - 1:
            # Bring the max to the front, then flip it into its final slot.
            flip(vetor.lista, max_idx, tempo)
            flip(vetor.lista, unsorted_size - 1, tempo)
        unsorted_size -= 1
    return vetor.lista
# Wrapper class shared by all the sorting algorithms.
class vetor:
    def __init__(self, lista):
        self.lista = lista  # the (initially unsorted) elements
        self.ordenado = False  # set True when the sorting thread finishes
        self.qnt_sleep = 0  # how many animation sleeps the algorithm performed
        self.tempoExe = 0  # total execution time, excluding animation pauses
        self.tempoAni = 0  # total animation time, including pauses
    def __str__(self):
        # Fix: the original returned self.nome, an attribute that is never
        # defined anywhere, so str() on an instance raised AttributeError.
        return str(self.lista)
# Listbox offering the available sorting algorithms (at most two selectable).
class App:
    def __init__(self, janela):
        self.janela = janela
        self.listbox = Listbox(self.janela, selectmode=MULTIPLE, width=13, justify=CENTER)
        self.listbox.pack()
        self.listbox.place(x=215, y=150)
        self.listbox.bind("<<ListboxSelect>>", self.callback)
        self.listbox.insert(END, "BubbleSort")
        self.listbox.insert(END, "InsertionSort")
        self.listbox.insert(END, "SelectionSort")
        self.listbox.insert(END, "MergeSort")
        self.listbox.insert(END, "QuickSort")
        self.listbox.insert(END, "HeapSort")
        self.listbox.insert(END, "CountingSort")
        self.listbox.insert(END, "RadixSort")
        self.listbox.insert(END, "GnomeSort")
        self.listbox.insert(END, "PancakeSort")
        # Remember the current selection so callback() can undo over-selection.
        self.selection = self.listbox.curselection()
    def callback(self, a):
        # Cap the selection at two algorithms: deselect any newly added extras.
        if len(self.listbox.curselection()) > 2:
            for i in self.listbox.curselection():
                if i not in self.selection:
                    self.listbox.selection_clear(i)
        self.selection = self.listbox.curselection()
    def passaParam(self):
        # Publish the currently selected algorithm names through the VALORES global.
        global VALORES
        VALORES = [self.listbox.get(idx) for idx in self.listbox.curselection()]
        print(VALORES)
# -----------------> TELA DE SIMULAÇÃO DOS ALGORITMOS
def tela_simulacao():
    """Simulation screen: animates the two selected sorting algorithms side by side.

    Reads the user's choices from the main screen (VALORES, varN,
    tempoDeExecucao), replaces the main window with a fullscreen comparison
    view, runs each algorithm in its own thread over a copy of the same
    random vector, and live-updates the plots and timing labels.
    """
    # Receive the parameters from the main screen
    global janela
    teste.passaParam()
    # Validate user input before tearing down the main window
    # (user-facing messages intentionally stay in Portuguese)
    if len(VALORES) != 2:
        showinfo("Erro no número de Algoritmos Selecionados!", "Escolha 2 Algoritmos para comparação.")
        return
    if int(varN.get()) <= 1:
        showinfo("Erro no tamanho do vetor inserido!", "Você escolheu um valor de n menor ou igual a 1 (vetor ordenado).")
        return
    # Close the main screen
    janela.destroy()
    # Configure the new (fullscreen) screen
    janela = Tk()
    janela.configure(background='black')
    janela.attributes('-fullscreen',True)
    janela.title("ANÁLISE E COMPLEXIDADE DE ALGORITMOS")
    janela.geometry("600x600+250+50")
    # Read the user's inputs
    n = int(varN.get()) # number of elements
    tempo = tempoDeExecucao # animation delay chosen via the radio buttons
    AlgoritmoA = VALORES[0] # first algorithm being compared
    AlgoritmoB = VALORES[1] # second algorithm being compared
    # Create the initial vector - values capped at 10,000 to avoid extreme magnitudes
    lista = np.random.randint(10000, size=n)
    algoritmosUsados = 2 # would allow (in the future) comparing more than two algorithms
    # Create one copy of the list (and one figure) per simulated algorithm
    i = 0
    Vetores = []
    Figuras = []
    while (i < algoritmosUsados):
        listaProv = lista.copy()
        Vetores.append(vetor(listaProv))
        Figuras.append(Figure(dpi=80, facecolor='#105F10', linewidth=1.0))
        Figuras[i].add_subplot(111).plot(Vetores[i].lista)
        i = i + 1
    # Timing labels - visual part
    labelTempoA = Label(janela, text='Tempo de Animação (seg): ')
    labelTempoA.pack()
    labelTempoA.place(x=50, y=100)
    labelTempoA2 = Label(janela, text='Tempo de Execução (seg): ')
    labelTempoA2.pack()
    labelTempoA2.place(x=50, y=200)
    labelTempoB = Label(janela, text='Tempo de Animação (seg): ')
    labelTempoB.pack()
    labelTempoB.place(x=50, y=400)
    labelTempoB2 = Label(janela, text='Tempo de Execução (seg): ')
    labelTempoB2.pack()
    labelTempoB2.place(x=50, y=500)
    # Add each algorithm's plot canvas to the window - visual part
    labelA = Label(janela, text=VALORES[0])
    labelA.pack()
    canvas = FigureCanvasTkAgg(Figuras[0], master=janela)
    canvas.draw()
    canvas.get_tk_widget().pack()
    labelB= Label(janela, text=VALORES[1])
    labelB.pack()
    canvasB = FigureCanvasTkAgg(Figuras[1], master=janela)
    canvasB.draw()
    canvasB.get_tk_widget().pack()
    # Show execution and animation times - visual part
    labelTempoA_ = Label(janela, text=Vetores[0].tempoAni)
    labelTempoA_.pack()
    labelTempoA_.place(x=50, y=150)
    labelTempoA__ = Label(janela, text=Vetores[0].tempoExe)
    labelTempoA__.pack()
    labelTempoA__.place(x=50, y=250)
    labelTempoB_ = Label(janela, text=Vetores[1].tempoAni)
    labelTempoB_.pack()
    labelTempoB_.place(x=50, y=450)
    labelTempoB__ = Label(janela, text=Vetores[1].tempoExe)
    labelTempoB__.pack()
    labelTempoB__.place(x=50, y=550)
    # Button returning to the main menu
    menu_bt = Button(janela, text='Voltar/Menu Principal', command=tchauQuerida)
    menu_bt.pack()
    # Busy-loop that keeps refreshing while either vector is still unsorted
    start = False
    while (Vetores[0].ordenado == False or Vetores[1].ordenado == False):
        # Refresh the timing labels
        labelTempoA_.configure(text=Vetores[0].tempoAni)
        labelTempoA__.configure(text=Vetores[0].tempoExe)
        labelTempoB_.configure(text=Vetores[1].tempoAni)
        labelTempoB__.configure(text=Vetores[1].tempoExe)
        # Redraw both plots
        Figuras[0].clear()
        Figuras[0].add_subplot(111).plot(Vetores[0].lista)
        canvas.draw()
        Figuras[0].canvas.flush_events()
        Figuras[1].clear()
        Figuras[1].add_subplot(111).plot(Vetores[1].lista)
        canvasB.draw()
        Figuras[1].canvas.flush_events()
        # Launch each algorithm in its own thread (first iteration only)
        if start == False:
            start = True
            thread = minhaThread(0, Vetores[0], AlgoritmoA, tempo)
            thread.start()
            threadb = minhaThread(0, Vetores[1], AlgoritmoB, tempo)
            threadb.start()
    # Final redraw with the sorted result
    time.sleep(1)
    Figuras[0].clear()
    Figuras[0].add_subplot(111).plot(Vetores[0].lista)
    canvas.draw()
    Figuras[0].canvas.flush_events()
    Figuras[1].clear()
    Figuras[1].add_subplot(111).plot(Vetores[1].lista)
    canvasB.draw()
    Figuras[1].canvas.flush_events()
    # Final timing values
    labelTempoA_.configure(text=Vetores[0].tempoAni)
    labelTempoA__.configure(text=Vetores[0].tempoExe)
    labelTempoB_.configure(text=Vetores[1].tempoAni)
    labelTempoB__.configure(text=Vetores[1].tempoExe)
    # Announce which algorithm "won" (smaller execution time)
    if Vetores[0].tempoExe < Vetores[1].tempoExe:
        vencedor = VALORES[0] + ' teve um menor tempo de execução!'
    elif Vetores[0].tempoExe > Vetores[1].tempoExe:
        vencedor = VALORES[1] + ' teve um menor tempo de execução!'
    elif Vetores[0].tempoExe == Vetores[1].tempoExe:
        vencedor = 'Empate técnico em tempo de execução!'
    labelVencedor = Label(janela, text=vencedor)
    labelVencedor.pack()
    labelVencedor.place(x=50, y=600)
    # Signal completion on the console
    print("Processamento finalizado")
# -----------------> FUNÇÃO PRINCIPAL
def menu():
    """Build and run the main configuration window.

    Lets the user pick the element count, two algorithms and the animation
    speed, then hands off to tela_simulacao() via the 'Simular' button.
    Publishes janela, varN and teste as globals used by the other screens.
    """
    # Create the main window
    global janela
    janela = Tk()
    label = Label(janela, text='ANÁLISE E COMPLEXIDADE DE ALGORITMOS')
    label.pack()
    # Label for the number of elements
    labelIn = Label(janela, text='Insira o número de elementos')
    labelIn.place(x=170, y=50)
    # Element-count input
    global varN
    varN = StringVar()
    inputN = Entry(janela, textvariable=varN, width=13, justify=CENTER)
    inputN.pack()
    varN.set(1000)
    inputN.place(x=215,y=85)
    # Algorithm-selection input
    label.place(x=132, y=5)
    label2 = Label(janela, text='Selecione dois algoritmos')
    label2.place(x=185, y=120)
    # Speed input
    label3 = Label(janela, text='Selecione a velocidade da animação')
    label3.place(x=160, y=325)
    global teste
    teste = App(janela)
    # Speed selection: maps the radio choice to the global animation delay
    def radCall():
        global tempoDeExecucao
        radSel = radVar.get()
        if radSel == 1:
            print("selecionei 1")
            tempoDeExecucao = 0
        elif radSel == 2:
            tempoDeExecucao = 0.2
        elif radSel == 3:
            tempoDeExecucao = 1
    radVar = IntVar()
    rad1 = Radiobutton(janela, text='Tempo Real', variable=radVar, value=1, command=radCall)
    rad2 = Radiobutton(janela, text='Atraso Curto', variable=radVar, value=2, command=radCall)
    rad3 = Radiobutton(janela, text='Atraso Longo', variable=radVar, value=3, command=radCall)
    rad1.place(x=130, y=355)
    rad2.place(x=220, y=355)
    rad3.place(x=310, y=355)
    # NOTE(review): select() marks the button but does NOT fire the command
    # callback, so tempoDeExecucao may stay unset until the user clicks a
    # radio button - confirm against tela_simulacao's use of the global.
    rad1.select()
    # Simulate button
    menu_bt_simular = Button(janela, text='Simular', command=tela_simulacao)
    menu_bt_simular.place(x=230, y=390)
    # Window configuration
    janela.geometry("500x435+250+50")
    janela.title("ANÁLISE E COMPLEXIDADE DE ALGORITMOS")
    janela.configure(background='black')
    janela.mainloop()
# -----------------> FUNÇÃO QUE VOLTA PARA A TELA PRINCIPAL
def tchauQuerida():
    """Destroy the current window and rebuild the main menu screen."""
    janela.destroy()
    menu()
# Entry point: launch the main menu window when run as a script.
if __name__ == '__main__':
    menu()
|
import sys
import json
import re
from flask_cors import CORS
from flask import Flask, request, jsonify
from flask_restful import Resource, Api
from json import dumps
# Load the bad-word list once at import time, one word per line.
# Fix: the original `open(...).read()` leaked the file handle; use a context
# manager so the file is closed deterministically.
with open('badwords.txt', 'r') as _bad_words_file:
    bad_words_txt = _bad_words_file.read()
bad_words_arr = bad_words_txt.splitlines()
print(bad_words_arr)
class REMOVE_BAD_WORDS(Resource):
    """REST resource that masks every listed bad word in the given string.

    Currently unused (its add_resource registration is commented out below);
    kept for reference.
    """

    def get(self, target_string):
        """Return ``{"data": <target_string with bad words replaced by '****'>}``."""
        for bad_word in bad_words_arr:
            # Fix: escape the wordlist entry - the original passed it to
            # re.sub as a raw pattern, so entries containing regex
            # metacharacters (e.g. '*' or '(') raised re.error or matched
            # unintended text.
            target_string = re.sub(re.escape(bad_word), '****', target_string)
        return {
            "data": target_string
        }
# Flask application setup: REST extension plus permissive CORS so the API
# can be called from browser clients on any origin.
app = Flask(__name__)
api = Api(app)
CORS(app)
#api.add_resource(REMOVE_BAD_WORDS,'/badwords?<string:todo_id>')
@app.route('/success/<name>')
def success(name):
    # Sanity-check endpoint: greet the caller with the path component.
    greeting = 'welcome %s' % name
    return greeting
@app.route('/badwords', methods=['GET'])
def badwords():
    """GET /badwords?target_string=... -> JSON {"data": masked string}.

    Replaces every whole-word (case-insensitive) match of a listed bad word
    with '****' and adds an explicit CORS header to the response.
    """
    # Fix: default to '' so a missing query parameter yields empty data
    # instead of crashing re.sub with None.
    target_string = request.args.get('target_string', '')
    for bad_word in bad_words_arr:
        # Drop the censor characters kept in the wordlist (as before), then
        # escape what remains - fix: the original interpolated the raw word
        # into the pattern, so other regex metacharacters ('+', '?', ...)
        # could raise re.error or match unintended text.
        bad_word = re.escape(bad_word.replace('*', '').replace("(", ''))
        target_string = re.sub(r'\b{}\b'.format(bad_word), '****', target_string, flags=re.I)
    response = jsonify({"data": target_string})
    response.headers.add('Access-Control-Allow-Origin', '*')
    return response
# Port to serve on, taken from the first CLI argument.
# NOTE(review): this is read at import time, so importing the module without
# a CLI argument raises IndexError - confirm the module is only ever run
# directly before moving it under the __main__ guard.
MYPORT = sys.argv[1]
if __name__ == '__main__':
    app.run(host = "0.0.0.0",port=int(MYPORT))
|
# Compute how long a game lasted given start (x:x1) and end (y:y1) clock
# times read as four space-separated integers on one line.
x, x1, y, y1 = map(int, input().split(' '))
i = x * 60 + x1  # start, in minutes since midnight
f = y * 60 + y1  # end, in minutes since midnight
# Same-day game when the end is strictly later; otherwise the game crossed
# midnight (equal times count as a full 24-hour game via the else branch).
if (f > i):
    m = f - i
else:
    m = (24 * 60) - i + f
h = m // 60
m = m % 60
# NOTE(review): this branch looks unreachable - for valid clock inputs m is
# always >= 1 above (the f == i case already produces 24h) - confirm before
# removing.
if h == 0 and m == 0:
    h = 24
    m = 0
print('O JOGO DUROU {} HORA(S) E {} MINUTO(S)'.format(h, m))
import json
from grant.utils.enums import ProposalStatus, CCRStatus
import grant.utils.admin as admin
from grant.utils import totp_2fa
from grant.user.models import admin_user_schema
from grant.proposal.models import proposal_schema, db, Proposal
from grant.ccr.models import CCR
from mock import patch
from ..config import BaseProposalCreatorConfig, BaseCCRCreatorConfig
from ..test_data import mock_blockchain_api_requests, test_ccr
# Expected /admin/checklogin payloads.
# NOTE: json_checklogin and json_2fa are deliberately mutated by the tests
# below as the simulated login state evolves.
json_checklogin = {
    "isLoggedIn": False,
    "is2faAuthed": False,
}
json_checklogin_true = {
    "isLoggedIn": True,
    "is2faAuthed": True,
}
# Expected /admin/2fa payload for a fresh login with no 2FA configured yet.
json_2fa = {
    "isLoginFresh": True,
    "has2fa": False,
    "is2faAuthed": False,
    "backupCodeCount": 0,
    "isEmailVerified": True,
}
class TestAdminAPI(BaseProposalCreatorConfig):
    """End-to-end tests for the admin API: login/2FA flow, user, proposal,
    RFP and CCR administration endpoints.

    Fixes applied: deprecated assertEquals aliases (removed in Python 3.12)
    replaced with assertEqual; stray f-prefixes removed from strings with no
    placeholders.
    """

    def p(self, path, data):
        # POST helper: JSON-encode `data` and set the JSON content type.
        return self.app.post(path, data=json.dumps(data), content_type="application/json")

    def login_admin(self):
        # Promote the default user to admin, log in and complete 2FA setup.
        # set admin
        self.user.set_admin(True)
        db.session.commit()
        # login
        r = self.p("/api/v1/admin/login", {
            "username": self.user.email_address,
            "password": self.user_password
        })
        self.assert200(r)
        # 2fa on the natch
        r = self.app.get("/api/v1/admin/2fa")
        self.assert200(r)
        # ... init
        r = self.app.get("/api/v1/admin/2fa/init")
        self.assert200(r)
        codes = r.json['backupCodes']
        secret = r.json['totpSecret']
        uri = r.json['totpUri']
        # ... enable/verify
        r = self.p("/api/v1/admin/2fa/enable", {
            "backupCodes": codes,
            "totpSecret": secret,
            "verifyCode": totp_2fa.current_totp(secret)
        })
        self.assert200(r)
        return r

    def r(self, method, path, data=None):
        # Generic request helper; only forwards `data` when provided.
        if not data:
            return method(path)
        return method(path, data=data)

    def assert_autherror(self, resp, contains):
        # this should be 403
        self.assert403(resp)
        print(f'...check that [{resp.json["message"]}] contains [{contains}]')
        self.assertTrue(contains in resp.json['message'])

    # happy path (mostly)
    def test_admin_2fa_setup_flow(self):
        # 1. initial checklogin
        r = self.app.get("/api/v1/admin/checklogin")
        self.assert200(r)
        self.assertEqual(json_checklogin, r.json, msg="initial login")
        def send_login():
            return self.p("/api/v1/admin/login", {
                "username": self.user.email_address,
                "password": self.user_password
            })
        # 2. login attempt (is_admin = False)
        r = send_login()
        self.assert401(r)
        # 3. make user admin
        self.user.set_admin(True)
        db.session.commit()
        # 4. login again
        r = send_login()
        self.assert200(r)
        json_checklogin['isLoggedIn'] = True
        self.assertEqual(json_checklogin, r.json, msg="login again")
        # 5. get 2fa state (fresh login)
        r = self.app.get("/api/v1/admin/2fa")
        self.assert200(r)
        self.assertEqual(json_2fa, r.json, msg="get 2fa state")
        # 6. get 2fa setup
        r = self.app.get("/api/v1/admin/2fa/init")
        self.assert200(r)
        self.assertTrue('backupCodes' in r.json)
        self.assertTrue('totpSecret' in r.json)
        self.assertTrue('totpUri' in r.json)
        codes = r.json['backupCodes']
        secret = r.json['totpSecret']
        uri = r.json['totpUri']
        # 7. enable 2fa (bad hash)
        r = self.p("/api/v1/admin/2fa/enable", {
            "backupCodes": ['bad-code'],
            "totpSecret": "BADSECRET",
            "verifyCode": "123456"
        })
        self.assert_autherror(r, 'Bad hash')
        # 8. enable 2fa (bad verification code)
        r = self.p("/api/v1/admin/2fa/enable", {
            "backupCodes": codes,
            "totpSecret": secret,
            "verifyCode": "123456"
        })
        self.assert_autherror(r, 'Bad verification code')
        # 9. enable 2fa (success)
        r = self.p("/api/v1/admin/2fa/enable", {
            "backupCodes": codes,
            "totpSecret": secret,
            "verifyCode": totp_2fa.current_totp(secret)
        })
        self.assert200(r)
        json_2fa['has2fa'] = True
        json_2fa['is2faAuthed'] = True
        json_2fa['backupCodeCount'] = 16
        self.assertEqual(json_2fa, r.json)
        # 10. check login (logged in)
        r = self.app.get("/api/v1/admin/checklogin")
        self.assert200(r)
        self.assertEqual(json_checklogin_true, r.json, msg="checklogin - logged in")
        # 11. 2fa state (logged in & verified)
        r = self.app.get("/api/v1/admin/2fa")
        self.assert200(r)
        self.assertEqual(json_2fa, r.json, msg="get 2fa state (logged in)")
        # 12. logout
        r = self.app.get("/api/v1/admin/logout")
        self.assert200(r)
        json_checklogin['isLoggedIn'] = False
        self.assertEqual(json_checklogin, r.json)
        # 13. 2fa state (logged out)
        r = self.app.get("/api/v1/admin/2fa")
        self.assert403(r)
        # 14. 2fa verify (fail; logged out)
        r = self.p("/api/v1/admin/2fa/verify", {'verifyCode': totp_2fa.current_totp(secret)})
        self.assert_autherror(r, 'Must be auth')
        # 15. login
        r = send_login()
        self.assert200(r)
        # 16. check login (logged in, not verified)
        r = self.app.get("/api/v1/admin/checklogin")
        self.assert200(r)
        json_checklogin['isLoggedIn'] = True
        self.assertEqual(json_checklogin, r.json, msg="checklogin - logged in, not verified")
        # 17. 2fa state (logged in, not verified)
        r = self.app.get("/api/v1/admin/2fa")
        self.assert200(r)
        json_2fa['is2faAuthed'] = False
        self.assertEqual(json_2fa, r.json, msg="get 2fa state (logged in, not verified)")
        # 18. 2fa verify (success: logged in)
        r = self.p("/api/v1/admin/2fa/verify", {'verifyCode': totp_2fa.current_totp(secret)})
        self.assert200(r)
        json_2fa['is2faAuthed'] = True
        self.assertEqual(json_2fa, r.json)
        # 19. check login (natural login and verify)
        r = self.app.get("/api/v1/admin/checklogin")
        self.assert200(r)
        self.assertEqual(json_checklogin_true, r.json, msg="checklogin - logged in")
        # 20. logout
        r = self.app.get("/api/v1/admin/logout")
        self.assert200(r)
        # 21. login
        r = send_login()
        self.assert200(r)
        # 22. 2fa verify (use backup code)
        r = self.p("/api/v1/admin/2fa/verify", {'verifyCode': codes[0]})
        self.assert200(r)
        json_2fa['is2faAuthed'] = True
        json_2fa['backupCodeCount'] = json_2fa['backupCodeCount'] - 1
        self.assertEqual(json_2fa, r.json)
        # 23. logout
        r = self.app.get("/api/v1/admin/logout")
        self.assert200(r)
        # 24. login
        r = send_login()
        self.assert200(r)
        # 25. 2fa verify (fail: re-use backup code)
        r = self.p("/api/v1/admin/2fa/verify", {'verifyCode': codes[0]})
        self.assert_autherror(r, 'Bad 2fa code')
        # Here ends the epic of Loginomesh.

    def test_get_users(self):
        self.login_admin()
        resp = self.app.get("/api/v1/admin/users")
        self.assert200(resp)
        print(resp.json)
        # 2 users created by BaseProposalCreatorConfig
        self.assertEqual(len(resp.json['items']), 2)

    def test_get_proposals(self):
        self.login_admin()
        resp = self.app.get("/api/v1/admin/proposals")
        self.assert200(resp)
        # 2 proposals created by BaseProposalCreatorConfig
        self.assertEqual(len(resp.json['items']), 2)

    def test_open_proposal_for_discussion_accept(self):
        # an admin should be able to open a proposal for discussion
        self.login_admin()
        # proposal needs to be PENDING
        self.proposal.status = ProposalStatus.PENDING
        # approve open for discussion
        resp = self.app.put(
            f"/api/v1/admin/proposals/{self.proposal.id}/discussion",
            data=json.dumps({"isOpenForDiscussion": True})
        )
        self.assert200(resp)
        self.assertEqual(resp.json["status"], ProposalStatus.DISCUSSION)
        proposal = Proposal.query.get(self.proposal.id)
        self.assertEqual(proposal.status, ProposalStatus.DISCUSSION)

    def test_open_proposal_for_discussion_reject(self):
        # an admin should be able to reject opening a proposal for discussion
        reject_reason = "this is a test"
        self.login_admin()
        # proposal needs to be PENDING
        self.proposal.status = ProposalStatus.PENDING
        # disapprove open for discussion
        resp = self.app.put(
            f"/api/v1/admin/proposals/{self.proposal.id}/discussion",
            data=json.dumps({"isOpenForDiscussion": False, "rejectReason": reject_reason})
        )
        self.assert200(resp)
        self.assertEqual(resp.json["status"], ProposalStatus.REJECTED)
        self.assertEqual(resp.json["rejectReason"], reject_reason)
        proposal = Proposal.query.get(self.proposal.id)
        self.assertEqual(proposal.status, ProposalStatus.REJECTED)
        self.assertEqual(proposal.reject_reason, reject_reason)

    def test_open_proposal_for_discussion_bad_proposal_id_fail(self):
        # request should fail if a bad proposal id is provided
        bad_proposal_id = "11111111111111111111"
        self.login_admin()
        # approve open for discussion
        resp = self.app.put(
            f"/api/v1/admin/proposals/{bad_proposal_id}/discussion",
            data=json.dumps({"isOpenForDiscussion": True})
        )
        self.assert404(resp)

    def test_open_proposal_for_discussion_not_admin_fail(self):
        # request should fail if user is not an admin
        self.login_default_user()
        # proposal needs to be PENDING
        self.proposal.status = ProposalStatus.PENDING
        # approve open for discussion
        resp = self.app.put(
            f"/api/v1/admin/proposals/{self.proposal.id}/discussion",
            data=json.dumps({"isOpenForDiscussion": True})
        )
        self.assert401(resp)

    def test_open_proposal_for_discussion_not_pending_fail(self):
        # request should fail if proposal is not in PENDING state
        self.login_admin()
        self.proposal.status = ProposalStatus.DISCUSSION
        # approve open for discussion
        resp = self.app.put(
            f"/api/v1/admin/proposals/{self.proposal.id}/discussion",
            data=json.dumps({"isOpenForDiscussion": True})
        )
        self.assert400(resp)

    def test_open_proposal_for_discussion_no_reject_reason_fail(self):
        # denying opening a proposal for discussion should fail if no reason is provided
        self.login_admin()
        # proposal needs to be PENDING
        self.proposal.status = ProposalStatus.PENDING
        # disapprove open for discussion
        resp = self.app.put(
            f"/api/v1/admin/proposals/{self.proposal.id}/discussion",
            data=json.dumps({"isOpenForDiscussion": False})
        )
        self.assert400(resp)

    def test_accept_proposal_with_funding(self):
        self.login_admin()
        # proposal needs to be DISCUSSION
        self.proposal.status = ProposalStatus.DISCUSSION
        # approve
        resp = self.app.put(
            "/api/v1/admin/proposals/{}/accept".format(self.proposal.id),
            data=json.dumps({"isAccepted": True, "withFunding": True})
        )
        print(resp.json)
        self.assert200(resp)
        self.assertEqual(resp.json["status"], ProposalStatus.LIVE)
        self.assertEqual(resp.json["acceptedWithFunding"], True)
        self.assertEqual(resp.json["target"], resp.json["contributionBounty"])
        # milestones should have estimated dates
        for milestone in resp.json["milestones"]:
            self.assertIsNotNone(milestone["dateEstimated"])

    def test_accept_proposal_without_funding(self):
        self.login_admin()
        # proposal needs to be DISCUSSION
        self.proposal.status = ProposalStatus.DISCUSSION
        # approve
        resp = self.app.put(
            "/api/v1/admin/proposals/{}/accept".format(self.proposal.id),
            data=json.dumps({"isAccepted": True, "withFunding": False})
        )
        print(resp.json)
        self.assert200(resp)
        self.assertEqual(resp.json["status"], ProposalStatus.LIVE)
        self.assertEqual(resp.json["acceptedWithFunding"], False)
        self.assertEqual(resp.json["contributionBounty"], "0")
        # milestones should not have estimated dates
        for milestone in resp.json["milestones"]:
            self.assertIsNone(milestone["dateEstimated"])

    def test_accept_proposal_changes_requested(self):
        # an admin should be able to request changes on a proposal
        reason = "this is a test"
        self.login_admin()
        # proposal needs to be DISCUSSION
        self.proposal.status = ProposalStatus.DISCUSSION
        # approve
        resp = self.app.put(
            "/api/v1/admin/proposals/{}/accept".format(self.proposal.id),
            data=json.dumps({"isAccepted": False, "changesRequestedReason": reason})
        )
        self.assert200(resp)
        self.assertEqual(resp.json["status"], ProposalStatus.DISCUSSION)
        self.assertEqual(resp.json["changesRequestedDiscussion"], True)
        self.assertEqual(resp.json["changesRequestedDiscussionReason"], reason)
        proposal = Proposal.query.get(self.proposal.id)
        self.assertEqual(proposal.status, ProposalStatus.DISCUSSION)
        self.assertEqual(proposal.changes_requested_discussion, True)
        self.assertEqual(proposal.changes_requested_discussion_reason, reason)

    def test_accept_proposal_changes_requested_no_reason_provided_fail(self):
        # requesting changes to a proposal without providing a reason should fail
        self.login_admin()
        # proposal needs to be DISCUSSION
        self.proposal.status = ProposalStatus.DISCUSSION
        # approve
        resp = self.app.put(
            "/api/v1/admin/proposals/{}/accept".format(self.proposal.id),
            data=json.dumps({"isAccepted": False})
        )
        self.assert400(resp)

    def test_accept_proposal_changes_requested_not_discussion_fail(self):
        # requesting changes on a proposal not in DISCUSSION should fail
        self.login_admin()
        self.proposal.status = ProposalStatus.PENDING
        # disapprove
        resp = self.app.put(
            "/api/v1/admin/proposals/{}/accept".format(self.proposal.id),
            data=json.dumps({"isAccepted": False, "changesRequestedReason": "test"})
        )
        self.assert400(resp)

    def test_accept_proposal_not_discussion_fail(self):
        # accepting a proposal not in DISCUSSION should fail
        self.login_admin()
        self.proposal.status = ProposalStatus.PENDING
        # approve
        resp = self.app.put(
            "/api/v1/admin/proposals/{}/accept".format(self.proposal.id),
            data=json.dumps({"isAccepted": True, "withFunding": True})
        )
        self.assert400(resp)

    def test_resolve_changes_discussion(self):
        # an admin should be able to resolve discussion changes
        self.login_admin()
        self.proposal.status = ProposalStatus.DISCUSSION
        self.proposal.changes_requested_discussion = True
        self.proposal.changes_requested_discussion_reason = 'test'
        # resolve changes
        resp = self.app.put(
            f"/api/v1/admin/proposals/{self.proposal.id}/resolve"
        )
        self.assert200(resp)
        self.assertEqual(resp.json['changesRequestedDiscussion'], False)
        self.assertIsNone(resp.json['changesRequestedDiscussionReason'])

    def test_resolve_changes_discussion_wrong_status_fail(self):
        # resolve should fail if proposal is not in a DISCUSSION state
        self.login_admin()
        self.proposal.status = ProposalStatus.PENDING
        self.proposal.changes_requested_discussion = True
        self.proposal.changes_requested_discussion_reason = 'test'
        # resolve changes
        resp = self.app.put(
            f"/api/v1/admin/proposals/{self.proposal.id}/resolve"
        )
        self.assert400(resp)

    def test_resolve_changes_discussion_bad_proposal_fail(self):
        # resolve should fail if bad proposal id is provided
        self.login_admin()
        bad_id = '111111111111'
        # resolve changes
        resp = self.app.put(
            f"/api/v1/admin/proposals/{bad_id}/resolve"
        )
        self.assert404(resp)

    def test_resolve_changes_discussion_no_changes_requested_fail(self):
        # resolve should fail if changes are not requested on the proposal
        self.login_admin()
        self.proposal.status = ProposalStatus.DISCUSSION
        self.proposal.changes_requested_discussion = False
        self.proposal.changes_requested_discussion_reason = None
        # resolve changes
        resp = self.app.put(
            f"/api/v1/admin/proposals/{self.proposal.id}/resolve"
        )
        self.assert400(resp)

    def test_change_proposal_to_accepted_with_funding(self):
        self.login_admin()
        # proposal needs to be DISCUSSION
        self.proposal.status = ProposalStatus.DISCUSSION
        # accept without funding
        resp = self.app.put(
            "/api/v1/admin/proposals/{}/accept".format(self.proposal.id),
            data=json.dumps({"isAccepted": True, "withFunding": False})
        )
        self.assert200(resp)
        self.assertEqual(resp.json["acceptedWithFunding"], False)
        # change to accepted with funding
        resp = self.app.put(
            f"/api/v1/admin/proposals/{self.proposal.id}/accept/fund"
        )
        self.assert200(resp)
        self.assertEqual(resp.json["acceptedWithFunding"], True)
        # milestones should have estimated dates
        for milestone in resp.json["milestones"]:
            self.assertIsNotNone(milestone["dateEstimated"])
        # should fail if proposal is already accepted with funding
        resp = self.app.put(
            f"/api/v1/admin/proposals/{self.proposal.id}/accept/fund"
        )
        self.assert404(resp)
        self.assertEqual(resp.json['message'], "Proposal already accepted with funding.")
        self.proposal.accepted_with_funding = False
        # should fail if proposal is not version two
        self.proposal.version = ''
        resp = self.app.put(
            f"/api/v1/admin/proposals/{self.proposal.id}/accept/fund"
        )
        self.assert404(resp)
        self.assertEqual(resp.json['message'], "Only version two proposals can be accepted with funding")
        self.proposal.version = '2'
        # should failed if proposal is not LIVE or APPROVED
        self.proposal.status = ProposalStatus.DISCUSSION
        self.proposal.accepted_with_funding = False
        resp = self.app.put(
            f"/api/v1/admin/proposals/{self.proposal.id}/accept/fund"
        )
        self.assert404(resp)
        self.assertEqual(resp.json["message"], 'Only live or approved proposals can be modified by this endpoint')

    def test_reject_proposal_discussion(self):
        self.login_admin()
        # proposal needs to be PENDING
        self.proposal.status = ProposalStatus.PENDING
        # reject
        resp = self.app.put(
            "/api/v1/admin/proposals/{}/discussion".format(self.proposal.id),
            data=json.dumps({"isOpenForDiscussion": False, "rejectReason": "Funnzies."})
        )
        self.assert200(resp)
        self.assertEqual(resp.json["status"], ProposalStatus.REJECTED)
        self.assertEqual(resp.json["rejectReason"], "Funnzies.")

    def test_reject_permanently_proposal(self):
        rejected = {
            "rejectReason": "test"
        }
        self.login_admin()
        # no reject reason should 400
        resp = self.app.put(
            f"/api/v1/admin/proposals/{self.proposal.id}/reject_permanently",
            content_type='application/json'
        )
        self.assert400(resp)
        # bad proposal id should 404
        resp = self.app.put(
            "/api/v1/admin/proposals/111111111/reject_permanently",
            data=json.dumps(rejected),
            content_type='application/json'
        )
        self.assert404(resp)
        # bad status should 401
        resp = self.app.put(
            f"/api/v1/admin/proposals/{self.proposal.id}/reject_permanently",
            data=json.dumps(rejected),
            content_type='application/json'
        )
        self.assert401(resp)
        self.proposal.status = ProposalStatus.PENDING
        # should go through
        resp = self.app.put(
            f"/api/v1/admin/proposals/{self.proposal.id}/reject_permanently",
            data=json.dumps(rejected),
            content_type='application/json'
        )
        self.assert200(resp)
        self.assertEqual(resp.json["status"], ProposalStatus.REJECTED_PERMANENTLY)
        self.assertEqual(resp.json["rejectReason"], rejected["rejectReason"])

    @patch('grant.email.send.send_email')
    def test_nominate_arbiter(self, mock_send_email):
        mock_send_email.return_value.ok = True
        self.login_admin()
        self.proposal.status = ProposalStatus.LIVE
        self.proposal.accepted_with_funding = True
        # nominate arbiter
        resp = self.app.put(
            "/api/v1/admin/arbiters",
            data=json.dumps({
                'proposalId': self.proposal.id,
                'userId': self.other_user.id
            })
        )
        self.assert200(resp)

    def test_create_rfp_succeeds(self):
        self.login_admin()
        resp = self.app.post(
            "/api/v1/admin/rfps",
            data=json.dumps({
                "brief": "Some brief",
                "category": "CORE_DEV",
                "content": "CONTENT",
                "dateCloses": 1553980004,
                "status": "DRAFT",
                "title": "TITLE"
            })
        )
        self.assert200(resp)

    def test_get_ccrs(self):
        create_ccr(self)
        # non-admins should fail
        resp = self.app.get(
            "/api/v1/admin/ccrs"
        )
        self.assert401(resp)
        # admins should be able to retrieve ccrs
        self.login_admin()
        resp = self.app.get(
            "/api/v1/admin/ccrs"
        )
        self.assert200(resp)
        self.assertEqual(resp.json["total"], 1)

    def test_delete_ccr(self):
        ccr_json = create_ccr(self)
        ccr_id = ccr_json["ccrId"]
        fake_id = '11111111111111'
        self.login_admin()
        # bad CCR id should 404
        resp = self.app.delete(
            f"/api/v1/admin/ccrs/{fake_id}"
        )
        self.assert404(resp)
        # good CCR id should 200
        resp = self.app.delete(
            f"/api/v1/admin/ccrs/{ccr_id}"
        )
        self.assert200(resp)
        # ccr should be deleted
        resp = self.app.get(
            "/api/v1/admin/ccrs"
        )
        self.assert200(resp)
        self.assertEqual(resp.json["total"], 0)

    def test_get_ccr(self):
        ccr_json = create_ccr(self)
        ccr_id = ccr_json["ccrId"]
        fake_id = '11111111111111'
        self.login_admin()
        # bad ccr id should 404
        resp = self.app.get(
            f"/api/v1/admin/ccrs/{fake_id}"
        )
        self.assert404(resp)
        # good ccr id should 200
        resp = self.app.get(
            f"/api/v1/admin/ccrs/{ccr_id}"
        )
        self.assert200(resp)
        self.assertEqual(resp.json, ccr_json)

    def test_approve_ccr(self):
        ccr1_json = create_ccr(self)
        ccr1_id = ccr1_json["ccrId"]
        ccr2_json = create_ccr(self)
        ccr2_id = ccr2_json["ccrId"]
        fake_id = '11111111111111'
        accepted = {"isAccepted": True}
        rejected = {
            "isAccepted": False,
            "rejectReason": "test"
        }
        submit_ccr(self, ccr1_id)
        submit_ccr(self, ccr2_id)
        self.login_admin()
        # bad ccr id should 404
        resp = self.app.put(
            f"/api/v1/admin/ccrs/{fake_id}/accept",
            data=json.dumps(accepted),
            content_type='application/json'
        )
        self.assert404(resp)
        # good ccr id that's accepted should be live
        resp = self.app.put(
            f"/api/v1/admin/ccrs/{ccr1_id}/accept",
            data=json.dumps(accepted),
            content_type='application/json'
        )
        self.assertStatus(resp, 201)
        ccr = CCR.query.get(ccr1_id)
        self.assertEqual(ccr.status, CCRStatus.LIVE)
        # good ccr id that's rejected should be rejected
        resp = self.app.put(
            f"/api/v1/admin/ccrs/{ccr2_id}/accept",
            data=json.dumps(rejected),
            content_type='application/json'
        )
        self.assert200(resp)
        ccr = CCR.query.get(ccr2_id)
        self.assertEqual(ccr.status, CCRStatus.REJECTED)
        self.assertEqual(ccr.reject_reason, rejected["rejectReason"])

    def test_reject_permanently_ccr(self):
        ccr_json = create_ccr(self)
        ccr_id = ccr_json["ccrId"]
        rejected = {
            "rejectReason": "test"
        }
        self.login_admin()
        # no reject reason should 400
        resp = self.app.put(
            f"/api/v1/admin/ccrs/{ccr_id}/reject_permanently",
            content_type='application/json'
        )
        self.assert400(resp)
        # bad ccr id should 404
        resp = self.app.put(
            "/api/v1/admin/ccrs/111111111/reject_permanently",
            data=json.dumps(rejected),
            content_type='application/json'
        )
        self.assert404(resp)
        # bad status should 401
        resp = self.app.put(
            f"/api/v1/admin/ccrs/{ccr_id}/reject_permanently",
            data=json.dumps(rejected),
            content_type='application/json'
        )
        self.assert401(resp)
        submit_ccr(self, ccr_id)
        # should go through
        resp = self.app.put(
            f"/api/v1/admin/ccrs/{ccr_id}/reject_permanently",
            data=json.dumps(rejected),
            content_type='application/json'
        )
        self.assert200(resp)
        self.assertEqual(resp.json["status"], CCRStatus.REJECTED_PERMANENTLY)
        self.assertEqual(resp.json["rejectReason"], rejected["rejectReason"])
def create_ccr(self):
    """Module-level test helper: create and save a CCR draft as the default user.

    `self` is the TestAdminAPI instance (called as create_ccr(self), not as a
    method) so the helper can reuse its Flask test client and login helpers.
    Returns the saved CCR's JSON payload.
    """
    # create CCR draft
    self.login_default_user()
    resp = self.app.post(
        "/api/v1/ccrs/drafts",
    )
    ccr_id = resp.json['ccrId']
    self.assertStatus(resp, 201)
    # save CCR with the canned test_ccr fixture data
    new_ccr = test_ccr.copy()
    resp = self.app.put(
        f"/api/v1/ccrs/{ccr_id}",
        data=json.dumps(new_ccr),
        content_type='application/json'
    )
    self.assertStatus(resp, 200)
    return resp.json
def submit_ccr(self, ccr_id):
    """Module-level test helper: submit the given CCR for approval as the default user.

    `self` is the TestAdminAPI instance (see create_ccr).  Returns the
    endpoint's JSON response.
    """
    self.login_default_user()
    resp = self.app.put(
        f"/api/v1/ccrs/{ccr_id}/submit_for_approval"
    )
    self.assert200(resp)
    return resp.json
|
from .event import Event
from .speaker import Speaker
from .schedule import Conference, Room
from .simpletz import SimpleTZ
|
from django.db import models
# Create your models here.
class Topic(models.Model):
    """A discussion topic; displayed by its text."""
    # Short display text for the topic.
    topic_text = models.CharField(max_length=200)
    # Publication timestamp ('date published' is the human-readable field label).
    pub_date = models.DateTimeField('date published')

    def __str__(self):
        return self.topic_text
class Question(models.Model):
    """A question belonging to a Topic; deleted along with its topic."""
    # Owning topic; CASCADE removes questions when the topic is deleted.
    topic = models.ForeignKey(Topic, on_delete= models.CASCADE)
    # The question prompt shown to users.
    question_text = models.CharField(max_length=200)
    # Numeric answer value; defaults to 0.
    answer = models.IntegerField(default=0)

    def __str__(self):
        return self.question_text
# Generated by Django 2.0.4 on 2018-05-13 20:57
from django.db import migrations, models
class Migration(migrations.Migration):
    """Re-add the 'style' CharField to the music model (reverses 0005)."""

    dependencies = [
        ('music', '0005_remove_music_style'),
    ]
    operations = [
        migrations.AddField(
            model_name='music',
            name='style',
            # default=1 only backfills existing rows during this migration;
            # preserve_default=False removes the default from the field afterwards.
            field=models.CharField(default=1, max_length=200),
            preserve_default=False,
        ),
    ]
|
import numpy as np
import scipy.interpolate as intp
from astropy.io import fits
from astropy.io import ascii
import sys
import imageSubs as iS
# Python 2 script: for each science frame, subtract the azimuthally-averaged
# radial profile around the PSF centre (suppressing the stellar halo), then
# register and stack the results via imageSubs.register.
print 'subtracting average radial profile (this may take a while)'
#load file names, target list and median psf
files = ascii.read('NIRC2_sci_20020_1.txt')
fileNames = np.array(files['fileNames'])
targets = np.array(files['target'])
o = 512 #psf center (origin)
n = np.size(targets)
for i in range(n):
    #exclude acquisition images (target code 0)
    if targets[i]!=0:
        #load the registered frame and build a continuous (spline) image
        im = fits.getdata('results/'+fileNames[i][:-5]+'.reg.fits')
        smoothim = intp.RectBivariateSpline(range(1024),range(1024),im)
        #sample f(r,theta) on a polar grid (r < 300 px, 1-degree steps)
        f = np.zeros((300,360))
        R = np.arange(300)
        theta = np.arange(360)
        for r in R:
            for t in theta:
                trad = np.radians(t)
                xp = o + r*np.cos(trad)
                yp = o + r*np.sin(trad)
                f[r,t] = smoothim(yp,xp)
        #median over all theta for every r, then interpolate to a continuous
        #radial profile (flat extrapolation beyond r=299)
        f = np.median(f, axis=1)
        smoothf = intp.interp1d(R,f,bounds_error=False,fill_value=f[299])
        #radial coordinate of every pixel relative to the PSF centre
        xp, yp = np.arange(1024),np.arange(1024)
        xg, yg = np.meshgrid(xp,yp)
        rp = np.sqrt((xg-o)**2 + (yg-o)**2)
        #subtract the radial profile and write the result
        # NOTE(review): fits.writeto raises if the output file already exists
        # (no overwrite/clobber flag) - confirm reruns start from a clean dir.
        imsub = im - smoothf(rp)
        fits.writeto('results/'+fileNames[i][:-5]+'.ringsub.fits',imsub)
        #progress bar
        percent = float(i) / n
        hashes = '#' * int(round(percent * 20))
        spaces = ' ' * (20 - len(hashes))
        sys.stdout.write("\rPercent: [{0}] {1}%".format(hashes + spaces, int(round(percent * 100))))
        sys.stdout.flush()
sys.stdout.write("\n")
#register and stack images, using a fixed (512, 512) reference position
positions = ascii.read('starPositions.txt')
positions['x'] = np.ones(n)*512
positions['y'] = np.ones(n)*512
xref, yref = 512., 512.
iS.register(2,'results/',fileNames,'ringsub.', targets, positions, (xref,yref))
|
from migrate import conn_commons
class Order:
    """Data-access helpers for migrating coupon/order data between schemas.

    Every method is a stateless wrapper that opens a fresh
    `conn_commons.Commons()` connection.
    NOTE(review): several queries are built with `%` string interpolation —
    safe only while the interpolated strings are generated internally, not
    from user input; confirm.
    """
    @staticmethod
    def select_old_coupon(user_ids_str):
        # Legacy coupon rows for users in the "(id1,id2,...)" string.
        sql = "select * from t_coupon_user where user_mark in %s"
        coupon_conn = conn_commons.Commons()
        return coupon_conn.select_old_data(sql % user_ids_str, None)
    @staticmethod
    def insert_batch_new_coupon(coupon_list):
        # Bulk-insert migrated coupons; each entry matches the 10 columns.
        sql = """insert into t_user_coupon (mark_id,user_id,server_status,start_time,stop_time,use_time,limit_price,coupon_price,coupon_name,coupon_type)
                  values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"""
        coupon_conn = conn_commons.Commons()
        coupon_conn.insert_batch_db_order(sql, coupon_list)
    @staticmethod
    def select_old_order(user_ids_str):
        # Legacy orders for the given users, oldest first.
        sql = "select * from t_order_info where user_mark in %s order by add_time"
        order_conn = conn_commons.Commons()
        return order_conn.select_old_data(sql % user_ids_str, None)
    @staticmethod
    def select_old_item(order_ids_str):
        # Legacy order line-items for the given orders.
        sql = "select * from t_order_item where order_mark in %s"
        item_conn = conn_commons.Commons()
        return item_conn.select_old_data(sql % order_ids_str, None)
    @staticmethod
    def insert_new_order(order_list):
        # Bulk-insert migrated order headers (17 columns).
        sql = """insert into t_order_info (mark_id,order_no,user_id,order_amount,delivery_amount,pay_amount,order_time,
              pay_time,cancel_time,delivery_date,send_time,arrive_time,remark,order_type,order_status,coupon_id,order_source)
              values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"""
        order_conn = conn_commons.Commons()
        order_conn.insert_batch_db_order(sql, order_list)
    @staticmethod
    def insert_new_item(item_list):
        # Bulk-insert migrated order line-items (11 columns).
        sql = """insert into t_order_item (mark_id,order_id,product_id,product_type,product_name,specification_ids,
              quantity,base_price,sale_price,pay_amount,coupon_id)
              values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"""
        item_conn = conn_commons.Commons()
        item_conn.insert_batch_db_order(sql, item_list)
    @staticmethod
    def insert_new_delivery(delivery_list):
        # Bulk-insert migrated delivery records (9 columns).
        sql = """insert into t_order_delivery (mark_id,order_id,contact,delivery_date,phone,delivery_address,delivery_area,remark,order_type)
              values (%s,%s,%s,%s,%s,%s,%s,%s,%s)"""
        delivery_conn = conn_commons.Commons()
        delivery_conn.insert_batch_db_order(sql, delivery_list)
    @staticmethod
    def select_old_back():
        # Full legacy payment-callback history.
        sql = """select * from t_back_history"""
        back_conn = conn_commons.Commons()
        return back_conn.select_old_data(sql, None)
    @staticmethod
    def insert_batch_new_back(back_list):
        # Bulk-insert migrated payment-callback history (5 columns).
        sql = """insert into t_back_history (mark_id,order_no,add_time,pay_status,cid)
              values (%s,%s,%s,%s,%s)"""
        back_conn = conn_commons.Commons()
        back_conn.insert_batch_db_order(sql, back_list)
    @staticmethod
    def select_province_group():
        # Distinct province values currently stored on addresses.
        sql = "SELECT province from t_user_address GROUP BY province"
        province_conn = conn_commons.Commons()
        return province_conn.select_db_order(sql, None)
    @staticmethod
    def update_province(code, name):
        # Replace a province name with its code.  NOTE(review): unlike
        # update_city/update_area this method does not return the result.
        sql = "update t_user_address set province='%s' where province='%s'"
        print(sql%(code, name))
        province_conn = conn_commons.Commons()
        province_conn.update_db_order(sql%(code, name), None)
    @staticmethod
    def select_city_group():
        # Distinct city values currently stored on addresses.
        sql = "SELECT city from t_user_address GROUP BY city"
        city_conn = conn_commons.Commons()
        return city_conn.select_db_order(sql, None)
    @staticmethod
    def update_city(code, name):
        # Replace a city name with its code.
        sql = "update t_user_address set city='%s' where city='%s'"
        city_conn = conn_commons.Commons()
        return city_conn.update_db_order(sql%(code, name), None)
    @staticmethod
    def select_area_group():
        # Distinct area values currently stored on addresses.
        sql = "SELECT area from t_user_address GROUP BY area"
        area_conn = conn_commons.Commons()
        return area_conn.select_db_order(sql, None)
    @staticmethod
    def update_area(code, name):
        # Replace an area name with its code.
        sql = "update t_user_address set area='%s' where area='%s'"
        area_conn = conn_commons.Commons()
        return area_conn.update_db_order(sql%(code, name), None)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import re
from ripozo.resources.fields.common import StringField
from ripozo_sqlalchemy import AlchemyManager, ScopedSessionHandler
from common.models import engine, Post, Comment
session_handler = ScopedSessionHandler(engine)
class PostManager(AlchemyManager):
    """Ripozo manager exposing Post resources."""
    model = Post
    # Fields serialized on read; includes the ids of related comments.
    fields = ('id', 'username', 'post_text', 'comments.id',)
    create_fields = ('username', 'post_text',)
    # Username must be at least 3 characters, alphanumeric only.
    _field_validators = {
        'username': StringField('username', minimum=3, regex=re.compile(r'^[a-zA-Z0-9]+$'))
    }
class CommentManager(AlchemyManager):
    """Ripozo manager exposing Comment resources."""
    model = Comment
    fields = ('id', 'username', 'comment_text', 'post_id',)
    create_fields = ('username', 'post_id', 'comment_text',)
    # Bug fix: update_fields listed 'comment', which is not a field on the
    # model (see `fields` above); the column is 'comment_text'.
    update_fields = ('username', 'comment_text',)
|
import logging
import redis
from flask import Flask, g
from settings.config import DefaultConfig
from settings.config import local as localconfig # Rename for clarity
from shared import snippets
from routes.authentication import authentication
from routes.game import game
from routes.generic import frontend
from acl import PermissionManager
class GenericApp(Flask):
    """Flask subclass that disables static-file caching while debugging.

    The redundant ``__init__`` override (it only forwarded its arguments
    to ``Flask.__init__`` unchanged) has been removed.
    """
    def get_send_file_max_age(self, name):
        # Static files normally don't change during production, but during
        # development, caching is a curse.
        return 0 if self.debug else Flask.get_send_file_max_age(self, name)
class TutorialApp(GenericApp):
    """Concrete application class; lists the blueprints create_app registers."""
    # Registration order: game, authentication, then the generic frontend.
    BLUEPRINTS = (game, authentication, frontend)
def create_app(config=None):
    """Application factory: build and fully configure a TutorialApp.

    Args:
        config: optional config object layered on top of the defaults.

    Returns:
        The configured TutorialApp instance.
    """
    application = TutorialApp(import_name=__name__)
    # Order matters: settings first, then hooks, blueprints, logging, errors.
    configure_app(application, config, permission_manager=PermissionManager())
    configure_hook(application)
    configure_blueprints(application, application.BLUEPRINTS)
    configure_logging(application)
    configure_error_handlers(application)
    return application
def configure_app(app, config, permission_manager=None):
    """Layer configuration sources onto `app`, lowest priority first:
    DefaultConfig -> local config -> explicit `config` -> env-var file."""
    app.config.from_object(DefaultConfig)
    app.config.from_object(localconfig)
    if config is not None:
        app.config.from_object(config)
    app.config.from_envvar('TUTORIAL_APP_CONFIG', silent=True)  # Override setting by env var without touching codes.
    # NOTE(review): `or` treats any falsy REDIS_CONNECTION (not just None)
    # as absent and falls back to a new StrictRedis client — confirm intended.
    app.config['redis'] = app.config['REDIS_CONNECTION'] or redis.StrictRedis(host=app.config['REDIS_HOST'],
                                                                              port=app.config['REDIS_PORT'])
    app.config['permission_manager'] = permission_manager
def configure_blueprints(app, blueprints):
    """Register every blueprint in `blueprints` on `app`, in order."""
    for bp in blueprints:
        app.register_blueprint(bp)
def configure_hook(app):
    """Attach request-lifecycle hooks to `app`."""
    @app.before_request
    def before_request():
        # Expose the shared redis client on flask.g for request handlers.
        g.redis = app.config['redis']
    @app.after_request
    def control_cache(response):
        # Mark every response non-cacheable; JSON additionally gets no-store.
        response.cache_control.no_cache = True
        if response.mimetype == 'application/json':
            response.cache_control.no_store = True
        return response
def configure_logging(app):
    """Configure the root and app loggers from the LOG_LEVEL setting."""
    message_format = "%(levelname)s:%(name)s:%(funcName)s:%(lineno)s:%(message)s"
    # NOTE(review): an invalid LOG_LEVEL name raises AttributeError here —
    # confirm fail-fast is intended.
    log_level = getattr(logging, app.config.get('LOG_LEVEL', 'INFO'))
    logging.basicConfig(level=log_level, format=message_format)
    if app.config['TESTING']:
        # Under tests, leave only the basicConfig root logger configured.
        return
    app.logger.setLevel(log_level)
def configure_error_handlers(app):
    """Register handlers that convert application exceptions to responses."""
    @app.errorhandler(snippets.InvalidUsage)
    def handle_invalid_usage(error):
        return error.as_response()
    @app.errorhandler(snippets.UndevelopedException)
    def handle_undeveloped_exception(error):
        app.logger.exception('Undeveloped code: %s', error.message)
        return error.as_response()
    # NOTE(review): this handler is never registered with @app.errorhandler,
    # so as written it is dead code — confirm which exception type it was
    # meant to catch before wiring it up.
    def handle_invalid_db_operation(error):
        app.logger.exception('Exception reached flask error handler...')
        return snippets.create_response(message=str(error), status_code=400)
|
# -*- coding: utf-8 -*-
# flake8: noqa
from __future__ import unicode_literals
from django.db import migrations, models
import django_extensions.db.fields.json
class Migration(migrations.Migration):
    """Drop the superseded `sections`/`note` fields now that references
    have been populated by migration 0020."""
    dependencies = [
        ('webplatformcompat', '0020_populate_references'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='feature',
            name='sections',
        ),
        migrations.RemoveField(
            model_name='historicalfeature',
            name='sections',
        ),
        migrations.RemoveField(
            model_name='historicalsection',
            name='note',
        ),
        migrations.RemoveField(
            model_name='section',
            name='note',
        ),
        migrations.AlterField(
            model_name='historicalfeature',
            name='references',
            # JSON-encoded list; defaults to an empty list.
            field=django_extensions.db.fields.json.JSONField(default='[]'),
        ),
    ]
|
'''
Preprocess data
Loading, Standardizing and Filtering the raw data to diminish false positive labels
'''
import pandas as pd
import glob
from .config import config
import os
import random
def load_data(data_dir):
    '''
    Load bounding-box data from a directory of .csv files, or from a single
    .csv file when `data_dir` ends in ".csv".  The first CSV column is used
    as the index; frames from a directory are stacked with original indices
    preserved.
    '''
    _, ext = os.path.splitext(data_dir)
    if ext == ".csv":
        # Single-file case: read it directly.
        return pd.read_csv(data_dir, index_col=0)
    # Directory case: gather every .csv and concatenate.
    frames = [pd.read_csv(path, index_col=0)
              for path in glob.glob(data_dir + "/*.csv")]
    return pd.concat(frames, ignore_index=False)
def zero_area(data):
    """Drop rows whose box has zero width (xmin == xmax)."""
    has_width = data.xmin != data.xmax
    return data[has_width]
#Allometry of height to tree size
def allometry(data):
    # TODO: not yet implemented — placeholder for a height/tree-size
    # allometry filter; currently returns None without touching `data`.
    pass
#Filter by ndvi threshold
def NDVI(data, threshold, data_dir):
    """Compute per-box NDVI and keep only rows with NDVI above `threshold`.

    Bug fixes versus the original:
    - `data.iterrows()` yields (index, row) pairs; they were unpacked
      backwards as (row, index).
    - each row's NDVI is stored at its own index; previously the whole
      'NDVI' column was overwritten with a scalar on every iteration.
    - the NDVI call used undefined name `f`; the Hyperspectral object `h`
      created just above is used instead.
      NOTE(review): `Hyperspectral` itself is not imported in this module —
      confirm where it is provided.
    """
    for index, row in data.iterrows():
        # create the hyperspectral object for this row's image
        h = Hyperspectral(data_dir + row['hyperspec_path'])
        # create clipExtent from the bounding box
        clipExtent = {
            "xmin": row["xmin"],
            "ymin": row["ymin"],
            "xmax": row["xmax"],
            "ymax": row["ymax"],
        }
        # Calculate NDVI for this row only
        data.loc[index, 'NDVI'] = h.NDVI(clipExtent=clipExtent)
    # Create lower bound for NDVI
    data = data[data.NDVI > threshold]
    return data
|
#!/usr/bin/env python3
import os
# Bug fix: the file handle was opened and never closed; a context manager
# guarantees the handle is released even if an exception occurs.
with open("list.py", "w+") as newfile:
    # File-object attributes available for inspection:
    #print (newfile.mode)
    #print (newfile.name)
    #print (newfile.softspace)
    #print (newfile.seek)
    pass
|
from bs4 import BeautifulSoup
import requests
url = 'http://www.winequality.com'
r = requests.get(url)
html_doc = r.text
print(html_doc)
# Pass an explicit parser to avoid bs4's "no parser specified" warning.
soup = BeautifulSoup(html_doc, 'html.parser')
# Bug fix: `soup.prtify` was a typo and was never called; prettify()
# returns the formatted document.
print(soup.prettify())
print(soup.title)
tags = soup.find_all('a')
for link in tags:
    # Bug fix: anchor URLs live in the 'href' attribute, not 'bref'.
    print(link.get('href'))
url1 = 'http://www.analyticsindiamag.com'
r = requests.get(url1)
html_doc = r.text
print(html_doc)
soup = BeautifulSoup(html_doc, 'html.parser')
print(soup.prettify())
|
import logging
from jenkins.models import Job, Build, Artifact
from jenkins.utils import generate_job_name
def import_build_for_job(job_pk, build_number):
    """
    Import a build for a job.

    Fetches build `build_number` of the Job with primary key `job_pk` from
    its Jenkins server, updates the matching local Build row, and records
    one Artifact row per build artifact.
    """
    job = Job.objects.get(pk=job_pk)
    logging.info("Located job %s\n" % job)
    client = job.server.get_client()
    logging.info("Using server at %s\n" % job.server.url)
    jenkins_job = client.get_job(job.name)
    build_result = jenkins_job.get_build(build_number)
    # TODO: Shouldn't access _data here.
    build_details = {
        "status": build_result.get_status(),
        # TODO: What should we do with this ID we get from Jenkins?
        # Discard? or only set it if we don't have one?
        # "build_id": build_result._data["id"],
        "duration": build_result._data["duration"],
        "url": build_result.get_result_url(),
        "console_log": build_result.get_console(),
    }
    logging.info("Processing build details for %s #%d" % (job, build_number))
    # Update-then-refetch: the Build row is assumed to already exist.
    Build.objects.filter(job=job, number=build_number).update(**build_details)
    build = Build.objects.get(job=job, number=build_number)
    for artifact in build_result.get_artifacts():
        artifact_details = {
            "filename": artifact.filename,
            "url": artifact.url,
            "build": build
        }
        logging.info("%s" % artifact_details)
        Artifact.objects.create(**artifact_details)
def create_job(jobtype, server):
    """
    Create a job in the given Jenkins Server.

    The job name is derived from the jobtype; returns the new Job row.
    """
    return Job.objects.create(
        jobtype=jobtype, server=server, name=generate_job_name(jobtype))
def import_builds_for_job(job_pk):
    """
    Import all Builds for a job using the job_pk.

    Asks Jenkins for the full list of build ids for this job, then imports
    each one via import_build_for_job.
    TODO: Add testing - only used by command-line tool just now.
    """
    job = Job.objects.get(pk=job_pk)
    logging.info("Located job %s\n" % job)
    client = job.server.get_client()
    logging.info("Using server at %s\n" % job.server.url)
    jenkins_job = client.get_job(job.name)
    good_build_numbers = list(jenkins_job.get_build_ids())
    logging.info("%s\n" % good_build_numbers)
    for build_number in good_build_numbers:
        import_build_for_job(job.pk, build_number)
|
"""
Health handler module.
The `HealthHandler` provides an interface to manipulate a Pokemon's health
whilst respecting the various hooks and calls required. The handler is
instantiated as a property on a pokemon typeclass, with the pokemon passed
as an argument. It looks for the health properties in the character's db
attributes handler to initialize itself and provide persistence. The Pokemon's
max health is calculated when required rather than stored.
Config Properties:
current (int): Current health of the Pokemon.
Config Requirements:
obj.db.health (int): Current health of the Pokemon.
Setup:
To use the HealthHandler, add it to a pokemon typeclass as follows:
from typeclass.hander_health import HealthHandler
...
@property
        def health(self):
return HealthHandler(self)
Use:
Health is added and subtracted using the `heal` and `dmg` methods or
regular arithmetic operators.
Example usage:
# Say self is a Pokemon.
> @py self.msg(str(self.health.current))
5
> @py self.msg(self.health.max)
10
> @py self.msg(self.health.percentage)
50%
> @py self.msg(self.health.dmg(2))
3
> @py self.msg(self.health.heal(4))
7
> @py self.msg(self.health-5)
2
> @py self.msg(self.health+5)
7
> @py self.msg(self.health.full())
10
"""
# from world.rules import calculate_health
class HealthException(Exception):
    """
    Base exception class for HealthHandler.

    Args:
        msg (str): informative error message
    """
    def __init__(self, msg):
        # Bug fix: forward the message to Exception.__init__ so that
        # str(exc) and tracebacks actually display it (previously empty).
        super().__init__(msg)
        self.msg = msg
class HealthHandler(object):
    """Handler for a Pokemon's health.

    Wraps the parent typeclass's persistent ``db.health`` attribute and
    provides capped arithmetic (never below 0, never above ``max``) plus
    comparison and boolean support against plain ints.

    Changes versus the original: ``__add__``/``__sub__`` now delegate to
    ``heal``/``dmg`` instead of duplicating their bodies, the capping logic
    uses ``min``/``max``, and the pervasive "heatlh" docstring typos are
    fixed.  Behavior is unchanged.

    Args:
        obj (typeclass): parent Pokemon object; must expose ``db.health``.

    Raises:
        HealthException: if ``obj`` has no ``health`` attribute.
    """

    def __init__(self, obj):
        """Save a reference to the parent and validate its attributes."""
        self.obj = obj
        if not self.obj.attributes.has("health"):
            msg = '`HealthHandler` requires `db.health` attribute on `{}`.'
            raise HealthException(msg.format(obj))

    @property
    def current(self):
        """Current health as an int (``obj.health.current``)."""
        return int(self.obj.db.health)

    def __str__(self):
        """Current health as a string (``str(obj.health)``)."""
        return str(self.obj.db.health)

    @property
    def percentage(self):
        """Current health as a percentage-of-max string, e.g. ``"50%"``."""
        return "{}%".format(int(self.current * 100.0 / self.max))

    @property
    def max(self):
        """Maximum health.

        Placeholder constant until the stat-based rules are wired in.
        """
        # return int(calculate_health(self.obj.pokedex("stat")[0],
        #                             self.obj.db.iv[0], self.obj.db.ev[0],
        #                             self.obj.db.level))
        return 10

    def full(self):
        """Reset health to maximum and return the new value."""
        self.obj.db.health = self.max
        return self.obj.db.health

    def heal(self, value):
        """Add ``value`` health, capped at ``max``; return the new health.

        Non-int values return NotImplemented, matching operator protocol.
        """
        if not isinstance(value, int):
            return NotImplemented
        self.obj.db.health = min(self.obj.db.health + value, self.max)
        return self.obj.db.health

    def __add__(self, value):
        """``health + int`` — same capped semantics as ``heal``."""
        return self.heal(value)

    def dmg(self, value):
        """Subtract ``value`` health, floored at 0; return the new health."""
        if not isinstance(value, int):
            return NotImplemented
        self.obj.db.health = max(self.obj.db.health - value, 0)
        # TODO Faint hook when health reaches 0
        return self.obj.db.health

    def __sub__(self, value):
        """``health - int`` — same floored semantics as ``dmg``."""
        return self.dmg(value)

    def __mul__(self, value):
        """``health * int``, capped at ``max``; returns the new health."""
        if not isinstance(value, int):
            return NotImplemented
        self.obj.db.health = min(self.obj.db.health * value, self.max)
        return self.obj.db.health

    def __floordiv__(self, value):
        """``health // int``.

        NOTE: unlike ``*``/``+``/``-`` this does NOT mutate stored health;
        it only returns the quotient (preserved from the original).
        """
        if isinstance(value, int):
            return self.obj.db.health // value
        return NotImplemented

    def __bool__(self):
        """True while health is non-zero."""
        return bool(self.obj.db.health)

    # Comparison operators against plain ints.  Defining __eq__ without
    # __hash__ makes instances unhashable, as in the original.
    def __eq__(self, value):
        if isinstance(value, int):
            return self.obj.db.health == value
        return NotImplemented

    def __ne__(self, value):
        if isinstance(value, int):
            return self.obj.db.health != value
        return NotImplemented

    def __lt__(self, value):
        if isinstance(value, int):
            return self.obj.db.health < value
        return NotImplemented

    def __le__(self, value):
        if isinstance(value, int):
            return self.obj.db.health <= value
        return NotImplemented

    def __gt__(self, value):
        if isinstance(value, int):
            return self.obj.db.health > value
        return NotImplemented

    def __ge__(self, value):
        if isinstance(value, int):
            return self.obj.db.health >= value
        return NotImplemented
|
""" Fluid Logo
Incompressible fluid simulation with obstacles and buoyancy.
"""
from phi.flow import *
# from phi.torch.flow import *
# from phi.tf.flow import *
# from phi.jax.flow import *
# Simulation domain: 128x128 cells covering a 100x100 physical box.
DOMAIN = dict(x=128, y=128, bounds=Box(x=100, y=100))
# Logo geometry: five vertical slats plus three rectangular bars.
OBSTACLE_GEOMETRIES = [Box(x=(15 + x * 7, 15 + (x + 1) * 7), y=(41, 83)) for x in range(1, 10, 2)] + [Box['x,y', 43:50, 41:48], Box['x,y', 15:43, 83:90], Box['x,y', 50:85, 83:90]]
OBSTACLE = Obstacle(union(OBSTACLE_GEOMETRIES))
# Scalar mask of the obstacle sampled on the centered grid (for display).
OBSTACLE_MASK = resample(OBSTACLE.geometry, to=CenteredGrid(0, ZERO_GRADIENT, **DOMAIN))
# Three smoke inflow regions with strengths 1.0, 0.9 and 0.4.
INFLOW = CenteredGrid(Box['x,y', 14:21, 6:10], ZERO_GRADIENT, **DOMAIN) + \
         CenteredGrid(Box['x,y', 81:88, 6:10], ZERO_GRADIENT, **DOMAIN) * 0.9 + \
         CenteredGrid(Box['x,y', 44:47, 49:51], ZERO_GRADIENT, **DOMAIN) * 0.4
# Velocity on a staggered grid; all scalar fields start at zero.
velocity = StaggeredGrid(0, 0, **DOMAIN)
smoke = pressure = divergence = remaining_divergence = CenteredGrid(0, ZERO_GRADIENT, **DOMAIN)
# Interactive loop: advect smoke, apply buoyancy, project to incompressible.
for _ in view('smoke, velocity, pressure, OBSTACLE_MASK', play=False, namespace=globals()).range(warmup=1):
    smoke = advect.semi_lagrangian(smoke, velocity, 1) + INFLOW
    # Buoyancy pushes smoke upward along +y with strength 0.1.
    buoyancy_force = resample(smoke * (0, 0.1), to=velocity)
    velocity = advect.semi_lagrangian(velocity, velocity, 1) + buoyancy_force
    # Pressure projection, warm-started with the previous pressure field.
    velocity, pressure = fluid.make_incompressible(velocity, (OBSTACLE,), Solve('CG-adaptive', 1e-5, x0=pressure))
    remaining_divergence = field.divergence(velocity)
|
""" roll_dice
Roll dice for multiple players.
Author: Jack Jiang (z5129432)
Version: v01
Date: 28/08/2017
"""
from random import seed
from os import system
from input_players import input_players
from roll_dice import roll_dice
def play():
    """Run the dice-game loop: each round, every player rolls in turn.

    Loops forever; the player exits with Ctrl+C (KeyboardInterrupt).
    """
    player_list = input_players()
    now_rounds = 1
    while True:
        for player in player_list:
            # Clear the console between turns (Windows-only 'cls').
            system('cls')
            print(f'现在处于第 {now_rounds} 回合')
            print(f'{player}掷出了:\n')
            print(roll_dice())
            print('按 Ctrl+C 结束游戏.')
            input('按 Enter 键继续...')
        now_rounds += 1
    # NOTE(review): unreachable — the while-True loop above never breaks.
    return
if __name__ == "__main__":
    # Seed the RNG once at startup, then enter the game loop.
    seed()
    play()
|
# This script goes through various useful functions from the os module as well as other useful built-in libraries
import os
filename = os.path.join(os.environ.get("HOME"), "test.txt") # example filename that we will use for this walkthrough
# listing all the files and folders inside the current working directory
print(os.listdir())
################################# FILE INFORMATION WITH OS.STAT #########################################
# This method allows us to see the statistics of a particular file
print(os.stat('notesfromcharlie_session.md'))
# say if you want to know the modification time of a file you could read this field of the result: st_mtime
print(os.stat('notesfromcharlie_session.md').st_mtime) # epoch seconds — not human readable on its own
############ CONVERTING EPOCH TIME TO READABLE TIME WITH DATETIME.DATETIME.FROMTIMESTAMP ##########
# to make the timestamp readable, convert it with datetime.fromtimestamp
from datetime import datetime
modification_time = os.stat('notesfromcharlie_session.md').st_mtime
print(datetime.fromtimestamp(modification_time))
########################## DIRECTORY WALK WITH OS.WALK()##################################################
# os.walk yields one tuple per directory in the tree rooted at the argument.
# In this case, the specified directory is the current working directory
for i in os.walk(os.getcwd()):
    print(i)
print(len(list(os.walk(os.getcwd())))) # number of directories in this tree
# each yielded element is a 3-tuple that can be unpacked directly in the for statement
# you can look through this entire walk of files in the directory to see if a file that you are looking for exists
# This is actually a very useful function that a lot of people tend to use.
# The unpacking order is: dirpath (current directory's full path),
# dirnames (subdirectories in it), filenames (files in it)
for dirpath, dirnames, filenames in os.walk(os.getcwd()):
    print("current path: {}".format(dirpath))
    print("directories: {}".format(dirnames))
    print("Files: {}".format(filenames))
    print() # this is to have a space in between each directory's listing
############################# ENVIRONMENT VARIABLES WITH OS.ENVIRON.GET() ##########################################
# Say that you want to access your HOME path information
# you can get your environment variables by using environ.get() method as such:
# The output is my home directory
print(os.environ.get("HOME")) # returns the directory the HOME environment variable points to
################################# GET THE FILENAME/LAST DIR NAME OS.PATH.BASENAME #################################
# if you want the file name of just the file itself use the os.path.basename() method to get the file name
print(os.path.basename(filename)) # returns the filename with the extension attached
#################################### DOES A PATH EXISTS: OS.PATH.EXISTS ############################################
# If you want to check if a path or file exists, you can use the os.path.exists() method that checks to see if it exists
print(os.path.exists(filename)) # returns a boolean
#################################### DOES A FILE EXISTS: OS.PATH.ISFILE ############################################
# If you wanna check if a thing is a file or not and if it exists, you can use the os.path.isfile() method
print(os.path.isfile(filename)) # returns a boolean
#################################### DOES A DIR EXISTS: OS.PATH.ISDIR ############################################
# Like the os.path.isfile() method, you can use the os.path.isdir() method to see if a thing is a directory or not
print(os.path.isdir(filename)) # returns a boolean
#################################### SPLIT A PATH IN TWO: OS.PATH.SPLIT() ############################################
# if you want to split the path into its directory part and its final component, use the split method
# It returns a (head, tail) pair of strings
print(os.path.split(filename)) # returns a tuple: (everything before the last separator, the final component)
#################################### EXTRACT THE FILE EXTENSION AS THE LAST ELEMENT: OS.PATH.SPLITEXT() ############################################
# If you want to split out the file extension from the full path to file name, you can use the splitext() method
# This is a very useful function that you haven't used a lot yet and maybe you'll find yourself using it a lot in the future
print(os.path.splitext(filename)) # returns a tuple of strings: (path without the extension, the extension itself)
|
import random
from model.contact import Contact
from model.group import Group
import allure
def test_add_contact_in_group(app, db, orm, check_ui):
    """Adding a contact to a group extends that group's contact list.

    Uses the `app` (UI), `db`/`orm` (persistence) and `check_ui` fixtures.
    Precondition data (a group and an unassigned contact) is created on
    demand so the test is self-sufficient.
    """
    with allure.step("If there are no groups create a group"):
        if len(orm.get_group_list()) == 0:
            app.group.create(Group(name="Group for adding contacts"))
    with allure.step("Given a list of contacts in a group"):
        groups = orm.get_group_list()
        group = random.choice(groups)
        old_contacts_in_group = orm.get_contacts_in_group(group)
    with allure.step("When adding a contact to group"):
        # Ensure at least one contact exists outside the chosen group.
        if len(orm.get_contacts_not_in_group(group)) == 0:
            app.contact.create(Contact(first_name="Contact for adding to group"))
        contact= random.choice(orm.get_contacts_not_in_group(group))
        app.contact.add_to_group(contact, group)
    with allure.step("Then new list of contacts in a group is equal to the old list with the added contact"):
        old_contacts_in_group.append(contact)
        new_contacts_in_group = orm.get_contacts_in_group(group)
        # Order-insensitive comparison: both lists sorted by id.
        assert sorted(old_contacts_in_group, key = Contact.id_or_max) == sorted(new_contacts_in_group, key = Contact.id_or_max)
|
from datetime import datetime, timedelta
from django.contrib.gis.geos import Point
from django.test import TestCase
from elections.models import Election
from elections.tests.factories import (
ElectionFactory,
ElectionWithStatusFactory,
ModerationHistoryFactory,
ModerationStatusFactory,
related_status,
)
class TestElectionGeoQueries(TestCase):
    """Tests for geographic and moderation-status Election queries."""
    # Coordinates of the test point (near Buckingham Palace, London).
    lat = 51.5010089365
    lon = -0.141587600123
    # ONSPD fixture supplies the postcode-to-point lookup data.
    fixtures = ["onspd.json"]
    def test_election_for_point(self):
        """A geographic Point lookup finds the covering election."""
        ElectionWithStatusFactory(group=None)
        point = Point(self.lon, self.lat)
        qs = Election.public_objects.for_point(point)
        assert qs.count() == 1
    def test_election_for_lat_lng(self):
        """A lat/lng lookup finds the covering election."""
        ElectionWithStatusFactory(group=None)
        qs = Election.public_objects.for_lat_lng(lat=self.lat, lng=self.lon)
        assert qs.count() == 1
    def test_election_for_postcode(self):
        """A postcode lookup finds the covering election."""
        ElectionWithStatusFactory(group=None)
        qs = Election.public_objects.for_postcode("SW1A 1AA")
        assert qs.count() == 1
    def test_current_elections(self):
        """current() combines date heuristics with the manual flag."""
        # This is implicitly current
        ElectionWithStatusFactory(group=None, poll_open_date=datetime.today())
        # This is implicitly not current
        ElectionWithStatusFactory(
            group=None, poll_open_date=datetime.today() - timedelta(days=60)
        )
        # Elections in the far future are always current
        ElectionWithStatusFactory(
            group=None, poll_open_date=datetime.today() + timedelta(days=400)
        )
        # This is implicitly not current, but current manually set
        ElectionWithStatusFactory(
            group=None,
            poll_open_date=datetime.today() - timedelta(days=60),
            current=True,
        )
        # This is implicitly current, current manually set to False
        ElectionWithStatusFactory(
            group=None,
            poll_open_date=datetime.today() - timedelta(days=1),
            current=False,
        )
        assert Election.public_objects.current().count() == 3
    def test_future_elections(self):
        """future() excludes elections whose poll date has passed."""
        ElectionWithStatusFactory(group=None, poll_open_date=datetime.today())
        ElectionWithStatusFactory(
            group=None, poll_open_date=datetime.today() - timedelta(days=1)
        )
        assert Election.public_objects.future().count() == 1
    def test_current_elections_for_postcode(self):
        """current() and for_postcode() compose; no-geography rows drop out."""
        ElectionWithStatusFactory(group=None, poll_open_date=datetime.today())
        ElectionWithStatusFactory(
            group=None, poll_open_date=datetime.today(), division_geography=None
        )
        ElectionWithStatusFactory(
            group=None, poll_open_date=datetime.today() - timedelta(days=60)
        )
        assert (
            Election.public_objects.current().for_postcode("SW1A1AA").count()
            == 1
        )
    def test_public_private_filter_simple(self):
        """Only 'Approved' elections are public; all are private-visible."""
        # simple case: each election only has a single status event
        ElectionWithStatusFactory(
            group=None, moderation_status=related_status("Suggested")
        )
        ElectionWithStatusFactory(
            group=None, moderation_status=related_status("Approved")
        )
        ElectionWithStatusFactory(
            group=None, moderation_status=related_status("Rejected")
        )
        ElectionWithStatusFactory(
            group=None, moderation_status=related_status("Deleted")
        )
        self.assertEqual(1, Election.public_objects.count())
        self.assertEqual(4, Election.private_objects.count())
    def test_public_private_filter_complex(self):
        """The LATEST moderation event decides public visibility."""
        # set up 2 ballot objects
        e1 = ElectionFactory(group=None)
        e2 = ElectionFactory(group=None)
        # to start off with they're both 'suggested'
        ModerationHistoryFactory(
            election=e1, status=ModerationStatusFactory(short_label="Suggested")
        )
        ModerationHistoryFactory(
            election=e2, status=ModerationStatusFactory(short_label="Suggested")
        )
        self.assertEqual(0, Election.public_objects.count())
        self.assertEqual(2, Election.private_objects.count())
        # approve one of them
        ModerationHistoryFactory(
            election=e1, status=ModerationStatusFactory(short_label="Approved")
        )
        self.assertEqual(1, Election.public_objects.count())
        self.assertEqual(
            e1.election_id, Election.public_objects.all()[0].election_id
        )
        self.assertEqual(2, Election.private_objects.count())
        # and then delete it again
        ModerationHistoryFactory(
            election=e1, status=ModerationStatusFactory(short_label="Deleted")
        )
        self.assertEqual(0, Election.public_objects.count())
        self.assertEqual(2, Election.private_objects.count())
|
import re

import numpy as np
import pandas as pd
from IPython.display import display
def display_all(df):
    """Render `df` in a notebook with generous row/column/width limits
    (up to 1000 each) instead of pandas' truncated defaults."""
    with pd.option_context("display.max_rows", 1000, "display.max_columns", 1000, "display.max_colwidth", 1000):
        display(df)
def add_datepart(df, fldname, drop=True):
    """
    The add_datepart method extracts particular date fields from a complete datetime
    for the purpose of constructing categoricals.
    Expanding date-time into additional fields to capture any trend/cyclical behavior
    as a function of time at any of these granularities.

    Fixes versus the original:
    - `re` was used without being imported (NameError); see module imports.
    - `Series.dt.week` was removed in pandas 2.0; ISO week now comes from
      `dt.isocalendar().week`.

    Args:
        df: DataFrame to mutate in place.
        fldname: name of the datetime column to expand.
        drop: if True, drop the original column afterwards.
    """
    fld = df[fldname]
    if not np.issubdtype(fld.dtype, np.datetime64):
        # Coerce a non-datetime column (e.g. strings) before expanding.
        df[fldname] = fld = pd.to_datetime(fld, infer_datetime_format=True)
    # Column prefix: field name with a trailing "date"/"Date" stripped.
    targ_pre = re.sub('[Dd]ate$', '', fldname)
    for n in ('Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Dayofyear',
              'Is_month_end', 'Is_month_start', 'Is_quarter_end', 'Is_quarter_start', 'Is_year_end', 'Is_year_start'):
        if n == 'Week':
            # pandas >= 2.0: .dt.week is gone; isocalendar().week is UInt32,
            # cast back to int64 to match the other integer columns.
            df[targ_pre+n] = fld.dt.isocalendar().week.astype(np.int64)
        else:
            df[targ_pre+n] = getattr(fld.dt, n.lower())
    # Seconds since the Unix epoch (ns -> s).
    df[targ_pre+'Elapsed'] = fld.astype(np.int64) // 10**9
    if drop: df.drop(fldname, axis=1, inplace=True)
# for when the data is time-based
# https://github.com/cedrickchee/knowledge/blob/master/courses/fast.ai/machine-learning/2017-edition/lesson-2-random-forest-deep-dive.md
def split_vals(a, n):
    """Split *a* at index *n*, returning independent copies of both parts."""
    head, tail = a[:n], a[n:]
    return head.copy(), tail.copy()
# n_valid = 12000 # same as Kaggle's test set size
# n_trn = len(df) - n_valid
# raw_train, raw_valid = split_vals(df_raw, n_trn)
# X_train, X_valid = split_vals(df, n_trn)
# y_train, y_valid = split_vals(y, n_trn)
|
# Generated by Django 2.1.5 on 2019-01-26 21:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: set the verbose_name of Visit.date.

    Only the human-facing label changes ('Дата' is Russian for 'Date');
    the column type stays DateTimeField, so no schema change is expected.
    """
    dependencies = [
        ('Models', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='visit',
            name='date',
            field=models.DateTimeField(verbose_name='Дата'),
        ),
    ]
|
declare @date_from datetime
declare @date_to datetime
declare @report_type smallint = 0
declare @firm_id int = null
declare @company_id int = null
declare @partner_id int = null
-- BUG FIX: "nul" is not a valid T-SQL literal; the keyword is NULL
declare @region_id int = null
|
import itertools
from nltk.corpus import wordnet
#import enchant
import anagrind
import cfg # src file with globals
############parsing####################
def parse_clue(fullclue):
    """Scan each word of *fullclue* and report charade indicators.

    A word is an indicator when it appears as a key in cfg.charade_dict;
    the dict's value describes what the word could indicate.
    """
    cluewords = fullclue.split()
    for wd in cluewords:
        # BUG FIX: dict.has_key() was removed in Python 3 (the rest of this
        # file already uses Python 3 print() calls); use the `in` operator.
        if wd in cfg.charade_dict:
            print(wd, " could indicate ", cfg.charade_dict.get(wd))
            # generate_valid_anagrams(word)
        # isValid_english(wd)
        # meaning(wd)
    print('\n')
# is this word a charade word? How many options?
#create charade clue words list
#create charade_cand_list
#permute them all
# see if the solLen matches
# get defn
# does meaning match?
def print_clue_meaning():
    ''' For each word in the Clue print its NLTK meaning.'''
    # NOTE(review): `myutils` is never imported in this file, so the loop
    # below raises NameError when executed — confirm where store_meaning
    # actually lives (it is not cfg or anagrind as imported above).
    # cfg.indivClueWordsSeq is presumably the tokenized clue; verify.
    print ("Meaning of Clue:", cfg.indivClueWordsSeq)
    for cW in cfg.indivClueWordsSeq:
        print(myutils.store_meaning(cW))
|
class ListNode:
    """Singly linked list node: a value plus a pointer to the next node."""

    def __init__(self, x):
        self.next = None  # filled in by whoever builds the list
        self.val = x
def reorder(head):
    """Reorder L0->L1->...->Ln in place into L0->Ln->L1->L(n-1)->...

    Returns the head of the reordered list (None for an empty list).

    FIX: the original returned the head only for single-node lists and
    implicitly returned None otherwise; returning the head unconditionally
    is backward-compatible and consistent. Also merges the duplicated
    even/odd rewiring branches and uses `is None` instead of `== None`.
    """
    if head is None:
        return None
    # Index every node so both ends of the list are addressable in O(1).
    nodes = {}
    count = 0
    node = head
    while node is not None:
        nodes[count] = node
        node = node.next
        count += 1
    # Weave front and back nodes together, moving inward from both ends.
    lo, hi = 0, count - 1
    while lo < hi:
        nodes[lo].next = nodes[hi]
        if lo + 1 < hi:
            nodes[hi].next = nodes[lo + 1]
        lo += 1
        hi -= 1
    # `lo` now sits on the final node of the reordered list.
    nodes[lo].next = None
    return nodes[0]
|
import boto3
import json
import os
import sys
import time
from datetime import datetime
import decimal
import uuid
class Pekl(object):
    """Transport helper for AWS Lambda payloads.

    Lambda caps request/response payload sizes, so payloads that risk
    exceeding the limit are spilled to an S3 bucket and replaced by a small
    {pekl_bucket_name, pekl_bucket_key} pointer that the receiving side
    resolves with receive().
    """

    def __init__(self, bucket_name, region_name=None):
        """
        :param bucket_name: S3 bucket used for oversized payloads
        :param region_name: AWS region; defaults to $AWS_REGION, then us-east-1
        """
        self.bucket_name = bucket_name
        if region_name is not None:
            self.region = region_name
        else:
            # Fall back to the AWS_REGION environment variable, then us-east-1.
            self.region = os.environ.get("AWS_REGION", "us-east-1")
        self.aws_lambda = boto3.client("lambda", region_name=self.region)
        self.s3 = boto3.client("s3", region_name=self.region)

    def receive(self, event):
        """Resolve an incoming event to a dict.

        S3-pointer events are fetched from the bucket and JSON-decoded;
        plain JSON strings are decoded; anything else is returned as-is.
        """
        if "pekl_bucket_name" in event and "pekl_bucket_key" in event:
            # The sender spilled the payload to S3; collect and decode it.
            bucket_response = self.s3.get_object(
                Bucket=event.get("pekl_bucket_name"),
                Key=event.get("pekl_bucket_key")
            )
            try:
                event = bucket_response.get("Body").read()
                event = json.loads(event)
            except Exception as exception:
                # BUG FIX: this was a Python 2 print statement
                # ('print "bugger"'), a SyntaxError under Python 3.
                # TODO: real error handling/logging is still needed here.
                print("bugger")
                return {
                    "error": str(exception)
                }
        else:
            if isinstance(event, str):
                event = json.loads(event)
        return event

    def respond(self, event):
        """Serialize *event*; spill to S3 and return a pointer if ~>5 MB."""
        json_string = json.dumps(event, cls=DecimalEncoder)
        size = sys.getsizeof(json_string)
        if size > 5000000:
            # Too large to return directly -- hand back an S3 pointer instead.
            random_key = self.writeToBucket(json_string)
            return json.dumps({
                "pekl_bucket_name": self.bucket_name,
                "pekl_bucket_key": random_key
            }, cls=DecimalEncoder)
        # Small enough (under 5 "fake" MB) to return inline.
        return json_string

    def invoke(self, function_name, body, region_name=None):
        """Synchronously invoke a Lambda; spill bodies over ~100 KB to S3.

        Returns the invoked function's response, resolved via receive().
        """
        json_string = json.dumps(body, cls=DecimalEncoder)
        if sys.getsizeof(json_string) > 100000:
            random_key = self.writeToBucket(json_string)
            json_string = json.dumps({
                "pekl_bucket_name": self.bucket_name,
                "pekl_bucket_key": random_key
            }, cls=DecimalEncoder)
        response = self.aws_lambda.invoke(
            FunctionName=function_name,
            InvocationType="RequestResponse",
            Payload=json_string
        )
        response_body = response.get("Payload").read()
        # BUG FIX: under Python 3 boto3 .read() returns bytes, which would
        # crash receive()'s `"..." in event` membership test; decode first.
        if isinstance(response_body, bytes):
            response_body = response_body.decode("utf-8")
        return self.receive(response_body)

    def invokeAsync(self, function_name, body, region_name):
        """Fire-and-forget Lambda invocation; spill bodies over ~5 MB to S3."""
        json_string = json.dumps(body, cls=DecimalEncoder)
        if sys.getsizeof(json_string) > 5000000:
            random_key = self.writeToBucket(json_string)
            json_string = json.dumps({
                "pekl_bucket_name": self.bucket_name,
                "pekl_bucket_key": random_key
            }, cls=DecimalEncoder)
        self.aws_lambda.invoke(
            FunctionName=function_name,
            InvocationType="Event",
            Payload=json_string
        )

    def writeToBucket(self, json_string):
        """Write *json_string* privately to the bucket under a unique key."""
        # Millisecond-resolution timestamp + UUID4 makes key collisions
        # vanishingly unlikely.
        random_key = str(datetime.utcnow()).replace(" ", "") + str(uuid.uuid4()) + ".txt"
        self.s3.put_object(
            ACL="private",
            Bucket=self.bucket_name,
            Key=random_key,
            Body=json_string
        )
        return random_key
class DecimalEncoder(json.JSONEncoder):
    """JSON encoder that additionally handles Decimal and datetime.

    Decimals are emitted as floats (lossy for very high precision, which
    is acceptable for this payload transport); datetimes are emitted as
    ISO-8601 strings with microseconds and a trailing 'Z'.

    (Removed a large commented-out Python 2 `_iterencode` experiment that
    was dead code.)
    """

    def default(self, o):
        if isinstance(o, decimal.Decimal):
            return float(o)
        if isinstance(o, datetime):
            return o.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
        # Anything else: defer to the base class (raises TypeError).
        return super(DecimalEncoder, self).default(o)
|
from collections import Counter
from sklearn.base import BaseEstimator
import numpy as np
class KNN(BaseEstimator):
    """K-nearest-neighbours classifier.

    Uses squared-Euclidean distance over the common prefix of two vectors
    and a majority vote among the K closest stored samples.
    """

    def __init__(self, K):
        # Training samples stored as (vector, label) pairs.
        self.data = []
        self.K = K

    def fit(self, data, ids):
        """Store (vector, label) pairs; repeated calls accumulate samples."""
        self.data.extend(zip(data, ids))

    def predict(self, predData):
        """Return the majority label among the K nearest stored samples
        for each query vector in *predData*."""
        result = []
        for query in predData:
            # Distance from the query to every stored sample.
            dists = [(self.dist(vec, query), label) for vec, label in self.data]
            dists.sort()  # ascending; ties broken by label order, as before
            votes = Counter()
            # FIX: guard against K exceeding the training-set size, which
            # previously raised IndexError.
            for _, label in dists[:min(self.K, len(dists))]:
                votes[label] += 1
            result.append(votes.most_common(1)[0][0])
        return result

    def dist(self, vec1, vec2):
        """Squared Euclidean distance over the vectors' common prefix.

        Sparse (scipy-style) vectors are densified via .toarray() first.
        """
        # FIX: isinstance() instead of `type(x) is not np.ndarray` (works
        # for ndarray subclasses too).
        if not isinstance(vec1, np.ndarray):
            vec1 = vec1.toarray()[0]
        if not isinstance(vec2, np.ndarray):
            vec2 = vec2.toarray()[0]
        n = min(len(vec1), len(vec2))
        # Vectorized: one C-level pass instead of a Python loop.
        diff = np.asarray(vec1[:n], dtype=float) - np.asarray(vec2[:n], dtype=float)
        return float(diff @ diff)
|
import sys
import os
sys.path.append("..")
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMainWindow , QApplication,QWidget, QMessageBox
os.system(r'pyuic5 -o uiclass.py ui\login.ui')
from uiclass import Ui_MainWindow
from PyQt5.QtCore import pyqtSlot
import db
class Mywindow(QMainWindow, Ui_MainWindow):
    """Login window: fixed at 800x600, with a combo box of all alliances."""

    def __init__(self, parent=None):
        QMainWindow.__init__(self, parent)
        # min == max pins the window at 800x600 (disables resizing).
        self.setMaximumSize(800, 600)
        self.setMinimumSize(800, 600)
        self.setupUi(self)
        _translate = QtCore.QCoreApplication.translate
        self.alliances = self.get_alliances()
        # Populate the combo box as "1.<name>", "2.<name>", ...
        num_ali = len(self.alliances)
        for i in range(num_ali):
            self.comboBox1.addItem("")
            self.comboBox1.setItemText(i, _translate("MainWindow", str(i + 1) + '.' + (self.alliances)[i]))

    def get_alliances(self):
        """Fetch the alliance list from the db module."""
        alliances = db.get_alliances_info()
        return alliances

    @pyqtSlot()
    def on_pushButton1_clicked(self):
        # BUG FIX: QComboBox has no current_text() method; the PyQt5 API is
        # currentText(). The old call raised AttributeError on click.
        # NOTE(review): currentText() includes the "N." prefix added above --
        # confirm db.updata_alliance_description expects that.
        alliance = self.comboBox1.currentText()
        description = ''
        db.updata_alliance_description(alliance, description)
def main():
    """Create the QApplication, show the login window, and run the event loop."""
    # up.main()
    app = QtWidgets.QApplication(sys.argv) # the QApplication behind this program
    MainWindow = QMainWindow() # a QMainWindow to host widgets and controls
    # MainWindow.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint)
    # MainWindow.setFixedSize(MainWindow.width(), MainWindow.height());
    # NOTE(review): MainWindow is passed as the parent but never shown itself;
    # only the Mywindow instance is displayed -- verify this is intended.
    ui = Mywindow(MainWindow)
    ui.show() # call QMainWindow.show() to display the window
    sys.exit(app.exec_())
if __name__ == '__main__':
    main()
from django.http import Http404, HttpResponseRedirect, JsonResponse
from django.shortcuts import render, get_object_or_404
from django.urls import reverse
from .models import Question, Choice
# get questions
def index(request):
    """List the five most recently published questions."""
    latest_questions = Question.objects.order_by('-pub_date')[:5]
    return render(request, 'polls/index.html', {'questions': latest_questions})
# show question details for vote
def detail(request, question_id):
    """Render the vote form for a single question (404 if it doesn't exist).

    Uses get_object_or_404 for consistency with results() and vote() below,
    replacing a hand-rolled try/except that raised Http404 manually.
    """
    context = {
        'question': get_object_or_404(Question, pk=question_id)
    }
    return render(request, 'polls/detail.html', context)
# show vote results
def results(request, question_id):
    """Show the vote results for one question (404 if it does not exist)."""
    question = get_object_or_404(Question, pk=question_id)
    return render(request, 'polls/results.html', {'question': question})
# submit vote
def vote(request, question_id):
    """Record a POSTed vote for one of the question's choices.

    On a missing/unknown choice, re-render the detail page with an error
    message; on success, redirect to the results page (post/redirect/get).
    """
    question = get_object_or_404(Question, pk=question_id)
    try:
        selected_choice = question.choice_set.get(pk=request.POST['choice'])
    except (KeyError, Choice.DoesNotExist):
        return render(request, 'polls/detail.html', {
            'question': question,
            'error_message': 'You did not select a choice'
        })
    else:
        # BUG FIX: `votes += 1` is a read-modify-write race -- two
        # concurrent requests could both read the same count and lose a
        # vote. F() pushes the increment into the database atomically.
        from django.db.models import F  # local import keeps this edit self-contained
        selected_choice.votes = F('votes') + 1
        selected_choice.save()
        return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
def results_data(request, question_id):
    """Return a question's vote tally as JSON: [{choice_text: votes}, ...].

    FIX: removed a stray debug print, and use get_object_or_404 (consistent
    with the other views) instead of Question.objects.get, which returned a
    500 for an unknown question_id.
    """
    question = get_object_or_404(Question, pk=question_id)
    vote_data = [{choice.choice_text: choice.votes}
                 for choice in question.choice_set.all()]
    # safe=False: top-level JSON value is a list, not a dict.
    return JsonResponse(vote_data, safe=False)
|
"""
ゼロから学ぶスパイキングニューラルネットワーク
- Spiking Neural Networks from Scratch
Copyright (c) 2020 HiroshiARAKI. All Rights Reserved.
"""
import numpy as np
import matplotlib.pyplot as plt
class Izhikevich:
    """Izhikevich spiking neuron model."""

    def __init__(self, a, b, c, d):
        """
        :param a: time scale of the recovery variable u
        :param b: sensitivity of u to the membrane potential v
        :param c: after-spike reset value of v (resting potential)
        :param d: after-spike increment of u
        """
        self.a = a
        self.b = b
        self.c = c
        self.d = d

    def calc(self, inputs, time=300, dt=0.5, tci=10):
        """Integrate the membrane potential v and recovery variable u.

        :param inputs: input current per simulation step (len >= time/dt)
        :param time: total simulated time [ms]
        :param dt: integration step [ms]
        :param tci: unused; kept for interface compatibility
        :return: dict with the per-step traces 'v' and 'u'
        """
        v, u = self.c, self.d
        trace = {'v': [], 'u': []}
        for step in range(int(time / dt)):
            # Recovery variable (Euler step).
            u += self.a * (self.b * v - u) * dt
            trace['u'].append(u)
            # Membrane potential (Euler step, using the just-updated u).
            v += (0.04 * v ** 2 + 5 * v + 140 - u + inputs[step]) * dt
            trace['v'].append(v)
            # Spike: reset v, bump u.
            if v >= 30:
                v = self.c
                u += self.d
        return trace
if __name__ == '__main__':
    time = 300    # total simulation (observation) time [ms]
    dt = 0.125    # time resolution [ms]
    pre = 50      # number of presynaptic neurons; reused as the input-axis y-limit
    t = np.arange(0, time, dt)
    # Input current: two square waves (from thresholded sin/cos) plus noise.
    input_data = np.sin(0.5 * np.arange(0, time, dt))
    input_data = np.where(input_data > 0, 20, 0) + 10 * np.random.rand(int(time/dt))
    input_data_2 = np.cos(0.4 * np.arange(0, time, dt) + 0.5)
    input_data_2 = np.where(input_data_2 > 0, 10, 0)
    input_data += input_data_2
    # Izhikevich neuron with Regular Spiking parameters.
    neuron = Izhikevich(
        a=0.02,
        b=0.2,
        c=-65,
        d=8
    )
    history = neuron.calc(input_data, time=time, dt=dt)
    # Plot the results as a 3-row figure.
    plt.figure(figsize=(10, 4))
    # Row 1: input current.
    plt.subplot(3, 1, 1)
    plt.plot(t, input_data)
    plt.xlim(0, time)
    plt.ylim(-1, pre)
    plt.ylabel('Input current')
    # Row 2: membrane potential.
    # BUG FIX: this subplot call was commented out, so v was drawn over the
    # input axes; and the label claimed a=0.2, b=2, c=-56, d=-16, which did
    # not match the parameters actually used above.
    plt.subplot(3, 1, 2)
    plt.plot(t, history['v'], label='a=0.02, b=0.2, c=-65, d=8')
    plt.ylabel('Membrane potential $v$ [mV]')
    plt.legend()  # moved here: the labeled artist lives on this subplot
    # Row 3: recovery variable.
    plt.subplot(3, 1, 3)
    plt.plot(t, history['u'], c='tab:orange')
    plt.xlabel('time [ms]')
    plt.ylabel('Recovery variable $u$')
    plt.show()
#!bin/python3
import sys
"""
Execute Query of form (1 x y) or (2 x y)
SeqList , Querytype 1 or 2, x_val , y_val, lastAns,n
SeqList - Sequence nxn on which the Query has to be run
Query - Query either 1 or 2 based on which index has to be calculated
x_val - x value part of the query e.g 1 x y
y_val - y value part of the query
lastAns - last answer on which the update Query needs to be performed
n - n size
"""
def runQuery (seq,query,x,y,lastAns,n):
    """Execute one dynamic-array query.

    Query 1 appends y to the bucket at index (x XOR lastAns) mod n and
    returns 0; query 2 returns element y mod n of that bucket. Any other
    query type (or an out-of-range index) yields None.
    """
    idx = (x ^ lastAns) % n
    if 0 <= idx < n:
        if query == 1:
            seq[idx].append(y)
            return 0
        if query == 2:
            return seq[idx][y % n]
"""
main() is not needed , added to simply block it
reads n,q and calls RunQuery with the sequence prepared already
"""
def main():
    """Read n and q from stdin, run q queries, and print type-2 answers.

    BUG FIX: lastAns must only be updated by type-2 queries. runQuery
    returns a constant 0 for type-1 appends, which previously clobbered
    lastAns back to 0 (diverging from the HackerRank 'Dynamic Array'
    specification that lastAnswer changes only on type-2 queries).
    """
    n, q = input().strip().split(' ')
    n, q = int(n), int(q)
    lastAns = 0
    answerlist = []
    seq = [[] for _ in range(n)]
    for _ in range(q):
        qt, x, y = (int(tok) for tok in input().strip().split(' '))
        result = runQuery(seq, qt, x, y, lastAns, n)
        if qt == 2:
            lastAns = result
            answerlist.append(lastAns)
    for ans in answerlist:
        print(ans)
"""
test : method which performs test of main with input from file instead of console
-- in future we can customize it in better way to make it work with input from file or input from console
--- in a better way
"""
def test():
    """File-driven variant of main().

    FIX: the body was a line-for-line copy of main(); delegate instead so
    the two cannot drift apart. TODO: actually redirect stdin from a file
    here, as the original comment block above intends.
    """
    main()
main()
|
# Print the file-list path for each of the 649 archive chunk batches.
# BUG FIX: converted the Python 2 print statement to a call; with a single
# parenthesised argument the output is identical under Python 2 and 3.
for batch in range(649):
    print('/home/mattmann/data/exp5/image_catalog/deploy/data/archive/chunks/'
          + str(batch) + '/filelist_chunk_' + str(batch) + '.txt')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.