blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a92459e9e8ae533390d8cd2ec5542ac8dbe5714e
|
7e9977bbf988fe2136529640afb08460cce833bc
|
/HeroRegistrationProj/manage.py
|
c117eea3936972ec247c0372ef508f7c1c854c19
|
[
"Apache-2.0"
] |
permissive
|
cs-fullstack-2019-spring/django-fields-widgets-cw-itayanna
|
b8c761376f89fd0c8b4d2fead46b5dc75a7194df
|
7702fe9b541dfaf5ac0458729bcdacd538b6c232
|
refs/heads/master
| 2020-04-27T13:24:13.427170
| 2019-03-12T14:37:11
| 2019-03-12T14:37:11
| 174,368,562
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 552
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys


def main():
    """Run administrative tasks for the HeroRegistrationProj project."""
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "HeroRegistrationProj.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == "__main__":
    main()
|
[
"icage97@gmail.com"
] |
icage97@gmail.com
|
7586bcfb03e48fa8291c02cce4d8609e56263346
|
b7303814936202841acc710b793c7f9fc26ef6d9
|
/project-job-insights/src/jobs.py
|
49c761de2c98f1c4af8e0a3976d9187bccdfd63f
|
[] |
no_license
|
victor-felipe-code/projetos_trybe
|
4027aaa77f12180b051aea032fa4ef75ed7e9084
|
8e9d8c4a716321ab6b14eadd3de371326bbaf06a
|
refs/heads/master
| 2023-08-16T21:57:42.994317
| 2021-09-27T19:08:08
| 2021-09-27T19:08:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 820
|
py
|
import csv
from typing import List


def read(path: str) -> List[dict]:
    """Read a CSV file and return its rows as dictionaries.

    Parameters
    ----------
    path : str
        Full path to file

    Returns
    -------
    list
        List of rows as dicts, keyed by the CSV header row.
    """
    # csv.DictReader maps each row onto the header for us; the previous
    # hand-rolled header/row zipping (left here commented out and described
    # as behaving strangely) has been removed as dead code.
    with open(path) as file:
        return list(csv.DictReader(file, delimiter=",", quotechar='"'))
|
[
"victorfelipeoliveira1@gmail.com"
] |
victorfelipeoliveira1@gmail.com
|
4c746a134c722966896c1d1fd737f5e6b1e8bd09
|
dfa0f11043ea0848526ec069efb644c715fdf67c
|
/AppiumGS V0.1/Logic/Base.py
|
56aa7b37c92af587130eff1f7d26f1213518471f
|
[] |
no_license
|
shaonianshaonian/appiumyaml
|
81f4578863019ed07a9010c1232d3ed4f7d6bb12
|
0e00e8dc34ce7d8a0e6b83371b02a564bb89f4fb
|
refs/heads/master
| 2020-12-30T11:28:02.398290
| 2017-05-23T02:21:02
| 2017-05-23T02:21:02
| 91,561,834
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,168
|
py
|
__author__ = 'shaonianshaonian'
from appium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
import time
class Base:
    """Shared base for Appium page objects.

    Holds the desired capabilities for the Android test session and provides
    helpers for connecting, taking screenshots, swiping, and locating or
    interacting with elements.
    """

    driver = None

    # Desired capabilities for the Appium session.
    # NOTE(review): built once at class level and shared by all instances —
    # confirm this is intended before running against multiple devices.
    desired_caps = {}
    desired_caps['platformName'] = 'Android'
    desired_caps['platformVersion'] = '5.1'
    desired_caps['deviceName'] = '192.168.12.101:5555'
    desired_caps['appPackage'] = 'honc.td'
    desired_caps['appActivity'] = 'honc.td.feature.main.MainActivity'
    desired_caps['unicodeKeyboard'] = 'true'
    desired_caps['resetKeyboard'] = 'true'

    def __init__(self):
        pass

    def connectAppium(self):
        """Open an Appium session against the local Appium server."""
        self.driver = webdriver.Remote('http://localhost:4723/wd/hub', self.desired_caps)

    def saveScreenShot(self, name):
        """Save a screenshot to ../Result/<name>-<timestamp>.jpg."""
        fmt = '%Y%m%d%H%M%S'  # timestamp format appended to the file name
        Date = time.strftime(fmt, time.localtime(time.time()))
        PicName = "../Result/" + name + "-" + Date + ".jpg"
        self.driver.get_screenshot_as_file(PicName)

    def getWindowsSize(self):
        """Return the device screen size as an (x, y) tuple."""
        x = self.driver.get_window_size()['width']
        y = self.driver.get_window_size()['height']
        return (x, y)

    def swipeUp(self, time=800):
        """Swipe up (75% -> 25% of screen height); `time` is duration in ms.

        The parameter keeps its original name for caller compatibility even
        though it shadows the `time` module inside this method.
        """
        l = self.getWindowsSize()
        x1 = int(l[0] * 0.5)   # horizontal centre
        y1 = int(l[1] * 0.75)  # start y coordinate
        y2 = int(l[1] * 0.25)  # end y coordinate
        self.driver.swipe(x1, y1, x1, y2, time)

    def swipeDown(self, time=800):
        """Swipe down (25% -> 75% of screen height); `time` is duration in ms."""
        l = self.getWindowsSize()
        x1 = int(l[0] * 0.5)
        y1 = int(l[1] * 0.25)
        y2 = int(l[1] * 0.75)
        self.driver.swipe(x1, y1, x1, y2, time)

    def swipeLeft(self, time=800):
        """Swipe left (75% -> 5% of screen width); `time` is duration in ms."""
        l = self.getWindowsSize()
        x1 = int(l[0] * 0.75)
        y1 = int(l[1] * 0.5)
        x2 = int(l[0] * 0.05)
        self.driver.swipe(x1, y1, x2, y1, time)

    def swipeRight(self, time=800):
        """Swipe right (5% -> 75% of screen width); `time` is duration in ms."""
        l = self.getWindowsSize()
        x1 = int(l[0] * 0.05)
        y1 = int(l[1] * 0.5)
        x2 = int(l[0] * 0.75)
        self.driver.swipe(x1, y1, x2, y1, time)

    def find_element(self, loc, wait=15):
        """Wait up to `wait` seconds for the element at `loc` to be displayed
        and return it; return False if it never appears.

        Bug fix: the original bare `except:` also swallowed SystemExit and
        KeyboardInterrupt; narrowed to Exception.
        """
        try:
            WebDriverWait(self.driver, wait).until(lambda driver: driver.find_element(*loc).is_displayed())
            return self.driver.find_element(*loc)
        except Exception:
            print ("%s 页面中未能找到 %s 元素" %(self,loc))
            return False

    def find_elements(self, loc):
        """Return the elements matching `loc`, or False when none are found.

        Bug fixes: bare `except:` narrowed to Exception, and the empty-result
        case now returns False explicitly instead of implicitly returning
        None (both falsy, so callers are unaffected).
        """
        try:
            if len(self.driver.find_elements(*loc)):
                return self.driver.find_elements(*loc)
            return False
        except Exception:
            print ("%s 页面中未能找到 %s 元素" %(self,loc))
            return False

    def clickButton(self, loc, find_first=False):
        """Click the element at `loc`; True on success, False when missing."""
        try:
            if find_first:
                self.find_element(loc)
            self.find_element(loc).click()
        except AttributeError:
            # find_element returned False, which has no .click()
            print ("%s 页面中未能找到 %s 按钮" %(self,loc))
            return False
        return True

    def sendKeys(self, loc, value, clear_first=False, click_first=False):
        """Type `value` into the element at `loc`, optionally clicking and/or
        clearing it first; True on success, False when the element is missing."""
        try:
            if click_first:
                self.find_element(loc).click()
            if clear_first:
                self.find_element(loc).clear()
            self.find_element(loc).send_keys(value)
        except AttributeError:
            print ("%s 页面中未能找到 %s 元素" %(self,loc))
            return False
        return True

    def getAttribute(self, loc, clear_first=False, click_first=False):
        """Return the 'text' attribute of the element at `loc` (optionally
        clicking/clearing first); False when the element is missing."""
        try:
            if click_first:
                self.find_element(loc).click()
            if clear_first:
                self.find_element(loc).clear()
            return self.find_element(loc).get_attribute('text')
        except AttributeError:
            print ("%s 页面中未能找到 %s 元素" %(self, loc))
            return False
|
[
"953577962@qq.com"
] |
953577962@qq.com
|
7e0d640376e63f86e98bb1d374db75fba481468c
|
98db4a1bb76ec58a5c42996a0e571c75913f40d6
|
/exercise32.py
|
e702477a06deb0f09b706a2fd825a02dc267d8e9
|
[] |
no_license
|
YoChen3086/PythonPractice
|
f26479e1c5c9494a20b47425336469101aded53b
|
cea760699fc9e55106d562b1d63f547190ac98b5
|
refs/heads/master
| 2020-05-13T22:31:38.652864
| 2019-04-16T10:37:04
| 2019-04-16T10:37:04
| 181,668,304
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 114
|
py
|
# Task:
# Print the values of a list in reverse order.
a = ['one', 'two', 'three']
for item in reversed(a):
    print(item)
|
[
"d87487575107@gmail.com"
] |
d87487575107@gmail.com
|
bbf927c655fcaa281080a77ef766b9c16e906256
|
907edfeeccf9a11cbc8dbe7d104cbaa9d561baf1
|
/Algorithms/Embedding/2. Density Text/Error Diffusion/Density Sierra.py
|
813557e39c97ed85203207fcc6ea9d7b0da55ba0
|
[] |
no_license
|
JGPreston/Final-year-project
|
c31076e006f3d20505d4e12501ebb281c822e10e
|
f46e932bc6fcf22f242c332dc52dd2a05da2a239
|
refs/heads/master
| 2023-08-17T19:40:29.165112
| 2021-10-10T16:59:06
| 2021-10-10T16:59:06
| 415,642,079
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,682
|
py
|
from PIL import Image
import time
import numpy as np
import binascii
import os
import traceback
import re
import statistics
import collections
import matplotlib.pyplot as plt
from collections import Counter
#Excel
import openpyxl
#Add path to directory
import sys
sys.path.append("../../../Analysis/PSNR+SSIM/")
from PSNRSSIM import returnValues
#Variable threshold
blockSize = 4  # edge length (pixels) of each square block the image is split into
sizeDifference = 1  # gap between the two block densities used to encode 0/1 bits
#Main encoding section to access various parts to the encoding stage
def encoding(image):
    """Embed the hard-coded message into *image* and return the stego image
    as a new uint8 PIL image."""
    message = "Why do programmers always mix up Halloween and Christmas? Because 31 OCT = 25 DEC!"
    originalImage = np.array(image, 'float64') #Stores the original image to numpy array. Used later to compare what has changed
    imageArray = np.array(image, 'float64') #Image to numpy array
    blockPixels, density = getBlocks(imageArray) #Get the block pixels (threshold)x(threshold)
    #Histogram of densities
    #histogram(density) #Density histogram
    embeddedBlocks = checkBlocksAndEmbed(blockPixels,density, message) #Creates blocks that contains part of the message
    #Put created blocks into image
    replaceBlocks(embeddedBlocks, imageArray, originalImage) #Replace the image blocks with the embedded blocks
    return Image.fromarray(np.array(imageArray, 'uint8')) #Return image from the numpy array
#Uses the image parsed in and splits it into blocks determined by the block size
def getBlocks(imageArray):
    """Split *imageArray* into flat blockSize x blockSize pixel lists.

    Returns (blockPixels, density) where density[i] is the number of black
    (0-valued) pixels in block i.  Relies on the module-level globals
    height, width and blockSize; assumes both image dimensions are exact
    multiples of blockSize — TODO confirm for all inputs.
    """
    blockPixels = []
    density = []
    for x in range(0, height,blockSize): #Iterate through the image in the size of the block
        for y in range(0, width,blockSize):
            pixels = []
            for a in range(blockSize): #Get the block size section of x,y coordinates
                for b in range(blockSize):
                    pixels.append(imageArray[x+a,y+b])
            blockPixels.append(pixels)
            density.append(pixels.count(0)) #The amount of black pixels in the block
    return(blockPixels,density) #Return the image in seperate blocks and return corresponding density list
#Check blocks densities and embed when a block matches a density
def checkBlocksAndEmbed(blockPixels, density, message):
    """Embed *message* bit-by-bit into the blocks whose density matches one of
    the two selected embedding densities; returns the updated block list.
    Blocks are consumed in order until every message bit has been embedded."""
    densityValue = density_selection(density) #Get two densities to embed to from the densities obtained during the blocking process
    print("Embedding densities: " + str(densityValue))
    #histogram(density)
    message = bin(int.from_bytes(message.encode('utf-8', 'surrogatepass'), 'big'))[2:] #Message to binary
    message = message.zfill(8*((len(message) + 7)//8)) #Fill in binary to make it 8 bits for 1 character
    #totalDensityCount = density.count(densityValue[0]) + density.count(densityValue[1])
    binaryCounter = 0
    for counter, value in enumerate(blockPixels): #For every block
        if(density[counter] == densityValue[0] or density[counter]== densityValue[1]): #Check if the density of the block is one of the two returned
            if(binaryCounter < len(message)): #Only embed until the total length of the message is embedded
                blockPixels[counter] = messageEmbed(value, int(message[binaryCounter]), density[counter], densityValue, counter) #Update the block (position is controlled with counter) with the embedded message block
                binaryCounter+=1 #Add 1 to the binaryCounter so the next character in the message is embedded
    return blockPixels #Return the newly made blockPixels
#Replace original image blocks with the created embedded blocks
def replaceBlocks(embeddedBlock, imageArray, originalImage):
    """Copy every block that differs from the original back into *imageArray*
    in place (the mutated array is also returned).  *originalImage* is
    re-blocked purely to detect which blocks changed."""
    original,density = getBlocks(originalImage) #Original image into blocks (so comparisons can be made)
    counter = 0 #To iterate through the original blocks and the embedded blocks (comparisons)
    for x in range(0,height,blockSize):
        for y in range(0,width,blockSize):
            if(embeddedBlock[counter] != original[counter]): #If the original block isn't the same as the embedded one...
                counter1=0
                for a in range(blockSize):
                    for b in range(blockSize):
                        imageArray[x+a,y+b] = embeddedBlock[counter][counter1] #Update the image to contain the embedded blocks values
                        counter1+=1
            counter += 1
    return imageArray #Return the image
# Embed a single message bit into one block.
def messageEmbed(value,toEmbed,density, densityValue, counter):
    """Nudge a block's black-pixel density so that it encodes one bit.

    A block sitting at the lower of the two embedding densities encodes a 0,
    the higher one encodes a 1.  When the block already encodes the desired
    bit it is returned untouched; otherwise `sizeDifference` pixels are
    flipped (first white pixel -> black to raise the density, first black
    pixel -> white to lower it).  Relies on the module-level sizeDifference.
    """
    lower, higher = densityValue[0], densityValue[1]
    # Already encoding the right bit — nothing to change.
    if (density == lower and toEmbed == 0) or (density == higher and toEmbed == 1):
        return value
    if density == lower and toEmbed == 1:
        # Raise the density: blacken the first white (255) pixel, repeated
        # sizeDifference times.
        for _ in range(sizeDifference):
            for position, pixel in enumerate(value):
                if pixel == 255:
                    value[position] = 0
                    break
        return value
    if density == higher and toEmbed == 0:
        # Lower the density: whiten the first black (0) pixel, repeated
        # sizeDifference times.
        for _ in range(sizeDifference):
            for position, pixel in enumerate(value):
                if pixel == 0:
                    value[position] = 255
                    break
        return value
#Shows the density values in the image
def histogram(density):
    """Plot a bar chart of how often each block density occurs (debug helper)."""
    labels, counts = np.unique(density,return_counts=True) #Labels and counts are a sum of the values from density
    ticks = range(len(counts)) #The axis is a range of the total count
    plt.bar(ticks,counts, align='center', label="Num of black pixels per block") #Plot bars
    plt.xticks(ticks, labels) #Axis labels
    plt.legend() #Plot the legend
    plt.show()
# Pick the pair of embedding densities closest to the mean density.
def density_selection(lst):
    """Return two densities, `sizeDifference` apart, straddling the ceiled
    mean of the unique density values in *lst*.

    Whichever neighbour (mean+sizeDifference or mean-sizeDifference) has the
    occurrence count closest to the mean's own count is paired with the mean,
    so the two chosen densities have similar block populations.
    """
    lst = sorted(lst, key=int)
    unique_densities = set(lst)
    # Mean over the *distinct* density values, rounded up.
    mean = int(np.ceil(sum(unique_densities) / len(unique_densities)))
    gap_above = abs(lst.count(mean + sizeDifference) - lst.count(mean))
    gap_below = abs(lst.count(mean - sizeDifference) - lst.count(mean))
    # Return the pair whose counts differ the least, lower density first.
    if gap_above > gap_below:
        return (mean - sizeDifference, mean)
    else:
        return (mean, mean + sizeDifference)
#To extract the message from the embedded image
def decode(decodeImage):
    """Recover the embedded bit stream from *decodeImage*, print the first 84
    decoded characters, and hand the result to analyse()."""
    decodeImage = np.array(decodeImage, 'float64') #Image to numpy array
    blockPixels,density = getBlocks(decodeImage) #Get the image into blocks and obtain density values
    densityValue = density_selection(density) #Get the two densities to be used from the densities list
    print("Extraction densities:" + str(densityValue))
    extractedMessage = []
    for counter, value in enumerate(blockPixels): #Iterate through the blocks
        if(density[counter] == densityValue[0]): #If the density of the block matches the lower density value, append a 0 to the extracted message
            extractedMessage.append('0')
        elif(density[counter] == densityValue[1]): #If the density of the block matches the higher density value, append a 1 to the extracted message
            extractedMessage.append('1')
    extractedMessage = ''.join(extractedMessage) #Combine all of the 1's and 0's into a string
    finalMessage =[]
    for i in range(0, len(extractedMessage), 8): #Iterate through the extracted message 8 bits at a time
        message = int(extractedMessage[i:i+8],2)
        character = message.to_bytes((message.bit_length() + 7)//8, 'big').decode('utf-8', 'ignore') #Convert the 8 bits into a character
        if(re.match(r'[\w ?=]+', character)): #Regex so that only characters, numbers and select symbols are converted to characters and stored
            finalMessage.append(character) #Append the character that passed the regex criteria
    print(''.join(finalMessage)[:84])
    print()
    analyse(finalMessage) #Send extracted message to be analysed against the original
def analyse(decryptMessage):
    """Score the decoded characters against the known 84-character plaintext
    and append the percentage to the module-level messagePercents list.

    NOTE(review): scoring is membership-based — each character of the
    original counts as recovered if it appears *anywhere* in the decoded
    list, so repeated characters can inflate the score; confirm intended.
    """
    message = "Why do programmers always mix up Halloween and Christmas? Because 31 OCT = 25 DEC!" #Original message
    message = list(message) #to list
    value = 0
    for item in message:
        if item in decryptMessage:
            value +=1 #Every time the characters match in the original and extracted list, add 1.
    #print((value/84)*100)
    messagePercents.append(((value/84)*100)) #As the original message
# Batch driver: embed + decode every Sierra-halftoned image and record the
# timing/quality metrics into the shared Excel workbook.
embedTimes = []
decryptTimes = []
psnrValues = []
ssimValues = []
messagePercents = []
fileList = []
for file in os.listdir("../../../Images/Basic Halftone/Error Diffusion/Sierra/"):
    fileList.append(file[:-4]) #Remove the file extension so
fileList = sorted(fileList, key=int) #it can be sorted by int
for file in fileList: #For every file in the sorted file list
    filename = os.fsdecode(file)
    filename+=".png" #Add png file extension. Converts any file format to png
    image = Image.open("../../../Images/Basic Halftone/Error Diffusion/Sierra/"+filename) #Open halftoned image
    original = Image.open("../../../Images/Basic Halftone/Error Diffusion/Sierra/"+filename) #For comparing
    # NOTE(review): PIL's Image.size is (width, height), so these two names
    # appear swapped; the block indexing downstream only lines up for square
    # images — confirm against the input set.
    height, width = image.size
    print(filename)
    start_time = time.time()
    imageConverted = encoding(image)
    embedTime = time.time() - start_time
    imageConverted.save("../../../Images/Embedded/2. Density Text/Error Diffusion/Sierra/"+filename)
    imageDecode = Image.open("../../../Images/Embedded/2. Density Text/Error Diffusion/Sierra/"+filename)
    start_time = time.time()
    decode(imageDecode)
    decryptTime = time.time() - start_time
    psnr, ssim = returnValues(original,imageConverted)
    psnrValues.append(psnr)
    ssimValues.append(ssim)
    embedTimes.append(embedTime)
    decryptTimes.append(decryptTime)
# Write the collected metrics into fixed cell ranges (rows 4-51) of the
# 'Density Embed' sheet; assumes exactly 48 input images — TODO confirm.
excel_document = openpyxl.load_workbook("../../../../Data/Data.xlsx") #Open excel
sheet = (excel_document['Density Embed']) #Selects sheet
#Input values to the sheet
multiple_cells = sheet['N4' : 'N51']
for value, row in enumerate(multiple_cells):
    for cell in row:
        cell.value = psnrValues[value]
multiple_cells = sheet['O4' : 'O51']
for value, row in enumerate(multiple_cells):
    for cell in row:
        cell.value = ssimValues[value]
multiple_cells = sheet['P4' : 'P51']
for value, row in enumerate(multiple_cells):
    for cell in row:
        cell.value = embedTimes[value]
multiple_cells = sheet['Q4' : 'Q51']
for value, row in enumerate(multiple_cells):
    for cell in row:
        cell.value = decryptTimes[value]
multiple_cells = sheet['R4' : 'R51']
for value, row in enumerate(multiple_cells):
    for cell in row:
        cell.value = messagePercents[value]
#End of inputting values
excel_document.save("../../../../Data/Data.xlsx")
|
[
"32721120+JPreston-1@users.noreply.github.com"
] |
32721120+JPreston-1@users.noreply.github.com
|
1994b2ed70e786f28ee1b7718183c2fd83e2f3a8
|
4f0195aa9a3a161f898bc85d7081792875ee096b
|
/test.py
|
c7f90cb17921659789c931821ed2354cde89e761
|
[
"Apache-2.0"
] |
permissive
|
dheeraj135/Flow-Free-Solver
|
141b789b0048493d6fff3a3fe380ccfcb5024979
|
325d377f9e0786105e64a893d91491a85abe38a5
|
refs/heads/main
| 2023-01-29T02:14:33.054486
| 2020-12-10T19:14:44
| 2020-12-10T19:14:44
| 319,928,400
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 207
|
py
|
from z3 import *
def solve(phi):
    """Check satisfiability of the z3 constraint *phi*.

    Prints "sat" and a satisfying model when *phi* is satisfiable,
    otherwise prints "unsat".
    """
    s = Solver()
    s.add(phi)
    r = s.check()
    if r == sat:
        print("sat")
        m = s.model()
        print(m)
    else:
        print("unsat")


if __name__ == "__main__":
    # Bug fix: the module previously called solve(phi) unconditionally with an
    # undefined name `phi`, so importing or running it raised NameError.
    # Run a trivial self-test only when executed as a script.
    phi = Bool('p')
    solve(Or(phi, Not(phi)))
|
[
"dhirajyadav135@gmail.com"
] |
dhirajyadav135@gmail.com
|
885ab0e81a4c8b85a9a027b63a65dd41a961590c
|
63eae9dff408b3b8e3fa22fcfcbb012b025854bf
|
/shop/templatetags/app_filters.py
|
a440fed183a07d8b1b47772206ced7f0bffbd742
|
[] |
no_license
|
adityasam1994/dlw
|
2fe88858ea80e1d04cd3c9349ecdbcf41f24bb30
|
e0bc5a0b8f52e1deaa655d3d95d4860285a059bb
|
refs/heads/master
| 2020-08-09T03:40:31.143100
| 2019-10-16T09:51:16
| 2019-10-16T10:00:07
| 213,988,198
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,382
|
py
|
from django import template
from django.template.defaultfilters import stringfilter
import datetime
from django.utils import timezone
# Library instance that collects this module's custom template filters.
register = template.Library()
# Product lookup filters.  Models are imported inside each filter body to
# avoid circular imports at Django app-loading time.
@register.filter(name='getimage')
@stringfilter
def getimage(string):
    """Return the first Image of the product whose id is *string*.
    NOTE(review): raises IndexError when the product has no images — confirm
    every product is guaranteed at least one.
    """
    from shop.models import Image
    im = Image.objects.filter(product_id=int(string))
    return im[0]
@register.filter(name='getallimages')
@stringfilter
def getallimages(string):
    """Return the queryset of all Images for the product id *string*."""
    from shop.models import Image
    im = Image.objects.filter(product_id=int(string))
    return im
@register.filter(name="getreviews")
@stringfilter
def getreviews(string):
    """Return all Reviews for the product id *string*.
    (Local name `re` shadows the stdlib re module; harmless here.)
    """
    from shop.models import Review
    re = Review.objects.filter(product_id=int(string))
    return re
@register.filter(name="getcustomer")
@stringfilter
def getcustomer(string):
    """Return the auth User whose primary key is *string*."""
    from django.contrib.auth.models import User
    u = User.objects.get(id=int(string))
    return u
@register.filter(name='times')
def times(number):
    """Return an iterable of length min(number, 5) for template loops.

    Used to repeat markup (e.g. rating stars) at most five times; a
    non-positive *number* yields an empty range, as before.
    """
    # Idiom: min() replaces the original if/else ladder; behavior identical.
    return range(min(number, 5))
@register.filter(name="get_average_stars")
def get_average_stars(number):
    """Return the product's average review rating rounded to the nearest
    integer, or 0 when the product has no reviews."""
    from shop.models import Review
    re = Review.objects.filter(product_id=number)
    if len(re) != 0:
        total=0
        for r in re:
            total=total+r.rating
        average=total/len(re)
        return int(round(average))
    else:
        return 0
@register.filter(name="get_star_count")
def get_star_count(number):
    """Return how many reviews the product with id *number* has."""
    from shop.models import Review
    re = Review.objects.filter(product_id=number)
    return len(re)
@register.filter(name="checkcart")
def checkcart(number, uid):
    """Return the quantity of product *number* in customer *uid*'s cart.
    The cart is a string encoded as "pid/qty,pid/qty,..."; returns 1 when
    the cart is empty or the product is absent (last match wins on
    duplicates).
    """
    from shop.models import Customer
    from shop.models import Product
    product=Product.objects.get(id=number)  # NOTE(review): fetched but unused
    mycart = Customer.objects.get(id=uid).cart
    if mycart != "" and mycart is not None:
        pp=1
        cartsplit = mycart.split(",")
        for c in cartsplit:
            cc= c.split("/")
            if int(cc[0]) == number:
                pp=int(cc[1])
        return pp
    else:
        return 1
@register.filter(name="checkcartbtn")
def checkcartbtn(number, uid):
    """Return True when product *number* is in customer *uid*'s cart.
    NOTE(review): looks up Customer by customer_id while checkcart above
    uses id — confirm which key is intended.  Also, when the cart is
    non-empty but the product is absent this falls through and returns
    None (falsy) instead of False — confirm templates only test truthiness.
    """
    from shop.models import Customer
    from shop.models import Product
    product=Product.objects.get(id=number)  # NOTE(review): fetched but unused
    mycart = Customer.objects.get(customer_id=uid).cart
    if mycart != "" and mycart is not None:
        pp=1  # NOTE(review): assigned but never used in this filter
        cartsplit = mycart.split(",")
        for c in cartsplit:
            cc= c.split("/")
            if int(cc[0]) == number:
                pp=int(cc[1])
                return True
    else:
        return False
# Filters operating on a single "pid/qty" cart entry string.
@register.filter(name="get_item_name")
@stringfilter
def get_item_name(string):
    """Given a "pid/qty" cart entry, return the product's name."""
    from shop.models import Product
    sp=string.split("/")
    pro=Product.objects.get(id=int(sp[0])).name
    return pro
@register.filter(name="get_item_price")
@stringfilter
def get_item_price(string):
    """Given a "pid/qty" cart entry, return the unit price after applying
    any percent-discount Offer covering the product (last offer wins)."""
    from shop.models import Product
    from shop.models import Offer
    per=0
    sp=string.split("/")
    off = Offer.objects.filter(percent_discount=True)
    for o in off:
        pros = o.product.all()
        for pro in pros:
            if pro.id == int(sp[0]):
                per = o.percent
    pro=Product.objects.get(id=int(sp[0])).price
    prod = pro - (pro*per/100)
    return prod
@register.filter(name="get_item_qty")
@stringfilter
def get_item_qty(string):
    """Given a "pid/qty" cart entry, return the quantity (as a string)."""
    from shop.models import Product
    sp=string.split("/")
    return sp[1]
@register.filter(name="get_total_price")
@stringfilter
def get_total_price(string):
    """Given a "pid/qty" entry, return price * qty.
    NOTE(review): unlike get_item_price/get_cart_total this ignores offers —
    confirm whether discounts should apply here as well.
    """
    from shop.models import Product
    sp=string.split("/")
    pr = Product.objects.get(id=int(sp[0])).price
    total=pr*int(sp[1])
    return total
@register.filter(name="get_item_image")
@stringfilter
def get_item_image(string):
    """Given a "pid/qty" entry, return the product's first Image."""
    from shop.models import Image
    sp=string.split("/")
    pr = Image.objects.filter(product_id=int(sp[0]))
    return pr[0]
@register.filter(name="get_cart_total")
def get_cart_total(list):
    """Sum discounted price * qty over a list of "pid/qty" entries.
    NOTE(review): the parameter name shadows the builtin ``list``; the Offer
    query is re-run for every entry — both worth cleaning up.
    """
    from shop.models import Product
    from shop.models import Offer
    tot=0
    for s in list:
        per=0
        spp = s.split("/")
        off = Offer.objects.filter(percent_discount=True)
        for o in off:
            pros = o.product.all()
            for pro in pros:
                if pro.id == int(spp[0]):
                    per = o.percent
        prd = Product.objects.get(id=int(spp[0])).price
        pr = prd - (prd*per/100)
        tot=tot+(pr*int(spp[1]))
    return tot
@register.filter(name="get_pid")
@stringfilter
def get_pid(string):
    """Given a "pid/qty" entry, return the product id as an int."""
    sp=string.split("/")
    return int(sp[0])
@register.filter(name="splitorder")
def splitorder(number):
    """Return the list of "pid/qty" entries stored on Order *number*."""
    from shop.models import Order
    orde = Order.objects.get(id=number).product_id
    sp=orde.split(",")
    return sp
@register.filter(name="checkoffer")
def checkoffer(number):
    """Return the id of the (last) percent-discount Offer covering product
    *number*, or None when no offer applies."""
    from shop.models import Offer
    off = Offer.objects.filter(percent_discount=True)
    ooo = None
    for o in off:
        pros = o.product.all()
        for p in pros:
            if p.id==number:
                ooo= o.id
    return ooo
@register.filter(name="get_off_price")
def get_off_price(number,oid):
    """Apply Offer *oid*'s percent discount to price *number* (int-truncated)."""
    from shop.models import Offer
    off = Offer.objects.get(id=oid)
    dis = off.percent
    newprice = int(number * (100-dis)/100)
    return newprice
@register.filter(name="get_off_percent")
def get_off_percent(number):
    """Return Offer *number*'s discount percent as an int."""
    from shop.models import Offer
    off = Offer.objects.get(id=number)
    dis = int(off.percent)
    return dis
@register.filter(name="get_item_count")
def get_item_count(number):
    """Return the number of "pid/qty" entries in customer *number*'s cart.
    NOTE(review): an empty cart string still yields 1, since
    ''.split(',') == [''] — confirm whether 0 is expected.
    """
    from shop.models import Customer
    cus = Customer.objects.get(customer_id = number)
    cart = cus.cart
    cartsplit = cart.split(",")
    return len(cartsplit)
@register.filter(name="get_total")
def get_total(number):
    """Sum undiscounted price * qty over customer *number*'s cart entries."""
    from shop.models import Customer
    from shop.models import Product
    cus = Customer.objects.get(customer_id = number)
    cart = cus.cart
    cartsplit = cart.split(",")
    tot=0
    for c in cartsplit:
        cc=c.split("/")
        price = Product.objects.get(id=int(cc[0])).price
        tot = tot + price*int(cc[1])
    return tot
@register.filter(name="canceldate")
def canceldate(number):
    """Return True once the shop's cancellation period has elapsed since
    *number* (presumably an order's timezone-aware datetime — TODO confirm
    callers always pass aware datetimes)."""
    from shop.models import Shop_detail
    cp = Shop_detail.objects.get(id=1).cancellation_period
    number += datetime.timedelta(days=cp)
    if timezone.now() > number:
        return True
    else:
        return False
|
[
"adityanath1994@outlook.com"
] |
adityanath1994@outlook.com
|
64e6e5f1b7535a036a951d60f63e3f0d13603ffd
|
40bb50de9c868dc8ac23c426a9f8386ac463d18f
|
/src3/dataset.py
|
531220c9f7bc8d384f2bf3385cfe0eab2309274d
|
[] |
no_license
|
durbin-164/Bengali_Ai
|
8dcfd23e96a2c446315a542eb00528c6c468b384
|
36beb6da1bc04a4e4e83082dd080290f06bad161
|
refs/heads/master
| 2022-04-08T03:49:22.689816
| 2020-03-12T16:09:00
| 2020-03-12T16:09:00
| 242,517,152
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,474
|
py
|
import pandas as pd
import joblib
from PIL import Image
import albumentations
import numpy as np
import torch
class BengliDatasetTrain:
    """Bengali.AI grapheme dataset serving image tensors and three targets.

    Fold assignments are read from ``../input/train_folds.csv``; *folds*
    selects which kfold ids to load.  A single fold gets the light
    validation pipeline (resize + normalize); multiple folds get the full
    training augmentation pipeline.
    """

    def __init__(self, folds, img_height, img_width, mean, std):
        df = pd.read_csv('../input/train_folds.csv')
        df = df[['image_id', 'grapheme_root', 'vowel_diacritic', 'consonant_diacritic', 'kfold']]
        df = df[df.kfold.isin(folds)].reset_index(drop=True)
        self.image_ids = df.image_id.values
        self.grapheme_root = df.grapheme_root.values
        self.vowel_diacritic = df.vowel_diacritic.values
        # Bug fix: previously stored the raw pandas Series (missing .values),
        # inconsistent with the other three label arrays above.
        self.consonant_diacritic = df.consonant_diacritic.values
        if len(folds) == 1:
            # Validation: deterministic resize + normalize only.
            self.aug = albumentations.Compose([
                albumentations.Resize(img_height, img_width),
                albumentations.Normalize(mean, std, always_apply=True)
            ])
        else:
            # Training: geometric + photometric augmentation before normalize.
            self.aug = albumentations.Compose([
                albumentations.Resize(img_height, img_width),
                albumentations.ShiftScaleRotate(shift_limit=0.0625,
                                                scale_limit=0.1,
                                                rotate_limit=5,
                                                p=0.9),
                albumentations.Rotate(limit=5),
                albumentations.RandomContrast(limit=0.2),
                albumentations.GaussianBlur(blur_limit=7),
                albumentations.RandomGamma(),
                albumentations.RandomShadow(),
                albumentations.GaussNoise(),
                albumentations.ChannelShuffle(),
                albumentations.Normalize(mean, std, always_apply=True)
            ])

    def __len__(self):
        """Number of samples in the selected folds."""
        return len(self.image_ids)

    def __getitem__(self, item):
        """Load one pickled 137x236 grayscale image and return CHW tensors
        plus the three classification targets."""
        image = joblib.load(f'../input/image_pickles/{self.image_ids[item]}.pkl')
        image = image.reshape(137, 236).astype(float)
        image = Image.fromarray(image).convert("RGB")
        image = self.aug(image=np.array(image))["image"]
        # NOTE(review): pixels are normalized by `aug` and then divided by
        # 255 again here — confirm this double scaling is intentional.
        image = np.transpose(image, (2, 0, 1)).astype(np.float32)/255.0
        return {
            'image': torch.tensor(image, dtype=torch.float),
            'grapheme_root': torch.tensor(self.grapheme_root[item], dtype=torch.long),
            'vowel_diacritic': torch.tensor(self.vowel_diacritic[item], dtype=torch.long),
            'consonant_diacritic': torch.tensor(self.consonant_diacritic[item], dtype=torch.long)
        }
|
[
"masud.rana@infolytx.com"
] |
masud.rana@infolytx.com
|
3a12ef11cb456aa1655cff4b35934ba431905c60
|
f09e98bf5de6f6c49df2dbeea93bd09f4b3b902f
|
/google-cloud-sdk/lib/surface/kms/__init__.py
|
0d7325c69d646db4cfbaff6358f3574909329d0a
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
Peterfeng100/notepal
|
75bfaa806e24d85189bd2d09d3cb091944dc97e6
|
d5ba3fb4a06516fec4a4ae3bd64a9db55f36cfcd
|
refs/heads/master
| 2021-07-08T22:57:17.407571
| 2019-01-22T19:06:01
| 2019-01-22T19:06:01
| 166,490,067
| 4
| 1
| null | 2020-07-25T04:37:35
| 2019-01-19T00:37:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,892
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The command group for all of the Cloud KMS API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
# Registered for all three release tracks so `gcloud`, `gcloud beta` and
# `gcloud alpha` each expose the kms command group.
@base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA,
                    base.ReleaseTrack.GA)
class CloudKms(base.Group):
  """Manage cryptographic keys in the cloud.
  The gcloud kms command group lets you generate, use, rotate and destroy
  Google Cloud KMS keys.
  Cloud KMS is a cloud-hosted key management service that lets you manage
  encryption for your cloud services the same way you do on-premises. You can
  generate, use, rotate and destroy AES256 encryption keys. Cloud KMS is
  integrated with IAM and Cloud Audit Logging so that you can manage
  permissions on individual keys, and monitor how these are used. Use Cloud
  KMS to protect secrets and other sensitive data which you need to store in
  Google Cloud Platform.
  More information on Cloud KMS can be found here:
  https://cloud.google.com/kms/ and detailed documentation can be found here:
  https://cloud.google.com/kms/docs/
  """
  # Section header under which this group is listed in `gcloud` help output.
  category = 'Identity and Security'
  def Filter(self, context, args):
    """Hook run before any command in this group executes."""
    del context, args  # unused, but the signature is fixed by base.Group
    base.DisableUserProjectQuota()
|
[
"kevinhk.zhang@mail.utoronto.ca"
] |
kevinhk.zhang@mail.utoronto.ca
|
59126b8ece1459e9fd42f05f6d93addec62fcf95
|
8698757521458c2061494258886e5d3cdfa6ff11
|
/word_embeddings/test/cross_validation_similarity.py
|
d138d4f193b83f117eac6f5e0a6ce69b794d605a
|
[
"MIT"
] |
permissive
|
ricvo/argo
|
546c91e84d618c4bc1bb79a6bc7cba01dca56d57
|
a10c33346803239db8a64c104db7f22ec4e05bef
|
refs/heads/master
| 2023-02-25T01:45:26.412280
| 2020-07-05T22:55:35
| 2020-07-05T22:55:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,995
|
py
|
# export LC_ALL=en_US.UTF-8.
import pickle
from core.measures import evaluate_similarity_on_reverse_split
import numpy as np
import pandas as pd
import argparse
import os
def evaluate_cross_sim_and_org(dictionary, dataset, i_split, dataset_split, method, couples_data, ntop=1, cvcorrs=None):
    """Re-evaluate the top-*ntop* alphas of *couples_data* on the reverse
    split and record [alpha, corr] pairs under cvcorrs[dataset_split][method].

    couples_data is a list of (alpha, similarity) pairs; the *ntop* pairs
    with the highest similarity are re-scored with Spearman correlation via
    evaluate_similarity_on_reverse_split.

    Bug fix: the mutable default ``cvcorrs={}`` silently shared one dict
    across all calls that omitted the argument; it is now ``None`` and a
    fresh dict is created per call.  Callers that pass cvcorrs explicitly
    (as main() does) are unaffected.

    NOTE(review): relies on module-level names (methods_args, p_embeddings)
    that are not defined or imported in this file — confirm the missing
    setup code.
    """
    if cvcorrs is None:
        cvcorrs = {}
    p, I_inv, DV, I_norm, I_prod = methods_args[method]
    sorted_data = sorted(couples_data, reverse=True, key=lambda x: x[1])
    top_alphas, top_sims = zip(*sorted_data[:ntop])
    # Ensure the nested accumulator slots exist before appending.
    cvcorrs.setdefault(dataset_split, {}).setdefault(method, [])
    for alpha in top_alphas:
        simeval = make_simeval(p_embeddings, dictionary, alpha, I_inv, DV,
                               p, I_norm, I_prod, method="cos")
        corr = evaluate_similarity_on_reverse_split(dictionary, simeval, dataset, i_split)
        cvcorrs[dataset_split][method].append([alpha, corr])
        print("{} alpha {}:".format(dataset_split, alpha))
        print('SPEARMAN CORR: %.2f ' % corr)
def make_simeval(p_embeddings, dictionary, alpha, I_inv, DV,
                 p, I_norm, I_prod, method="cos"):
    """Build a similarity callable simeval(words1, words2) closing over the
    given embedding and metric parameters.
    NOTE(review): similarity_logmap_Esubmodel_trick is not defined or
    imported in this file — confirm where it is expected to come from.
    """
    def simeval(words1, words2):
        return similarity_logmap_Esubmodel_trick(p_embeddings, dictionary, words1, words2,
                                                 alpha, I_inv, DV, p, I_prod, I_norm=I_norm,
                                                 method=method)
    return simeval
def load_from_dir(simdir):
    """Load the precomputed similarity artifacts stored under *simdir*.

    Returns a 5-tuple: the alpha grid, the two Fisher information matrices
    (at 0 and at u), and the pickled base/alpha similarity dictionaries.
    """
    alphas = np.load(simdir + "/alphas.npy")
    fisher_zero = np.load(simdir + "/fisher-0.npy")
    fisher_u = np.load(simdir + "/fisher-u.npy")
    with open(simdir + "/base-similarities.pkl", 'rb') as handle:
        base_similarities = pickle.load(handle)
    with open(simdir + "/alpha-similarities.pkl", 'rb') as handle:
        alpha_similarities = pickle.load(handle)
    return alphas, fisher_zero, fisher_u, base_similarities, alpha_similarities
def main():
    """CLI driver: for every dataset/method, pick the top alphas per split
    (from cached similarity curves) and cross-validate them on the reverse
    splits, saving a space-separated table of results.

    NOTE(review): ``data``, ``v_dictionary`` and ``methods_args`` are used
    but not defined or imported in this module's visible code — presumably
    module-level globals elsewhere; confirm before running.
    """
    parser = argparse.ArgumentParser(description='Make cross-validation correlations.',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('simdir', type=str, help="directory where to find similarity results")
    parser.add_argument('--ntop', '-t', type=int, help="how many top")
    args = parser.parse_args()
    simdir = args.simdir
    # directory name encodes corpus, vector size and epochs: e.g. "wiki-v300-n5"
    wemb_id = os.path.basename(os.path.normpath(simdir))
    corpus, vstr, nstr = wemb_id.split('-')
    vecsize = int(vstr[1:])
    nepoch = int(nstr[1:])
    ntop = args.ntop
    alphas, I0, Iu, base_similarities, alpha_similarities = load_from_dir(simdir)
    # NOTE(review): despite the .json suffix, the output is written as CSV below
    outputname = os.path.join(simdir, "alpha-similarities-cross-val-top{}.json".format(ntop))
    datasets = ["wordsim353",
                "mc", "rg", "scws",
                "wordsim353sim", "wordsim353rel",
                "men", "mturk287", "rw", "simlex999"
                ]
    n_splits = 3
    cvcorrs = {}
    for d in datasets:
        for m in methods_args:
            curves = []
            for n in range(n_splits):
                all_couples = []
                ds = d + "-split_{:}".format(n)
                # load small, mid and large and merge
                for key in data:
                    all_couples += list(zip(alphas[key], data[key][ds][m]))
                all_couples.sort(key=lambda x: x[0])
                # keep finite scores within the |alpha| <= 70.1 window
                all_couples = [(a, v) for a, v in all_couples if np.abs(a)<=70.1]
                all_couples = [(a,v) for a,v in all_couples if not np.isnan(v)]
                #find best top alphas
                # calculate reverse for the selected alpha
                # store results in the form {m: [(a1, s1), (a2, s2),...]}
                evaluate_cross_sim_and_org(v_dictionary, d, n, ds, m, all_couples, ntop, cvcorrs=cvcorrs)
    df = pd.DataFrame(cvcorrs)
    df.to_csv(outputname, sep=' ')
# Script entry point.
if __name__ == "__main__":
    main()
|
[
"volpi@rist.ro"
] |
volpi@rist.ro
|
b7a67705b4d88255bfb2c97fac26bb30ee8d70cf
|
d3ee2dea9d3eac84d6309eb522ea72c21819a04d
|
/Aula 05. Laço/calcula_serie.py
|
dc9d5f16d951b0eba494d900e87d0ee8764cbd75
|
[] |
no_license
|
ArthurCisotto/insper.dessoft
|
477faf97f6a340c06deb68c9838cd5da9b943e15
|
22b13b1b39141cfad6f3f57fc919fc02eddc1ee7
|
refs/heads/master
| 2023-07-05T03:06:13.542442
| 2021-09-01T11:41:46
| 2021-09-01T11:41:46
| 350,466,788
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 107
|
py
|
def calcula_serie(a, b, n):
    """Return the partial sum of the series sum_{i=0}^{n-1} 1 / a**(i*b)."""
    return sum(1 / a ** (i * b) for i in range(n))
|
[
"noreply@github.com"
] |
noreply@github.com
|
a4739e6b7b3fa154e27241a940dfcb81a0f52890
|
3ce0e97886f4ab528470653991aebbde29b0c4eb
|
/catalog/tests/test_models.py
|
f4f4e9c867427250fc2d128bed151587bc0de567
|
[] |
no_license
|
ahashim01/django-locallibrary
|
5f4ef97c964e4a3a44aa936d687039a0e20300f7
|
d661336e8a258a9959918d0a070ada509e3c49e9
|
refs/heads/main
| 2023-06-25T21:53:51.898114
| 2021-07-23T14:42:14
| 2021-07-23T14:42:14
| 388,821,839
| 0
| 0
| null | 2021-07-23T14:42:15
| 2021-07-23T14:06:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,356
|
py
|
from django.test import TestCase
from catalog.models import Author
# Create your tests here.
class AuthorModelTest(TestCase):
    """Checks for the Author model's field metadata, string form and URL."""

    @classmethod
    def setUpTestData(cls):
        # One shared, read-only Author used by every test method below.
        Author.objects.create(first_name='Big', last_name='Bob')

    def test_first_name_label(self):
        field = Author.objects.get(id=1)._meta.get_field('first_name')
        self.assertEqual(field.verbose_name, 'first name')

    def test_date_of_death_label(self):
        field = Author.objects.get(id=1)._meta.get_field('date_of_death')
        self.assertEqual(field.verbose_name, 'Died')

    def test_first_name_max_length(self):
        field = Author.objects.get(id=1)._meta.get_field('first_name')
        self.assertEqual(field.max_length, 100)

    def test_object_name_is_last_name_comma_first_name(self):
        author = Author.objects.get(id=1)
        self.assertEqual(str(author), '{}, {}'.format(author.last_name, author.first_name))

    def test_get_absolute_url(self):
        # This will also fail if the urlconf is not defined.
        self.assertEqual(Author.objects.get(id=1).get_absolute_url(), '/catalog/author/1')
|
[
"ahmedalaahashem01@gmail.com"
] |
ahmedalaahashem01@gmail.com
|
07a63215fea749792f50a56c355479e76e5b0bc7
|
d180a90cdf7a3fb49117652ad437fdea8718b0cd
|
/clipped_linear.py
|
939f7d3efcf9ba1fff5eb58c311d2dfd5dc4c2c6
|
[] |
no_license
|
parap1uie-s/Co-Adaptive-Mixed-Precision-CNNs
|
31d08048cda2114919afd470321ddec2ec31e49e
|
3ea45558bc18f22b4b33b5127b9c349187440898
|
refs/heads/master
| 2020-11-23T19:10:30.879703
| 2019-12-16T10:22:33
| 2019-12-16T10:22:33
| 227,783,568
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,513
|
py
|
#
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import torch.nn as nn
import torch
import torch.nn.functional as F
from .quantizer import Quantizer
from .q_utils import *
import logging
msglogger = logging.getLogger()
###
# Clipping-based linear quantization (e.g. DoReFa, WRPN)
###
class LearnedClippedLinearQuantizeSTE(torch.autograd.Function):
    """Autograd function: clamp to [0, clip_val], linearly quantize to
    num_bits, with a straight-through gradient estimator; clip_val is a
    learned (1-element) tensor that also receives a gradient (PACT-style).
    """
    @staticmethod
    def forward(ctx, input, clip_val, num_bits, dequantize, inplace):
        # save tensors needed to gate the gradients in backward()
        ctx.save_for_backward(input, clip_val)
        if inplace:
            ctx.mark_dirty(input)
        scale, zero_point = asymmetric_linear_quantization_params(num_bits, 0, clip_val.data[0], signed=False)
        # clamp to the active range, quantize, and optionally map back to floats
        output = clamp(input, 0, clip_val.data[0], inplace)
        output = linear_quantize(output, scale, zero_point, inplace)
        if dequantize:
            output = linear_dequantize(output, scale, zero_point, inplace)
        return output
    @staticmethod
    def backward(ctx, grad_output):
        input, clip_val = ctx.saved_tensors
        # input gradient: zero where the forward clamp was saturated
        grad_input = grad_output.clone()
        grad_input[input.le(0)] = 0
        grad_input[input.ge(clip_val.data[0])] = 0
        # clip_val gradient: only inputs at/above the clip value contribute
        grad_alpha = grad_output.clone()
        grad_alpha[input.lt(clip_val.data[0])] = 0
        grad_alpha = grad_alpha.sum().expand_as(clip_val)
        # Straight-through estimator for the scale factor calculation
        return grad_input, grad_alpha, None, None, None
class ClippedLinearQuantization(nn.Module):
    """Activation quantizer: clamp to [0, clip_val] then linearly quantize
    to ``num_bits`` with a fixed (non-learned) clipping value."""

    def __init__(self, num_bits, clip_val, dequantize=True, inplace=False):
        super(ClippedLinearQuantization, self).__init__()
        self.num_bits = num_bits
        self.clip_val = clip_val
        self.dequantize = dequantize
        self.inplace = inplace
        # Precompute the linear quantization parameters for [0, clip_val].
        self.scale, self.zero_point = asymmetric_linear_quantization_params(num_bits, 0, clip_val, signed=False)

    def forward(self, input):
        # Clamp first, then quantize with a straight-through estimator.
        clamped = clamp(input, 0, self.clip_val, self.inplace)
        return LinearQuantizeSTE.apply(clamped, self.scale, self.zero_point, self.dequantize, self.inplace)

    def __repr__(self):
        suffix = ', inplace' if self.inplace else ''
        return '{0}(num_bits={1}, clip_val={2}{3})'.format(
            self.__class__.__name__, self.num_bits, self.clip_val, suffix)
class LearnedClippedLinearQuantization(nn.Module):
    """PACT-style activation quantizer whose clipping value is a learnable
    parameter (registered as ``clip_val``)."""

    def __init__(self, num_bits, init_act_clip_val, dequantize=True, inplace=False):
        super(LearnedClippedLinearQuantization, self).__init__()
        self.num_bits = num_bits
        self.dequantize = dequantize
        self.inplace = inplace
        # Learnable scalar clipping value ("alpha" in the PACT paper).
        self.clip_val = nn.Parameter(torch.Tensor([init_act_clip_val]))

    def forward(self, input):
        return LearnedClippedLinearQuantizeSTE.apply(
            input, self.clip_val, self.num_bits, self.dequantize, self.inplace)

    def __repr__(self):
        suffix = ', inplace' if self.inplace else ''
        return '{0}(num_bits={1}, clip_val={2}{3})'.format(
            self.__class__.__name__, self.num_bits, self.clip_val, suffix)
class WRPNQuantizer(Quantizer):
    """
    Quantizer using the WRPN quantization scheme, as defined in:
    Mishra et al., WRPN: Wide Reduced-Precision Networks (https://arxiv.org/abs/1709.01134)
    Notes:
        1. This class does not take care of layer widening as described in the paper
        2. The paper defines special handling for 1-bit weights which isn't supported here yet
    """
    def __init__(self, model, optimizer, bits_activations=32, bits_weights=32, bits_overrides=None,
                 quantize_bias=False):
        super(WRPNQuantizer, self).__init__(model, optimizer=optimizer, bits_activations=bits_activations,
                                            bits_weights=bits_weights, bits_overrides=bits_overrides,
                                            train_with_fp_copy=True, quantize_bias=quantize_bias)
        def wrpn_quantize_param(param_fp, param_meta):
            # WRPN weights: clamp to [-1, 1], then symmetric linear quantization
            # with a straight-through estimator (LinearQuantizeSTE from q_utils).
            scale, zero_point = symmetric_linear_quantization_params(param_meta.num_bits, 1)
            out = param_fp.clamp(-1, 1)
            out = LinearQuantizeSTE.apply(out, scale, zero_point, True, False)
            return out
        def relu_replace_fn(module, name, qbits_map):
            # Leave modules with no activation-bits setting untouched.
            bits_acts = qbits_map[name].acts
            if bits_acts is None:
                return module
            # ReLU -> clipped quantization at a fixed clip value of 1.
            return ClippedLinearQuantization(bits_acts, 1, dequantize=True, inplace=module.inplace)
        self.param_quantization_fn = wrpn_quantize_param
        self.replacement_factory[nn.ReLU] = relu_replace_fn
#def dorefa_quantize_param(param_fp, param_meta):
# if param_meta.num_bits == 1:
# out = DorefaParamsBinarizationSTE.apply(param_fp)
# else:
# scale, zero_point = asymmetric_linear_quantization_params(param_meta.num_bits, 0, 1, signed=False)
# out = param_fp.tanh()
# out = out / (2 * out.abs().max()) + 0.5
# out = LinearQuantizeSTE.apply(out, scale, zero_point, True, False)
# out = 2 * out - 1
# return out
def fw(x, bitW):
    """DoReFa weight transform: squash with tanh, rescale to [0, 1] around
    the largest magnitude, quantize to ``bitW`` bits, and map back to
    [-1, 1].  ``bitW == 32`` means full precision (identity).
    """
    if bitW == 32:
        return x
    # FIX: torch.tanh replaces the deprecated F.tanh (identical values).
    x = torch.tanh(x)
    # Broadcasting makes the original .expand_as(x) redundant.
    x = x / x.abs().max() * 0.5 + 0.5
    x = 2.0 * quantize(x, bitW) - 1.0
    return x
class RoundNoGradient(torch.autograd.Function):
    """Round x to the nearest multiple of 1/n, with an identity
    (straight-through) gradient."""

    @staticmethod
    def forward(ctx, x, n):
        # snap to a uniform grid with step 1/n
        return (x * n).round() / n

    @staticmethod
    def backward(ctx, g):
        # pass the gradient through unchanged; n receives no gradient
        return g, None
def quantize(x, k):
    """Uniform k-bit quantization of x (expected in [0, 1]) using
    straight-through rounding onto 2**k - 1 levels."""
    levels = float(2 ** k - 1.0)
    return RoundNoGradient.apply(x, levels)
def nonlinear(x):
    """Clip activations into [0, 1] (same result as the original nested clamps)."""
    return torch.clamp(x, min=0.0, max=1.0)
def fa(x, bitA):
    """Quantize activations to ``bitA`` bits; 32 means full precision (no-op)."""
    return x if bitA == 32 else quantize(x, bitA)
def dorefa_quantize_param(param_fp, param_meta):
    # DoReFa weight quantization: 1-bit weights are binarized to +/-E
    # (E = mean absolute value); otherwise apply the k-bit fw() transform.
    # param_meta carries the per-parameter bit width (project type).
    if param_meta.num_bits == 1:
        out = DorefaParamsBinarizationSTE.apply(param_fp)
    else:
        out = fw(param_fp, param_meta.num_bits)
    return out
class DorefaParamsBinarizationSTE(torch.autograd.Function):
    """Binarize weights to +/-E where E = mean(|w|), with a
    straight-through (identity) gradient."""

    @staticmethod
    def forward(ctx, input, inplace=False):
        if inplace:
            ctx.mark_dirty(input)
        magnitude = input.abs().mean()
        return input.sign() * magnitude

    @staticmethod
    def backward(ctx, grad_output):
        # identity gradient for the input; none for the inplace flag
        return grad_output, None
class DorefaQuantizer(Quantizer):
    """
    Quantizer using the DoReFa scheme, as defined in:
    Zhou et al., DoReFa-Net: Training Low Bitwidth Convolutional Neural Networks with Low Bitwidth Gradients
    (https://arxiv.org/abs/1606.06160)
    Notes:
        1. Gradients quantization not supported yet
        2. The paper defines special handling for 1-bit weights which isn't supported here yet
    """
    def __init__(self, model, optimizer=None, bits_activations=32, bits_weights=32, bits_overrides=OrderedDict(), quantize_bias=False):
        super(DorefaQuantizer, self).__init__(model, optimizer=optimizer, bits_activations=bits_activations,
                                              bits_weights=bits_weights, bits_overrides=bits_overrides,
                                              train_with_fp_copy=True, quantize_bias=quantize_bias)
        def relu_replace_fn(module, name, qbits_map):
            # Leave modules with no activation-bits setting untouched.
            bits_acts = qbits_map[name].acts
            if bits_acts is None:
                return module
            # ReLU -> clipped quantization at a fixed clip value of 1.
            return ClippedLinearQuantization(bits_acts, 1, dequantize=True, inplace=module.inplace)
        # Weights use the module-level DoReFa transform; activations via ReLU swap.
        self.param_quantization_fn = dorefa_quantize_param
        self.replacement_factory[nn.ReLU] = relu_replace_fn
class PACTQuantizer(Quantizer):
    """
    Quantizer using the PACT quantization scheme, as defined in:
    Choi et al., PACT: Parameterized Clipping Activation for Quantized Neural Networks
    (https://arxiv.org/abs/1805.06085)
    Args:
        act_clip_init_val (float): Initial clipping value for activations, referred to as "alpha" in the paper
            (default: 8.0)
        act_clip_decay (float): L2 penalty applied to the clipping values, referred to as "lambda_alpha" in the paper.
            If None then the optimizer's default weight decay value is used (default: None)
    """
    def __init__(self, model, optimizer, bits_activations=32, bits_weights=32, bits_overrides=None,
                 quantize_bias=False, act_clip_init_val=8.0, act_clip_decay=None):
        super(PACTQuantizer, self).__init__(model, optimizer=optimizer, bits_activations=bits_activations,
                                            bits_weights=bits_weights, bits_overrides=bits_overrides,
                                            train_with_fp_copy=True, quantize_bias=quantize_bias)
        def relu_replace_fn(module, name, qbits_map):
            # Leave modules with no activation-bits setting untouched.
            bits_acts = qbits_map[name].acts
            if bits_acts is None:
                return module
            # ReLU -> quantization with a *learned* clipping value.
            return LearnedClippedLinearQuantization(bits_acts, act_clip_init_val, dequantize=True,
                                                    inplace=module.inplace)
        # NOTE(review): weights reuse the DoReFa transform here (not PACT-specific).
        self.param_quantization_fn = dorefa_quantize_param
        self.replacement_factory[nn.ReLU] = relu_replace_fn
        self.act_clip_decay = act_clip_decay
    # In PACT, LearnedClippedLinearQuantization is used for activation, which contains a learnt 'clip_val' parameter
    # We optimize this value separately from the main model parameters
    def _get_updated_optimizer_params_groups(self):
        # Split parameters into the base model group and the clip-value group
        # so the clip values can get their own weight decay.
        base_group = {'params': [param for name, param in self.model.named_parameters() if 'clip_val' not in name]}
        clip_val_group = {'params': [param for name, param in self.model.named_parameters() if 'clip_val' in name]}
        if self.act_clip_decay is not None:
            clip_val_group['weight_decay'] = self.act_clip_decay
        return [base_group, clip_val_group]
|
[
"shangfangxin@ShangFangxin.local"
] |
shangfangxin@ShangFangxin.local
|
10f5ab79003ff1e2cbfd7c31754b890b1ab31a6d
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03283/s440786665.py
|
fe6e5bf6654473cf6b9ff410adac87f56d2c24dd
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,237
|
py
|
class BIT:
    """Fenwick (Binary Indexed) Tree over n elements: point updates and
    prefix/range sums, both in O(log n)."""

    def __init__(self, n):
        self.n = n
        self.bit = [0] * (self.n + 1)  # internal tree array is 1-indexed

    def init(self, init_val):
        """Bulk-load initial values from a 0-indexed iterable."""
        for idx, val in enumerate(init_val):
            self.add(idx, val)

    def add(self, i, x):
        """Add x at 0-indexed position i."""
        pos = i + 1  # switch to the tree's 1-indexed coordinates
        while pos <= self.n:
            self.bit[pos] += x
            pos += pos & (-pos)

    def sum(self, i, j):
        """Return the sum over the half-open range [i, j), 0-indexed."""
        return self._sum(j) - self._sum(i)

    def _sum(self, i):
        """Return the prefix sum over [0, i), 0-indexed."""
        total = 0
        pos = i
        while pos > 0:
            total += self.bit[pos]
            pos -= pos & (-pos)
        return total
import sys
import io, os
# Fast binary stdin reads for competitive-programming input volume.
input = sys.stdin.buffer.readline
#input = io.BytesIO(os.read(0,os.fstat(0).st_size)).readline
# n: index range, m: number of intervals, Q: number of queries
n, m, Q = map(int, input().split())
X = []
# Intervals tagged 0; converted to 0-indexed inclusive [l, r].
for i in range(m):
    l, r = map(int, input().split())
    l, r = l-1, r-1
    X.append((l, r, 0, i))
# Queries tagged 1; tag order makes intervals process before queries at ties.
for i in range(Q):
    p, q = map(int, input().split())
    p, q = p-1, q-1
    X.append((p, q, 1, i))
# Sweep by right endpoint; at equal right endpoints, intervals (tag 0) first.
X.sort(key=lambda x: (x[1], x[2]))
# NOTE(review): capacity 550 is presumably an upper bound on n — confirm.
bit = BIT(550)
ans = [-1]*Q
for l, r, t, i in X:
    if t == 0:
        # register this interval's left endpoint
        bit.add(l, 1)
    else:
        # count intervals fully contained in [l, r] (right end already <= r)
        ans[i] = bit.sum(l, r+1)
print(*ans, sep='\n')
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
3230985ef9f2fafe228c5e3b1cd89a0fefb064cb
|
d5c8ca88c643492b95ca57e972b76bcc56aac251
|
/maker/l18n_maker/mktrans.py
|
9400be853f3388d9df62420c264846f9177cdd52
|
[
"MIT"
] |
permissive
|
olopost/l18n
|
b8462e6cd5eed0293e9e18cde23c850bc8553514
|
c6ef71a94f32537d5c072b493b94d35ee06d9bf4
|
refs/heads/master
| 2020-03-19T16:14:17.737843
| 2018-06-09T09:12:34
| 2018-06-09T09:12:34
| 136,706,924
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,664
|
py
|
import os
import codecs
from collections import defaultdict
from datetime import datetime
from copy import deepcopy
import re
from itertools import chain
import pytz
import polib
import six
from lxml import etree as ET
from .compat import configparser
from .settings import LOCALES, PO_PATH, PY_PATH
from .helpers import get_data_dir, log
OVERRIDES_DIR = os.path.join(os.path.dirname(__file__), 'overrides')
ITEMS = ('tz_locations', 'tz_cities', 'territories')
ALL_TIMEZONES = pytz.all_timezones
overrides = defaultdict(lambda: dict([(i, {}) for i in ITEMS]))
def mk_overrides():
    """Load per-locale translation overrides from the overrides/ directory.

    Populates (once, lazily) and returns the module-level ``overrides``
    mapping: locale file name -> {item category -> {key: value}}.
    """
    # Already populated by a previous call -> reuse the cached result.
    if any(overrides.values()):
        return overrides
    for locfile in os.listdir(OVERRIDES_DIR):
        tr = configparser.ConfigParser()
        # readfp is the py2-compatible spelling (configparser from .compat)
        tr.readfp(codecs.open(os.path.join(OVERRIDES_DIR, locfile),
                              'r', 'utf8'))
        for i in ITEMS:
            try:
                items = tr.items(i)
            except configparser.NoSectionError:
                # this override file has no section for this category
                continue
            for item, value in items:
                overrides[locfile][i][str(item)] = value
    return overrides
def mk_locale_trans(loc, defaults=None):
    """Build the translation tables for one CLDR locale.

    Parses the CLDR LDML file for ``loc`` (or en/root for the defaults),
    merging in any entries from the overrides/ directory, and reports
    bookkeeping about missing/overridden entries.

    Returns a 5-tuple:
    (trans_dict, missing, not_missing_overrides, not_missing_same,
     no_longer_in_pytz) — the translations plus four report dicts.
    """
    if defaults is None:
        defaults = defaultdict(dict)
    # Start from the manual overrides; deepcopy so the cache isn't mutated.
    trans_dict = deepcopy(mk_overrides()[loc])
    missing = defaultdict(list)
    not_missing_overrides = defaultdict(list)
    not_missing_same = defaultdict(list)
    # Overridden city names whose timezone pytz no longer knows about.
    no_longer_in_pytz = {
        'tz_cities': list(set(trans_dict['tz_cities'].keys())
                          .difference(ALL_TIMEZONES))
    }
    for tz in no_longer_in_pytz['tz_cities']:
        trans_dict['tz_cities'].pop(tz)
    def save_trans(name, k, trans):
        # Record a CLDR translation unless an override already covers it.
        cur_trans = trans_dict[name].get(k, None)
        if cur_trans:
            # a translation is already defined
            if cur_trans == trans:
                not_missing_same[name].append(k)
            else:
                not_missing_overrides[name].append((trans, cur_trans))
        else:
            # no existing translation is defined, save it if different than
            # default value
            if trans != defaults[name].get(k, None):
                trans_dict[name][k] = trans
    # there are no territories defined in root.xml, so the default ones should
    # be extracted from en.xml
    ldml = ET.parse(os.path.join(get_data_dir(), 'main', '%s.xml'
                                 % ('en' if loc == 'root' else loc))).getroot()
    ter_required = set(pytz.country_names.keys()).difference(
        defaults['territories'].keys())
    for territory in ldml.find('localeDisplayNames').find('territories'):
        if territory.tag != 'territory' \
                or territory.get('alt', None) is not None:
            continue
        key = territory.get('type')
        save_trans('territories', key, territory.text)
        try:
            ter_required.remove(key)
        except KeyError:
            pass
    missing['territories'].extend(ter_required)
    if loc == 'root':
        # back to root.xml for timezones
        ldml = ET.parse(os.path.join(get_data_dir(), 'main',
                                     'root.xml')).getroot()
    tz_required = set(ALL_TIMEZONES).difference(
        defaults['tz_cities'].keys())
    for zone in ldml.find('dates').find('timeZoneNames'):
        if zone.tag != 'zone':
            continue
        key = zone.get('type')
        try:
            tz_required.remove(key)
        except KeyError:
            # not required; skip zones pytz doesn't know about
            if key not in ALL_TIMEZONES:
                continue
        ex_city = zone.find('exemplarCity')
        if ex_city is None:
            # no exemplar city in CLDR: fall back to the raw zone name
            city = city_location = key.split('/')[-1].replace('_', ' ')
        else:
            # stripping territory name from city_location in cases like
            # 'city [territory]' or 'city, territory'
            city = ex_city.text
            city_location = re.sub('(?:, .*| \[.*\])$', '', city)
        save_trans('tz_cities', key, city)
        def save_location(loc_key, value):
            if loc_key in chain(trans_dict['tz_locations'].keys(),
                                missing['tz_locations']):
                return
            if loc == 'root' and loc_key != key:
                missing['tz_locations'].append(loc_key)
            save_trans('tz_locations', loc_key, value)
        for location in set(key.split('/')[:-1]):
            save_location(location, location.replace('_', ' '))
        if city != city_location:
            # saving under the same key as in the tz_cities dict, so that
            # city_location overrides city when building tz_fullnames dict
            save_location(key, city_location)
    if loc == 'root':
        # populate missing default translations with raw city names
        for zone in tz_required:
            zone_split = zone.split('/')
            save_trans('tz_cities', zone, zone_split[-1].replace('_', ' '))
            for location in set(zone_split[:-1]):
                if location in chain(trans_dict['tz_locations'].keys(),
                                     missing['tz_locations']):
                    continue
                missing['tz_locations'].append(location)
                save_trans('tz_locations', location,
                           location.replace('_', ' '))
    else:
        # report missing translations
        missing['tz_cities'].extend(tz_required)
    return trans_dict, missing, not_missing_overrides, not_missing_same, \
        no_longer_in_pytz
def mk_py(names):
    """
    Generate the __maps.py module containing one dict of default (english)
    values per item category in ITEMS.
    """
    log('> Generating __maps.py with default (english) values')
    # BUGFIX: the encoding argument was ' utf8' (leading space); use the
    # canonical 'utf8' name.  The with-block also guarantees the file is
    # closed even if a write fails.
    with codecs.open(PY_PATH, 'w', 'utf8') as py_file:
        py_file.write('# -*- coding: utf-8 -*-\n\n'
                      '# AUTOMATICALLY GENERATED FILE, DO NOT EDIT')

        def write(key):
            # one top-level dict literal per category, single-quoted values
            py_file.write('\n\n%s = {\n' % key)
            for k, v in six.iteritems(names[key]):
                py_file.write(u"    '%s': u'%s',\n" % (k, v.replace(u"'", u"\\'")))
            py_file.write('}')

        for name in ITEMS:
            write(name)
def mk_po(loc, root_names, trans):
    """
    Generate a .po file for locale loc.

    root_names holds the default (english) strings used as msgids; trans
    holds this locale's translations used as msgstrs.  Returns the path of
    the file written.
    """
    header = u"""# PYTZ TIMEZONE CITIES AND TERRITORIES TRANSLATION FILE
msgid ""
msgstr ""
"Project-Id-Version: l18n\\n"
"Report-Msgid-Bugs-To: \\n"
"POT-Creation-Date: %(date)s\\n"
"PO-Revision-Date: \\n"
"Last-Translator: l18n maker\\n"
"Language-Team: \\n"
"MIME-Version: 1.0\\n"
"Content-Type: text/plain; charset=UTF-8\\n"
"Content-Transfer-Encoding: 8bit\\n"
"Plural-Forms: nplurals=2; plural=(n > 1)\\n"
"X-Poedit-SourceCharset: utf-8\\n"
"Language: """ % {'date': datetime.now(pytz.utc).replace(microsecond=0)}
    log('> Generating .po file for locale ' + loc)
    po_path = PO_PATH % loc
    try:
        os.makedirs(os.path.dirname(po_path))
    except OSError:
        # directory already exists (py2-compatible alternative to exist_ok)
        pass
    # BUGFIX: the encoding argument was ' utf8' (leading space); use the
    # canonical 'utf8' name.
    po_file = codecs.open(po_path, 'w', 'utf8')
    po_file.write(header + loc + u'\\n"\n\n')

    def write(key):
        for k, v in six.iteritems(trans[key]):
            try:
                root_name = root_names[key][k]
            except KeyError:
                # this can happen if we're looking at tz locations and a
                # translation is defined while there is no entry in the root
                # In that case we need to fall back to tz_cities
                if key == 'tz_locations':
                    root_name = root_names['tz_cities'][k]
                else:
                    raise
            po_file.write(u'msgid "%s"\nmsgstr "%s"\n\n' % (root_name, v))

    for name in ITEMS:
        write(name)
    po_file.close()
    return po_path
def mk_mo(po_path):
    """Compile the .po file at *po_path* into a sibling .mo file."""
    mo_path = po_path[:-3] + '.mo'
    catalog = polib.pofile(po_path)
    catalog.save_as_mofile(mo_path)
def mk_trans():
    """Top-level driver: build translations for every locale, report
    anomalies, then write the defaults module and all .po/.mo files.

    Returns 0 on completion (shell-style exit status).
    """
    log('Starting cities and territories names translation')
    # translations, missing, overriden in 'overrides' folder, same value in
    # overrides folder, no longer in pytz database
    result = [{}, {}, {}, {}, {}]
    defaults = None
    # 'root' is processed first so its translations become the defaults
    # passed to every subsequent locale.
    for loc in ('root',) + LOCALES:
        for i, r in enumerate(mk_locale_trans(loc, defaults)):
            if any(r.values()):
                result[i][loc] = r
            if loc == 'root' and i == 0:
                defaults = r
    # Report each of the four anomaly categories with an optional hint.
    for res, msg, post_msg in zip(
            result[1:],
            ('Some translations are missing',
             'Some translations were overridden by entries in an overrides/* file',
             'Some translation overrides are no longer useful as they match the '
             'CLDR translation',
             'Some translation overrides are not in pytz.all_timezones!'),
            ('You may want to add them in overrides/* files',
             None,
             'You may want to remove them from the overrides/* files',
             'You should remove them from the overrides/* files')):
        if res:
            log('')
            log(msg)
            for loc, dic in six.iteritems(res):
                for name, ids in six.iteritems(dic):
                    if not ids:
                        continue
                    if isinstance(ids[0], six.string_types):
                        to_join = ids
                    else:
                        # ids is a list of doubles
                        to_join = ['"%s" (by "%s")' % x for x in ids]
                    log('- %s / %s: %s' % (loc, name, ', '.join(to_join)))
            if post_msg:
                log(post_msg)
            log('')
    trans = result[0]
    root_names = trans['root']
    mk_py(root_names)
    for loc in LOCALES:
        po_path = mk_po(loc, root_names, trans[loc])
        mk_mo(po_path)
    log('Cities and territories names translation completed')
    return 0
|
[
"samuel@meyn.fr"
] |
samuel@meyn.fr
|
218dd255791efaf0be22ffbadcd7b8aeb15ea68e
|
133572e4bbe59a79a3cad64fa0de0500ca0d4a58
|
/uri/1025.py
|
444cba9e48b35aef76b9b4a2573942f1caae62b5
|
[] |
no_license
|
leandro-hbs/python-solutions
|
b8b74da52f466d613a62b480492cc1438db24eac
|
b3fa69b8f8d08610ddc6cb5b0114798e53d67c3d
|
refs/heads/master
| 2023-05-14T14:59:19.901397
| 2023-04-25T16:32:52
| 2023-04-25T16:32:52
| 225,023,995
| 0
| 1
| null | 2020-01-11T12:16:39
| 2019-11-30T14:26:21
|
Python
|
UTF-8
|
Python
| false
| false
| 739
|
py
|
from bisect import bisect_left
def binary_search(array, item):
    """Return the 1-based position of *item* in sorted *array*, or -1 if absent."""
    pos = bisect_left(array, item)
    if pos < len(array) and array[pos] == item:
        return pos + 1
    return -1
# Reads test cases from stdin (URI/beecrowd 1025 "Where is the Marble?"):
# each case starts with N marbles and Q queries; "0 0" terminates.
cont = 1
while True:
    num, con = list(map(int,input().split()))
    if num == 0 and con == 0:
        break
    marmores = []  # marble values for this case
    print('CASE# {}:'.format(str(cont)))
    for i in range(num+con):
        if i < num:
            # first num lines are marble values
            marmores.append(int(input()))
        if i == num:
            # sort once, right before the first query
            marmores.sort()
        if i >= num:
            # remaining lines are queries; report 1-based position or absence
            digit = int(input())
            val = binary_search(marmores, digit)
            if val == -1:
                print(str(digit) + ' not found')
            else:
                print(str(digit) + ' found at ' + str(val))
    cont+=1
|
[
"leandro.batista@academico.ifpb.edu.br"
] |
leandro.batista@academico.ifpb.edu.br
|
c9ea3706505e328d29c39892eea7aa6120d23b0d
|
c7aa86fc206ccdb91e89295704dd068892bda6f6
|
/util.py
|
9856587d7cdf0a52208fddc7d94f7018d3b5b3f8
|
[] |
no_license
|
ritobanrc/cryptopals
|
f2bb031f07de172f1f2735feae79a8f21dbc4e75
|
61c84db90516a915a24f4421d1216b4d2723579a
|
refs/heads/master
| 2020-03-16T13:08:49.489344
| 2020-01-22T02:09:09
| 2020-01-22T02:09:09
| 132,682,310
| 0
| 0
| null | 2018-05-09T21:52:21
| 2018-05-09T00:59:52
|
Python
|
UTF-8
|
Python
| false
| false
| 911
|
py
|
#!/usr/bin/env python3
import binascii
import os
import sys
from math import ceil
def block_print():
    # Silence all print() output by pointing stdout at the null device.
    # NOTE(review): the devnull handle is never closed; enable_print()
    # restores the original stream but leaks this one.
    sys.stdout = open(os.devnull, 'w')
def enable_print():
    # Restore stdout to the interpreter's original stream (undo block_print).
    sys.stdout = sys.__stdout__
def print_split_blocks_b64(text):
    """Print the base64 encoding of *text*, sliced into 16-byte chunks
    joined by b'|' (chunk count derives from the raw input length)."""
    encoded = binascii.b2a_base64(text)
    chunks = [encoded[16 * k:16 * k + 16] for k in range(0, ceil(len(text) / 16))]
    print(b'|'.join(chunks))
def print_split_blocks_hex(text):
    """Print the hex encoding of *text* in 32-hex-char (16-byte) chunks
    joined by '|'."""
    encoded = binascii.b2a_hex(text)
    chunks = [encoded[32 * k:32 * k + 32] for k in range(0, ceil(len(text) / 16))]
    print(b'|'.join(chunks).decode())
def print_split_blocks(text):
    """Print *text* split into 16-byte blocks joined by b'|'."""
    chunks = [text[16 * k:16 * k + 16] for k in range(0, ceil(len(text) / 16))]
    print(b'|'.join(chunks))
def get_index(block_n, char_id):
    """Map (block number, offset within block) to an absolute byte index,
    assuming 16-byte blocks."""
    return char_id + block_n * 16
def split_blocks(text):
    """Split *text* into consecutive 16-byte blocks (last one may be short)."""
    return [text[start:start + 16] for start in range(0, len(text), 16)]
def print_split_n(text, n):
    """Print *text* (a str) split into n-character chunks joined by '|'."""
    chunks = [text[start:start + n] for start in range(0, len(text), n)]
    print('|'.join(chunks))
def clear():
    # Clear the terminal: 'cls' on Windows, 'clear' elsewhere (shells out).
    os.system('cls' if os.name == 'nt' else 'clear')
|
[
"ritobanrc@gmail.com"
] |
ritobanrc@gmail.com
|
174fab88159917da005df7f544b51c6f69f9d901
|
ef9ded4c36f812aafab8f9dcbe049bd7220da347
|
/sum.py
|
006d995e0efdb733a5c0a8084790e3560fc77475
|
[] |
no_license
|
willhyper/python_cuda_numba
|
4bc0fd6dd0513979c64a0a05906aea63683752c1
|
324c6916b4a4b98cc76eee638206bad0fe452a4d
|
refs/heads/master
| 2020-04-05T09:21:42.398129
| 2018-11-08T18:55:57
| 2018-11-08T18:55:57
| 156,752,469
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 695
|
py
|
#! /usr/bin/env python
import numpy as np
from numba import vectorize
import time
N = 1_000_000_000
# Elementwise int16 addition compiled by Numba as a CUDA ufunc.
# NOTE(review): requires a CUDA-capable GPU and numba at call time.
@vectorize(['int16(int16, int16)'], target='cuda')
def vec_sum(a, b):
    return a + b
def sum(a, b):
    # CPU baseline: plain addition (NumPy elementwise when given arrays).
    # NOTE(review): this shadows the built-in sum() for the rest of the module.
    return a + b
def main():
    """Time elementwise int16 addition of two length-N arrays, first with
    the CPU baseline sum(), then with the CUDA ufunc vec_sum()."""
    A = np.ones(N, dtype=np.int16)
    B = np.ones(N, dtype=np.int16)
    C = np.zeros(N, dtype=np.int16)

    t0 = time.time()
    C = sum(A, B)
    dt = time.time() - t0
    print(C[:5])
    print('cpu elapsed', dt, 'sec')

    t0 = time.time()
    C = vec_sum(A, B)
    dt = time.time() - t0
    print(C[:5])
    print('gpu elapsed', dt, 'sec')
# Script entry point.
if __name__ == '__main__':
    main()
|
[
"chchen@chchen-7250.adcorp.kla-tencor.com"
] |
chchen@chchen-7250.adcorp.kla-tencor.com
|
7703835a5b63c8526596eae2677539171409e3de
|
78b4cb19770cf6cba794fcf336b0ab6f6c753e2e
|
/SRC/product/migrations/0014_product_prddescountprice.py
|
550e0b9de019beeae3ec169787d499fb87251d7e
|
[] |
no_license
|
medoosh79/Souq
|
b10be70342135585d068754246db16653bc7be31
|
c01f84baee39c36def433bb3630e5bae8ce7e789
|
refs/heads/main
| 2023-04-01T06:11:22.250284
| 2021-04-09T22:55:00
| 2021-04-09T22:55:00
| 356,405,474
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 501
|
py
|
# Generated by Django 3.1.7 on 2021-04-04 19:02
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add the 'PrdDescountPrice' decimal field to Product."""
    dependencies = [
        ('product', '0013_auto_20210404_2136'),
    ]
    operations = [
        migrations.AddField(
            model_name='product',
            name='PrdDescountPrice',
            # 8 digits / 2 decimal places; default=0 backfills existing rows
            # (preserve_default=False drops the default after the migration).
            field=models.DecimalField(decimal_places=2, default=0, max_digits=8, verbose_name='Product Descount Price'),
            preserve_default=False,
        ),
    ]
|
[
"72854950+medoosh79@users.noreply.github.com"
] |
72854950+medoosh79@users.noreply.github.com
|
f3c25f57d7876d36705ee82b523352c7672cfbce
|
fd082e303f509d91b14d1a73304f58c9a8fdf1d6
|
/street_parking_reservation/parking_reservations/migrations/0002_auto_20201207_1839.py
|
1c1dea3006a1256259306e60f17d5bfdf7b8ea64
|
[] |
no_license
|
mihirsn/street-parking
|
27f1fc930ad32cfc41c73ecae067c36112ac84db
|
8cb3b8c0ca97bd48974bc3f4e889a143ae4716e9
|
refs/heads/main
| 2023-01-29T09:42:48.777284
| 2020-12-14T05:15:04
| 2020-12-14T05:15:04
| 318,709,448
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 718
|
py
|
# Generated by Django 2.2 on 2020-12-07 18:39
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ParkingSpot.status and adjust spot_type choices.

    BUGFIX: the generated defaults were swapped — the CharField ``status``
    had the integer default 4 (not a valid choice, wrong type) while the
    IntegerField ``spot_type`` had the string default 'A'.  Restored
    type-correct defaults: status='A', spot_type=4.
    """
    dependencies = [
        ('parking_reservations', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='parkingspot',
            name='status',
            field=models.CharField(
                choices=[('A', 'A'), ('R', 'R'), ('S', 'S')], default='A', max_length=1
            ),
        ),
        migrations.AlterField(
            model_name='parkingspot',
            name='spot_type',
            field=models.IntegerField(
                choices=[(2, 'Two-wheeler'), (4, 'Four-wheeler')], default=4
            ),
        ),
    ]
|
[
"mihir_naik@ymail.com"
] |
mihir_naik@ymail.com
|
9366aeb52a075956515f6961f7209386098247ca
|
c6e8a4de46bfd8c355a4c443310594ce87ef9672
|
/src/drift_plot.py
|
55160aab7a8936f3cf3f21f0c11a20ca064b95c6
|
[] |
no_license
|
nuin/popgene.s2js
|
c9fac3df6803924fdcfce69506fa9e1d42da5852
|
c66389f4d3bbd728cc6eae4a49f0ff8cf86794bf
|
refs/heads/master
| 2023-02-25T20:48:43.147810
| 2021-01-27T02:33:10
| 2021-01-27T02:33:10
| 332,580,228
| 0
| 0
| null | 2021-01-27T02:33:11
| 2021-01-24T23:31:20
|
Python
|
UTF-8
|
Python
| false
| false
| 1,520
|
py
|
# Paulo Nuin Jan 2021
import altair as alt
import pandas as pd
import numpy as np
def plot_graph(json_file):
    """Render an interactive Altair line chart from *json_file*.

    The JSON is read as a wide DataFrame (one column per category), melted
    to long (x, category, y) form, and saved with mouseover highlighting
    to 'drift.html'.
    """
    dataframe = pd.read_json(json_file)
    dataframe.index.name = 'x'
    # wide -> long: one (x, category, y) row per data point
    dataframe = dataframe.reset_index().melt('x', var_name='category', value_name='y')
    nearest = alt.selection(type='single', nearest=True, on='mouseover', fields=['x'], empty='none')
    # The basic line
    line = alt.Chart(dataframe).mark_line(interpolate='basis').encode(
        x='x:Q',
        y='y:Q',
        color='category:N'
    )
    # Transparent selectors across the chart. This is what tells us
    # the x-value of the cursor
    selectors = alt.Chart(dataframe).mark_point().encode(x='x:Q',opacity=alt.value(0),).add_selection(nearest)
    # Draw points on the line, and highlight based on selection
    points = line.mark_point().encode(opacity=alt.condition(nearest, alt.value(1), alt.value(0)))
    # Draw text labels near the points, and highlight based on selection
    text = line.mark_text(align='left', dx=5, dy=-5).encode(text=alt.condition(nearest, 'y:Q', alt.value(' ')))
    # Draw a rule at the location of the selection
    rules = alt.Chart(dataframe).mark_rule(color='gray').encode(x='x:Q',).transform_filter(nearest)
    # Put the five layers into a chart and bind the data
    chart = alt.layer(line, selectors, points, rules, text).properties(width=2000, height=800)
    chart.save('drift.html')
# Script entry point: render the default drift data file.
if __name__ == '__main__':
    plot_graph('drift.json')
|
[
"nuin@genedrift.org"
] |
nuin@genedrift.org
|
9e2f3cbeefa98d81d8846f8b703e92cee46224ea
|
8a707aacc9ecd999cb2a996f91a718ce02f6205c
|
/leetcode/CourseScheduleII.py
|
ede26c794c3d4748b777773929ba2f90dfd46583
|
[] |
no_license
|
seeyarh/interview-prep
|
e074f68f1c1c05b9ab0911f30b13dad69c7bbfb8
|
1af5f79ed9dcf334d2758e14a9c08e7880246a4f
|
refs/heads/master
| 2022-03-13T12:26:59.441211
| 2019-11-14T19:35:31
| 2019-11-14T19:35:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,644
|
py
|
'''
There are a total of n courses you have to take, labeled from 0 to n-1.
Some courses may have prerequisites, for example to take course 0 you have to first take course 1, which is expressed as a pair: [0,1]
Given the total number of courses and a list of prerequisite pairs, return the ordering of courses you should take to finish all courses.
There may be multiple correct orders, you just need to return one of them. If it is impossible to finish all courses, return an empty array.
Example 1:
Input: 2, [[1,0]]
Output: [0,1]
Explanation: There are a total of 2 courses to take. To take course 1 you should have finished
course 0. So the correct course order is [0,1] .
Example 2:
Input: 4, [[1,0],[2,0],[3,1],[3,2]]
Output: [0,1,2,3] or [0,2,1,3]
Explanation: There are a total of 4 courses to take. To take course 3 you should have finished both
courses 1 and 2. Both courses 1 and 2 should be taken after you finished course 0.
So one correct course order is [0,1,2,3]. Another correct ordering is [0,2,1,3] .
Note:
The input prerequisites is a graph represented by a list of edges, not adjacency matrices. Read more about how a graph is represented.
You may assume that there are no duplicate edges in the input prerequisites.
Hints:
This problem is equivalent to finding the topological order in a directed graph. If a cycle exists, no topological ordering exists and therefore it will be impossible to take all courses.
Topological Sort via DFS - A great video tutorial (21 minutes) on Coursera explaining the basic concepts of Topological Sort.
Topological sort could also be done via BFS.
'''
import collections
class Solution:
    """Course Schedule II: topological ordering via Kahn's algorithm."""

    def findOrder(self, numCourses, prerequisites):
        """Return one valid order to take all courses, or [] on a cycle.

        prerequisites is a list of [course, prereq] edges.
        """
        prereqs_of = collections.defaultdict(set)  # course -> prereqs still pending
        unlocks = collections.defaultdict(set)     # course -> courses it unlocks
        for course, prereq in prerequisites:
            prereqs_of[course].add(prereq)
            unlocks[prereq].add(course)
        # Courses with no pending prerequisites can be taken right away.
        # (Touching prereqs_of[c] also materializes a key for every course.)
        ready = [c for c in range(numCourses) if not prereqs_of[c]]
        order = []
        while ready:
            course = ready.pop()
            order.append(course)
            for follower in unlocks[course]:
                prereqs_of[follower].remove(course)
                if not prereqs_of[follower]:
                    ready.append(follower)
            prereqs_of.pop(course)
        # Any leftover entry means some course was never unlocked: a cycle.
        return order if not prereqs_of else []
# Quick demo: edges mean [course, prerequisite], so 2 must precede 0 and 1,
# and 0 must precede 1 -- the only valid order is [2, 0, 1].
sol = Solution()
edge_list = [[1,0], [0,2], [1,2]]
print(f'Input: {3}, {edge_list}')
print(f'Output: {sol.findOrder(3, edge_list)}')
|
[
"collinsrhuffiii@gmail.com"
] |
collinsrhuffiii@gmail.com
|
d46560cbc49c27ce3492bf33bcef835f61989a6b
|
d9edec82527fc8b0c98e1288f2161e496b984722
|
/Regex_Sheet.py
|
7fcc29c3ef753384a4ae43da3aeea8c8b325f9ab
|
[
"MIT"
] |
permissive
|
HeyIamJames/PyGames
|
d8f1dfd13565945ac75959bc4a80a582f7efc8ed
|
ce2d667b0318a4c663933b2fce842d191f54bdc1
|
refs/heads/master
| 2020-05-21T23:34:07.415205
| 2018-06-21T06:26:48
| 2018-06-21T06:26:48
| 31,036,158
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
# Cheat sheet: https://www.debuggex.com/cheatsheet/regex/python
#return all ints of len 6
import re
re.findall(r"\D(\d{6})\D", s)
#include 6+
only len 6 = /([\W\w])\d{6}
#only 6
^\D*\d{15}\D*$
#how to replace char
re.sub('[d]', ' ', x)
>>> str = "h3110 23 cat 444.4 rabbit 11 2 dog"
>>> [int(s) for s in str.split() if s.isdigit()]
[23, 11, 2]
#invest result>
re.findall("\W\w", {1}, s)
s = "ds 4"
|
[
"noreply@github.com"
] |
noreply@github.com
|
2177c8bb67a563ce7dca172e126b7d7032796c68
|
e27e5ef945ca30d50456ee366c662820dce4aee9
|
/multiagent-particle-envs-master/multiagent/rendering.py
|
a80414a8b30c146d2a01f83f3f6a607be4f51ee7
|
[
"MIT"
] |
permissive
|
S4ltedF1sh/oodl-project
|
399b862f49cc8180a0bfbd38e6998079096d5093
|
b2a5f6e6e4d537fa2ce2cb2e1ceaf9d4bb300948
|
refs/heads/master
| 2023-01-21T13:10:07.820772
| 2020-11-30T12:51:08
| 2020-11-30T12:51:08
| 317,222,022
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,149
|
py
|
"""
2D rendering framework
"""
from __future__ import division
import os
import six
import sys
if "Apple" in sys.version:
if 'DYLD_FALLBACK_LIBRARY_PATH' in os.environ:
os.environ['DYLD_FALLBACK_LIBRARY_PATH'] += ':/usr/lib'
# (JDS 2016/04/15): avoid bug on Anaconda 2.3.0 / Yosemite
#from gym.utils import reraise
from gym import error
try:
import pyglet
except ImportError as e:
raise e
#reraise(suffix="HINT: you can install pyglet directly via 'pip install pyglet'. But if you really just want to install all Gym dependencies and not have to think about it, 'pip install -e .[all]' or 'pip install gym[all]' will do it.")
try:
from pyglet.gl import *
except ImportError as e:
raise e
#reraise(prefix="Error occured while running `from pyglet.gl import *`",suffix="HINT: make sure you have OpenGL install. On Ubuntu, you can run 'apt-get install python-opengl'. If you're running on a server, you may need a virtual frame buffer; something like this should work: 'xvfb-run -s \"-screen 0 1400x900x24\" python <your_script.py>'")
import math
import numpy as np
RAD2DEG = 57.29577951308232
def get_display(spec):
    """Turn a display specification (e.g. ':0') into a pyglet Display.

    None is passed through unchanged (pyglet picks the default display);
    anything other than None or a string raises a gym error.
    Pyglet only supports multiple Displays on Linux.
    """
    if spec is None:
        return None
    if isinstance(spec, six.string_types):
        return pyglet.canvas.Display(spec)
    raise error.Error('Invalid display specification: {}. (Must be a string like :0 or None.)'.format(spec))
class Viewer(object):
    """Pyglet window that renders a list of geoms every frame.

    Persistent geoms are added with add_geom(); add_onetime()/draw_*()
    geoms are drawn once and cleared after the next render().
    """

    def __init__(self, width, height, display=None):
        display = get_display(display)
        self.width = width
        self.height = height
        self.window = pyglet.window.Window(width=width, height=height, display=display)
        self.window.on_close = self.window_closed_by_user
        self.geoms = []          # drawn every frame
        self.onetime_geoms = []  # drawn once, then discarded
        self.transform = Transform()
        glEnable(GL_BLEND)
        # glEnable(GL_MULTISAMPLE)
        glEnable(GL_LINE_SMOOTH)
        # glHint(GL_LINE_SMOOTH_HINT, GL_DONT_CARE)
        glHint(GL_LINE_SMOOTH_HINT, GL_NICEST)
        glLineWidth(2.0)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)

    def close(self):
        self.window.close()

    def window_closed_by_user(self):
        self.close()

    def set_bounds(self, left, right, bottom, top):
        """Map world coordinates [left,right]x[bottom,top] onto the window."""
        assert right > left and top > bottom
        scalex = self.width/(right-left)
        scaley = self.height/(top-bottom)
        self.transform = Transform(
            translation=(-left*scalex, -bottom*scaley),
            scale=(scalex, scaley))

    def add_geom(self, geom):
        self.geoms.append(geom)

    def add_onetime(self, geom):
        self.onetime_geoms.append(geom)

    def render(self, return_rgb_array=False):
        """Draw all geoms; optionally return the frame as an RGB uint8 array."""
        glClearColor(1,1,1,1)
        self.window.clear()
        self.window.switch_to()
        self.window.dispatch_events()
        self.transform.enable()
        for geom in self.geoms:
            geom.render()
        for geom in self.onetime_geoms:
            geom.render()
        self.transform.disable()
        arr = None
        if return_rgb_array:
            buffer = pyglet.image.get_buffer_manager().get_color_buffer()
            image_data = buffer.get_image_data()
            # BUG FIX: np.fromstring on binary data is deprecated (removed in
            # modern NumPy); np.frombuffer is the documented replacement.
            arr = np.frombuffer(image_data.data, dtype=np.uint8)
            # In https://github.com/openai/gym-http-api/issues/2, we
            # discovered that someone using Xmonad on Arch was having
            # a window of size 598 x 398, though a 600 x 400 window
            # was requested. (Guess Xmonad was preserving a pixel for
            # the boundary.) So we use the buffer height/width rather
            # than the requested one.
            arr = arr.reshape(buffer.height, buffer.width, 4)
            arr = arr[::-1,:,0:3]
        self.window.flip()
        self.onetime_geoms = []
        return arr

    # Convenience wrappers: build a geom, apply attrs, queue it for one frame.
    def draw_circle(self, radius=10, res=30, filled=True, **attrs):
        geom = make_circle(radius=radius, res=res, filled=filled)
        _add_attrs(geom, attrs)
        self.add_onetime(geom)
        return geom

    def draw_polygon(self, v, filled=True, **attrs):
        geom = make_polygon(v=v, filled=filled)
        _add_attrs(geom, attrs)
        self.add_onetime(geom)
        return geom

    def draw_polyline(self, v, **attrs):
        geom = make_polyline(v=v)
        _add_attrs(geom, attrs)
        self.add_onetime(geom)
        return geom

    def draw_line(self, start, end, **attrs):
        geom = Line(start, end)
        _add_attrs(geom, attrs)
        self.add_onetime(geom)
        return geom

    def get_array(self):
        """Return the current frame as an (height, width, 3) uint8 array."""
        self.window.flip()
        image_data = pyglet.image.get_buffer_manager().get_color_buffer().get_image_data()
        self.window.flip()
        # BUG FIX: frombuffer replaces the deprecated fromstring (see render()).
        arr = np.frombuffer(image_data.data, dtype=np.uint8)
        arr = arr.reshape(self.height, self.width, 4)
        return arr[::-1,:,0:3]
def _add_attrs(geom, attrs):
if "color" in attrs:
geom.set_color(*attrs["color"])
if "linewidth" in attrs:
geom.set_linewidth(attrs["linewidth"])
class Geom(object):
    """Base class for drawable geometry.

    Carries a list of render attributes; the first entry is always the
    geom's own Color, mutated in place by set_color().
    """
    def __init__(self):
        self._color = Color((0, 0, 0, 1.0))
        self.attrs = [self._color]
    def render(self):
        # Enable attrs in reverse so the earliest-added is innermost,
        # draw the primitive, then disable in insertion order.
        for attribute in reversed(self.attrs):
            attribute.enable()
        self.render1()
        for attribute in self.attrs:
            attribute.disable()
    def render1(self):
        """Emit the GL primitives; subclasses must override."""
        raise NotImplementedError
    def add_attr(self, attr):
        self.attrs.append(attr)
    def set_color(self, r, g, b, alpha=1):
        self._color.vec4 = (r, g, b, alpha)
class Attr(object):
    """Base class for render-state attributes (color, transform, ...)."""
    def enable(self):
        """Push this attribute's GL state; subclasses must override."""
        raise NotImplementedError
    def disable(self):
        """Undo enable(); a no-op unless a subclass overrides it."""
class Transform(Attr):
    """Translate / rotate / scale attribute, applied in that order."""
    def __init__(self, translation=(0.0, 0.0), rotation=0.0, scale=(1, 1)):
        self.set_translation(*translation)
        self.set_rotation(rotation)
        self.set_scale(*scale)
    def enable(self):
        glPushMatrix()
        tx, ty = self.translation
        glTranslatef(tx, ty, 0)  # move to the geom's location
        glRotatef(RAD2DEG * self.rotation, 0, 0, 1.0)  # rotation stored in radians
        sx, sy = self.scale
        glScalef(sx, sy, 1)
    def disable(self):
        glPopMatrix()
    def set_translation(self, newx, newy):
        self.translation = (float(newx), float(newy))
    def set_rotation(self, new):
        self.rotation = float(new)
    def set_scale(self, newx, newy):
        self.scale = (float(newx), float(newy))
class Color(Attr):
    """RGBA color attribute; vec4 is an (r, g, b, a) tuple."""
    def __init__(self, vec4):
        self.vec4 = vec4
    def enable(self):
        glColor4f(*self.vec4)
class LineStyle(Attr):
    """Line stipple (dash) pattern attribute; style is a 16-bit GL pattern."""
    def __init__(self, style):
        self.style = style
    def enable(self):
        glEnable(GL_LINE_STIPPLE)
        glLineStipple(1, self.style)
    def disable(self):
        glDisable(GL_LINE_STIPPLE)
class LineWidth(Attr):
    """Line width attribute; stroke is the width in pixels."""
    def __init__(self, stroke):
        self.stroke = stroke
    def enable(self):
        glLineWidth(self.stroke)
class Point(Geom):
    """A single vertex at the local origin."""
    def __init__(self):
        Geom.__init__(self)
    def render1(self):
        glBegin(GL_POINTS) # draw point
        glVertex3f(0.0, 0.0, 0.0)
        glEnd()
class FilledPolygon(Geom):
    """Filled polygon through vertex list v, outlined at half brightness."""
    def __init__(self, v):
        Geom.__init__(self)
        self.v = v
    def render1(self):
        # Pick the primitive type by vertex count: quad, general polygon,
        # or triangle.
        if len(self.v) == 4 : glBegin(GL_QUADS)
        elif len(self.v) > 4 : glBegin(GL_POLYGON)
        else: glBegin(GL_TRIANGLES)
        for p in self.v:
            glVertex3f(p[0], p[1],0) # draw each vertex
        glEnd()
        # Outline the polygon in the same hue at half intensity (all four
        # RGBA components are halved, so the outline is also half-alpha).
        color = (self._color.vec4[0] * 0.5, self._color.vec4[1] * 0.5, self._color.vec4[2] * 0.5, self._color.vec4[3] * 0.5)
        glColor4f(*color)
        glBegin(GL_LINE_LOOP)
        for p in self.v:
            glVertex3f(p[0], p[1],0) # draw each vertex
        glEnd()
def make_circle(radius=10, res=30, filled=True):
    """Build a regular res-gon approximating a circle of the given radius."""
    verts = [(math.cos(2*math.pi*k / res) * radius,
              math.sin(2*math.pi*k / res) * radius) for k in range(res)]
    return FilledPolygon(verts) if filled else PolyLine(verts, True)
def make_polygon(v, filled=True):
    """Polygon through v: filled, or as a closed polyline outline."""
    return FilledPolygon(v) if filled else PolyLine(v, True)
def make_polyline(v):
    """Open (non-closed) polyline through the vertices in v."""
    return PolyLine(v, False)
def make_capsule(length, width):
    """Horizontal capsule: a length x width box capped by a circle at each end."""
    half = width/2
    body = make_polygon([(0, -half), (0, half), (length, half), (length, -half)])
    cap_left = make_circle(width/2)
    cap_right = make_circle(width/2)
    cap_right.add_attr(Transform(translation=(length, 0)))
    return Compound([body, cap_left, cap_right])
class Compound(Geom):
    """Several geoms rendered as one.

    Child Color attrs are stripped so the compound's own color applies
    uniformly to every part.
    """
    def __init__(self, gs):
        Geom.__init__(self)
        self.gs = gs
        for child in self.gs:
            child.attrs = [a for a in child.attrs if not isinstance(a, Color)]
    def render1(self):
        for child in self.gs:
            child.render()
class PolyLine(Geom):
    """Polyline through vertex list v; closed into a loop when close is True."""
    def __init__(self, v, close):
        Geom.__init__(self)
        self.v = v
        self.close = close
        self.linewidth = LineWidth(1)
        self.add_attr(self.linewidth)
    def render1(self):
        mode = GL_LINE_LOOP if self.close else GL_LINE_STRIP
        glBegin(mode)
        for p in self.v:
            glVertex3f(p[0], p[1], 0)  # each vertex, z = 0
        glEnd()
    def set_linewidth(self, x):
        self.linewidth.stroke = x
class Line(Geom):
    """Straight segment from start to end (each an (x, y) pair)."""
    def __init__(self, start=(0.0, 0.0), end=(0.0, 0.0)):
        Geom.__init__(self)
        self.start = start
        self.end = end
        self.linewidth = LineWidth(1)
        self.add_attr(self.linewidth)
    def render1(self):
        glBegin(GL_LINES)
        glVertex2f(*self.start)
        glVertex2f(*self.end)
        glEnd()
class Image(Geom):
    """Image geom loaded from file, blitted centered on the local origin."""
    def __init__(self, fname, width, height):
        Geom.__init__(self)
        self.width = width
        self.height = height
        img = pyglet.image.load(fname)
        self.img = img
        self.flip = False
    def render1(self):
        # Centered: blit from (-w/2, -h/2), scaled to width x height.
        self.img.blit(-self.width/2, -self.height/2, width=self.width, height=self.height)
# ================================================================
class SimpleImageViewer(object):
    """Minimal viewer that displays (H, W, 3) uint8 frames in a pyglet window."""
    def __init__(self, display=None):
        self.window = None
        self.isopen = False
        self.display = display
    def imshow(self, arr):
        """Show one frame; the window is sized from the first frame's shape."""
        if self.window is None:
            height, width, channels = arr.shape
            self.window = pyglet.window.Window(width=width, height=height, display=self.display)
            self.width = width
            self.height = height
            self.isopen = True
        assert arr.shape == (self.height, self.width, 3), "You passed in an image with the wrong number shape"
        # Negative pitch flips the image vertically (row 0 at the top).
        image = pyglet.image.ImageData(self.width, self.height, 'RGB', arr.tobytes(), pitch=self.width * -3)
        self.window.clear()
        self.window.switch_to()
        self.window.dispatch_events()
        image.blit(0,0)
        self.window.flip()
    def close(self):
        if self.isopen:
            self.window.close()
            self.isopen = False
    def __del__(self):
        self.close()
|
[
"minhvu.pham@stud.tu-darmstadt.de"
] |
minhvu.pham@stud.tu-darmstadt.de
|
61ed638564f28791b24b7cd7c21897b32fe62fd0
|
c93080264201fe6d0c84a79ae435022981d8ccf6
|
/panoptic/panoptic/doctype/facial_recognition_system/facial_recognition_system.py
|
85576d855955e672d0df3ef2428a5befc108e3a5
|
[
"MIT"
] |
permissive
|
wisharya/panoptic
|
100e733e9aad33d087851fc4ea9bd064e81954f2
|
7c9a0eeb6bd5d9032087ccb7c805a3e65a357ba8
|
refs/heads/master
| 2023-07-09T14:20:45.377441
| 2021-08-25T06:58:45
| 2021-08-25T06:58:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 289
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Internet Freedom Foundation and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class FacialRecognitionSystem(Document):
    """Frappe DocType controller for Facial Recognition System.

    No custom server-side behavior yet; everything is inherited from
    frappe.model.document.Document.
    """
    pass
|
[
"scm.mymail@gmail.com"
] |
scm.mymail@gmail.com
|
85555f604a4c7310411f4bc0868e6122dbbf2275
|
03c3d78a80093cc27e421e758d27ac1ab88af28c
|
/dactyl/filter_link_replacement.py
|
5f6e2dcc1869720e6d3cb42c72415ab5ab825824
|
[
"MIT"
] |
permissive
|
rippleclone/dactyl
|
aff6d244ea5a75422617924501f0f2661fffa33e
|
4672bfb4d351657791dd7bb4e216437509db587d
|
refs/heads/master
| 2021-04-28T08:30:27.724928
| 2018-02-17T01:26:09
| 2018-02-17T01:26:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,074
|
py
|
################################################################################
## Substitute Links filter ##
## Author: Rome Reginelli ##
## Copyright: Ripple Labs, Inc. 2017 ##
## ##
## Replaces the link substitution feature built into Dactyl < 0.4.0 with a ##
## filter to do about the same. ##
################################################################################
import re
LINK_SUBS_FIELD = "link_subs"
LINK_RE_SUBS_FIELD = "link_re_subs"
PARAMETER_REPLACE_FIELD = "replace_parameter_links"
IMAGE_SUBS_FIELD = "image_subs"
IMAGE_RE_SUBS_FIELD = "image_re_subs"
IMAGE_LINK_REGEX = re.compile(r"^[^.]+\.(png|jpg|jpeg|gif|svg)", re.I)
def filter_soup(soup, currentpage={}, target={}, pages=[], logger=None, **kwargs):
    """
    Replaces links and image hrefs in the current page, based on a substitution
    map in the target or page settings. Also looks into values in the current
    page's metadata and replaces links there, in case the template uses fields
    from the current page's metadata key/values.
    """
    # Stash the logger in module globals so the helper functions below can
    # log without it being threaded through every call.
    globals()["logger"] = logger
    # currentpage already includes link subs inherited from the target
    if LINK_SUBS_FIELD in currentpage:
        link_subs = currentpage[LINK_SUBS_FIELD]
        if (PARAMETER_REPLACE_FIELD in currentpage and
                currentpage[PARAMETER_REPLACE_FIELD]):
            substitute_parameter_links(currentpage, link_subs)
        substitute_links(soup, link_subs)
    if LINK_RE_SUBS_FIELD in currentpage:
        link_re_subs = currentpage[LINK_RE_SUBS_FIELD]
        re_sub_links(soup, link_re_subs)
    if IMAGE_SUBS_FIELD in currentpage:
        image_subs = currentpage[IMAGE_SUBS_FIELD]
        # Image paths can appear both as <img src> and as plain <a href>.
        substitute_images(soup, image_subs)
        substitute_links(soup, image_subs)
    if IMAGE_RE_SUBS_FIELD in currentpage:
        image_re_subs = currentpage[IMAGE_RE_SUBS_FIELD]
        re_sub_images(soup, image_re_subs)
        re_sub_links(soup, image_re_subs)
def substitute_links(soup, link_subs):
    """Rewrite <a href> values whose prefix matches a key of link_subs.

    An href starting with a needle gets that prefix swapped for the
    replacement; the remainder of the URL is preserved. Substitutions
    chain: later needles see the already-updated href.
    """
    for anchor in soup.find_all("a", href=True):
        for prefix, new_prefix in link_subs.items():
            current = anchor["href"]
            if current[:len(prefix)] == prefix:
                new_href = new_prefix + current[len(prefix):]
                logger.info("... replacing link '%s' with '%s'" %
                    (current, new_href) )
                anchor["href"] = new_href
def substitute_images(soup, image_subs):
    """Swap <img> src values that exactly match a key of image_subs."""
    for img in soup.find_all("img"):
        for old_src, new_src in image_subs.items():
            if img["src"] == old_src:
                logger.info("... replacing image '%s' with '%s'" %
                    (old_src, new_src) )
                img["src"] = new_src
def re_sub_images(soup, image_re_subs):
    """Regex-rewrite <img> src values for every matching pattern."""
    for img in soup.find_all("img", src=True):
        for pattern, repl in image_re_subs.items():
            if re.match(pattern, img["src"]):
                new_src = re.sub(pattern, repl, img["src"])
                logger.info("... replacing image '%s' with '%s'" %
                    (img["src"], new_src) )
                img["src"] = new_src
def re_sub_links(soup, link_re_subs):
    """Regex-rewrite <a href> values for every matching pattern."""
    for anchor in soup.find_all("a", href=True):
        for pattern, repl in link_re_subs.items():
            if re.match(pattern, anchor["href"]):
                new_href = re.sub(pattern, repl, anchor["href"])
                logger.info("... replacing link '%s' with '%s'" %
                    (anchor["href"], new_href) )
                anchor["href"] = new_href
# Page metadata keys that are structural, never user-visible link text.
RESERVED_PAGE_KEYS = [
    "html",
    "md",
    "category",
    "targets"
]
def substitute_parameter_links(currentpage, link_subs):
    """
    Takes a map of needle:replacement link substitutions and applies them to
    string values in the current page's metadata parameters.

    Reserved/structural keys and non-string values are skipped.
    """
    for field, val in currentpage.items():
        if field in RESERVED_PAGE_KEYS:
            continue
        if type(val) != str:
            continue
        # BUG FIX: iterate the mapping's items; iterating the dict directly
        # yields bare key strings and fails to unpack into (needle,
        # replacement).
        for needle, replacement in link_subs.items():
            if val[:len(needle)] == needle:
                new_val = replacement + val[len(needle):]
                logger.info(("... replacing field '%s'; replacing value "+
                            "'%s' with '%s'") % (field, val, new_val) )
                currentpage[field] = new_val
class MDLink: # Used for link substitution on markdown
    """A markdown link, in one of the following formats:
    - [label](url "title") — an inline link (title optional)
    -  — an inline image link
    - [ref]: url — a reference link definition (could be an image or not)
    Affects, but does not include, reference link instances, such as:
    - [ref][]
    - [label][ref]
    """
    # Group layout: (full inner match, inline label, inline url,
    # reference label, reference url).
    MD_LINK_REGEX = re.compile(
        r"(\[([^\]]+)\]\(([^:)]+)\)|\[([^\]]+)\]:\s*(\S+)$)", re.MULTILINE)
    MD_IMG_REGEX = re.compile(
        r"!(\[([^\]]+)\]\(([^:)]+)\)|\[([^\]]+)\]:\s*(\S+)$)", re.MULTILINE)
    def __init__(self, fullmatch, label, url, label2, url2):
        self.fullmatch = fullmatch
        # NOTE(review): group 1 of MD_IMG_REGEX excludes the leading "!",
        # so fullmatch from findall() never starts with "!" and is_img
        # stays False for image links — confirm whether this is intended.
        if fullmatch[:1] == "!":
            self.is_img = True
        else:
            self.is_img = False
        if label:
            # Inline form: [label](url)
            self.label = label
            self.url = url
            self.is_reflink = False
        elif label2:
            # Reference-definition form: [ref]: url
            self.label = label2
            self.url = url2
            self.is_reflink = True
    def to_markdown(self):
        """Re-represent self as a link in markdown syntax"""
        s = "[" + self.label + "]"
        if self.is_reflink:
            s += ": " + self.url
        else:
            s += "(" + self.url + ")"
        return s
def filter_markdown(md, mode="html", currentpage={}, **kwargs):
    """
    In "Githubify" mode, we need to do link substitution on the Markdown itself
    since we won't have an opportunity to do it on the HTML output.

    Returns md unchanged for any mode other than "md".
    """
    if mode != "md":
        return md
    if LINK_SUBS_FIELD in currentpage:
        link_subs = currentpage[LINK_SUBS_FIELD]
        md = substitute_md_links(md, link_subs)
    if IMAGE_SUBS_FIELD in currentpage:
        image_subs = currentpage[IMAGE_SUBS_FIELD]
        # BUG FIX: the original passed `currentpage` (a dict) instead of the
        # markdown text to both image-substitution calls, discarding the
        # already-substituted markdown.
        md = substitute_md_images(md, image_subs)
        md = substitute_md_links(md, image_subs)
    return md
def substitute_md_links(md, link_subs, do_images=False):
    """Apply prefix substitutions from link_subs to markdown link targets.

    When do_images is True, image links () are processed
    instead of ordinary links.
    """
    # BUG FIX: the class attributes are MD_LINK_REGEX / MD_IMG_REGEX (the
    # original referenced nonexistent MARKDOWN_* names), and the selection
    # was inverted (do_images picked the plain-link regex).
    if do_images:
        regex = MDLink.MD_IMG_REGEX
    else:
        regex = MDLink.MD_LINK_REGEX
    links = [MDLink(*m) for m in regex.findall(md)]
    for link in links:
        # BUG FIX: iterate items(); iterating the dict yields bare keys.
        for needle, replacement in link_subs.items():
            if link.url[:len(needle)] == needle:
                link.url = replacement + link.url[len(needle):]
                md = md.replace(link.fullmatch, link.to_markdown())
    return md
def substitute_md_images(md, image_subs):
    """Image-link variant of substitute_md_links()."""
    return substitute_md_links(md, image_subs, do_images=True)
|
[
"mduo13@gmail.com"
] |
mduo13@gmail.com
|
68b7e0edc2cb15f4a5795fc8fe8fd3ace39dfb3b
|
cde5600114cb9f1bbed0fc2ab387aac4f9a18c63
|
/version.py
|
c8a2ef09f70534b9f3fa18d844c784c4abb7eb18
|
[
"BSD-3-Clause"
] |
permissive
|
RebeccaRrr/iVisDesigner
|
3a6416946a39d6c4256b43754f14fa0e55340aa6
|
35435dc168d281de923cec5fcd1f21aa7380137a
|
refs/heads/master
| 2020-03-22T10:39:51.288870
| 2018-02-16T22:31:05
| 2018-02-16T22:31:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 101
|
py
|
# BUG FIX: the `commands` module was removed in Python 3. subprocess exists
# on both Python 2.7 and 3.x; bytes.decode()/str.decode() works on both.
import subprocess

# Human-readable release version of iVisDesigner.
IV_version = "0.10alpha"

# Short git revision of the working tree, or "unknown" when git / the
# repository is unavailable (commands.getoutput never raised either).
try:
    IV_rev = subprocess.check_output(
        ["git", "rev-parse", "--short", "HEAD"]).decode("utf-8").strip()
except Exception:
    IV_rev = "unknown"
|
[
"donghao.ren@gmail.com"
] |
donghao.ren@gmail.com
|
2f418c58a6958c6e075720c48b9b2b0d5305095f
|
41ee6c4358b09979a4a4373933752b04ebba2025
|
/TP7/rl/cellular.py
|
2f282439f5c0a9ef57ce550b0e1b8090c50269e8
|
[] |
no_license
|
sromany/Apprentissage
|
897e3ec9df797fa171512e15d35f4aa12ea145f0
|
a9233aa6fb5c2e42c3da5c4a0df6d49be2554057
|
refs/heads/master
| 2021-06-13T00:44:14.898081
| 2017-03-07T11:55:44
| 2017-03-07T11:55:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,852
|
py
|
# coding=utf-8
import random
import sys
neighbourSynonyms = ('neighbours', 'neighbors', 'neighbour', 'neighbor')
class Cell:
    """Grid cell base class.

    The tuple of adjacent cells is computed lazily on first access of any
    of the neighbour-attribute synonyms and then cached on the instance
    under all synonyms, so __getattr__ is not hit again.
    """
    def __getattr__(self, key):
        if key in neighbourSynonyms:
            # Resolve the adjacent grid coordinates in every direction,
            # then map them to cell objects.
            pts = [self.world.getPointInDirection(
                self.x, self.y, dir) for dir in range(self.world.directions)]
            ns = tuple([self.world.grid[y][x] for (x, y) in pts])
            for n in neighbourSynonyms:
                self.__dict__[n] = ns
            return ns
        raise AttributeError(key)
class Agent:
    """An entity that occupies a cell and faces one of world.directions.

    Assigning to .cell keeps the cell's agents list in sync (the agent is
    removed from the old cell and appended to the new one).
    """
    def __setattr__(self, key, val):
        if key == 'cell':
            old = self.__dict__.get(key, None)
            if old is not None:
                old.agents.remove(self)
            if val is not None:
                val.agents.append(self)
        self.__dict__[key] = val
    def __getattr__(self, key):
        # Convenience aliases for the cells around the agent's heading.
        if key == 'leftCell':
            return self.getCellOnLeft()
        elif key == 'rightCell':
            return self.getCellOnRight()
        elif key == 'aheadCell':
            return self.getCellAhead()
        raise AttributeError(key)
    def turn(self, amount):
        self.dir = (self.dir + amount) % self.world.directions
    def turnLeft(self):
        self.turn(-1)
    def turnRight(self):
        self.turn(1)
    def turnAround(self):
        # NOTE: `/` is integer division under Python 2 (this file uses the
        # Python 2 print statement elsewhere); under Python 3 this would
        # produce a float direction.
        self.turn(self.world.directions / 2)
    # return True if successfully moved in that direction
    def goInDirection(self, dir):
        target = self.cell.neighbour[dir]
        if getattr(target, 'wall', False):
            #print "hit a wall"
            return False
        self.cell = target
        return True
    def goForward(self):
        self.goInDirection(self.dir)
    def goBackward(self):
        self.turnAround()
        self.goForward()
        self.turnAround()
    def getCellAhead(self):
        return self.cell.neighbour[self.dir]
    def getCellOnLeft(self):
        return self.cell.neighbour[(self.dir - 1) % self.world.directions]
    def getCellOnRight(self):
        return self.cell.neighbour[(self.dir + 1) % self.world.directions]
    def goTowards(self, target):
        """Step onto the neighbouring cell closest (squared distance) to
        target; walls block the move."""
        if self.cell == target:
            return
        best = None
        for n in self.cell.neighbours:
            if n == target:
                best = target
                break
            dist = (n.x - target.x) ** 2 + (n.y - target.y) ** 2
            if best is None or bestDist > dist:
                best = n
                bestDist = dist
        if best is not None:
            if getattr(best, 'wall', False):
                return
            self.cell = best
class World:
    """Toroidal cellular-automaton grid of Cell objects plus Agents.

    NOTE: this is Python 2 code (`file()`, integer `/`); porting to
    Python 3 requires open() and `//` in the centering math below.
    """
    def __init__(self, cell=None, width=None, height=None, directions=8, filename=None):
        if cell is None:
            cell = Cell
        self.Cell = cell
        self.display = makeDisplay(self)
        self.directions = directions
        # If loading from a file, infer the grid size from its contents.
        if filename is not None:
            data = file(filename).readlines()
            if height is None:
                height = len(data)
            if width is None:
                width = max([len(x.rstrip()) for x in data])
        if width is None:
            width = 20
        if height is None:
            height = 20
        self.width = width
        self.height = height
        self.image = None
        self.reset()
        if filename is not None:
            self.load(filename)
    def getCell(self, x, y):
        return self.grid[y][x]
    def getWrappedCell(self, x, y):
        # Toroidal lookup: coordinates wrap around the grid edges.
        return self.grid[y % self.height][x % self.width]
    def reset(self):
        """Rebuild an empty grid and clear agents and the age counter."""
        self.grid = [[self.makeCell(
            i, j) for i in range(self.width)] for j in range(self.height)]
        self.dictBackup = [[{} for i in range(self.width)]
                           for j in range(self.height)]
        self.agents = []
        self.age = 0
    def makeCell(self, x, y):
        c = self.Cell()
        c.x = x
        c.y = y
        c.world = self
        c.agents = []
        return c
    def randomize(self):
        """Randomize every cell, if the Cell class supports it."""
        if not hasattr(self.Cell, 'randomize'):
            return
        for row in self.grid:
            for cell in row:
                cell.randomize()
    def save(self, f=None):
        """Serialize the grid via Cell.save(); write to f or return a string."""
        if not hasattr(self.Cell, 'save'):
            return
        if isinstance(f, type('')):
            f = file(f, 'w')
        total = ''
        for j in range(self.height):
            line = ''
            for i in range(self.width):
                line += self.grid[j][i].save()
            total += '%s\n' % line
        if f is not None:
            f.write(total)
            f.close()
        else:
            return total
    def load(self, f):
        """Load a saved grid (filename or file object), centered on the world."""
        if not hasattr(self.Cell, 'load'):
            return
        if isinstance(f, type('')):
            f = file(f)
        lines = f.readlines()
        lines = [x.rstrip() for x in lines]
        fh = len(lines)
        fw = max([len(x) for x in lines])
        # Clamp to the world size and center the loaded pattern.
        if fh > self.height:
            fh = self.height
            starty = 0
        else:
            starty = (self.height - fh) / 2
        if fw > self.width:
            fw = self.width
            startx = 0
        else:
            startx = (self.width - fw) / 2
        self.reset()
        for j in range(fh):
            line = lines[j]
            for i in range(min(fw, len(line))):
                self.grid[starty + j][startx + i].load(line[i])
    def update(self):
        """Advance one tick: update cells (double-buffered) and agents."""
        if hasattr(self.Cell, 'update'):
            # Double-buffer the cell dicts so every cell's update() sees
            # the previous generation's state, then swap back.
            for j, row in enumerate(self.grid):
                for i, c in enumerate(row):
                    self.dictBackup[j][i].update(c.__dict__)
                    c.update()
                    c.__dict__, self.dictBackup[j][
                        i] = self.dictBackup[j][i], c.__dict__
            for j, row in enumerate(self.grid):
                for i, c in enumerate(row):
                    c.__dict__, self.dictBackup[j][
                        i] = self.dictBackup[j][i], c.__dict__
            for a in self.agents:
                a.update()
            self.display.redraw()
        else:
            # Agent-only worlds redraw just the cells agents moved between.
            for a in self.agents:
                oldCell = a.cell
                a.update()
                if oldCell != a.cell:
                    self.display.redrawCell(oldCell.x, oldCell.y)
                    self.display.redrawCell(a.cell.x, a.cell.y)
            self.display.update()
        self.age += 1
    def getPointInDirection(self, x, y, dir):
        """Coordinates of the neighbour of (x, y) in direction dir,
        wrapped toroidally; supports 4-, 6- (hex) and 8-direction grids."""
        if self.directions == 8:
            dx, dy = [(0, -1), (1, -1), (
                1, 0), (1, 1), (0, 1), (-1, 1), (-1, 0), (-1, -1)][dir]
        elif self.directions == 4:
            dx, dy = [(0, -1), (1, 0), (0, 1), (-1, 0)][dir]
        elif self.directions == 6:
            # Hex grid: offsets differ between even and odd rows.
            if y % 2 == 0:
                dx, dy = [(1, 0), (0, 1), (-1, 1), (-1, 0),
                          (-1, -1), (0, -1)][dir]
            else:
                dx, dy = [(1, 0), (1, 1), (0, 1), (-1, 0),
                          (0, -1), (1, -1)][dir]
        x2 = x + dx
        y2 = y + dy
        if x2 < 0:
            x2 += self.width
        if y2 < 0:
            y2 += self.height
        if x2 >= self.width:
            x2 -= self.width
        if y2 >= self.height:
            y2 -= self.height
        return (x2, y2)
    def addAgent(self, agent, x=None, y=None, cell=None, dir=None):
        """Place agent on the grid; position/direction default to random."""
        self.agents.append(agent)
        if cell is not None:
            x = cell.x
            y = cell.y
        if x is None:
            x = random.randrange(self.width)
        if y is None:
            y = random.randrange(self.height)
        if dir is None:
            dir = random.randrange(self.directions)
        agent.cell = self.grid[y][x]
        agent.dir = dir
        agent.world = self
import time
def makeDisplay(world):
    """Create a display bound to *world*.

    NOTE(review): `Display` is not defined anywhere in this chunk (only
    DummyDisplay / TkinterDisplay / PygameDisplay are) -- presumably it is
    bound elsewhere in the module; confirm before relying on this factory.
    """
    d = Display()
    d.world = world
    return d
class DummyDisplay:
    """No-op display backend: satisfies the display interface without
    rendering anything (useful for headless runs)."""
    def activate(self, size=4):
        """Nothing to set up."""
    def redraw(self):
        """Ignore full redraws."""
    def redrawCell(self, x, y):
        """Ignore single-cell redraws."""
    def update(self):
        """Ignore per-tick updates."""
    def setTitle(self, title):
        """Ignore title changes."""
class TkinterDisplay:
    """Tkinter display backend: renders the world to a PPM file and shows it
    in a Label.

    NOTE(review): `Tkinter`, `cStringIO`, `makeTitle` and `file()` are
    Python 2 names not imported/defined in this chunk -- presumably
    imported elsewhere in the module; confirm.
    """
    # Class-level defaults; `root` is shared by all instances.
    activated = False
    paused = False
    title = ''
    updateEvery = 1   # redraw every N world ticks
    root = None
    delay = 0         # extra sleep per update, in 0.1 s units (max 10)
    def activate(self, size=4):
        """Create/reuse the Tk root, build the image label and key bindings."""
        self.bg = None
        self.size = size
        if TkinterDisplay.root is None:
            TkinterDisplay.root = Tkinter.Tk()
        for c in self.root.winfo_children():
            c.destroy()
        self.activated = True
        self.imageLabel = Tkinter.Label(self.root)
        self.imageLabel.pack(side=Tkinter.LEFT, fill=Tkinter.BOTH, expand=1)
        self.frameWidth, self.frameHeight = self.world.width * \
            size, self.world.height * size
        self.root.geometry(
            '%dx%d' % (self.world.width * size, self.world.height * size))
        self.root.update()
        self.redraw()
        # PageUp/PageDown tune speed; space pauses; Escape quits.
        self.root.bind('<Configure>', self.onConfigure)
        self.root.bind('<Prior>', self.onPageUp)
        self.root.bind('<Next>', self.onPageDown)
        self.root.bind('<space>', self.pause)
        self.root.bind('<Escape>', self.quit)
    def quit(self, event):
        self.root.destroy()
    def update(self):
        """Refresh the window; throttled by updateEvery and delay."""
        if not self.activated:
            return
        if self.world.age % self.updateEvery != 0 and not self.paused:
            return
        self.setTitle(self.title)
        self.imageLabel.update()
        if self.delay > 0:
            time.sleep(self.delay * 0.1)
    def setTitle(self, title):
        if not self.activated:
            return
        self.title = title
        title += ' %s' % makeTitle(self.world)
        if self.root.title() != title:
            self.root.title(title)
    def onConfigure(self, event):
        """Window resize: recompute cell pixel size and redraw if changed."""
        if event.width != self.frameWidth or event.height != self.frameHeight:
            oldSize = self.size
            scalew = event.width / self.world.width
            scaleh = event.height / self.world.height
            self.size = min(scalew, scaleh)
            if self.size < 1:
                self.size = 1
            if oldSize < self.size:
                self.imageCache.clear()
            if oldSize != self.size:
                self.redraw()
        self.frameWidth = event.width
        self.frameHeight = event.height
    def onPageDown(self, event):
        # Slow down: first reduce frame skipping, then add sleep delay.
        if self.updateEvery > 1:
            self.updateEvery /= 2
        else:
            self.delay += 1
            if self.delay > 10:
                self.delay = 10
    def onPageUp(self, event):
        # Speed up: first remove sleep delay, then skip more frames.
        if self.delay > 0:
            self.delay -= 1
        else:
            self.updateEvery *= 2
    def pause(self, event=None):
        # Blocks in a loop while paused, still pumping UI updates.
        self.paused = not self.paused
        while self.paused:
            self.update()
    def getBackground(self):
        """Window background color as a 3-byte raw RGB string (cached)."""
        if self.bg is None:
            r, g, b = self.imageLabel.winfo_rgb(self.root['background'])
            self.bg = '%c%c%c' % (r >> 8, g >> 8, b >> 8)
        return self.bg
    def redraw(self):
        """Write the whole grid to temp.ppm and load it into the label.

        Hex grids get a half-cell horizontal offset on alternating rows."""
        if not self.activated:
            return
        hexgrid = self.world.directions == 6
        iw = self.world.width * self.size
        ih = self.world.height * self.size
        if hexgrid:
            iw += self.size / 2
        f = file('temp.ppm', 'wb')
        f.write('P6\n%d %d\n255\n' % (iw, ih))
        odd = False
        for row in self.world.grid:
            line = cStringIO.StringIO()
            if hexgrid and odd:
                line.write(self.getBackground() * (self.size / 2))
            for cell in row:
                # The topmost agent (last in list) wins over the cell color.
                if len(cell.agents) > 0:
                    c = self.getDataColour(cell.agents[-1])
                else:
                    c = self.getDataColour(cell)
                line.write(c * self.size)
            if hexgrid and not odd:
                line.write(self.getBackground() * (self.size / 2))
            odd = not odd
            f.write(line.getvalue() * self.size)
        f.close()
        self.image = Tkinter.PhotoImage(file='temp.ppm')
        self.imageLabel.config(image=self.image)
    # Cache of size-zoomed single-color PhotoImages keyed by Tk color string.
    imageCache = {}
    def redrawCell(self, x, y):
        """Repaint one cell by blitting a cached solid-color tile."""
        if not self.activated:
            return
        sx = x * self.size
        sy = y * self.size
        if y % 2 == 1 and self.world.directions == 6:
            sx += self.size / 2
        cell = self.world.grid[y][x]
        if len(cell.agents) > 0:
            c = self.getTextColour(cell.agents[-1])
        else:
            c = self.getTextColour(cell)
        sub = self.imageCache.get(c, None)
        if sub is None:
            sub = Tkinter.PhotoImage(width=1, height=1)
            sub.put(c, to=(0, 0))
            sub = sub.zoom(self.size)
            self.imageCache[c] = sub
        self.image.tk.call(self.image, 'copy', sub, '-from', 0, 0, self.size, self.size, '-to', sx, sy)
    def getTextColour(self, obj):
        """Resolve obj's colour/color attribute to a Tk '#rrggbb' string.

        Accepts a name, a callable, or an (r, g, b) tuple (0.0-1.0 floats
        are scaled to 0-255)."""
        c = getattr(obj, 'colour', None)
        if c is None:
            c = getattr(obj, 'color', 'white')
        if callable(c):
            c = c()
        if isinstance(c, type(())):
            if isinstance(c[0], type(0.0)):
                c = (int(c[0] * 255), int(c[1] * 255), int(c[2] * 255))
            return '#%02x%02x%02x' % c
        return c
    # Cache mapping Tk color names to raw 3-byte RGB strings.
    dataCache = {}
    def getDataColour(self, obj):
        """Resolve obj's colour to raw 3-byte RGB for the PPM stream."""
        c = getattr(obj, 'colour', None)
        if c is None:
            c = getattr(obj, 'color', 'white')
        if callable(c):
            c = c()
        if isinstance(c, type(())):
            if isinstance(c[0], type(0.0)):
                c = (int(c[0] * 255), int(c[1] * 255), int(c[2] * 255))
            return '%c%c%c' % c
        else:
            val = self.dataCache.get(c, None)
            if val is None:
                r, g, b = self.imageLabel.winfo_rgb(c)
                val = '%c%c%c' % (r >> 8, g >> 8, b >> 8)
                self.dataCache[c] = val
            return val
class PygameDisplay:
    """Pygame display backend (Python 2: note the print statement below).

    NOTE(review): `pygame` and `makeTitle` are not imported/defined in this
    chunk -- presumably elsewhere in the module; confirm.
    """
    # Class-level defaults; `screen` is shared by all instances.
    activated = False
    paused = False
    title = ''
    updateEvery = 1   # redraw every N world ticks
    delay = 0         # extra sleep per update, in 0.1 s units (max 10)
    screen = None
    def activate(self, size=4):
        """Init pygame and (re)create the shared screen at the world size."""
        self.size = size
        pygame.init()
        w = self.world.width * size
        h = self.world.height * size
        if self.world.directions == 6:
            w += size / 2
        if PygameDisplay.screen is None or PygameDisplay.screen.get_width() != w or PygameDisplay.screen.get_height() != h:
            PygameDisplay.screen = pygame.display.set_mode(
                (w, h), pygame.RESIZABLE, 32)
        self.activated = True
        # Color of a default-constructed cell: used as the fill background.
        self.defaultColour = self.getColour(self.world.grid[0][0].__class__())
        self.redraw()
    def redraw(self):
        """Repaint the whole grid; hex grids offset alternating rows."""
        if not self.activated:
            return
        self.screen.fill(self.defaultColour)
        hexgrid = self.world.directions == 6
        self.offsety = (
            self.screen.get_height() - self.world.height * self.size) / 2
        self.offsetx = (
            self.screen.get_width() - self.world.width * self.size) / 2
        sy = self.offsety
        odd = False
        for row in self.world.grid:
            sx = self.offsetx
            if hexgrid and odd:
                sx += self.size / 2
            for cell in row:
                if len(cell.agents) > 0:
                    c = self.getColour(cell.agents[0])
                else:
                    c = self.getColour(cell)
                # Skip cells already covered by the background fill.
                if c != self.defaultColour:
                    try:
                        self.screen.fill(c, (sx, sy, self.size, self.size))
                    except TypeError:
                        print 'Error: invalid colour:', c
                sx += self.size
            odd = not odd
            sy += self.size
    def redrawCell(self, x, y):
        """Repaint a single cell rectangle."""
        if not self.activated:
            return
        sx = x * self.size + self.offsetx
        sy = y * self.size + self.offsety
        if y % 2 == 1 and self.world.directions == 6:
            sx += self.size / 2
        cell = self.world.grid[y][x]
        if len(cell.agents) > 0:
            c = self.getColour(cell.agents[0])
        else:
            c = self.getColour(cell)
        self.screen.fill(c, (sx, sy, self.size, self.size))
    def update(self):
        """Pump events, handle keys, flip the frame; throttled like Tkinter."""
        if not self.activated:
            return
        if self.world.age % self.updateEvery != 0 and not self.paused:
            return
        self.setTitle(self.title)
        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
                sys.exit()
            elif event.type == pygame.QUIT:
                sys.exit()
            elif event.type == pygame.VIDEORESIZE:
                self.onResize(event)
            elif event.type == pygame.KEYDOWN and event.key == pygame.K_PAGEUP:
                # Speed up: remove sleep delay first, then skip frames.
                if self.delay > 0:
                    self.delay -= 1
                else:
                    self.updateEvery *= 2
            elif event.type == pygame.KEYDOWN and event.key == pygame.K_PAGEDOWN:
                # Slow down: reduce frame skipping first, then add delay.
                if self.updateEvery > 1:
                    self.updateEvery /= 2
                else:
                    self.delay += 1
                    if self.delay > 10:
                        self.delay = 10
            elif event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:
                self.pause()
        pygame.display.flip()
        if self.delay > 0:
            time.sleep(self.delay * 0.1)
    def setTitle(self, title):
        if not self.activated:
            return
        self.title = title
        title += ' %s' % makeTitle(self.world)
        if pygame.display.get_caption()[0] != title:
            pygame.display.set_caption(title)
    def pause(self, event=None):
        # Blocks in a loop while paused, still pumping events via update().
        self.paused = not self.paused
        while self.paused:
            self.update()
    def onResize(self, event):
        """Window resize: recompute the cell pixel size and redraw."""
        if not self.activated:
            return
        pygame.display.set_mode(event.size, pygame.RESIZABLE, 32)
        oldSize = self.size
        scalew = event.size[0] / self.world.width
        scaleh = event.size[1] / self.world.height
        self.size = min(scalew, scaleh)
        if self.size < 1:
            self.size = 1
        self.redraw()
    def getColour(self, obj):
        """Resolve obj's colour/color attribute to a pygame-usable color.

        Accepts a name, a callable, or an (r, g, b) tuple (0.0-1.0 floats
        are scaled to 0-255)."""
        c = getattr(obj, 'colour', None)
        if c is None:
            c = getattr(obj, 'color', 'white')
        if callable(c):
            c = c()
        if isinstance(c, type(())):
            if isinstance(c[0], type(0.0)):
                c = (int(c[0] * 255), int(c[1] * 255), int(c[2] * 255))
            return c
        return pygame.color.Color(c)
    def saveImage(self, filename=None):
        """Dump the current frame to a BMP (default name: <age>.bmp)."""
        if filename is None:
            filename = '%05d.bmp' % self.world.age
        pygame.image.save(self.screen, filename)
def makeTitle(world):
    """Build the window-title suffix: the world's age plus any active
    display flags (paused / frame skip / delay) in square brackets."""
    display = world.display
    flags = []
    if display.paused:
        flags.append('paused')
    if display.updateEvery != 1:
        flags.append('skip=%d' % display.updateEvery)
    if display.delay > 0:
        flags.append('delay=%d' % display.delay)
    title = 'age: %d' % world.age
    if flags:
        title += ' [%s]' % ', '.join(flags)
    return title
# Pick the best available display backend: pygame if installed, then
# Tkinter, falling back to a no-op display.  Catch only ImportError so
# genuine bugs inside the modules are not silently hidden (the original
# used bare ``except:`` clauses).
try:
    import pygame
    Display = PygameDisplay
except ImportError:
    try:
        import Tkinter
        import cStringIO
        Display = TkinterDisplay
    except ImportError:
        Display = DummyDisplay
|
[
"sromany@a-018833.ups.u-psud.fr"
] |
sromany@a-018833.ups.u-psud.fr
|
aff2e3d4a8b31eea14b1f27deb841c7f6fd6b5ff
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03139/s434904278.py
|
34502437335082bac61aadf38d8e30ed218dbb86
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 87
|
py
|
# Two groups of sizes a and b among n people: their overlap is at most
# min(a, b) and at least max(a + b - n, 0).
n, a, b = [int(token) for token in input().split(' ')]
largest = min(a, b)
smallest = max(a + b - n, 0)
print("{0} {1}".format(largest, smallest))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
06f6074a20ddfa9793849de924eb5aac5f61a5bf
|
eddd90b807cff770b678f090b8fd9b32a9ced05e
|
/Madrid/July2022/smurf-busters/Interface/django_project/app/main/views.py
|
7692a0a1eb625206913ad61b5eaf2cb0ac40ad62
|
[
"MIT"
] |
permissive
|
SaturdaysAI/Projects
|
ba26768c20ffb4a3f3c6bc26dd5dd71ee5065fdf
|
99ab19f7896bc30cf059244962a7da318d4672bf
|
refs/heads/master
| 2023-03-11T05:27:52.837311
| 2022-11-25T22:36:24
| 2022-11-25T22:36:24
| 188,530,455
| 35
| 36
|
MIT
| 2023-03-03T01:41:24
| 2019-05-25T06:41:09
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 963
|
py
|
import json
import os
from django.shortcuts import render, HttpResponse
from django.http import JsonResponse
from django.contrib import messages
from django.urls import reverse
from django.http import HttpResponseRedirect
import hashlib
from functools import wraps
import logging
import copy
from django.template.loader import render_to_string
import pandas as pd
from demo import demo
def base(request, *args, **kwargs):
    """Render the static base page; extra positional/keyword args are
    accepted but ignored."""
    return render(request, "base.html")
def calculate_table(request, *args, **kwargs):
    """AJAX endpoint: render the top-5 or bottom-5 rows of the demo table.

    The raw request body selects the slice: if it contains the string
    'table1' the first five rows are used, otherwise the last five.

    NOTE(review): a non-AJAX request falls through and returns None,
    which Django rejects at runtime -- confirm whether an explicit 400
    response was intended.
    """
    if request.headers.get('x-requested-with') == 'XMLHttpRequest':
        df_table = demo()
        # Round percentages for display purposes only.
        df_table['percentage'] = df_table['percentage'].round(2)
        params = request.body.decode()
        if 'table1' in params:
            data = df_table.iloc[:5]
        else:
            data = df_table.iloc[-5:]
        response_dic = {'data': data}
        return HttpResponse(render_to_string("tables/team1.html", response_dic, request))
|
[
"noreply@github.com"
] |
noreply@github.com
|
eb2a25ca1f321ccb18995a48ed60df38daeb05c7
|
7f3c09b2707fcd4f55ec1acccf94193e88b9ae33
|
/coffee maachine/ipython.py
|
a776d4efcc3a367f63552710692050d76bf9956a
|
[] |
no_license
|
regod10/my-python-projects
|
6f8f632d94087bc4a6376a5c3b96a1d0ebcaebec
|
83462a5357c0d79e23024419912d113dcb003847
|
refs/heads/main
| 2023-04-28T23:18:13.582599
| 2023-04-24T23:36:02
| 2023-04-24T23:36:02
| 311,811,168
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,390
|
py
|
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'Consolas'; font-size:10pt; font-weight:400; font-style:normal;">
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Python 3.8.8 (default, Feb 24 2021, 15:54:32) [MSC v.1928 64 bit (AMD64)]</p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Type "copyright", "credits" or "license" for more information.</p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br /></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">IPython 7.21.0 -- An enhanced Interactive Python.</p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br /></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" color:#00ff00;">In [</span><span style=" font-weight:600; color:#00ff00;">1</span><span style=" color:#00ff00;">]:</span> runfile('C:/Users/space/OneDrive/Desktop/300 days of python/untitled0.py', wdir='C:/Users/space/OneDrive/Desktop/300 days of python')</p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" color:#00ffff;"> File </span><span style=" color:#00ff00;">"C:\Users\space\OneDrive\Desktop\300 days of python\untitled0.py"</span><span style=" color:#00ffff;">, line </span><span style=" color:#00ff00;">73</span></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" color:#ffff00;"> total += int(input("how many pennies":)) *0.01</span></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" color:#ffffff;"> ^</span></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" color:#ff0000;">SyntaxError:</span> invalid syntax</p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br /></p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br /></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" color:#00ff00;">In [</span><span style=" font-weight:600; color:#00ff00;">2</span><span style=" color:#00ff00;">]:</span> runfile('C:/Users/space/OneDrive/Desktop/300 days of python/untitled0.py', wdir='C:/Users/space/OneDrive/Desktop/300 days of python')</p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" color:#00ffff;"> File </span><span style=" color:#00ff00;">"C:\Users\space\OneDrive\Desktop\300 days of python\untitled0.py"</span><span style=" color:#00ffff;">, line </span><span style=" color:#00ff00;">111</span></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" color:#ffff00;"> print(f'Water: {resources["water"]}ml")</span></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" color:#ffffff;"> ^</span></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" color:#ff0000;">SyntaxError:</span> EOL while scanning string literal</p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br /></p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br /></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" color:#00ff00;">In [</span><span style=" font-weight:600; color:#00ff00;">3</span><span style=" color:#00ff00;">]:</span> runfile('C:/Users/space/OneDrive/Desktop/300 days of python/untitled0.py', wdir='C:/Users/space/OneDrive/Desktop/300 days of python')</p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br /></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">What would you like?(espresso/latte/cappuccino):latte</p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Please insert coins.</p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br /></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">how many quarters?:10</p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br /></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">how many dimes?:10</p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br /></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">how many nickles?:10</p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br /></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">how many pennies?:10</p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Here is $1.1 in change.</p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Here is your latte enjoy</p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br /></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">What would you like?(espresso/latte/cappuccino):</p></body></html>
|
[
"noreply@github.com"
] |
noreply@github.com
|
b6064297dc58373f1ca18c6ddfb0b8c37d01e841
|
0a39aeb1ca9e124c818c86b55e169620aef74716
|
/WithFlask/project.py
|
b29eaaa9c71b3cfc8498357900740f04c21b3c91
|
[] |
no_license
|
muhammet-mucahit/FullStack-Exercises
|
72ffadd17d6df5ccd198568db5511c0678e28907
|
01699ab6cf86b6d7dd71a9c0462813a0da43e36a
|
refs/heads/master
| 2020-03-11T21:41:00.639683
| 2018-04-19T21:05:44
| 2018-04-19T21:05:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,002
|
py
|
from flask import Flask, render_template, request, redirect, url_for, flash, jsonify
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Restaurant, MenuItem
app = Flask(__name__)
# Create session and connect to DB
engine = create_engine('sqlite:///restaurantmenu.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind = engine)
# NOTE(review): a single module-level session is shared by every request;
# confirm this is acceptable for this demo application.
session = DBSession()
# API PART
@app.route('/restaurants/<int:restaurant_id>/menu/JSON/')
def restaurantMenuJSON(restaurant_id):
    """Return every menu item of one restaurant as JSON."""
    # The restaurant itself is not used in the payload, but .one() raises
    # when no such restaurant exists -- presumably an existence check;
    # TODO confirm that behaviour is intended.
    restaurant = session.query(Restaurant).filter_by(id = restaurant_id).one()
    items = session.query(MenuItem).filter_by(restaurant_id = restaurant_id).all()
    return jsonify(MenuItems=[i.serialize for i in items])
@app.route('/restaurants/<int:restaurant_id>/menu/<int:menu_id>/JSON/')
def menuJSON(restaurant_id, menu_id):
    """Return a single menu item as JSON.

    Note: the lookup is by menu_id only; restaurant_id from the URL is
    not checked against the item.
    """
    menuItem = session.query(MenuItem).filter_by(id = menu_id).one()
    return jsonify(MenuItem = menuItem.serialize)
# END OF API PART
@app.route('/')
@app.route('/restaurants/<int:restaurant_id>/')
def restaurantMenu(restaurant_id):
    """Show the menu page for one restaurant.

    NOTE(review): the bare '/' route supplies no restaurant_id, so a GET
    on '/' fails with a missing-argument error -- confirm whether a
    default id or a restaurant list page was intended.
    """
    restaurant = session.query(Restaurant).filter_by(id = restaurant_id).one()
    items = session.query(MenuItem).filter_by(restaurant_id = restaurant.id)
    return render_template('menu.html', restaurant = restaurant, items = items)
@app.route('/restaurants/<int:restaurant_id>/new', methods=['GET', 'POST'])
def newMenuItem(restaurant_id):
    """Create a menu item for a restaurant: show the form on GET,
    insert the item and redirect back to the menu on POST."""
    if request.method == 'POST':
        newItem = MenuItem(name = request.form['name'], restaurant_id = restaurant_id)
        session.add(newItem)
        session.commit()
        flash("new menu item created!")
        return redirect(url_for('restaurantMenu', restaurant_id = restaurant_id))
    else:
        return render_template('newmenuitem.html', restaurant_id = restaurant_id)
@app.route('/restaurants/<int:restaurant_id>/<int:menu_id>/edit', methods=['GET', 'POST'])
def editMenuItem(restaurant_id, menu_id):
    """Edit a menu item: show the form on GET, apply the change on POST.

    Only the name field is updated, and only when the submitted value is
    non-empty.
    """
    editedItem = session.query(MenuItem).filter_by(id = menu_id).one()
    if request.method == 'POST':
        if request.form['name']:
            editedItem.name = request.form['name']
        session.add(editedItem)
        session.commit()
        flash("Menu item has been edited!")
        return redirect(url_for('restaurantMenu', restaurant_id = restaurant_id))
    else:
        return render_template('editmenuitem.html', restaurant_id = restaurant_id, menu_id = menu_id, item = editedItem)
@app.route('/restaurants/<int:restaurant_id>/<int:menu_id>/delete', methods=['GET', 'POST'])
def deleteMenuItem(restaurant_id, menu_id):
    """Delete a menu item: show a confirmation page on GET, delete and
    redirect back to the menu on POST."""
    deletedItem = session.query(MenuItem).filter_by(id = menu_id).one()
    if request.method == 'POST':
        session.delete(deletedItem)
        session.commit()
        flash("Menu item has been deleted!")
        return redirect(url_for('restaurantMenu', restaurant_id = restaurant_id))
    else:
        return render_template('deletemenuitem.html', restaurant_id = restaurant_id, menu_id = menu_id, item = deletedItem)
if __name__ == '__main__':
    # NOTE(review): hard-coded secret key, debug=True and binding to
    # 0.0.0.0 are development-only settings -- do not deploy as-is.
    app.secret_key = 'super_secret_key'
    app.debug = True
    app.run(host = '0.0.0.0', port = 5000)
|
[
"mucahitaktepe@gmail.com"
] |
mucahitaktepe@gmail.com
|
2738586937ed37c650c08c83414758db6e9f901a
|
d411824122cd250cd2c95302cf339426b0ee2ec6
|
/ProblemSets/ProblemSet6/ps6.py
|
477bbf3422d89b07b9dbb56c3e82d0c90c1812b0
|
[] |
no_license
|
paulwithap/MITx6.00x
|
da86e3b2fc5ec2d27655c451e68f05a7ab1066c5
|
f34f59e0017a0a521e3b8779b236ea3532cc3581
|
refs/heads/master
| 2016-09-03T07:36:08.597909
| 2013-04-22T03:25:29
| 2013-04-22T03:25:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,508
|
py
|
# 6.00x Problem Set 6
# RSS Feed Filter
import feedparser
import string
import time
from project_util import translate_html
from Tkinter import *
#-----------------------------------------------------------------------
#
# Problem Set 6
#======================
# Code for retrieving and parsing RSS feeds
# Do not change this code
#======================
def process(url):
    """
    Fetches news items from the rss url and parses them.
    Returns a list of NewsStory-s.
    """
    feed = feedparser.parse(url)
    entries = feed.entries
    ret = []
    for entry in entries:
        guid = entry.guid
        title = translate_html(entry.title)
        link = entry.link
        summary = translate_html(entry.summary)
        try:
            # Entries without tags raise AttributeError; fall back to an
            # empty subject in that case.
            subject = translate_html(entry.tags[0]['term'])
        except AttributeError:
            subject = ""
        newsStory = NewsStory(guid, title, subject, summary, link)
        ret.append(newsStory)
    return ret
#======================
#======================
# Part 1
# Data structure design
#======================
# Problem 1
class NewsStory(object):
    """Container for a single RSS news item.

    Holds the item's guid, title, subject, summary and link, and exposes
    each field through a getter method.
    """
    def __init__(self, guid, title, subject, summary, link):
        # Attribute names are part of the original layout; keep them.
        (self.guid, self.title, self.subject,
         self.summary, self.link) = guid, title, subject, summary, link
    def getGuid(self):
        """Return the story's globally unique id."""
        return self.guid
    def getTitle(self):
        """Return the story's headline."""
        return self.title
    def getSubject(self):
        """Return the story's subject/category tag."""
        return self.subject
    def getSummary(self):
        """Return the story's summary text."""
        return self.summary
    def getLink(self):
        """Return the story's URL."""
        return self.link
#======================
# Part 2
# Triggers
#======================
class Trigger(object):
    # Abstract base for all news-filtering triggers; subclasses must
    # implement evaluate().
    def evaluate(self, story):
        """
        Returns True if an alert should be generated
        for the given news item, or False otherwise.
        """
        raise NotImplementedError
# Whole Word Triggers
# Problems 2-5
class WordTrigger(Trigger):
    """Base class for triggers that match one whole word, ignoring case."""
    def __init__(self, word):
        self.word = word
    def isWordIn(self, text):
        """Return True if self.word occurs in text as a whole word.

        Words are split on runs of non-alphanumeric characters and the
        comparison is case-insensitive.
        """
        import re
        tokens = re.split('\W+', text)
        return self.word.lower() in [tok.lower() for tok in tokens]
class TitleTrigger(WordTrigger):
    """
    Fires when a news item's title contains a given word.
    """
    def evaluate(self, story):
        return self.isWordIn(story.getTitle())
class SubjectTrigger(WordTrigger):
    """
    Fires when a news item's subject contains a given word.
    """
    def evaluate(self, story):
        return self.isWordIn(story.getSubject())
class SummaryTrigger(WordTrigger):
    """
    Fires when a news item's summary contains a given word.
    """
    # (Docstring fixed: it previously said "subject", but this trigger
    # checks the summary field.)
    def evaluate(self, story):
        return self.isWordIn(story.getSummary())
# Composite Triggers
# Problems 6-8
class NotTrigger(Trigger):
    """Fires when the wrapped trigger does NOT fire (logical negation)."""
    def __init__(self, trigger):
        # Parameter renamed from ``Trigger``, which shadowed the Trigger
        # base class; positional callers are unaffected.
        self.T = trigger
    def evaluate(self, story):
        # Parameter renamed from ``word`` for consistency with
        # Trigger.evaluate(story).
        return not self.T.evaluate(story)
class AndTrigger(Trigger):
    """Fires only when both wrapped triggers fire (logical AND)."""
    def __init__(self, trigger1, trigger2):
        # Parameters renamed from ``Trigger1``/``Trigger2``, which shadowed
        # the Trigger base class; positional callers are unaffected.
        self.T1 = trigger1
        self.T2 = trigger2
    def evaluate(self, story):
        # Parameter renamed from ``word`` for consistency with
        # Trigger.evaluate(story).
        return self.T1.evaluate(story) and self.T2.evaluate(story)
class OrTrigger(Trigger):
    """Fires when either wrapped trigger fires (logical OR)."""
    def __init__(self, trigger1, trigger2):
        # Parameters renamed from ``Trigger1``/``Trigger2``, which shadowed
        # the Trigger base class; positional callers are unaffected.
        self.T1 = trigger1
        self.T2 = trigger2
    def evaluate(self, story):
        # Parameter renamed from ``word`` for consistency with
        # Trigger.evaluate(story).
        return self.T1.evaluate(story) or self.T2.evaluate(story)
# Phrase Trigger
# Question 9
class PhraseTrigger(Trigger):
    """Fires when the exact phrase appears in a story's title, subject,
    or summary (case-sensitive substring match)."""
    def __init__(self, phrase):
        self.phrase = phrase
    def evaluate(self, story):
        # Check the three fields lazily, in the same order as before.
        getters = (story.getTitle, story.getSubject, story.getSummary)
        return any(self.phrase in getter() for getter in getters)
#======================
# Part 3
# Filtering
#======================
def filterStories(stories, triggerlist):
    """
    Takes in a list of NewsStory instances.
    Returns: a list of only the stories for which a trigger in triggerlist fires.

    Input order is preserved and each story appears at most once, even if
    several triggers match it or it occurs more than once in ``stories``.
    """
    matching = []
    for story in stories:
        fired = any(trig.evaluate(story) for trig in triggerlist)
        if fired and story not in matching:
            matching.append(story)
    return matching
#======================
# Part 4
# User-Specified Triggers
#======================
def makeTrigger(triggerMap, triggerType, params, name):
    """
    Takes in a map of names to trigger instance, the type of trigger to make,
    and the list of parameters to the constructor, and adds a new trigger
    to the trigger map dictionary.

    triggerMap: dictionary with names as keys (strings) and triggers as values
    triggerType: string indicating the type of trigger to make (ex: "TITLE")
    params: list of strings with the inputs to the trigger constructor (ex: ["world"])
    name: a string representing the name of the new trigger (ex: "t1")

    Modifies triggerMap, adding a new key-value pair for this trigger.
    Returns a new instance of a trigger (ex: TitleTrigger, AndTrigger).
    """
    # TODO: Problem 11
    # Dispatch on the trigger keyword from the config file.
    if triggerType == "TITLE":
        trigger = TitleTrigger(params[0])
    elif triggerType == "SUBJECT":
        trigger = SubjectTrigger(params[0])
    elif triggerType == "SUMMARY":
        trigger = SummaryTrigger(params[0])
    elif triggerType == "PHRASE":
        # A phrase may span several whitespace-separated tokens.
        trigger = PhraseTrigger(" ".join(params))
    elif triggerType == "AND":
        # Composite triggers reference previously defined triggers by name.
        trigger = AndTrigger(triggerMap[params[0]], triggerMap[params[1]])
    elif triggerType == "OR":
        trigger = OrTrigger(triggerMap[params[0]], triggerMap[params[1]])
    else:
        # NOTE(review): any unrecognised keyword falls through to NOT --
        # confirm that only "NOT" is expected to reach this branch.
        trigger = NotTrigger(triggerMap[params[0]])

    triggerMap[name] = trigger
    return trigger
def readTriggerConfig(filename):
    """
    Returns a list of trigger objects
    that correspond to the rules set
    in the file filename

    Non-ADD lines each define a named trigger via makeTrigger; ADD lines
    select which named triggers become active.
    """
    # Read the file, dropping blank lines and '#' comments.
    # Fixed: close the file via ``with`` (it was never closed) and stop
    # shadowing the builtin ``all``.
    with open(filename, "r") as triggerfile:
        stripped = [line.rstrip() for line in triggerfile.readlines()]
    lines = []
    for line in stripped:
        if len(line) == 0 or line[0] == '#':
            continue
        lines.append(line)

    triggers = []
    triggerMap = {}

    # First token is either a trigger name (definition line) or "ADD".
    for line in lines:
        linesplit = line.split(" ")
        # Making a new trigger
        if linesplit[0] != "ADD":
            trigger = makeTrigger(triggerMap, linesplit[1],
                                  linesplit[2:], linesplit[0])
        # Add the triggers to the list
        else:
            for name in linesplit[1:]:
                triggers.append(triggerMap[name])

    return triggers
import thread
SLEEPTIME = 60 #seconds -- how often we poll
def main_thread(master):
    """Poll the RSS feeds forever and display matching stories in a
    Tkinter window (``master``).

    Runs in a worker thread; references the module-level ``root`` for the
    Exit button and sleeps SLEEPTIME seconds between polls.  Any
    exception ends the loop and is printed.
    """
    # A sample trigger list - you'll replace
    # this with something more configurable in Problem 11
    try:
        # These will probably generate a few hits...
        t1 = TitleTrigger("Obama")
        t2 = SubjectTrigger("Romney")
        t3 = PhraseTrigger("Election")
        t4 = OrTrigger(t2, t3)
        triggerlist = [t1, t4]

        # TODO: Problem 11
        # After implementing makeTrigger, uncomment the line below:
        # (This overrides the sample triggers above with the config file.)
        triggerlist = readTriggerConfig("triggers.txt")

        # **** from here down is about drawing ****
        frame = Frame(master)
        frame.pack(side=BOTTOM)
        scrollbar = Scrollbar(master)
        scrollbar.pack(side=RIGHT,fill=Y)

        t = "Google & Yahoo Top News"
        title = StringVar()
        title.set(t)
        ttl = Label(master, textvariable=title, font=("Helvetica", 18))
        ttl.pack(side=TOP)
        cont = Text(master, font=("Helvetica",14), yscrollcommand=scrollbar.set)
        cont.pack(side=BOTTOM)
        cont.tag_config("title", justify='center')
        button = Button(frame, text="Exit", command=root.destroy)
        button.pack(side=BOTTOM)

        # Gather stories
        # Track guids already rendered so each story is shown only once.
        guidShown = []
        def get_cont(newstory):
            if newstory.getGuid() not in guidShown:
                cont.insert(END, newstory.getTitle()+"\n", "title")
                cont.insert(END, "\n---------------------------------------------------------------\n", "title")
                cont.insert(END, newstory.getSummary())
                cont.insert(END, "\n*********************************************************************\n", "title")
                guidShown.append(newstory.getGuid())

        while True:
            print "Polling . . .",
            # Get stories from Google's Top Stories RSS news feed
            stories = process("http://news.google.com/?output=rss")
            # Get stories from Yahoo's Top Stories RSS news feed
            stories.extend(process("http://rss.news.yahoo.com/rss/topstories"))

            # Process the stories
            stories = filterStories(stories, triggerlist)

            # Python 2: map() is eager, so this renders each story.
            map(get_cont, stories)
            scrollbar.config(command=cont.yview)

            print "Sleeping..."
            time.sleep(SLEEPTIME)

    except Exception as e:
        print e
if __name__ == '__main__':
    # Run the polling loop in a background thread while Tk owns the
    # main thread's event loop.
    root = Tk()
    root.title("Some RSS parser")
    thread.start_new_thread(main_thread, (root,))
    root.mainloop()
|
[
"pjaworski27@gmail.com"
] |
pjaworski27@gmail.com
|
71227b941e2809759abccb685f70469423fba4e5
|
431a1f738b1edfba7dad8d10a6b7520d51d917cb
|
/Samples/UserSamples/2017/xH_Differential_Reco/xH_NJETS_0_Config.py
|
4e0a6fdf11273f15866df7c41142cd35efb04132
|
[] |
no_license
|
aloeliger/DatacardCreator
|
5ce702e46fbb77e843b44d8fe088c2645a4a8f66
|
5c7e890276a5be079ed3b677a471c1dcadcba52d
|
refs/heads/master
| 2022-02-26T19:52:30.563747
| 2022-02-16T20:24:48
| 2022-02-16T20:24:48
| 215,602,523
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,623
|
py
|
from Samples.SampleDefinition import Sample
from Samples.Uncertainties.UserUncertainties.TES import TESUncertainty
from Samples.Uncertainties.UserUncertainties.Signal_JES_17 import JES17Uncertainty
from Samples.Uncertainties.UserUncertainties.JER import JERUncertainty
from Samples.Uncertainties.UserUncertainties.MetRecoil import MetRecoilUncertainty
from Samples.Uncertainties.UserUncertainties.MuonES import MuonESUncertainty
from Samples.Uncertainties.UserUncertainties.Prefiring import PrefiringUncertainty
from Samples.Uncertainties.UserUncertainties.TauID import TauIDUncertainty
from Samples.Uncertainties.UserUncertainties.Trigger17_18 import Trigger1718Uncertainty
#from Samples.Uncertainties.UserUncertainties.qqHTheory import qqHTheoryUncertainty
from Samples.Uncertainties.UserUncertainties.QCDAcceptanceUncertainties.DifferentialUncertainties.NJets_QCD_Uncertainties.GGZH_NJets_Differential_QCDScale_Uncertainty import GGZH_NJets_Differential_QCDScale_Uncertainty
from Samples.Uncertainties.UserUncertainties.QCDAcceptanceUncertainties.DifferentialUncertainties.NJets_QCD_Uncertainties.VH_NJets_Differential_QCDScale_Uncertainty import VH_NJets_Differential_QCDScale_Uncertainty
from Samples.Uncertainties.UserUncertainties.QCDAcceptanceUncertainties.DifferentialUncertainties.NJets_QCD_Uncertainties.qqH_NJets_Differential_QCDScale_Uncertainty import qqH_NJets_Differential_QCDScale_Uncertainty
from Samples.Uncertainties.UserUncertainties.QCDAcceptanceUncertainties.DifferentialUncertainties.NJets_QCD_Uncertainties.ttH_NJets_Differential_QCDScale_Uncertainty import ttH_NJets_Differential_QCDScale_Uncertainty
from Samples.EventDefinition.UserEventDictionaries.MuTauEventDictionary import MuTauEventDictionary
# xH sample for the 0-jet bin of the reco-NJets differential measurement,
# built from the VBF, WH, ZH, ggZH and ttH input files listed below.
VBFSample = Sample()
VBFSample.name = 'xH_recoNJ_0'
VBFSample.path = '/data/aloeliger/SMHTT_Selected_2017_Deep/'
VBFSample.files = ['VBF.root','WHPlus.root','WHMinus.root','ZH.root','GGZHLLTT.root','GGZHNNTT.root','GGZHQQTT.root','ttH.root']
# Selection: fiducial events with exactly zero jets.
VBFSample.definition = 'is_Fiducial == 1.0 && njets == 0'
# Systematic uncertainties attached to this sample.
VBFSample.uncertainties = [
    TESUncertainty(),
    JES17Uncertainty(),
    JERUncertainty(),
    MetRecoilUncertainty(),
    MuonESUncertainty(),
    PrefiringUncertainty(),
    TauIDUncertainty(),
    Trigger1718Uncertainty(),
    #qqHTheoryUncertainty(),
    GGZH_NJets_Differential_QCDScale_Uncertainty(),
    VH_NJets_Differential_QCDScale_Uncertainty(),
    qqH_NJets_Differential_QCDScale_Uncertainty(),
    ttH_NJets_Differential_QCDScale_Uncertainty(),
    ]
VBFSample.eventDictionaryInstance = MuTauEventDictionary
VBFSample.CreateEventWeight = VBFSample.CreateEventWeight_Standard
|
[
"aloelige@cern.ch"
] |
aloelige@cern.ch
|
58eb34d13830641c05a389e7f32d562c587efb98
|
e79fb97c06e3a75bd0cf6135fbbd6c1ac08492cb
|
/cnn/vgg16net.py
|
1c14e72e799125617b2cdc37f77caf322527616b
|
[
"MIT"
] |
permissive
|
nocotan/chainer-examples
|
b1021e98654a6d377cc4669c7cedd57bca4f692d
|
d2b736231c6a6c2ba1effa3ddeb90770d7e020d9
|
refs/heads/master
| 2021-09-11T12:42:31.612581
| 2018-04-07T05:40:22
| 2018-04-07T05:40:22
| 78,973,921
| 13
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,288
|
py
|
# -*- coding: utf-8 -*-
import chainer
import chainer.functions as F
import chainer.links as L
class VGG16Net(chainer.Chain):
    """VGG16-style CNN: 13 convolution layers in five stages, each stage
    ending in 2x2 max pooling, followed by three fully connected layers
    producing ``num_class`` outputs.

    NOTE(review): the ``train`` flag is accepted but never used in this
    class -- confirm whether dropout was meant to depend on it.
    """
    def __init__(self, num_class, train=True):
        super(VGG16Net, self).__init__()
        with self.init_scope():
            # All convolutions are 3x3, stride 1, pad 1; input channel
            # counts are inferred lazily (None) by chainer.
            self.conv1=L.Convolution2D(None, 64, 3, stride=1, pad=1)
            self.conv2=L.Convolution2D(None, 64, 3, stride=1, pad=1)
            self.conv3=L.Convolution2D(None, 128, 3, stride=1, pad=1)
            self.conv4=L.Convolution2D(None, 128, 3, stride=1, pad=1)
            self.conv5=L.Convolution2D(None, 256, 3, stride=1, pad=1)
            self.conv6=L.Convolution2D(None, 256, 3, stride=1, pad=1)
            self.conv7=L.Convolution2D(None, 256, 3, stride=1, pad=1)
            self.conv8=L.Convolution2D(None, 512, 3, stride=1, pad=1)
            self.conv9=L.Convolution2D(None, 512, 3, stride=1, pad=1)
            self.conv10=L.Convolution2D(None, 512, 3, stride=1, pad=1)
            self.conv11=L.Convolution2D(None, 512, 3, stride=1, pad=1)
            self.conv12=L.Convolution2D(None, 512, 3, stride=1, pad=1)
            self.conv13=L.Convolution2D(None, 512, 3, stride=1, pad=1)
            self.fc14=L.Linear(None, 4096)
            self.fc15=L.Linear(None, 4096)
            self.fc16=L.Linear(None, num_class)
    def __call__(self, x):
        """Forward pass; returns raw class scores (no softmax)."""
        # Stage 1: conv1-conv2, then LRN + 2x2 max pool.
        h = F.relu(self.conv1(x))
        h = F.max_pooling_2d(F.local_response_normalization(
            F.relu(self.conv2(h))), 2, stride=2)
        # Stage 2: conv3-conv4.
        h = F.relu(self.conv3(h))
        h = F.max_pooling_2d(F.local_response_normalization(
            F.relu(self.conv4(h))), 2, stride=2)
        # Stage 3: conv5-conv7.
        h = F.relu(self.conv5(h))
        h = F.relu(self.conv6(h))
        h = F.max_pooling_2d(F.local_response_normalization(
            F.relu(self.conv7(h))), 2, stride=2)
        # Stage 4: conv8-conv10.
        h = F.relu(self.conv8(h))
        h = F.relu(self.conv9(h))
        h = F.max_pooling_2d(F.local_response_normalization(
            F.relu(self.conv10(h))), 2, stride=2)
        # Stage 5: conv11-conv13.
        h = F.relu(self.conv11(h))
        h = F.relu(self.conv12(h))
        h = F.max_pooling_2d(F.local_response_normalization(
            F.relu(self.conv13(h))), 2, stride=2)
        # Classifier head: two dropout-regularised FC layers, then logits.
        h = F.dropout(F.relu(self.fc14(h)))
        h = F.dropout(F.relu(self.fc15(h)))
        h = self.fc16(h)
        return h
|
[
"noconoco.lib@gmail.com"
] |
noconoco.lib@gmail.com
|
fbd24af153c52662384f7d7d5b93abab23dda9e1
|
2e126d8779c6d0123252a483611c035742d626da
|
/features/steps/obj_file_steps.py
|
1f380d23f706082ecf1274f4922061e65d15220b
|
[
"MIT"
] |
permissive
|
natehouk/ray-tracer
|
1c250f624e202f9c17b120ac651efa672025a603
|
293eee036148e45b533431d13c3b6e375b4ca224
|
refs/heads/master
| 2023-03-01T03:53:22.824037
| 2021-02-14T09:42:23
| 2021-02-14T09:42:23
| 323,396,893
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,715
|
py
|
from behave import *
from parser import parse_obj_file, obj_to_group
from tuple import point
# Steps that feed raw text (from the scenario's doc-string, context.text)
# into parse_obj_file and inspect the resulting parser.
@given(u'gibberish ← a file containing')
def step_impl(context):
    context.gibberish = context.text
@when(u'parser ← parse_obj_file(gibberish)')
def step_impl(context):
    context.parser = parse_obj_file(context.gibberish)
@then(u'parser should have ignored 5 lines')
def step_impl(context):
    assert context.parser.ignored == 5
@given(u'file ← a file containing')
def step_impl(context):
    context.file = context.text
@when(u'parser ← parse_obj_file(file)')
def step_impl(context):
    context.parser = parse_obj_file(context.file)
# BUG FIX: these @then steps previously ASSIGNED into parser.vertices
# instead of asserting equality, so they could never fail.  They now
# verify the parsed vertex values as the scenarios intend.
@then(u'parser.vertices[1] = point(-1, 1, 0)')
def step_impl(context):
    assert context.parser.vertices[1] == point(-1, 1, 0)
@then(u'parser.vertices[2] = point(-1, 0.5, 0)')
def step_impl(context):
    assert context.parser.vertices[2] == point(-1, 0.5, 0)
@then(u'parser.vertices[3] = point(1, 0, 0)')
def step_impl(context):
    assert context.parser.vertices[3] == point(1, 0, 0)
@then(u'parser.vertices[4] = point(1, 1, 0)')
def step_impl(context):
    assert context.parser.vertices[4] == point(1, 1, 0)
@when(u'g ← parser.default_group')
def step_impl(context):
context.g = context.parser.groups["default_group"]
@when(u't1 ← first child of g')
def step_impl(context):
context.t1 = context.g[0]
@when(u't2 ← second child of g')
def step_impl(context):
context.t2 = context.g[1]
@then(u't1.p1 = parser.vertices[1]')
def step_impl(context):
    """First corner of t1 should be vertex 1."""
    expected = context.parser.vertices[1]
    assert context.t1.p1 == expected


@then(u't1.p2 = parser.vertices[2]')
def step_impl(context):
    """Second corner of t1 should be vertex 2."""
    expected = context.parser.vertices[2]
    assert context.t1.p2 == expected


@then(u't1.p3 = parser.vertices[3]')
def step_impl(context):
    """Third corner of t1 should be vertex 3."""
    expected = context.parser.vertices[3]
    assert context.t1.p3 == expected


@then(u't2.p1 = parser.vertices[1]')
def step_impl(context):
    """First corner of t2 should be vertex 1 (shared with t1)."""
    expected = context.parser.vertices[1]
    assert context.t2.p1 == expected


@then(u't2.p2 = parser.vertices[3]')
def step_impl(context):
    """Second corner of t2 should be vertex 3."""
    expected = context.parser.vertices[3]
    assert context.t2.p2 == expected


@then(u't2.p3 = parser.vertices[4]')
def step_impl(context):
    """Third corner of t2 should be vertex 4."""
    expected = context.parser.vertices[4]
    assert context.t2.p3 == expected
@when(u't3 ← third child of g')
def step_impl(context):
    """Remember the group's third triangle."""
    third_child = context.g[2]
    context.t3 = third_child


@then(u't3.p1 = parser.vertices[1]')
def step_impl(context):
    """First corner of t3 should be vertex 1."""
    expected = context.parser.vertices[1]
    assert context.t3.p1 == expected


@then(u't3.p2 = parser.vertices[4]')
def step_impl(context):
    """Second corner of t3 should be vertex 4."""
    expected = context.parser.vertices[4]
    assert context.t3.p2 == expected


@then(u't3.p3 = parser.vertices[5]')
def step_impl(context):
    """Third corner of t3 should be vertex 5."""
    expected = context.parser.vertices[5]
    assert context.t3.p3 == expected
@given(u'file ← the file "triangles.obj"')
def step_impl(context):
    """Load the triangles.obj fixture into the context.

    FIX: the original opened the file and never closed it; a ``with``
    block guarantees the handle is released even if read() raises.
    """
    with open("files/triangles.obj", "r") as f:
        context.file = f.read()
@when(u'g1 ← "FirstGroup" from parser')
def step_impl(context):
    """Fetch the named group FirstGroup from the parse result."""
    named_groups = context.parser.groups
    context.g1 = named_groups["FirstGroup"]


@when(u'g2 ← "SecondGroup" from parser')
def step_impl(context):
    """Fetch the named group SecondGroup from the parse result."""
    named_groups = context.parser.groups
    context.g2 = named_groups["SecondGroup"]


@when(u't1 ← first child of g1')
def step_impl(context):
    """Remember the first triangle of g1."""
    context.t1 = context.g1[0]


@when(u't2 ← first child of g2')
def step_impl(context):
    """Remember the first triangle of g2."""
    context.t2 = context.g2[0]
@given(u'parser ← parse_obj_file(file)')
def step_impl(context):
    """Parse the previously loaded OBJ text."""
    parsed = parse_obj_file(context.file)
    context.parser = parsed


@when(u'g ← obj_to_group(parser)')
def step_impl(context):
    """Convert the parse result into a single renderable group."""
    context.g = obj_to_group(context.parser)


@then(u'g includes "FirstGroup" from parser')
def step_impl(context):
    """Every triangle of FirstGroup must be a child of the combined group."""
    triangles = context.parser.groups["FirstGroup"]
    assert all(tri in context.g.children for tri in triangles)


@then(u'g includes "SecondGroup" from parser')
def step_impl(context):
    """Every triangle of SecondGroup must be a child of the combined group."""
    triangles = context.parser.groups["SecondGroup"]
    assert all(tri in context.g.children for tri in triangles)
|
[
"nate.houk@gmail.com"
] |
nate.houk@gmail.com
|
a527639510ff821b91fe8501552ad4cc17f1ed59
|
ff90ae8bdd9dbefb29f7345a221039b4e967f8d3
|
/sign/views_if.py
|
009b03d8450f69d9c3b92d3430426807f07b0860
|
[] |
no_license
|
long3ck/guest
|
ad5bd10b4d9914677bf186ea574a540e2d47744c
|
5e43099de0fa43fa1c4622c3c5cc11f3f49a8a51
|
refs/heads/master
| 2020-12-02T07:54:51.267221
| 2017-07-10T06:50:52
| 2017-07-10T06:50:52
| 85,649,805
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,591
|
py
|
#coding=utf-8
__author__ = 'Chenkun'
__date__ = '2017/06/16 14:47'
import time
from django.http import JsonResponse
from sign.models import Event, Guest
from django.db.utils import IntegrityError
from django.core.exceptions import ValidationError, ObjectDoesNotExist
# Add-event API endpoint
def add_event(request):
    """Create a release event from POST data.

    Required params: eid, name, limit, address, start_time; ``status``
    defaults to 1 when omitted.  Rejects duplicate event ids/names and
    returns a JsonResponse carrying an app-level status code and message.
    """
    eid = request.POST.get('eid','')  # event id
    name = request.POST.get('name','')  # event title
    limit = request.POST.get('limit','')  # attendee limit
    status = request.POST.get('status','')  # event status
    address = request.POST.get('address','')  # venue address
    start_time = request.POST.get('start_time','')  # event start time
    if eid == '' or name == '' or limit == '' or address == '' or start_time == '':
        return JsonResponse({'status':10021,'message':'parameter error'})
    result = Event.objects.filter(id = eid)
    if result:
        return JsonResponse({'status':10022,'message':'event id already exists'})
    result = Event.objects.filter(name = name)
    if result:
        return JsonResponse({'status':10023,'message':'event name already exists'})
    if status == '':
        # Default to status 1 when the caller did not send one.
        status = 1
    try:
        Event.objects.create(id=eid,name=name,limit=limit,address=address,status=int(status),start_time=start_time)
    except ValidationError as e:
        # Raised when start_time cannot be parsed as a datetime.
        error = 'start_time format error. It must be in YYYY-MM-DD HH:MM:SS format.'
        return JsonResponse({'status':10024,'message':error})
    return JsonResponse({'status':200,'message':'add event success'})
# Event query API endpoint
def get_event_list(request):
    """Query events by exact id or by fuzzy name match (GET parameters).

    ``eid`` takes precedence over ``name``; at least one must be given.
    Returns a JsonResponse with the matching event dict(s) under 'data'.
    """
    eid = request.GET.get("eid", "")  # event id
    name = request.GET.get("name", "")  # event name
    if eid == '' and name == '':
        return JsonResponse({'status':10021,'message':'parameter error'})
    if eid != '':
        # Exact lookup by primary key; returns a single event dict.
        event = {}
        try:
            result = Event.objects.get(id=eid)
        except ObjectDoesNotExist:
            return JsonResponse({'status':10022, 'message':'query result is empty'})
        else:
            event['name'] = result.name
            event['limit'] = result.limit
            event['status'] = result.status
            event['address'] = result.address
            event['start_time'] = result.start_time
            return JsonResponse({'status':200, 'message':'success', 'data':event})
    if name != '':
        # Substring match on the event name; may return several rows.
        datas = []
        results = Event.objects.filter(name__contains=name)
        if results:
            for r in results:
                event = {}
                event['name'] = r.name
                event['limit'] = r.limit
                event['status'] = r.status
                event['address'] = r.address
                event['start_time'] = r.start_time
                datas.append(event)
            return JsonResponse({'status':200, 'message':'success', 'data':datas})
        else:
            return JsonResponse({'status':10022, 'message':'query result is empty'})
# Add-guest API endpoint
def add_guest(request):
    """Register a guest for an event (POST).

    Checks, in order: required params, event existence, event active
    status, attendee-limit headroom, and that the event has not started
    yet.  A duplicate phone number for the same event trips the DB unique
    constraint and is reported as status 10026.
    """
    eid = request.POST.get('eid','')  # id of the event to join
    realname = request.POST.get('realname','')  # guest name
    phone = request.POST.get('phone','')  # guest phone number
    email = request.POST.get('email','')  # guest email
    if eid =='' or realname == '' or phone == '':
        return JsonResponse({'status':10021,'message':'parameter error'})
    result = Event.objects.filter(id=eid)
    if not result:
        return JsonResponse({'status':10022,'message':'event id null'})
    result = Event.objects.get(id=eid).status
    if not result:
        return JsonResponse({'status':10023,'message':'event status is not available'})
    event_limit = Event.objects.get(id=eid).limit  # maximum number of guests
    guest_limit = Guest.objects.filter(event_id=eid)  # guests already registered
    if len(guest_limit) >= event_limit:
        return JsonResponse({'status':10024,'message':'event number is full'})
    event_time = Event.objects.get(id=eid).start_time  # event start time
    etime = str(event_time).split(".")[0]  # drop fractional seconds, if any
    timeArray = time.strptime(etime, "%Y-%m-%d %H:%M:%S")
    e_time = int(time.mktime(timeArray))
    # FIX (idiom): simplified from int(str(time.time()).split(".")[0]) --
    # int() already truncates the fractional part for positive timestamps.
    n_time = int(time.time())  # current time, whole seconds
    if n_time >= e_time:
        return JsonResponse({'status':10025,'message':'event has started'})
    try:
        # NOTE(review): int(phone) raises ValueError (HTTP 500) for a
        # non-numeric phone -- confirm upstream validation exists.
        Guest.objects.create(realname=realname,phone=int(phone),email=email,sign=0,event_id=int(eid))
    except IntegrityError:
        return JsonResponse({'status':10026,'message':'the event guest phone number repeat'})
    return JsonResponse({'status':200,'message':'add guest success'})
# Guest query API endpoint
def get_guest_list(request):
    """List an event's guests, or fetch one guest by event id + phone (GET).

    ``eid`` is mandatory.  Without ``phone`` all guests of the event are
    returned; with ``phone`` a single matching guest is returned.
    """
    eid = request.GET.get("eid", "")  # id of the event the guests belong to
    phone = request.GET.get("phone", "")  # guest phone number (optional)
    if eid == '':
        return JsonResponse({'status':10021,'message':'eid cannot be empty'})
    if eid != '' and phone == '':
        # No phone given: return every guest registered for the event.
        datas = []
        results = Guest.objects.filter(event_id=eid)
        if results:
            for r in results:
                guest = {}
                guest['realname'] = r.realname
                guest['phone'] = r.phone
                guest['email'] = r.email
                guest['sign'] = r.sign
                datas.append(guest)
            return JsonResponse({'status':200, 'message':'success', 'data':datas})
        else:
            return JsonResponse({'status':10022, 'message':'query result is empty'})
    if eid != '' and phone != '':
        # Both given: look up the single matching guest.
        guest = {}
        try:
            result = Guest.objects.get(phone=phone,event_id=eid)
        except ObjectDoesNotExist:
            return JsonResponse({'status':10022, 'message':'query result is empty'})
        else:
            guest['realname'] = result.realname
            guest['phone'] = result.phone
            guest['email'] = result.email
            guest['sign'] = result.sign
            return JsonResponse({'status':200, 'message':'success', 'data':guest})
# Guest sign-in API endpoint
def user_sign(request):
    """Sign a guest in to an event (POST: eid, phone).

    Validates the event (exists, active, not yet started) and the guest
    (phone known, registered for this event, not already signed in), then
    flips the guest's ``sign`` flag.
    """
    eid = request.POST.get('eid','')  # event id
    phone = request.POST.get('phone','')  # guest phone number
    if eid =='' or phone == '':
        return JsonResponse({'status':10021,'message':'parameter error'})
    result = Event.objects.filter(id=eid)
    if not result:
        return JsonResponse({'status':10022,'message':'event id null'})
    result = Event.objects.get(id = eid).status
    if not result:
        return JsonResponse({'status':10023,'message':'event status is not available'})
    event_time = Event.objects.get(id=eid).start_time  # event start time
    etime = str(event_time).split(".")[0]  # drop fractional seconds, if any
    timeArray = time.strptime(etime, "%Y-%m-%d %H:%M:%S")
    e_time = int(time.mktime(timeArray))
    # FIX (idiom): simplified from int(str(time.time()).split(".")[0]) --
    # int() already truncates the fractional part for positive timestamps.
    n_time = int(time.time())
    # NOTE(review): sign-in is rejected once the event has started --
    # confirm that sign-in is really only allowed *before* the start time.
    if n_time >= e_time:
        return JsonResponse({'status':10024,'message':'event has started'})
    result = Guest.objects.filter(phone = phone)
    if not result:
        return JsonResponse({'status':10025,'message':'user phone null'})
    result = Guest.objects.filter(event_id=eid,phone=phone)
    if not result:
        return JsonResponse({'status':10026,'message':'user did not participate in the conference'})
    result = Guest.objects.get(event_id=eid,phone = phone).sign
    if result:
        return JsonResponse({'status':10027,'message':'user has sign in'})
    else:
        Guest.objects.filter(event_id=eid,phone=phone).update(sign='1')
        return JsonResponse({'status':200,'message':'sign success'})
|
[
"long3ck@qq.com"
] |
long3ck@qq.com
|
1f6f414e833dcc2b69dc8ab9d2d28c1f6300c4d7
|
77c798d834efecff43bcb6d598e5d9a0370953ea
|
/store/urls.py
|
963dfb667e86e79d6e41f00311c49cf399f5a4d2
|
[] |
no_license
|
Mohaned-Elfeky/Django_ecommerce
|
2bd206eebb3853efde8cd445aec009f0e272fbfb
|
d816ecd3eb84fc2549e70b0f1581b8a0eb32856d
|
refs/heads/master
| 2023-06-16T09:37:35.733354
| 2021-07-13T14:15:54
| 2021-07-13T14:15:54
| 329,772,838
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 637
|
py
|
from django.contrib import admin
from django.urls import path,include
from . import views
urlpatterns = [
path('',views.store,name="home"),
path('cart/',views.cart,name="cart"),
path('store/',views.store,name="store"),
path('checkout/',views.checkout,name="checkout"),
path('update_cart/',views.updateCart,name="update_cart"),
path('process_order/',views.processOrder,name="process_order"),
path('clear_cart/',views.clearCart,name="clear_cart"),
path('product/<slug:product_name>/<int:product_id>/',views.productDetails,name="product_details"),
path('search',views.search,name="search")
]
|
[
"Mohanedhassan1999@gmail.com"
] |
Mohanedhassan1999@gmail.com
|
8564fae4ea4edaef15f390a4f927ccfa825c49e8
|
f45cc0049cd6c3a2b25de0e9bbc80c25c113a356
|
/LeetCode/机器学习(ML)/plot_feature_transformation.py
|
c4a45b398f7482bac992a174a2f4a2381777a1fa
|
[] |
no_license
|
yiming1012/MyLeetCode
|
4a387d024969bfd1cdccd4f581051a6e4104891a
|
e43ee86c5a8cdb808da09b4b6138e10275abadb5
|
refs/heads/master
| 2023-06-17T06:43:13.854862
| 2021-07-15T08:54:07
| 2021-07-15T08:54:07
| 261,663,876
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,090
|
py
|
"""
===============================================
Feature transformations with ensembles of trees
===============================================
Transform your features into a higher dimensional, sparse space. Then train a
linear model on these features.
First fit an ensemble of trees (totally random trees, a random forest, or
gradient boosted trees) on the training set. Then each leaf of each tree in the
ensemble is assigned a fixed arbitrary feature index in a new feature space.
These leaf indices are then encoded in a one-hot fashion.
Each sample goes through the decisions of each tree of the ensemble and ends up
in one leaf per tree. The sample is encoded by setting feature values for these
leaves to 1 and the other feature values to 0.
The resulting transformer has then learned a supervised, sparse,
high-dimensional categorical embedding of the data.
"""
# Author: Tim Head <betatim@gmail.com>
#
# License: BSD 3 clause
# print(__doc__)
from sklearn import set_config
# Render estimators as HTML diagrams in notebook output.
set_config(display='diagram')
# %%
# First, we will create a large dataset and split it into three sets:
#
# - a set to train the ensemble methods which are later used to as a feature
#   engineering transformer;
# - a set to train the linear model;
# - a set to test the linear model.
#
# It is important to split the data in such way to avoid overfitting by leaking
# data.
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
# random_state is pinned throughout so the example is reproducible.
X, y = make_classification(n_samples=80000, random_state=10)
X_full_train, X_test, y_full_train, y_test = train_test_split(
    X, y, test_size=0.5, random_state=10)
X_train_ensemble, X_train_linear, y_train_ensemble, y_train_linear = \
    train_test_split(X_full_train, y_full_train, test_size=0.5,
                     random_state=10)
# %%
# For each of the ensemble methods, we will use 10 estimators and a maximum
# depth of 3 levels.
n_estimators = 10
max_depth = 3
# %%
# First, we will start by training the random forest and gradient boosting on
# the separated training set
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
random_forest = RandomForestClassifier(
    n_estimators=n_estimators, max_depth=max_depth, random_state=10)
random_forest.fit(X_train_ensemble, y_train_ensemble)
gradient_boosting = GradientBoostingClassifier(
    n_estimators=n_estimators, max_depth=max_depth, random_state=10)
# "_ =" suppresses the repr of the fitted estimator in notebook output.
_ = gradient_boosting.fit(X_train_ensemble, y_train_ensemble)
# %%
# The :class:`~sklearn.ensemble.RandomTreesEmbedding` is an unsupervised method
# and thus does not required to be trained independently.
from sklearn.ensemble import RandomTreesEmbedding
random_tree_embedding = RandomTreesEmbedding(
    n_estimators=n_estimators, max_depth=max_depth, random_state=0)
# %%
# Now, we will create three pipelines that will use the above embedding as
# a preprocessing stage.
#
# The random trees embedding can be directly pipelined with the logistic
# regression because it is a standard scikit-learn transformer.
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
rt_model = make_pipeline(
    random_tree_embedding, LogisticRegression(max_iter=1000))
rt_model.fit(X_train_linear, y_train_linear)
# %%
# Then, we can pipeline random forest or gradient boosting with a logistic
# regression. However, the feature transformation will happen by calling the
# method `apply`. The pipeline in scikit-learn expects a call to `transform`.
# Therefore, we wrapped the call to `apply` within a `FunctionTransformer`.
from sklearn.preprocessing import FunctionTransformer
from sklearn.preprocessing import OneHotEncoder
def rf_apply(X, model):
    """Map samples to per-tree leaf indices of the bound random forest.

    Thin adapter so that ``model.apply`` can serve as a pipeline step via
    ``FunctionTransformer`` (the model is bound through ``kw_args``).
    """
    leaf_indices = model.apply(X)
    return leaf_indices
# Bind the already-fitted forest so the transformer only maps to leaves.
rf_leaves_yielder = FunctionTransformer(
    rf_apply, kw_args={"model": random_forest})
# One-hot encode the leaf indices so the linear model sees sparse binary
# features; unknown leaves at predict time are ignored rather than erroring.
rf_model = make_pipeline(
    rf_leaves_yielder, OneHotEncoder(handle_unknown="ignore"),
    LogisticRegression(max_iter=1000))
rf_model.fit(X_train_linear, y_train_linear)
# %%
def gbdt_apply(X, model):
    """Map samples to per-estimator leaf indices of the bound GBDT.

    ``model.apply(X)`` is indexed as a 3-D array here; the last axis is
    reduced to its first slice so the result is 2-D like ``rf_apply``'s.
    """
    leaves = model.apply(X)
    return leaves[:, :, 0]
# Same adapter + one-hot + linear-model pattern as the random forest above.
gbdt_leaves_yielder = FunctionTransformer(
    gbdt_apply, kw_args={"model": gradient_boosting})
gbdt_model = make_pipeline(
    gbdt_leaves_yielder, OneHotEncoder(handle_unknown="ignore"),
    LogisticRegression(max_iter=1000))
gbdt_model.fit(X_train_linear, y_train_linear)
# %%
# We can finally show the different ROC curves for all the models.
import matplotlib.pyplot as plt
from sklearn.metrics import plot_roc_curve
# NOTE(review): plot_roc_curve was deprecated in scikit-learn 1.0 and removed
# in 1.2 (use RocCurveDisplay.from_estimator) -- confirm the pinned version.
fig, ax = plt.subplots()
models = [
    ("RT embedding -> LR", rt_model),
    ("RF", random_forest),
    ("RF embedding -> LR", rf_model),
    ("GBDT", gradient_boosting),
    ("GBDT embedding -> LR", gbdt_model),
]
model_displays = {}
for name, pipeline in models:
    # Keep each display object so the zoomed figure below can re-plot
    # without recomputing the predictions.
    model_displays[name] = plot_roc_curve(
        pipeline, X_test, y_test, ax=ax, name=name)
_ = ax.set_title('ROC curve')
# %%
# Second figure: same curves, zoomed into the top-left corner.
fig, ax = plt.subplots()
for name, pipeline in models:
    model_displays[name].plot(ax=ax)
ax.set_xlim(0, 0.2)
ax.set_ylim(0.8, 1)
_ = ax.set_title('ROC curve (zoomed in at top left)')
|
[
"1129079384@qq.com"
] |
1129079384@qq.com
|
06e6cc27001cefebe085879ad2f6c4cf3e09a305
|
19ef57b9fdca2c082add5191e5b623a4c25a1ffb
|
/Xml.py
|
40a0cabbcaa308af4255a8876d4afe4c8db8e87f
|
[] |
no_license
|
Sniper970119/Spider
|
3ee7fc0f789bd14de10111aede4e6f8b58769ccf
|
d729a18b7000fe0e1bd9ca266206fa32d4fdc9ba
|
refs/heads/master
| 2020-03-23T18:25:11.467156
| 2018-08-09T11:58:07
| 2018-08-09T11:58:07
| 141,907,508
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,691
|
py
|
# -*- coding:utf-8 -*-
from lxml import etree
from bs4 import BeautifulSoup
import re
from pyquery import PyQuery as pq
# xml test -> tostring
# html = etree.parse('test.xml', etree.HTMLParser())
# result = etree.tostring(html)
# print(result.decode('utf-8'))
# xml test -> all
# html = etree.parse('test.xml', etree.HTMLParser())
# result = html.xpath('//*')
# print(result)
# xml test -> son
# html = etree.parse('test.xml', etree.HTMLParser())
# result = html.xpath('//li/a')
# print(result)
# xml test -> father
# html = etree.parse('test.xml', etree.HTMLParser())
# result = html.xpath('//a[@href="link4.html"]/../@class')
# print(result)
# xml test -> get attribute
# html = etree.parse('test.xml', etree.HTMLParser())
# result = html.xpath('//li/a/@href')
# print(result)
# xml test -> get attribute which have more than one values
# text = '''
# <li class="li li-first"><a href="link.html">first item</a></li>
# '''
# html = etree.HTML(text)
# result = html.xpath('//li[contains(@class, "li")]/a/text()')
# print(result)
# xml test -> match message by more than one attribute
# text = '''
# <li class="li li-first" name="item"><a href="link.html">first item</a></li>
# '''
# html = etree.HTML(text)
# result = html.xpath('//li[contains(@class, "li")and @name="item"]/a/text()')
# print(result)
# xml test -> output by order
# text = '''
# <li class="li li-first" name="item"><a href="link.html">first item</a></li>
# '''
# html = etree.HTML(text)
# result = html.xpath('//li[1]/a/text()') # print the first node
# print(result)
# result = html.xpath('//li[last()]/a/text()') # print the last node
# print(result)
# result = html.xpath('//li[position()<3]/a/text()') # print the nodes whose position is smaller than 3
# print(result)
# result = html.xpath('//li[last()-2]/a/text()') # print the antepenultimate node
# print(result)
# xml test -> node axle
# text = '''
# <li class="li li-first" name="item"><a href="link.html">first item</a></li>
# '''
# html = etree.HTML(text)
# result = html.xpath('//li[1]/ancestor::*')
# print(result)
# result = html.xpath('//li[1]/ancestor::div')
# print(result)
# result = html.xpath('//li[1]/attribute::*')
# print(result)
# result = html.xpath('//li[1]/child::a[@href="link1.html"]')
# print(result)
# result = html.xpath('//li[1]/descendant::span')
# print(result)
# result = html.xpath('//li[1]/following::*[2]')
# print(result)
# result = html.xpath('//li[1]/following-sibling::*')
# print(result)
# beautiful soup test
# html = '''
# <html>
# <head>
# <title>The Dormouse's story</title>
# </head>
# <body>
# <p class="title" name="dromouse"><b>The Dormouse's story</b></p>
# <p class="story">Once upon a time there were three little sisters;and their names were
# <a href="http://www.baidu.com/01" class="sister" id="link1"><!--Elsie--></a>
# <a href="http://www.baidu.com/02" class="sister" id="link2">Lacie</a> and
# <a href="http://www.baidu.com/03" class="sister" id="link3">Tillie</a>;
# and they lived at the bottom of a well.
# </p>
# <p class="story">...</p>
# '''
# soup = BeautifulSoup(html, 'lxml')
# print(soup.prettify())
# print(soup.title.string)
# beautiful soup test -> find_all
# html = '''
# <div class="panel">
# <div class="panel-heading">
# <h4>Hello</h4>
# </div>
# <div class="panel-body">
# <ul class="list" id="list-1" name="elements">
# <li class="element">Foo</li>
# <li class="element">Bar</li>
# <li class="element">Jay</li>
# </ul>
# <ul class="list list-small" id="list-2">
# <li class="element">Foo</li>
# <li class="element">Bar</li>
# </ul>
# </div>
# </div>
# '''
# soup = BeautifulSoup(html, 'lxml')
# print(soup.find_all(name='ul'))
# print()
# for ul in soup.find_all(name='ul'):
# print(ul.find_all(name='li'))
# print()
# print(soup.find_all(attrs={'id': 'list-1'}))
# print()
# print(soup.find_all(id='list-1'))
# print()
# print(soup.find_all(text=re.compile('Foo')))
# beautiful soup test -> find_all
# html = '''
# <div class="panel">
# <div class="panel-heading">
# <h4>Hello</h4>
# </div>
# <div class="panel-body">
# <ul class="list" id="list-1" name="elements">
# <li class="element">Foo</li>
# <li class="element">Bar</li>
# <li class="element">Jay</li>
# </ul>
# <ul class="list list-small" id="list-2">
# <li class="element">Foo</li>
# <li class="element">Bar</li>
# </ul>
# </div>
# </div>
# '''
# soup = BeautifulSoup(html, 'lxml')
# print(soup.find(name='ul'))
# beautiful soup test -> find
# html = '''
# <div class="panel">
# <div class="panel-heading">
# <h4>Hello</h4>
# </div>
# <div class="panel-body">
# <ul class="list" id="list-1" name="elements">
# <li class="element">Foo</li>
# <li class="element">Bar</li>
# <li class="element">Jay</li>
# </ul>
# <ul class="list list-small" id="list-2">
# <li class="element">Foo</li>
# <li class="element">Bar</li>
# </ul>
# </div>
# </div>
# '''
# soup = BeautifulSoup(html, 'lxml')
# print(soup.select('.panel .panel-heading'))
# print()
# print(soup.select('ul li'))
# print()
# print(soup.select('#list-2 .element'))
# print()
# for ul in soup.select('li'): # attribute
# print(ul['class'])
# print(ul.attrs['class'])
# print('Get Text:', ul.get_text())
# print('String:', ul.string)
# pyquery test
# html = '''
# <div class="panel">
# <div class="panel-heading">
# <h4>Hello</h4>
# </div>
# <div class="panel-body">
# <ul class="list" id="list-1" name="elements">
# <li class="element">Foo</li>
# <li class="element">Bar</li>
# <li class="element">Jay</li>
# </ul>
# <ul class="list list-small" id="list-2">
# <li class="element">Foo</li>
# <li class="element">Bar</li>
# </ul>
# </div>
# </div>
# '''
# doc = pq(html)
# print(doc('li'))
# print()
# doc = pq(url='http://www.sniper97.cn') # use url
# print(doc('title'))
# print()
# doc = pq(filename='test.xml') # use file
# print(doc('li'))
# print()
# pyquery test -> css
# html = '''
# <div class="panel">
# <div class="panel-heading">
# <h4>Hello</h4>
# </div>
# <div class="panel-body" id="list-0">
# <ul class="list" id="list-1" name="elements">
# <li class="element">Foo</li>
# <li class="element">Bar</li>
# <li class="element">Jay</li>
# </ul>
# <ul class="list list-small" id="list-2">
# <li class="element">Foo</li>
# <li class="element">Bar</li>
# </ul>
# </div>
# </div>
# '''
# doc = pq(html)
# print(doc('#list-0 .list li'))
# pyquery test -> find node
# html = '''
# <div class="panel">
# <div class="panel-heading">
# <h4>Hello</h4>
# </div>
# <div class="panel-body" id="list-0">
# <ul class="list" id="list-1" name="elements">
# <li class="element">Foo</li>
# <li class="element">Bar</li>
# <li class="element">Jay</li>
# </ul>
# <ul class="list list-small" id="list-2">
# <li class="element">Foo</li>
# <li class="element">Bar</li>
# </ul>
# </div>
# </div>
# '''
# doc = pq(html)
# item = doc('ul')
# print(item)
# print()
# lis = item.find('li') # son node
# print(lis)
# print()
# par = item.parents() # parents node
# print(par)
# print()
# par = item.parents('.panel-body') # parent node , only one point
# print(par)
# print()
# node = doc('li')
# print(node.siblings('.element')) # find brother node
# pyquery test -> find node
# html = '''
# <div class="panel">
# <div class="panel-heading">
# <h4>Hello</h4>
# </div>
# <div class="panel-body" id="list-0">
# <ul class="list" id="list-1" name="elements">
# <li class="element1"><a href="www.123.com">Foo</li>
# <li class="element2"><a href="www.123.com">Bar</li>
# <li class="element3"><a href="www.123.com">Jay</li>
# </ul>
# <ul class="list list-small" id="list-2">
# <li class="element4"><a href="www.123.com">Foo</li>
# <li class="element5"><a href="www.123.com">Bar</li>
# </ul>
# </div>
# </div>
# '''
# doc = pq(html)
# item = doc('ul')
# print(item.attr('class'))
# print(item.attr.id)
# item = doc('li')
# print(item.attr('class')) # can not output attr
# for i in item.items():
# print(i.attr('class')) # can output attr
# print(i.text()) # can output text
# print(i.html()) # can output html
# # pyquery test -> class handle
# html = '''
# <div class="panel">
# <div class="panel-heading">
# <h4>Hello</h4>
# </div>
# <div class="panel-body" id="list-0">
# <ul class="list" id="list-1" name="elements">
# <li class="element1"><a href="www.123.com">Foo</li>
# <li class="element2"><a href="www.123.com">Bar</li>
# <li class="element3"><a href="www.123.com">Jay</li>
# </ul>
# <ul class="list list-small" id="list-2">
# <li class="element4"><a href="www.123.com">Foo</li>
# <li class="element5"><a href="www.123.com">Bar</li>
# </ul>
# </div>
# </div>
# '''
# doc = pq(html)
# ul = doc('ul')
# print(ul)
# print()
# ul.add_class('action')
# print(ul)
# print()
# ul.remove_class('action')
# print(ul)
# print()
# pyquery test -> attribute handle
# html = '''
# <div class="panel">
# <div class="panel-heading">
# <h4>Hello</h4>
# </div>
# <div class="panel-body" id="list-0">
# <ul class="list" id="list-1" name="elements">
# <li class="element1"><a href="www.123.com">Foo</li>
# <li class="element2"><a href="www.123.com">Bar</li>
# <li class="element3"><a href="www.123.com">Jay</li>
# </ul>
# <ul class="list list-small" id="list-2">
# <li class="element4"><a href="www.123.com">Foo</li>
# <li class="element5"><a href="www.123.com">Bar</li>
# </ul>
# </div>
# </div>
# '''
# doc = pq(html)
# ul = doc('ul')
# print(ul)
# print()
# ul.attr('name', 'sniper')
# print(ul)
# print()
# ul.text('change item')
# print(ul)
# print()
# ul.html('<a href="www.123.com">')
# print(ul)
# print()
# pyquery test -> attribute handle
# html = '''
# <div class="panel">
# <div class="panel-heading">
# <h4>Hello</h4>
# </div>
# <div class="panel-body" id="list-0">
# <ul class="list" id="list-1" name="elements">
# <li class="element1"><a href="www.123.com">Foo</li>
# <li class="element2"><a href="www.123.com">Bar</li>
# <li class="element3"><a href="www.123.com">Jay</li>
# </ul>
# <ul class="list list-small" id="list-2">
# <li class="element4"><a href="www.123.com">Foo</li>
# <li class="element5"><a href="www.123.com">Bar</li>
# </ul>
# </div>
# </div>
# '''
# doc = pq(html)
# li = doc('li:first-child')
# print(li)
# print()
# li = doc('li:last-child')
# print(li)
# print()
# li = doc('li:nth-child(2)')
# print(li)
# print()
# li = doc('li:gt(2)')
# print(li)
# print()
# li = doc('li:nth-child(2n)')
# print(li)
# print()
# li = doc('li:contains(Bar)')
# print(li)
# print()
|
[
"30463691+Sniper970119@users.noreply.github.com"
] |
30463691+Sniper970119@users.noreply.github.com
|
5788b3bbab2f799f304dbe17c00bef1aea0c7b18
|
9fdf64c2d07762cc71e99c9659437ec2f827975a
|
/Loop 9.py
|
ce131db5f2b98f65925fbe6aa8f646af548f3f79
|
[] |
no_license
|
Kome7a/automatebsp-tasks
|
bb72c41f0a678e67a377a7062150f5aed2e50cf1
|
583839d0c7e18a9cb189ba43049e59f25a3e2335
|
refs/heads/master
| 2022-04-24T20:09:05.867104
| 2020-04-23T12:15:59
| 2020-04-23T12:15:59
| 257,557,437
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 98
|
py
|
# For each outer index: print a star, then the pairs (i, 1) .. (i, 5).
for i in range(5):
    print("*")
    # The original incremented the loop variable before printing; shifting
    # the range start reproduces the exact same output without mutation.
    for inner in range(1, 6):
        print(i, inner)
|
[
"e.kontilov@gmail.com"
] |
e.kontilov@gmail.com
|
8e01f44ae69c477fb05ba33df3c1be93d5e4bbc7
|
a390bd7c0b1cf19f255913c03ad8c0231a0ea635
|
/scrapy/Tencent/Tencent/untils/osdemo.py
|
ae208923f96b397ed7bc40b8b1532cbc8e136311
|
[] |
no_license
|
ChangfengHU/bigdata_notes
|
d03c736f29bc55f33b17c626b09de0a2fdcd5a17
|
67cbf66d83b2f1a7b182afa375069df237ca7506
|
refs/heads/master
| 2020-04-14T23:55:02.408530
| 2019-02-02T16:23:54
| 2019-02-02T16:23:54
| 164,221,283
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 86
|
py
|
#encoding: utf-8
import os

# Directory two levels above this file (the grandparent directory).
_parent_dir = os.path.dirname(__file__)
path = os.path.dirname(_parent_dir)
print(path)
|
[
"faker_322@outlook.com"
] |
faker_322@outlook.com
|
fae2741b274219369d600210ca0936e5d0a71661
|
2e66469b5a1856bbc12b7038eab53c9e5b7f858e
|
/smoothing.py
|
85d4ba71011ee69cb49f01a1b1c4b3b7a5c75092
|
[
"MIT"
] |
permissive
|
elaina03/Edge-preserving-Multiscale-Image-Decomposition-based-on-Local-Extrema
|
62ec7eec62f17ea0d111e73f805a3ce99b852767
|
4304be64b44adb6b6a75f2ed2e2fd142e1f3a3ce
|
refs/heads/main
| 2023-03-07T10:09:35.709817
| 2021-02-16T03:41:06
| 2021-02-16T03:41:06
| 335,565,250
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,481
|
py
|
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import splu
import cv2
import sys
def getColorExact(localExtrema, ntscIm):
    """Propagate channels 1..d-1 from the extrema pixels to the whole image.

    Builds a sparse imgSize x imgSize linear system in which each free
    pixel equals a luminance-similarity-weighted average of its 3x3
    neighbors, while constrained (extrema) pixels keep their own value;
    the system is factored once and solved per channel.
    NOTE(review): this mirrors the Levin et al. colorization solver -- the
    weighting constants (0.6, 0.01, 0.000002) are taken as given.

    localExtrema: (n, m) boolean mask of constrained pixels.
    ntscIm: (n, m, d) image whose channel 0 is luminance (used for weights).
    Returns an (n, m, d) array: channel 0 copied, the rest interpolated.
    """
    [n,m,d] = ntscIm.shape
    imgSize = n*m
    nI = np.zeros(ntscIm.shape, dtype=ntscIm.dtype)
    nI[...,0] = ntscIm[...,0]  # luminance passes through unchanged
    indsM = np.arange(imgSize).reshape(n, m)  # linear index of every pixel
    wd = 1  # half-width of the (2*wd+1)^2 neighborhood window
    length = 0  # number of sparse triplets emitted so far
    consts_len = 0  # current equation (row) index; one row per pixel
    col_inds = np.zeros(imgSize*(2*wd+1)**2, dtype=int)
    row_inds = np.zeros(imgSize*(2*wd+1)**2, dtype=int)
    vals = np.zeros(imgSize*(2*wd+1)**2)
    gvals = np.zeros((2*wd+1)**2)  # scratch: neighborhood luminance values
    for i in range(n):
        for j in range(m):
            if not localExtrema[i,j]:
                # Free pixel: couple it to each neighbor with an affinity weight.
                tlen = 0
                for ii in range(max(0,i-wd),min(i+wd+1,n)):
                    for jj in range(max(0,j-wd),min(j+wd+1,m)):
                        if(ii != i) or (jj != j):
                            row_inds[length] = consts_len
                            col_inds[length] = indsM[ii,jj]
                            gvals[tlen] = ntscIm[ii,jj,0]
                            length = length +1
                            tlen = tlen+1
                t_val = ntscIm[i,j,0] # center pixel Y value
                gvals[tlen] = t_val
                # Local luminance variance controls the affinity falloff.
                c_var = np.mean((gvals[:tlen+1] - np.mean(gvals[:tlen+1]) )**2)
                csig=c_var*0.6
                mgv = min((gvals[:tlen]-t_val)**2)
                if csig < (-mgv/np.log(0.01)):
                    # Keep the closest neighbor's weight from dropping below 0.01.
                    csig = -mgv/np.log(0.01)
                if csig < 0.000002:
                    csig = 0.000002  # numerical floor for the variance
                # Gaussian affinities, normalized to sum to 1 over the neighbors.
                gvals[:tlen] = np.exp(-(gvals[:tlen]-t_val)**2/csig)
                gvals[:tlen] = gvals[:tlen]/sum(gvals[:tlen])
                vals[length-tlen:length] = -gvals[:tlen]
            # Diagonal entry: every pixel (free or constrained) gets coefficient 1,
            # so the matrix is square (consts_len ends equal to imgSize).
            row_inds[length] = consts_len
            col_inds[length] = indsM[i,j]
            vals[length]=1
            length = length+1
            consts_len = consts_len+1
    # Trim the preallocated triplet arrays to the entries actually written.
    vals = vals[:length]
    col_inds = col_inds[:length]
    row_inds = row_inds[:length]
    A_csc = csc_matrix((vals, (row_inds, col_inds)), shape=(consts_len, imgSize))
    LU = splu(A_csc)  # factor once, reuse the factorization for every channel
    b = np.zeros(A_csc.shape[0],dtype=ntscIm.dtype )
    for dim in range(1,d):
        # Right-hand side is nonzero only at the constrained (extrema) pixels.
        curIm = ntscIm[:,:,dim]
        b[indsM[localExtrema != 0]] = curIm[localExtrema]
        new_vals = LU.solve(b)
        nI[...,dim] = new_vals.reshape((n,m))
    return nI
def EdgePreservingSmooth(I,k=3):
    """
    Implement "Edge-preserving Multiscale Image Decomposition based on Local Extrema"
    Parameters
    -----------
    I: input image( BGR image or grayscale image )
    k: kernel size, default = 3
    Returns
    -----------
    M: smoothed image( BGR image or grayscale image )
    localMax: local maxima extrema( boolean matrix )
    localMin: local minima extrema( boolean matrix )
    MaxEnvelope: extremal envelopes of maxima extrema( Y+ extremal envelopes at each BGR channel )
    MinEnvelope: extremal envelope of minima extrema( Y+ extremal envelopes at each BGR channel )
    """
    # wd: half width of kernel size
    wd = k//2
    if I.ndim == 3:
        channel = I.shape[2]
        YUV = cv2.cvtColor(I,cv2.COLOR_BGR2YUV)
        Y = np.double(YUV[:,:,0])/255  # luminance scaled to [0, 1]
        image = np.double(I)/255
        #cv2.imshow("Y",Y)
        #cv2.waitKey(0)
    else:
        # NOTE(review): in this grayscale branch 'image' is never assigned,
        # so the Y_BGR fill loop below raises NameError -- confirm whether
        # the grayscale path is actually used anywhere.
        channel = 1
        Y = np.double(I)/255
    print("Extrema location")
    height,width = Y.shape
    localMax = np.zeros( Y.shape, dtype=bool)
    localMin = np.zeros( Y.shape, dtype=bool)
    for i in range(height):
        for j in range(width):
            # Count strictly brighter / darker pixels in the clipped k x k window.
            center = Y[i,j]
            ii_start = max(0,i-wd)
            ii_end = min(i+wd+1,height)
            jj_start = max(0,j-wd)
            jj_end = min(j+wd+1,width)
            cover = Y[ii_start:ii_end,jj_start:jj_end]
            maxcount = np.sum(cover > center)
            mincount = np.sum(center > cover)
            # A pixel is an extremum when at most k-1 window pixels beat it.
            if maxcount <= k-1:
                localMax[i,j] = True
            if mincount <= k-1:
                localMin[i,j] = True
    print("Extermal envelope construction")
    # Stack luminance plus each color channel: getColorExact interpolates
    # channels 1..channel using channel 0 as the similarity guide.
    Y_BGR = np.zeros((height,width,4))
    Y_BGR[...,0] = Y;
    for i in range(channel):
        Y_BGR[...,i+1] = image[...,i]
    MaxEnvelope = getColorExact(localMax, Y_BGR)
    MinEnvelope = getColorExact(localMin, Y_BGR)
    print("Computation of the smoothed mean")
    # Smoothed image = pointwise mean of the two envelopes, back to uint8.
    M = (MaxEnvelope[:,:,1:(channel+1)] + MinEnvelope[:,:,1:(channel+1)])/2;
    M = (M*255).astype(np.uint8)
    #cv2.imshow("M",M)
    #cv2.waitKey(0)
    #cv2.destroyAllWindows()
    return M, localMax, localMin, MaxEnvelope, MinEnvelope
if __name__ == '__main__':
    # CLI driver: repeatedly smooth an image, growing the kernel each pass
    # (coarser scales), then display input | smoothed | luminance residual.
    if len(sys.argv) < 4:
        print('Usage:', sys.argv[0], '<ImagePath>', '<KernelSize>', '<Iteration>')
        sys.exit(1)
    imagepath = sys.argv[1]
    kernelsize = int(sys.argv[2])
    iteration = int(sys.argv[3])
    I = cv2.imread(imagepath)
    M = I.copy()
    for i in range(iteration):
        print('Iteration: ', str(i+1))
        M, localmax, localmin, maxenvelope, minenvelope = EdgePreservingSmooth(M, kernelsize)
        # Enlarge the window between passes so each pass removes coarser detail.
        kernelsize += 4
        print('')
    I_YUV = cv2.cvtColor(I, cv2.COLOR_BGR2YUV)
    M_YUV = cv2.cvtColor(M, cv2.COLOR_BGR2YUV)
    # NOTE(review): both operands are uint8, so this difference wraps modulo
    # 256 rather than clamping at 0 — confirm that is the intended residual.
    D = I_YUV[:,:,0]-M_YUV[:,:,0]
    # Make the grey scale image have three channels so it can be stacked.
    grey_3_channel = cv2.cvtColor(D, cv2.COLOR_GRAY2BGR)
    numpy_horizontal = np.hstack(( I, M, grey_3_channel))
    cv2.imshow('Edge-preserving Smooth Result', numpy_horizontal)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
|
[
"ellie931492@gmail.com"
] |
ellie931492@gmail.com
|
e60b1bbebff2af542d443aeaabf3effd9c727c2e
|
27fdb210daacc29a7031c2f8e0e2cf28f6e78e48
|
/objects/_train_det.py
|
9c568c8923d1d3bd1c61f75dcd6f9eba09120ab7
|
[] |
no_license
|
cubefreaker/webautomation
|
e79774bef554ce3b5796d3deefec3f41ea34ff69
|
be86def2540ef72d84ca2a0281ac4e5f53b738ca
|
refs/heads/master
| 2021-06-13T00:46:47.216292
| 2019-03-28T07:19:32
| 2019-03-28T07:19:32
| 178,146,195
| 0
| 0
| null | 2021-06-01T23:32:36
| 2019-03-28T07:06:41
|
Python
|
UTF-8
|
Python
| false
| false
| 11,242
|
py
|
import time
import names
import datetime
import random, string
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException, StaleElementReferenceException, ElementNotVisibleException
from selenium.webdriver.common.action_chains import ActionChains
class InvTrain(object):
    """Page object automating the INVOICING > Train 'Add' invoice form.

    Each method fills one widget; passing None makes the method generate a
    random/derived value.  Values chosen along the way (name, phones, the
    commission type, ...) are stored on the instance so later fields can
    reuse them.
    """

    def __init__(self, driver):
        self.driver = driver
        self.driver.find_element(By.LINK_TEXT, "INVOICING").click()
        self.driver.find_element(By.LINK_TEXT, "Train").click()

    def get_url(self, link):
        """Remember and return the page URL."""
        self.url = link
        return self.url

    def add(self):
        """Open the 'Add' invoice dialog."""
        self.driver.find_element(By.LINK_TEXT, "Add").click()

    def customer(self, customer=None):
        """Select a customer; a random option from the dropdown when None."""
        driver = self.driver
        driver.find_element(By.XPATH, "(.//*[normalize-space(text()) and normalize-space(.)='Customer'])[5]/following::span[4]").click()
        a = driver.find_element(By.CLASS_NAME, "select2-search__field")
        if customer is None:
            li = driver.find_element(By.CLASS_NAME, 'select2-results__options').text.splitlines()
            a.send_keys(random.choice(li))
        else:
            a.send_keys(customer)
        a.send_keys(Keys.ENTER)

    def pax_type(self, pax_type=None):
        """Select the passenger type (defaults to 'Adult')."""
        driver = self.driver
        driver.find_element(By.XPATH, '//*[@id="modal-add"]/div/div/div[2]/div[2]/div[1]/div/span[2]/span[1]/span').click()
        a = driver.find_element(By.CLASS_NAME, "select2-search__field")
        a.send_keys('Adult' if pax_type is None else pax_type)
        a.send_keys(Keys.ENTER)

    def pax_f_name(self, f_name=None):
        """Fill the first name; generates one (and a gender) when None."""
        driver = self.driver
        a = driver.find_element(By.NAME, "FirstName")
        self.gen = random.choice(['male', 'female'])
        self.fname = names.get_first_name(gender=self.gen)
        a.send_keys(self.fname if f_name is None else f_name)

    def pax_l_name(self, l_name=None):
        """Fill the last name; random when None."""
        driver = self.driver
        a = driver.find_element(By.NAME, "LastName")
        self.lname = names.get_last_name()
        a.send_keys(self.lname if l_name is None else l_name)

    def pax_email(self, email=None):
        """Fill the email; derived from the generated name when None."""
        driver = self.driver
        a = driver.find_element(By.NAME, "Email")
        mail = '%s.%s@gmail.com' % (self.fname.lower(), self.lname.lower())
        a.send_keys(mail if email is None else email)

    def pax_title(self):
        """Select Mr/Mrs to match the gender picked in pax_f_name()."""
        driver = self.driver
        driver.find_element(By.XPATH, '//*[@id="modal-add"]/div/div/div[2]/div[3]/div[1]/div/span[2]/span[1]/span').click()
        a = driver.find_element(By.CLASS_NAME, "select2-search__field")
        title = 'Mr' if self.gen == 'male' else 'Mrs'
        a.send_keys(title)
        a.send_keys(Keys.ENTER)

    def phone(self, m_phone=None):
        """Fill the mobile phone; random 08xxxxxxxxx when None."""
        driver = self.driver
        a = driver.find_element(By.NAME, "MobilePhone")
        if m_phone is None:
            self.mphone = '08%s' % (''.join(random.choice(string.digits) for _ in range(9)))
            a.send_keys(self.mphone)
        else:
            a.send_keys(m_phone)

    def h_phone(self, h_phone=None):
        """Fill the home phone; random 0xxxxxxxxx when None."""
        driver = self.driver
        a = driver.find_element(By.NAME, "HomePhone")
        if h_phone is None:
            self.hphone = '0%s' % (''.join(random.choice(string.digits) for _ in range(9)))
            a.send_keys(self.hphone)
        else:
            a.send_keys(h_phone)

    def o_phone(self, o_phone=None):
        """Fill the other phone; reuses one of the earlier numbers (or '-')."""
        driver = self.driver
        a = driver.find_element(By.NAME, "OtherPhone")
        if o_phone is None:
            a.send_keys(random.choice(([self.mphone, self.hphone, '-'])))
        else:
            a.send_keys(o_phone)

    def remarks(self, *remark):
        """Fill the internal-use remark fields Remark1..Remark6 in order."""
        driver = self.driver
        try:
            driver.find_element(By.NAME, "Remark1").click()
        except ElementNotVisibleException:
            # Bug fix: the original referenced `.click` without parentheses,
            # so the remarks section expander was never actually clicked.
            driver.find_element(By.XPATH, "(.//*[normalize-space(text()) and normalize-space(.)='*For internal use only'])[1]/i[1]").click()
        field_names = ["Remark1", "Remark2", "Remark3", "Remark4", "Remark5", "Remark6"]
        # zip() also makes fewer than six remarks safe (the original indexed
        # li[0]..li[5] unconditionally and raised IndexError).
        for field_name, value in zip(field_names, remark):
            driver.find_element(By.NAME, field_name).send_keys(value)

    def supplier(self, supplier=None):
        """Select the supplier; random option when None."""
        driver = self.driver
        driver.find_element(By.XPATH, '//*[@id="modal-add"]/div/div/div[2]/div[6]/div[1]/div/span[2]/span[1]/span').click()
        a = driver.find_element(By.CLASS_NAME, "select2-search__field")
        if supplier is None:
            li = driver.find_element(By.CLASS_NAME, 'select2-results__options').text.splitlines()
            a.clear()
            a.send_keys(random.choice(li))
        else:
            a.clear()
            a.send_keys(supplier)
        a.send_keys(Keys.ENTER)

    def pnr_code(self, pnr=None):
        """Fill the PNR; random six uppercase alphanumerics when None."""
        a = self.driver.find_element(By.NAME, "PnrCode")
        if pnr is None:
            self.pnr = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
            a.send_keys(self.pnr)
        else:
            a.send_keys(pnr)

    def ticket_no(self, tk_no=None):
        """Fill the ticket number; left blank when None."""
        a = self.driver.find_element(By.NAME, "TicketNo")
        if tk_no is not None:
            a.send_keys(tk_no)

    def ticketed_date(self, t_date=None):
        """Fill the ticketed date; today (dd-mm-YYYY) when None."""
        a = self.driver.find_element(By.NAME, 'TicketedDate')
        curr_date = datetime.datetime.now()
        self.date = curr_date.strftime('%d-%m-%Y')
        a.clear()
        a.send_keys(self.date if t_date is None else t_date)
        a.send_keys(Keys.ENTER)

    def issuer(self, issuer=None):
        """Select the issuer; random option when None."""
        driver = self.driver
        driver.find_element(By.XPATH, '//*[@id="modal-add"]/div/div/div[2]/div[8]/div/div/span[2]/span[1]/span').click()
        a = driver.find_element(By.CLASS_NAME, "select2-search__field")
        if issuer is None:
            li = driver.find_element(By.CLASS_NAME, 'select2-results__options').text.splitlines()
            a.send_keys(random.choice(li))
        else:
            a.send_keys(issuer)
        a.send_keys(Keys.ENTER)

    def train_no(self, no=None):
        """Fill the train number as 'KA nnn'; random digits when None."""
        a = self.driver.find_element(By.NAME, 'TrainNo')
        if no is None:
            no = 'KA %s' % (''.join(random.choice(string.digits) for _ in range(3)))
            a.clear()
            a.send_keys(no)
        else:
            a.clear()
            a.send_keys('KA %s' % (no))

    def t_class(self, Class=None):
        """Fill the class field; three random uppercase letters when None."""
        a = self.driver.find_element(By.XPATH, "(.//*[normalize-space(text()) and normalize-space(.)='Class'])[1]/following::input[1]")
        if Class is None:
            Class = ''.join(random.choice(string.ascii_uppercase) for _ in range(3))
        a.send_keys(Class)

    def origin(self, ori=None):
        """Select the origin station (defaults to 'PSE')."""
        driver = self.driver
        driver.find_element(By.XPATH, '//*[@id="modal-add"]/div/div/div[2]/div[10]/div[1]/div/span[2]/span[1]/span').click()
        a = driver.find_element(By.CLASS_NAME, "select2-search__field")
        a.send_keys('PSE' if ori is None else ori)
        a.send_keys(Keys.ENTER)

    def destination(self, dest=None):
        """Select the destination station (defaults to 'BD')."""
        driver = self.driver
        driver.find_element(By.XPATH, '//*[@id="modal-add"]/div/div/div[2]/div[11]/div[1]/div/span[2]/span[1]/span').click()
        a = driver.find_element(By.CLASS_NAME, "select2-search__field")
        a.send_keys('BD' if dest is None else dest)
        a.send_keys(Keys.ENTER)

    def dep_date(self, d_date=None):
        """Fill the departure date; reuses the ticketed date when None."""
        a = self.driver.find_element(By.NAME, 'DepDate')
        a.clear()
        a.send_keys(self.date if d_date is None else d_date)
        a.send_keys(Keys.ENTER)

    def arr_date(self, arr_date=None):
        """Fill the arrival date; reuses the ticketed date when None."""
        a = self.driver.find_element(By.NAME, 'ArrDate')
        a.clear()
        a.send_keys(self.date if arr_date is None else arr_date)
        a.send_keys(Keys.ENTER)

    def base_fare(self, base=None):
        """Fill the base fare; random '1nnn00.00'-style amount when None."""
        driver = self.driver
        a = driver.find_element(By.NAME, 'Basic')
        if base is None:
            base = '1%s00.00' % (''.join(random.choice(string.digits) for _ in range(3)))
        a.clear()
        a.send_keys(base)

    def service_fee(self, s_fee=None):
        """Fill the service fee; random 'nn000.00'-style amount when None."""
        driver = self.driver
        a = driver.find_element(By.NAME, "ServiceFee")
        if s_fee is None:
            s_fee = '%s000.00' % (''.join(random.choice(string.digits) for _ in range(2)))
        a.clear()
        a.send_keys(s_fee)

    def comm_type(self, comm=None):
        """Select the commission type and remember the chosen label."""
        driver = self.driver
        driver.find_element(By.XPATH, '//*[@id="modal-add"]/div/div/div[2]/div[12]/div[4]/div/span/span[1]/span').click()
        a = driver.find_element(By.CLASS_NAME, "select2-search__field")
        if comm is None:
            # Bug fix: self.comm used to keep the *whole* option list, so the
            # string comparisons in commission() could never match.  Store the
            # actually-selected label instead.
            options = driver.find_element(By.CLASS_NAME, "select2-results__options").text.splitlines()
            self.comm = random.choice(options)
        else:
            self.comm = comm
        a.send_keys(self.comm)
        a.send_keys(Keys.ENTER)

    def commission(self, comm=None):
        """Fill the agent commission, formatted to match the chosen type."""
        driver = self.driver
        a = driver.find_element(By.NAME, "AgentComm")
        if comm is None:
            if self.comm == 'Amount':
                comm = '1%s00.00' % (''.join(random.choice(string.digits) for _ in range(2)))
            elif self.comm == 'Percent':
                comm = '%s.00' % (random.choice(string.digits))
            else:
                # Unknown commission type: leave the field untouched.
                return
        a.clear()
        a.send_keys(comm)

    def save_det(self):
        """Save the passenger/detail form."""
        self.driver.find_element(By.ID, "savePax").click()

    def confirm_comm(self):
        """Confirm the commission dialog."""
        self.driver.find_element(By.XPATH, '//*[@id="modal-save"]/div/div/div[3]/a[2]').click()
|
[
"hamzah_habibi@rocketmail.com"
] |
hamzah_habibi@rocketmail.com
|
542c19bcd75dbf4dbe3ce07f321301a0984f87a2
|
4600b1a9aa934ed67abae370c5e167d0b93f02f3
|
/TTP/Exfiltration/data_encryption.py
|
190876cc5dab40ada90b505c06c4b83c818d8e27
|
[] |
no_license
|
nallamuthu/RedTeam
|
c9141c6c5f4d0de1299dce5b9690294f1db4bde7
|
6f312b528263711c3fe9bd7c20b43d5dc49b3204
|
refs/heads/master
| 2021-04-11T12:53:05.205022
| 2020-07-04T08:27:20
| 2020-07-04T08:27:20
| 249,022,755
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,727
|
py
|
import pyAesCrypt
from Crypto.Cipher import AES
import base64
import hashlib
from Crypto.Cipher import AES
from Crypto import Random
from arc4 import ARC4
from itertools import *
import sys
from cryptography.fernet import Fernet
'''
def write_key():
def load_key():
def decrypt_file_fernet(filename, key):
def encrypt_file_fernet(filename, key):
def xor_file_encode(input_file_name,output_file_name,password):
def xor_file_decode(encoded_file_name,output_file_name,password):
def xor_data_encode(input_data,password): -> return cipher_data
def xor_data_decode(cipher_data,password): -> return plain_data
def rc4_data_encrypt(plain_data, password): -> return cipher_data
def rc4_data_decrypt(cipher_data, password): -> return plain_data
def aes_data_encrypt(plain_data,aes_key): -> return cipher_data
def aes_data_decrypt(cipher_data,aes_key): -> return plain_data
'''
BLOCK_SIZE = 16  # AES block size in bytes


def pad(s):
    """Pad *s* PKCS#7-style (as characters) to a multiple of BLOCK_SIZE.

    The pad character encodes the pad length, so unpad() can remove it.
    (Converted from lambda assignments to proper defs — PEP 8 E731.)
    """
    n = BLOCK_SIZE - len(s) % BLOCK_SIZE
    return s + n * chr(n)


def unpad(s):
    """Strip the PKCS#7-style padding added by pad()."""
    return s[:-ord(s[len(s) - 1:])]
#Generates a key and save it into a file
def write_key():
    """Generate a fresh Fernet key and persist it to ``key.key``."""
    with open("key.key", "wb") as key_file:
        key_file.write(Fernet.generate_key())
#Loads the key from the current directory named `key.key`
def load_key():
    """Return the Fernet key stored in ``key.key`` in the current directory.

    Raises FileNotFoundError if write_key() has not been called yet.
    """
    # Bug fix: the original leaked the file handle (open() without close).
    with open("key.key", "rb") as key_file:
        return key_file.read()
#Given a filename (str) and key (bytes), it encrypt the file and create encrypted file
def encrypt_file_fernet(filename, key):
    """Encrypt *filename* with the Fernet *key*, writing "Encrypted_File"."""
    fernet = Fernet(key)
    # Read the whole plaintext, then write the token to the output file.
    with open(filename, "rb") as source:
        encrypted_data = fernet.encrypt(source.read())
    with open("Encrypted_File", "wb") as destination:
        destination.write(encrypted_data)
#Given a filename (str) and key (bytes), it decrypts the file and create encrypted file
def decrypt_file_fernet(filename, key):
    """Decrypt *filename* with the Fernet *key*, writing "decrpt.xlsx"."""
    fernet = Fernet(key)
    # Read the encrypted token, then restore the original bytes.
    with open(filename, "rb") as source:
        decrypted_data = fernet.decrypt(source.read())
    with open("decrpt.xlsx", "wb") as destination:
        destination.write(decrypted_data)
#Write the data to file
def write_to_file(input_data, file_name):
    """Write binary *input_data* to *file_name*, overwriting any existing file."""
    # Use a context manager so the handle is closed even if write() raises.
    with open(file_name, 'wb') as f:
        f.write(input_data)
#XOR Encode the Text File
def xor_file_encode(input_file_name, output_file_name, password):
    """XOR-encode a text file with *password*; returns *output_file_name*."""
    # Bug fix: the original leaked both file handles (open() without close).
    with open(input_file_name) as fin:
        plain_data = fin.read()
    cipher_data = xor_data_encode(plain_data, password)
    with open(output_file_name, 'w') as fout:
        fout.write(''.join(cipher_data))
    return output_file_name
#XOR Decode the Text File
def xor_file_decode(encoded_file_name, output_file_name, password):
    """XOR-decode a file produced by xor_file_encode(); returns *output_file_name*."""
    # Bug fix: the original leaked both file handles (open() without close).
    with open(encoded_file_name) as fin:
        cipher_data = fin.read()
    plain_data = xor_data_decode(cipher_data, password)
    with open(output_file_name, 'w') as fout:
        fout.write(''.join(plain_data))
    return output_file_name
##Decode Data - XOR
def xor_data_encode(input_data, password):
    """XOR each character of *input_data* with the repeating *password* key stream."""
    key_stream = cycle(password)
    return ''.join(chr(ord(ch) ^ ord(k)) for ch, k in zip(input_data, key_stream))
##Decode Data - XOR
def xor_data_decode(cipher_data, password):
    """Invert xor_data_encode(): XOR is its own inverse under the same password."""
    pairs = zip(cipher_data, cycle(password))
    decoded_chars = [chr(ord(c) ^ ord(k)) for c, k in pairs]
    return ''.join(decoded_chars)
##Encrypt Data - RC4
def rc4_data_encrypt(plain_data, password):
    """Encrypt *plain_data* with RC4 keyed by *password*."""
    return ARC4(password).encrypt(plain_data)
##Decrypt Data - RC4
def rc4_data_decrypt(cipher_data, password):
    """Decrypt RC4 data produced with the same *password* (RC4 is symmetric)."""
    return ARC4(password).decrypt(cipher_data)
#Encrypt Data - AES
def aes_data_encrypt(plain_data, password):
    """AES-256-CBC encrypt: key = SHA-256(password); returns base64(IV || ciphertext)."""
    private_key = hashlib.sha256(password.encode("utf-8")).digest()
    # Pad the plaintext to a whole number of 16-byte blocks (PKCS#7-style pad()).
    plain_data = pad(plain_data)
    # Fresh random IV per message; it is prepended so decrypt can recover it.
    iv = Random.new().read(AES.block_size)
    cipher = AES.new(private_key, AES.MODE_CBC, iv)
    # NOTE(review): pad()/cipher.encrypt() are fed a str here — modern
    # PyCryptodome requires bytes; confirm which Crypto library this targets.
    cipher_data = base64.b64encode(iv + cipher.encrypt(plain_data))
    return cipher_data
#Decrypt Data - AES
def aes_data_decrypt(cipher_data, password):
    """Inverse of aes_data_encrypt(): base64-decode, split IV, decrypt, unpad."""
    private_key = hashlib.sha256(password.encode("utf-8")).digest()
    cipher_data = base64.b64decode(cipher_data)
    # The first AES block is the IV that aes_data_encrypt() prepended.
    iv = cipher_data[:16]
    cipher = AES.new(private_key, AES.MODE_CBC, iv)
    plain_data = unpad(cipher.decrypt(cipher_data[16:]))
    return plain_data
# Demo driver: generate a key, Fernet-encrypt/decrypt a file, then exit.
# NOTE(review): sys.exit(0) below makes everything after it unreachable
# dead code — the XOR/AES/RC4 demos never run.
write_key()
key = load_key()
# Encrypt any File
encrypt_file_fernet('Book1.xlsx', key);
decrypt_file_fernet("Encrypted_File", key);
sys.exit(0)
# --- unreachable from here on (see sys.exit above) ---
# Only Text File XOR Encoding
output_file = xor_file_encode('i.txt', '123_enc', 'hello')
output_file = xor_file_decode('File_Enc', 'card.txt', 'hello')
#output_file=xor_file_encode('Output.xlsx','output_file','hello')
cipher_data = aes_data_encrypt('hello', '852')
print(cipher_data)
plain_data = aes_data_decrypt(cipher_data, '852')
print(plain_data)
cipher_data = rc4_data_encrypt('hello', '852')
print(cipher_data)
plain_data = rc4_data_decrypt(cipher_data, '852')
print(plain_data)
cipher_data = xor_data_encode('hello', '852')
print(cipher_data)
plain_data = xor_data_decode(cipher_data, '852')
print(plain_data)
|
[
"nallamuthu.92@gmail.com"
] |
nallamuthu.92@gmail.com
|
6f5fc191e7adbbfba9fe72f95be2821fb8e2cd90
|
384eb64b746b4e5e3d49c495bcc66925f81085cf
|
/contacts/views.py
|
a52c2c90673bc0246acc73f3cceb0d4edb4e434d
|
[] |
no_license
|
julianodesenv/python-schedule
|
c650e87f4257ca929e7af4f7250c375a676436bb
|
e3727a80dc42df3c269100d4234be9d89c3a95cb
|
refs/heads/master
| 2023-08-15T16:03:11.637381
| 2021-10-21T19:24:35
| 2021-10-21T19:24:35
| 419,715,672
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,631
|
py
|
from django.shortcuts import render, get_object_or_404, redirect
from django.http import Http404
from django.core.paginator import Paginator
from django.db.models import Q, Value
from django.db.models.functions import Concat
from .models import Contact
from django.contrib import messages
def index(request):
    """List visible contacts, newest first, paginated ten per page."""
    visible_contacts = Contact.objects.order_by('-id').filter(view=True)
    paginator = Paginator(visible_contacts, 10)
    page_number = request.GET.get('page')
    page = paginator.get_page(page_number)
    return render(request, 'contacts/index.html', {'contacts': page})
def show(request, contact_id):
    """Render a single contact's detail page; 404 when missing or hidden."""
    contact = get_object_or_404(Contact, id=contact_id)
    if contact.view:
        return render(request, 'contacts/show.html', {'contact': contact})
    raise Http404()
def search(request):
    """Search contacts by full name or phone number.

    Redirects back to the index with an error message when the search term
    is missing or empty; otherwise renders a paginated result list.
    """
    term = request.GET.get('term')
    if not term:
        messages.add_message(request, messages.ERROR, 'Campo de pesquisa não pode ser vazio!')
        return redirect('index')
    # Match against "name last_name" as a single searchable column.
    full_name = Concat('name', Value(' '), 'last_name')
    # NOTE(review): unlike index(), hidden contacts (view=False) are not
    # excluded here — confirm whether search should also filter view=True.
    contacts = Contact.objects.annotate(full_name=full_name).filter(
        Q(full_name__icontains=term) | Q(phone__icontains=term)
    )
    paginator = Paginator(contacts, 10)
    page = request.GET.get('page')
    contacts = paginator.get_page(page)
    return render(request, 'contacts/index.html', {'contacts': contacts})
|
[
"julianodesenv@gmail.com"
] |
julianodesenv@gmail.com
|
cb713521b660e4f2441b52912c4a2d4572fefc08
|
d205918a6d47e8e3303fd105083c6cbdf94da3ad
|
/handOnRaspi/2servo.py
|
6ff5ee06810ebb0ebd3a5d3caad10d2ad2821818
|
[
"MIT"
] |
permissive
|
tfrere/robot-repo
|
08bf0080a6b262243a526aa7ef496277d4b7f2b2
|
66220f604991540f7d23148c524d41d775add263
|
refs/heads/master
| 2022-12-04T13:10:30.080067
| 2022-11-23T20:22:34
| 2022-11-23T20:22:34
| 33,684,746
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 548
|
py
|
from Tkinter import *
import time
from RIPO import PWM
# Bug fix: the original line `for i = 0 ; i ++ ; i < 2:` is C-style syntax and
# does not parse as Python at all.  Build the two servo slots with a list.
servo = [PWM.Servo() for _ in range(2)]
# NOTE(review): `GPIO` is never imported in this file (only RIPO's PWM is),
# so these two lines raise NameError at runtime — presumably
# `import RPi.GPIO as GPIO` plus pin setup is missing.  Confirm before use.
servo[0] = GPIO.PWM(17, 100)
servo[1] = GPIO.PWM(18, 100)
class App:
    # Minimal Tk UI: a single 0-180 slider whose value drives a servo duty cycle.
    def __init__(self, master):
        frame = Frame(master)
        frame.pack()
        # Scale calls self.update with the new angle on every change.
        scale = Scale(frame, from_=0, to=180, orient=HORIZONTAL, command=self.update)
        scale.grid(row=0)
    def update(self, angle):
        # Map 0-180 degrees onto a 2.5-20.5% PWM duty cycle.
        duty = float(angle) / 10.0 + 2.5
        # NOTE(review): `pwm` is not defined anywhere in this file — probably
        # one of the `servo` objects was intended; this raises NameError.
        pwm.ChangeDutyCycle(duty)
# Build the Tk window, attach the app, and enter the event loop.
root = Tk()
root.wm_title('Servo Control')
app = App(root)
root.geometry("200x50+0+0")
root.mainloop()
|
[
"tfrere"
] |
tfrere
|
a8ff9045b8723ec30df50533cea07324a8e8def4
|
6d92d6ce638b59ff1250ce39a55e672f905a0de0
|
/analysis/utils.py
|
1be344f004368d1331c60d6b98ab24be0e88ebde
|
[] |
no_license
|
SaGagnon/gecode-5-extension
|
df3a17c3ef243a00facf9e96396cf096ee381b16
|
513b95f7b7996f69e7ea45191a44d68cfdedb823
|
refs/heads/master
| 2021-09-06T14:41:14.305373
| 2018-02-07T17:19:54
| 2018-02-07T17:19:54
| 92,327,692
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,434
|
py
|
#%load /home/sam/gecode-5.0.0-extension/analysis/utils.py
#%%writefile /home/sam/gecode-5.0.0-extension/analysis/utils.py
from sklearn.linear_model import LogisticRegressionCV
import math
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import seaborn as sns
import pandas as pd
from io import StringIO
%matplotlib inline
def get_node_in_exs(exs, path_db):
    # Pull per-node density rows for the given execution ids from the SQLite
    # database, tagging each (exec, node, var, val) row with in_sol = 1 when
    # that assignment appears in the final solution (results table).
    # NOTE: uses IPython's !shell escape — this only runs inside a notebook.
    req_sql = """
SELECT d.*,
  CASE WHEN r.exec_id IS NOT NULL THEN 1 ELSE 0 END as in_sol
FROM densities AS d
LEFT JOIN results AS r
  ON d.exec_id=r.exec_id
  AND d.var_id=r.var_id
  AND d.val=r.val
WHERE d.exec_id = $2;
"""
    df = pd.DataFrame()
    for ex in exs:
        # $2 is a homegrown placeholder replaced textually per execution id.
        req_sql_ex = req_sql.replace('$2', str(ex))
        output = !sqlite3 -header -csv {path_db} "{req_sql_ex}"
        if len(output) == 0: continue
        df = df.append(
            pd.read_csv(
                StringIO(output.n),
                index_col=['exec_id','node_id','var_id','val']
            )
        )
    return df
def get_node_in_exs_old_db(exs, path_db, sat=True):
    # Same idea as get_node_in_exs() but against the older schema: joins the
    # nodes table (optionally keeping only satisfiable nodes) and only keeps
    # executions that actually produced results.
    # NOTE: uses IPython's !shell escape — notebook-only code.
    req_sql = """
SELECT d.*,
  n.sat,
  CASE WHEN r.exec_id IS NOT NULL THEN 1 ELSE 0 END as in_sol
FROM densities AS d
JOIN nodes AS n
  ON d.exec_id=n.exec_id
  AND d.node_id=n.node_id
  $1
LEFT JOIN results AS r
  ON d.exec_id=r.exec_id
  AND d.var_idx=r.var_idx
  AND d.val=r.val
  AND r.res_id=0 -- TEMPORAIRE
WHERE d.exec_id = $2
AND EXISTS (
  SELECT exec_id
  FROM results as rr
  WHERE rr.exec_id = $2
);
"""
    # $1 / $2 are homegrown textual placeholders, not SQL bind parameters.
    if sat:
        req_sql = req_sql.replace('$1', "AND n.sat=1")
    df = pd.DataFrame()
    for ex in exs:
        req_sql_ex = req_sql.replace('$2', str(ex))
        output = !sqlite3 -header -csv {path_db} "{req_sql_ex}"
        if len(output) == 0: continue
        df = df.append(
            pd.read_csv(
                StringIO(output.n),
                index_col=['exec_id','node_id','prop_id','var_idx','val']
            )
        )
    return df
def get_node_in_exs_2runs(path_db, exs=[]):
    # Single-query variant: fetch densities (with in_sol flags) for all
    # executions at once, optionally restricted to the ids in `exs`.
    # NOTE(review): `exs=[]` is a mutable default argument — harmless here
    # because it is never mutated, but worth cleaning up.
    # NOTE: uses IPython's !shell escape — notebook-only code.
    req_sql = """
SELECT d.*,
  CASE WHEN r.exec_id IS NOT NULL THEN 1 ELSE 0 END as in_sol
FROM densities AS d
LEFT JOIN results AS r
  ON d.exec_id=r.exec_id
  AND d.var_id=r.var_id
  AND d.val=r.val
"""
    if len(exs) != 0:
        # Build a literal "WHERE exec_id in (a,b,c)" clause from the id list.
        _exs = ""
        for x in exs:
            _exs += str(x) + ','
        _exs = _exs[:-1]
        req_sql += " WHERE exec_id in (" + _exs + ")"
    output = !sqlite3 -header -csv {path_db} "{req_sql}"
    return pd.read_csv(
        StringIO(output.n),
        index_col=['exec_id','node_id','prop_id','var_id','val']
    )
# Density-feature columns used by the solution-separation plots below.
features_subset = [
    "max_sd",
    "a_avg_sd"
]
def plot_features_sln_sep(features):
    # Plot a KDE of each feature split by in_sol:
    # red = assignments not in the solution, green = assignments in it.
    # NOTE(review): reads the notebook-global `df` built by the query helpers
    # above — this only works after those cells ran; consider passing df in.
    width = 3
    height = math.ceil(len(features)/width)
    plt.figure(figsize=(16, 4*height))
    for i, feature in enumerate(features):
        # NOTE(review): plt.subplot takes (nrows, ncols, idx) — the
        # (width, height, ...) order here looks transposed; confirm layout.
        plt.subplot(width, height, i+1)
        plt.title(feature)
        sns.kdeplot(df[df.in_sol == False][feature], color='r')
        sns.kdeplot(df[df.in_sol == True][feature], color='g')
        plt.gca().legend_.remove()
        plt.ylim(0, 10)
def get_X_y(df):
    """Split a dataframe into features (all but the last column) and target (last column)."""
    features = df.iloc[:, :-1]
    target = df.iloc[:, -1]
    return features, target
# TEMP
def print_coefs(clf, features):
print('double _x = 0;')
for i, coef in enumerate(clf.coef_[0]):
print("_x += %.4f * %s;" % (coef, features[i][1]))
print('double intercept = %.4f;' % (clf.intercept_))
print('_x += intercept;')
# TEMP
def solved_graph(df, xcol, labels={}, xlabel="", ylabel=False, heur_to_plot=[]):
if heur_to_plot == []:
heur_to_plot = df.reset_index()['heur'].unique()
for heur in heur_to_plot:
_len = len(df.loc[heur])
x = list(df.loc[heur].dropna().sort_values(by=xcol)[xcol].values)
y = [(i+1)/_len*100 for i,_ in enumerate(x)]
x = [0] + x
y = [0] + y
lab = heur
if heur in labels:
lab = labels[heur]
plt.plot(x,y, label=lab)
plt.xscale('log')
if xlabel != "" : plt.xlabel(xlabel)
else: plt.xlabel(xcol)
if ylabel: plt.ylabel('% solved')
else: plt.setp(plt.gca().get_yticklabels(), visible=False)
plt.ylim(0,100)
plt.legend(loc='lower right')
def read_data_grappe(path):
    """Load a grappe benchmark file (columns: heur ex failures time, space
    separated), index it by (heur, ex), and turn -1 sentinels into NaN."""
    columns = ['heur', 'ex', 'failures', 'time']
    raw = pd.read_csv(path, sep=' ', names=columns)
    return raw.replace(-1, np.nan).set_index(['heur', 'ex'])
def failures_time_solved(df, title='', **kwargs):
    """Draw side-by-side %-solved curves over failure count and solve time."""
    plt.suptitle(title)
    plt.subplot(1, 2, 1)
    # Bug fix: the axis label was passed positionally, landing in
    # solved_graph()'s `labels` dict parameter; pass it by keyword.
    solved_graph(df, 'failures', xlabel='Number of failures', ylabel=True, **kwargs)
    plt.subplot(1, 2, 2)
    solved_graph(df, 'time', xlabel='Time (ms)', **kwargs)
def print_graph_latex(plt_func, filename, width, height):
    # Render plt_func() into `<filename>.pdf` at width x height centimetres,
    # styled for inclusion in a LaTeX paper.
    def cm2inch(*tupl):
        # matplotlib figsize is in inches; accept cm2inch(w, h) or cm2inch((w, h)).
        inch = 2.54
        if isinstance(tupl[0], tuple):
            return tuple(i/inch for i in tupl[0])
        else:
            return tuple(i/inch for i in tupl)
    plt.style.use('seaborn-white')
    plt.style.use('seaborn-paper')
    pp = PdfPages(filename + '.pdf')
    plt.figure(figsize=cm2inch(width, height))
    plt_func()
    plt.tight_layout()
    plt.savefig(pp, format='pdf')
    pp.close()
|
[
"samuel.gagnon92@gmail.com"
] |
samuel.gagnon92@gmail.com
|
60d6e65af9f944e832c385a6b61c4c17b8af6a39
|
ca251e4375fcb48f12448d9cc096a2e3d1302859
|
/scraper/course.py
|
7f36a2ef443b0b5a81e6111b68256da2ce43bac4
|
[] |
no_license
|
kejinlu/itunesu-scraper
|
0ebaebc67713d67d5fb7900dc9546dc20b44cb6e
|
db755e840324807a49a6d0e1960e613aece0129a
|
refs/heads/master
| 2021-01-10T08:49:21.178384
| 2015-10-29T09:23:09
| 2015-10-29T09:23:09
| 45,172,663
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,032
|
py
|
from urllib.request import urlopen
from urllib.parse import urlparse
from bs4 import BeautifulSoup
__author__ = 'Luke'
class Course:
    """Scrape an iTunes U course page for its title, publisher, category,
    rating and the preview-video URLs of its episodes."""

    def __init__(self, url):
        html = urlopen(url)
        bs_html = BeautifulSoup(html.read(), "html.parser")
        self.title = bs_html.select_one("#title > div.left > h1").get_text(strip=True)
        self.publisher = bs_html.select_one("#title > div.left > h2").get_text(strip=True)
        self.category = bs_html.select_one("#left-stack > div.lockup.product.course.itunes-u > ul > li.genre > a > span").get_text(strip=True)
        self.rating = bs_html.select_one("#left-stack > div.extra-list.customer-ratings > div")["aria-label"]
        bs_video_trs = bs_html.find_all("tr", attrs={"class": "podcast-episode video"})
        # Bug fix: find_all() returns a (possibly empty) list, never None, so
        # the old `is not None` guard was always true and added nothing.
        # Always assign the attribute so callers can rely on it existing.
        self.video_urls = [bs_video_tr["video-preview-url"] for bs_video_tr in bs_video_trs]
# Demo: note this performs a network request at import time — consider
# moving it under an `if __name__ == '__main__':` guard.
c = Course("https://itunes.apple.com/cn/course/1.-logistics-ios-8-overview/id961180099?i=333886882&mt=2")
print(c.video_urls)
|
[
"kejinlu@gmail.com"
] |
kejinlu@gmail.com
|
09bcf995025392e1643a1fa07ce77a9170e3ef38
|
f1c9912377f94746fcc9066ae401cb52142122cb
|
/ABC081A-Placing_Marbles.py
|
471510dc6cf50f539c26be402964e1042d37759a
|
[] |
no_license
|
kkashieng/AtCoder_Beginners_Selection
|
e5aea5c5374d30520129dec97056c4d59ff4b08e
|
5929abc8e9dd880c8d1fcbccf2cd7a65a9052bfc
|
refs/heads/master
| 2023-05-12T14:52:22.600301
| 2021-05-29T02:57:12
| 2021-05-29T02:57:21
| 370,271,648
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 110
|
py
|
# ABC081A Placing Marbles: read a 3-character 0/1 string and print how many
# of the three positions are set (= sum of the three digits).
input_str = input()
total = int(input_str[0]) + int(input_str[1]) + int(input_str[2])
print(total)
|
[
"kkashi.engineer@gmail.com"
] |
kkashi.engineer@gmail.com
|
299affddd531b4f31bf702683ad0a1283bef8bcd
|
fd49e917e314c28dd7ad1aeea38f4d92adc3f616
|
/Padding_with_Edge_Values/MNIST_Network_Stretch_Compress_Resize.py
|
522589e3b892e636baf19ee0e8426f6472d50bac
|
[] |
no_license
|
pulindur/MNIST_Image-processing
|
0a6297b4a11fe56d98f797218691c475588e4056
|
763fdf2e37976e5f4a6a717d7037ac9bbd74fe6b
|
refs/heads/master
| 2021-01-21T11:00:25.060937
| 2017-05-18T19:04:14
| 2017-05-18T19:04:14
| 91,719,616
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,740
|
py
|
'''Trains a simple convnet on the MNIST dataset.
Gets to 99.25% test accuracy after 12 epochs
(there is still a lot of margin for parameter tuning).
16 seconds per epoch on a GRID K520 GPU.
'''
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib
import numpy as np
import cv2
from PIL import Image
from scipy.misc import imresize
from random import randint
# Training hyper-parameters.
batch_size = 10
num_classes = 10
epochs = 12
####################################################################
# Loading preprocessed Images (saved as .npy arrays by an earlier step)
train_img = np.load('train_img_2_2.npy')
train_lab = np.load('train_lab_2_2.npy')
test_img = np.load('test_img_2_2.npy')
test_lab = np.load('test_lab_2_2.npy')
####################################################################
# input image dimensions
img_rows, img_cols = 28, 28
# Reshape to the backend's expected layout: channels-first (1, H, W) for
# Theano-style backends, channels-last (H, W, 1) for TensorFlow.
if K.image_data_format() == 'channels_first':
    train_img = train_img.reshape(train_img.shape[0], 1, img_rows, img_cols)
    test_img = test_img.reshape(test_img.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    train_img = train_img.reshape(train_img.shape[0], img_rows, img_cols, 1)
    test_img = test_img.reshape(test_img.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
# Scale pixel values from [0, 255] into [0, 1].
train_img = train_img.astype('float32')
test_img = test_img.astype('float32')
train_img /= 255
test_img /= 255
print('x_train shape:', train_img.shape)
print(train_img.shape[0], 'train samples')
print(test_img.shape[0], 'test samples')
# convert class vectors to binary class matrices (one-hot encoding)
train_lab = keras.utils.to_categorical(train_lab, num_classes)
test_lab = keras.utils.to_categorical(test_lab, num_classes)
# Standard small MNIST convnet: two conv layers, max-pool, dropout, dense head.
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])
model.fit(train_img, train_lab,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(test_img, test_lab))
# Final held-out evaluation.
score = model.evaluate(test_img, test_lab, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
|
[
"noreply@github.com"
] |
noreply@github.com
|
f3097de81d59a4155e526536d2ae31e634e087bb
|
ddaa20f2ff0aaed4c6beeba888c4213405fdd586
|
/pypi_server/timeit.py
|
fe647520cadd0075e0453b61ab33572b87994218
|
[
"MIT"
] |
permissive
|
mosquito/pypi-server
|
689fb84dd0cc56a70c7bfa6157b8defa76d774d8
|
825571aae6fd17616e404ad8a9b72ef791a4fc46
|
refs/heads/master
| 2023-08-17T14:17:50.177008
| 2021-11-14T17:11:52
| 2021-11-14T17:11:52
| 47,583,364
| 129
| 58
|
MIT
| 2021-11-14T17:11:53
| 2015-12-07T22:30:53
|
Python
|
UTF-8
|
Python
| false
| false
| 899
|
py
|
# encoding: utf-8
import logging

from functools import wraps
from time import time
from concurrent.futures import Future

log = logging.getLogger(__name__)


def timeit(func):
    """Decorator that logs how long `func` takes to execute.

    Synchronous results are timed directly.  If `func` returns a
    concurrent.futures.Future, the elapsed time is logged when the future
    completes instead.  The wrapped function's return value is passed
    through unchanged.
    """
    def log_result(start_time):
        log.debug(
            'Time of execution function "%s": %0.6f',
            # Bug fix: plain functions always have a __class__ ("function"),
            # so the old module/__class__/name join produced names like
            # "mod.function.func".  __qualname__ already embeds the owning
            # class for methods, so use it directly.
            ".".join(filter(None, (func.__module__, func.__qualname__))),
            time() - start_time
        )

    @wraps(func)
    def wrap(*args, **kwargs):
        start_time = time()
        result = func(*args, **kwargs)
        if isinstance(result, Future):
            # Defer logging until the asynchronous result is actually ready.
            result.add_done_callback(lambda x: log_result(start_time))
        else:
            log_result(start_time)
        return result
    return wrap
|
[
"me@mosquito.su"
] |
me@mosquito.su
|
0e57035ef99cbd34871be4fba11fd76ce12ba2a4
|
6d15bb2fdf7459e6fa738c5d99bff2c3c4c77bb5
|
/five.py
|
fcadeac61986435a0ea293f4b25dc7f2544b3a4d
|
[] |
no_license
|
roziana-rdrgs/Python
|
84427b44138c3eaa30578465c02156a402db9427
|
829136a7fc0596a17d236b7b1bddc2b54459576e
|
refs/heads/master
| 2020-07-04T04:02:15.838316
| 2019-08-19T14:21:07
| 2019-08-19T14:21:07
| 202,148,896
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 376
|
py
|
'''Exercise: store 20 values in a compound structure, then swap the first
element with the last, the second with the next-to-last, the third with the
one before that, and so on.  Show the values after the swap.
(Swapping every pair end-to-end is exactly an in-place reversal.)'''
list_one = list(range(20))
print(list_one)
list_one.reverse()
print(list_one)
|
[
"noreply@github.com"
] |
noreply@github.com
|
85cda000e47e6c1d9a565c3a66425d714c4f4005
|
4f799c12c6e2a64d37e946f82caa3df60b941448
|
/Basic Syntax and Commands/ex9.py
|
733508cdfdb503734f2f950a2b7a58b15596cdf5
|
[] |
no_license
|
madhur3u/Python3
|
09725ac0ae702ce577806b7bebd885687d234415
|
49411964c405075817b02ba4d09d404908725dbe
|
refs/heads/main
| 2023-09-06T04:21:04.583582
| 2021-11-01T11:42:37
| 2021-11-01T11:42:37
| 402,317,485
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 246
|
py
|
# Demonstrates \n escapes and triple-quoted strings with print().
print('A\nB\nC\nD\nE\n')
# \n starts a new line inside an ordinary string literal.
print('''Hello
this can also be used rather than \\n
it will print as we write in code''')
# Triple quotes (single or double) preserve line breaks exactly as written.
|
[
"noreply@github.com"
] |
noreply@github.com
|
70af007d7f7ce4b7ee0a57b362e9ffa3749b1eb9
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/pAFxfge35bT3zj4Bs_24.py
|
fbbbfecf880247970104edc5a6cad8636470e2df
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 643
|
py
|
"""
Write a function that accepts `base` (decimal), `height` (decimal) and `shape`
("triangle", "parallelogram") as input and calculates the area of that shape.
### Examples
area_shape(2, 3, "triangle") ➞ 3
area_shape(8, 6, "parallelogram") ➞ 48
area_shape(2.9, 1.3, "parallelogram") ➞ 3.77
### Notes
* Area of a triangle is `0.5 * b * h`
* Area of a parallelogram is `b * h`
* Assume triangle and parallelogram are the only inputs for `shape`.
"""
def area_shape(base, height, shape):
area =()
if shape == "triangle":
area = 0.5*base*height
else:
area = base*height
return area
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
5832f7a40e054803e89e8aa6ce8edfbd875d9c18
|
0b6e079d5b24a49cad884d229c30af18b60a295c
|
/learning/unique_elements.py
|
ca50f046851145c855be3da42676d17b189c0ed5
|
[] |
no_license
|
ldocao/dataiku_census
|
c1b2bb362652250cad874eec9cdc3d5f50dd4a89
|
ef9547cd1ae9b83119b56c0645751f2b5c2a5e82
|
refs/heads/master
| 2021-01-10T09:17:26.316478
| 2015-10-02T14:38:29
| 2015-10-02T14:38:29
| 43,143,691
| 0
| 0
| null | 2015-09-30T15:38:58
| 2015-09-25T13:25:10
|
Python
|
UTF-8
|
Python
| false
| false
| 1,322
|
py
|
import numpy as np
import pandas as pd
import load_data as ld
import feature_engineering as feat
import ipdb
import matplotlib.pyplot as plt
from constants import PREDICTION_COLNAME
import utils
# Relative paths to the census training data and its metadata description.
training_file = "../data/census_income_learn.csv"
metadata_file = "../data/census_income_metadata.txt"
def unique_elements(list_a, list_b):
    """Return the elements that appear in only one of the two inputs,
    as a pair of sets: (only_in_a, only_in_b)."""
    first, second = set(list_a), set(list_b)
    only_in_first = first - second
    only_in_second = second - first
    return only_in_first, only_in_second
# Python 2 script body (bare `print` statements below).
# Load and clean the census data, then compare category values that occur
# only in the positive vs only in the negative prediction class.
df = ld.prepare_dataframe(training_file, metadata_file=metadata_file)
df = feat.drop_high_nan(df, threshold=0.3)
nrows = len(df)  # NOTE(review): unused below
colnames = df.columns.values
# NOTE(review): Series.is_numeric() is not a current pandas API — presumably
# supported by the pandas version this project pinned; verify.
is_categorical = np.array([not df[c].is_numeric() for c in colnames])
categorical_variables = list(colnames[is_categorical])
# Split rows by the boolean prediction column (True rows vs False rows).
df_true = df[df[PREDICTION_COLNAME].values]
df_false = df[df[PREDICTION_COLNAME].values == False]
compared_colnames = categorical_variables + ["age", "detailed industry recode", "detailed occupation recode"]  # NOTE(review): unused
for c in categorical_variables:
    unique_true, unique_false = unique_elements(df_true[c].values, df_false[c].values)
    print "======", c.upper()
    print unique_false
    print unique_true
|
[
"ldocao@gmail.com"
] |
ldocao@gmail.com
|
e1192f20c1443a8b2576b6617f3bfc99a1cfd2ed
|
ece9d19165e8f8c4f302b94cd19c577c3618f819
|
/collegelist1/urls.py
|
fa083cdb980eaa07757e7e7831a128926d8a7fa9
|
[] |
no_license
|
shreya3232/python_collegelist
|
82ab1fcccee6cf213b6cc286833b9a4674292039
|
2ddaf2e65d65fd3e5a3490b57550081209bac1e8
|
refs/heads/master
| 2023-06-01T11:26:01.764270
| 2021-06-27T12:24:17
| 2021-06-27T12:24:17
| 380,731,997
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 163
|
py
|
from django.contrib import admin
from django.urls import path, include
# Route the Django admin, and delegate every other URL to the polls app.
urlpatterns =[
    path('admin/', admin.site.urls),
    path('', include('polls.urls'))
]
|
[
"shreyasgowda2345@gmail.com"
] |
shreyasgowda2345@gmail.com
|
1a2055c82f8b2b3d858e86a5931e89220f739c3f
|
c3a84a07539c33040376f2c1e140b1a1041f719e
|
/wagtail-stubs/users/utils.pyi
|
6e8f3c6034fd36111d79271ece10284a95e4e72c
|
[] |
no_license
|
tm-kn/tmp-wagtail-stubs
|
cc1a4434b7142cb91bf42efb7daad006c4a7dbf4
|
23ac96406610b87b2e7751bc18f0ccd27f17eb44
|
refs/heads/master
| 2023-01-20T14:41:33.962460
| 2020-11-30T23:15:38
| 2020-11-30T23:15:38
| 317,332,280
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 293
|
pyi
|
from typing import Any
from wagtail.core.compat import AUTH_USER_APP_LABEL as AUTH_USER_APP_LABEL, AUTH_USER_MODEL_NAME as AUTH_USER_MODEL_NAME
# Permission identifier for the "delete user" action (opaque in this stub).
delete_user_perm: Any
# Whether current_user is allowed to delete user_to_delete
# (presumably returns a bool — the stub leaves the return untyped).
def user_can_delete_user(current_user: Any, user_to_delete: Any): ...
# Build a Gravatar image URL for the given email; size is the image size.
def get_gravatar_url(email: Any, size: int = ...): ...
|
[
"hi@tmkn.org"
] |
hi@tmkn.org
|
3325a09516701a4aa7c221f51bc6de2bc3ee490e
|
eae2ad130311853fb1e90b05312840e72f607da6
|
/05_Scale_Factor.py
|
5f878b107257b6adabaa1d70be88660a29b6d6b2
|
[] |
no_license
|
DCUniverse1990/01_Recipes
|
aedceb870517198ca17892b9a083803b02e28d62
|
52ce3ded879770fb266ec21090158c9acd08d06f
|
refs/heads/master
| 2020-12-27T22:11:39.213183
| 2020-03-17T21:42:26
| 2020-03-17T21:42:26
| 238,078,722
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,452
|
py
|
# Ingredients list
#
# Number Checking Function
def num_check(question):
    """Prompt until the user enters a number greater than zero, returning
    it as the raw input string; the exit code "xxx" (any case) is returned
    as "xxx" immediately.

    question -- prompt text passed to input().
    """
    error = "please enter a number that is more than zero"
    # The original kept a `valid` flag that was never set True — the loop
    # only ever exits via return, so `while True` states the real flow.
    while True:
        response = input(question)
        if response.lower() == "xxx":
            return "xxx"
        try:
            value = float(response)
        except ValueError:
            print(error)
            continue
        if value <= 0:
            print(error)
        else:
            return response
# Not blank Function goes here
def not_blank(question, error_msg, num_ok):
    """Prompt until a non-blank answer is entered and return it.

    question  -- prompt text passed to input().
    error_msg -- message printed for a rejected answer.
    num_ok    -- unless this is "yes", answers containing any digit are
                 also rejected.
    """
    # The original kept a `valid` flag that was never set True and scanned
    # characters by hand with `== True`; any() + `while True` is equivalent.
    while True:
        response = input(question)
        has_digit = num_ok != "yes" and any(ch.isdigit() for ch in response)
        if response == "" or has_digit:
            print(error_msg)
            continue
        return response
# Main Routine
# SECURITY FIX: the original used eval(input(...)), which executes arbitrary
# user-supplied code; float() accepts plain numbers only.
scale_Factor = float(input("Scale Factor: "))

# Set up empty ingredient list
Ingredients = []

# Loop asking the user for ingredients until the exit code "xxx" is typed
# (allowed only once at least two ingredients have been entered).
stop = ""
while stop != "xxx":
    amount = num_check("What is the amount for the ingredient? ")

    # If exit code is typed and the list is long enough, stop...
    if amount.lower() == "xxx" and len(Ingredients) > 1:
        break
    # ...otherwise insist on at least two ingredients.
    elif amount.lower() == "xxx" and len(Ingredients) < 2:
        print("You need at least two ingredients in the list. "
              "Please add more ingredients. ")
        continue
    # Exit code not entered: scale the amount and record the ingredient.
    else:
        # Ask user for ingredient name (via the not_blank function).
        get_Ingredient = not_blank("Please type in an ingredient name: ",
                                   "This cant be blank",
                                   "yes")
        amount = float(amount) * scale_Factor
        # Display whole amounts without a decimal point; otherwise show
        # one or two decimal places as needed.
        if amount % 1 == 0:
            amount = int(amount)
        elif amount * 10 % 1 == 0:
            amount = "{:.1f}".format(amount)
        else:
            amount = "{:.2f}".format(amount)
        Ingredients.append("{} units {}".format(amount, get_Ingredient))

# Output the scaled ingredient list
for item in Ingredients:
    print(item)
|
[
"51687475+DCUniverse1990@users.noreply.github.com"
] |
51687475+DCUniverse1990@users.noreply.github.com
|
9f8d3ee17be567dc042a06d5bb1c4b7c7da85158
|
cfdc0ace0ca25048d72e952481ca2a4efc01052e
|
/Synthetic_Unsup.py
|
975cf05f77cb0238626b3c164157ab12a90d9893
|
[] |
no_license
|
cmimlg/CBMDWHA
|
418291efd928e62831bec4961fac88d5992b2927
|
13c9667d033262acc080ce0a2d8a01af5c66bd7a
|
refs/heads/master
| 2021-09-05T21:12:59.987983
| 2018-01-31T02:48:35
| 2018-01-31T02:48:35
| 119,619,950
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,873
|
py
|
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from itertools import cycle
import pandas as pd
from sklearn.decomposition import IncrementalPCA
from sklearn.cluster import DBSCAN
import os
import logging
from sklearn import metrics
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import Birch
import matplotlib.cm as cm
from sklearn.decomposition import TruncatedSVD
import scipy.io
from skfeature.function.similarity_based import SPEC
from skfeature.utility import unsupervised_evaluation
from sklearn.datasets.samples_generator import make_blobs
from sklearn.metrics.cluster import adjusted_rand_score
from pyclustering.cluster.birch import birch
from sklearn.neighbors import NearestNeighbors
import operator
# create logger for the application
# NOTE: the handler is attached at import time — importing this module more
# than once would duplicate log output.
logger = logging.getLogger('CMDWABD Logger')
ch = logging.StreamHandler()
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
# DEBUG level so every logger.debug(...) call in this module is emitted.
logger.setLevel(logging.DEBUG)
def read_data():
    """Load the recoded synthetic dataset.

    Returns (df, gt_assgn): the full DataFrame and its ground-truth
    cluster column "C" as a Series.
    NOTE(review): cols_req includes "C", so the ground-truth label stays in
    the returned feature frame — get_sample() below removes it instead.
    Confirm which behaviour is intended before clustering on df.
    """
    os.chdir("/home/admin123/Clustering_MD/Paper/clustering.experiments/")
    fp = "Synthetic_Data_Recoded.csv"
    df = pd.read_csv(fp)
    gt_assgn = df["C"]
    cols_req = df.columns.tolist()
    df = df[cols_req]
    return df, gt_assgn
def do_mini_batch_km_range():
    """Run MiniBatchKMeans for k = 2..5 on the synthetic dataset, plot the
    Adjusted Rand Index against the ground truth for each k, and return
    the list of ARI values.
    NOTE(review): DataFrame.as_matrix() was removed in pandas 1.0 — this
    code assumes an older pandas; .values is the portable equivalent.
    """
    df, gt_assgn = read_data()
    X = df.as_matrix()
    krange = np.arange(start = 2, stop = 6, step = 1)
    ari_values = []
    for k in krange:
        # Compute clustering with MiniBatchKMeans (fixed random_state for
        # reproducibility of the k-means initialisation).
        print ("Calculating solution for k = " + str(k))
        mbk = MiniBatchKMeans(init='k-means++', n_clusters= k, batch_size= 2000,
                          n_init=10, max_no_improvement=10, verbose=0,
                          random_state=0)
        mbk.fit(X)
        clus_labels = mbk.labels_
        ari = adjusted_rand_score(gt_assgn, clus_labels)
        ari_values.append(ari)
    # Plot ARI versus k.
    plt.scatter(krange, ari_values, color = "red")
    plt.title("ARI Versus K - Synthetic Dataset, batch-size = 2000")
    plt.xlim([0,10])
    plt.xlabel("K")
    plt.ylabel("ARI")
    plt.grid()
    plt.show()
    return ari_values
def do_fs_mini_batch_km_range():
    """Same ARI-versus-k sweep as do_mini_batch_km_range, but clustering in
    a 2-component IncrementalPCA projection ("FE" = feature extraction).
    NOTE(review): unlike its sibling this returns None, not the ARI list —
    consider aligning the two.
    """
    df, gt_assgn = read_data()
    X = df.as_matrix()
    # Reduce to two principal components before clustering.
    ipca = IncrementalPCA(n_components = 2)
    X_ipca = ipca.fit_transform(X)
    del X
    krange = np.arange(start = 2, stop = 6, step = 1)
    ari_values = []
    for k in krange:
        # Compute clustering with MiniBatchKMeans.
        print ("Calculating solution for k = " + str(k))
        mbk = MiniBatchKMeans(init='k-means++', n_clusters= k, batch_size= 2000,
                          n_init=10, max_no_improvement=10, verbose=0,
                          random_state=0)
        mbk.fit(X_ipca)
        clus_labels = mbk.labels_
        ari = adjusted_rand_score(gt_assgn, clus_labels)
        ari_values.append(ari)
    # Plot ARI versus k for the feature-extracted data.
    plt.scatter(krange, ari_values, color = "red")
    plt.title("ARI Versus K With FE- Synthetic Dataset, batch-size = 2000")
    plt.xlim([0,10])
    plt.xlabel("K")
    plt.ylabel("ARI")
    plt.grid()
    plt.show()
    return
def do_mini_batch_kmeans():
    """Cluster the January-2016 airline delay data with MiniBatchKMeans
    (k=35), write the labelled rows to CSV, and plot the clusters in a
    2-component IncrementalPCA projection.
    """
    fp = "/home/admin123/Clustering_MD/Paper/clustering.experiments/"\
         "Jan_2016_Delays_Recoded.csv"
    df = pd.read_csv(fp)
    X = df.as_matrix()
    mbk = MiniBatchKMeans(init='k-means++', n_clusters= 35, batch_size=100,
                      n_init=10, max_no_improvement=10, verbose=0,
                      random_state=0)
    t0 = time()
    mbk.fit(X)
    t_mini_batch = time() - t0
    print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
    mbk_means_labels_unique = np.unique(mbk.labels_)  # NOTE(review): unused
    # Append the cluster label to each row and persist the result.
    df.loc[:,"Cluster"] = mbk.labels_
    fp_out = "/home/admin123/Clustering_MD/Paper/clustering.experiments/" \
             "jan2016_delay_data_clustered.csv"
    df.to_csv(fp_out, index = False)
    print("Done with Minibatch K-Means, starting incremental PCA...")
    # Project to 2-D purely for visualisation (clustering used all columns).
    ipca = IncrementalPCA(n_components = 2)
    X_ipca = ipca.fit_transform(X)
    # Use all colors that matplotlib provides by default.
    colors_ = cycle(colors.cnames.keys())
    ax = plt.gca()
    n_clusters = 35
    for this_centroid, k, col in zip(mbk.cluster_centers_,
                                 range(n_clusters), colors_):
        # this_centroid is unused; the zip only bounds k to n_clusters.
        mask = mbk.labels_ == k
        ax.plot(X_ipca[mask, 0], X_ipca[mask, 1], 'w', markerfacecolor=col, marker='.')
    ax.set_title("Mini Batch KMeans Airline Delay for January 2016")
    ax.set_xlabel("Principal Component 1")
    ax.set_ylabel("Principal Component 2")
    plt.grid()
    plt.show()
    return
def do_inc_pca():
    """Load the California-housing CSV and project it onto its first two
    principal components using whitened IncrementalPCA (batch_size=100).

    Returns (fitted_ipca, dataframe, projected_matrix).
    """
    csv_path = "/home/admin123/Big_Data_Paper_Code_Data/HD_problems/CaliforniaHousing/cal_housing.csv"
    frame = pd.read_csv(csv_path)
    reducer = IncrementalPCA(n_components=2, batch_size=100, whiten=True)
    projected = reducer.fit_transform(frame.as_matrix())
    return reducer, frame, projected
def do_DBSCAN():
    """Run DBSCAN (eps=0.4, min_samples=10) on the synthetic dataset and
    log the ARI against the ground truth and the cluster count."""
    df, gt_assgn = read_data()
    X = df.as_matrix()
    del df
    logger.debug("Starting DBSCAN on large dataset - " + str(X.shape[0]) + " rows!")
    db = DBSCAN(eps= 0.4, min_samples = 10)
    db = db.fit(X)
    logger.debug("Done with DBSCAN !")
    # Boolean mask of core samples.  NOTE(review): computed but never used.
    core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
    core_samples_mask[db.core_sample_indices_] = True
    labels = db.labels_
    ari = adjusted_rand_score(gt_assgn, labels)
    # -1 marks noise points, which do not count as a cluster.
    n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
    logger.debug("ARI is " + str(ari))
    logger.debug("Number of Clusters is : " + str(n_clusters))
    return
def do_GS_DBSCAN():
    """Grid-search DBSCAN hyper-parameters on a dataset sample.

    Sweeps 10 eps values in [0.01, 0.5] x 20 min_samples values in
    [5, 1000] and returns {"Eps = <e> n = <n>": ARI} for each combination.
    """
    df, gt_assgn = get_sample()
    X = df.as_matrix()
    del df
    eps_range = np.linspace(0.01, 0.5, 10)
    ns_range = np.linspace(5, 1000, 20, dtype = np.int)
    res_dict = dict()
    for e, n in [(e, n) for e in eps_range for n in ns_range]:
        logger.debug("running eps = " + str(e) + " n = " + str(n))
        db = DBSCAN(eps= e, min_samples = n)
        db = db.fit(X)
        logger.debug("Done with DBSCAN !")
        # NOTE(review): mask and n_clusters are computed but unused here.
        core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
        core_samples_mask[db.core_sample_indices_] = True
        labels = db.labels_
        ari = adjusted_rand_score(gt_assgn, labels)
        n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
        key = "Eps = " + str(e) + " n = " + str(n)
        res_dict[key] = ari
    return res_dict
def do_BIRCH(nc = 100):
    """Cluster the January-2016 delay data with BIRCH (nc clusters) in a
    2-component IncrementalPCA projection, log the Calinski-Harabasz
    score, and scatter-plot the clusters."""
    os.chdir("/home/admin123/Clustering_MD/Paper/clustering.experiments/")
    fp = "Jan_2016_Delays_Recoded.csv"
    df = pd.read_csv(fp)
    X = df.as_matrix()
    del df
    ipca = IncrementalPCA(n_components = 2)
    X_ipca = ipca.fit_transform(X)
    del X
    logger.debug("Starting BIRCH on large dataset - " + str(X_ipca.shape[0]) + " rows!")
    brc = Birch(branching_factor=50, n_clusters=nc,\
                threshold=0.25,compute_labels=True)
    brc = brc.fit(X_ipca)
    labels = brc.predict(X_ipca)
    logger.debug("Done with BIRCH !")
    chis = metrics.calinski_harabaz_score(X_ipca, labels)
    logger.debug("CH index score : " + str(chis))
    # NOTE(review): this `colors` array shadows the module-level
    # matplotlib.colors import inside this function.
    colors = cm.rainbow(np.linspace(0, 1, nc))
    ax = plt.gca()
    # NOTE(review): zip pairs per-SAMPLE labels with the nc colours, so only
    # the first nc samples' labels drive the plotting (masks may repeat) —
    # confirm whether iterating unique labels was intended.
    for l,c in zip(labels, colors):
        mask = labels == l
        ax.plot(X_ipca[mask, 0], X_ipca[mask, 1], 'w',\
                markerfacecolor=c , marker='.')
    ax.set_title("BIRCH Airline Delay for January 2016")
    ax.set_xlabel("Principal Component 1")
    ax.set_ylabel("Principal Component 2")
    plt.grid()
    plt.show()
    return
def do_BIRCH_K_Range(low = 2, high = 6):
    """Run BIRCH for k = low..high-1 on the synthetic dataset, plot ARI
    versus k, and return {k: ARI}."""
    df, gt_assgn = read_data()
    X = df.as_matrix()
    logger.debug("Starting BIRCH on large dataset - " +\
                 str(X.shape[0]) + " rows!")
    kvals = np.arange(start = low, stop = high, step = 1)
    ari_dict = dict()
    for k in kvals:
        logger.debug("Running k = " + str(k))
        brc = Birch(n_clusters=k, threshold= 1.0)
        brc = brc.fit(X)
        logger.debug("Done fitting !")
        clus_labels = brc.labels_
        logger.debug("Done with BIRCH !")
        ari = adjusted_rand_score(gt_assgn, clus_labels)
        ari_dict[k] = ari
    logger.debug("Done with BIRCH!")
    ax = plt.gca()
    # NOTE(review): passing dict views to scatter — wrap in list() if a
    # newer matplotlib rejects them.
    plt.scatter(ari_dict.keys(), ari_dict.values(), color = "red")
    ax.set_title("ARI versus K - BIRCH")
    ax.set_xlabel("K")
    ax.set_ylabel("ARI")
    plt.grid()
    plt.show()
    return ari_dict
def do_FS_DBSCAN():
    """Run DBSCAN (eps=0.05, min_samples=100) on the PCA-reduced delay data
    from get_data() and log the Calinski-Harabasz score and cluster count."""
    X_ipca = get_data()
    logger.debug("Starting DBSCAN on large dataset - " + str(X_ipca.shape[0]) + " rows!")
    db = DBSCAN(eps= 0.05, min_samples = 100, n_jobs = 2)
    db = db.fit(X_ipca)
    logger.debug("Done with DBSCAN !")
    # NOTE(review): core-sample mask computed but never used.
    core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
    core_samples_mask[db.core_sample_indices_] = True
    labels = db.labels_
    # Number of clusters in labels, ignoring noise if present.
    n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
    chis = metrics.calinski_harabaz_score(X_ipca, labels)
    logger.debug("CH index score : " + str(chis))
    logger.debug("Number of Clusters : " + str(n_clusters_))
    return
def get_data():
    """Load the January-2016 delay CSV and return it reduced to two
    whitened principal components via IncrementalPCA."""
    os.chdir("/home/admin123/Clustering_MD/Paper/clustering.experiments/")
    raw = pd.read_csv("Jan_2016_Delays_Recoded.csv").as_matrix()
    reducer = IncrementalPCA(n_components=2, whiten=True)
    return reducer.fit_transform(raw)
def gen_data(write = True):
    """Generate the synthetic mixed-type dataset: three Gaussian blobs
    (columns N1, N2, label C) plus two categorical columns C1/C2 drawn
    conditionally on C.  Optionally writes Syn_Mixed_Data.csv; returns df.
    """
    os.chdir("/home/admin123/Clustering_MD/Paper/clustering.experiments/")
    fp = "Syn_Mixed_Data.csv"
    # random_state=0 makes the blob coordinates reproducible (the C1/C2
    # draws below still use the global numpy RNG).
    X, y = make_blobs(n_samples= int(1e6), centers=3,\
                      n_features=2, random_state = 0)
    df = pd.DataFrame(X)
    df["C"] = y
    df.columns = ["N1", "N2", "C"]
    df["C1"] = df.apply(set_c1vals, axis = 1)
    df["C2"] = df.apply(set_c2vals, axis = 1)
    if write:
        df.to_csv(fp, index = False)
    return df
def set_c1vals(row):
    """Draw a categorical value for synthetic column C1.

    The three-way multinomial puts p=0.7 on the category matching the
    row's cluster label row["C"] and p=0.15 on each of the others.
    """
    labels = ["CAT_VAR1_VAL_" + str(i + 1) for i in range(3)]
    cluster = row["C"]
    if cluster == 0:
        pvals = (0.7, 0.15, 0.15)
    elif cluster == 1:
        pvals = (0.15, 0.7, 0.15)
    else:
        pvals = (0.15, 0.15, 0.7)
    draw = np.random.multinomial(1, pvals, 1)
    return labels[int(np.argmax(draw))]
def set_c2vals(row):
    """Draw a categorical value for synthetic column C2.

    Each cluster label row["C"] has its own asymmetric three-way
    multinomial, peaked (p=0.8) on the matching category.
    """
    labels = ["CAT_VAR2_VAL_" + str(i + 1) for i in range(3)]
    pvals_by_cluster = {
        0: (0.8, 0.15, 0.05),
        1: (0.05, 0.8, 0.15),
    }
    pvals = pvals_by_cluster.get(row["C"], (0.15, 0.05, 0.8))
    draw = np.random.multinomial(1, pvals, 1)
    return labels[int(np.argmax(draw))]
def get_sample(frac = 0.01):
    """Draw a random fraction of the synthetic dataset.

    Returns (features, ground_truth): the sampled rows with the
    ground-truth column "C" removed, and that column as a Series.
    The draw is unseeded, so repeated calls give different samples.
    """
    os.chdir("/home/admin123/Clustering_MD/Paper/clustering.experiments/")
    sample = pd.read_csv("Synthetic_Data_Recoded.csv").sample(frac = frac)
    ground_truth = sample["C"]
    feature_cols = [col for col in sample.columns.tolist() if col != "C"]
    features = sample[feature_cols]
    return features, ground_truth
def gen_knn_graph():
    """Plot each sampled point's distance to its 10th nearest neighbour,
    sorted in descending order (a k-distance plot — commonly used to pick
    a DBSCAN eps; presumably that is its purpose here)."""
    df, lbl = get_sample()
    X = df.as_matrix()
    nbrs = NearestNeighbors(n_neighbors=10, algorithm='ball_tree').fit(X)
    dist, _ = nbrs.kneighbors(X)
    # Column 9 is the distance to the 10th nearest neighbour.
    kng = {i:dist[i,9] for i in range(X.shape[0])}
    sorted_kng = sorted(kng.items(), key=operator.itemgetter(1),\
                        reverse = True)
    vals = [v[1] for v in sorted_kng]
    inds = [ i for i in range(len(vals))]
    plt.scatter(inds, vals, color = "red")
    plt.grid()
    plt.show()
    return
|
[
"noreply@github.com"
] |
noreply@github.com
|
5ecce4c7bc6f82cb036331ca45fb67166154c4e5
|
bbe53d0171efbc78ca43f409b4a5235df51f36fa
|
/learning/djangoLearning/django-start/mysite/dev_settings.py
|
24dea2919335488abf7ac20fb0a273ed26c2b821
|
[] |
no_license
|
brianwang/gftop
|
2758ec93e326ba5e801af48f951c73b5761bb25d
|
12a48eafb5114da325515fce4b97e744638e6faf
|
refs/heads/master
| 2021-01-12T08:16:43.816679
| 2012-12-12T16:25:29
| 2012-12-12T16:25:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 784
|
py
|
from os import getcwd
# Development-only settings overlay for "mysite".
# NOTE(review): the debug flag, admin password, and secret-free config below
# are suitable only for local development — never for production.
# Base dir depends on the directory the process is started from.
MYSITE_BASE_DIR = getcwd()
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# Bootstrap superuser created for local development (hard-coded password).
SUPER_USER_NAME = 'admin'
SUPER_USER_EMAIL = 'admin@test.com'
SUPER_USER_PASSWORD = 'admin'
SITE_URL = '127.0.0.1:8000'
SITE_NAME = 'MySite'
DATABASE_ENGINE = 'sqlite3'           # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = 'mysite.db'             # Or path to database file if using sqlite3.
DATABASE_USER = ''             # Not used with sqlite3.
DATABASE_PASSWORD = ''         # Not used with sqlite3.
DATABASE_HOST = ''             # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = ''             # Set to empty string for default. Not used with sqlite3.
EMAIL_HOST = 'localhost'
# EMAIL_PORT =
# EMAIL_HOST_USER =
# EMAIL_HOST_PASSWORD =
|
[
"gaofeitop@0700f6e2-4af4-11de-a39b-05eb13f3dc65"
] |
gaofeitop@0700f6e2-4af4-11de-a39b-05eb13f3dc65
|
e3a39fc46f0ee31bcf4cbf6f4eef75379c9eb87e
|
8242f7c33e37db242a6a839cccd6a48b79ddbfa9
|
/erase/main.py
|
71861978925ebc25fc1531a3c889e94202072e29
|
[] |
no_license
|
elitan/open-kattis
|
d2be23868f3be6613bcbf4e9381a30f283199082
|
7bec84b054c639ed3d534671bfc0f57dee289d27
|
refs/heads/master
| 2021-01-17T08:51:46.340776
| 2016-10-10T19:17:52
| 2016-10-10T19:17:52
| 65,326,686
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 400
|
py
|
import fileinput
import sys
# Python 2 script (raw_input; map() returns a list here).
# s_2 must equal s_1 with each bit's parity shifted by n: bit b -> (b + n) % 2.
n = int(raw_input()) # number of operations — IS used for the parity below
s_1 = map(int, list(raw_input().rstrip()))
s_2 = map(int, list(raw_input().rstrip()))
zero = (0 + n) % 2  # what a 0 bit becomes
one = (1 + n) % 2   # what a 1 bit becomes
converter = {0: zero, 1: one}
c = 0
s_len = len(s_1)
while c < s_len:
    if converter[s_1[c]] != s_2[c]:
        print("Deletion failed")
        sys.exit()
    c += 1
print("Deletion succeeded")
|
[
"johan@eliasson.me"
] |
johan@eliasson.me
|
f6596548b7fa5559711759789d920ec0d921df4d
|
c30d4f174a28aac495463f44b496811ee0c21265
|
/python/testData/inspections/PyChainedComparisonsInspectionWithConstantInTheMiddle/test.py
|
528cf139bbeb40defe6d8ab5676c7da1bb0c5e48
|
[
"Apache-2.0"
] |
permissive
|
sarvex/intellij-community
|
cbbf08642231783c5b46ef2d55a29441341a03b3
|
8b8c21f445550bd72662e159ae715e9d944ba140
|
refs/heads/master
| 2023-05-14T14:32:51.014859
| 2023-05-01T06:59:21
| 2023-05-01T06:59:21
| 32,571,446
| 0
| 0
|
Apache-2.0
| 2023-05-01T06:59:22
| 2015-03-20T08:16:17
|
Java
|
UTF-8
|
Python
| false
| false
| 51
|
py
|
# PY-16397
# Fixture for PyChainedComparisonsInspection (constant in the middle, per the
# test-data path): x > 0 and y < 0 compares two different variables against
# constants and presumably must not be reported as a chained comparison.
x, y = 2, 3
if x > 0 and y < 0:
    pass
|
[
"valentina.kiryushkina@jetbrains.com"
] |
valentina.kiryushkina@jetbrains.com
|
d441dd65e1ae7d648ca833ebfdc5cc1f7de0ed35
|
05566ffcd009af2e862d531bd299e2e363d104a3
|
/havi_task/settings.py
|
6f5b267ddba9aff826948b30f7fffef106e5810c
|
[] |
no_license
|
dhruv970331/havi_task
|
97d996704853297ccc3edff5e90688ba0351e6f9
|
621ee4d188af5d2ab45d857ae2cfcbd016fab0c5
|
refs/heads/master
| 2022-09-07T22:42:01.681728
| 2020-05-21T18:44:33
| 2020-05-21T18:44:33
| 265,439,384
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,365
|
py
|
"""
Django settings for havi_task project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and load
# from the environment before deploying.
SECRET_KEY = '9-ht%zt%r6y5dy87f+3na**ot3!hhbi7ks-=8j-3cw(_h7%ai)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'phonenumber_field',
    'job_application'
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'havi_task.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR,"templates")],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'havi_task.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# NOTE(review): hard-coded local PostgreSQL credentials — move to environment
# variables for any shared or production deployment.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'havi_task',
        'USER': 'postgres',
        'PASSWORD': 'postgres',
        'HOST': 'localhost',
        'PORT': '5432',
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
# After login/logout, redirect to the job_application app's home view.
LOGIN_REDIRECT_URL = 'job_application:home'
LOGOUT_REDIRECT_URL = 'job_application:home'
|
[
"dhruvesh.malhotra@botreetechnologies.com"
] |
dhruvesh.malhotra@botreetechnologies.com
|
8dfed265be1c06ea8f35ebbd00266dfe63dac901
|
78ddf555db358f9683db00bf49b3d1a45cd998a9
|
/forum/migrations/0021_chatmessage_sort.py
|
d75812cc0e448e53e140658de19f30fed144c366
|
[] |
no_license
|
azeez010/AfriconnForum
|
9e0aa7a6abb40f20ac0739b9cadc75da89e91110
|
1b597850208c3caef474b9412bdf02dfefd1a30b
|
refs/heads/master
| 2022-12-19T12:49:09.389062
| 2020-09-19T14:40:48
| 2020-09-19T14:40:48
| 298,311,404
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 385
|
py
|
# Generated by Django 3.0.6 on 2020-08-11 18:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration: add an integer 'sort' field
    (default 0) to the forum.ChatMessage model."""

    # Must be applied after the members-description migration.
    dependencies = [
        ('forum', '0020_members_description'),
    ]
    operations = [
        migrations.AddField(
            model_name='chatmessage',
            name='sort',
            field=models.IntegerField(default=0),
        ),
    ]
|
[
"dataslid@gmail.com"
] |
dataslid@gmail.com
|
5832177f7caf30f5ebbf643fdb28c66b5706991a
|
6774b2eddc2d530a4d1a802f42ce81693bf8063d
|
/keplerian_classical_orbital_elements.py
|
5bedeaea0d7adf571b0e5147c49c1b39d2c8f989
|
[
"MIT"
] |
permissive
|
jasonody/orbital-mechanics
|
eb8dbc240f868e851a2cc93e4fc407b982bc1fb0
|
abe50da66c1598d1c31f6f3a4faf313e6cdebc7c
|
refs/heads/master
| 2022-11-06T17:54:25.750415
| 2020-06-25T03:32:34
| 2020-06-25T03:32:34
| 270,379,729
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,181
|
py
|
import planetary_data as pd
import tools as t
from OrbitPropagatorKep import OrbitPropagator as OP
# Simulate one day of orbital motion with a 10-second integration step.
tspan = 3600 * 24 * 1.0
dt = 10.0
cb = pd.earth
if __name__ == '__main__':
	# coes: a (pd radius + average of perigee and apogee), e (eccentricity), i (inclination), ta (true anomoly), aop (arguement of perigee), raan (right ascension of ascending node)
	# ISS (low Earth orbit)
	# Find orbital elements here: https://www.heavens-above.com/orbit.aspx?satid=25544
	c0 = [cb['radius'] + 414.0, 0.0002136, 51.6417, 0.0, 45.6438, 356.6431, 'date']
	# geostationary orbit
	c1 = [cb['radius'] + 35800.0, 0.0, 0.0, 0.0, 0.0, 0.0, 'date']
	# random orbit
	c2 = [cb['radius'] + 3000.0, 0.3, 20.0, 0.0, 15.0, 40.0, 'date']
	# Hubble space telescope
	# https://www.heavens-above.com/orbit.aspx?satid=20580
	c3 = [cb['radius'] + 538.0, 0.0002727, 28.4742, 0.0, 68.4651, 337.8887, 'date']
	# Propagate each orbit from its classical orbital elements (coes=True).
	op0 = OP(c0, tspan, dt, coes=True)
	op1 = OP(c1, tspan, dt, coes=True)
	op2 = OP(c2, tspan, dt, coes=True)
	op3 = OP(c3, tspan, dt, coes=True)
	# Plot all four position histories (.rs) on one figure.
	t.plot_n_orbits([op0.rs, op1.rs, op2.rs, op3.rs], labels=['ISS', 'GEO', 'Random', 'Hubble'], show_plot=True)
|
[
"jasonody@users.noreply.github.com"
] |
jasonody@users.noreply.github.com
|
76659e18cdb1432d1c91a30be7eeb85f667e9f96
|
2befb6f2a5f1fbbd5340093db43a198abdd5f53b
|
/pythonProject/FBVPermission/FBVPermission/settings.py
|
a64ea42a4ed445c5878381f3f08aa7ccabac8eb3
|
[] |
no_license
|
JanardanPandey/RestAPI
|
1956d3529782d18ef2118961f6286e3213665aad
|
654933a4d9687076a00c6f4c57fc3dfee1a2c567
|
refs/heads/master
| 2023-06-14T07:02:31.702000
| 2021-07-02T07:50:59
| 2021-07-02T07:50:59
| 382,357,537
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,298
|
py
|
"""
Django settings for FBVPermission project.
Generated by 'django-admin startproject' using Django 3.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-)ys_5umafpe$al@h)y*&q^gt$#m-=d%%fztc=rfl!2xykh(@n*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.api',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'FBVPermissionApp',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.api.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'FBVPermission.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.api.context_processors.api',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'FBVPermission.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.api.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.api.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.api.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.api.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"janardanpandey0510@gmail.com"
] |
janardanpandey0510@gmail.com
|
695ea8c157f1aaf3380cd1020e37aa6fa63cde66
|
4540da66e67c3e8908f4aeedd49f2efdec21d110
|
/python/animate.py
|
2542b92183bf3c74ab3e98e552fc8cae4198eb33
|
[
"MIT"
] |
permissive
|
mayghalV/planets-sim
|
6d2e7163955efe4b0cc006372e792b37ca24b560
|
7b52aa1804663d0764b6389783183a62a0655daa
|
refs/heads/master
| 2022-11-30T00:25:54.527513
| 2020-07-28T07:32:28
| 2020-07-28T07:32:28
| 275,545,186
| 0
| 0
|
MIT
| 2020-07-06T07:22:27
| 2020-06-28T08:53:27
|
Rust
|
UTF-8
|
Python
| false
| false
| 3,119
|
py
|
"""
Note to update planet_sim.pyd:
1. run 'cargo build --release
2. (on Windows) Copy target/release/planet_sim.dll to python/
3. Rename the extension .dll to .pyd
"""
from typing import Dict, List, Tuple
import numpy as np
import planet_sim
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import json
# Set True to dump every simulated time step's positions to stdout.
print_flag = False
config_file = 'config/config_orbit.json'
# Run the Rust simulator; result is a list of per-time-step snapshots,
# each with .time and .positions (planet id -> object with .x/.y).
result = planet_sim.read_config_and_simulate_system(config_file)
if print_flag:
    for r in result:
        print(f'Time: {r.time}')
        print(r.positions)
        for k, v in r.positions.items():
            print(f'{k}: x={v.x}, y={v.y}')
# Re-read the config to recover the planet ids, preserving config order.
with open(config_file) as f:
    d = json.load(f)
planet_names = [planet_config['id'] for planet_config in d['planets']]
def build_planet_position_lists(time_positions, planet_names):
    """
    Reformat the simulator output for matplotlib.

    Returns {planet_name: {'x': [...], 'y': [...]}} where each list holds
    that planet's coordinate at successive time steps.
    """
    reshaped = {}
    for name in planet_names:
        xs, ys = [], []
        for snapshot in time_positions:
            point = snapshot.positions[name]
            xs.append(point.x)
            ys.append(point.y)
        reshaped[name] = {'x': xs, 'y': ys}
    return reshaped
position_lists = build_planet_position_lists(result, planet_names)

fig, ax = plt.subplots()
# Palette, one colour per planet (indexed by config order).
colors = ['firebrick',
          'limegreen',
          'dodgerblue',
          'gold',
          'darkorange',
          'midnightblue',  # BUG FIX: missing comma here used to fuse this
          'darkviolet',    # entry with 'darkviolet' into the invalid colour
          'deeppink'       # name 'midnightbluedarkviolet'.
          ]
# Build the initial artist objects (marker, trail line, label) that the
# animation callback will update in place.
points = []
lines = []
annotations = []
min_x, max_x = 0, 0
min_y, max_y = 0, 0
for i, planet_name in enumerate(planet_names):
    x = position_lists[planet_name]['x'][0]
    y = position_lists[planet_name]['y'][0]
    # Add planet point
    p, = ax.plot(x, y, marker='o', color=colors[i])
    points.append(p)
    # Add trailing line
    l, = ax.plot(x, y, marker='', color=colors[i], alpha=0.5)
    lines.append(l)
    # Add annotation
    a = ax.annotate(planet_name, (x, y), fontsize=8)
    annotations.append(a)
# Fix the axis limits to the full extent of every trajectory.
# NOTE(review): the limits start at 0, so the origin is always included even
# if all trajectories lie away from it — confirm that is intended.
for x_y_dict in position_lists.values():
    min_x = min(min_x, min(x_y_dict['x']))
    max_x = max(max_x, max(x_y_dict['x']))
    min_y = min(min_y, min(x_y_dict['y']))
    max_y = max(max_y, max(x_y_dict['y']))
ax.set_ylim([min_y, max_y])
ax.set_xlim([min_x, max_x])
def animate(frame):
    """Move every planet's marker, trail, and label to timestep *frame*."""
    for idx, name in enumerate(planet_names):
        px = position_lists[name]['x'][frame]
        py = position_lists[name]['y'][frame]
        points[idx].set_data(px, py)
        trail_x = position_lists[name]['x'][:frame + 1]
        trail_y = position_lists[name]['y'][:frame + 1]
        lines[idx].set_data(trail_x, trail_y)
        annotations[idx].set_position((px, py))
# Animate the plot and show: one frame per simulated timestep,
# interval in milliseconds between frames.
line_ani = animation.FuncAnimation(fig, animate, len(result), interval=1)
plt.show()
|
[
"mayghalvijapura@live.co.uk"
] |
mayghalvijapura@live.co.uk
|
8e9763cb78dec4a5b44a07cf246af0b20cd8087e
|
cf5b2850dc9794eb0fc11826da4fd3ea6c22e9b1
|
/xlsxwriter/test/worksheet/test_write_sheet_views2.py
|
e53240875a8168457b6d934d3a2a907b87e127ae
|
[
"BSD-2-Clause"
] |
permissive
|
glasah/XlsxWriter
|
bcf74b43b9c114e45e1a3dd679b5ab49ee20a0ec
|
1e8aaeb03000dc2f294ccb89b33806ac40dabc13
|
refs/heads/main
| 2023-09-05T03:03:53.857387
| 2021-11-01T07:35:46
| 2021-11-01T07:35:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,446
|
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
import unittest
from io import StringIO
from ...worksheet import Worksheet
class TestWriteSheetViews(unittest.TestCase):
    """
    Test the Worksheet _write_sheet_views() method.

    Each case freezes panes in a different configuration and compares the
    exact <sheetViews> XML written to an in-memory filehandle.
    """
    def setUp(self):
        # The worksheet writes its XML to this StringIO so tests can read it back.
        self.fh = StringIO()
        self.worksheet = Worksheet()
        self.worksheet._set_filehandle(self.fh)
    def test_write_sheet_views1(self):
        """Test the _write_sheet_views() method with freeze panes"""
        # Freeze the first row only -> single ySplit, bottomLeft active pane.
        self.worksheet.select()
        self.worksheet.freeze_panes(1, 0)
        self.worksheet._write_sheet_views()
        exp = '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane ySplit="1" topLeftCell="A2" activePane="bottomLeft" state="frozen"/><selection pane="bottomLeft"/></sheetView></sheetViews>'
        got = self.fh.getvalue()
        self.assertEqual(got, exp)
    def test_write_sheet_views2(self):
        """Test the _write_sheet_views() method with freeze panes"""
        # Freeze the first column only -> single xSplit, topRight active pane.
        self.worksheet.select()
        self.worksheet.freeze_panes(0, 1)
        self.worksheet._write_sheet_views()
        exp = '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane xSplit="1" topLeftCell="B1" activePane="topRight" state="frozen"/><selection pane="topRight"/></sheetView></sheetViews>'
        got = self.fh.getvalue()
        self.assertEqual(got, exp)
    def test_write_sheet_views3(self):
        """Test the _write_sheet_views() method with freeze panes"""
        # Freeze row and column -> both splits plus a selection per sub-pane.
        self.worksheet.select()
        self.worksheet.freeze_panes(1, 1)
        self.worksheet._write_sheet_views()
        exp = '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane xSplit="1" ySplit="1" topLeftCell="B2" activePane="bottomRight" state="frozen"/><selection pane="topRight" activeCell="B1" sqref="B1"/><selection pane="bottomLeft" activeCell="A2" sqref="A2"/><selection pane="bottomRight"/></sheetView></sheetViews>'
        got = self.fh.getvalue()
        self.assertEqual(got, exp)
    def test_write_sheet_views4(self):
        """Test the _write_sheet_views() method with freeze panes"""
        # Same as test3 but specified with an A1-style cell reference.
        self.worksheet.select()
        self.worksheet.freeze_panes('G4')
        self.worksheet._write_sheet_views()
        exp = '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane xSplit="6" ySplit="3" topLeftCell="G4" activePane="bottomRight" state="frozen"/><selection pane="topRight" activeCell="G1" sqref="G1"/><selection pane="bottomLeft" activeCell="A4" sqref="A4"/><selection pane="bottomRight"/></sheetView></sheetViews>'
        got = self.fh.getvalue()
        self.assertEqual(got, exp)
    def test_write_sheet_views5(self):
        """Test the _write_sheet_views() method with freeze panes"""
        # Explicit top-left cell + type=1 -> "frozenSplit" pane state.
        self.worksheet.select()
        self.worksheet.freeze_panes(3, 6, 3, 6, 1)
        self.worksheet._write_sheet_views()
        exp = '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane xSplit="6" ySplit="3" topLeftCell="G4" activePane="bottomRight" state="frozenSplit"/><selection pane="topRight" activeCell="G1" sqref="G1"/><selection pane="bottomLeft" activeCell="A4" sqref="A4"/><selection pane="bottomRight"/></sheetView></sheetViews>'
        got = self.fh.getvalue()
        self.assertEqual(got, exp)
|
[
"jmcnamara@cpan.org"
] |
jmcnamara@cpan.org
|
eaddacbc2a44c1225dfdff93ef10b03dbe73735a
|
50bd857f8869e76b1bb086e227a0eb2195e2a0ad
|
/2020/강의/06-탐색/새.py
|
7c85df30ebf162f9fbea8e3b48a920ba04d1199f
|
[] |
no_license
|
imacoolgirlyo/Algorithm
|
0c64ad0286056ab53e2ae673e400be290d7490c6
|
9cfa897bb9eb8733b442b0f1d55a7d8ece8b8091
|
refs/heads/master
| 2022-12-07T12:20:06.167375
| 2022-12-02T14:10:06
| 2022-12-02T14:10:06
| 234,060,720
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
def count_feedings(grains):
    """Return how many feedings it takes to use up *grains* grains.

    Each feeding gives 1, 2, 3, ... grains in turn; whenever the next
    portion would exceed what is left, the portion resets to 1.
    e.g. 14 -> 14-1, 13-2, 11-3, 8-4, 4-1, 3-2, 1-1 = 7 feedings.
    """
    rest = grains
    portion = 1
    feedings = 0
    while rest > 0:
        if portion > rest:
            portion = 1
        rest -= portion
        portion += 1
        feedings += 1
    return feedings


# Guarded so the module can be imported (and the function unit-tested)
# without blocking on stdin; the original ran input() at import time.
if __name__ == '__main__':
    print(count_feedings(int(input())))
|
[
"tjrud1438@naver.com"
] |
tjrud1438@naver.com
|
eec13eec5d10278982bc45147f08f50b093ed311
|
dabc981714dd9297e811355fbb2f9f9a45c2281f
|
/board/views/_update_prices_form_save_view.py
|
f93a6b11355db636b1b5013068e659924677e47b
|
[] |
no_license
|
pmisters/django-code-example
|
4c9c8c7edb174875ae4df4d32ae73b0897fc2333
|
745ac9d0c89d8ee44885cc862882d6c4d13993a0
|
refs/heads/master
| 2023-01-07T11:55:55.670943
| 2020-11-14T11:14:19
| 2020-11-14T11:14:19
| 312,801,074
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,001
|
py
|
import datetime
import re
from typing import Any, Dict, TYPE_CHECKING
from django import http
from django.views.generic import View
from board.permissions import Permissions
from board.usecases import UpdateReservationPrices
from board.value_objects import ReservationErrors, ReservationUpdateEvent
from common import functions as cf
from common.i18n import translate as _
from common.loggers import Logger
from common.mixins import AjaxServiceMixin
if TYPE_CHECKING:
from board.entities import Reservation
class UpdatePricesFormSaveView(AjaxServiceMixin, View):
    """AJAX endpoint that saves the per-day price form of a reservation."""

    http_method_names = ['post']
    permissions = [Permissions.RESERVATION_UPDATE]

    def post(self, request: http.HttpRequest, *args: Any, **kwargs: Any) -> http.HttpResponse:
        """Validate the selected rate plan, then run the update use case."""
        form_data = request.POST.dict()  # noqa
        rate_plan_id = cf.get_int_or_none(form_data.get('rate_plan')) or 0
        if rate_plan_id <= 0:
            return self.json_error(_('agenda:plan:error'))
        return self.process_usecase(
            UpdateReservationPrices,
            self.kwargs['hid'],
            self.kwargs['pk'],
            self.kwargs['rid'],
            rate_plan_id,
            request.user,
            self.parse_price_data(form_data),
        )

    def render_success(self, ctx: 'Reservation' = None, **kwargs) -> http.HttpResponse:
        """Broadcast the reservation change and acknowledge the request."""
        ReservationUpdateEvent.dispatch(
            house_id=ctx.house_id,
            pk=ctx.id,
            start_date=ctx.checkin,
            end_date=ctx.checkout,
            user_id=self.request.user.id,
        )
        return self.json_success()

    def render_failure(self, failure: Any, **kwargs) -> http.HttpResponse:
        """Map use-case failure codes onto HTTP / JSON error responses."""
        Logger.warning(__name__, failure)
        code = failure.failure
        if code in (ReservationErrors.missed_house, ReservationErrors.missed_reservation):
            raise http.Http404(failure.error)
        if code == ReservationErrors.missed_rateplan:
            return self.json_error(_('agenda:plan:error'))
        if code == ReservationErrors.wrong_period:
            return self.json_error(_('agenda:error:ota_update_period'))
        return http.HttpResponseServerError()

    @staticmethod
    def parse_price_data(data: Dict[str, Any]) -> Dict[datetime.date, Dict[str, Any]]:
        """Collect ``room[YYYY-MM-DD]`` / ``price[YYYY-MM-DD]`` form fields
        into one ``{'room', 'price', 'day'}`` record per calendar day."""
        parsed: Dict[datetime.date, Dict[str, Any]] = {}
        date_pattern = re.compile(r'\[(\d{4}-\d{2}-\d{2})]')
        for field, raw_value in data.items():
            is_room = field.startswith('room[')
            is_price = field.startswith('price[')
            if not (is_room or is_price):
                continue
            matched = date_pattern.search(field)
            if matched is None:
                continue
            day = cf.get_date_or_none(matched.group(1))
            if day is None:
                continue
            record = parsed.setdefault(day, {'room': None, 'price': None, 'day': day})
            if is_room:
                record['room'] = cf.get_int_or_none(raw_value)
            else:
                record['price'] = cf.get_decimal_or_none(raw_value)
        return parsed
|
[
"pavel@knigun.com"
] |
pavel@knigun.com
|
f0e31d42a89688c442c70186387712ca2f1e8ca3
|
2dc30a70bd995f1038d8a6f200de25a775153a82
|
/accepted_codes/UJ-3685166-src.py
|
2310f943b7fc3082373e7c6f6f2194642c799395
|
[] |
no_license
|
pratyush-nigam/spoj-stuff
|
ba57d37cf6cdaebf43202c2131f8ce806c5d416b
|
0a6871ace91cd0dea67195127919ebb6c1817078
|
refs/heads/master
| 2020-06-01T04:16:16.183256
| 2013-04-07T06:26:35
| 2013-04-07T06:26:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 269
|
py
|
import sys


def fun(lines=None):
    """Print a**b for each "a b" input line until the "0 0" sentinel.

    Modernized from Python 2 (raw_input / print statement) to Python 3.

    :param lines: optional iterable of input lines; defaults to stdin,
                  which preserves the original command-line behavior.
    :return: 0 (kept for compatibility with the original implementation)
    """
    source = iter(lines) if lines is not None else sys.stdin
    for line in source:
        a, b = (int(tok) for tok in line.split())
        if a == 0 and b == 0:
            break
        print(a ** b)
    return 0


# Guarded so importing the module no longer reads stdin and calls sys.exit().
if __name__ == '__main__':
    fun()
    sys.exit(0)
|
[
"pratyush.nigam@gmail.com"
] |
pratyush.nigam@gmail.com
|
67320410f535ebde71ef2996dbb812f555810d7e
|
3550b8f2069f2f00d702012db6bd3c880a74abb8
|
/Homework-2/get_job.py
|
8751bc83269951ea5523596d827321f2f6630126
|
[] |
no_license
|
Donfa1con/PythonHse
|
9bb2482b00a22ca561becca10b0eeba43bd0cb8e
|
550edb788f2295a8020924409f5ccf9cc059d590
|
refs/heads/master
| 2020-03-27T19:14:46.957525
| 2018-09-01T07:17:07
| 2018-09-01T07:17:07
| 146,975,326
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 673
|
py
|
import requests
import json
from help_function import record_to_file
# SuperJob API key placeholder — must be replaced with a real key before use.
api_key = '???'
def load_data_from_url(method, api_key):
    """Query the SuperJob 2.0 API *method* and return the parsed JSON.

    Searches up to 100 developer vacancies in town id 4 across several
    IT catalogue ids; keywords are Russian for "programmer"/"developer".
    """
    headers = { 'X-Api-App-Id' : api_key }
    url = 'https://api.superjob.ru/2.0/%s' % method
    params = {
        'town' : 4,
        'count' : 100,
        'catalogues[]' : [604, 45, 47, 48, 49, 51],
        'keywords[]' : ['программист','разработчик']
    }
    # requests already offers .json(); json.loads(...text) is equivalent here.
    data = json.loads(requests.get(url, headers=headers, params=params).text)
    return data
if __name__ == '__main__':
    # Fetch vacancies and persist them via the project helper.
    data = load_data_from_url('vacancies', api_key)
    record_to_file(data['objects'],'Data.json')
|
[
"diziganshin@inbox.ru"
] |
diziganshin@inbox.ru
|
6a64acaaeff4af395c3f83589d246735caccd6a5
|
b147cae100a830c0a026c1677fde8bf43eb641df
|
/reactdjango/frontend/urls.py
|
e2475b5e0938049d78b77b6aa4b5f3086f36accb
|
[] |
no_license
|
wlawt/lead_manager
|
382c6d0344778d2897df547397527b2d2dc01a60
|
2968773b0d929d0ebcd2dffd719e712601594546
|
refs/heads/master
| 2023-01-23T07:56:38.411678
| 2021-04-10T00:09:29
| 2021-04-10T00:09:29
| 206,666,216
| 0
| 0
| null | 2023-01-07T09:25:14
| 2019-09-05T22:16:18
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 138
|
py
|
from django.urls import path
from . import views
# Django discovers routes through the module-level name ``urlpatterns``;
# the original ``urlspatterns`` typo meant this list was never read and
# every request to the app returned a 404.
urlpatterns = [
    path("", views.index)  # site root -> the frontend index view
]
|
[
"horizon.william14@gmail.com"
] |
horizon.william14@gmail.com
|
00250e3d95d0f109cc7852e32d0a437133ebdc65
|
2c0377022f0d11df517b2addc00925f5763e7494
|
/main.py
|
1bdd1f2943d4caa8bee59db1f7c106d2e54b0050
|
[
"MIT"
] |
permissive
|
Sagu12/TextSummarizer
|
7ecc1731abeb68baa1420cff0bef9f04824ae440
|
3c0f1f1c50f28c787eaf14f9c181a65dfd4af567
|
refs/heads/main
| 2023-02-09T14:50:43.199347
| 2021-01-07T09:37:10
| 2021-01-07T09:37:10
| 327,564,948
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,998
|
py
|
from flask import Flask, render_template,request
import requests
from bs4 import BeautifulSoup
import nltk
import pandas as pd
#https://www.youtube.com/watch?v=h8fE_G9a_Oo
# Flask application object; the routes below are registered on it.
app = Flask(__name__)
def get_wiki_content(url):
    """Download *url* and return the text of all its <p> paragraphs joined."""
    response = requests.get(url)
    soup = BeautifulSoup(response.text)
    return ''.join(paragraph.text for paragraph in soup.find_all("p"))
def top10_sent(url):
    """Extractive summary: score each sentence of the page at *url* by the
    sum of its (normalized) word frequencies and return the 10 best
    sentences joined into one string.

    Bug fixes vs. the original:
    * ``sort_values(ascending=True)`` returned the 10 LEAST relevant
      sentences; the dead commented-out line shows ``ascending=False``
      was intended.
    * ``iloc[0:11]`` took 11 rows, not 10.
    * ``max()`` on an empty frequency dict crashed for pages with no text.
    """
    required_text = get_wiki_content(url)
    stopwords = nltk.corpus.stopwords.words("english")
    sentences = nltk.sent_tokenize(required_text)
    words = nltk.word_tokenize(required_text)
    # Frequency of every non-stopword token.
    word_freq = {}
    for word in words:
        if word not in stopwords:
            word_freq[word] = word_freq.get(word, 0) + 1
    if not word_freq:
        return ""
    # Normalize frequencies into [0, 1].
    max_word_freq = max(word_freq.values())
    for key in word_freq.keys():
        word_freq[key] /= max_word_freq
    # Score each sentence as the sum of its word weights.
    sentences_score = []
    for sent in sentences:
        curr_score = sum(word_freq.get(w, 0) for w in nltk.word_tokenize(sent))
        sentences_score.append(curr_score)
    sentences_data = pd.DataFrame({"sent": sentences, "score": sentences_score})
    sorted_data = sentences_data.sort_values(by="score", ascending=False).reset_index()
    top10_rows = sorted_data.iloc[0:10, :]
    return " ".join(list(top10_rows["sent"]))
@app.route("/", methods=["GET", "POST"])
def index():
    """Serve the form page on GET; summarize the submitted URL on POST."""
    if request.method == "POST":
        target = request.form.get("url")
        return top10_sent(target)
    return render_template("index.html")
if __name__ == "__main__":
    # Development server only; debug=True enables the reloader and debugger.
    app.run(debug=True)
|
[
"noreply@github.com"
] |
noreply@github.com
|
696df3b6530b6d357c43a5714c1244e091b7a965
|
5a27d00e32e5be596b38eb6b2b735b2c94536513
|
/codechef/SSEC2/hack_it.py
|
3996933223a3c9dadf9bec3ab1e87a59a8b25ae4
|
[] |
no_license
|
vishk95/Challenges
|
dc29d28fce4ced044ce60874eb53c0713adb2558
|
047798af14a8b47608da975b1b578bee9736e7c7
|
refs/heads/main
| 2023-03-08T23:16:34.571460
| 2021-03-05T12:42:25
| 2021-03-05T12:42:25
| 336,181,123
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 341
|
py
|
def correction(i):
    """Shift every digit of the password (a list of digit chars) down by 2,
    mutating the list in place, and print the corrected number."""
    for pos, ch in enumerate(i):
        i[pos] = str(int(ch) - 2)
    print(int("".join(i)))


if __name__ == '__main__':
    n = int(input().strip())
    passwords = [list(input().strip()) for _ in range(n)]
    for pwd in passwords:
        correction(pwd)
|
[
"vishalat1994@gmail.com"
] |
vishalat1994@gmail.com
|
7ef164e5244b594ae4e7f8f84113336ddbcda4e7
|
a22da75b98315c688c84c87436aaf99eee67e9b7
|
/visual_bar.py
|
ab7f3e4f5b3b0d54cbe6c270ad3a26270eae0d54
|
[] |
no_license
|
ArtemisWang/AutomaticVideoClipAndMixing
|
57f08c9e30f4b5ce3ea88f1e6c0cb44490f09759
|
9679360cc110d4c9fab1596ee95c56e4e19f7a05
|
refs/heads/master
| 2021-10-24T18:23:03.817925
| 2019-03-27T10:49:01
| 2019-03-27T10:49:01
| 177,972,242
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,611
|
py
|
# -*- coding: utf-8 -*-
# Grouped bar chart comparing BLEU scores of DFMR vs DFBM at four values of
# mu, with three bar groups (N=1..3, corresponding to p1/p2/p3 data rows).
import matplotlib.pyplot as plt
name_list = ['0.2', '0.4', '0.6', '0.8']  # mu values used as x tick labels
num_list = [0.0161, 0.009, 0.0089, 0.0135] ## p1
num_list1 = [0.0037, 0.0065, 0.0054, 0.0065]
num_list2 = [0.0247, 0.0151, 0.0133, 0.0199] ## p2
num_list3 = [0.0025, 0.0033, 0.0033, 0.0033]
num_list4 = [0.0197, 0.011, 0.0101, 0.019] ## p3
num_list5 = [0.0022, 0.0026, 0.0025, 0.0026]
tick = ['p1']*4+['p2']*4+['p3']*4
x = list(range(len(num_list)))  # bar positions; shifted in place per group
z = list(range(len(num_list)))
total_width, n = 0.6, 3          # 3 sub-bars share 0.6 units of width
width = total_width / n
bar_width = width - 0.02         # small gap between neighbouring bars
# ax2 = plt.twiny()
# --- group N=1: DFMR bar over DFBM bar at the base positions ---
plt.bar(x, num_list, width=bar_width, label='DFMR', fc='powderblue')
for xx,yy in zip(x, num_list):
    plt.text(xx, yy+0.0002, 'N=1', ha='center')
#'khaki' 'darkkhaki'
plt.bar(x, num_list1, width=bar_width, label='DFBM', fc='cadetblue')
# Shift every bar position right by one sub-bar width for the next group.
for i in range(len(x)):
    x[i] = x[i] + width
z = z + x # 'peachpuff' 'peru'
# --- group N=2 ---
plt.bar(x, num_list2, width=bar_width, fc='powderblue')
plt.bar(x, num_list3, width=bar_width, fc='cadetblue')
# Zero-width bars exist only to place the mu tick labels under this group.
plt.bar(x, num_list, width=0, tick_label=name_list)
for xx,yy in zip(x, num_list2):
    plt.text(xx, yy+0.0002, 'N=2', ha='center')
for i in range(len(x)):
    x[i] = x[i] + width
z = z + x
# --- group N=3 ---
plt.bar(x, num_list4, width=bar_width, fc='powderblue')
plt.bar(x, num_list5, width=bar_width, fc='cadetblue')
for xx,yy in zip(x, num_list4):
    plt.text(xx, yy+0.0002, 'N=3', ha='center')
plt.xlabel('$\mu$')
plt.ylabel('BLEU')
# ax2.bar(z, num_list*3, width=0, tick_label=tick)
# ax2.set_xlabel('p')
# plt.bar(y, num_list, width=0, tick_labecpl=name_list)
plt.legend()
plt.savefig('bar_cp.pdf', dpi=600)
plt.show()
|
[
"noreply@github.com"
] |
noreply@github.com
|
f76e89dc0176bd78e18083cf96494132976542a0
|
fb38630bcb0e8d1acd8bdf559d6b19b3008122c7
|
/1024.py
|
155a7a07bd35104229c06ad7c99890d3f205affc
|
[
"MIT"
] |
permissive
|
ossfile/1024-web-crawler
|
de14f4c2146fd9097a46da22d9e957054efc32bb
|
83f939a53a921c49fbbacb5c5b8000c88b1da858
|
refs/heads/master
| 2022-04-03T12:46:56.238010
| 2019-12-25T03:33:24
| 2019-12-25T03:33:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,535
|
py
|
import requests
import re
import os
from lxml import etree

# Crawl forum list pages 1..270 and download every image of every article.
# Pages are GBK-encoded, hence the str(content, 'gbk', errors='ignore') calls.
try:
    flag = 1
    while flag <= 270:
        base_url = 'https://t66y.com/'
        page_url = 'https://t66y.com/thread0806.php?fid=16&search=&page=' + str(flag)
        get = requests.get(page_url)
        # Relative article URLs scraped from the list page.
        article_url = re.findall(r'<h3><a href="(.*)" target="_blank" id="">(?!<.*>).*</a></h3>', str(get.content, 'gbk', errors='ignore'))
        for url in article_url:
            tittle = ['default']
            getpage = requests.get(str(base_url) + str(url))
            tittle = re.findall(r'<h4>(.*)</h4>', str(getpage.content, 'gbk', errors='ignore'))
            # The article title doubles as the download directory name.
            file = tittle[0]
            if not os.path.exists(file):
                os.makedirs(file)
            tree = etree.HTML(str(getpage.content, 'gbk', errors='ignore'))
            img_url = tree.xpath('//input/@data-src')
            filename = 1
            for download_url in img_url:
                # Referer header is required or the image host refuses the request.
                headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36Name', 'Referer': 'https://t66y.com'}
                req = requests.get(url=download_url, headers=headers)
                file_path = './' + file + "/" + str(filename) + '.jpg'
                with open(file_path, 'wb') as f:
                    print('开始下载:'+file+"----第"+str(filename)+"张图片")
                    f.write(req.content)
                filename += 1
            print(file+"--下载完成")
        # Bug fix: advance to the next list page; the original never
        # incremented ``flag`` and re-crawled page 1 forever.
        flag += 1
except Exception as err:
    # Narrowed from a bare ``except`` so Ctrl-C / SystemExit still work,
    # and the actual failure is surfaced instead of being swallowed.
    print("崩了,兄弟")
    print(err)
|
[
"lzhengycy@outlook.com"
] |
lzhengycy@outlook.com
|
8b7029f0ee977278cae4cbb89fb2abc357a8cf00
|
8cc4229dccec92c4143159bbbd567008095a2b27
|
/backend/appengine/categoria/categoria_model.py
|
7c2a5f1ab530b8dca88eb8ed10becc0c656f109f
|
[] |
no_license
|
SamaraCardoso27/Projeto
|
138f65a04f467bc4d7e71c236ff20bd30a5867b0
|
5269785c3cc023328d46e12b3b0373dd9d553d0f
|
refs/heads/master
| 2016-09-01T22:01:34.408630
| 2015-04-19T18:56:02
| 2015-04-19T18:56:02
| 30,780,889
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 696
|
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from google.appengine.ext import ndb
from google.appengine.ext.db import IntegerProperty
from gaeforms.base import Form, StringField, IntegerField
from gaeforms.ndb.form import ModelForm
class Categoria(ndb.Model):
    """App Engine Datastore model for a product category (name + count)."""
    nome=ndb.StringProperty(required=True)
    quantidade= ndb.IntegerProperty(required=True)
    # Creation timestamp, filled automatically on the first put().
    criacao = ndb.DateTimeProperty(auto_now_add=True)
    @classmethod
    def query_ordenada_por_nome(cls):
        """Return a query over all categories ordered by name."""
        return cls.query().order(Categoria.nome)
class CategoriaForm(ModelForm):
    """gaeforms ModelForm for Categoria; only ``nome`` is taken from the model."""
    _model_class = Categoria
    _include = [Categoria.nome]
    # Declared manually so the count is validated as a non-negative integer.
    quantidade=IntegerField(required=True,lower=0)
|
[
"samaracardosodossantos@gmail.com"
] |
samaracardosodossantos@gmail.com
|
9a12029623af66c700d989ba7253121601a4f6d5
|
2d27444b26de173ed1b7454f72d102050c7a6b07
|
/Tuples/tuples04.py
|
bfb1b4cb89f5c12608af7a77689cf38b9a60a51a
|
[] |
no_license
|
imrishuroy/Python-Projects
|
6b93454dcb30c307aece07d611855f8b718fb8e8
|
f15a0e7da702a30618658ce4f4650807daaae759
|
refs/heads/master
| 2023-04-17T04:33:38.889592
| 2021-05-08T08:35:09
| 2021-05-08T08:35:09
| 335,221,000
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 140
|
py
|
# Tuples and Dictionary: dict.items() yields (key, value) tuples.
grades = {}
grades['Rishu'] = 91
grades['Prince'] = 100
for name, score in grades.items():
    print(name, score)
pairs = grades.items()
print(pairs)
|
[
"rishukumar.prince@gmail.com"
] |
rishukumar.prince@gmail.com
|
ef5c21aa5d3669148a72593a4b8121792c545794
|
ca61417d925ce53b83d0767224d58cd3b2da57cc
|
/detector_api.py
|
a1487f12909e4d75a8b79e58ae62ed90c5899267
|
[
"Apache-2.0"
] |
permissive
|
qilei123/mmdetection_rop
|
423ed5e63a84c8eeba1cb823b14d16ed906289f8
|
cbdbb2b521c94c2f3eeebb2f2069663199f679bc
|
refs/heads/master
| 2020-05-09T23:38:28.969119
| 2019-08-20T03:21:07
| 2019-08-20T03:21:07
| 181,505,837
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,210
|
py
|
# -*- coding:utf-8 -*-
import json
import argparse
import mmcv
from mmcv.runner import load_checkpoint
from mmdet.models import build_detector
from mmdet.apis import inference_detector, show_result, show_single_category_result
import cv2
import glob
import os
import time
import matplotlib.pyplot as plt
import numpy as np
import os
import datetime
import numpy as np
def xyxy2xywh(bbox):
    """Convert a corner-format box [x1, y1, x2, y2, ...] to [x, y, w, h].

    Width/height are inclusive pixel counts (+1). Extra trailing elements
    (e.g. a confidence score in slot 4) are ignored.
    """
    coords = bbox.tolist()
    return [coords[0],
            coords[1],
            coords[2] - coords[0] + 1,
            coords[3] - coords[1] + 1]
def py_cpu_nms(dets, scores, thresh):
    """Pure Python NMS baseline.

    :param dets:   (N, 4) array of [x1, y1, x2, y2] box corners
    :param scores: (N,) per-box confidence scores
    :param thresh: IoU above which a lower-scored box is suppressed
    :return: list of kept box indices, highest score first
    """
    x1, y1 = dets[:, 0], dets[:, 1]
    x2, y2 = dets[:, 2], dets[:, 3]
    # Inclusive pixel areas of every box.
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    # Candidate indices sorted by descending score.
    order = scores.argsort()[::-1]
    keep = []
    while order.size > 0:
        best = order[0]          # current highest-scored candidate: always kept
        keep.append(best)
        # Intersection of the best box with every remaining candidate.
        ix1 = np.maximum(x1[best], x1[order[1:]])
        iy1 = np.maximum(y1[best], y1[order[1:]])
        ix2 = np.minimum(x2[best], x2[order[1:]])
        iy2 = np.minimum(y2[best], y2[order[1:]])
        iw = np.maximum(0.0, ix2 - ix1 + 1)
        ih = np.maximum(0.0, iy2 - iy1 + 1)
        inter = iw * ih
        iou = inter / (areas[best] + areas[order[1:]] - inter)
        # Keep only candidates whose overlap with the best box is <= thresh;
        # +1 maps positions in iou (which excludes ``best``) back into order.
        survivors = np.where(iou <= thresh)[0]
        order = order[survivors + 1]
    return keep
def py_cpu_softnms(dets, sc, Nt=0.3, sigma=0.5, thresh=0.001, method=2):
    """
    py_cpu_softnms — pure-Python Soft-NMS.

    :param dets:   (N, 4) box coordinate matrix, format [y1, x1, y2, x2]
    :param sc:     per-box confidence scores (NOTE: modified in place)
    :param Nt:     IoU overlap threshold
    :param sigma:  variance of the Gaussian re-weighting function
    :param thresh: final score cut-off
    :param method: 1 = linear decay, 2 = Gaussian decay, otherwise hard NMS
    :return: indices of the boxes that survive (original positions)
    """
    # indexes concatenate boxes with the last column, so the original
    # position of each box survives the in-place row swaps below.
    N = dets.shape[0]
    indexes = np.array([np.arange(N)])
    dets = np.concatenate((dets, indexes.T), axis=1)
    # the order of boxes coordinate is [y1,x1,y2,x2]
    y1 = dets[:, 0]
    x1 = dets[:, 1]
    y2 = dets[:, 2]
    x2 = dets[:, 3]
    scores = sc
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    for i in range(N):
        # intermediate parameters for later parameters exchange
        tBD = dets[i, :].copy()
        tscore = scores[i].copy()
        tarea = areas[i].copy()
        pos = i + 1
        # Find the best-scored box among the not-yet-processed remainder.
        if i != N-1:
            maxscore = np.max(scores[pos:], axis=0)
            maxpos = np.argmax(scores[pos:], axis=0)
        else:
            maxscore = scores[-1]
            maxpos = 0
        if tscore < maxscore:
            # Selection-sort step: swap row i with the highest-scoring
            # remaining row so box i is always the current best.
            dets[i, :] = dets[maxpos + i + 1, :]
            dets[maxpos + i + 1, :] = tBD
            tBD = dets[i, :]
            scores[i] = scores[maxpos + i + 1]
            scores[maxpos + i + 1] = tscore
            tscore = scores[i]
            areas[i] = areas[maxpos + i + 1]
            areas[maxpos + i + 1] = tarea
            tarea = areas[i]
        # IoU calculate (box i against all later boxes)
        xx1 = np.maximum(dets[i, 1], dets[pos:, 1])
        yy1 = np.maximum(dets[i, 0], dets[pos:, 0])
        xx2 = np.minimum(dets[i, 3], dets[pos:, 3])
        yy2 = np.minimum(dets[i, 2], dets[pos:, 2])
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        ovr = inter / (areas[i] + areas[pos:] - inter)
        # Three methods: 1.linear 2.gaussian 3.original NMS
        if method == 1:  # linear
            weight = np.ones(ovr.shape)
            weight[ovr > Nt] = weight[ovr > Nt] - ovr[ovr > Nt]
        elif method == 2:  # gaussian
            weight = np.exp(-(ovr * ovr) / sigma)
        else:  # original NMS
            weight = np.ones(ovr.shape)
            weight[ovr > Nt] = 0
        # Decay the scores of all boxes overlapping box i.
        scores[pos:] = weight * scores[pos:]
    # select the boxes and keep the corresponding indexes
    inds = dets[:, 4][scores > thresh]
    keep = inds.astype(int)
    return keep
def nms_result(json_result):
    """Filter ``json_result['results']`` in place with hard NMS (IoU 0.15).

    Each result's xywh bbox is converted to [y1, x1, y2, x2] corners for
    py_cpu_nms; only the surviving detections are kept.
    """
    corner_boxes = []
    confidences = []
    for det in json_result['results']:
        bx = det['bbox']
        corner_boxes.append([int(bx[1]), int(bx[0]),
                             int(bx[1]) + int(bx[3]),
                             int(bx[0]) + int(bx[2])])
        confidences.append(det['score'])
    corner_boxes = np.array(corner_boxes, dtype=np.float32)
    confidences = np.array(confidences, dtype=np.float32)
    if len(corner_boxes) > 0:
        kept = py_cpu_nms(corner_boxes, confidences, 0.15)
        json_result['results'] = [json_result['results'][int(k)] for k in kept]
class lesion_detector():
    """Wraps an mmdetection model for DR (diabetic retinopathy) lesion detection."""
    def __init__(self,name='DR_lesion_detector'):
        self.name = name
        self.json_result = None   # result dict of the most recent prediction
        self.cfg = None           # mmcv config, set by init_predictor()
        self.model = None         # detector network, set by init_predictor()
        self.threshold = 0.1      # minimum confidence for a box to be reported
    def init_predictor(self,config_dir='/home/intellifai/docker_images/mmdetection4dr/configs/faster_rcnn_dr_4lesions/faster_rcnn_x101_32x4d_fpn_1x_dr_4lesions_7_a_with_focal_loss_smallset_advance_optdataset4_deephead_v1.py',model_dir='/home/intellifai/docker_images/mmdetection_models/epoch_9.pth'):
        """Build the detector from *config_dir* and load weights from *model_dir*."""
        self.cfg = mmcv.Config.fromfile(config_dir)
        self.cfg.model.pretrained = None
        self.model = build_detector(self.cfg.model, test_cfg=self.cfg.test_cfg)
        _ = load_checkpoint(self.model, model_dir)
    def prediction(self,img_dir,show_save_dir='/home/intellifai/docker_images/mmdetection_models/test_pytorch_detector.jpg'):
        """Detect lesions in one image.

        Returns {'image_dir': ..., 'results': [{'bbox': [x, y, w, h],
        'score': float, 'label': str}, ...]} after NMS filtering. When
        *show_save_dir* is non-empty, an annotated copy of the image with
        green boxes/labels is written there.
        """
        img = mmcv.imread(img_dir)
        result = inference_detector(self.model, img, self.cfg)
        # Detectors may return (bboxes, segmentation masks) or bboxes only.
        if isinstance(result, tuple):
            bbox_result, segm_result = result
        else:
            bbox_result, segm_result = result, None
        json_result = dict()
        json_result['image_dir'] = img_dir
        json_result['results']=[]
        for label in range(len(bbox_result)):
            bboxes = bbox_result[label]
            for i in range(bboxes.shape[0]):
                # Column 4 is the confidence score; keep boxes above threshold.
                if float(bboxes[i][4])> self.threshold:
                    data = dict()
                    data['bbox'] = xyxy2xywh(bboxes[i])
                    data['score'] = float(bboxes[i][4])
                    # Labels are reported 1-based.
                    data['label'] = str(label+1)
                    json_result['results'].append(data)
        # Drop overlapping detections in place.
        nms_result(json_result)
        if not show_save_dir=='':
            image = cv2.imread(img_dir)
            for result in json_result['results']:
                bbox = [int(result['bbox'][0]),int(result['bbox'][1]),int(result['bbox'][2]),int(result['bbox'][3])]
                cv2.rectangle(image,(bbox[0],bbox[1]),(bbox[0]+bbox[2],bbox[1]+bbox[3]),(0,255,0),2)
                cv2.putText(image,str(result['label']),(bbox[0]+bbox[2],bbox[1]),cv2.FONT_HERSHEY_SIMPLEX, 1,(0,255,0),2,cv2.LINE_AA)
            cv2.imwrite(show_save_dir,image)
            #cv2.imshow('test',image)
            #cv2.waitKey(0)
        self.json_result = json_result
        return self.json_result
    def getResult(self):
        """Return the result of the most recent prediction() call (or None)."""
        return self.json_result
    def getDetectorName(self):
        """Return the detector's display name."""
        return self.name
import glob
def test():
    """Smoke test: run the detector over a directory of JPEGs, timing each
    image, and dump all detections to a JSON file. Paths are hard-coded
    for the author's machines (alternatives left commented out)."""
    LesionDetector = lesion_detector()
    config_dir = 'configs/faster_rcnn_dr_4lesions/faster_rcnn_x101_32x4d_fpn_1x_dr_4lesions_7_a_with_focal_loss_smallset_advance_optdataset4_deephead_v1.py'
    #model_dir = '/data0/qilei_chen/AI_EYE/BostonAI4DB7/work_dirs/faster_rcnn_r50_fpn_1x_with_focal_loss_smallset_advance_optdataset4/epoch_9.pth'
    model_dir = '/home/intellifai/docker_images/mmdetection_models/epoch_9.pth'
    LesionDetector.init_predictor(config_dir,model_dir)
    #img_dir = '/data0/qilei_chen/Development/Datasets/KAGGLE_DR/val/0/*.jpeg'
    #img_dir = '/data0/qilei_chen/AI_EYE/Messidor/cropped_base_jpeg/*.jpeg'
    img_dir = '/home/intellifai/docker_images/mmdetection_models/test_data/val2014/*.jpg'
    #show_save_dir = '/data0/qilei_chen/Development/test_pytorch_detector.jpg'
    show_save_dir = '/home/intellifai/docker_images/mmdetection_models/test_pytorch_detector.jpg'
    #show_save_dir = ''
    img_dirs = glob.glob(img_dir)
    #for i in range(10000):
    results = dict()
    results['results']=[]
    for img_dir in img_dirs:
        print(img_dir)
        # Wall-clock timing of one prediction, reported in milliseconds.
        oldtime=datetime.datetime.now()
        result = LesionDetector.prediction(img_dir,show_save_dir)
        newtime=datetime.datetime.now()
        # NOTE(review): uses .microseconds only, which drops whole seconds
        # from the reported time — verify this is intentional.
        print((newtime-oldtime).microseconds/1000)
        results['results'].append(result)
    with open('/data0/qilei_chen/AI_EYE/Messidor/head_v1_detect_results.json','w') as json_file:
        json.dump(results,json_file)
# Runs the smoke test on import/execution of this module.
test()
|
[
"qileimail123@gmail.com"
] |
qileimail123@gmail.com
|
bf74aa88f5156a86b04175d222bc82650995a74d
|
a72402e6bfa6b06812d530443baa296f53355cc0
|
/clos_get_dhcp_ip.py
|
74082f6a065ed52862e9cd0f9efb1d2333013951
|
[] |
no_license
|
robertdigital/virtual-router-with-qemu
|
41ac1061b36fa592d67668d4e708d93e0b6156fe
|
a34b53d0e2110abb42b393693e40fd8bb8080f4e
|
refs/heads/master
| 2020-12-27T20:23:59.334606
| 2019-05-10T13:48:58
| 2019-05-10T13:48:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,059
|
py
|
#!/usr/bin/env python
import os
import json
import sys
# dhcp_table.txt contains one "<ip> <mac>" pair per line, built with:
#   cat upload/* > dhcp_table.txt
output_dir = "clos_test"
dhcp_file = output_dir+"/dhcp_table.txt"
clos_file = output_dir+"/nodes.with.vm.json"
def read_json_file(filename):
    """Load and return the JSON document stored in *filename*."""
    with open(filename) as handle:
        return json.load(handle)
def write_json_file(filename, j):
    """Serialize *j* into *filename* as sorted, 4-space-indented JSON.

    Fixed to use a context manager: the original never closed the file,
    leaking the handle and risking unflushed data.
    """
    with open(filename, 'w') as f:
        json.dump(j, f, sort_keys=True, indent=4)
    return
def jsonpretty(text):
    """Return *text* rendered as 4-space-indented JSON with sorted keys."""
    pretty = json.dumps(text, sort_keys=True, indent=4)
    return pretty
def write_file(filename, data):
    """Write *data* to *filename*; an empty filename is silently ignored.

    Fixes the original ``filename is not ''`` identity comparison (which
    only worked through CPython string interning and raises a
    SyntaxWarning on modern interpreters), and uses a context manager so
    the handle is closed even if the write raises.
    """
    if filename != '':
        with open(filename, 'w') as f:
            f.write(data)
    return
def read_file(filename):
    """Return the lines of *filename* (newlines stripped), or [] on any problem.

    Fixes vs. the original:
    * ``filename is not ''`` identity comparison replaced with equality.
    * ``os.stat`` was called OUTSIDE the try block, so a missing file
      crashed even though the blanket except shows the intent was clearly
      best-effort; missing paths now return [].
    * the blanket ``except`` is narrowed to I/O errors, and the unused
      ``cmds`` variable is removed.
    """
    lines = []
    if str(filename) == '' or not os.path.exists(filename):
        return lines
    if os.stat(filename).st_size == 0:
        return lines
    try:
        with open(filename, 'r') as finput:
            lines = [x.replace('\n', '') for x in finput]
    except (IOError, OSError):
        return lines
    return lines
# NOTE: this script is Python 2 (print statements below).
# Build a MAC -> IP lookup from the collected DHCP leases.
lines= read_file(dhcp_file)
mac_ip = {}
for line in lines:
    ip = line.split()[0]
    mac = line.split()[1]
    mac_ip[mac] = ip
nodes = read_json_file(clos_file)
print jsonpretty(mac_ip)
# For every node in the Clos topology, derive its eth0 MAC from the node id,
# look up the DHCP-assigned IP, record it, and emit a hosts-file line.
line = ""
for x in ["superspines", "spines", "leaves"]:
    for item in sorted(nodes[x]):
        data = nodes[x][item]
        vm_id = data['node_id_hex']
        hostname = str(data['hostname'])
        vm_type = str(data['vm_type'])
        # Zero-pad the hex node id to two characters for the MAC template.
        if len(vm_id) < 2:
            vm_id = "0"+vm_id
        eth0_mac = "0c:55:01:00:"+vm_id+":00"
        eth0_mac = eth0_mac.replace(':','')
        if eth0_mac in mac_ip:
            eth0_ip = mac_ip[eth0_mac]
            data['mgmt_ip'] = eth0_ip
        else:
            print "ERROR: "+hostname+" with mac: "+eth0_mac+" NOT FOUND"
            continue
        line += eth0_ip+" "+hostname+" "+vm_type+"-"+vm_id+"\n"
        #write_json_file("clos_result/nodes/"+hostname, data)
write_file(output_dir+"/hostfile.txt", line)
#write_json_file(clos_file, nodes)
|
[
"rendo.aw@gmail.com"
] |
rendo.aw@gmail.com
|
5407f917a3df1126560376f7fd4e574c2a9a703d
|
1e198980ecce4ff1e5e08d71d4127c9be19e6296
|
/syntax/operator.py
|
92155c9e9ebc0712debd2c71753d20cf2483301a
|
[] |
no_license
|
rheehot/python101
|
a2af8bb47dd5ecd792f262379e86cafaf04b3313
|
a8be8b6796ea2b5375aeb7455a8715059ed44410
|
refs/heads/master
| 2023-02-20T18:36:17.105974
| 2021-01-26T18:03:13
| 2021-01-26T18:03:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,239
|
py
|
# Python supports 7 kinds of operators.
# Arithmetic operators
# 1) basic four: + - * /  (/ is true division)
# 2) exponent : **
# 3) remainder : %
# 4) // : floor division (quotient)
print(3 ** 4) # 3 to the 4th power
print(5 % 2) # 1
print(5 / 2) # 2.5
print(5 // 2) # 1
# Comparison (relational) operators
# == != < > <= >=
# Comparison returns a True / False value
# Assignment operators
# Used to assign a value to a variable
# = += -= *= /= %= //=
# Every arithmetic operator can be turned into an augmented assignment with =
a = 10
a *= 4
print(a) # 40
b = 12
b /= 5
print(b)
c = 14
c %= 4
print(c)
d = 17
d //= 2
print(d)
# Logical operators
# and, or, not
# Bitwise operators
# &(AND), |(OR), ^(XOR), ~(Complement), <<, >>(Shift)
# Let's see them through actual use
# Membership operators: test whether a value is contained
# in, not in
a = ["javascript", "java", "python"]
b = "rust" in a # False
c = "python" in a # True
d = "java" not in a # True
print(b, c, d)
# Identity operators: test whether both operands are the same object
# is, is not
name = "jjanmo"
nickname = name
isSame = name is nickname
print(isSame) # True
## Note
# Identity operators : is / is not => compare objects
# Comparison operators : == / != => compare values
|
[
"jjanmo07@hanmail.net"
] |
jjanmo07@hanmail.net
|
cda213adb638250573fe948b2e2f4b4dddf419bd
|
22415d4998b30ba01ba249463b19f051110feff0
|
/fileserver/shared.py
|
97d9e07d5b2b77f7fdc1247820ffa850511c56d3
|
[] |
no_license
|
dimchat/station-py
|
9fcc0a3f2ef4383cc7da521db0ed9eee2f9ccfad
|
64966a06aa10640f4f5af4327bd74d5db6467fe0
|
refs/heads/master
| 2023-08-20T21:56:07.313923
| 2023-08-13T06:35:21
| 2023-08-13T06:35:21
| 158,835,688
| 11
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,596
|
py
|
# -*- coding: utf-8 -*-
# ==============================================================================
# MIT License
#
# Copyright (c) 2022 Albert Moky
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
import getopt
import sys
from typing import Optional, Set
from dimples import hex_decode
from dimples.database import Storage
from libs.utils import Singleton
from libs.common import Config
@Singleton
class GlobalVariable:
    """Process-wide configuration holder (singleton).

    Wraps the loaded ``Config`` object and exposes typed accessors for the
    ``[ftp]`` section.  Parsed values (file-type sets, md5 secret) are cached
    after the first lookup.
    """

    def __init__(self):
        super().__init__()
        # must be assigned (a Config) before any property below is accessed
        self.config: Optional[Config] = None
        # lazily computed caches
        self.__image_types: Optional[Set[str]] = None
        self.__allowed_types: Optional[Set[str]] = None
        self.__md5_secret: Optional[bytes] = None

    @property
    def server_host(self) -> str:
        """FTP server host name ([ftp] host)."""
        return self.config.get_string(section='ftp', option='host')

    @property
    def server_port(self) -> int:
        """FTP server port ([ftp] port)."""
        return self.config.get_integer(section='ftp', option='port')

    #
    #   download URLs
    #

    @property
    def avatar_url(self) -> str:
        """URL template for downloading avatars ([ftp] avatar_url)."""
        return self.config.get_string(section='ftp', option='avatar_url')

    @property
    def download_url(self) -> str:
        """URL template for downloading uploaded files ([ftp] download_url)."""
        return self.config.get_string(section='ftp', option='download_url')

    #
    #   upload directories
    #

    @property
    def avatar_directory(self) -> str:
        """Local directory where avatars are stored ([ftp] avatar_dir)."""
        return self.config.get_string(section='ftp', option='avatar_dir')

    @property
    def upload_directory(self) -> str:
        """Local directory where uploads are stored ([ftp] upload_dir)."""
        return self.config.get_string(section='ftp', option='upload_dir')

    @property
    def image_file_types(self) -> Set[str]:
        """Extensions treated as images; parsed once from [ftp] image_types."""
        if self.__image_types is None:
            parsed = self.__get_set(section='ftp', option='image_types')
            assert len(parsed) > 0, 'image file types not set'
            self.__image_types = parsed
        return self.__image_types

    @property
    def allowed_file_types(self) -> Set[str]:
        """Extensions accepted for upload; parsed once from [ftp] allowed_types."""
        if self.__allowed_types is None:
            parsed = self.__get_set(section='ftp', option='allowed_types')
            assert len(parsed) > 0, 'allowed file types not set'
            self.__allowed_types = parsed
        return self.__allowed_types

    def __get_set(self, section: str, option: str) -> Set[str]:
        """Split a comma-separated option into a set of non-empty tokens."""
        value = self.config.get_string(section=section, option=option)
        assert value is not None, 'string value not found: section=%s, option=%s' % (section, option)
        stripped = (item.strip() for item in value.split(','))
        return {token for token in stripped if len(token) > 0}

    @property
    def md5_secret(self) -> bytes:
        """Secret bytes for MD5 hashing, hex-decoded from [ftp] md5_secret."""
        if self.__md5_secret is None:
            string = self.config.get_string(section='ftp', option='md5_secret')
            assert string is not None, 'md5 key not set'
            self.__md5_secret = hex_decode(string=string)
        return self.__md5_secret
def show_help(cmd: str, app_name: str, default_config: str):
    """Print command-line usage for *cmd* to stdout."""
    usage_lines = (
        '',
        ' %s' % app_name,
        '',
        'usages:',
        ' %s [--config=<FILE>]' % cmd,
        ' %s [-h|--help]' % cmd,
        '',
        'optional arguments:',
        ' --config config file path (default: "%s")' % default_config,
        ' --help, -h show this help message and exit',
        '',
    )
    for line in usage_lines:
        print(line)
def create_config(app_name: str, default_config: str) -> Config:
    """Step 1: parse command-line options and load the config file.

    Exits the process (after printing usage) on a parse error, on -h/--help,
    or when the config file does not exist.
    """
    cmd = sys.argv[0]
    try:
        opts, args = getopt.getopt(args=sys.argv[1:],
                                   shortopts='hf:',
                                   longopts=['help', 'config='])
    except getopt.GetoptError:
        show_help(cmd=cmd, app_name=app_name, default_config=default_config)
        sys.exit(1)
    # pick up --config; any other recognised option just shows usage and exits
    ini_file = None
    for opt, arg in opts:
        if opt != '--config':
            show_help(cmd=cmd, app_name=app_name, default_config=default_config)
            sys.exit(0)
        ini_file = arg
    # fall back to the default path when --config was not given
    if ini_file is None:
        ini_file = default_config
    if not Storage.exists(path=ini_file):
        show_help(cmd=cmd, app_name=app_name, default_config=default_config)
        print('')
        print('!!! config file not exists: %s' % ini_file)
        print('')
        sys.exit(0)
    # load config from file
    config = Config.load(file=ini_file)
    print('>>> config loaded: %s => %s' % (ini_file, config))
    return config
|
[
"albert.moky@gmail.com"
] |
albert.moky@gmail.com
|
618f83793b61456aa8298f3a72b371b921d7f30a
|
293db74378eb425d54ae2ea4735d442d594cc0b8
|
/myapp/migrations/0004_auto_20160517_0559.py
|
665ada23577a4d573d90b4f6498924033b5b5e4e
|
[] |
no_license
|
ajithkjames/contactsmanager
|
6c5944ee126411db71bcb43a274a6de92c5c236d
|
c546e4fd53e835d85f66aef0890f9a46e945d275
|
refs/heads/master
| 2020-07-03T00:33:56.353982
| 2016-11-19T12:48:18
| 2016-11-19T12:48:18
| 74,207,861
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 601
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-17 05:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 1.9.6 makemigrations).

    Drops the implicit auto-created ``id`` column on ``group`` and promotes
    ``group_code`` to be the table's primary key.
    """

    dependencies = [
        ('myapp', '0003_members_contact_member'),
    ]
    operations = [
        # Remove the surrogate integer key first ...
        migrations.RemoveField(
            model_name='group',
            name='id',
        ),
        # ... then make group_code the new (unique, non-serialized) primary key.
        migrations.AlterField(
            model_name='group',
            name='group_code',
            field=models.CharField(max_length=30, primary_key=True, serialize=False, unique=True),
        ),
    ]
|
[
"you@example.com"
] |
you@example.com
|
ee4d0edf66d7a8c4ddce1c673e56b805bace6794
|
039c5187dd45b8dd2c960c1570369d6eb11eae83
|
/soufang/config.py
|
efd983939f003ba81021d15df92d8a15a3eca8df
|
[] |
no_license
|
huazhicai/spider
|
5636951c1e0db4dc7b205cacfe8e881a08ff2015
|
d72ce471b0388d6d594853120c8e8f93694015a6
|
refs/heads/master
| 2021-07-24T23:01:15.124742
| 2017-11-04T09:05:46
| 2017-11-04T09:05:46
| 107,860,473
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,411
|
py
|
# 爬取房天下的楼盘的评论
# 获取城市名称
import re
import requests
from bs4 import BeautifulSoup
def get_city():
    """Fetch fang.com's city index page and print the distinct city subdomains.

    Example anchor matched:
    <a href="http://gaoling.fang.com/" target="_blank">...</a>
    """
    url = 'http://www.fang.com/SoufunFamily.htm'
    response = requests.get(url)
    link_re = re.compile(r'<a href="http://(\w+)\.fang\.com/" target="_blank">.+?</a>', re.S)
    subdomains = set(re.findall(link_re, response.text))
    print(len(subdomains))
    print(subdomains)
# get_city()
CITYS = ['yt', 'zaozhuang', 'zhongwei', 'qianxi', 'boluo', 'hegang', 'yl', 'yunfu', 'meishan', 'fq', 'yangchun',
'linzhi', 'rudong', 'mengjin', 'feicheng', 'zhucheng', 'bengbu', 'huainan', 'dongxing', 'xinmi', 'linqu',
'luanxian', 'jingmen', 'wenan', 'zb', 'huzhou', 'yuzhong', 'xf', 'fenghua', 'us', 'longkou', 'lijiang',
'ganzi', 'hbjz', 'sz', 'tl', 'hbzy', 'minqing', 'gongzhuling', 'laiwu', 'gxby', 'qingzhen', 'zz', 'anqing',
'linfen', 'ruian', 'xinghua', 'feixi', 'lujiang', 'njgc', 'anning', 'jxfc', 'tongshan', 'anyang', 'luoning',
'pingtan', 'shiyan', 'chengde', 'wuzhong', 'zhouzhi', 'liaozhong', 'qingxu', 'zhaotong', 'jm', 'jiaozhou',
'taishan', 'tc', 'hechi', 'whhn', 'anshun', 'xinyi', 'wuhan', 'huaiyuan', 'xj', 'yingtan', 'jlys', 'ruijin',
'lyg', 'xlglm', 'changge', 'changli', 'honghe', 'huaibei', 'bazhong', 'longhai', 'chifeng', 'ld', 'macau',
'heyuan', 'mudanjiang', 'yilan', 'xiangxiang', 'zjfy', 'panzhihua', 'jiujiang', 'tieling', 'xiuwen', 'faku',
'jinxian', 'hbyc', 'benxi', 'hlbe', 'jiaonan', 'deqing', 'shaoyang', 'bijie', 'shangrao', 'heihe', 'suizhou',
'nanjing', 'alaer', 'germany', 'jimo', 'anqiu', 'wujiaqu', 'baoji', 'qinzhou', 'wuzhishan', 'guan', 'jiangdu',
'yuxian', 'liyang', 'xinjin', 'jiayuguan', 'huizhou', 'tongling', 'haiyang', 'jintan', 'gaomi', 'kuitun', 'yc',
'ruyang', 'erds', 'shangyu', 'xiaogan', 'xinyu', 'dz', 'tmsk', 'zjxs', 'huangshan', 'baishan', 'yongcheng',
'huidong', 'pengzhou', 'lnta', 'hengxian', 'taizhou', 'ly', 'luanchuan', 'ziyang', 'anshan', 'huadian',
'qingyang', 'datong', 'st', 'kelamayi', 'tulufan', 'tonghua', 'jiande', 'qianan', 'zhoukou', 'guangrao',
'yongkang', 'chuzhou', 'liupanshui', 'changdu', 'ny', 'zs', 'huangshi', 'xianning', 'kaifeng', 'spain',
'diqing', 'ruzhou', 'hbbz', 'jh', 'sf', 'tongchuan', 'dengfeng', 'wafangdian', 'yuncheng', 'cd', 'aj',
'zhangye', 'pulandian', 'laizhou', 'jinhu', 'changchun', 'zigong', 'qiannan', 'loudi', 'sdpy', 'ali',
'gaobeidian', 'dengzhou', 'kaiyang', 'jiaozuo', 'yiyang', 'xinmin', 'dujiangyan', 'dingxing', 'ytcd',
'yueyang', 'yongtai', 'penglai', 'cangzhou', 'huoqiu', 'shihezi', 'huaihua', 'jieyang', 'fanchang', 'jn',
'linqing', 'tengzhou', 'nujiang', 'cswc', 'lf', 'pingliang', 'wg', 'zy', 'bazhou', 'tianshui', 'pizhou',
'dehui', 'malaysia', 'weinan', 'xiantao', 'tj', 'lnzh', 'changshu', 'fuyang', 'sansha', 'hbwj', 'dh', 'yuxi',
'taixing', 'meizhou', 'xm', 'zhangzhou', 'linan', 'ahsuzhou', 'zoucheng', 'yinchuan', 'chizhou', 'heze',
'peixian', 'jinchang', 'ganzhou', 'funing', 'jingdezhen', 'wuzhou', 'bh', 'huaian', 'xuchang', 'chaoyang',
'jz', 'lvliang', 'yk', 'qz', 'la', 'anda', 'dianpu', 'cq', 'ksys', 'chicago', 'gaoyang', 'shuyang', 'gdlm',
'sh', 'hz', 'gz', 'songyuan', 'nc', 'dongtai', 'changle', 'sg', 'cqnanchuan', 'leiyang', 'nanan',
'zhangjiajie', 'greece', 'shunde', 'guangyuan', 'baoshan', 'tongren', 'linxia', 'dangtu', 'huludao', 'wz',
'yongdeng', 'hetian', 'xingtai', 'haiyan', 'sdjy', 'boston', 'donggang', 'jy', 'rz', 'yuhuan', 'wuan',
'guzhen', 'dali', 'ningde', 'neijiang', 'fangchenggang', 'sdsh', 'xn', 'nanyang', 'tongcheng', 'nn', 'hnyz',
'jixi', 'chuxiong', 'emeishan', 'laixi', 'betl', 'chaozhou', 'deyang', 'sdcl', 'xz', 'dongfang', 'gongyi',
'pinghu', 'jl', 'qd', 'sanming', 'xt', 'maoming', 'zhijiang', 'haimen', 'lianjiang', 'xinjian', 'sq',
'yanbian', 'guyuan', 'hami', 'qianjiang', 'yongning', 'suining', 'yibin', 'jxja', 'wlcb', 'dayi', 'sxly',
'dangyang', 'haining', 'lantian', 'lc', 'hd', 'puyang', 'qitaihe', 'quanshan', 'dingxi', 'jx', 'weihai', 'dy',
'chaohu', 'bozhou', 'bj', 'kashi', 'yili', 'jiuquan', 'ningxiang', 'ahcf', 'xuancheng', 'xinji', 'luzhou',
'heshan', 'shangzhi', 'zjtl', 'alsm', 'baicheng', 'wuchang', 'chunan', 'kaili', 'zhaoqing', 'cqliangping',
'lasa', 'cqchangshou', 'haian', 'qujing', 'hbjs', 'huian', 'liling', 'yangquan', 'jingjiang', 'jianyang',
'jiyuan', 'zhenjiang', 'hbql', 'shanwei', 'wuhu', 'zj', 'rikaze', 'feidong', 'daqing', 'pingxiang', 'cqwulong',
'xianyang', 'aba', 'zhangjiakou', 'agent', 'byne', 'pingdu', 'shizuishan', 'wuhe', 'jinzhou', 'my', 'liuyang',
'huxian', 'zhoushan', 'tianmen', 'qixia', 'zhaoyuan', 'zhuji', 'jizhou', 'enshi', 'cqtongliang', 'jncq',
'hezhou', 'yangqu', 'zhongmou', 'fengcheng', 'tz', 'yuyao', 'bulgaria', 'dxal', 'fushun', 'yichun', 'jr',
'qingyuan', 'baoying', 'baise', 'xingyang', 'haidong', 'yixing', 'pingdingshan', 'hanzhong', 'lhk', 'yanshi',
'cqzhongxian', 'zh', 'xinyang', 'hengyang', 'au', 'youxian', 'guilin', 'hbys', 'renqiu', 'putian', 'luan',
'nt', 'mianyang', 'xishuangbanna', 'gaoyou', 'shangluo', 'quangang', 'puer', 'xam', 'yangjiang', 'qionglai',
'yizheng', 'wuwei', 'jiamusi', 'yutian', 'zhangqiu', 'haixi', 'shannan', 'hnyy', 'cn', 'xinzheng', 'portugal',
'jiangyan', 'enping', 'bt', 'liuzhou', 'kangping', 'luannan', 'jc', 'longyan', 'dandong', 'zunyi', 'hailin',
'sxyulin', 'wushan', 'hebi', 'laiyang', 'hailaer', 'changyi', 'rugao', 'yanling', 'cyprus', 'zouping', 'hbzx',
'xintai', 'scjt', 'hbps', 'xx', 'nanping', 'luoyuan', 'xinle', 'fengdu', 'hblt', 'changde', 'cz', 'wanning',
'sx', 'yz', 'laishui', 'huangnan', 'xilinhaote', 'zhaodong', 'zhuozhou', 'liangshan', 'jxfuzhou', 'yidu',
'wenling', 'yanan', 'fs', 'hnxa', 'zunhua', 'dl', 'fuan', 'binzhou', 'liaoyang', 'jinzhong', 'xiangxi', 'sjz',
'leshan', 'yueqing', 'bayan', 'xinzhou', 'nanchong', 'jssn', 'huanggang', 'hljyichun', 'chongzuo', 'guoluo',
'ninghai', 'bd', 'fuling', 'yancheng', 'quzhou', 'yiwu', 'nb', 'nongan', 'fjax', 'zhumadian', 'donghai', 'cs',
'qhd', 'dazhou', 'cixi', 'ezhou', 'puning', 'gannan', 'guigang', 'zhaozhou', 'taian', 'yongqing', 'haicheng',
'dehong', 'sanmenxia', 'shuozhou', 'zhenhai', 'qidong', 'wuxi', 'siping', 'abazhou', 'sy', 'danzhou',
'dingzhou', 'jsfx', 'tongxiang', 'ls', 'qianxinan', 'yaan', 'fuxin', 'shishi', 'linhai', 'shangqiu', 'zjg',
'chongzhou', 'luohe', 'huairen', 'shaoguan', 'cqkaixian', 'xian', 'naqu', 'yushu', 'akesu', 'xiangyang',
'ankang', 'fz', 'kuerle', 'qj', 'suzhou', 'baiyin', 'cqjiangjin', 'jian', 'dg', 'kzls', 'kaiping', 'longnan',
'wenshan', 'panjin', 'ks', 'songxian', 'haibei', 'changxing', 'chenzhou', 'linyi', 'jingzhou', 'hn',
'qingzhou', 'ya', 'guangan', 'laibin', 'qiqihaer', 'yongchun', 'wf', 'zhongxiang', 'binxian', 'lincang',
'changzhi', 'gaoling', 'yongzhou', 'lankao', 'zhuzhou', 'hs', 'qiandongnan', 'wuhai', 'yichuan', 'shennongjia',
'shuangyashan', 'suihua', 'jining', 'liaoyuan', 'mas']
|
[
"936844218@qq.com"
] |
936844218@qq.com
|
dc42acc3aeb1dbbb67aad2c35d110995714ba7eb
|
8d943e4a4af2a08653988dba7f6a342eea113e9c
|
/awspy/ecs/__init__.py
|
650f3cb335dd9ab6f2d98703cb6cd2618adb43a3
|
[
"MIT"
] |
permissive
|
ScholarPack/awspy
|
1d7b7b2b427334590fac4d67a73a4b1ad575fdec
|
750e38c5f3914dc4bb9d4536fad6a1af5c2bc4a7
|
refs/heads/main
| 2023-06-17T00:11:27.934590
| 2021-07-16T12:25:20
| 2021-07-16T12:25:20
| 385,636,223
| 0
| 0
|
MIT
| 2021-07-16T12:25:21
| 2021-07-13T14:40:03
|
Python
|
UTF-8
|
Python
| false
| false
| 29
|
py
|
from .fargate import Fargate
|
[
"joe@scholarpack.com"
] |
joe@scholarpack.com
|
5a20c1124e3ec671a9fe3d3225fe84106e044deb
|
ef339cfc51184ecd95b92e198994f54ef3001656
|
/section2/beautifulsoup/beautifulsoup1.py
|
69e7338d6b2e521cfd9d35d785aecf233a9be875
|
[] |
no_license
|
akmris00/pythoncrowring
|
a03385e92fb276d3a87a703da9017de1e3ce0db1
|
6b676debce3acab9856f0b7411942218b11583e7
|
refs/heads/master
| 2020-05-05T13:21:09.069117
| 2019-04-29T02:18:03
| 2019-04-29T02:18:03
| 180,073,178
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 956
|
py
|
# Demonstrate BeautifulSoup's find_all() on a small in-memory HTML document.
from bs4 import BeautifulSoup
import sys
import io

# Re-wrap stdout/stderr so Unicode prints correctly on any console encoding.
sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding='utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding='utf-8')

html = """
<html><body>
<ul>
<li><a href="http://www.naver.com">naver</a></li>
<li><a href="http://www.daum.net">daum</a></li>
<li><a href="http://www.daum.com">daum</a></li>
<li><a href="http://www.google.com">google</a></li>
<li><a href="http://www.tistory.com">tistory</a></li>
</ul>
</body></html>
"""

soup = BeautifulSoup(html, 'html.parser')
links = soup.find_all("a")

# anchors whose text is exactly "daum"
daum_anchors = soup.find_all("a", string="daum")
print(daum_anchors)

# at most the first three anchors
first_three = soup.find_all("a", limit=3)
print(first_three)

# bare text nodes matching either word
matched_text = soup.find_all(string=["naver", "google"])
print(matched_text)

# dump each anchor's text followed by its attribute dict
for anchor in links:
    attrs = anchor.attrs
    text = anchor.string
    print(text)
    print(attrs)
|
[
"31494886+akmris00@users.noreply.github.com"
] |
31494886+akmris00@users.noreply.github.com
|
8c63ae9a0ae5a2f7dad10eff114d3314b1fefcc5
|
452e027d23fa443c0ec54bc9dd9ef50dbe7820b9
|
/grader/configfile.py
|
220d3677e6e49282ab5504a78758acc8f392ee8a
|
[] |
no_license
|
jakobj/grader
|
af3859b35a3e569d4dd841097d6a64e8162a5431
|
c69b9a6d638cc4fe78986d7a0ec15bed512b98f6
|
refs/heads/master
| 2021-01-21T03:31:06.952052
| 2015-05-05T08:15:06
| 2015-05-05T08:15:06
| 35,836,901
| 0
| 0
| null | 2015-05-18T19:02:41
| 2015-05-18T19:02:41
| null |
UTF-8
|
Python
| false
| false
| 2,341
|
py
|
import configparser
import operator
import collections
class _Section:
    """Typed, dict-like view over one section of a ConfigParser.

    Values are converted with *type* on read and stringified on write.
    Missing options raise KeyError (not configparser.NoOptionError).
    """

    def __init__(self, configparser, section, type=str):
        self.cp = configparser      # underlying ConfigParser
        self.section = section      # section name this view wraps
        self.type = type            # conversion applied to every read value

    def __getitem__(self, item):
        try:
            raw = self.cp.get(self.section, item)
        except configparser.NoOptionError:
            raw = None
        if raw is None:
            raise KeyError(item)
        return self.type(raw)

    def __setitem__(self, item, value):
        self.cp.set(self.section, item, str(value))

    def get(self, item, fallback):
        """Return the converted value, or *fallback* if the option is missing."""
        try:
            return self[item]
        except KeyError:
            return fallback

    def create(self, item, fallback=None):
        """Return the value, creating it from *fallback* (default: self.type) if absent."""
        factory = self.type if fallback is None else fallback
        try:
            return self[item]
        except KeyError:
            value = factory()
            self[item] = value
            return value

    def clear(self, *keys):
        """Remove the given options, or every option when none are given."""
        for key in keys or self.keys():
            self.cp.remove_option(self.section, key)

    def keys(self):
        for name, _ in self.cp.items(self.section):
            yield name

    def values(self):
        for _, value in self.cp.items(self.section):
            yield self.type(value)

    def items(self):
        for name, value in self.cp.items(self.section):
            yield name, self.type(value)

    def print_sorted(self):
        """Print all options as 'key = value', sorted by converted value."""
        for key, val in sorted(self.items(), key=operator.itemgetter(1)):
            print(key, '=', val)
class ConfigFile:
    """INI-backed configuration file exposing typed _Section views.

    *sections* maps section name -> value type; sections missing from the
    file are created so every requested section is always available.
    """

    def __init__(self, filename, **sections):
        self.filename = filename
        cp = configparser.ConfigParser(comment_prefixes='#', inline_comment_prefixes='#')
        cp.read(filename)  # silently ignores a missing file
        self.sections = collections.OrderedDict()
        for name, value_type in sections.items():
            if not cp.has_section(name):
                cp.add_section(name)
            self.sections[name] = _Section(cp, name, value_type)
        self.cp = cp

    def __getitem__(self, section):
        return self.sections[section]

    def save(self, filename=None):
        """Write the configuration back out (default: the original path)."""
        target = self.filename if filename is None else filename
        with open(target, 'w') as f:
            self.cp.write(f)
|
[
"zbyszek@in.waw.pl"
] |
zbyszek@in.waw.pl
|
3c797fe5b44b6c379bf7b9f24fb25eccdde31f0a
|
d700b9ad1e0b7225871b65ce0dafb27fb408c4bc
|
/students/k3342/kursoviks/Salnikova_Nadezhda/alpinism-server/Lr3/settings.py
|
a8a620419b2f15386ce6df96cc1499b273f97ca9
|
[
"MIT"
] |
permissive
|
TonikX/ITMO_ICT_WebProgramming_2020
|
a8c573ed467fdf99327777fb3f3bfeee5714667b
|
ba566c1b3ab04585665c69860b713741906935a0
|
refs/heads/master
| 2023-01-11T22:10:17.003838
| 2020-10-22T11:22:03
| 2020-10-22T11:22:03
| 248,549,610
| 10
| 71
|
MIT
| 2023-01-28T14:04:21
| 2020-03-19T16:18:55
|
Python
|
UTF-8
|
Python
| false
| false
| 4,929
|
py
|
"""
Django settings for Lr3 project.
Generated by 'django-admin startproject' using Django 3.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and load
# it from an environment variable before any production deployment.
SECRET_KEY = 'a^=#f--(47%9npb9i3i6%1djiza^h&v)wdkhthbo=qg(-+bj!f'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#'alpinism.apps.AlpinismConfig',
#'django_summernote',
'rest_framework',
"rest_framework.authtoken",
'djoser',
'alpinism',
'django_filters',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Lr3.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Lr3.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'lr3.sqlite3'),
# }
# }
# PostgreSQL connection for the `alpinism` application database.
# NOTE(review): credentials are hard-coded in source control — move USER /
# PASSWORD (and ideally HOST / PORT) into environment variables or an
# untracked local settings file before deploying.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'alpinism',
        'USER': 'uninadia',
        'PASSWORD': 'kitten',
        'HOST': 'localhost',
        'PORT': '5432',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_DIR = os.path.join(BASE_DIR, 'static')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAdminUser',
'rest_framework.permissions.AllowAny',
),
'PAGE_SIZE': 10,
'DEFAULT_AUTHENTICATION_CLASSES': (
# 'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
'EXCEPTION_HANDLER': 'rest_framework_json_api.exceptions.exception_handler',
'DEFAULT_PAGINATION_CLASS':
'rest_framework_json_api.pagination.PageNumberPagination',
'DEFAULT_PARSER_CLASSES': (
'rest_framework_json_api.parsers.JSONParser',
'rest_framework.parsers.FormParser',
'rest_framework.parsers.MultiPartParser'
),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework_json_api.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
'DEFAULT_METADATA_CLASS': 'rest_framework_json_api.metadata.JSONAPIMetadata',
'DEFAULT_FILTER_BACKENDS': (
'django_filters.rest_framework.DjangoFilterBackend',
),
}
CORS_ORIGIN_ALLOW_ALL = True
|
[
"uninadia@yandex.ru"
] |
uninadia@yandex.ru
|
e6cfad498bd1578ed61cc72f6ff9f0afede40cf4
|
c651ea919f24fcf51cbe27d1c336b9324fda74e6
|
/crypto/500-john-pollard/solve.py
|
b84ac8258c6a01329b9083966cb61303ce369c20
|
[] |
no_license
|
paiv/picoctf2019
|
31f611b21bcab0d1c84fd3cb246c7dd58f6949df
|
90b1db56ac8c5b47ec6159d45c8decd6b90d06d5
|
refs/heads/master
| 2020-08-11T03:38:55.580861
| 2019-10-11T20:48:44
| 2019-10-11T20:48:44
| 214,483,178
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 385
|
py
|
#!/usr/bin/env python
from Crypto.PublicKey import RSA
from Crypto.Util.number import inverse as modinv
def solve():
    """Rebuild the RSA private key from the known prime factors and export it.

    The CTF gives N and its factors P, Q; the private exponent D is the
    modular inverse of the public exponent E modulo phi(N).
    """
    N = 4966306421059967
    P = 73176001
    Q = 67867967
    E = 65537
    assert N == P * Q
    phi = (P - 1) * (Q - 1)
    D = modinv(E, phi)
    key = RSA.construct((N, E, D, P, Q))
    return key.exportKey().decode('ascii')


if __name__ == '__main__':
    print(solve())
|
[
"pavels.code@gmail.com"
] |
pavels.code@gmail.com
|
29afa6286ed62ea50b6c5c7058b06d924626062a
|
f5c6c0a7036fae1878f17c1262469e8df0387391
|
/polls/views.py
|
d2c56a57f5ab8689b91882cdac841cf8281c12bd
|
[] |
no_license
|
Ilyabuk/django1
|
1733c6f469e5b7a7340b2ecb04df1f16c8c2d972
|
1bbf5bfc32969d5f004b431a3533a5a8337b6b2d
|
refs/heads/master
| 2021-09-02T04:40:23.579808
| 2017-12-30T11:52:17
| 2017-12-30T11:52:17
| 115,792,905
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 133
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
def index(request):
    """Polls index view: responds with a fixed plain-text body."""
    payload = "HttpResponse"
    return HttpResponse(payload)
|
[
"ilyabuk@gmail.com"
] |
ilyabuk@gmail.com
|
c8145d6c24dc99f140a89cac5e485e16c2be0d30
|
805b939113372eeed5b9597a75993f853bb1cdac
|
/数据挖掘/test039.py
|
f0e6c69bb0a150c844728c786d932aee7510f7b7
|
[] |
no_license
|
840614476/my_test
|
0fa9961656ad5c032c8f6396ea84991640ae722d
|
7a86c47305824c4163579864d70da7a3c30cb0d4
|
refs/heads/master
| 2020-07-30T07:35:22.445181
| 2019-09-24T09:20:24
| 2019-09-24T09:20:24
| 206,029,524
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 491
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# author:TBH time:2019/9/10
# Demo of the jieba Chinese word-segmentation library: basic cutting,
# keyword extraction, and tokenization with character offsets.
import jieba.analyse
import jieba.posseg
# Adjusting word frequency (uncomment add_word below to boost a custom word).
text = "我喜欢上海东方明珠"
#jieba.add_word("上海东方", freq=700)
ci_1 = jieba.cut(text)  # lazy generator of segmented words
for item in ci_1:
    print(item)
print("")
# Extract the top-weighted keywords from the sentence.
text2 = "我喜欢上海东方明珠"
tag = jieba.analyse.extract_tags(text2, 3)  # 3 highest-weight keywords
print(tag)
print("")
# Tokenize, yielding each word together with its (start, end) offsets.
pos = jieba.tokenize(text2)
for item in pos:
    print(item)
|
[
"840614476@qq.com"
] |
840614476@qq.com
|
cff6e57b065a2b0a1025d8fa362771af3f2d5a3f
|
316e094af6a43cf1d1a50dc741583e166516fda1
|
/lappd/spectra/pickle_data_lcwp_single_dir.py
|
ec92fd75796933aec04a7e564488a9e05428e9e6
|
[
"MIT"
] |
permissive
|
jbenito3/hyperk
|
650fb9a12418b2bcdfeada076c98c8a748f12f00
|
b75ca789d51b239d1d97221b024782c927aaa09a
|
refs/heads/master
| 2020-05-21T02:28:49.065421
| 2017-02-22T17:00:45
| 2017-02-22T17:00:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,381
|
py
|
#!/usr/bin/env ipython
import pandas as pd
import sys
import os
from scipy import signal
# Defaults come from TekTronix 'scope
def filter_signal(data, nSamples = 1000, frameSize = 40e-9):
'''Butterworth filter of signal.'''
fs = float(nSamples)/frameSize # sample rate (1000 samples in 40ns)
nyq = fs*0.5 # Nyquist frequency
high = 700e6/nyq # high frequency cut-off
b, a = signal.butter(2, high, 'low', analog = False)
y = signal.lfilter(b, a, data)
return y
# Conversion factors # Cn:INSPECT?
# Command line: sys.argv[1] = measurement directory, sys.argv[2] = file
# suffix, sys.argv[3] = scope vertical gain.
vert_gain = float(sys.argv[3]) #6.1035e-07 # "VERTICAL_GAIN"
vert_offset = -5.700e-2 # "VERTICAL_OFFSET"
vert_units = 'V' # "VERTUNIT"
horiz_units = 's' # "HORUNIT"
horiz_scale = 2.500e-10 # "HORIZ_INTERVAL"
# Data characteristics
n_samples = 402 # TODO: find this automatically
frameSize = n_samples*horiz_scale
print "Frame size:", frameSize
# Single measurement directory taken from the command line.
measurements = [sys.argv[1]]
print measurements
n_measurements = len(measurements)
measurement_count = 1
for measurement in measurements:
    print('Run %d of %d' % (measurement_count, n_measurements))
    # Initialise dictionaries for producing final DataFrame
    output_dict = {}
    output_dict['time'] = []
    output_dict['eventID'] = []
    output_dict['voltage'] = []
    output_dict['filtered_voltage'] = []
    # Produce time index
    index = [i * horiz_scale * 1e9 for i in range(n_samples)] # Convert to ns
    # Read filenames from inner directory
    files = [x for x in os.listdir(measurement) if x.endswith(sys.argv[2]+'.txt')]
    n_files = len(files)
    file_count = 0
    # Loop through each file_pair to extract events
    for file in files:
        data = open((measurement+ '/'+file),'r').read()
        n_points = int(len(data)/4)        # 4 hex characters per sample
        n_events = int(n_points/n_samples)
        # Convert HEX to mV
        dec_data = []
        for i in range(n_points):
            dec_value = int(data[i*4:(i*4)+4], 16)
            dec_data .append((dec_value*vert_gain + vert_offset)*1e3) # to mV
        # Separate into events
        for i in range(n_events):
            output_dict['time'] .extend(index)
            output_dict['eventID'] .extend([i + file_count*n_events] * n_samples)
            voltages = []
            for j in range(n_samples):
                voltages .append(dec_data[i*n_samples+j])
            output_dict['voltage'] .extend(voltages)
            # Apply Butterworth filter
            output_dict['filtered_voltage'] .extend(filter_signal(voltages, n_samples, frameSize))
        file_count += 1
    measurement_count += 1
# Convert dictionaries to df then pickle
# NOTE(review): output_dict is re-initialised per measurement but pickled only
# after the loop, so only the LAST measurement would be saved — harmless for
# the current single-directory usage; confirm before extending the list.
output_df = pd.DataFrame.from_dict(output_dict)
print output_df
print "Events:", output_df.shape[0]/n_samples
output_df .to_pickle(measurement + '_' + sys.argv[2]+'.pkl')
|
[
"greig.cowan@gmail.com"
] |
greig.cowan@gmail.com
|
63ecfb627b54627f1864d4e3a644988f45674628
|
ebdd36e75bccc8c3f80e231b4b0409c2cceaea20
|
/UPTEES/Detectors/StructureAnalyzer.py
|
85f3374061f394ac23c202d43077c89987f2de19
|
[] |
no_license
|
MaximumEntropy/UPSITE
|
c9865dd675fe8dfd821de9d6af84aa74f224605d
|
77b29ce00c3e0b35d566b6935871e4dda184ec79
|
refs/heads/master
| 2021-01-10T16:19:42.139505
| 2015-10-28T07:32:02
| 2015-10-28T07:32:02
| 45,011,844
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 36,234
|
py
|
#from Detector import Detector
import sys, os
sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/..")
import Utils.ElementTreeUtils as ETUtils
from collections import defaultdict
import types
class StructureAnalyzer():
    def __init__(self, defaultFileNameInModel="structure.txt"):
        """Create an empty analyzer; fill it via analyze() or a saved model."""
        self.modelFileName = defaultFileNameInModel  # file name used when saving into a model
        self.reset()
# Initialization ##########################################################
    def reset(self):
        """Clear all analysis results, both primary and supporting."""
        self.relations = None
        self.entities = None
        self.events = None
        self.modifiers = None
        self.targets = None
        # supporting analyses
        self.eventArgumentTypes = None
        self.edgeTypes = None
    def _init(self):
        """Reset, then replace the None placeholders with empty dicts for a new run."""
        self.reset()
        self.relations = {}
        self.entities = {}
        self.events = {}
        self.modifiers = {}
        self.targets = {}
    def isInitialized(self):
        # An analysis has been run (or loaded) once self.events is a dict.
        return self.events != None
# analysis ################################################################
    def analyze(self, inputs, model=None):
        """Build the structure analysis from one or more Interaction XML inputs.

        inputs -- a single path/XML object or a list of them
        model  -- optional model package to save() the resulting analysis into
        """
        self._init()
        if type(inputs) in types.StringTypes:  # accept a single file name (Python 2 idiom)
            inputs = [inputs]
        for xml in inputs:
            print >> sys.stderr, "Analyzing", xml
            xml = ETUtils.ETFromObj(xml)
            for document in xml.getiterator("document"):
                # Collect elements into dictionaries
                entityById = {}
                for entity in document.getiterator("entity"):
                    entityById[entity.get("id")] = entity
                interactions = []
                interactionsByE1 = defaultdict(list)
                for interaction in document.getiterator("interaction"):
                    interactions.append(interaction)
                    interactionsByE1[interaction.get("e1")].append(interaction)
                # Pre-compute which primary arguments each Site edge belongs to
                siteOfTypes = self.buildSiteOfMap(interactions, interactionsByE1, entityById)
                # Add entity elements to analysis
                for entity in document.getiterator("entity"):
                    self.addEntityElement(entity, interactionsByE1)
                # Add interaction elements to analysis
                for interaction in interactions:
                    self.addInteractionElement(interaction, entityById, siteOfTypes[interaction])
        # Calculate event definition argument limits from event instances
        for event in self.events.values():
            event.countArguments()
        self._updateSupportingAnalyses()
        if model != None:
            self.save(model)
def buildSiteOfMap(self, interactions, interactionsByE1, entityById):
siteOfTypes = defaultdict(set)
#interactionsByE2 = {}
#for interaction in interactions:
# interactionsByE2[interaction.get("e2")] = interaction
interactionById = {}
for interaction in interactions:
interactionById[interaction.get("id")] = interaction
for interaction in interactions:
if interaction.get("type") == "Site":
if interaction.get("siteOf") != None: # annotated primary arguments
siteOfTypes[interaction].add(interactionById[interaction.get("siteOf")].get("type"))
else:
triggerId = interaction.get("e1")
entityEntityId = interaction.get("e2")
siteParentProteinIds = set()
for interaction2 in interactionsByE1[entityEntityId]:
if interaction2.get("type") == "SiteParent":
siteParentProteinIds.add(interaction2.get("e2"))
for interaction2 in interactionsByE1[triggerId]:
if interaction2 == interaction or interaction2.get("Type") == "Site":
continue
if interaction2.get("e1") == triggerId and interaction2.get("e2") in siteParentProteinIds:
siteOfTypes[interaction].add(interaction2.get("type"))
return siteOfTypes
    def addTarget(self, element):
        """Register a non-given element's type as an extraction target.

        Targets are grouped into the two classes "ENTITY" and "INTERACTION";
        elements marked given="True" are inputs, not prediction targets.
        """
        if element.get("given") != "True":
            if element.tag == "interaction":
                targetClass = "INTERACTION"
            elif element.tag == "entity":
                targetClass = "ENTITY"
            else:
                raise Exception("Unsupported non-given element type " + element.tag)
            if targetClass not in self.targets:
                self.targets[targetClass] = Target(targetClass)
            self.targets[targetClass].targetTypes.add(element.get("type"))
    def addInteractionElement(self, interaction, entityById, siteOfTypes):
        """Route an interaction to either the relation analysis or, when it is
        an event argument (event="True"), to its event type's argument stats."""
        self.addTarget(interaction)
        if not (interaction.get("event") == "True"):
            self.addRelation(interaction, entityById)
        else:
            e1Type = entityById[interaction.get("e1")].get("type")
            e2Type = entityById[interaction.get("e2")].get("type")
            # every event type must have been registered by addEntityElement first
            if e1Type not in self.events:
                raise Exception("Argument " + interaction.get("id") + " of type " + interaction.get("type") + " for undefined event type " + e1Type)
            self.events[e1Type].addArgumentInstance(interaction.get("e1"), interaction.get("type"), e1Type, e2Type, siteOfTypes)
    def addRelation(self, interaction, entityById):
        """Record one relation instance (non-event interaction) under its type."""
        relType = interaction.get("type")
        if relType not in self.relations:
            self.relations[relType] = Relation(relType)
        e1Type = entityById[interaction.get("e1")].get("type")
        e2Type = entityById[interaction.get("e2")].get("type")
        self.relations[relType].addInstance(e1Type, e2Type, interaction.get("directed") == "True", interaction.get("e1Role"), interaction.get("e2Role"), interaction.get("id"))
    def addEntityElement(self, entity, interactionsByE1):
        """Register an entity element as either an event trigger or a plain entity,
        and record any speculation/negation modifiers seen on it.

        interactionsByE1 -- maps entity id -> list of interactions with that entity as e1
        """
        # Determine extraction target
        self.addTarget(entity)
        entityType = entity.get("type")
        isEvent = entity.get("event") == "True"
        if not isEvent and entity.get("id") in interactionsByE1: # event can be also defined by simply having outgoing argument edges
            for interaction in interactionsByE1[entity.get("id")]:
                if interaction.get("event") == "True":
                    isEvent = True
                    break
        if isEvent:
            if entityType not in self.events:
                self.events[entityType] = Event(entityType)
            self.events[entityType].addTriggerInstance(entity.get("id"))
        else:
            if entityType not in self.entities:
                self.entities[entityType] = Entity(entityType)
        # check for modifiers
        for modType in ("speculation", "negation"):
            if (entity.get(modType) != None):
                if modType not in self.modifiers:
                    self.modifiers[modType] = Modifier(modType)
                self.modifiers[modType].entityTypes.add(entityType)
    def _updateSupportingAnalyses(self):
        """Rebuild derived lookup structures from the collected definitions:
        the set of event argument types and the e1Type -> e2Type -> edge-types map."""
        self.eventArgumentTypes = set()
        self.edgeTypes = defaultdict(lambda:defaultdict(set))
        # Add relations to edge types
        for relation in self.relations.values():
            for e1Type in relation.e1Types:
                for e2Type in relation.e2Types:
                    self.edgeTypes[e1Type][e2Type].add(relation.type)
                    if not relation.directed:
                        # undirected relations are valid in both directions
                        self.edgeTypes[e2Type][e1Type].add(relation.type)
        # Process arguments
        for eventType in sorted(self.events.keys()):
            # Remove conflicting entities
            if eventType in self.entities:
                print >> sys.stderr, "Warning, removing ENTITY conflicting with EVENT for type " + eventType
                del self.entities[eventType]
            # Update analyses
            event = self.events[eventType]
            for argType in event.arguments:
                # Update set of known event argument types
                self.eventArgumentTypes.add(argType)
                # Add argument to edge types (argument is always directed)
                argument = event.arguments[argType]
                for e2Type in argument.targetTypes:
                    self.edgeTypes[eventType][e2Type].add(argType)
# validation ##############################################################
def getRelationRoles(self, relType):
if relType not in self.relations:
return None
relation = self.relations[relType]
if relation.e1Role == None and relation.e2Role == None:
return None
else:
return (relation.e1Role, relation.e2Role)
def hasEvents(self):
return len(self.events) > 0
def hasModifiers(self):
return len(self.modifiers) > 0
    def hasDirectedTargets(self):
        """True if any predicted interaction type is directed, i.e. it is either
        an event argument (always directed) or a directed relation."""
        if "INTERACTION" not in self.targets: # no interactions to predict
            return False
        for event in self.events.values(): # look for event argument targets (always directed)
            for argType in event.arguments:
                if argType in self.targets["INTERACTION"].targetTypes:
                    return True
        for relType in self.relations: # look for directed relation targets
            relation = self.relations[relType]
            assert relation.directed in [True, False]
            if relation.directed and relType in self.targets["INTERACTION"].targetTypes:
                return True
        return False
def isDirected(self, edgeType):
if edgeType in self.eventArgumentTypes:
return True
else:
relation = self.relations[edgeType]
assert relation.directed in [True, False]
return relation.directed
def isEvent(self, entityType):
return entityType in self.events
def isEventArgument(self, edgeType):
if edgeType in self.eventArgumentTypes:
return True
else:
assert edgeType in self.relations, (edgeType, self.relations)
return False
def getArgSiteOfTypes(self, entityType, edgeType, strict=False):
#if not edgeType in self.eventArgumentTypes:
# raise Exception("Edge type " + str(edgeType) + " is not an event argument and cannot be a site")
if not entityType in self.events:
if strict:
raise Exception("No event of type " + str(entityType))
return set()
event = self.events[entityType]
if not edgeType in event.arguments:
if strict:
raise Exception("Event of type " + str(entityType) + " cannot have an argument of type " + str(edgeType))
return set()
return self.events[entityType].arguments[edgeType].siteOfTypes
def getArgLimits(self, entityType, argType):
argument = self.events[entityType].arguments[argType]
return (argument.min, argument.max)
    def getValidEdgeTypes(self, e1Type, e2Type, forceUndirected=False):
        """Return the set of edge types allowed between two entity types.

        With forceUndirected=True both directions are merged into a fresh set;
        otherwise the internal set may be returned directly (do not mutate it).
        """
        assert type(e1Type) in types.StringTypes
        assert type(e2Type) in types.StringTypes
        if self.events == None:
            raise Exception("No structure definition loaded")
        validEdgeTypes = set()
        if e1Type in self.edgeTypes and e2Type in self.edgeTypes[e1Type]:
            if forceUndirected:
                validEdgeTypes = self.edgeTypes[e1Type][e2Type]
            else:
                return self.edgeTypes[e1Type][e2Type] # not a copy, so careful with using the returned set!
        if forceUndirected and e2Type in self.edgeTypes and e1Type in self.edgeTypes[e2Type]:
            # union() allocates a new set, so the internal maps stay untouched
            validEdgeTypes = validEdgeTypes.union(self.edgeTypes[e2Type][e1Type])
        return validEdgeTypes
def isValidEntity(self, entity):
if entity.get("type") in self.entities and entity.get("event") != "True":
return True
else:
return False
    def isValidRelation(self, interaction, entityById=None, issues=None):
        """Check a (non-event) interaction against its relation definition.

        Records the failure category in *issues* (a defaultdict(int)) when given.
        """
        if interaction.get("type") not in self.relations:
            if issues != None: issues["INVALID_TYPE:"+interaction.get("type")] += 1
            return False
        relDef = self.relations[interaction.get("type")]
        e1 = interaction.get("e1")
        if e1 not in entityById:
            if issues != None: issues["MISSING_E1:"+interaction.get("type")] += 1
            return False
        e2 = interaction.get("e2")
        if e2 not in entityById:
            if issues != None: issues["MISSING_E2:"+interaction.get("type")] += 1
            return False
        e1 = entityById[e1]
        e2 = entityById[e2]
        if e1.get("type") in relDef.e1Types and e2.get("type") in relDef.e2Types:
            return True
        elif (not relDef.directed) and e1.get("type") in relDef.e2Types and e2.get("type") in relDef.e1Types:
            # undirected relations may also match with endpoints swapped
            return True
        else:
            if issues != None: issues["INVALID_TARGET:"+interaction.get("type")] += 1
            return False
    def isValidArgument(self, interaction, entityById=None, issues=None):
        """Check an event-argument interaction: its type must be a known argument
        type, e1 must be a defined event type that allows this argument, and e2's
        type must be among the argument's valid targets.

        Records the failure category in *issues* (a defaultdict(int)) when given.
        """
        if interaction.get("type") not in self.eventArgumentTypes:
            if issues != None: issues["INVALID_TYPE:"+interaction.get("type")] += 1
            return False
        e1 = interaction.get("e1")
        if e1 not in entityById:
            if issues != None: issues["MISSING_E1:"+interaction.get("type")] += 1
            return False
        e1 = entityById[e1]
        if e1.get("type") not in self.events:
            if issues != None: issues["INVALID_EVENT_TYPE:"+interaction.get("type")] += 1
            return False
        eventDef = self.events[e1.get("type")]
        if interaction.get("type") not in eventDef.arguments:
            if issues != None: issues["INVALID_TYPE:"+interaction.get("type")] += 1
            return False
        argDef = eventDef.arguments[interaction.get("type")]
        e2 = interaction.get("e2")
        if e2 not in entityById:
            if issues != None: issues["MISSING_E2:"+interaction.get("type")] += 1
            return False
        e2 = entityById[e2]
        if e2.get("type") in argDef.targetTypes:
            return True
        else:
            if issues != None: issues["INVALID_TARGET:"+interaction.get("type")+"->"+e2.get("type")] += 1
            return False
    def isValidEvent(self, entity, args=None, entityById=None, noUpperLimitBeyondOne=True, issues=None):
        """Check a proposed event (trigger + arguments) against its event definition.

        entity -- event trigger element, or the event type as a string
        args -- proposed arguments: interaction elements, or (argType, e2Type) pairs
        entityById -- required when args are interaction elements
        noUpperLimitBeyondOne -- if True, maximum argument limits are not enforced
        issues -- optional defaultdict(int); when given, ALL problems are recorded
                  instead of returning at the first one
        """
        if args == None:
            args = []
        if type(entity) in types.StringTypes:
            entityType = entity
        else:
            entityType = entity.get("type")
        valid = True
        # check type
        if entityType not in self.events:
            if issues != None:
                issues["INVALID_TYPE:"+entityType] += 1
            return False
        # check validity of proposed argument count
        eventDefinition = self.events[entityType]
        if len(args) < eventDefinition.minArgs:
            if issues != None:
                issues["ARG_COUNT_TOO_LOW:"+entityType] += 1
                valid = False
            else:
                return False
        if (not noUpperLimitBeyondOne) and len(args) > eventDefinition.maxArgs:
            if issues != None:
                issues["ARG_COUNT_TOO_HIGH:"+entityType] += 1
                valid = False
            else:
                return False
        # analyze proposed arguments
        argTypes = set()
        argTypeCounts = defaultdict(int)
        argE2Types = defaultdict(set)
        for arg in args:
            if type(arg) in [types.TupleType, types.ListType]:
                argType, argE2Type = arg
            else: # arg is an interaction element
                argType = arg.get("type")
                argE2Type = entityById[arg.get("e2")].get("type")
            argTypeCounts[argType] += 1
            argE2Types[argType].add(argE2Type)
            argTypes.add(argType)
        argTypes = sorted(list(argTypes))
        # check validity of proposed arguments
        argumentDefinitions = eventDefinition.arguments
        for argType in argTypes:
            # Check if valid argument
            if argType not in argumentDefinitions:
                if issues != None:
                    issues["INVALID_ARG_TYPE:"+entityType+"."+argType] += 1
                    valid = False
                else:
                    return False
            else:
                argDef = argumentDefinitions[argType]
                # Check minimum limit
                if argTypeCounts[argType] < argDef.min: # check for minimum number of arguments
                    if issues != None:
                        issues["TOO_FEW_ARG:"+entityType+"."+argType] += 1
                        valid = False
                    else:
                        return False
                # Check maximum limit
                # noUpperLimitBeyondOne = don't differentiate arguments beyond 0, 1 or more than one.
                if (not noUpperLimitBeyondOne) and argTypeCounts[argType] > argDef.max: # check for maximum number of arguments
                    if issues != None:
                        issues["TOO_MANY_ARG:"+entityType+"."+argType] += 1
                        valid = False
                    else:
                        return False
                # Check validity of E2 types
                for e2Type in argE2Types[argType]:
                    if e2Type not in argDef.targetTypes:
                        if issues != None:
                            issues["INVALID_ARG_TARGET:"+entityType+"."+argType+":"+e2Type] += 1
                            valid = False
                        else:
                            return False
        # check that no required arguments are missing
        for argDef in argumentDefinitions.values():
            if argDef.type not in argTypes: # this type of argument is not part of the proposed event
                if argDef.min > 0: # for arguments not part of the event, the minimum limit must be zero
                    if issues != None:
                        issues["MISSING_ARG:"+entityType+"."+argDef.type] += 1
                        valid = False
                    else:
                        return False
        return valid
def _removeNestedElement(self, root, element):
for child in root:
if child == element:
root.remove(child)
break
else:
self._removeNestedElement(child, element)
    def validate(self, xml, printCounts=True, simulation=False, debug=False):
        """Iteratively remove entities and interactions that violate the learned structure.

        Runs per document until a fixed point, because removing an invalid
        interaction can in turn invalidate an event that depended on it.
        simulation=True only counts, without modifying the XML.
        Returns a defaultdict(int) of removed element categories.
        NOTE(review): the printCounts parameter is unused in this body.
        """
        counts = defaultdict(int)
        for document in xml.getiterator("document"):
            while (True):
                # Collect elements into dictionaries
                entityById = {}
                entities = []
                for entity in document.getiterator("entity"):
                    entityById[entity.get("id")] = entity
                    entities.append(entity)
                interactionsByE1 = defaultdict(list)
                arguments = []
                relations = []
                keptInteractions = set()
                keptEntities = set()
                for interaction in document.getiterator("interaction"):
                    interactionsByE1[interaction.get("e1")].append(interaction)
                    if interaction.get("event") == "True":
                        arguments.append(interaction)
                    else:
                        relations.append(interaction)
                # Validate relations, then arguments, then events (which depend on kept arguments)
                for relation in relations:
                    issues = defaultdict(int)
                    if self.isValidRelation(relation, entityById, issues):
                        keptInteractions.add(relation)
                    else:
                        counts["RELATION:"+relation.get("type")] += 1
                        if debug: print >> sys.stderr, "Removing invalid relation", issues
                for argument in arguments:
                    issues = defaultdict(int)
                    if self.isValidArgument(argument, entityById, issues):
                        keptInteractions.add(argument)
                    else:
                        counts["ARG:"+argument.get("type")] += 1
                        if debug: print >> sys.stderr, "Removing invalid argument", argument.get("id"), argument.get("type"), issues
                for entityId in sorted(entityById):
                    entity = entityById[entityId]
                    entityType = entity.get("type")
                    if entityType in self.events:
                        issues = defaultdict(int)
                        eventArgs = []
                        for arg in interactionsByE1[entityId]:
                            if arg in keptInteractions:
                                eventArgs.append(arg)
                        if self.isValidEvent(entity, eventArgs, entityById, issues=issues):
                            keptEntities.add(entity)
                        else:
                            counts["EVENT:"+entity.get("type")] += 1
                            if debug: print >> sys.stderr, "Removing invalid event", entityId, issues
                    elif entityType in self.entities:
                        keptEntities.add(entity)
                    else:
                        counts["ENTITY:"+entity.get("type")] += 1
                        if debug: print >> sys.stderr, "Removing unknown entity", entityId
                # clean XML
                interactions = arguments + relations
                if not simulation:
                    for interaction in interactions:
                        if interaction not in keptInteractions:
                            self._removeNestedElement(document, interaction)
                    for entityId in sorted(entityById):
                        entity = entityById[entityId]
                        if entity not in keptEntities:
                            self._removeNestedElement(document, entity)
                # fixed point: nothing was removed in this pass
                if len(interactions) == len(keptInteractions) and len(entities) == len(keptEntities):
                    break
        print >> sys.stderr, "Validation removed:", counts
        return counts
# Saving and Loading ######################################################
def toString(self):
if self.events == None:
raise Exception("No structure definition loaded")
s = ""
for entity in sorted(self.entities):
s += str(self.entities[entity]) + "\n"
for event in sorted(self.events):
s += str(self.events[event]) + "\n"
for relation in sorted(self.relations):
s += str(self.relations[relation]) + "\n"
for modifier in sorted(self.modifiers):
s += str(self.modifiers[modifier]) + "\n"
for target in sorted(self.targets):
s += str(self.targets[target]) + "\n"
return s
    def save(self, model, filename=None):
        """Write the structure definition to a file.

        When *model* is given, the path is resolved through model.get(filename, True)
        and model.save() is called afterwards to register the new file.
        """
        if filename == None:
            filename = self.modelFileName
        if model != None:
            filename = model.get(filename, True)
        if not os.path.exists(os.path.dirname(filename)):
            os.makedirs(os.path.dirname(filename))
        f = open(filename, "wt")
        f.write(self.toString())
        f.close()
        if model != None:
            model.save()
    def load(self, model, filename=None):
        """Load a structure definition file, replacing the current state.

        Each line starts with a keyword (ENTITY/EVENT/RELATION/MODIFIER/TARGET)
        that selects the definition class used to parse it.
        """
        # load definitions
        if filename == None:
            filename = self.modelFileName
        if model != None:
            filename = model.get(filename)
        f = open(filename, "rt")
        lines = f.readlines()
        f.close()
        # initialize
        self._init()
        # add definitions
        for line in lines:
            if line.startswith("ENTITY"):
                definitionClass = Entity
                definitions = self.entities
            elif line.startswith("EVENT"):
                definitionClass = Event
                definitions = self.events
            elif line.startswith("RELATION"):
                definitionClass = Relation
                definitions = self.relations
            elif line.startswith("MODIFIER"):
                definitionClass = Modifier
                definitions = self.modifiers
            elif line.startswith("TARGET"):
                definitionClass = Target
                definitions = self.targets
            else:
                raise Exception("Unknown definition line: " + line.strip() + ' ' + filename)
            definition = definitionClass()
            definition.load(line)
            definitions[definition.type] = definition
        # rebuild derived lookup structures from the freshly loaded definitions
        self._updateSupportingAnalyses()
def rangeToTuple(string):
    """Parse a "[begin,end]" range string into an (int, int) tuple."""
    assert string.startswith("["), string
    assert string.endswith("]"), string
    inner = string.strip("[").strip("]")
    beginText, endText = inner.split(",")
    return (int(beginText), int(endText))
class Target():
    """An extraction target class ("INTERACTION" or "ENTITY") together with the
    set of element types that must be predicted for it."""
    def __init__(self, targetClass=None):
        if targetClass is not None:
            assert targetClass in ("INTERACTION", "ENTITY")
        self.type = targetClass
        self.targetTypes = set() # element types to predict for this target class
    def __repr__(self):
        return "TARGET " + self.type + "\t" + ",".join(sorted(self.targetTypes))
    def load(self, line):
        """Parse a "TARGET <class>\\t<type,...>" definition line."""
        text = line.strip()
        if not text.startswith("TARGET"):
            raise Exception("Not a target definition line: " + text)
        columns = text.split("\t")
        self.type = columns[0].split()[1]
        assert self.type in ("INTERACTION", "ENTITY")
        self.targetTypes = set(columns[1].split(","))
class Entity():
    """Definition of a non-event entity type."""
    def __init__(self, entityType=None):
        self.type = entityType
    def __repr__(self):
        return "ENTITY " + self.type
    def load(self, line):
        """Parse an "ENTITY <type>" definition line."""
        text = line.strip()
        if not text.startswith("ENTITY"):
            raise Exception("Not an entity definition line: " + text)
        self.type = text.split()[1]
class Event():
    """Definition of an event type: its argument definitions and the observed
    per-instance total argument count range [minArgs, maxArgs] (-1 = unset)."""
    def __init__(self, eventType=None):
        self.type = eventType
        self.minArgs = -1
        self.maxArgs = -1
        self.arguments = {} # argType -> Argument definition
        self._argumentsByE1Instance = defaultdict(lambda:defaultdict(int)) # event instance cache
        self._firstInstanceCache = True
    def addTriggerInstance(self, entityId):
        """Register an event trigger occurrence (it may end up with zero arguments)."""
        self._argumentsByE1Instance[entityId] = defaultdict(int)
    def addArgumentInstance(self, e1Id, argType, e1Type, e2Type, siteOfTypes):
        """Register one argument occurrence for the event instance with trigger id e1Id."""
        # add argument to event definition
        if argType not in self.arguments:
            argument = Argument(argType)
            if not self._firstInstanceCache: # there have been events before but this argument has not been seen
                argument.min = 0
            self.arguments[argType] = argument
        if siteOfTypes != None and len(siteOfTypes) > 0:
            self.arguments[argType].siteOfTypes = self.arguments[argType].siteOfTypes.union(siteOfTypes)
        self.arguments[argType].targetTypes.add(e2Type)
        # add to event instance cache
        self._argumentsByE1Instance[e1Id][argType] += 1
    def countArguments(self):
        """Flush the per-instance cache, widening per-argument and total count limits."""
        # Update argument limits for each argument definition
        for eventInstance in self._argumentsByE1Instance.values():
            for argType in self.arguments: # check all possible argument types for each event instance
                if argType in eventInstance:
                    self.arguments[argType].addCount(eventInstance[argType])
                else: # argument type does not exist in this event instance
                    self.arguments[argType].addCount(0)
            # Update event definition argument limits
            totalArgs = sum(eventInstance.values())
            if self.minArgs == -1 or self.minArgs > totalArgs:
                self.minArgs = totalArgs
            if self.maxArgs == -1 or self.maxArgs < totalArgs:
                self.maxArgs = totalArgs
        # Set valid min and max for events with no arguments
        if self.minArgs == -1:
            self.minArgs = 0
        if self.maxArgs == -1:
            self.maxArgs = 0
        # Reset event instance cache
        self._argumentsByE1Instance = defaultdict(lambda:defaultdict(int))
        self._firstInstanceCache = False
    def __repr__(self):
        s = "EVENT " + self.type + " [" + str(self.minArgs) + "," + str(self.maxArgs) + "]"
        for argType in sorted(self.arguments.keys()):
            s += "\t" + str(self.arguments[argType])
        return s
    def load(self, line):
        """Parse an "EVENT <type> [min,max]\\t<argument> ..." definition line."""
        line = line.strip()
        if not line.startswith("EVENT"):
            raise Exception("Not an event definition line: " + line)
        tabSplits = line.split("\t")
        self.type = tabSplits[0].split()[1]
        for tabSplit in tabSplits[1:]:
            argument = Argument()
            argument.load(tabSplit)
            self.arguments[argument.type] = argument
        # Define maximum and minimum depending on model
        if "[" in tabSplits[0]:
            self.minArgs, self.maxArgs = rangeToTuple(tabSplits[0].split()[2])
        else: # old model file
            # backwards compatibility: derive the lower limit from per-argument minimums
            for argument in self.arguments.values():
                if self.minArgs == -1 or (self.minArgs == 0 and argument.min > 0) or (argument.min > 0 and argument.min < self.minArgs):
                    self.minArgs = argument.min
            if self.minArgs == -1:
                self.minArgs = 0
            self.maxArgs = 999
            print >> sys.stderr, "Warning, EVENT " + self.type + " does not have argument limits. Possibly using a model file from version <2.2. Argument limits set to [" + str(self.minArgs) + "-" + str(self.maxArgs) + "]."
class Argument():
    """Definition of one event argument type with its observed count limits,
    valid target entity types and the argument types it can be a Site of."""
    def __init__(self, argType=None):
        self.type = argType
        self.min = -1 # -1 = no count observed yet
        self.max = -1
        self.targetTypes = set() # valid e2 entity types
        self.siteOfTypes = set() # argument types this argument can be a Site of
    def addCount(self, count):
        """Widen the [min, max] range to include an observed per-event count."""
        if self.min == -1 or count < self.min:
            self.min = count
        if self.max == -1 or count > self.max:
            self.max = count
    def __repr__(self):
        parts = [self.type]
        if self.siteOfTypes:
            parts.append("{" + ",".join(sorted(self.siteOfTypes)) + "}")
        parts.append("[" + str(self.min) + "," + str(self.max) + "]")
        parts.append(",".join(sorted(self.targetTypes)))
        return " ".join(parts)
    def load(self, string):
        """Parse "type [{siteOf,...}] [min,max] target,..." (siteOf set optional)."""
        splits = string.strip().split()
        self.type = splits[0]
        assert len(splits) in (3, 4), string
        if len(splits) == 4: # the optional siteOf set is present
            assert splits[1].startswith("{") and splits[1].endswith("}"), string
            self.siteOfTypes = set(splits[1].strip("{").strip("}").split(","))
        self.min, self.max = rangeToTuple(splits[-2])
        self.targetTypes = set(splits[-1].split(","))
class Modifier():
    """Definition of an entity modifier (speculation/negation) and the entity
    types it has been observed on."""
    def __init__(self, modType=None):
        self.type = modType
        self.entityTypes = set()
    def __repr__(self):
        return "MODIFIER " + self.type + "\t" + ",".join(sorted(self.entityTypes))
    def load(self, line):
        """Parse a "MODIFIER <type>\\t<entityType,...>" definition line."""
        text = line.strip()
        if not text.startswith("MODIFIER"):
            raise Exception("Not a modifier definition line: " + text)
        columns = text.split("\t")
        self.type = columns[0].split()[1]
        self.entityTypes = set(columns[1].split(","))
class Relation():
    """Definition of a (non-event) relation type: its endpoint entity types,
    whether it is directed, and optional endpoint role names. All instances of
    one relation type must agree on direction and roles."""
    def __init__(self, relType=None):
        self.type = relType
        self.directed = None # None = no instance seen yet
        self.e1Types = set()
        self.e2Types = set()
        self.e1Role = None
        self.e2Role = None
    def addInstance(self, e1Type, e2Type, directed=None, e1Role=None, e2Role=None, id="undefined"):
        """Record one observed relation instance; raises on conflicting attributes."""
        self.e1Types.add(e1Type)
        self.e2Types.add(e2Type)
        if self.directed == None: # no relation of this type has been seen yet
            self.directed = directed
        elif self.directed != directed:
            raise Exception("Conflicting relation directed-attribute (" + str(directed) + ")for already defined relation of type " + self.type + " in relation " + id)
        if self.e1Role == None: # no relation of this type has been seen yet
            self.e1Role = e1Role
        elif self.e1Role != e1Role:
            raise Exception("Conflicting relation e1Role-attribute (" + str(e1Role) + ") for already defined relation of type " + self.type + " in relation " + id)
        if self.e2Role == None: # no relation of this type has been seen yet
            self.e2Role = e2Role
        elif self.e2Role != e2Role:
            raise Exception("Conflicting relation e2Role-attribute (" + str(e2Role) + ") for already defined relation of type " + self.type + " in relation " + id)
    def load(self, line):
        """Parse a "RELATION <type> directed|undirected\\t[role ]types\\t[role ]types" line.

        Also accepts the pre-2.2 format where the direction is a separate column.
        """
        line = line.strip()
        if not line.startswith("RELATION"):
            raise Exception("Not a relation definition line: " + line)
        tabSplits = line.split("\t")
        if len(tabSplits[0].split()) == 3:
            self.type = tabSplits[0].split()[1]
            self.directed = tabSplits[0].split()[2] == "directed"
            offset = 0
        else:
            print >> sys.stderr, "Warning, RELATION " + tabSplits[0] + " uses model file format <2.2."
            self.type = tabSplits[0].split()[1]
            self.directed = tabSplits[1] == "directed"
            offset = 1
        # a space inside the column means an optional role name precedes the type list
        if " " in tabSplits[1+offset]:
            self.e1Role = tabSplits[1+offset].split()[0]
            self.e1Types = set(tabSplits[1+offset].split()[1].split(","))
        else:
            self.e1Types = set(tabSplits[1+offset].split(","))
        if " " in tabSplits[2+offset]:
            self.e2Role = tabSplits[2+offset].split()[0]
            self.e2Types = set(tabSplits[2+offset].split()[1].split(","))
        else:
            self.e2Types = set(tabSplits[2+offset].split(","))
    def __repr__(self):
        s = "RELATION " + self.type + " "
        if self.directed:
            s += "directed\t"
        else:
            s += "undirected\t"
        if self.e1Role != None:
            s += self.e1Role + " "
        s += ",".join(sorted(list(self.e1Types))) + "\t"
        if self.e2Role != None:
            s += self.e2Role + " "
        s += ",".join(sorted(list(self.e2Types)))
        return s
if __name__=="__main__":
    # Command-line entry point: analyze (or load) a structure definition,
    # optionally validate a corpus against it, and optionally save the result.
    # Import Psyco if available
    try:
        import psyco
        psyco.full()
        print >> sys.stderr, "Found Psyco, using"
    except ImportError:
        print >> sys.stderr, "Psyco not installed"

    from optparse import OptionParser
    optparser = OptionParser(usage="%prog [options]\nCalculate f-score and other statistics.")
    optparser.add_option("-i", "--input", default=None, dest="input", help="", metavar="FILE")
    optparser.add_option("-o", "--output", default=None, dest="output", help="", metavar="FILE")
    optparser.add_option("-l", "--load", default=False, action="store_true", dest="load", help="Input is a saved structure analyzer file")
    optparser.add_option("-d", "--debug", default=False, action="store_true", dest="debug", help="Debug mode")
    optparser.add_option("-v", "--validate", default=None, dest="validate", help="validate input", metavar="FILE")
    (options, args) = optparser.parse_args()

    s = StructureAnalyzer()
    if options.load:
        # input is a previously saved definition file
        s.load(None, options.input)
    else:
        # input is one or more corpora to analyze (comma-separated)
        s.analyze(options.input.split(","))
    print >> sys.stderr, "--- Structure Analysis ----"
    print >> sys.stderr, s.toString()
    if options.validate != None:
        print >> sys.stderr, "--- Validation ----"
        xml = ETUtils.ETFromObj(options.validate)
        s.validate(xml, simulation=False, debug=options.debug)
        if options.output != None:
            # when validating, -o writes the cleaned-up XML
            ETUtils.write(xml, options.output)
    elif options.output != None:
        # otherwise -o saves the structure definition itself
        print >> sys.stderr, "Structure analysis saved to", options.output
        s.save(None, options.output)
|
[
"sandeeps@andrew.cmu.edu"
] |
sandeeps@andrew.cmu.edu
|
cd17da2be8e61707740541225e6128d3129b4caf
|
7401d6ec2bf31b2235ba7a4504514689af796d21
|
/tests/base_test.py
|
c9a4d54bc4cfc6721ba863c4bc64b8d09d9830ba
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
AbySagar/Twilio-flask-test-1
|
278df2d792b3a1c587a1e566a1e46ec8a53b3ff8
|
4d7f7f3837e4a7cbd17457c79e703ea17c1f16f8
|
refs/heads/master
| 2021-08-10T12:19:48.124403
| 2020-03-21T17:22:15
| 2020-03-21T17:22:15
| 249,019,053
| 0
| 0
|
MIT
| 2021-04-20T19:27:04
| 2020-03-21T16:47:12
|
Python
|
UTF-8
|
Python
| false
| false
| 550
|
py
|
import unittest
from models.appointment import Appointment
class BaseTest(unittest.TestCase):
def setUp(self):
from reminders import app, db
self.app = app
self.db = db
self.celery = app.celery()
self.test_client = app.flask_app.test_client()
self.app.flask_app.config['WTF_CSRF_ENABLED'] = False
def tearDown(self):
self.db.session.query(Appointment).delete()
self.db.session.commit()
self.celery.control.purge()
self.celery.conf.CELERY_ALWAYS_EAGER = False
|
[
"jsantos@stackbuilders.com"
] |
jsantos@stackbuilders.com
|
44e421c442c37ca6f99ea92a51ed39af07a99133
|
ee7ca0fed1620c3426fdfd22e5a82bba2a515983
|
/dsn_product_category_etc/__openerp__.py
|
1f8392a509a4099e8874d1954698391fad0dd020
|
[] |
no_license
|
disna-sistemas/odoo
|
318d0e38d9b43bea56978fe85fc72850d597f033
|
0826091462cc10c9edc3cc29ea59c417f8e66c33
|
refs/heads/8.0
| 2022-03-08T19:01:21.162717
| 2022-02-15T13:06:26
| 2022-02-15T13:06:26
| 99,210,381
| 0
| 5
| null | 2019-07-24T08:49:58
| 2017-08-03T08:36:55
|
Python
|
UTF-8
|
Python
| false
| false
| 1,560
|
py
|
##########################################################################
# Copyright (C) 2014 Victor Martin #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
##########################################################################
{
"name": "Disna - Product Category Etc",
"version": "0.1",
"author": "Disna, S.A.",
"contributors": [],
"website": "",
"category": "",
"depends": ['product'],
"description": """
- Adds mrp_report_order field to category
""",
"data": ['views/category.xml'],
"installable": True,
"auto_install": False,
}
|
[
"sistemas@disna.com"
] |
sistemas@disna.com
|
defaf6b765da9866005d5019a16256c7441117ed
|
ee263bbfcff49ef84e0e8eaa840be36406edfedf
|
/api/views.py
|
5f6c1ecd5e15bfb164b6f69db9a181508d195903
|
[] |
no_license
|
JaySiu/Investment-Information-Feed-System
|
b8764e2f64c8e1f3695ffc90af7ec06979cb0111
|
fceeacea717511240745a7dc9d0221dc5e5aa3ee
|
refs/heads/master
| 2020-03-23T14:36:29.911624
| 2019-07-15T03:58:10
| 2019-07-15T03:58:10
| 141,687,290
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 353
|
py
|
from django.shortcuts import render
from rest_framework.views import APIView
from rest_framework.response import Response
from .models import TA
from .serializers import TASerializer
class TAPlots(APIView):
    """Read-only endpoint returning all stored TA (technical analysis) plot records."""
    def get(self, request, format=None):
        """Return every TA row serialized as JSON.

        Bug fix: DRF dispatches handlers as get(self, request, ...); the original
        signature `def get(self)` raised TypeError on every incoming GET request.
        """
        plots = TA.objects.all()
        serializer = TASerializer(plots, many=True)
        return Response(serializer.data)
|
[
"jay.siu@ifshk.com"
] |
jay.siu@ifshk.com
|
fdea7829b90e545db6411ba84cdd2f00eaeccee9
|
bb49872151e598a78116fa0a5faaa34de6ddafa8
|
/functions/review-post.py
|
4b7209348c944807b3a7e51a8441e86061a27cf7
|
[
"Apache-2.0"
] |
permissive
|
ChristelleJolly/agfzb-CloudAppDevelopment_Capstone
|
1c1f5654ce26feb0d53224a031662fe24c460920
|
e0f7352fbc1b19aab3d14b436cd6311dfbde3cbb
|
refs/heads/master
| 2023-08-29T14:25:14.145562
| 2021-10-25T13:56:51
| 2021-10-25T13:56:51
| 417,084,724
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,458
|
py
|
#
#
# main() will be run when you invoke this action
#
# @param Cloud Functions actions accept a single parameter, which must be a JSON object.
#
# @return The output of this action, which must be a JSON object.
#
#
from cloudant.client import Cloudant
from cloudant.error import CloudantException
from cloudant.result import Result
import requests
import json
def main(dict):
    """Cloud Functions action: store one dealership review document in Cloudant.

    dict -- action parameters; must contain Cloudant credentials
            (COUCH_USERNAME, IAM_API_KEY) plus the review fields.
    Returns the created document, or {"error": ...} on failure.
    NOTE(review): the parameter shadows the builtin `dict`; rename when refactoring.
    """
    try:
        client = Cloudant.iam(
            account_name=dict["COUCH_USERNAME"],
            api_key=dict["IAM_API_KEY"],
            connect=True,
        )
        database = client["reviews"]
        # NOTE(review): doc_count()+1 is not a safe unique id under concurrent
        # invocations -- two parallel writes can pick the same id.
        next_id = database.doc_count()+1
        data = {
            "id": next_id,
            "name": dict["name"],
            "dealership": dict["dealership"],
            "review": dict["review"],
            "purchase": dict["purchase"],
            "another": dict["another"],
            "purchase_date": dict["purchase_date"],
            "car_make": dict["car_make"],
            "car_model": dict["car_model"],
            "car_year": dict["car_year"],
        }
        # Create a document using the Database API
        my_document = database.create_document(data)
    except CloudantException as ce:
        print("unable to connect")
        # NOTE(review): exception objects are not JSON-serializable; consider str(ce)
        return {"error": ce}
    except (requests.exceptions.RequestException, ConnectionResetError) as err:
        print("connection error")
        return {"error": err}
    return my_document
|
[
"christelle.jolly@gmail.com"
] |
christelle.jolly@gmail.com
|
544cf17289d4bea37ac89d89533a462b179ec1c5
|
e15f7d0a672950b90baab0eacf6b476c9bf551ff
|
/sqlite_database/clientTable2_db.py
|
4303dd7420ab2f4f6e0a27b48f7572d4cc243b3b
|
[] |
no_license
|
oomintrixx/EC500-hackthon
|
b75d46c9984411e49664bc1878ae9c5f8b7b6897
|
18eec9b138d86f7e77363e37c7afed1e7df21942
|
refs/heads/main
| 2023-05-01T00:26:30.646119
| 2021-05-05T21:24:25
| 2021-05-05T21:24:25
| 354,972,784
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,330
|
py
|
import sqlite3
# table two: table used for store friends
# primary ID, username(string), ip address(string), port(int), public key(string)
def client_database2():
    """Create the client_table2 friends table (primary id, username, ip, port, key)."""
    connection = sqlite3.connect('client_table2.db') # opens (or creates) the database file
    connection.cursor().execute('''CREATE TABLE client_table2
        (PRIMARY_ID INTEGER,
        USERNAME TEXT,
        IP_ADDRESS TEXT,
        PORT INTEGER,
        PUBLIC_KEY TEXT
        );''')
    connection.commit()
    connection.close()
#create client table 2 with primary id, username, text, time
def create_cltable2(primaryid, username, ipaddress, port, publickey):
    """Insert one friend record into client_table2."""
    connection = sqlite3.connect('client_table2.db')
    row = (primaryid, username, ipaddress, port, publickey)
    connection.cursor().execute("INSERT INTO client_table2 VALUES (?,?,?,?,?)", row)
    connection.commit()
    connection.close()
#retrieve all information stored inside client table 2
def retrieve_all():
    """Return every row stored in client_table2 as a list of tuples.

    Bug fix: the original opened a connection and never closed it, leaking a
    file handle on every call; the connection is now always released.
    """
    conn = sqlite3.connect('client_table2.db')
    try:
        cur = conn.cursor()
        cur.execute("SELECT * FROM client_table2")
        return cur.fetchall() # fetch before closing; rows are plain tuples
    finally:
        conn.close()
#return all information associated with this primary id
def retrieve_specific(primaryid):
    """Return all rows whose PRIMARY_ID matches *primaryid*.

    Bug fix: the original opened a connection and never closed it, leaking a
    file handle on every call; the connection is now always released.
    """
    conn = sqlite3.connect('client_table2.db')
    try:
        cur = conn.cursor()
        cur.execute("SELECT * FROM client_table2 WHERE PRIMARY_ID =:PRIMARY_ID", {'PRIMARY_ID': primaryid})
        return cur.fetchall()
    finally:
        conn.close()
#delete all information related to this username from the database
def delete_userFromUsername(username):
    """Remove every row whose USERNAME matches the given name."""
    connection = sqlite3.connect('client_table2.db')
    connection.cursor().execute("""DELETE FROM client_table2 WHERE USERNAME =:USERNAME """, {'USERNAME': username})
    connection.commit()
    connection.close()
#delete all information related to this primary from the database
def delete_userFromID(primaryid):
    """Remove every row whose PRIMARY_ID matches the given id."""
    connection = sqlite3.connect('client_table2.db')
    connection.cursor().execute("""DELETE FROM client_table2 WHERE PRIMARY_ID =:PRIMARY_ID """, {'PRIMARY_ID': primaryid})
    connection.commit()
    connection.close()
#delete all information from database
def delete_all():
    """Remove every row from client_table2 (the table itself is kept)."""
    connection = sqlite3.connect('client_table2.db')
    connection.cursor().execute("""DELETE FROM client_table2""")
    connection.commit()
    connection.close()
|
[
"p.nuwapa@gmail.com"
] |
p.nuwapa@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.