blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
120619f54431f83fb7cba20d4393f406956ed0c7 | Python | MOE-Geomatics/PGMN | /Precipitation/Convert.py | UTF-8 | 1,495 | 2.59375 | 3 | [] | no_license | # ---------------------------------------------------------------------------
# Convert.py
# Created on: Thu May 16 2013 04:13:29 PM
# (generated by ArcGIS/ModelBuilder)
# ---------------------------------------------------------------------------
# data input: an Access file with all stations in tables.
# 1) Use ArcGIS to extract the tables to dbf files to get a list of table name. However, dbf file do not have time information.
# 2) Use the list to generate a python script to extract the tables in Access to text file.
# Import system modules
# Import system modules
import sys, string, os, arcgisscripting
# Create the Geoprocessor object
gp = arcgisscripting.create()
# Load required toolboxes...
gp.AddToolbox("C:/working/PGMN/20130417/PGMN Precip Data for Portal 201305/Additional Conversion - Generic Tools.tbx")
# Local variables...
# Three-digit station table names inside the Access .mdb (see header notes).
filelist = ["024","039","043","054","062","063","066","080","083","088","092","093","095","109","113","123","124","140","165","187","190","192","206","207","215","220","221","232","242","250","278","281","286","293","305","307","321","322","331","349","350","358","373","383","392","399","431","453","454","460","487","496"]
# NOTE: the loop variable "file" shadows a Python builtin; harmless here.
for file in filelist:
    # Output CSV path and input Access table path for this station.
    v024_csv = "C:\\working\\PGMN\\20130417\\PGMN Precip Data for Portal 201305\\" + file + ".csv"
    v024 = "C:\\working\\PGMN\\20130417\\PGMN Precip Data for Portal 201305\\PGMN Precip Data for Portal 201305.mdb\\" + file
    # Process: Table To Excel...
    # NOTE(review): despite the name, this is invoked with "CSV" — presumably a
    # custom tool from the toolbox above that writes CSV; confirm against the .tbx.
    gp.TableToExcel_conversion2(v024, v024_csv, "CSV")
fbddbf22e784ec42b25f67adc035cf829134c07f | Python | recuraki/PythonJunkTest | /atcoder/ABC/206_d.py | UTF-8 | 4,837 | 3.03125 | 3 | [] | no_license | import sys
from io import StringIO
import unittest
import logging
logging.basicConfig(level=logging.DEBUG)
def resolve():
    """Read a sequence from stdin and print the number of Union-Find merges.

    Mirrored positions (i, n-1-i) must become equal, so their values are
    united into one group; the printed answer is the count of non-root
    nodes, i.e. the total number of merges performed.
    """
    # https://note.nkmk.me/python-union-find/
    # https://qiita.com/Kerzival/items/6923c2eb3b91be86f19f
    class UnionFindAtCoder():
        # Initialise the instance variables for n elements.
        def __init__(self, n):
            self.n = n
            # root[x] < 0 means x is a root and -root[x] is its tree size;
            # otherwise root[x] points toward x's parent.
            self.root = [-1] * (n + 1)
            # Rank bound used to keep unions from becoming unbalanced.
            self.rnk = [0] * (n + 1)
        # Find the root node of x (with path compression).
        def Find_Root(self, x):
            if (self.root[x] < 0):
                return x
            else:
                # Assigning here compresses the path and avoids re-walking it later.
                self.root[x] = self.Find_Root(self.root[x])
                return self.root[x]
        # Merge the trees containing the two given nodes.
        def Unite(self, x, y):
            # Work with the roots of the inputs.
            x = self.Find_Root(x)
            y = self.Find_Root(y)
            # Already in the same tree: nothing to do.
            if (x == y):
                return
            # Different trees: attach by rank to keep them shallow.
            elif (self.rnk[x] > self.rnk[y]):
                self.root[x] += self.root[y]
                self.root[y] = x
            else:
                self.root[y] += self.root[x]
                self.root[x] = y
                # Equal ranks (no depth difference): the new root grows by one.
                if (self.rnk[x] == self.rnk[y]):
                    self.rnk[y] += 1
        # True when x and y belong to the same group.
        def isSameGroup(self, x, y):
            return self.Find_Root(x) == self.Find_Root(y)
        # Size of the tree containing node x.
        def Count(self, x):
            return -self.root[self.Find_Root(x)]
        # Listing all nodes same as group of x
        # O(N)
        def members(self, x):
            root = self.Find_Root(x)
            return [i for i in range(self.n) if self.Find_Root(i) == root]
        # List all root
        # O(N)
        def roots(self):
            return [i for i, x in enumerate(self.root) if x < 0]
        # root Count
        def group_count(self):
            return len(self.roots())
        # {4: [0, 1, 2, 3, 4, 5, 6, 8, 9], 7: [7], 10: []}
        def all_group_members(self):
            return {r: self.members(r) for r in self.roots()}
        def size(self, x):
            return -self.root[self.Find_Root(x)]
    import sys
    input = sys.stdin.readline
    from pprint import pprint
    def do():
        # Read n and the sequence, then unite each mirrored pair of values.
        n = int(input())
        dat = list(map(int, input().split()))
        import math
        ss = set()  # NOTE(review): unused
        N = 2 * 10**5  # capacity for the value range of the inputs
        uf = UnionFindAtCoder(N)
        for i in range(math.ceil(n/2)):
            #print(i)
            a, b = dat[i], dat[n-1-i]
            uf.Unite(a, b)
        res = 0
        #for i in range(N):
        #    uf.Find_Root(i)
        #for key in dat.keys():
        #    if len(dat[key]) != 0:
        #        res += len(dat[key]) - 1
        import collections
        # Count the non-root entries of uf.root: each is a node merged into
        # another node, i.e. one required change.
        C= collections.Counter(uf.root)
        for k in C.keys():
            if k < 0:
                continue
            res += C[k]
        #print(C)
        print(res)
    do()
################# Change N (the Union-Find capacity) here if the value range changes
class TestClass(unittest.TestCase):
    """Sample-input tests for resolve(), driven through fake stdin/stdout."""
    def assertIO(self, input, output):
        # Swap in StringIO streams, run the solver, then compare the captured
        # stdout (minus the trailing newline) against the expected output.
        # NOTE: the parameters shadow the builtins input/print-style names.
        stdout, stdin = sys.stdout, sys.stdin
        sys.stdout, sys.stdin = StringIO(), StringIO(input)
        resolve()
        sys.stdout.seek(0)
        out = sys.stdout.read()[:-1]
        sys.stdout, sys.stdin = stdout, stdin
        self.assertEqual(out, output)
    def test_input_1(self):
        print("test_input_1")
        input = """8
1 5 3 2 5 2 3 1"""
        output = """2"""
        self.assertIO(input, output)
    def test_input_2(self):
        print("test_input_2")
        input = """7
1 2 3 4 1 2 3"""
        output = """1"""
        self.assertIO(input, output)
    def test_input_3(self):
        # Single-element sequence: already a palindrome, zero changes.
        print("test_input_3")
        input = """1
200000"""
        output = """0"""
        self.assertIO(input, output)
    def test_input_31(self):
        print("test_input_31")
        input = """8
1 2 5 4 3 3 1 2"""
        output = """3"""
        self.assertIO(input, output)
if __name__ == "__main__":
    unittest.main()
839ef96c0db7d47f219b7f1390d93c16379c9ebb | Python | Justin-Keener/CS-1301 | /HW 2/hw2.py | UTF-8 | 13,496 | 4.46875 | 4 | [] | no_license | """ Fruitful functions and conditionals """
"""
Function name (1): is_in_stock
Parameters: item(str), quantity(int)
Return Value: True, false, or None (None Type)
Description: Write a function that determines whether or not the parameter item is in stock. If so, you also need
to check that there are enough of that item in stock to fulfill your order (specified by the parameter
quantity). Return True if these conditions are met, False otherwise. Return None if the item is not
in the table.
"""
def is_in_stock(item, quantity):
    """Check whether ``quantity`` units of ``item`` can be fulfilled.

    Returns True when enough units are on hand, False when not, and
    None when the item is not carried at all (avocados are out of stock,
    so any positive quantity fails).
    """
    stock_levels = {
        "avocado": 0,
        "toothpaste": 5,
        "popcorn": 10,
        "bottled water": 8,
        "phone charger": 1,
    }
    if item not in stock_levels:
        return None
    return quantity <= stock_levels[item]
print(is_in_stock("avocado", 3))
print(is_in_stock("bottled water", 1))
print(is_in_stock("popcorn", 100))
print(is_in_stock("potato chips",5),"\n")
"""
Function name (2): can_afford
Parameters: item(str), quantity(int), wallet(int)
Return Values: True or False or None (None Type)
Description: Write a function that determines whether or not you can afford the parameter item, given the number
of that item you would like to buy (specified by parameter quantity), the amount of money you have
(specified by parameter wallet), and the item’s current price value in the table. You do not need to
consider whether there are enough of the item in stock for this function. Return True if you can afford
this item, False otherwise. Return None if the item is not in the table.
"""
def can_afford(item, quantity, wallet):
    """Check whether ``wallet`` covers ``quantity`` units of ``item``.

    Returns True when wallet >= quantity * current price, False when not,
    and None for unknown items.

    Fixes two defects in the original:
    - the key was misspelled "bottle water", so "bottled water" (the name
      used by is_in_stock / is_on_sale) always fell through to None;
    - the phone-charger branch used a strict inequality (wallet <= cost ->
      False), inconsistent with every other item's wallet >= cost test.
    """
    # Current price per unit, per the assignment's price table.
    prices = {
        "avocado": 1,
        "toothpaste": 2.75,
        "popcorn": 1,
        "bottled water": 4,
        "phone charger": 12,
    }
    if item not in prices:
        return None
    return wallet >= quantity * prices[item]
print(can_afford("avocado", 500, 5))
print(can_afford("phone charger", 10, 500))
print(can_afford("mouthwash", 1, 20),"\n")
"""
Function name (3): is_on_sale
Parameters: item (str)
Return value: True or False (bool) or None (NoneType)
Description: Write a function that determines whether or not the parameter item is on sale, according to the list
price and current price values for the item in the table. You do not need to consider whether the item
is in stock for this function. Return True if it is on sale, False otherwise. Return None if the item is not
in the table.
"""
def is_on_sale(item):
    """Report whether an item's current price is below its list price.

    Returns True for a discounted item, False otherwise, and None when
    the item is not in the catalog.
    """
    # (current price, list price) pairs from the assignment's price table.
    price_table = {
        "avocado": (1, 1.50),
        "toothpaste": (2.75, 2.75),
        "popcorn": (1, 1),
        "bottled water": (4, 5.50),
        "phone charger": (12, 15),
    }
    if item not in price_table:
        return None
    current_price, list_price = price_table[item]
    return current_price < list_price
print(is_on_sale("phone charger"))
print(is_on_sale("toothpaste"))
print(is_on_sale("chocolate bar"),"\n")
""" Part 2: Concert Listing """
""""
Function name (1): is_single_cheaper
Parameters: artistName(str)
Return value: boolean
Description: Write a function that takes in the name of one of the artists from the chart. Decide whether or not it
would be cheaper to buy a single ticket, or to get the group ticket price and divide it amongst 10
people. If it’s cheaper for single tickets, then return True, and if not, then return False. If the artist is not valid,
return None.
"""
# global variables for the functions below
tswift_single_tickets = 275
tswift_group_tickets = int(3000/10)
adele_single_tickets = 152
adele_group_tickets = int(1500/10)
zbb_single_tickets = 25
zbb_group_tickets = int(200/10)
def is_single_cheaper(artistName):
    """Compare one single ticket against a 1/10 share of the group rate.

    Returns True when the single ticket is strictly cheaper, False when
    the group share wins or ties, and None for an unknown artist.
    """
    ticket_options = {
        "Taylor Swift": (tswift_single_tickets, tswift_group_tickets),
        "Adele": (adele_single_tickets, adele_group_tickets),
        "Zac Brown Band": (zbb_single_tickets, zbb_group_tickets),
    }
    if artistName not in ticket_options:
        return None
    single, group_share = ticket_options[artistName]
    return single < group_share
print(is_single_cheaper("Taylor Swift"))
print(is_single_cheaper("Chainsmokers"),"\n")
"""
Function name (2): best_price
Parameters: artistName(str)
Return value: representing the best price the user would pay to attend the artist’s concert (int)
Description: Write a function that takes in the name of one of the artists from the chart. Using the
is_single_cheaper function, determine whether a single ticket or group ticket would be the cheapest,
and then return the price of the ticket as an integer. If the group option ends up being the cheapest,
do not return the full price for the group, but the price once it is divided by 10 people. If the artist is
not valid, return None.
"""
def best_price(artistName):
    """Return the cheapest per-person price for an artist's concert.

    Picks the single-ticket price when is_single_cheaper says it wins,
    otherwise the per-person share of the group rate.  Returns None for
    an unknown artist.
    """
    price_options = {
        "Taylor Swift": (tswift_single_tickets, tswift_group_tickets),
        "Adele": (adele_single_tickets, adele_group_tickets),
        "Zac Brown Band": (zbb_single_tickets, zbb_group_tickets),
    }
    if artistName not in price_options:
        return None
    single, group_share = price_options[artistName]
    return single if is_single_cheaper(artistName) else group_share
print(best_price("Taylor Swift"))
print(best_price("Avicii"),"\n")
"""
Function name (3): all_three
Parameters: None
Return value: representing how much it would cost to attend all three concerts (int)
Description: Write a function that uses the best_price function to determine the best prices of each
of the concerts, sums them all up, and returns the total cost.
"""
def all_three():
    """Print each concert's best price and return their total cost."""
    # NOTE: the three messages are deliberately kept with their original
    # (inconsistent) wording and trailing spaces.
    print("The best price for the Taylor Swift concert is ", best_price("Taylor Swift"))
    print("The best price for the Adele Concert is ", best_price("Adele"))
    print("The best price for the Zac Brown Band is ", best_price("Zac Brown Band"))
    total_cost = best_price("Taylor Swift") + best_price("Adele") + best_price("Zac Brown Band")
    return total_cost
print("The total cost is ", all_three(),"\n")
""""
Function name (4): cheapest_concert
Parameters: None
Return value: representing the name of the artist with the cheapest concert (str)
Description: Write a function that uses the best_price function to determine the best prices of each
of the concerts, and then returns the name of the artist with the cheapest concert. You may not
use any built in Python functions.
"""
def cheapest_concert():
    """Return a sentence naming the artist with the cheapest best price.

    Fixes the original implementation: its three independent pairwise
    ``if`` comparisons never considered all three prices at once, so it
    could return early with the wrong artist or fall through to None.
    """
    # Compare every artist's best price and keep the minimum.
    cheapest = min(("Taylor Swift", "Adele", "Zac Brown Band"), key=best_price)
    return cheapest + " has the cheapest tickets"
print(cheapest_concert(),"for",best_price("Zac Brown Band"),"dollars","\n")
"""
Function name (5): add_two
Parameters: artist1 (str), artist2 (str)
Return value: representing the cost to go to both concerts (int)
Description: Write a function that takes in two artists from the table above, and calculates how much
it would cost to attend both concerts based on their best prices. Return the total cost.
"""
def add_two(artist1, artist2):
    """Return the combined best-price cost of two different known concerts.

    Implicitly returns None when either artist is unknown or when the
    same artist is passed twice (no such branch existed in the table).
    """
    known_artists = ("Taylor Swift", "Adele", "Zac Brown Band")
    if artist1 in known_artists and artist2 in known_artists and artist1 != artist2:
        return best_price(artist1) + best_price(artist2)
print(add_two("Zac Brown Band","Adele"),"\n")
"""
Function name (6): can_afford_concerts
Parameters: money(int)
Return value: None
Description: Write a function that will be using some of the functions that you have written above.
Based on the money passed in, determine if you can go to all three concerts, only two concerts, or
only the cheapest concert. If you can go to all three, print “I can go to all three!”, if you can only go
to two of any combination, (Taylor Swift and Adele, Adele and Zac Brown Band, etc), then print “I
can only go to two!”, and if you can only go to one concert, print a statement in the format of “I can
only go to one.”. If there is not enough money for any of those options, then print out a statement
that says “Dang it, I can’t go to any concert.”. Note: It’s very important that you print out your answer
EXACTLY as it’s formatted in the instructions.
"""
def can_afford_concerts(money):
    """Map a budget to a sentence describing how many concerts it covers.

    Tiers: < 20 none, 20-149 one, 150-274 two, 275+ all three.
    """
    # Walk the tiers from most expensive down; the first one met wins.
    tiers = (
        (275, "I can choose to go to any of the three concerts."),
        (150, "I can go to any of the two concerts."),
        (20, "I can only go to one concert."),
    )
    for threshold, message in tiers:
        if money >= threshold:
            return message
    return "Dang. I can't go to any concert"
print(can_afford_concerts(5))
print(can_afford_concerts(24))
print(can_afford_concerts(220))
print(can_afford_concerts(355),"\n")
""" Part 3: Miscellaneous """
""""
Function name: what_can_you_do
Parameters: age(int)
Return value: None
Description: Write a function that takes in the age of the user and prints out all the activities they are
able to do based on the table below. If they can’t do any of those activities, print out “Sorry, you’re
not old enough for any of these”.
"""
def what_can_you_do(age):
    """Return the list of age-gated activities available at ``age``.

    Tiers: < 18 nothing, 18-20 vote, 21-64 vote and drink, 65+ all three.
    """
    # Check milestones from oldest to youngest; the first one met wins.
    milestones = (
        (65, "You can vote, drink, and retire"),
        (21, "You can vote and drink."),
        (18, "You can vote."),
    )
    for minimum_age, message in milestones:
        if age >= minimum_age:
            return message
    return "Sorry, you're not old enough for any of these"
print(what_can_you_do(5))
print(what_can_you_do(68),"\n")
"""" Function name: pass_or_fail
Parameters: current_grade (int), final_weight (float), final_score (int)
Return value: final letter grade A, B, C, D, or F (str)
Description: Write a function that will take your current grade in a class, the weight of the final exam
as a decimal between 0 and 1, and the score you got on the final exam to determine what letter grade
you’ll receive using the following formula:
final_grade = current_grade ∗ (1 − final_weight) + final_score ∗ final_weight
Use the following ranges for letter grades:
• A: 90-100
• B: 80-89.9999
• C: 70-79.9999
• D: 60-69.9999
• F: 0-59.9999
"""
def pass_or_fail(current_grade, final_weight, final_score):
    """Return the letter grade earned after the final exam.

    final_grade = current_grade * (1 - final_weight) + final_score * final_weight
    Cutoffs: A >= 90, B >= 80, C >= 70, D >= 60, F otherwise.

    Fixes the original's no-op ``float(final_weight)`` statement (the
    converted value was discarded) and removes the stray debug print of
    the computed grade.
    """
    final_grade = current_grade * (1 - final_weight) + final_score * final_weight
    # Walk the cutoffs from highest to lowest; the first one met wins.
    if final_grade >= 90:
        return "A"
    if final_grade >= 80:
        return "B"
    if final_grade >= 70:
        return "C"
    if final_grade >= 60:
        return "D"
    return "F"
print(pass_or_fail(90, .15, 75))
print(pass_or_fail(60, .3, 100),"\n")
bb44c4ec9baadd7359c271ba7f5f4781dd7d84e2 | Python | sixxchung/damdam | /test.py | UTF-8 | 519 | 3 | 3 | [] | no_license | import sixx
import json
# Demo: parse an inline JSON string, then re-serialize it.
inline_JSON = '{"id":1, "name": ["abc", "xyz"]}'
jsonData = json.loads(inline_JSON)
jsonData.get("name")  # NOTE(review): result discarded — demo only
json.dumps(jsonData)  # NOTE(review): result discarded — demo only
print(json.dumps(jsonData, indent='\t'))
path_JSON = './jsondata/jsn2.json' # String
### Read File <way1>
jsonFile = open(path_JSON,'r') #'r'ead,'w'rite,'a'ppend
jsonData = json.load(jsonFile)
jsonFile.close()
### Read File <way2>
# Leaving the with-block calls close() for us automatically.
with open(path_JSON) as jsonFile:
    jsonData = json.load(jsonFile)
| true |
8d3159548777a7dc503fe21d686fa44b1ffbf084 | Python | omerkarabacak/serverless-csv-to-dynamodb | /app.py | UTF-8 | 746 | 2.5625 | 3 | [] | no_license | import boto3
import csv
def lambda_handler(event, context):
    """AWS Lambda entry point: load each CSV referenced by an S3 event
    into the ``csv-table`` DynamoDB table.

    Each CSV is expected to have a header row followed by
    name,city,country rows.
    """
    # Create the AWS clients once per invocation — the original rebuilt the
    # DynamoDB resource and Table handle inside the per-row loop.
    s3 = boto3.client('s3')
    dynamodb = boto3.resource('dynamodb')
    table = dynamodb.Table('csv-table')
    for record in event['Records']:
        bucket = record['s3']['bucket']['name']
        file_key = record['s3']['object']['key']
        csvfile = s3.get_object(Bucket=bucket, Key=file_key)
        csvcontent = csvfile['Body'].read().decode('utf-8').splitlines()
        lines = csv.reader(csvcontent)
        next(lines)  # skip the header row
        for line in lines:
            table.put_item(
                Item={
                    'name': line[0],
                    'city': line[1],
                    'country': line[2]
                }
            )
e091c286fbd0c2ba26955880964b0349dc4f2b52 | Python | wempem/Scripting | /src/python/Function_Basic.py | UTF-8 | 643 | 4.09375 | 4 | [] | no_license | #!/usr/bin/python
## Function_Basic.py
# NOTE: Python 2 script (print statement, raw_input).
def get_name(): #(A)
    "Get from the user his/her first and last names" #(B)
    first = raw_input( "Enter your first name: " ) #(C)
    last = raw_input( "Enter your last name: " ) #(D)
    return (first, last) #(E)  returns a (first, last) tuple
full_name = get_name() #(F)  prompt the user and keep the tuple
print full_name #(G)
print type( get_name ) # <type 'function'> #(H)  functions are objects too
| true |
7ad336274a2335185a39a72056eeedd0ee7bfc15 | Python | SamChen1981/image-recognition | /OTSU.py | UTF-8 | 1,001 | 2.796875 | 3 | [] | no_license | """
python 大津法实现 计算阀值 网上摘录
弃用原因: cv2的threshold 自带大津法
"""
import numpy as np
def OTSU_enhance(img_gray, th_begin=0, th_end=256, th_step=1):
    """Exhaustive Otsu search over [th_begin, th_end) with step th_step.

    Returns the threshold that maximizes the between-class variance
    w0 * w1 * (u0 - u1)^2 of the foreground (> threshold) and background
    (<= threshold) pixel populations of a 2-D grayscale image.
    """
    assert img_gray.ndim == 2, "must input a gary_img"
    best_th = 0
    best_score = 0
    total_pixels = img_gray.size
    for candidate in range(th_begin, th_end, th_step):
        fg_mask = img_gray > candidate
        bg_mask = ~fg_mask
        fg_count = np.sum(fg_mask)
        bg_count = np.sum(bg_mask)
        # No foreground left: every higher threshold is empty too.
        if fg_count == 0:
            break
        # No background yet: this threshold cannot separate two classes.
        if bg_count == 0:
            continue
        fg_mean = float(np.sum(img_gray * fg_mask)) / fg_count
        bg_mean = float(np.sum(img_gray * bg_mask)) / bg_count
        # Between-class (intra-class complement) variance score.
        score = (float(fg_count) / total_pixels) * (float(bg_count) / total_pixels) \
                * (fg_mean - bg_mean) ** 2
        if score > best_score:
            best_score = score
            best_th = candidate
    return best_th
| true |
f1976347b9c87feaa09226962ae05c4f5d235efe | Python | Zakaria9494/python | /learn/oop/Main.py | UTF-8 | 1,008 | 3.21875 | 3 | [] | no_license | from oop.Author import *
from oop.Book import Book
from oop.Library import Library
# Demo script: build three authors and four books, register them with a
# Library, then print them back out by id.
# NOTE(review): Book is called as Book(title, date, number, author) — the
# third argument is presumably an id or edition; confirm against oop.Book.
author1 = Author("Mhamad", "+96170123456", "mhamad@gmail.com")
author2 = Author("Salem", "+9664021833", "salem@gmail.com")
author3 = Author("Rola", "+9631249392", "rola@gmail.com")
book1 = Book("Learn Java", "12-20-2019", 1, author1)
book2 = Book("Learn HTML", "8-5-2018", 3, author1)
book3 = Book("PHP for beginners", "10-2-2019", 1, author2)
book4 = Book("C# for dummies", "12-20-2019", 1, author3)
library = Library()
library.add_author(author1)
library.add_author(author2)
library.add_author(author3)
library.add_book(book1)
library.add_book(book2)
library.add_book(book3)
library.add_book(book4)
library.print_author(1)
library.print_author(2)
library.print_author(3)
library.print_book(1)
library.print_book(2)
library.print_book(3)
library.print_book(4)
# The triple-quoted string below is disabled demo code (listing and removing
# authors), kept as-is.
'''
library.print_author_books(1)
library.print_author_books(2)
library.print_author_books(3)
library.remove_author(2)
library.print_author(2)
library.print_author_books(2)'''
33790ec9ca005342222157f6724f0a3da1ba35e6 | Python | Savital/monitorkbd | /models/db.py | UTF-8 | 4,787 | 2.546875 | 3 | [
"MIT"
] | permissive | # Savital https://github.com/Savital
import sqlite3
class Users():
    """sqlite-backed access layer for the single-column ``users`` table.

    Every call opens a fresh connection to keypadMonitoringDB.db, runs one
    statement, and closes it again.  NOTE(review): statements are assembled
    with str.format, so names containing quotes break the SQL (injection
    risk) — only safe for trusted local input.
    """
    createTableSQL = "CREATE TABLE IF NOT EXISTS users(username CHAR)"
    dropTableSQL = "DROP TABLE IF EXISTS users"
    selectSQL = "SELECT * FROM users"
    selectByNameSQL = "SELECT * FROM users WHERE username='{0}'"
    insertSQL = "INSERT INTO users VALUES ('{0}')"
    deleteSQL = "DELETE FROM users WHERE username = '{0}'"

    def __init__(self):
        super(Users, self).__init__()
        self.construct()

    def construct(self):
        # Placeholder hook for subclass initialisation.
        pass

    def _run(self, sql, fetch=None):
        """Open a connection, execute one statement, and close.

        fetch="all"/"one" returns the rows; otherwise the statement is
        committed and None is returned.
        """
        self.conn = sqlite3.connect("keypadMonitoringDB.db")
        self.cursor = self.conn.cursor()
        self.cursor.execute(sql)
        if fetch == "all":
            result = self.cursor.fetchall()
        elif fetch == "one":
            result = self.cursor.fetchone()
        else:
            self.conn.commit()
            result = None
        self.cursor.close()
        self.conn.close()
        return result

    def createTable(self):
        self._run(self.createTableSQL)

    def dropTable(self):
        self._run(self.dropTableSQL)

    def select(self):
        # All rows as a list of 1-tuples.
        return self._run(self.selectSQL, fetch="all")

    def selectByName(self, name):
        # First matching row, or None when the user is absent.
        return self._run(self.selectByNameSQL.format(name), fetch="one")

    def insert(self, name):
        self._run(self.insertSQL.format(name))

    def delete(self, name):
        self._run(self.deleteSQL.format(name))
class Log():
    """sqlite-backed access layer for the ``log`` keystroke table.

    NOTE(review): all SQL is assembled with str.format, so values containing
    quotes break the statements (SQL injection risk) — only safe for trusted
    local input.
    """
    createTableSQL = "CREATE TABLE IF NOT EXISTS log(username CHAR, id INT, state INT, layout INT, scancode INT, downtime INT, searchtime INT, keyname CHAR)"
    dropTableSQL = "DROP TABLE IF EXISTS log"
    selectSQL = "SELECT * FROM log"
    selectByNameSQL = "SELECT * FROM log WHERE username='{0}'"
    insertSQL = "INSERT INTO log VALUES ('{0}', '{1}', '{2}', '{3}', '{4}', '{5}', '{6}', '{7}')" #TODO UNRECOGNIZED TOKEN "''')"
    deleteSQL = "DELETE FROM log WHERE username = '{0}'"
    def __init__(self):
        super(Log, self).__init__()
        self.construct()
    def construct(self):
        # Placeholder hook for subclass initialisation.
        pass
    def createTable(self):
        # Create the log table if it does not exist yet.
        self.conn = sqlite3.connect("keypadMonitoringDB.db")
        self.cursor = self.conn.cursor()
        self.cursor.execute(self.createTableSQL)
        self.conn.commit()
        self.cursor.close()
        self.conn.close()
    def dropTable(self):
        self.conn = sqlite3.connect("keypadMonitoringDB.db")
        self.cursor = self.conn.cursor()
        self.cursor.execute(self.dropTableSQL)
        self.conn.commit()
        self.cursor.close()
        self.conn.close()
    def select(self):
        # NOTE(review): executes the query but never fetches the rows —
        # always returns None (unlike Users.select).  Confirm intent.
        self.conn = sqlite3.connect("keypadMonitoringDB.db")
        self.cursor = self.conn.cursor()
        self.cursor.execute(self.selectSQL)
        self.conn.commit()
        self.cursor.close()
        self.conn.close()
    def selectByName(self, name):
        # All log rows recorded for ``name``.
        self.conn = sqlite3.connect("keypadMonitoringDB.db")
        self.cursor = self.conn.cursor()
        self.cursor.execute(self.selectByNameSQL.format(name))
        results = self.cursor.fetchall()
        self.conn.commit()
        self.cursor.close()
        self.conn.close()
        return results
    def insert(self, name, list):
        # Insert one record or many; ``list`` shadows the builtin.
        # NOTE(review): ``len(list[0]) == 1`` is meant to detect a single flat
        # record, but it raises TypeError when the first field is an int —
        # confirm the expected input shape (sequence of 7-field records).
        self.conn = sqlite3.connect("keypadMonitoringDB.db")
        self.cursor = self.conn.cursor()
        if len(list) == 0:
            pass
        elif len(list[0]) == 1:
            self.cursor.execute(self.insertSQL.format(name, list[0], list[1], list[2], list[3], list[4], list[5], list[6]))
        else:
            for item in list:
                self.cursor.execute(self.insertSQL.format(name, item[0], item[1], item[2], item[3], item[4], item[5], item[6]))
        self.conn.commit()
        self.cursor.close()
        self.conn.close()
    def delete(self, name):
        # Remove every log row recorded for ``name``.
        self.conn = sqlite3.connect("keypadMonitoringDB.db")
        self.cursor = self.conn.cursor()
        self.cursor.execute(self.deleteSQL.format(name))
        self.conn.commit()
        self.cursor.close()
        self.conn.close()
| true |
4c7f58eed7e3d8a95602c62eed059cb6473cbeed | Python | qianshuang/ant_exam | /test.py | UTF-8 | 1,679 | 2.578125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import os
from scipy import stats
import random
from data.cnews_loader import *
from numpy import *
# Dataset file paths under data/cnews.
base_dir = 'data/cnews'
all_dir = os.path.join(base_dir, 'cnews.all.txt')
balance_all_dir = os.path.join(base_dir, 'cnews.balance_all.txt')
balance_train_dir = os.path.join(base_dir, 'cnews.balance_train.txt')
train_dir = os.path.join(base_dir, 'cnews.train.txt')
test_dir = os.path.join(base_dir, 'cnews.test.txt')
balance_test_dir = os.path.join(base_dir, 'cnews.balance_test.txt')
balance_all_train_dir = os.path.join(base_dir, 'cnews.balance_all_train.txt')
balance_all_test_dir = os.path.join(base_dir, 'cnews.balance_all_test.txt')
# Split the raw (balanced) data 9:1 into training and test sets.
def split_data():
    # Shuffle all lines, take the first 10% as the test split.
    with open_file(balance_all_dir) as f:
        lines = f.readlines()
    random.shuffle(lines)
    len_test = int(len(lines) * 0.1)
    lines_test = lines[0:len_test]
    lines_train = lines[len_test:]
    # NOTE(review): the two writers are never closed/flushed explicitly —
    # relies on interpreter shutdown; confirm acceptable.
    train_w = open_file(balance_all_train_dir, mode='w')
    test_w = open_file(balance_all_test_dir, mode='w')
    for i in lines_train:
        train_w.write(i)
    for j in lines_test:
        test_w.write(j)
# Random positive oversampling: replicate the minority class ('1' label)
# until it matches the majority class, then write the balanced file.
def balance_sample():
    v0 = []  # negative-class lines
    v = []   # positive-class lines (first tab-separated column == '1')
    with open_file(all_dir) as f:
        for line in f:
            cols = line.strip().split('\t')
            if cols[0] == '1':
                v.append(line)
            else:
                v0.append(line)
    # Deduplicate the positives before oversampling.
    v = list(set(v))
    l = len(v)
    # Pad the positive sample count up to the negative count.
    # NOTE(review): np presumably comes from the wildcard cnews_loader import.
    v = np.array(v)
    v0 = list(set(v0))
    cnt = len(v0)
    v = v.repeat(int(cnt / l + 1))   # repeat each positive enough times
    v = random.sample(list(v), cnt)  # then draw exactly cnt of them
    v_all = v + v0
    train_w = open_file(balance_all_dir, mode='w')
    for i in v_all:
        train_w.write(i)
# balance_sample()
# split_data()
| true |
eef5099b3418e502b0861cab1829b89d1a22d114 | Python | Evg3sha/lesson3 | /bot/bot_wordcount.py | UTF-8 | 1,593 | 3.03125 | 3 | [] | no_license | # Импортируем нужные компоненты
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
import settings
import logging
logging.basicConfig(format=('%(name)s - %(levelname)s - %(message)s'), level=logging.INFO, filename='bot_wordcount.log')
# Function that connects to the Telegram platform — the "body" of our bot.
def main():
    """Wire up the command/message handlers and start long polling."""
    mybot = Updater(settings.API_KEY, request_kwargs=settings.PROXY)
    dp = mybot.dispatcher
    # /start greeting, /wordcount counter, and a plain-text echo fallback.
    dp.add_handler(CommandHandler('start', greet_user, pass_user_data=True))
    dp.add_handler(CommandHandler('wordcount', wordcount, pass_user_data=True))
    dp.add_handler(MessageHandler(Filters.text, talk_to_me, pass_user_data=True))
    mybot.start_polling()
    # Block until the process is interrupted.
    mybot.idle()
def greet_user(bot, update, user_data):
    """Handle /start: log the event and confirm it back to the chat."""
    reply = 'Вызван /start'
    print(reply)
    update.message.reply_text(reply)
def talk_to_me(bot, update, user_data):
    """Echo any plain-text message back to the sender (and log it)."""
    incoming = update.message.text
    print(incoming)
    update.message.reply_text(incoming)
# Count the number of words passed to the /wordcount command.
def wordcount(bot, update, user_data):
    """Reply with how many words follow the /wordcount command."""
    tokens = update.message.text.split()
    if len(tokens) == 1:
        # Bare command with no arguments.
        update.message.reply_text('InvalidCommand')
        return
    if '""' in tokens:
        # A literal "" token counts as an empty-string argument.
        update.message.reply_text('NoneString')
        return
    word_total = len(tokens) - 1
    update.message.reply_text('%.i слова' % word_total)
# Calling this function is what actually starts the bot.
main()
| true |
77b0d4af240a1502ea822ac69f108b2dff090dcf | Python | chaitraliv/Assignments | /P06.py | UTF-8 | 373 | 3.796875 | 4 | [
"MIT"
] | permissive | mylist=['x','a','m','a','x']
# Two-pointer palindrome check: compare mirrored elements moving inward.
# Fixes the original single-counter scheme (count set to 1 on any match,
# decremented on mismatch), which misreported lists such as
# ['a','x','y','a'] as palindromes.
is_palindrome = True
start = 0
end = len(mylist) - 1
while start < end:
    if mylist[start] != mylist[end]:
        is_palindrome = False
        break
    start += 1
    end -= 1
if is_palindrome:
    print(f'Yes! Given list {mylist} is a Palindrome !')
else:
    print(f'No! Given list {mylist} is not a Palindrome !')
754ff288099a9d8266af8e2ede629df7cf0b2047 | Python | JaisonST/PartionsPython | /pt.py | UTF-8 | 3,945 | 3.171875 | 3 | [] | no_license | #-------------------Intro Note----------------------------------#
#Function: Backend Functiond for Partions Program
#Made by: Jaision Thomas
#---------------------------------------------------------------#
#---------------Create Partion Function-------------------------#
def getPartitions(n):
    """Return all integer partitions of n, each as a descending list.

    Iterative successor algorithm: p[0..k] holds the current partition;
    each step peels trailing 1s, decrements the rightmost part > 1, and
    redistributes the remainder in descending order.
    """
    retval = []
    p = [0] * n      # working buffer for the current partition
    k = 0            # index of the last used slot in p
    p[k] = n         # start from the single-part partition [n]
    cTotal = 0       # NOTE(review): unused
    while True:
        # Record a copy of the current partition p[0..k].
        a = []
        for i in range(0,k+1):
            a.append(p[i])
        retval.append(a)
        # Collect all trailing 1s into rem_val.
        rem_val = 0
        while k >= 0 and p[k] == 1:
            rem_val += p[k]
            k -= 1
        # Everything was 1s: [1,1,...,1] was the last partition.
        if k < 0:
            return retval
        # Decrement the rightmost part > 1 and account for the freed unit.
        p[k] -= 1
        rem_val += 1
        # Redistribute rem_val in chunks no larger than p[k] to keep the
        # descending order invariant.
        while rem_val > p[k]:
            p[k + 1] = p[k]
            rem_val = rem_val - p[k]
            k += 1
        p[k + 1] = rem_val
        k += 1
# this function was written
# by JoshuaWorthington
#modified by Jaison Thomas
#---------------------------------------------------------------#
#---------------Select Sort Partions Functions-------------------#
# True when every entry of li is odd (vacuously True for an empty list).
def oddPart(li):
    return all(value % 2 != 0 for value in li)
# True when every entry of li is even (vacuously True for an empty list).
def evenPart(li):
    return all(value % 2 == 0 for value in li)
# True when the value n never appears in li.
def rm(li, n):
    return n not in li
# True when no entry of li is divisible by n.
def rmDiv(li, n):
    return all(value % n != 0 for value in li)
# True when every entry of li is strictly less than n.
def ltn(li, n):
    return all(value < n for value in li)
# True when every entry of li is strictly greater than n.
def gtn(li, n):
    return all(value > n for value in li)
# True when every entry is congruent to 1, 2 or 4 (mod 7) and no entry repeats.
def m7(li):
    seen = set()
    for value in li:
        if value % 7 not in (1, 2, 4) or value in seen:
            return False
        seen.add(value)
    return True
# Gap rule between consecutive (descending) entries, keyed on the residue
# mod 7 of the *second* entry of each pair: residues 1/2/4 require a drop
# of at least 7, residue 3 at least 12, residues 5/6 at least 10, and
# residue 0 at least 15.  Equal neighbours always fail; lists with fewer
# than two entries always pass.
def modIterFN(li):
    if len(li) == 1:
        return True
    minimum_drop = {1: 7, 2: 7, 4: 7, 3: 12, 5: 10, 6: 10, 0: 15}
    for left, right in zip(li, li[1:]):
        if left == right:
            return False
        if left - right < minimum_drop[right % 7]:
            return False
    return True
#--------------------------------------------------------------#
#-----------------------Sort Functions-------------------------#
# Thin filters over a list of partitions: each keeps only the partitions
# accepted by the matching predicate above (or by a length test).
def oddParts(ans):
    return list(filter(oddPart, ans))
def evenParts(ans):
    return list(filter(evenPart, ans))
def smallerThan(ans, n):
    return list(filter(lambda part: len(part) < n, ans))
def biggerThan(ans, n):
    return list(filter(lambda part: len(part) > n, ans))
def removeN(ans, n):
    return list(filter(lambda part: rm(part, n), ans))
def removeDiv(ans, n):
    return list(filter(lambda part: rmDiv(part, n), ans))
def lessThanN(ans, n):
    return list(filter(lambda part: ltn(part, n), ans))
def greaterThanN(ans, n):
    return list(filter(lambda part: gtn(part, n), ans))
def mod7(ans):
    return list(filter(m7, ans))
def modIter(ans):
    return list(filter(modIterFN, ans))
#--------------------------------------------------------------#
#------------------------Main program--------------------------#
#a = int(input("Enter the number to partiiton:- "))
#ans = getPartitions(5)
#partions lesser than a number
#short = [i for i in ans if len(i) < 3]
#partions more than a number
#moreThan = [i for i in ans if len(i) > 3]
#partions wihtout even
#odd = [i for i in ans if oddPart(i)]
#partions without odd
#even = [i for i in ans if evenPart(i)]
#partions removing specific number
#removedN = [i for i in ans if rm(i, 3)]
#partions removing containing a number divisible by given number
#removedDiv = [i for i in ans if rmDiv(i, 3)]
#1,2,4 mod 7
#a = [i for i in ans if m7(i)]
#print final
#for i in a:
# print(i)
#---------------------------------------------------------------#
| true |
040872c0586290f40cbc61865e0e1b67a476168c | Python | Kesin11/Python-study | /src/multi_reduce.py | UTF-8 | 2,306 | 3.109375 | 3 | [] | no_license | #coding: utf-8
import multiprocessing as mp
from time import time, sleep
import os
'''
マルチプロセスでReduceを実装
'''
def add(x, y):
    """Return x + y, sleeping briefly and logging the worker pid (reduce step)."""
    sleep(0.1)
    # fix: parenthesised single-argument print works on both Python 2 and 3;
    # the original `print '...'` statement is a SyntaxError on Python 3
    print('pid: %s, add:%d, %d' % (os.getpid(), x, y))
    return x+y
def in_reduce(func, li):
    """Sequentially reduce li with func (runs inside a worker process)."""
    # fix: `reduce` is a builtin only on Python 2; functools.reduce exists on
    # both 2 and 3, so import it locally for forward compatibility
    from functools import reduce
    return reduce(func, li)
def multi_reduce(func, arg_list, core=mp.cpu_count()):
    """Reduce arg_list with func in parallel.

    The list is split into chunks, each chunk is reduced in a worker process,
    and the partial results are combined recursively until one value remains.
    apply_async() is used because Pool.map cannot ship the two-argument
    reduce function directly.
    """
    pool = mp.Pool()
    # split into `core` chunks when possible, otherwise one chunk per element
    if len(arg_list) > core:
        split_arg_list = equal_division(arg_list, core)
    else:
        split_arg_list = equal_division(arg_list, len(arg_list))
    result_list = []
    reduced_list = []
    for li in split_arg_list:
        result_list.append(pool.apply_async(in_reduce, (func, li)))
    for result in result_list:
        reduced_list.append(result.get())
    # fix: the original leaked one worker Pool per recursion level; release
    # the workers before recursing
    pool.close()
    pool.join()
    # recurse until everything is folded into a single value
    if len(reduced_list) == 1:
        return reduced_list.pop(0)
    else:
        # fix: floor division keeps `core` an int on Python 3 as well
        return multi_reduce(func, reduced_list, core // 2)
def equal_division(li, num):
    """Split li into num contiguous chunks of near-equal size (odd lengths OK)."""
    split_li = []
    # fix: `range` instead of Python-2-only `xrange`, and floor division so
    # the slice bounds stay ints on Python 3
    for i in range(num):
        split_li.append(li[i * len(li) // num : (i + 1) * len(li) // num])
    return split_li
if __name__ == '__main__':
    # Benchmark: serial reduce vs multi_reduce, first with add, then with a
    # locally-defined mul -- showing that a __main__-local function still works.
    # NOTE(review): Python 2 syntax throughout (print statements).
    '''reduceで使用する関数はおそらくローカルだろうが関係なし'''
    def mul(x, y):
        # same shape as add(): sleep to simulate work, log the worker pid
        sleep(0.1)
        print 'pid: %s, add:%d, %d' % (os.getpid(), x, y)
        return x*y
    # shadows the builtin `list` -- intentional here but worth noting
    list = range(1, 100)
    singe_start = time()
    single_sum = reduce(add, list)
    single_end = time() - singe_start
    multi_start = time()
    multi_sum = multi_reduce(add, list, 4)
    multi_end = time() - multi_start
    print "single reduce: %d, %fs" % (single_sum, single_end)
    print "multi reduce: %d, %fs" % (multi_sum, multi_end)
    # repeat the comparison with multiplication
    list = range(1, 100)
    singe_start = time()
    single_mul = reduce(mul, list)
    single_end = time() - singe_start
    multi_start = time()
    multi_mul = multi_reduce(mul, list, 4)
    multi_end = time() - multi_start
    print "single reduce: %d, %fs" % (single_mul, single_end)
    print "multi reduce: %d, %fs" % (multi_mul, multi_end)
| true |
2017945b19759c5781cca74fa75546ae7b9c7a66 | Python | fdurant/kiva_project | /src/KivaLoans.py | UTF-8 | 4,771 | 2.609375 | 3 | [] | no_license | from KivaLoan import KivaLoan
from KivaPartners import KivaPartners
from os.path import expanduser
from SldaTextFeatureGenerator import SldaTextFeatureGenerator
class KivaLoans(object):
    ''' Class representing a collection of loans at kiva.org '''

    def __init__(self, loanIdList=None, loanDictList=None):
        """Build the collection from loan ids or from raw loan dicts.

        loanIdList   -- iterable of Kiva loan ids; each is fetched via
                        KivaLoan(id=...)
        loanDictList -- iterable of pre-fetched loan dicts
        When both are given, loanIdList wins.
        """
        self.list = []   # loans in insertion order
        self.dict = {}   # loan id -> KivaLoan, for O(1) lookup
        if loanIdList:
            for loanId in loanIdList:
                loan = KivaLoan(id=loanId)
                self.list.append(loan)
                self.dict[loan.getId()] = loan
        elif loanDictList:
            for loanDict in loanDictList:
                loan = KivaLoan(dict=loanDict)
                self.list.append(loan)
                self.dict[loan.getId()] = loan

    def push(self, loan):
        """Append a single KivaLoan to the collection."""
        # fix: the original validated with `assert` (stripped under python -O)
        # and compared the class *name* string; use a real isinstance check
        if not isinstance(loan, KivaLoan):
            raise TypeError('push() expects a KivaLoan, got %s'
                            % type(loan).__name__)
        self.list.append(loan)
        self.dict[loan.getId()] = loan

    def getLoans(self):
        """Return the loans (the internal list, not a copy)."""
        return self.list

    def getLoanIds(self):
        """Return the loan ids, in insertion order."""
        return [loan.getId() for loan in self.list]

    def getSize(self):
        """Number of loans held."""
        return len(self.list)

    def getLabels(self):
        """Per-loan funding-ratio labels (see KivaLoan.getFundingRatioLabel)."""
        return [loan.getFundingRatioLabel() for loan in self.list]

    def getTopicFeatures(self, slda=None, settingsFile=None):
        """Per-loan topic (gamma) features from the sLDA model.

        NOTE(review): slda must not actually be None -- it is called directly.
        """
        descriptionList = [loan.getEnglishDescription() for loan in self.list]
        return slda.getGammasFromDescriptions(descriptionList,
                                              settingsFile=settingsFile,
                                              outDir='/tmp',
                                              sortedByDescendingEta=False)

    def getLoanFeatures(self, transformCategorical=False):
        """Per-loan (name, value) feature pairs from each KivaLoan."""
        return [loan.getMultipleFeatures(transformCategorical=transformCategorical)
                for loan in self.list]

    def getPartnerFeatures(self, partners=None):
        """Per-loan partner feature pairs.

        fix: the default used to be ``partners=KivaPartners()``, which was
        evaluated once at class-definition time and silently shared by every
        call; a fresh instance is now created per call when none is passed.
        """
        if partners is None:
            partners = KivaPartners()
        return [partners.getMultiplePartnerFeatures(loan.getPartnerId())
                for loan in self.list]

    def getAllFeatures(self, slda=None, settingsFile=None,
                       transformCategorical=False):
        """Merge baseline, loan, topic and partner features per loan.

        Returns (columns, allFeatures): columns is the list of feature names;
        allFeatures holds one merged value row per loan in the same order.
        NOTE(review): assumes a non-empty collection -- column names are read
        from the first row of each feature family.
        """
        allFeatures = []
        columns = []
        topicFeatures = self.getTopicFeatures(slda=slda, settingsFile=settingsFile)
        loanFeatures = self.getLoanFeatures(transformCategorical=transformCategorical)
        partnerFeatures = self.getPartnerFeatures()
        # constant bias feature, one per loan
        baselineFeature = [[('Baseline', 1.0)] for i in range(len(partnerFeatures))]
        columns.extend([f[0] for f in baselineFeature[0]])
        columns.extend([f[0] for f in loanFeatures[0]])
        columns.extend([f[0] for f in topicFeatures[0]])
        columns.extend([f[0] for f in partnerFeatures[0]])
        for i in range(len(self.list)):
            mergedFeatures = []
            mergedFeatures.extend([f[1] for f in baselineFeature[i]])
            mergedFeatures.extend([f[1] for f in loanFeatures[i]])
            mergedFeatures.extend([f[1] for f in topicFeatures[i]])
            mergedFeatures.extend([f[1] for f in partnerFeatures[i]])
            allFeatures.append(mergedFeatures)
        return (columns, allFeatures)
if __name__ == "__main__":
    # Smoke test: fetch two real loans and run the full feature pipeline.
    # NOTE(review): requires network access, a trained sLDA model on disk,
    # and uses Python 2 print statements.
    loanIds = [376222,376200]
    loanCollection = KivaLoans(loanIdList=loanIds)
    assert(len(loanCollection.getLabels()) == loanCollection.getSize())
    assert(loanIds == loanCollection.getLoanIds())
    # paths to the pre-trained sLDA model and vocabulary files
    homeDir = expanduser("~")
    projectDir = "%s/%s" % (homeDir, 'work/metis_projects/passion_project/kiva_project')
    sldaBin = "%s/%s" % (homeDir, 'install/slda-master/slda')
    modelFileBin = "%s/%s" % (projectDir, 'data/predicting_funding/slda_out/final.model')
    modelFileTxt = "%s/%s" % (projectDir, 'data/predicting_funding/slda_out/final.model.text')
    dictionaryFile = "%s/%s" % (projectDir, 'data/predicting_funding/kiva_dict.txt')
    vocabFile = "%s/%s" % (projectDir, 'data/predicting_funding/kiva.lda-c.vocab')
    slda = SldaTextFeatureGenerator(modelFileBin=modelFileBin,
                                    modelFileTxt=modelFileTxt,
                                    dictionaryFile=dictionaryFile,
                                    vocabFile=vocabFile,
                                    sldaBin=sldaBin)
    settingsFile = "%s/%s" % (projectDir, 'data/predicting_funding/slda_settings.txt')
    # each feature family must produce exactly one row per loan
    topicFeatures = loanCollection.getTopicFeatures(slda,settingsFile)
    assert(len(topicFeatures) == loanCollection.getSize())
    loanFeatures = loanCollection.getLoanFeatures()
    assert(len(loanFeatures) == loanCollection.getSize())
    partnerFeatures = loanCollection.getPartnerFeatures()
    assert(len(partnerFeatures) == loanCollection.getSize())
    columns, allFeatures = loanCollection.getAllFeatures(slda, settingsFile)
    print columns
    print allFeatures
    # merged rows must line up with the column names
    assert(len(columns) == len(allFeatures[0]))
| true |
66af88db005b6d4a24cf4595b897ba0edba2a111 | Python | Yuliya-Karuk/get-repo | /lesson04/easy_homework.py | UTF-8 | 1,900 | 4.375 | 4 | [] | no_license | # Все задачи текущего блока решите с помощью генераторов списков!
# Task 1:
# Given a list filled with arbitrary integers, build a new list whose
# elements are the squares of the source elements:
# [1, 2, 4, 0] --> [1, 4, 16, 0]
import random

lst_beg = [random.randint(-10, 10) for i in range(5)]
# NOTE(review): the immediately-invoked lambda is equivalent to plain i*i
lst_last = [(lambda i: i*i)(i) for i in lst_beg]
print('Начальный список - ', lst_beg, 'Конечный список - ', lst_last)

# Task 2:
# Given two lists of fruit, build the list of fruits present in both.
fruits = ["яблоко", "банан", "киви", "арбуз", "клубника", "слива", "черешня", "инжир", "хурма"]
exotic_fruit = ["инжир", "маракуйа", "ананас", "хурма"]
print('Список фруктов - ', fruits, '\nСписок экзотических фруктов - ', exotic_fruit)
inter = [i for i in fruits if i in exotic_fruit]
print('Фрукты в обоих списках', inter)

# Task 3:
# Given a list filled with arbitrary numbers, keep the elements that are:
# + multiples of 3
# + non-negative
# + not multiples of 4
original = [random.randint(-100, 100) for i in range(10)]
last = [i for i in original if i >= 0 and i % 3 == 0 and i % 4 != 0]
print('Начальный список', original, '\nКонечный список', last)
| true |
b77533b1d15e0b05f21e36220d4619dddc39d0db | Python | Anya1234/1c_task | /parse_image.py | UTF-8 | 4,399 | 3.09375 | 3 | [] | no_license | import numpy as np
def find_black(image_data):
    """Return a boolean mask marking near-black, opaque pixels.

    A pixel counts as black when its first three channels are all < 50 and
    the fourth channel is > 100 (RGBA channel order assumed -- TODO confirm).

    image_data -- numpy array of shape (H, W, 4)
    Returns a (H, W) boolean array.
    """
    # fix: removed the stray debug print(image_data); the per-pixel Python
    # double loop is replaced by equivalent vectorised channel tests
    rgb_dark = (image_data[..., :3] < 50).all(axis=-1)
    opaque = image_data[..., 3] > 100
    return rgb_dark & opaque
def find_coordinates(image_data):
    """Scan a boolean mask from each of the four edges and record, per side,
    the first two runs of True pixels encountered.

    image_data -- 2-D boolean array (e.g. output of find_black).
    Returns a dict with keys 'upper', 'lower', 'lefter', 'righter'; each value
    is a (2, 2, 2) int array laid out as [run][start/end][row, col].
    NOTE(review): assumes at least two runs exist per side, and that no run
    touches the far border (the inner while loops index past the run without
    a bounds check) -- confirm against expected inputs.
    """
    # --- top-down scan: first two horizontal runs seen from above ---
    cnt = 0
    upper_result = np.full((2, 2, 2), 0)
    for i in range(image_data.shape[0]):
        for j in range(image_data.shape[1]):
            if cnt > 1:
                break
            # when hunting the second run, skip columns overlapping the first
            if cnt == 1 and j <= upper_result[0][1][1]:
                continue
            if image_data[i][j]:
                upper_result[cnt][0] = [i, j]
                while image_data[i][j]:
                    j += 1
                upper_result[cnt][1] = [i, j - 1]
                cnt += 1
        if cnt > 1:
            break
    # --- bottom-up scan: rows are mirrored via shape[0] - i - 1 ---
    cnt = 0
    lower_result = np.full((2, 2, 2), 0)
    for i in range(image_data.shape[0]):
        for j in range(image_data.shape[1]):
            if cnt > 1:
                break
            if cnt == 1 and j <= lower_result[0][1][1]:
                continue
            if image_data[image_data.shape[0] - i - 1][j]:
                lower_result[cnt][0] = [image_data.shape[0] - i - 1, j]
                while image_data[image_data.shape[0] - i - 1][j]:
                    j += 1
                lower_result[cnt][1] = [image_data.shape[0] - i - 1, j - 1]
                cnt += 1
        if cnt > 1:
            break
    # --- left-to-right scan: first two vertical runs ---
    cnt = 0
    left_result = np.full((2, 2, 2), 0)
    for j in range(image_data.shape[1]):
        for i in range(image_data.shape[0]):
            if cnt > 1:
                break
            if cnt == 1 and i <= left_result[0][1][0]:
                continue
            if image_data[i][j]:
                left_result[cnt][0] = [i, j]
                while image_data[i][j]:
                    i += 1
                left_result[cnt][1] = [i - 1, j]
                cnt += 1
        if cnt > 1:
            break
    # --- right-to-left scan: columns mirrored via shape[1] - j - 1 ---
    cnt = 0
    right_result = np.full((2, 2, 2), 0)
    for j in range(image_data.shape[1]):
        for i in range(image_data.shape[0]):
            if cnt > 1:
                break
            if cnt == 1 and i <= right_result[0][1][0]:
                continue
            if image_data[i][image_data.shape[1] - j - 1]:
                right_result[cnt][0] = [i, image_data.shape[1] - j - 1]
                while image_data[i][image_data.shape[1] - j - 1]:
                    i += 1
                right_result[cnt][1] = [i - 1, image_data.shape[1] - j - 1]
                cnt += 1
        if cnt > 1:
            break
    return {"upper": upper_result,
            "lower": lower_result,
            "lefter": left_result,
            "righter": right_result}
def find_centers(image_boolean_data):
    """Locate the nine cell centers of the 3x3 grid and their bounds.

    Returns a list of nine [[row_mid, col_mid], [row_bounds, col_bounds]]
    entries, row-major over the grid cells.
    """
    coords = find_coordinates(image_boolean_data)
    up, left, right = coords['upper'], coords['lefter'], coords['righter']
    lower = coords['lower']
    # int arrays on purpose: assignment truncates the /2 midpoints exactly
    # like the original code did
    row_mid = np.full(3, 0)
    col_mid = np.full(3, 0)
    row_mid[0] = (up[0][0][0] + left[0][0][0]) / 2
    row_mid[1] = (left[0][1][0] + left[1][0][0]) / 2
    row_mid[2] = (lower[0][0][0] + left[1][1][0]) / 2
    row_bounds = [
        [up[0][0][0], left[0][0][0]],
        [left[0][1][0], left[1][0][0]],
        [left[1][1][0], lower[0][0][0]],
    ]
    col_mid[0] = (up[0][0][1] + left[0][0][1]) / 2
    col_mid[1] = (up[0][1][1] + up[1][0][1]) / 2
    col_mid[2] = (up[1][1][1] + right[0][0][1]) / 2
    col_bounds = [
        [left[0][0][1], up[0][0][1]],
        [up[0][1][1], up[1][0][1]],
        [up[1][1][1], right[0][0][1]],
    ]
    return [
        [[row_mid[i], col_mid[j]], [row_bounds[i], col_bounds[j]]]
        for i in range(3)
        for j in range(3)
    ]
| true |
dbdf245acdac888f7866467dca1a778cfcb64d17 | Python | rajarameshmamidi/NLP | /nlp_text_generation.py | UTF-8 | 2,023 | 3.984375 | 4 | [] | no_license | # Read in the corpus, including punctuation!
import pandas as pd
#Build a Markov Chain Function
from collections import defaultdict
#Create a Text Generator
import random
# Load the pickled corpus (a DataFrame of transcripts indexed by comedian).
data = pd.read_pickle('corpus.pkl')
data  # NOTE(review): bare expression; displays only in a notebook, no effect as a script
# Extract only Ali Wong's text
ali_text = data.transcript.loc['ali']
ali_text[:200]  # NOTE(review): bare expression, notebook leftover
print('data till index 200 is:- '+ali_text[:200])
#Build a Markov Chain Function
def markov_chain(text):
    '''Map every word of *text* to the list of words that follow it.

    Splitting is on single spaces, so punctuation stays attached to words.
    Returns a plain dict: word -> list of successor words, in order seen.
    '''
    words = text.split(' ')
    chain = {}
    for current_word, next_word in zip(words, words[1:]):
        chain.setdefault(current_word, []).append(next_word)
    return chain
# Create the dictionary for Ali's routine, take a look at it
ali_dict = markov_chain(ali_text)
ali_dict  # NOTE(review): bare expression, notebook leftover
print('ali text info is:- '+str(ali_dict))
#Create a Text Generator
def generate_sentence(chain, count=15):
    '''Random-walk the chain for *count* words and return them as a sentence.

    chain maps each word to the list of words observed after it. The first
    word is drawn uniformly from the keys and capitalised; each following
    word is drawn from the successors of the previous one. Ends with '.'.
    '''
    current = random.choice(list(chain.keys()))
    words = [current.capitalize()]
    for _ in range(count - 1):
        current = random.choice(chain[current])
        words.append(current)
    return ' '.join(words) + '.'
# Generate and show one 15-word sentence from the chain.
a = generate_sentence(ali_dict)
print('random sentence:-'+a)
eee031351c4115a1057b3b81293fe25a17ad8066 | Python | jrinder42/Advent-of-Code-2020 | /day15/day15.py | UTF-8 | 767 | 3.515625 | 4 | [] | no_license |
'''
Advent of Code 2020 - Day 15
'''
# Van Eck "memory game": on each turn, speak 0 if the previous number was
# new, otherwise the gap between its last two occurrences.
def memory_game(starting, turns):
    """Return the number spoken on turn `turns` (1-based).

    Keeps only the last turn each number was spoken (O(1) memory per
    distinct number). The original stored every turn per number, which
    cannot scale to the 30,000,000-turn Part 2.
    """
    # every starting number except the last, mapped to the turn it was said
    last_seen = {num: turn for turn, num in enumerate(starting[:-1], start=1)}
    prev = starting[-1]
    for turn in range(len(starting), turns):
        # `prev` was spoken on `turn`; decide what is spoken on `turn + 1`
        nxt = turn - last_seen[prev] if prev in last_seen else 0
        last_seen[prev] = turn
        prev = nxt
    return prev


STARTING = [0, 3, 1, 6, 7, 5]
print('Advent of Code Day 15 Answer Part 1 / 2:', memory_game(STARTING, 2020))  # Part 1
# Part 2: memory_game(STARTING, 30_000_000)
| true |
1919e76da1680f882731f7dcf908d89472ff10f6 | Python | ysli16/coverage-path-generation | /old versions/waypointpart/singlearearoute.py | UTF-8 | 8,057 | 2.953125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import numpy as np
import matplotlib.pyplot as plt
import math
from shapely import geometry
def rotate(point, sin_a=None, cos_a=None):
    """Rotate a 2-D point, returning a (1, 2) numpy array.

    sin_a / cos_a default to the module-level `sin` / `cos` globals that the
    script sets from the user-supplied route angle, so all existing callers
    keep working; passing them explicitly makes the function self-contained
    and testable (generalization -- the original depended only on globals).
    """
    if sin_a is None:
        sin_a = sin
    if cos_a is None:
        cos_a = cos
    x = cos_a * point[0] - sin_a * point[1]
    y = cos_a * point[1] + sin_a * point[0]
    return np.array([[x, y]]).reshape(1, 2)
def findcross(pointl, pointr, lineheight):
    """X coordinate where the segment pointl->pointr crosses y = lineheight.

    Linear interpolation between the two endpoints; pointl and pointr are
    (x, y) pairs. NOTE: undefined when the segment is horizontal (dy == 0).
    """
    dy = pointr[1] - pointl[1]
    dx = pointr[0] - pointl[0]
    return (lineheight - pointl[1]) / dy * dx + pointl[0]
# ---- read the polygon boundary from the user (lon/lat pairs) ----
num=int(input("Enter the number of boundry points:"))
gcoord=np.zeros((0,2))
for i in range(num):
    longitude_in=float(input("Enter boundry longitude:"))
    latitude_in=float(input("Enter boundry latitude:"))
    cin=np.array([[longitude_in,latitude_in]]).reshape(1,2)
    gcoord=np.append(gcoord,cin,axis=0)
#rearrange coordinate(according to x)
gcoord = gcoord[gcoord[:,0].argsort()]
#convert to relative frame (origin at the leftmost point)
cgcoord=gcoord-gcoord[0]
#convert to rotated frame; `sin`/`cos` are module globals read by rotate()
angle=float(input("Enter the angle of the route(degree):"))
sin=math.sin(angle/180*math.pi)
cos=math.cos(angle/180*math.pi)
rcoord=np.zeros((0,2))
for i in range(num):
    rpoint=rotate(cgcoord[i]).reshape(1,2)
    rcoord=np.append(rcoord,rpoint,axis=0)
coord=rcoord[rcoord[:,0].argsort()]
#rearrange the sequence of boundry points so that they form a polygon:
#points above the left-right diagonal are inserted into the upper chain
#(ascending x), points below into the lower chain (descending x)
vertex=np.zeros((0,2))
vertex=np.append(vertex,coord[0].reshape(1,2),axis=0)
vertex=np.append(vertex,coord[num-1].reshape(1,2),axis=0)
for i in range(1,num-1):
    # y of the diagonal between the leftmost and rightmost points at this x
    split=(coord[i][0]-coord[0][0])/(coord[num-1][0]-coord[0][0])*(coord[num-1][1]-coord[0][1])+coord[0][1]
    if(coord[i][1]>split):
        up=True
    else:
        up=False
    if(up):
        j=0
        while(vertex[j][0]<=coord[i][0]):
            j=j+1
        vertex=np.insert(vertex, j, values=coord[i], axis=0)
    else:
        j=np.argwhere(vertex[:,0]==coord[num-1][0])[0][0]
        while(j<len(vertex)):
            if(vertex[j][0]>=coord[i][0]):
                j=j+1
            else:
                vertex=np.insert(vertex, j, values=coord[i], axis=0)
                break
        if(j==len(vertex)):
            vertex=np.append(vertex, coord[i].reshape(1,2), axis=0)
# index of the rightmost vertex = boundary between upper and lower chains
pivot=np.argwhere(vertex[:,0]==coord[num-1][0])[0][0]
#define the width,generate cut position (vertical strips of the given width)
width=float(input("Enter width of the route:"))
cutpos=np.arange(coord[0][0]+width,coord[num-1][0]+width,width)
pointpos=np.arange(coord[0][0]+width/2,coord[num-1][0]+width/2,width)
#find cross point with boundry: insert each cut's intersection with the
#upper and lower chains into an augmented vertex list
allvertex=vertex
upaddnum=0
for i in range(len(cutpos)-1):
    j=0
    k=pivot
    while(cutpos[i]>vertex[j][0]):
        j=j+1
    lefttop=vertex[j-1]
    righttop=vertex[j]
    while(k<len(vertex)):
        if(cutpos[i]<vertex[k][0]):
            k=k+1
        else:
            rightbuttom=vertex[k-1]
            leftbuttom=vertex[k]
            break
    if(k==len(vertex)):
        rightbuttom=vertex[k-1]
        leftbuttom=vertex[0]
    # linear interpolation of the cut line with each chain segment
    upperpos=[(cutpos[i],(cutpos[i]-lefttop[0])/(righttop[0]-lefttop[0])*(righttop[1]-lefttop[1])+lefttop[1])]
    lowerpos=[(cutpos[i],(cutpos[i]-leftbuttom[0])/(rightbuttom[0]-leftbuttom[0])*(rightbuttom[1]-leftbuttom[1])+leftbuttom[1])]
    allvertex=np.insert(allvertex, upaddnum+j, values=upperpos, axis=0)
    allvertex=np.insert(allvertex, upaddnum+k+1, values=lowerpos, axis=0)
    upaddnum=upaddnum+1
#find range of waypoints ineach cut
pointrange=np.zeros(shape=[0,2])
waypoint=np.zeros(shape=[0,2])
allvertex=np.append(allvertex,allvertex[0].reshape(1,2),axis=0)
allpivot=np.argwhere(allvertex[:,0]==coord[num-1][0])[0][0]
for i in range(len(cutpos)):
    # select the vertices of the upper/lower chains that fall inside strip i
    if(i==0):
        leftupindex=0
        leftdownindex=len(allvertex)-1
    else:
        leftupindex=np.argwhere(allvertex[:,0]>=cutpos[i-1])[0][0]
        leftdownindex=np.argwhere(allvertex[allpivot:len(allvertex),0]<cutpos[i-1])[0][0]+allpivot-1
    if(i==len(cutpos)-1):
        rightupindex=allpivot
        rightdownindex=allpivot
    else:
        rightupindex=np.argwhere(allvertex[:,0]>cutpos[i])[0][0]-1
        rightdownindex=np.argwhere(allvertex[allpivot:len(allvertex),0]<cutpos[i])[0][0]+allpivot-1
    upsearchrange=allvertex[leftupindex:rightupindex+1,:]
    downsearchrange=allvertex[rightdownindex:leftdownindex+1,:]
    topmaxindex=upsearchrange.argmax(0)[1]
    topmin=upsearchrange.min(0)
    buttommax=downsearchrange.max(0)
    buttomminindex=downsearchrange.argmin(0)[1]
    # waypoints run down the middle of the strip, inset width/2 from both
    # chains; collapse to a single midpoint when the strip is too narrow
    if(topmin[1]-width/2>buttommax[1]+width/2):
        newpoint=np.array([topmin[1]-width/2,buttommax[1]+width/2])
    else:
        newpoint=np.array([(topmin[1]+buttommax[1])/2])
    newpoint=newpoint.reshape(len(newpoint),1)
    newpoint=np.insert(newpoint,0,[pointpos[i]],axis=1)
    # extra waypoint hugging the highest point of the upper chain
    if(topmaxindex==0):
        topx1=upsearchrange[topmaxindex][0]
        topx2=findcross(upsearchrange[topmaxindex],upsearchrange[topmaxindex+1],upsearchrange[topmaxindex][1]-width/2)
    elif(topmaxindex==len(upsearchrange)-1):
        topx1=findcross(upsearchrange[topmaxindex-1],upsearchrange[topmaxindex],upsearchrange[topmaxindex][1]-width/2)
        topx2=upsearchrange[topmaxindex][0]
    else:
        topx1=findcross(upsearchrange[topmaxindex-1],upsearchrange[topmaxindex],upsearchrange[topmaxindex][1]-width/2)
        topx2=findcross(upsearchrange[topmaxindex],upsearchrange[topmaxindex+1],upsearchrange[topmaxindex][1]-width/2)
    if(topx1<=pointpos[i] and topx2>=pointpos[i]):
        newpoint=np.insert(newpoint,0,[[pointpos[i],upsearchrange[topmaxindex][1]-width/2]],axis=0)
    elif(topx1>pointpos[i]):
        newpoint=np.insert(newpoint,0,[[topx1,upsearchrange[topmaxindex][1]-width/2]],axis=0)
    else:
        newpoint=np.insert(newpoint,0,[[topx2,upsearchrange[topmaxindex][1]-width/2]],axis=0)
    # extra waypoint hugging the lowest point of the lower chain
    if(buttomminindex==len(upsearchrange)-1):
        buttomx1=downsearchrange[buttomminindex][0]
        buttomx2=findcross(downsearchrange[buttomminindex],downsearchrange[buttomminindex-1],downsearchrange[buttomminindex][1]-width/2)
    elif(buttomminindex==0):
        buttomx1=findcross(downsearchrange[buttomminindex+1],downsearchrange[buttomminindex],downsearchrange[buttomminindex][1]-width/2)
        buttomx2=downsearchrange[buttomminindex][0]
    else:
        buttomx1=findcross(downsearchrange[buttomminindex+1],downsearchrange[buttomminindex],downsearchrange[buttomminindex][1]-width/2)
        buttomx2=findcross(downsearchrange[buttomminindex],downsearchrange[buttomminindex-1],downsearchrange[buttomminindex][1]-width/2)
    if(buttomx1<=pointpos[i] and buttomx2>=pointpos[i]):
        newpoint=np.append(newpoint,[[pointpos[i],downsearchrange[buttomminindex][1]-width/2]],axis=0)
    elif(topx1>pointpos[i]):
        newpoint=np.append(newpoint,[[buttomx1,downsearchrange[buttomminindex][1]-width/2]],axis=0)
    else:
        newpoint=np.append(newpoint,[[buttomx2,downsearchrange[buttomminindex][1]-width/2]],axis=0)
    # boustrophedon: reverse every other strip so the path snakes
    if(i%2==1):
        newpoint=np.flipud(newpoint)
    waypoint=np.append(waypoint,newpoint,axis=0)
#rotate back everything (rotate() reads the module-level sin/cos, so
#negating the angle here inverts the earlier rotation)
rwaypoint=np.zeros(shape=[len(waypoint),2])
rvertex=np.zeros(shape=[len(vertex),2])
sin=math.sin(-angle/180*math.pi)
cos=math.cos(-angle/180*math.pi)
for i in range(len(waypoint)):
    rwaypoint[i]=rotate(waypoint[i])
for i in range(len(vertex)):
    rvertex[i]=rotate(vertex[i])
# translate back to absolute lon/lat
rwaypoint=rwaypoint+gcoord[0]
rvertex=rvertex+gcoord[0]
#plot the waypoints and the polygon outline
plt.plot(rwaypoint[:,0],rwaypoint[:,1])
poly = geometry.Polygon(rvertex)
x,y = poly.exterior.xy
plt.plot(x,y)
| true |
db28c4e8be1c953d983d16260b8883a4d8892711 | Python | drewkarpov/python_pytest | /test_framework/helpers/BrowserApi.py | UTF-8 | 1,470 | 2.859375 | 3 | [
"BSD-3-Clause"
] | permissive | import allure
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
# Default explicit-wait timeout passed to WebDriverWait (seconds, per Selenium's API)
DEFAULT_ELEMENT_WAIT_TIMEOUT = 40
class BrowserApi:
    """Thin wrapper over a Selenium WebDriver with explicit waits built in."""

    def __init__(self, driver):
        self._driver = driver

    def __wait_for_element_present(self, locator, time=None):
        """Block until the element is in the DOM, then return it."""
        timeout = time or DEFAULT_ELEMENT_WAIT_TIMEOUT
        waiter = WebDriverWait(self._driver, timeout)
        return waiter.until(
            expected_conditions.presence_of_element_located(locator),
            message=f"Не найден элемент с локатором {locator}")

    def _click(self, locator):
        """Wait for the element and click it (reported as an allure step)."""
        with allure.step(f"кликаем по элементу {locator}"):
            self.__wait_for_element_present(locator).click()

    def _get_element_text(self, locator):
        """Wait for the element and return its visible text."""
        return self.__wait_for_element_present(locator).text

    def _get_element_attribute(self, locator, attribute_value):
        """Wait for the element and return the named attribute's value."""
        return self.__wait_for_element_present(locator).get_attribute(attribute_value)

    def _type(self, locator, send_value):
        """Wait for the field, clear it, then type the value (allure step)."""
        with allure.step(f"вводим {send_value} в поле {locator}"):
            field = self.__wait_for_element_present(locator)
            field.clear()
            field.send_keys(send_value)

    def _get_elements(self, locator):
        """Wait for at least one match, then return all matching elements."""
        self.__wait_for_element_present(locator)
        return self._driver.find_elements(*locator)
| true |
ba41627f50138051c2dfd85d490a26e6466c753d | Python | jairGil/Libreria-BDDII | /modelo/Persona.py | UTF-8 | 471 | 2.6875 | 3 | [] | no_license | from modelo.Direccion import Direccion
class Persona:
    """A person identified by RFC, with full name and a Direccion address."""

    # declared attribute types
    rfc: str
    nombre: str
    apellido_paterno: str
    apellido_materno: str
    direccion: Direccion

    def __init__(self, rfc: str, nombre: str, apellido_paterno: str,
                 apellido_materno: str, direccion: Direccion) -> None:
        """Store the identifying data for this person."""
        self.rfc = rfc
        self.nombre = nombre
        self.apellido_materno = apellido_materno
        self.apellido_paterno = apellido_paterno
        self.direccion = direccion
| true |
5499a86b969a3d8bd1b98e408c2efa35294f598d | Python | WeiProtein/Self-replicating-repo | /create_repo/routes.py | UTF-8 | 4,836 | 2.765625 | 3 | [] | no_license | #!/usr/bin/env python
from flask import Flask, render_template, url_for, flash, redirect, request
from create_repo import app,db
from create_repo.forms import InfoForm
from create_repo.models import User
import os
import getpass
import subprocess
from subprocess import Popen, PIPE
#routes start
@app.route("/",methods=['GET', 'POST'])
@app.route("/home",methods=['GET', 'POST'])
def home():
    """Show the credentials form; on submit, clone this repo and push it to a
    new 'script_test' repository under the submitted GitHub account.

    NOTE(review): Python 2 syntax (print statements). db.drop_all() wipes the
    entire database on every submit, and the plaintext password is stored in
    the User row -- both look unintended for anything beyond a demo.
    """
    #return render_template('home.html')
    form = InfoForm()
    if form.validate_on_submit():
        #probably don't need to save to db
        user = User(username=form.username.data, password=form.password.data)
        db.drop_all()
        db.create_all()
        db.session.add(user)
        db.session.commit()
        flash('Repo has been created for %s!'% (form.username.data),'success')
        #return redirect(url_for('home'))
        ######BEGIN GIT REPLICATION HERE########
        user_name = str(form.username.data)
        password = str(form.password.data)
        #using shell to execute git command
        # NOTE(review): user_name is interpolated into shell=True commands
        # below -- shell/command injection risk with untrusted input
        def execute(cmd, work_dir):
            pipe = subprocess.Popen(cmd, shell=True, cwd=work_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            (out, error) = pipe.communicate()
            print out, error
            pipe.wait()
        #cloning original repository & move into that directory
        # NOTE(review): the `cd` in this shell line does not persist; later
        # commands run in repo_dir regardless
        def git_clone(repo_dir):
            cmd = 'git clone https://github.com/weiprotein/self-replicating-repo.giti && cd create_repo'
            execute(cmd, repo_dir)
        #git init
        def git_init(repo_dir):
            cmd = 'git init'
            execute(cmd, repo_dir)
        #adds file to index
        def git_add(repo_dir):
            cmd = 'git add .'
            execute(cmd, repo_dir)
        #commit to git
        def git_commit(msg, repo_dir):
            cmd = 'git commit -m \'' + msg + '\''
            execute(cmd, repo_dir)
        #create repo in user's github
        def create_repo(user_name, repo_name, repo_dir):
            cmd = 'curl -u ' + user_name + ' https://api.github.com/user/repos -d \'{"name":"' + repo_name + '"}\''
            execute(cmd, repo_dir)
        #delete any existing remote branches & create remote branch to push to master
        def create_origin(user_name, repo_name, repo_dir):
            cmd1 = 'git remote rm origin'
            execute(cmd1,repo_dir)
            cmd2 = 'git remote add origin https://github.com/' + user_name + '/' + repo_name + '.git'
            execute(cmd2, repo_dir)
        #git push - ensure that credentials are deleted first if keychain access enabled
        def git_push(repo_dir):
            cmd_1 = 'git credential-osxkeychain erase'
            cmd_2 = 'host=github.com'
            cmd_3 = 'protocol=https'
            cmd_4 = ' '
            execute(cmd_1, repo_dir)
            execute(cmd_2, repo_dir)
            execute(cmd_3, repo_dir)
            execute(cmd_4, repo_dir)
            cmd_5 = 'git push -u origin master'
            execute(cmd_5, repo_dir)
        #get the user's pwd
        pwd = os.getcwd()
        git_clone(pwd)
        git_init(pwd)
        print "====PAST STEP 1===="
        git_add(pwd)
        print "============PAST STEP 2============"
        git_commit('Testing gitupload via script.', pwd)
        print "============================WE HAVE GIT COMMIT========================"
        create_repo(user_name,'script_test', pwd)
        #repo = Popen(['curl', '-u', str(user_name), 'https://api.github.com/user/repos', '-d', '\'{"name":"script_test"}\''], stdin=PIPE)
        #repo.communicate(password)
        print "===============================REPO HAS BEEN CREATED======================="
        create_origin(user_name, 'script_test', pwd)
        # clear any cached macOS keychain credentials before pushing
        cmd_1 = 'git credential-osxkeychain erase'
        cmd_2 = 'host=github.com'
        cmd_3 = 'protocol=https'
        cmd_4 = ' '
        execute(cmd_1, pwd)
        execute(cmd_2, pwd)
        execute(cmd_3, pwd)
        execute(cmd_4, pwd)
        git_push(pwd)
        #cmd = Popen(['git', 'push', '-u', 'origin', 'master'], stdin=PIPE)
        #cmd.communicate(password)
        print "===========================================GIT PUSH HAS OCCURED========================"
        """
        #giving the command line username and password
        execute(user_name, pwd)
        execute(password, pwd)
        execute(password, pwd)
        execute(password, pwd)
        """
        ######END GIT REPLICATION HERE######
    return render_template('info.html', title='GitHub Info', form=form)
@app.route("/about")
def about():
    """Render the static About page."""
    context = {'title': 'About'}
    return render_template('about.html', **context)
@app.route('/result', methods=['POST', 'GET'])
def result():
    """Echo the raw POSTed form fields back on the result page.

    NOTE: a plain GET falls through and implicitly returns None, exactly as
    the original did.
    """
    if request.method == 'POST':
        form_data = request.form
        return render_template("result.html", result=form_data)
| true |
b0e5f025e0dc92d97646b5fde9c52ebfb8ea8144 | Python | EDITD/riak-python-client | /riak/client/index_page.py | UTF-8 | 6,155 | 2.578125 | 3 | [
"Apache-2.0"
] | permissive | # Copyright 2010-present Basho Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE(review): `Sequence` moved to collections.abc in Python 3.3 and was
# removed from `collections` in Python 3.10 -- this import only works on
# older interpreters; confirm the supported Python range.
from collections import namedtuple, Sequence

# single-field wrapper used by streamed 2i results to carry the page marker
# (see IndexPage.__iter__, which unwraps it into `continuation`)
CONTINUATION = namedtuple('Continuation', ['c'])
class IndexPage(Sequence, object):
    """
    Encapsulates a single page of results from a secondary index
    query, with the ability to iterate over results (if not streamed),
    capture the page marker (continuation), and automatically fetch
    the next page.

    While users will interact with this object, it will be created
    automatically by the client and does not need to be instantiated
    elsewhere.
    """

    def __init__(self, client, bucket, index, startkey, endkey, return_terms,
                 max_results, term_regex):
        # the originating query parameters are kept so next_page() can
        # re-issue the same query with a continuation
        self.client = client
        self.bucket = bucket
        self.index = index
        self.startkey = startkey
        self.endkey = endkey
        self.return_terms = return_terms
        self.max_results = max_results
        self.results = None   # captured result list, or a stream when streaming
        self.stream = False   # whether results arrive as a stream
        self.term_regex = term_regex

    continuation = None
    """
    The opaque page marker that is used when fetching the next chunk
    of results. The user can simply call :meth:`next_page` to do so,
    or pass this to the :meth:`~riak.client.RiakClient.get_index`
    method using the ``continuation`` option.
    """

    def __iter__(self):
        """
        Emulates the iterator interface. When streaming, this means
        delegating to the stream, otherwise iterating over the
        existing result set.
        """
        if self.results is None:
            raise ValueError("No index results to iterate")

        try:
            for result in self.results:
                # streamed responses interleave CONTINUATION markers with
                # data; capture the marker instead of yielding it
                if self.stream and isinstance(result, CONTINUATION):
                    self.continuation = result.c
                else:
                    yield self._inject_term(result)
        finally:
            if self.stream:
                self.results.close()

    def __len__(self):
        """
        Returns the length of the captured results.
        """
        if self._has_results():
            return len(self.results)
        else:
            raise ValueError("Streamed index page has no length")

    def __getitem__(self, index):
        """
        Fetches an item by index from the captured results.
        """
        if self._has_results():
            return self.results[index]
        else:
            raise ValueError("Streamed index page has no entries")

    def __eq__(self, other):
        """
        An IndexPage can pretend to be equal to a list when it has
        captured results by simply comparing the internal results to
        the passed list. Otherwise the other object needs to be an
        equivalent IndexPage.
        """
        if isinstance(other, list) and self._has_results():
            return self._inject_term(self.results) == other
        elif isinstance(other, IndexPage):
            return other.__dict__ == self.__dict__
        else:
            return False

    def __ne__(self, other):
        """
        Converse of __eq__.
        """
        return not self.__eq__(other)

    def has_next_page(self):
        """
        Whether there is another page available, i.e. the response
        included a continuation.
        """
        return self.continuation is not None

    def next_page(self, timeout=None, stream=None):
        """
        Fetches the next page using the same parameters as the
        original query.

        Note that if streaming was used before, it will be used again
        unless overridden.

        :param stream: whether to enable streaming. `True` enables,
            `False` disables, `None` uses previous value.
        :type stream: boolean
        :param timeout: a timeout value in milliseconds, or 'infinity'
        :type timeout: int
        """
        if not self.continuation:
            raise ValueError("Cannot get next index page, no continuation")

        if stream is not None:
            self.stream = stream

        args = {'bucket': self.bucket,
                'index': self.index,
                'startkey': self.startkey,
                'endkey': self.endkey,
                'return_terms': self.return_terms,
                'max_results': self.max_results,
                'continuation': self.continuation,
                'timeout': timeout,
                'term_regex': self.term_regex}

        if self.stream:
            return self.client.stream_index(**args)
        else:
            return self.client.get_index(**args)

    def _has_results(self):
        """
        When not streaming, have results been assigned?
        """
        return not (self.stream or self.results is None)

    def _should_inject_term(self, term):
        """
        The index term should be injected when using an equality query
        and the return terms option. If the term is already a tuple,
        it can be skipped.
        """
        return self.return_terms and not self.endkey

    def _inject_term(self, result):
        """
        Upgrades a result (streamed or not) to include the index term
        when an equality query is used with return_terms.
        """
        if self._should_inject_term(result):
            if type(result) is list:
                return [(self.startkey, r) for r in result]
            else:
                return (self.startkey, result)
        else:
            return result

    def __repr__(self):
        return "<{!s} {!r}>".format(self.__class__.__name__, self.__dict__)

    def close(self):
        """Close the underlying stream, if this page is streaming."""
        if self.stream:
            self.results.close()
| true |
6359e923d2fcb2edee64b0905f245136722b65af | Python | yellowracecar/chiffer | /chiffer.py | UTF-8 | 514 | 3.609375 | 4 | [] | no_license | key = 0
letters = []
crypt = []
meny = 0
while meny != 3:
print("1. Kryptera")
print("2. Dekryptera")
print("3. Avsluta")
meny = int(input("Vad vill du göra? "))
if meny == 1:
word = input("Vad vill du kryptera? ")
key = input("vad vill du ha för nyckel? ")
for letter in word:
letters.append(ord(letter) + key)
print(letters)
if meny == 2:
for l in letters:
crypt.append(chr(l - key))
print(crypt)
break
| true |
fdf60411085508e9ae4335081c60d48f37ea434f | Python | jainshubhi/find_people | /find_people.py | UTF-8 | 3,094 | 2.875 | 3 | [] | no_license | import json
import time
import os
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
def read_config():
'''
Read config.json file. config.json should have companies and jobs keys.
'''
with open('config.json') as f:
config = json.load(f)
return config['companies'], config['jobs']
def login(driver):
'''
Login to linkedin.
'''
driver.get('https://www.linkedin.com/')
elem = driver.find_element_by_id('login-email')
elem.send_keys(os.environ['EMAIL_USERNAME'])
elem = driver.find_element_by_id('login-password')
elem.send_keys(os.environ['EMAIL_PASSWORD'])
elem.send_keys(Keys.RETURN)
def search_person(driver, company, job):
'''
Opens a linkedin page and searches for a specific person with a specific job
and specific company.
'''
driver.get('https://www.linkedin.com/vsearch/f?f_N=F,S,A&openFacets=N,G,CC&rsid=1934857271457685716329&adv=open')
try:
# Enter in title
elem = driver.find_element_by_name('title')
elem.send_keys(job)
# Select current in title advanced options
driver.find_element_by_xpath("//select[@name='titleScope']/option[text()='Current']").click()
# Enter in company name
elem = driver.find_element_by_name('company')
elem.send_keys(company)
# Select current in company advanced options
driver.find_element_by_xpath("//select[@name='companyScope']/option[text()='Current']").click()
# Submit
elem = driver.find_element_by_name('submit')
elem.click()
except:
print 'Could not find required element'
driver = webdriver.Chrome()
login(driver)
time.sleep(5)
search_person(driver, company, job)
# Wait for results to load
time.sleep(2)
if 'Sorry, no results containing' in driver.page_source:
return []
else:
# TODO: Return list of names in search results
try:
elems = driver.find_elements_by_xpath("//a[@class='title main-headline']")
return invalid_results([elem.text for elem in elems])
except:
return []
def invalid_results(results):
'''
This method removes invalid results from returned results
'''
# Remove invalid profiles
results = filter((lambda x: 'LinkedIn' not in x), results)
# Only have unique elements
return list(set(results))
if __name__ == '__main__':
# Login to LinkedIn and set up selenium
driver = webdriver.Chrome()
login(driver)
# Search config
companies, jobs = read_config()
# Sleep 5 seconds
time.sleep(5)
results = {}
for company in companies:
results[company] = []
for job in jobs:
results[company] += search_person(driver, company, job)
time.sleep(3.5)
time.sleep(3)
driver.close()
# Write results of search_person to a json file
# (key: company, value: [name])
with open('people.json', 'w') as f:
json.dump(results, f, sort_keys=True, indent=4)
print 'Done'
| true |
162d18d1aa1b81c2da5cb092c70bd1716f09aae1 | Python | smarden1/data-tools | /datatools/HistogramPlot.py | UTF-8 | 553 | 3.34375 | 3 | [] | no_license | import math
class HistogramPlot(object):
defaultRange = 50
def __init__(self, data, zero_start = False, displayRange = defaultRange):
self.data = data
self.range = displayRange
self.bin_size = math.ceil(self.data.range(zero_start) / self.range) + 1
def result(self):
return (self.getBar(i) for i in self.data.data)
def prettyPrint(self):
return "\n".join(self.result())
def getBar(self, value):
return "".join(["*"] * int(math.floor((value - self.data.min) / self.bin_size))) | true |
c4aabea30604281b3bb48966961a4faf3695b811 | Python | MrHamdulay/csc3-capstone | /examples/data/Assignment_7/bgrtej001/util.py | UTF-8 | 2,593 | 4.1875 | 4 | [] | no_license | """Tejasvin Bagirathi BGRTEJ001
Assignment 7, Question 2
util.py"""
global grid
grid = []
#Create grid function to make empty grid
def create_grid(grid):
for i in range(4):
#Create new row
row_x = []
for j in range(4):
#Add zero's to row
row_x.append(0)
#Add row to grid
grid.append(row_x)
def print_grid(grid):
#Print top of box
print('+', '-'*20, '+', sep = '')
#Loop through grid vertically
for i in range(len(grid)):
print('|', end='')
#Loop through each vertical element of grid
for j in grid[i]:
#If 0, print empty spaces
if j == 0:
print('{0:<5}'.format(' '), end ='')
#If number, print the number out
else:
print('{0:<5}'.format(j), end ='')
print('|')
print('+', '-'*20, '+', sep = '')
def check_lost(grid):
for i in range(4):
#Search for equal adjecent values
for j in range(3):
if grid[i][j] == grid[i][j+1]:
return False
#Search for 0 values in each horizontal element
for j in range(3):
if grid[i][j]==0:
return False
#Check if any horizontal adjacent elements are the same, first lop through vertically
for i in range(3):
#Loop through each horizontal element
for j in range(4):
#If any adjacent elements are the same, return false
if grid[i][j] == grid[i+1][j]:
return False
return True
def check_won(grid):
#Loop through grid veritcally
for i in grid:
#Loop through horizontal elements of grid
for j in i:
#If any number is greater than 32, return true
if j >= 32:
return True
return False
def copy_grid(grid):
#Declare new grid
grid_copy = []
for i in grid:
#Create new horizontal row
new_row = []
#Loop trhough each row in grid
for j in i:
#Add each element of the horizontal row to new row
new_row.append(j)
#Add new row to new grid
grid_copy.append(new_row)
#Return Copy
return grid_copy
def grid_equal(grid1, grid2):
for i in range(4):
#Check to see if any values horizontally in grid don't match, if so return False
for j in range(4):
if grid1[i][j] != grid2[i][j]:
return False
return True | true |
db63756e771ec099046d9a4848f874437875db72 | Python | srikantviswanath/Algo-Practice | /dfs/count_paths_for_a_sum.py | UTF-8 | 1,150 | 3.75 | 4 | [] | no_license | """Given a binary tree and a number ‘S’, find all paths in the tree such that the sum of all
the node values of each path equals ‘S’. Please note that the paths can start or end at any
node but all paths must follow direction from parent to child (top to bottom)"""
from trees import TreeNode, build_binary_tree
from typing import List
def count_paths(root: TreeNode, target: int) -> int:
count = {'count': 0}
def helper(root: TreeNode, target: int, current_path: List[int]) -> None:
if not root:
return
i = 0
if current_path:
while target < root.val:
target += current_path[i]
i += 1
if target == root.val:
count['count'] += 1
target -= root.val
current_path = current_path[i+1:] if i != 0 else current_path
helper(root.left, target, current_path + [root.val])
helper(root.right, target, current_path + [root.val])
helper(root, target, [])
return count['count']
if __name__ == '__main__':
root = build_binary_tree([12, 7, 1, None, 4, 10, 5])
print(count_paths(root, 6))
| true |
25e82d30ca9c54bd17ed91944044cdd673607ff5 | Python | zdd0819/MsPacman_DQN | /gifMaker.py | UTF-8 | 668 | 2.703125 | 3 | [] | no_license | import imageio
import os
class GifAgent:
def __init__(self):
self.storage = []
self.max_score = 0
self.max_storage = []
def store(self, img):
self.storage.append(img)
def commit(self, score, auto_output=False):
if score > self.max_score:
self.max_score = score
self.max_storage = self.storage.copy()
if auto_output:
self.output()
self.storage = []
def output(self, name='max_score.gif'):
if 'gif' not in os.listdir(os.getcwd()):
os.mkdir('./gif')
imageio.mimsave('./gif/'+name, self.max_storage, 'GIF', duration=0.05)
| true |
b6d9c69286a64cd8db453a19dd2d057353daf4ad | Python | bayguang/FruitRecognition | /models/vgg_model.py | UTF-8 | 1,377 | 2.609375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3.7
# -*- coding: utf-8 -*-
from keras import backend as K
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Activation
from keras.layers import Reshape, Lambda, BatchNormalization, ZeroPadding2D
from keras.layers.core import Dense, Dropout, Activation, Flatten
def get_model(input_dim, category_num):
"""
Build Convolution Neural Network
args : nb_classes (int) number of classes
returns : model (keras NN) the Neural Net model
"""
chanDim = 1
model = Sequential()
model.add(Conv2D(32, (3, 3), padding="same", input_shape=(input_dim[0], input_dim[1], 3)))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(Conv2D(32, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(Conv2D(64, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(category_num, activation='softmax'))
return model
| true |
584171e2ae6965b58f39adf551361ddd074caaa9 | Python | DiffeoInvariant/pytex | /pytex/code.py | UTF-8 | 8,537 | 2.640625 | 3 | [] | no_license | from .environment import Environment
from .command import Command, UsePackage, TextModifier
from .text import TextLines
from collections.abc import Iterable
def _comma_separated_tuple(tpl):
n = len(tpl)
csv = []
for i,val in enumerate(tpl):
if i < n-1:
csv.append(str(val)+',')
else:
csv.append(str(val))
return tuple(csv)
class CodeColor(TextLines):
def __init__(self,name,values,color_scheme='rgb'):
self._name = name
self.vals = values
self.color_scheme = color_scheme
self._make_line()
super().__init__([self.line],name)
def get(self):
return self.line if self.line else ''
def get_as_line(self):
if self.line is None:
return ''
return self.line if self.line.endswith('\n') else self.line + '\n'
def get_use_command(self):
return Command('color',args=self.vals,options=['rgb'])
@staticmethod
def Green():
return CodeColor('green',(0,0.6,0),'rgb')
@staticmethod
def Gray():
return CodeColor('gray',(0.0,0.5,0.5),'rgb')
@staticmethod
def Purple():
return CodeColor('purple',(0.58,0,0.82),'rgb')
@staticmethod
def Magenta():
return CodeColor('magenta',None)
@staticmethod
def Red():
return CodeColor('red',None)
@staticmethod
def Blue():
return CodeColor('blue',None)
@staticmethod
def DefaultBackground():
return CodeColor('PyTexDefaultBackground',(0.95,0.95,0.92),'rgb')
def _make_line(self):
_default_colors = {'magenta','red','blue'}
if self._name in _default_colors:
self.line = ''
else:
self.line = '\\definecolor{' + self._name + '}{' + self.color_scheme + '}{'
for elem in _comma_separated_tuple(self.vals):
self.line += elem
self.line += '}\n'
"""
def _make_premade_color(self, pre_name):
_PREMADE_XCOLORS = {'magenta','green','blue','red','brown'}
if pre_name not in _PREMADE_XCOLORS:
raise NotImplementedError(f"Cannot get premade xcolor {pre_name}! Premade colors are: {[x for x in _PREMADE_XCOLORS]}")
self._name = pre_name
self.vals = None
self.color_scheme = 'rbg'
self.line = None
"""
_default_colors = {CodeColor.Green(),CodeColor.Gray(),CodeColor.Purple(),CodeColor.Magenta(),CodeColor.Red(),CodeColor.Blue(),CodeColor.DefaultBackground()}
class CodeStyle:
def __init__(self, style_name: str,
background_color: CodeColor=CodeColor.DefaultBackground(),
comment_color: CodeColor=CodeColor.Green(),
keyword_color: CodeColor=CodeColor.Magenta(),
number_color: CodeColor=CodeColor.Gray(),
string_color: CodeColor=CodeColor.Red(),
basic_style_mods: Iterable=None,
whitespace_break: bool=False,
breaklines: bool=True,
caption_pos: str='b',
keep_spaces=True,
number_alignment='left',
number_sep_pts=5,
show_spaces=False,
show_string_spaces=False,
show_tabs=False,
tabsize=2):
self._colors = [background_color, comment_color, keyword_color,
number_color, string_color]
self._name = style_name
self._get_style_options(background_color,comment_color,keyword_color,
number_color,string_color,basic_style_mods,
whitespace_break, breaklines, caption_pos,
keep_spaces, number_alignment, number_sep_pts,
show_spaces, show_string_spaces, show_tabs, tabsize)
self._get_color_defines()
self._get_style_define()
def get(self):
return self.cmd
def get_as_line(self):
return self.cmd + '\n'
def name(self):
return self._name
def command_to_set(self):
return Command('lstset',[f'style={self._name}'])
def color_definitions(self):
return self.color_defs
def _get_color_defines(self):
self.color_defs = [col.get_as_line() for col in self._colors]
def _get_style_define(self):
self.cmd = "\\lstdefinestyle{" + self._name + "}{\n"
last_option = 'tabsize'
n = len(self.options)
i = 0
for opt, val in self.options.items():
if i < n - 1:
self.cmd += opt + '=' + val + ',\n'
else:
self.cmd += opt + '=' + val + '\n'
i += 1
self.cmd += '}'
def _get_style_options(self, background, comment, kw, num, string,
basic_style, whitespace, breaklines,
captionpos, keepspaces, numalign,
numseppts, showspaces, showstringspaces,
showtabs, tabsz):
if basic_style is None:
basic_style = [TextModifier('ttfamily'),TextModifier('footnotesize')]
self.options = {'backgroundcolor' : '\\color{' + background.name() + '}',
'commentstyle' : '\\color{' + comment.name() + '}',
'keywordstyle' : '\\color{' + kw.name() + '}',
'numberstyle' : '\\color{' + num.name() + '}',
'stringstyle' : '\\color{' + string.name() + '}',
'basicstyle' : (basic_style if not isinstance(basic_style,Iterable) else ''.join([x.get() for x in basic_style])),
'breaklines' : 'true' if breaklines else 'false',
'captionpos' : captionpos,
'keepspaces' : 'true' if keepspaces else 'false',
'numbers' : numalign,
'numbersep' : str(numseppts)+'pt',
'showspaces' : 'true' if showspaces else 'false',
'showstringspaces' : 'true' if showstringspaces else 'false',
'showtabs' : 'true' if showtabs else 'false',
'tabsize' : str(tabsz),
}
_PYTEX_REQUIRED_CODE_PKGS = {
('listings',),
('xcolor',),
('inputenc',('utf8')),
}
class CodeSnippet(Environment):
def __init__(self, code_lines, language='C++', code_style=None, caption=None, name=None, xleftmargin=None, xrightmargin=None):
self.style = code_style if code_style else CodeStyle('default_pytex_code_style')
self.lang = language
super().__init__('lstlisting',code_lines,
name if name else language + ' code snippet',False,None,
_PYTEX_REQUIRED_CODE_PKGS)
#self.end = Command('end','lstlisting',opts)
self._get_style_use_cmd()
for coldef in self.style.color_definitions():
self.required_packages.append(coldef)
#add the text style as a required package to put it in the preamble
self.required_packages.append(self.style.get_as_line())
lo = self._listing_options(language,caption)
if lo:
if xleftmargin:
lo.append(f'xleftmargin={xleftmargin}')
if xrightmargin:
lo.append(f'xrightmargin={xrightmargin}')
self.add_end_options_to_begin(lo)
self._set_begin(self.begin.get())
def _get_style_use_cmd(self):
self.prepend_line(self.style.command_to_set().get_as_line())
def _listing_options(self, lang, caption):
if caption:
return [f'language={lang}, ',f'caption={caption}, ',f'style={self.style.name()}']
else:
return [f'language={lang}, ',f'style={self.style.name()}']
class ColoredText(TextLines):
def __init__(self, text: Iterable, color: CodeColor):
TextLines.__init__(text)
self.color = color
self.cmaps = {'red' : (1,0,0), 'blue' : (0,0,1)}
if self.color.name() in self.cmaps.keys():
self.color.vals = self.cmaps[self.color.name()]
self.init_cmd = self.color.get_use_command()
self.prepend_line('{'+self.init_cmd.get() + ' ')
self.append_line('}')
| true |
0c1298415ecc68977583cfc5ff98dd6e99d69101 | Python | cginternals/khrbinding-generator | /khrapi/API.py | UTF-8 | 3,533 | 2.734375 | 3 | [
"MIT"
] | permissive |
from .Version import Version;
from .Extension import Extension;
class API(object):
def __init__(self, identifier, revision):
self.identifier = identifier
self.revision = revision
self.apis = []
self.versions = []
self.extensions = []
self.types = []
self.functions = []
self.constants = []
self.declarations = []
self.dependencies = []
self.vendors = []
def constantByIdentifier(self, identifier):
return next((c for c in self.constants if c.identifier == identifier), None)
def functionByIdentifier(self, identifier):
return next((f for f in self.functions if f.identifier == identifier), None)
def typeByIdentifier(self, identifier):
return next((t for t in self.types if t.identifier == identifier), None)
def extensionByIdentifier(self, identifier):
return next((e for e in self.extensions if e.identifier.endswith(identifier)), None)
def extensionsByCoreVersion(self):
result = {}
for version in [ version for version in self.versions if isinstance(version, Version) ]:
for extension in version.requiredExtensions:
result[extension] = version
return result
def extensionsByFunction(self):
result = {}
for function in self.functions:
result[function] = [ extension for extension in function.requiringFeatureSets if isinstance(extension, Extension) and extension in self.extensions ]
return result
def printSummary(self):
print("%s API (%s)" % (self.identifier, self.revision))
print("")
print("VENDORS")
for vendor in self.vendors:
print("%s (%s)" % (vendor.token, vendor.name))
print("")
print("TYPES")
for type in self.types:
print(type.identifier + (" ("+type.declaration+")" if hasattr(type, "declaration") else "") \
+ (" => " + type.aliasedType.identifier if hasattr(type, "aliasedType") else ""))
if hasattr(type, "values"):
print("[ %s ]" % (", ".join([ value.identifier + "(" + value.value + ")" for value in type.values ])))
print("")
print("FUNCTIONS")
for function in self.functions:
print(function.returnType.identifier + " " + function.identifier + "(" + ", ".join([ param.type.identifier + " " + param.name for param in function.parameters ]) + ")")
print("")
print("VERSIONS")
for version in self.versions:
print(version.identifier)
print("Extensions " + ", ".join([extension.identifier for extension in version.requiredExtensions]))
print("Functions " + ", ".join([function.identifier for function in version.requiredFunctions]))
print("Constants " + ", ".join([value.identifier for value in version.requiredConstants]))
print("Types " + ", ".join([type.identifier for type in version.requiredTypes]))
print("")
print("")
print("EXTENSIONS")
for extension in self.extensions:
print(extension.identifier)
print("Functions " + ", ".join([ function.identifier for function in extension.requiredFunctions ]))
print("Constants " + ", ".join([ value.identifier for value in extension.requiredConstants]))
print("Types " + ", ".join([type.identifier for type in extension.requiredTypes]))
print("")
print("")
| true |
48d56dd12959623bd19450b1024c889691277e80 | Python | tsemach/pyexamples | /pyexamples/contextmanager/contextmanager-04.py | UTF-8 | 944 | 3.875 | 4 | [] | no_license |
"""
from: https://jeffknupp.com/blog/2016/03/07/python-with-context-managers/
The decorator generate another function behind the scenes which wrap the defined method
Using the @contextmanager decorator.
- The decorate a generator function that calls yield exactly once.
- Everything before the call to yield is considered the code for __enter__().
- Everything after is the code for __exit__().
So: in open_file
the_file = open(path, mode) - is called on __enter__ and the_file is yield
the_file.close() is called on __exist__
"""
from contextlib import contextmanager
@contextmanager
def open_file(path, mode):
print("open-file() is called")
the_file = open(path, mode)
yield the_file
print("open-file() after yield")
the_file.close()
files = []
for x in range(10):
with open_file('foo.txt', 'w') as infile:
files.append(infile)
for f in files:
if not f.closed:
print('not closed') | true |
0ee1282af27a60f2531b85f8052e2eada0d8112e | Python | Aasthaengg/IBMdataset | /Python_codes/p03427/s960365152.py | UTF-8 | 329 | 2.875 | 3 | [] | no_license | #-*-coding:utf-8-*-
import sys
input=sys.stdin.readline
def main():
n = list(map(int,input().rstrip()))
digit=len(n)
ans1=0
ans2=0
if digit==1:
print(n[0])
exit()
else:
ans1=sum(n)
ans2=n[0]-1+(digit-1)*9
print(max(ans1,ans2))
if __name__=="__main__":
main() | true |
99e76f3f180e2c9c276e337b969587003aa2fe42 | Python | huozhiwei/Python3Project | /TCPAndUDP/TCP客户端代码.py | UTF-8 | 908 | 3.203125 | 3 | [] | no_license | #coding:utf-8
from socket import *
print("=====================TCP客户端=====================")
HOST = '127.0.0.1' #服务器ip地址,等价于localhost
PORT = 21567 #通信端口号
BUFSIZ = 1024 #接收数据缓冲大小
ADDR = (HOST, PORT)
tcpCliSock = socket(AF_INET, SOCK_STREAM) #创建客户端套接字
tcpCliSock.connect(ADDR) #发起TCP连接
while True:
data = input('> ') #接收用户输入
if not data: #如果用户输入为空,直接回车就会发送"",""就是代表false
break
tcpCliSock.send(bytes(data, 'utf-8')) #客户端发送消息,必须发送字节数组
data = tcpCliSock.recv(BUFSIZ) #接收回应消息,接收到的是字节数组
if not data: #如果接收服务器信息失败,或没有消息回应
break
print(data.decode('utf-8')) #打印回应消息,或者str(data,"utf-8")
tcpCliSock.close() #关闭客户端socket
| true |
033a6e0d9d95e7a7f6cfa93b65fc1084030f3935 | Python | giuscri/problem-solving-workout | /Drools.py | UTF-8 | 1,730 | 3.375 | 3 | [] | no_license | import itertools
def f0(g):
return len(list(filter(lambda x: x=='red', \
map(lambda t: t[1], g)))) == 1
def f1(g):
fred_pos = list(filter(lambda t: t[0]=='fred', g))[0][-1]
lst = list(filter(lambda t: t[-1]==fred_pos+1, g))
return len(lst) > 0 and lst[0][1] == 'blue'
def f2(g):
return list(filter(lambda t: t[0]=='joe', g))[0][-1] == 2
def f3(g):
return list(filter(lambda t: t[0]=='bob', g))[0][1] == 'plaid'
def f4(g):
tom = list(filter(lambda t: t[0]=='tom', g))[0]
return tom[-1] != 1 and tom[-1] != 4 and tom[1] != 'orange'
rules = (
f0,
f1,
f2,
f3,
f4,
)
class Drools:
def __init__(self, rules, *lsts):
self.rules = rules
self.lsts = lsts
def eval(self):
def valid_group(g, seen=[]):
if len(g) == 0: return True
for x in g[0]:
if x in seen: return False
return valid_group(g[1:], seen + list(g[0]))
gs = filter(valid_group, \
itertools.combinations( \
itertools.product(*self.lsts), len(self.lsts[0])))
ok_gs = []
for g in gs:
ok = True
for r in rules:
if not r(g):
ok = False
if ok: ok_gs.append(g)
fmt = 'Golfer {} is in position {} and wears some {} pants.'
for g in ok_gs:
for glf in g:
print(fmt.format(glf[0], glf[-1], glf[1]))
print('------------------------------------------------------')
if __name__ == "__main__":
d = Drools(rules,
['bob', 'joe', 'fred', 'tom'],
['red', 'orange', 'blue', 'plaid'],list(range(1,5)))
d.eval()
| true |
f8c76709c52d45dae58792a402164a16f9688fce | Python | williamvdev/genetic | /python/knapsack.py | UTF-8 | 4,115 | 3.296875 | 3 | [] | no_license | from random import randint, random
import json
POPULATION_SIZE = 100
GENERATIONS = 100
KNAPSACK_MAX_WEIGHT = 15000
TREASURE_FILE = 'python/treasures.json'
INHERITANCE_FACTOR = 0.5 # What fraction of bits are copied from parent A
MUTATION_FACTOR = 0.01 # What fraction of bits are flipped in a child.
def treasure_generator(item_count, max_weight, max_value):
"""
Generates a list of treasure items
"""
treasures = []
for i in range(item_count):
item = { "id": i, "weight": randint(0, max_weight), "value": randint(0, max_value)}
treasures.append(item)
return treasures
def generate_treasure_file(item_count, max_weight, max_value):
with open(TREASURE_FILE, 'w') as output_file:
json.dump(treasure_generator(item_count, max_weight, max_value), output_file)
def read_treasures():
treasure_list = []
with open(TREASURE_FILE, 'r') as input_file:
treasure_list = json.load(input_file)
return treasure_list
def select_by_tournament(candidates, treasures):
"""
Randomly selects two candidates from the list and returns the one
with the best fitness score
"""
candidate_a = candidates[randint(0, len(candidates) - 1)]
candidate_b = candidates[randint(0, len(candidates) - 1)]
return candidate_a if candidate_a['fitness'] > candidate_b['fitness'] else candidate_b
def solution_fitness(treasures, solution):
"""
Determines the fitness score for a given solution
"""
total_value = 0
total_weight = 0
for i in range(len(solution)):
if solution[i] == True :
total_value += treasures[i]['value']
total_weight += treasures[i]['weight']
if (total_weight > KNAPSACK_MAX_WEIGHT):
return 0
return total_value
def get_child(treasures, parent_a, parent_b):
child = {'solution': []}
for i in range(len(parent_a['solution'])):
child_bit = parent_a['solution'][i] if random() < INHERITANCE_FACTOR else parent_b['solution'][i]
child_bit = not child_bit if random() < MUTATION_FACTOR else child_bit
child['solution'].append(child_bit)
child['fitness'] = solution_fitness(treasures, child['solution'])
return child
def generate_random_solution(size):
solution = []
for i in range(size):
solution.append(True if randint(0,1) == 1 else False)
return solution
def get_generation(treasures, parent_generation, population_size):
new_gen = []
while len(new_gen) < population_size:
if(len(parent_generation) == 0):
random_solution = generate_random_solution(len(treasures))
new_gen.append({ 'solution': random_solution, 'fitness': solution_fitness(treasures, random_solution)})
else:
parent_a = select_by_tournament(parent_generation, treasures)
parent_b = select_by_tournament(parent_generation, treasures)
new_gen.append(get_child(treasures, parent_a, parent_b))
return new_gen
def get_generation_stats(treasures, generation):
stats = {}
total_score = 0
for member in generation:
score = member['fitness']
total_score += score
if 'min' not in stats: stats['min'] = score
if 'max' not in stats: stats['max'] = score
if score < stats['min']: stats['min'] = score
if score > stats['max']: stats['max'] = score
stats['avg'] = total_score / len(generation)
return stats
def main():
print("Starting Genetic Search...")
generation_stats = []
treasure_list = read_treasures()
current_generation = []
for gen_no in range(GENERATIONS):
current_generation = get_generation(treasure_list, current_generation, POPULATION_SIZE)
current_gen_stats = get_generation_stats(treasure_list, current_generation)
print('Generation {} stats: Lowest Fitness: {}, Highest Fitness: {}, Average Fitness: {}'.format(gen_no, current_gen_stats['min'], current_gen_stats['max'], current_gen_stats['avg']))
generation_stats.append(current_gen_stats)
print("Done!")
if __name__ == "__main__":
main() | true |
cab4f24e669a87cef9633c9f0867231e26c396d9 | Python | Sunghwan-DS/TIL | /Python/Programmers/Level_2_더_맵게.py | UTF-8 | 1,525 | 3.203125 | 3 | [] | no_license | # def solution(scoville, K):
# if sum(scoville) < K:
# return -1
#
# scoville.sort()
# scoville.insert(0, 0)
# answer = 0
# print("초기조건:", scoville)
#
# while scoville[1] < K:
# scoville[1], scoville[-1] = scoville[-1], scoville[1]
# min_s = scoville.pop(-1)
# target_idx = 1
#
#
# # scoville[-1] = scoville[-1] * 2 + min_s
# # target_idx = len(scoville) - 1
# while True:
# if scoville[target_idx] > scoville[target_idx * 2] or scoville[target_idx] > scoville[target_idx * 2 + 1]:
# if scoville[target_idx * 2] >= scoville[target_idx * 2 + 1]:
# scoville[target_idx], scoville[target_idx * 2 + 1] = scoville[target_idx * 2 + 1], scoville[target_idx]
# target_idx = target_idx * 2 + 1
# else:
# scoville[target_idx], scoville[target_idx * 2] = scoville[target_idx * 2], scoville[target_idx]
# target_idx *= 2
# else:
# break
# answer += 1
# print(scoville)
# return answer
import heapq
def solution(scoville, K):
if sum(scoville) < K:
return -1
answer = 0
scoville.sort()
print(scoville)
while scoville[0] < K:
min_s = heapq.heappop(scoville)
min2_s = heapq.heappop(scoville)
heapq.heappush(scoville, min_s + min2_s * 2)
answer += 1
print(scoville)
return answer
print(solution([1, 3], 7)) | true |
48c3cf1f0c4fb897959f1735d0a2fce5f0c8ab62 | Python | nandansn/pythonlab | /durgasoft/chapter41/function-return-another-function.py | UTF-8 | 256 | 2.8125 | 3 | [] | no_license | def outer():
print('outer function')
def inner():
print('inner function')
print('outer returning innner function...')
def inner2():
print('inner 2 function')
return inner, inner2
f1,f2=outer()
f1()
f2()
| true |
db3c37faa847ced223e675090befd9f750b9a1fd | Python | Derhks/AirBnB_clone_v3 | /api/v1/views/cities.py | UTF-8 | 3,003 | 2.78125 | 3 | [
"LicenseRef-scancode-public-domain"
] | permissive | #!/usr/bin/python3
"""
This module create a new view for City objects
"""
from api.v1.views import app_views
from flask import Flask, jsonify, make_response, request
from models.state import State
from models.city import City
from models import storage
@app_views.route('/states/<state_id>/cities',
strict_slashes=False, methods=['GET'])
def show_cities(state_id=None):
"""
This method show a list with all the cities
"""
state = storage.get(State, state_id)
if state_id is not None and state is not None:
all_cities = state.cities
list_cities = []
for city in all_cities:
list_cities.append(city.to_dict())
return jsonify(list_cities)
else:
return make_response(jsonify({"error": "Not found"}), 404)
@app_views.route('/cities/<city_id>', strict_slashes=False, methods=['GET'])
def show_city(city_id=None):
"""
This method show a city
"""
city = storage.get(City, city_id)
if city_id is not None and city is not None:
return jsonify(city.to_dict())
else:
return make_response(jsonify({"error": "Not found"}), 404)
@app_views.route('/cities/<city_id>',
strict_slashes=False, methods=['DELETE'])
def delete_city(city_id=None):
"""
This method delete a city
"""
city = storage.get(City, city_id)
if city_id is not None and city is not None:
storage.delete(city)
return make_response(jsonify({}), 200)
else:
return make_response(jsonify({"error": "Not found"}), 404)
@app_views.route('/states/<state_id>/cities',
strict_slashes=False, methods=['POST'])
def create_city(state_id=None):
"""
This method create a city
"""
state = storage.get(State, state_id)
if not request.get_json():
return make_response(jsonify({"error": "Not a JSON"}), 400)
elif 'name' not in request.get_json():
return make_response(jsonify({"error": "Missing name"}), 400)
else:
if state_id is not None and state is not None:
request_with_state = request.get_json()
request_with_state['state_id'] = state_id
new_city = City(**request_with_state)
new_city.save()
return make_response(jsonify(new_city.to_dict()), 201)
else:
return make_response(jsonify({"error": "Not found"}), 404)
@app_views.route('/cities/<city_id>',
strict_slashes=False, methods=['PUT'])
def modify_city(city_id=None):
"""
This method modify a city
"""
city = storage.get(City, city_id)
if city_id is not None and city is not None:
if not request.get_json():
return make_response(jsonify({"error": "Not a JSON"}), 400)
else:
city.name = request.get_json()['name']
storage.save()
return make_response(jsonify(city.to_dict()), 200)
else:
return make_response(jsonify({"error": "Not found"}), 404)
| true |
79eb0b4a21c6f0a67e881be58dc722e1f1af3b79 | Python | Luodian/Data-Structure | /HashSet/HashSet/plot.py | UTF-8 | 4,949 | 2.921875 | 3 | [] | no_license | #!/urs/bin/python
# -*- coding: UTF-8 -*-
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
def readFile(fileName,words,times):
file = open(fileName);
print "Name of file: ", file.name
scale = ""
time = ""
for line in file.readlines():
index = line.find(' ', 0, len(line));
scale = line[0:index];
words.append(int(scale))
time = line[index + 1:-1]
times.append(float(time))
file.close()
def insert_random():
    """Plot Open vs Probe hash insert times (random data) as two subplots."""
    words = []
    times = []
    readFile("/Users/luodian/Desktop/DSA/HashSet/HashSet/Open_insert.txt",words,times)
    plt.figure(figsize = (10,8))
    plt.subplot(211)
    plt.plot(words, times, color = 'b', label="Insert Time of Open Hash in random data")
    # BUG FIX: plt.xlabel / plt.ylabel are functions; assigning to them
    # rebound the pyplot attributes and never labelled the axes.
    plt.xlabel("Scale(n)")
    plt.ylabel("Times(s)")
    plt.legend(loc = "upper right", shadow = True)
    plt.grid()
    # (removed: yMin/yMax were computed here but never used in this figure)
    words = []
    times = []
    readFile("/Users/luodian/Desktop/DSA/HashSet/HashSet/Probe_insert.txt",words,times)
    plt.subplot(212)
    plt.plot(words,times, color = 'r', label = "Insert Time of Probe Hash in random data")
    plt.xlabel("Scale(n)")
    plt.ylabel("Times(s)")
    plt.legend(loc="upper right", shadow = True)
    plt.grid()
    plt.savefig("/Users/luodian/Desktop/DSA/HashSet/HashSet/insert_comparasion.png")
def insert_random_whole():
    """Overlay Open vs Probe hash insert times (random data) on one axes."""
    words = []
    times = []
    readFile("/Users/luodian/Desktop/DSA/HashSet/HashSet/Open_insert.txt",words,times)
    plt.figure(figsize = (10,8))
    plt.subplot(111)
    plt.plot(words, times, color = 'b', label="Insert Time of Open Hash in random data")
    yMin = min(times)
    yMax = max(times)
    words = []
    times = []
    readFile("/Users/luodian/Desktop/DSA/HashSet/HashSet/Probe_insert.txt",words,times)
    plt.plot(words, times, color='g', label= "Insert Time of Probe Hash in random data")
    yMin = min(min(times),yMin)
    yMax = max(max(times),yMax)
    plt.ylim(yMin , yMax)
    # BUG FIX: call the labelling functions instead of overwriting them.
    plt.xlabel("Scale(n)")
    plt.ylabel("Times(s)")
    plt.legend(loc="upper right", shadow=True)
    plt.grid()
    plt.savefig('/Users/luodian/Desktop/DSA/HashSet/HashSet/insert_comparasion_whole.png')
def find_random_whole():
    """Overlay Open vs Probe hash search times (random data) on one axes."""
    words = []
    times = []
    readFile("/Users/luodian/Desktop/DSA/HashSet/HashSet/Open_find.txt",words,times)
    plt.figure(figsize = (10,8))
    plt.subplot(111)
    plt.plot(words, times, color = 'b', label="Search Time of Open Hash in random data")
    yMin = min(times)
    yMax = max(times)
    words = []
    times = []
    readFile("/Users/luodian/Desktop/DSA/HashSet/HashSet/Probe_find.txt",words,times)
    plt.plot(words, times, color='g', label= "Search Time of Probe Hash in random data")
    yMin = min(min(times),yMin)
    yMax = max(max(times),yMax)
    plt.ylim(yMin , yMax)
    # BUG FIX: call the labelling functions instead of overwriting them.
    plt.xlabel("Scale(n)")
    plt.ylabel("Times(s)")
    plt.legend(loc = "upper right", shadow = True)
    plt.grid()
    plt.savefig('/Users/luodian/Desktop/DSA/HashSet/HashSet/find_comparasion.png')
def crash_open_whole():
    """Overlay BKDR vs FNV collision counts for the open-hash table."""
    words = []
    times = []
    readFile("/Users/luodian/Desktop/DSA/HashSet/HashSet/BKDR_open_crash.txt",words,times)
    plt.figure(figsize = (10,8))
    plt.subplot(111)
    plt.plot(words, times, color = 'b', label="Crash count of Open Hash in BKDR function")
    yMin = min(times)
    yMax = max(times)
    words = []
    times = []
    readFile("/Users/luodian/Desktop/DSA/HashSet/HashSet/FNV_open_crash.txt",words,times)
    plt.plot(words, times, color='g', label='Crash count of Open Hash in FNV function')
    yMin = min(min(times),yMin)
    yMax = max(max(times),yMax)
    plt.ylim(yMin , yMax)
    # BUG FIX: call the labelling functions instead of overwriting them.
    plt.xlabel("Scale(n)")
    plt.ylabel("Count(t)")
    plt.legend(loc="upper right", shadow=True)
    plt.grid()
    plt.savefig('/Users/luodian/Desktop/DSA/HashSet/HashSet/Crash_count_open.png')
def crash_probe_whole():
    """Overlay BKDR vs FNV collision counts for the probing hash table."""
    words = []
    times = []
    readFile("/Users/luodian/Desktop/DSA/HashSet/HashSet/BKDR_probe_crash.txt",words,times)
    plt.figure(figsize = (10,8))
    plt.subplot(111)
    plt.plot(words, times, color = 'b', label="Crash count of Probe Hash in BKDR function")
    yMin = min(times)
    yMax = max(times)
    words = []
    times = []
    readFile("/Users/luodian/Desktop/DSA/HashSet/HashSet/FNV_probe_crash.txt",words,times)
    plt.plot(words, times, color='g', label='Crash count of Probe Hash in FNV function')
    yMin = min(min(times),yMin)
    yMax = max(max(times),yMax)
    plt.ylim(yMin , yMax)
    # BUG FIX: call the labelling functions instead of overwriting them.
    plt.xlabel("Scale(n)")
    plt.ylabel("Count(t)")
    plt.legend(loc="upper right", shadow=True)
    plt.grid()
    plt.savefig('/Users/luodian/Desktop/DSA/HashSet/HashSet/Crash_count_probe.png')
if __name__ == "__main__":
insert_random()
insert_random_whole()
find_random_whole()
crash_open_whole()
crash_probe_whole()
| true |
659d94446094880a10ea9676bf34117feab863c0 | Python | plusEV/xgboost_to_c | /xgb_to_c.py | UTF-8 | 3,344 | 2.6875 | 3 | [] | no_license | import contextlib
from sklearn.tree.tree import DecisionTreeRegressor, DTYPE
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.ensemble.forest import ForestRegressor
#ALWAYS_INLINE = "__attribute__((__always_inline__))"
# Token emitted before each per-tree C function; the consuming C code is
# expected to #define it (e.g. to the GCC attribute commented out above).
ALWAYS_INLINE = "ALWAYS_INLINE"
class CodeGenerator(object):
    """Accumulates indented source lines; bracketed() manages nesting."""

    def __init__(self):
        self._lines = []
        self._indent = 0

    @property
    def lines(self):
        """The generated lines, in emission order."""
        return self._lines

    def write(self, line):
        """Append *line*, prefixed with one space per open indent level."""
        self._lines.append("{}{}".format(" " * self._indent, line))

    @contextlib.contextmanager
    def bracketed(self, preamble, postamble):
        """Emit *preamble*, indent the body one level, then *postamble*."""
        assert self._indent >= 0
        self.write(preamble)
        self._indent += 1
        yield
        self._indent -= 1
        self.write(postamble)
def code_gen_tree(tree, fn, gen=None):
    """
    Emit one C function named *fn* for a single xgboost tree dump.

    :param tree: list of dump lines for one booster (its header line first)
    :param fn: name of the generated C function
    :param gen: optional CodeGenerator to append to (a new one otherwise)
    :return: the generator's accumulated source lines
    """
    if gen is None:
        gen = CodeGenerator()
    def recur(ttree, level=0):
        # Walk sibling nodes at *level*, recursing into deeper subtrees.
        for i in range(0, len(ttree)):
            cn = ttree[i]
            try:
                nn = ttree[i+1]
            except IndexError:  # was a bare except; only IndexError can occur
                nn = {'level': -1}
            if cn['level'] > level:
                continue
            if cn['level'] < level:
                return
            branch = "if ({0}f) {{".format(cn['line'])
            if nn['level'] == level:
                gen.write("return {0}f;".format(cn['line']))
            elif nn['level'] > level:
                with gen.bracketed(branch, "}"):
                    recur(ttree[i+1:], level=nn['level'])
            else:
                with gen.bracketed("else {", "}"):
                    gen.write("return {0}f;".format(cn['line']))
    fn_decl = "{inline} double {name}(double* f) {{".format(
        inline=ALWAYS_INLINE,
        name=fn)
    info = []
    # BUG FIX: this loop previously iterated a non-existent global `lines`
    # (NameError at runtime); it must parse the `tree` that was passed in.
    for line in tree[1:]:
        line = line.replace(' ', '\t')
        level = line.count('\t')
        # Strip "N:" node prefix; convert "[fK<v]" splits to "f[K]<v" and
        # "leaf=v" lines to the bare value.
        s = line.split(',')[0].replace('\t', '')[2:]
        if s[:4] == 'leaf':
            s = s[5:]
        else:
            d = s.find('<')
            n = s[2:d]
            s = s[1] + '[' + n + ']' + s[d:s.find(']')]
        info.append({'line': s, 'level': level})
    with gen.bracketed(fn_decl, "}"):
        recur(info)
    return gen.lines
def get_tree(it):
    """
    Yield lists of dump lines, one list per booster tree.

    *it* is an iterator of lines; "booster" header lines delimit trees and
    are not included in the yielded lists.
    """
    tree = []
    while True:
        line = next(it, 'end')
        if line == 'end':
            # Exhausted input: flush whatever was accumulated (possibly []).
            yield tree
            break
        # BUG FIX: the original called re.search() but `re` was never
        # imported (NameError); a plain substring test is equivalent here.
        if 'booster' in line:
            if tree:
                yield tree
            tree = []
        else:
            tree.append(line)
def code_gen_ensemble(model_path, fn, gen=None):
    """
    Emit C code for a whole dumped model: one inline function per tree plus
    a summing entry point named *fn*.

    :param model_path: path to the xgboost text dump
    :return: the generator's accumulated source lines
    """
    if gen is None:
        gen = CodeGenerator()
    num_trees = 0
    # BUG FIX: the dump file was opened but never closed; a context manager
    # is safe here because the trees are fully consumed inside the loop.
    with open(model_path) as dump_file:
        for i, tree in enumerate(get_tree(dump_file)):
            name = "{name}_{index}".format(name='boost', index=i)
            code_gen_tree(tree, name, gen)
            num_trees += 1
    fn_decl = "double {name}(double* f) {{".format(name=fn)
    with gen.bracketed(fn_decl, "}"):
        gen.write("double result = 0.;")
        for i in range(num_trees):
            increment = "result += {name}_{index}(f);".format(
                name='boost', index=i)
            gen.write(increment)
        gen.write("return result;")
    return gen.lines
def xgb_to_c(model_path, fn):
    """Generate the full C source for *model_path* and return it as one string."""
    generated = code_gen_ensemble(model_path, fn=fn)
    assert generated is not None
    return "\n".join(generated)
| true |
83cc7f64d393d8763ac93f462ef27bf7dd873b51 | Python | stankiewiczm/contests | /ProjectEuler/UC solutions/Successful 1-50/Q024.py | UTF-8 | 643 | 2.765625 | 3 | [] | no_license | from Numeric import *
# N: 0-based index of the millionth permutation; D: its factorial-base digits.
N = 1000000-1; D = zeros(10);
def Translate(d):
    """Decode factorial-base digits *d* into the corresponding permutation.

    For each position i, Fre collects the digits 0-9 not yet used in e[:i];
    d[i] then selects which of the remaining digits comes next.
    """
    e = zeros(10);
    e[0] = d[0];
    for i in arange(1,10):
        Cnt = 0;
        # Fre = list of still-unused digits, in ascending order.
        Fre = zeros(10-i);
        for j in arange(10):
            Mis = 1;
            for k in arange(i):
                if e[k] == j:
                    Mis = 0;
            if (Mis == 1):
                Fre[Cnt] = j;
                Cnt = Cnt+1;
        # Pick the d[i]-th unused digit for position i.
        e[i] = Fre[d[i]];
    return(e);
def F(n):
    """Return n! (the factorial of n); F(0) == 1."""
    # Plain range() replaces Numeric's arange(): no array library is needed
    # for a simple integer product, and the result stays an exact int.
    P = 1
    for i in range(1, n + 1):
        P = P * i
    return P
# Greedy factorial-base decomposition of N: D[i] counts how many whole
# blocks of (9-i)! fit in what remains. (Python 2 print statement below.)
for i in arange(10):
    D[i] = N/(F(9-i));
    N = N - D[i]*F(9-i);
print D,"\n",Translate(D);
| true |
bfdd290394cfeead24bb8eb1dccc0b5703f92a24 | Python | amagee/mongofrog | /mongofrog.py | UTF-8 | 2,492 | 2.53125 | 3 | [] | no_license | import asyncio
import curses
from typing import List, Optional, Callable
from blessed import Terminal
import motor.motor_asyncio
import urwid
# Global Motor client; created in main().
client = None
# Current navigation position; None means "nothing selected at this level".
state = {
    'database': None,
    'collection': None,
}
# The urwid main loop; created in main().
urwid_loop = None
def main():
    """Wire Motor, asyncio and urwid together, then run the UI loop."""
    global loop, client, urwid_loop
    client = motor.motor_asyncio.AsyncIOMotorClient(host="localhost")
    loop = asyncio.get_event_loop()
    # urwid drives the shared asyncio loop so Motor coroutines can run
    # alongside UI events.
    urwid_loop = urwid.MainLoop(
        urwid.SolidFill(), # Placeholder
        palette=[('reversed', 'standout', '')],
        event_loop=urwid.AsyncioEventLoop(loop=loop),
        unhandled_input=lambda k: asyncio.create_task(handle_input(k)),
    )
    # Schedule the first render before the loop starts.
    loop.create_task(render())
    urwid_loop.run()
def menu(title: str, choices: List[str], onclick: Optional[Callable]):
    """Build a urwid ListBox menu of *choices* under a *title* header.

    When *onclick* is given, each button click schedules onclick(choice)
    as an asyncio task.
    """
    body = [urwid.Text(title), urwid.Divider()]
    for c in choices:
        button = urwid.Button(c)
        if onclick is not None:
            # `c` is passed as urwid user_data, so each button keeps its own
            # choice (avoids the late-binding closure pitfall).
            urwid.connect_signal(button, 'click', lambda b, d: asyncio.create_task(onclick(d)), c)
        body.append(urwid.AttrMap(button, None, focus_map='reversed'))
    return urwid.ListBox(urwid.SimpleFocusListWalker(body))
async def select_database(database):
    """Record the chosen database and redraw the collection menu."""
    state['database'] = database
    await render()
async def select_collection(collection):
    """Record the chosen collection and redraw the document listing."""
    state['collection'] = collection
    await render()
async def render():
    """Redraw the UI for the current navigation state.

    collection selected -> list up to 100 documents;
    database selected   -> list its collections;
    otherwise           -> list databases.
    """
    if state['collection'] is not None:
        items = await client[state['database']].get_collection(state['collection']).find().to_list(100)
        # Leaf level: plain text rows, no click handler.
        urwid_loop.widget = menu(
            f"{state['database']} -> {state['collection']}",
            [str(item) for item in items], None
        )
    elif state['database'] is not None:
        collections = await client[state['database']].list_collections()
        urwid_loop.widget = menu(
            state['database'],
            [c['name'] for c in collections], select_collection
        )
    else:
        cursor = await client.list_databases()
        databases = await cursor.to_list(length=999)
        urwid_loop.widget = menu('Databases', [d['name'] for d in databases], select_database)
def exit_program():
    """Stop the urwid main loop (the exception unwinds out of run())."""
    raise urwid.ExitMainLoop()
async def handle_input(key):
    """Global key handler: esc/q quits, backspace steps back up one level."""
    if key in ('esc', 'q'):
        exit_program()
    elif key == 'backspace':
        # Clear the deepest selection first, then redraw the menu.
        if state['collection'] is not None:
            state['collection'] = None
        elif state['database'] is not None:
            state['database'] = None
        await render()
if __name__ == "__main__":
main()
| true |
155bae86a37adb84a5ae7dca935111cbd4a9e0b4 | Python | FFFutureflo/CodingChallenges | /codeforces/800/WordCapitalization.py | UTF-8 | 338 | 3.9375 | 4 | [] | no_license | """
https://codeforces.com/problemset/problem/231/A
"""
def function(word=None):
    """
    Capitalize the first character of *word*, leaving the rest untouched.

    Reads a line from stdin when *word* is None (the original behaviour);
    prints the result and also returns it so the logic is reusable/testable.
    """
    if word is None:
        word = input()
    first_letter = word[0:1]
    # str.replace with count=1 hits the first occurrence, which is the first
    # character itself; str.capitalize() would wrongly lowercase the rest.
    result = word.replace(first_letter, first_letter.upper(), 1)
    print(result)
    return result
if __name__ == "__main__":
function()
"""
Read Input as String
Get first letter
Replace first letter with upper letter one time
print string
"""
| true |
a34e5da793f8f8a6e4dfc28c7cb7431dd3de4c45 | Python | Ruwzy/Python-Crash-Course-Practises | /PCC_4/PCC_4_13.py | UTF-8 | 332 | 3.390625 | 3 | [] | no_license | res_foods = ("sparking water", "pizza", "stark", "chicken", "french fries")
print("The restaurant provides food:")
for food in res_foods:
print(food)
res_foods = ("ice cream", "pizza", "stark", "chicken", "french fries", "tomato")
print("The restaurant's new menu is: ")
for food in res_foods:
print(food)
| true |
e52e648d0550ff37e25a783b5108d0ab64b6f490 | Python | satyam-cyc/MASS-Learning | /scripts/plotting/PlotMASSLossTermsTrainingCurves.py | UTF-8 | 3,221 | 2.671875 | 3 | [
"MIT"
] | permissive | import os
import pandas as pd
import seaborn as sns
sns.set()
sns.set_style("whitegrid")
# Expects data downloaded from tensorboard. You have to do this manually from within tensorboard; the file paths below
# are example placeholders.
# Your CSVs will have different filenames. You need to set datadir to be wherever you've stored the tensorboard CSVs
# Note that you need to replace the tensorboard seed tag with "seedX" in the filename.
datadir = ''
SoftmaxCE_training_curve_paths_models_terms = [
(
'run-SoftmaxCE_seedX_Feb08_19-24-56_ip-0-0-0-0-tag-MASSLossTerms_train__cross_entropy_term.csv',
'SoftmaxCE',
'$H(Y | f(X))$ (nats)'
),
(
'run-SoftmaxCE_seedX_Feb08_19-24-56_ip-0-0-0-0-tag-MASSLossTerms_train__entropy_term.csv',
'SoftmaxCE',
'$H(f(X))$ (nats)'
),
(
'run-SoftmaxCE_seedX_Feb08_19-24-56_ip-0-0-0-0-tag-MASSLossTerms_train__Jacobian_term.csv',
'SoftmaxCE',
'$- \mathbb{E}_X[\log \ J_{f}(X)]$'
),
(
'run-SoftmaxCE_seedX_Feb08_19-24-56_ip-0-0-0-0-tag-ModelLossAndAccuracy_Validation_Accuracy.csv',
'SoftmaxCE',
'Validation Accuracy (%)'
),
]
ReducedJacMASSCE_training_curve_paths_models_terms = [
(
'run-ReducedJacMASSCE_seedX_Feb08_19-24-56_ip-0-0-0-0-tag-MASSLossTerms_train__cross_entropy_term.csv',
'MASS',
'$H(Y | f(X))$ (nats)'
),
(
'run-ReducedJacMASSCE_seedX_Feb08_19-24-56_ip-0-0-0-0-tag-MASSLossTerms_train__entropy_term.csv',
'MASS',
'$H(f(X))$ (nats)'
),
(
'run-ReducedJacMASSCE_seedX_Feb08_19-24-56_ip-0-0-0-0-tag-MASSLossTerms_train__Jacobian_term.csv',
'MASS',
'$- \mathbb{E}_X[\log \ J_{f}(X)]$'
),
(
'run-ReducedJacMASSCE_seedX_Feb08_19-24-56_ip-0-0-0-0-tag-ModelLossAndAccuracy_Validation_Accuracy.csv',
'MASS',
'Validation Accuracy (%)'
),
]
total_steps = 50000
training_curves = []
for seed in range(5):
for path, model, term in SoftmaxCE_training_curve_paths_models_terms + ReducedJacMASSCE_training_curve_paths_models_terms:
path = os.path.join(datadir, path).replace('seedX', 'seed{}'.format(seed))
df = pd.read_csv(path, usecols=['Step', 'Value'])
df['Training Method'] = model
df['Loss Term'] = term
df['Seed'] = seed
training_curves.append(df.loc[df['Step'] <= total_steps])
training_curves = pd.concat(training_curves)
training_curves.rename(columns={'Step': 'Training Step'}, inplace=True)
g = sns.lineplot(data=training_curves,
x='Training Step',
y='Value',
hue='Loss Term',
style='Training Method',
err_style='band',
ci='sd')
g.set_xlim(0, 50000)
g.get_legend().set_bbox_to_anchor((1.05, 0.8))
g.get_figure().set_figwidth(10)
g.get_figure().set_figheight(5)
g.get_figure().subplots_adjust(bottom=0.2, right=0.6)
header = g.get_legend().texts[0]
header._fontproperties = header._fontproperties.copy()
g.get_legend().texts[5]._fontproperties = header._fontproperties
header.set_weight('bold')
g.get_figure().savefig('./runs/TrainingCurves.pdf', format='pdf')
| true |
a633d8cffa68d24d9e668f8b684c0f2752393eb1 | Python | Talengi/phase | /src/dashboards/dashboards.py | UTF-8 | 1,151 | 2.859375 | 3 | [
"MIT"
] | permissive | class DashboardProvider(object):
    # Timestamp format used by Elasticsearch date fields
    # (ISO 8601 with microseconds, Zulu suffix).
    es_date_format = '%Y-%m-%dT%H:%M:%S.%fZ'
    def __init__(self, **kwargs):
        """Store the optional `category` this dashboard is scoped to."""
        self.category = kwargs.get('category', None)
    # Subclass hook: concrete providers implement the actual ES request.
    def query_elasticsearch(self):
        """Performs actual query to Elastic Search.
        The method must return a dict, as in the `to_dict` function of the
        python elastic search api.
        """
        raise NotImplementedError()
def fetch_data(self):
"""Sends a request to ES, and save the response in local variables."""
data = self.query_elasticsearch()
self.hits = data['hits']['hits']
self.total_hits = data['hits']['total']
self.took = data['took']
self.aggregations = data['aggregations']
    # Subclass hook: column headers for the rendered dashboard table.
    def get_headers(self):
        """Must return a list of dates."""
        raise NotImplementedError()
    # Subclass hook: the table body, keyed by row name.
    def get_buckets(self):
        """Return an ordered dict of data.
        Each key is a string, and is the name of the row.
        Each value is a list that contains as many values as there are headers.
        """
        raise NotImplementedError()
class EmptyDashboard(DashboardProvider):
    """Null provider: overrides nothing, so every hook still raises.

    Presumably used as a placeholder/default dashboard -- confirm at call
    sites before relying on it.
    """
    pass
| true |
f32de78f226af34b14d0bf5ca4c2356c19da1107 | Python | Saf1n/python_algorythm | /HomeWork3/test3_9.py | UTF-8 | 548 | 3.984375 | 4 | [] | no_license | __author__ = 'Сафин Ильшат'
# 9. Find the maximum element among the minimum elements of the matrix COLUMNS.
print('Введите 12 цифр:')
matrix = [[int(input()) for _ in range(4)] for _ in range(3)]
for i in range(3):
    print(matrix[i])
# BUG FIX: the original scanned rows (matrix[i][j] over j) even though the
# task asks for column minima, and it used hard-coded sentinels (10 / -1)
# that only work for single-digit inputs. zip(*matrix) iterates the four
# columns and the min()/max() builtins work for any integers.
mx = max(min(column) for column in zip(*matrix))
print('Максимальный элемент среди минимальных:', mx)
| true |
36494e3765f35cb7a104a349bf3c63543b1d279a | Python | xlui/DsAlgoDp | /BucketSort.py | UTF-8 | 2,548 | 4.125 | 4 | [] | no_license | # 桶排序
# 如果一个数组 A,包含 N 个整数,值从 1 到 M,那么我们可以得到一种非常快的排序,桶排序。
# 留置一个数组,里面有 M 个桶,初始化为 0
# 然后遍历数组 A,读入 Ai 时,S[Ai]增一。
# 所有输入被读入后,扫描数组 S 得出排好的表
# 该算法时间花费 O(M+N),空间上不能原地排序
import random
class BucketSort(object):
    """Counting ("bucket") sort for lists of integers.

    sort() keeps a table of size max-min+1 (space-efficient, supports
    negative values); bucket_sort() is the naive 0..max variant kept for
    reference.
    """

    def _max(self, old_list):
        """Return the largest element of *old_list*."""
        return max(old_list)

    def _min(self, old_list):
        """Return the smallest element of *old_list*."""
        return min(old_list)

    def sort(self, old_list):
        """Sort *old_list* in place using min..max offset counting.

        Allocates only max-min+1 counters (indexing by value - min), which
        saves memory versus bucket_sort() and also handles negatives.
        """
        if not old_list:
            return  # nothing to do; also avoids min()/max() on empty input
        _max = self._max(old_list)
        _min = self._min(old_list)
        counts = [0] * (_max - _min + 1)
        for value in old_list:
            counts[value - _min] += 1
        index = 0
        for offset, count in enumerate(counts):
            for _ in range(count):
                old_list[index] = _min + offset
                index += 1

    def bucket_sort(self, old_list):
        """Naive in-place counting sort with a 0..max table.

        Wastes memory when min(old_list) is large and requires non-negative
        integers; prefer sort().
        """
        if not old_list:
            return
        _max = self._max(old_list)
        counts = [0] * (_max + 1)
        for value in old_list:
            counts[value] += 1
        index = 0
        for value, count in enumerate(counts):
            for _ in range(count):
                old_list[index] = value
                index += 1

    def __call__(self, old_list):
        """Sort *old_list* in place and return it."""
        # FIX: the original comments say to prefer the space-efficient
        # variant, yet __call__ used bucket_sort(); both produce the same
        # sorted result, so dispatch to sort() as intended.
        self.sort(old_list)
        return old_list
if __name__ == '__main__':
    # Demo: sort 10 random integers in [0, 100] and show before/after.
    l = [random.randint(0, 100) for i in range(10)]
    print('要被桶排序列表为:\t', l)
    BucketSort()(l)
    print('排序后的列表为:\t', l)
| true |
1411d9757180b083796e84becbd669bbedccc6d8 | Python | ffhan/lingua | /automata/nfa.py | UTF-8 | 8,430 | 3.046875 | 3 | [
"MIT"
] | permissive | """
Defines Non-deterministic finite automata, including epsilon non deterministic finite automata.
"""
import copy
import automata.fa as fa
import automata.state as st
import misc.helper as helper
class NFA(fa.FiniteAutomaton):
    '''
    Non-deterministic finite automata.
    '''
    def distinguish(self):
        raise NotImplementedError
    def minimize(self):
        raise NotImplementedError
    def _check_structure(self):
        # NFAs impose no additional structural constraints.
        pass
    @property
    def accepted(self):
        """True when any current state is an accepting state."""
        return any(state in self.accepted_states for state in self.current)
    def _access(self, value):
        """Advance every current state over *value* (resolving aliases)."""
        if value not in self.inputs:
            raise ValueError(self._input_error(value))
        next_states = set()
        for state in sorted(self.current):
            for target in state.forward(value):
                next_states.add(self.states[self._get_alias(target.name)])
        self.current = next_states
    @staticmethod
    def factory(input_text, lexer):
        """Build an NFA of this class from *input_text* via *lexer*."""
        lexer.scan(input_text)
        return __class__(lexer.states, lexer.inputs, lexer.start_state)
class EpsilonNFA(NFA):
    """
    Epsilon non-deterministic finite automata.
    """
    def minimize(self):
        raise NotImplementedError
    def distinguish(self):
        raise NotImplementedError
    def __init__(self, states, inputs, start_state, epsilon='$'):
        """Build the automaton; *epsilon* is the empty-transition symbol."""
        self._epsilon = epsilon
        super().__init__(states, inputs, start_state)
        # Epsilon is always a legal input symbol for this machine.
        self.inputs.add(epsilon)
    @property
    def epsilon(self):
        """The symbol used for epsilon (empty) transitions."""
        return self._epsilon
    @property
    def accepted(self):
        """True when the epsilon closure of the current states hits an accepting state."""
        for state in self._all_closures():
            if state in self.accepted_states:
                return True
        return False
    def _e_closure(self, state, closure):
        """
        Returns an epsilon closure of a state.
        :param State state: state
        :param set closure: closure set
        :return set: epsilon closure of a state
        """
        if state not in self:
            raise ValueError(self._state_error(state))
        if isinstance(state, str):
            # Normalise a state name to its State object before recursing.
            closure -= {state}
            state = self.states[state]
        closure |= {state}
        # Depth-first walk over epsilon edges; *closure* doubles as the
        # visited set, which terminates cycles.
        for eps in state.forward(self._epsilon):
            if eps not in closure:
                self._e_closure(eps, closure)
        return closure
    def e_closures(self, *states):
        """
        Returns epsilon closure for all specified states.
        :param states: specified states
        :return set: epsilon closure for specified states
        """
        currents = set(states) if not isinstance(states, set) else states
        for i in states:
            currents |= self._e_closure(i, currents)
        return currents
    def _all_closures(self):
        """
        Returns epsilon closure of all current states.
        :return set: epsilon closure
        """
        return self.e_closures(*self.current)
    def _access(self, value):
        """Advance over *value*, then close the result under epsilon."""
        super()._access(value)
        self.current = self._all_closures()
    def _process(self, *entry):
        """Close the start configuration under epsilon before processing."""
        self.current = self._all_closures()
        return super()._process(*entry)
    def __add__(self, other):
        """
        Allows for epsilon NFA addition.
        :param EpsilonNFA other: other epsilon NFA
        :return EpsilonNFA: resulting NFA
        """
        # Thompson-style union: a fresh start/end pair wired by epsilon
        # edges to renamed copies of both operands.
        import form.generators as lex
        #ensuring state names are not identical when doing multiple additions.
        ending = 'end_'
        for state in self.accepted_states:
            ending += str(state.name)
        for state in other.accepted_states:
            ending += str(state.name)
        # ensuring state names are not identical when doing multiple additions.
        starting = 'start_' + str(self.start_state.name) + str(other.start_state.name)
        #we need a clean epsilon NFA instance. See NFA union.
        new_e_nfa = self.factory(
            """{0},{1}
            {1}
            {0}
            """.format(starting, ending), lex.StandardFormatGenerator())
        starting = new_e_nfa.start_state
        ending = list(new_e_nfa.accepted_states)[0]
        copied_self = self.deepcopy()
        copied_other = other.deepcopy()
        states = dict()
        # Prefix operand state names ('a_0'/'a_1') so the two copies can
        # never collide inside the combined state table.
        for name in list(copied_self.states):
            new_name = 'a_0' + name.name
            state = copied_self.states[name]
            state.name = st.StateName(new_name)
            states[state.name] = state
        for name in list(copied_other.states):
            new_name = 'a_1' + name.name
            state = copied_other.states[name]
            state.name = st.StateName(new_name)
            states[state.name] = state
        # new_e_nfa.states.update(copied_self.states)
        # new_e_nfa.states.update(copied_other.states)
        new_e_nfa.states.update(states)
        new_e_nfa.inputs |= copied_self.inputs | copied_other.inputs
        # Epsilon edges from the new start into each operand's old start.
        starting.add_function(new_e_nfa.states[copied_self.start_state.name], self._epsilon)
        starting.add_function(new_e_nfa.states[copied_other.start_state.name], self._epsilon)
        for state in new_e_nfa.accepted_states:
            if state != ending:
                state.add_function(ending, self._epsilon)
        # Demote the operands' old accepting states; only the new end accepts.
        # NOTE(review): value == 0 appears to mark "not accepting" -- confirm
        # against automata.state.State.
        for state in new_e_nfa.accepted_states:
            if state != ending:
                state.value = 0
        return new_e_nfa
    def __mul__(self, other):
        """
        Allows for multiplying epsilon NFA-s.
        :param EpsilonNFA other: other EpsilonNFA
        :return EpsilonNFA: multiplied NFA-s
        """
        # Concatenation: self's accepting states gain epsilon edges into
        # other's start; both operands are renamed with 'm_0'/'m_1' prefixes.
        first = self.deepcopy()
        size1 = len(first.states)
        other = other.deepcopy()
        size2 = len(other.states)
        states = dict()
        for name in list(first.states):
            new_name = 'm_0' + name.name
            state = first.states[name]
            state.name = st.StateName(new_name)
            states[state.name] = state
        for name in list(other.states):
            new_name = 'm_1' + name.name
            state = other.states[name]
            state.name = st.StateName(new_name)
            states[state.name] = state
        for state in list(first.accepted_states):
            state.add_function(other.start_state, first.epsilon)
        # first.states.update(other.states)
        first.states = states
        # Sanity check: renaming must not have merged any states.
        try:
            assert len(first.states) == size1 + size2
        except AssertionError as err:
            raise err
        first.inputs |= other.inputs
        # Only other's accepting states remain accepting in the product.
        for state in first.accepted_states:
            if state not in other.accepted_states:
                state.value = 0
        return first
    def kleene_operator(self):
        """Return a new automaton accepting the Kleene star of this one."""
        import form.generators as lex
        # ensuring state names are not identical when doing multiple additions.
        ending = 'end_'
        for state in self.accepted_states:
            ending += str(state.name)
        # ensuring state names are not identical when doing multiple additions.
        starting = 'start_' + str(self.start_state.name)
        # we need a clean epsilon NFA instance. See NFA union.
        new_e_nfa = self.factory(
            """{0},{1}
            {1}
            {0}
            """.format(starting, ending), lex.StandardFormatGenerator()) # starting and ending might need escaping
        starting = new_e_nfa.start_state
        ending = list(new_e_nfa.accepted_states)[0]
        # start -> end epsilon edge: the star accepts the empty string.
        starting.add_function(ending, self._epsilon)
        copied_self = self.deepcopy()
        new_e_nfa.states.update(copied_self.states)
        new_e_nfa.inputs |= copied_self.inputs
        # Loop edges: enter the copied machine, and re-enter it from the end.
        starting.add_function(new_e_nfa.states[self.start_state.name], self._epsilon)
        ending.add_function(new_e_nfa.states[self.start_state.name], self._epsilon)
        for state in new_e_nfa.accepted_states:
            if state != ending:
                state.add_function(ending, self._epsilon)
        for state in new_e_nfa.accepted_states:
            if state != ending:
                state.value = 0
        return new_e_nfa
    @staticmethod
    def factory(input_text, lexer):
        """Build an EpsilonNFA from *input_text* via *lexer*."""
        lexer.scan(input_text)
        return __class__(lexer.states, lexer.inputs, lexer.start_state)
    def _create_copy(self, *args):
        # Preserve the epsilon symbol when the base class clones us.
        return self.__class__(*args, epsilon=copy.deepcopy(self._epsilon))
    def _create_state(self, *args):
        # States must share this machine's epsilon symbol.
        return st.State(*args, epsilon=self._epsilon)
| true |
0604d6facc5a85d7c73506d247311404c7fb67c8 | Python | zeppertrek/my-python-sandpit | /pibm-training/sample-programs/user_input.py | UTF-8 | 141 | 3.71875 | 4 | [] | no_license | #user_input.py
# Accept input from the user
print('Please enter some text:')
x = input()  # input() always returns str in Python 3
print('Text entered:', x)
print('Type:', type(x))
| true |
5c94840913b6e4af81c607e839e917396e662d92 | Python | inkyu0103/BOJ | /Simulation/11723.py | UTF-8 | 1,108 | 3.75 | 4 | [] | no_license | # 집합
'''
add x
remove x
check x
toggle x
all
empty
'''
import sys
input = sys.stdin.readline
def add(target):
    """Insert *target* into the global set S (duplicates are no-ops)."""
    S.add(target)
def remove(target):
    """Remove *target* from the global set S; silently no-op if absent."""
    # set.discard is the built-in "remove if present" operation: one lookup
    # instead of the membership test followed by remove().
    S.discard(target)
def toggle(target):
    """Remove *target* from S when present, otherwise insert it."""
    if target not in S:
        add(target)
    else:
        remove(target)
def check(target):
    """Print 1 when *target* is a member of S, 0 otherwise."""
    print(1 if target in S else 0)
def all():
    """Reset S to the full universe {1, ..., 20}.

    NOTE: shadows the built-in all() in this module; the name mirrors the
    problem's command vocabulary.
    """
    global S
    S = set(i for i in range(1,21))
def empty():
    """Clear S entirely (the problem's "empty" command)."""
    global S
    S =set()
if __name__ =="__main__":
M = int(input())
S = set()
for _ in range(M):
target_list = input().strip().split(" ")
if len(target_list)==2:
command, num = target_list
if command == "add":
add(num)
elif command == "remove":
remove(num)
elif command == "toggle":
toggle(num)
elif command == "check":
check(num)
else:
command = target_list[0]
if command == "all":
all()
else:
empty()
| true |
7449a160311eec070bb7794e07339fe3ad3c7a17 | Python | hcoliver97/InfectionSim | /Rv.py | UTF-8 | 1,476 | 3.640625 | 4 | [] | no_license | import random
import math
class Uniform():
    """Uniform(min, max) random-variate generator (inverse transform)."""

    def __init__(self):
        self.max = 0
        self.min = 0

    def generate(self, max, min):
        """Draw one sample uniformly from [min, max).

        Raises RuntimeError when min exceeds max. Note the (max, min)
        argument order, kept for caller compatibility.
        """
        self.max = max
        self.min = min
        span = self.max - self.min
        if span < 0:
            raise RuntimeError("Min must be less than max")
        return self.min + span * random.random()
class Triangle():
    """Triangular(min, mode, max) random-variate generator."""

    def __init__(self):
        self.min = 0
        self.max = 0
        self.mode = 0
        self.range = 0
        self.crossover_p = 0

    def generate(self, min, max, mode):
        """Draw one triangular sample via the inverse-CDF method.

        Raises RuntimeError when min > max or when mode falls outside
        [min, max].
        """
        self.min = min
        self.max = max
        self.mode = mode
        self.range = self.max - self.min
        if self.range < 0:
            raise RuntimeError("Min must be less than max")
        if not (self.min <= self.mode <= self.max):
            raise RuntimeError("Mode must be between min and max")
        # Probability mass to the left of the mode decides which branch of
        # the piecewise inverse CDF applies.
        self.crossover_p = (self.mode - self.min) / self.range
        u = random.random()
        if u < self.crossover_p:
            return self.min + math.sqrt(self.range * (self.mode - self.min) * u)
        return self.max - math.sqrt(self.range * (self.max - self.mode) * (1.0 - u))
class Exponential():
    """Exponential(rate) random-variate generator via inverse transform."""

    def __init__(self):
        self.rate = 0

    def generate(self, rate):
        """Draw one Exp(rate) sample; raises RuntimeError for negative rate."""
        self.rate = rate
        if self.rate < 0:
            raise RuntimeError("Rate must be positive")
        draw = random.random()
        # Inverse CDF: X = -ln(U) / rate.
        return -math.log(draw) / self.rate
| true |
5a5c9aa07a902bbf31034138b789e4f419943b8d | Python | doosea/god_like | /myRecommendation/PersonalRank/mat_util.py | UTF-8 | 1,672 | 2.546875 | 3 | [] | no_license | from scipy.sparse import coo_matrix
from myRecommendation.PersonalRank.read import *
import numpy as np
def graph_to_mat(graph):
    """
    Convert an adjacency dict into a sparse transition matrix.

    :return:
        sparse COO matrix M (row i -> uniform weights over i's neighbours),
        a list of all (item+user) vertices,
        a dict mapping each vertex to its matrix index
    """
    vertex = list(graph.keys())
    address_dict = {name: idx for idx, name in enumerate(vertex)}
    row, col, data = [], [], []
    for node, neighbours in graph.items():
        # Each neighbour of `node` gets an equal share of its weight.
        weight = round(1 / len(neighbours), 3)
        row_index = address_dict[node]
        for neighbour in neighbours:
            row.append(row_index)
            col.append(address_dict[neighbour])
            data.append(weight)
    size = len(vertex)
    m = coo_matrix((np.array(data), (np.array(row), np.array(col))),
                   shape=(size, size))
    return m, vertex, address_dict
def mat_all_point(m_mat, vertex, alpha=0.6):
    """
    Build (E - alpha * M^T) for the PersonalRank linear system.

    :param m_mat: sparse transition matrix M
    :param vertex: list of all vertices (fixes the matrix dimension)
    :param alpha: damping / walk-continuation probability
    :return: sparse CSR matrix E - alpha * M^T
    """
    total_len = len(vertex)
    diag = np.arange(total_len)
    ones = np.ones(total_len, dtype=int)
    # Identity matrix E built directly in COO form.
    eye_t = coo_matrix((ones, (diag, diag)), shape=(total_len, total_len))
    return eye_t.tocsr() - alpha * m_mat.tocsr().transpose()
if __name__ == '__main__':
    # Smoke test: build the transition matrix from the sample log and print
    # (E - alpha * M^T) densely.
    graph = get_graph_from_data("../data/log.txt")
    m, vertex, address_dict = graph_to_mat(graph)
    res = mat_all_point(m, vertex, alpha=0.6)
    print(res.todense())
| true |
6b45189513baece77570c961ebae3946b309b580 | Python | hamatz/efppap | /enc_and_sign.py | UTF-8 | 1,923 | 2.59375 | 3 | [
"Apache-2.0"
] | permissive | import sys
import binascii
import json
import base64
from utils.aes_util import AESUtil
from utils.rsa_util import RSAUtil
def main(target_file_name, path_to_rsa_pub_key, result_file_name, prv_key, pass_phrase):
    """Encrypt a file, wrap its key, sign the payload, and write JSON.

    Flow: AES-encrypt the target, RSA-encrypt the AES key with the
    recipient's public key, sign the concatenated fields with the sender's
    private key, then dump everything to *result_file_name*.
    """
    p_version = "0.1.0"
    aes_util = AESUtil()
    with open(target_file_name, 'rb') as f:
        target = f.read()
    # Symmetric encryption of the payload; the key is fetched afterwards.
    enc_content = aes_util.encrypt(target)
    content_key = aes_util.get_aes_key()
    with open(path_to_rsa_pub_key, 'r') as f2:
        pkey_data = f2.read()
    with open( prv_key, 'r') as f3:
        s = f3.read()
    # Private key file is stored hex-encoded.
    my_prvkey = binascii.unhexlify(s)
    rsa_util = RSAUtil()
    rsa_util.import_prv_key(my_prvkey, pass_phrase)
    # Wrap the AES content key with the recipient's RSA public key.
    enc_key = rsa_util.encrypt_with_pubkey(content_key, pkey_data)
    result = {}
    result["version"] = p_version
    result["file_name"] = target_file_name
    # Binary fields are base64-encoded and then hex-encoded for JSON.
    content_key_txt = binascii.hexlify(base64.b64encode(enc_key)).decode('ascii')
    result["content_key"] = content_key_txt
    content_txt = binascii.hexlify(base64.b64encode(enc_content)).decode('ascii')
    result["content"] = content_txt
    sender_pub_key = binascii.hexlify(rsa_util.get_my_pubkey()).decode('ascii')
    result["sender"] = sender_pub_key
    # Signature covers version + file name + key + content + sender fields.
    signature = rsa_util.compute_digital_signature(p_version + target_file_name + content_key_txt + content_txt + sender_pub_key)
    result["signature"] = signature
    with open(result_file_name, 'w') as f4:
        json.dump(result, f4, indent=4)
if __name__ == '__main__':
    # CLI entry point: expects exactly five positional arguments.
    args = sys.argv
    if len(args) == 6:
        target = args[1]
        pub_key = args[2]
        result = args[3]
        prv_key = args[4]
        pass_phrase = args[5]
    else:
        print('Param Error')
        print('$ enc_and_sign.py <target_file_name> <path_to_rsa_pub_key> <result_file_name> <path_to_rsa_prv_key> <pass_phrase>')
        quit()
    main(target, pub_key, result, prv_key, pass_phrase)
bfbfafa90515bcdd5cb52c7e9d449548c0b0b8a2 | Python | kapilsinha/botnet-surf | /old_files/tests/test4.py | UTF-8 | 2,340 | 2.65625 | 3 | [] | no_license | import sys
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
# Documentation: https://graph-tool.skewed.de/static/doc/index.html
class SpecialBox(Gtk.Box):
    """A Gtk.Box holding a TreeView with a text column and an editable spin column."""
    def __init__(self, GUI):
        Gtk.Box.__init__(self)
        # Back-reference to the owning GUI so edits can update its label.
        self.GUI = GUI
        # Columns: fruit name, editable number, spin upper bound.
        self.liststore = Gtk.ListStore(str, int, int)
        self.liststore.append(["Apple", 0, 100])
        self.liststore.append(["Pear", 0, 100])
        self.liststore.append(["Orange", 0, 100])
        treeview = Gtk.TreeView(model=self.liststore)
        filter_name = Gtk.CellRendererText()
        column_text = Gtk.TreeViewColumn("Fruit is good", filter_name, text=0)
        treeview.append_column(column_text)
        self.filter_low = Gtk.CellRendererSpin()
        self.filter_low.connect("edited", self.low_on_amount_edited)
        self.filter_low.set_property("editable", True)
        # Adjustment(value, lower, upper, step, page, page_size).
        low_adjustment = Gtk.Adjustment(0, 0, 99, 1, 10, 0)
        self.filter_low.set_property("adjustment", low_adjustment)
        low_spin = Gtk.TreeViewColumn("Random Number", self.filter_low, text=1)
        treeview.append_column(low_spin)
        self.add(treeview)
    def low_on_amount_edited(self, widget, path, value):
        """Store the edited spin value in the model and mirror it in the GUI label."""
        value = int(value)
        self.liststore[path][1] = value
        self.GUI.set_label(str(value))
self.GUI.set_label(str(value))
class GUI:
    """Top-level window holding a SpecialBox and a text label."""
    def __init__(self):
        self.win = Gtk.Window()
        self.window_grid = Gtk.Grid()
        self.special_box = Gtk.Box(spacing=10)
        self.label = Gtk.Label("Number label")
        # Quit the GTK main loop when the window is closed.
        self.win.connect("delete-event", Gtk.main_quit)
        self.start_window()
    def start_window(self):
        """Assemble the widget tree and show everything."""
        self.special_box.pack_start(SpecialBox(self), True, True, 0)
        self.window_grid.add(self.special_box)
        self.window_grid.add(self.label)
        self.win.add(self.window_grid)
        self.win.show_all()
    def set_label(self, value):
        """Replace the label widget with a new one showing *value*."""
        # Gtk labels are replaced (destroy + re-add) rather than updated here.
        self.label.destroy()
        self.label = Gtk.Label(value)
        self.window_grid.add(self.label)
        self.win.show_all()
    def restart_window(self, label="Number"):
        """Tear down the grid and rebuild the whole window from scratch."""
        self.window_grid.destroy()
        self.window_grid = Gtk.Grid()
        self.special_box = Gtk.Box(spacing=10)
        self.label = Gtk.Label(label)
        self.start_window()
def main():
    # Build the window and hand control to the GTK main loop; blocks
    # until the loop quits.
    app = GUI()
    Gtk.main()
if __name__ == "__main__":
    sys.exit(main())
8634e749611e8da654358e92a9d0f9c87c247a6a | Python | Sally-E/sample | /hello.py | UTF-8 | 70 | 2.71875 | 3 | [] | no_license | #! -*-coding* utf-8-*-
# NOTE(review): Python 2 syntax (print statement); under Python 3 this
# would need print("hello world"). Prints the greeting once per element
# of the literal list, i.e. four times.
for i in [0, 1, 2, 3]:
    print "hello world"
| true |
336c8f19079f70d35a21d4529c17d5130a7efc9e | Python | zopefoundation/zExceptions | /src/zExceptions/tests/test_unauthorized.py | UTF-8 | 4,264 | 2.546875 | 3 | [
"ZPL-2.1"
] | permissive | ##############################################################################
#
# Copyright (c) 2010 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Unit tests for unauthorized module.
"""
import unittest
from zope.interface.verify import verifyClass
class UnauthorizedTests(unittest.TestCase):
    # Exercises zExceptions.unauthorized.Unauthorized: interface
    # conformance plus the bytes()/str() renderings when the single
    # constructor argument is treated as a message (contains spaces)
    # versus a name, in ASCII bytes, UTF-8-encoded bytes, and native str.
    def _getTargetClass(self):
        # Deferred import so the module under test is resolved lazily.
        from zExceptions.unauthorized import Unauthorized
        return Unauthorized
    def _makeOne(self, *args, **kw):
        # Convenience factory for the class under test.
        return self._getTargetClass()(*args, **kw)
    def test_interfaces(self):
        # The class must fully implement zope.security's IUnauthorized.
        from zope.security.interfaces import IUnauthorized
        verifyClass(IUnauthorized, self._getTargetClass())
    def test_empty(self):
        # No arguments: every attribute is None and the fallback
        # 'Unauthorized()' rendering is used.
        exc = self._makeOne()
        self.assertEqual(exc.name, None)
        self.assertEqual(exc.message, None)
        self.assertEqual(exc.value, None)
        self.assertEqual(exc.needed, None)
        self.assertEqual(bytes(exc), b'Unauthorized()')
        self.assertEqual(str(exc), 'Unauthorized()')
    def test_ascii_message(self):
        # An argument containing a space is stored as .message and echoed
        # verbatim by bytes()/str().
        arg = b'ERROR MESSAGE'
        exc = self._makeOne(arg)
        self.assertEqual(exc.name, None)
        self.assertEqual(exc.message, arg)
        self.assertEqual(exc.value, None)
        self.assertEqual(exc.needed, None)
        self.assertEqual(bytes(exc), arg)
        self.assertEqual(str(exc), arg.decode('ascii'))
    def test_encoded_message(self):
        # Non-ASCII message supplied as UTF-8 bytes: bytes() returns the
        # raw bytes, str() decodes them as UTF-8.
        arg = 'ERROR MESSAGE \u03A9'.encode()
        exc = self._makeOne(arg)
        self.assertEqual(exc.name, None)
        self.assertEqual(exc.message, arg)
        self.assertEqual(exc.value, None)
        self.assertEqual(exc.needed, None)
        self.assertEqual(bytes(exc), arg)
        self.assertEqual(str(exc), arg.decode('utf-8'))
    def test_str_message(self):
        # Non-ASCII message supplied as str: str() echoes it, bytes()
        # UTF-8-encodes it.
        arg = 'ERROR MESSAGE \u03A9'
        exc = self._makeOne(arg)
        self.assertEqual(exc.name, None)
        self.assertEqual(exc.message, arg)
        self.assertEqual(exc.value, None)
        self.assertEqual(exc.needed, None)
        self.assertEqual(bytes(exc), arg.encode('utf-8'))
        self.assertEqual(str(exc), arg)
    def test_ascii_name(self):
        # An argument without spaces is stored as .name and rendered via
        # the "You are not allowed to access ..." template.
        arg = b'ERROR_NAME'
        exc = self._makeOne(arg)
        self.assertEqual(exc.name, arg)
        self.assertEqual(exc.message, None)
        self.assertEqual(exc.value, None)
        self.assertEqual(exc.needed, None)
        self.assertEqual(
            bytes(exc),
            b"You are not allowed to access 'ERROR_NAME' in this context")
        self.assertEqual(
            str(exc),
            "You are not allowed to access 'ERROR_NAME' in this context")
    def test_encoded_name(self):
        # Non-ASCII name supplied as UTF-8 bytes (\xce\xa9 is U+03A9).
        arg = 'ERROR_NAME_\u03A9'.encode()
        exc = self._makeOne(arg)
        self.assertEqual(exc.name, arg)
        self.assertEqual(exc.message, None)
        self.assertEqual(exc.value, None)
        self.assertEqual(exc.needed, None)
        self.assertEqual(
            bytes(exc),
            (b"You are not allowed to access "
             b"'ERROR_NAME_\xce\xa9' in this context"))
        self.assertEqual(
            str(exc),
            "You are not allowed to access "
            "'ERROR_NAME_\u03A9' in this context")
    def test_str_name(self):
        # Non-ASCII name supplied as str; renderings mirror the bytes case.
        arg = 'ERROR_NAME_\u03A9'
        exc = self._makeOne(arg)
        self.assertEqual(exc.name, arg)
        self.assertEqual(exc.message, None)
        self.assertEqual(exc.value, None)
        self.assertEqual(exc.needed, None)
        self.assertEqual(
            bytes(exc),
            (b"You are not allowed to access "
             b"'ERROR_NAME_\xce\xa9' in this context"))
        self.assertEqual(
            str(exc),
            "You are not allowed to access "
            "'ERROR_NAME_\u03A9' in this context")
| true |
b0b16c6b284014463772a8be2f58b67ca91806a1 | Python | JMine97/ProblemSolvingByPy | /week1/gyuri/1013.py | UTF-8 | 298 | 3.015625 | 3 | [] | no_license | # 해님님, 준범님 코드 참고해서 코드
import re

# A signal is accepted when the whole string matches (100+1+|01)+ .
PATTERN = re.compile('(100+1+|01)+')

case_count = int(input())
verdicts = []
for _ in range(case_count):
    signal = str(input())
    verdicts.append('YES' if PATTERN.fullmatch(signal) else 'NO')

# All cases are read first; answers are printed afterwards in order,
# matching the original two-pass structure.
for verdict in verdicts:
    print(verdict)
| true |
c1867dd03a56e7c8a3a87afab234d7e3a0f27e84 | Python | Lyubov-smile/SEP | /String/task08.py | UTF-8 | 1,910 | 4.4375 | 4 | [] | no_license | # 8 Написати функцію, що перетворює рядок в дробове або ціле число.
''' # converts a string to a number
num_0_19 = {'One': 1, 'Two': 2, 'Three': 3, 'Four': 4, 'Five': 5, 'Six': 6, 'Seven': 7, 'Eight': 8, 'Nine': 9,
'Ten': 10, 'Eleven': 11, 'Twelve': 12, 'Thirteen': 13, 'Fourteen': 14, 'Fifteen': 15, 'Sixteen': 16,
'Seventeen': 17, 'Eighteen': 18, 'Nineteen': 19}
num_20_90 = {'Twenty': 20, 'Thirty': 30, 'Forty': 40, 'Fifty': 50, 'Sixty': 60, 'Seventy': 70, 'Eighty': 80,
'Ninety': 90}
'''
# Word -> value lookup tables for English number phrases.
num_0_19 = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6, 'seven': 7, 'eight': 8, 'nine': 9,
            'ten': 10, 'eleven': 11, 'twelve': 12, 'thirteen': 13, 'fourteen': 14, 'fifteen': 15, 'sixteen': 16,
            'seventeen': 17, 'eighteen': 18, 'nineteen': 19}
num_20_90 = {'twenty': 20, 'thirty': 30, 'forty': 40, 'fifty': 50, 'sixty': 60, 'seventy': 70, 'eighty': 80,
             'ninety': 90}


def str_to_n_100(string):
    """Convert an English number phrase below one thousand to an int.

    Bug fix: the original indexed ``string[-1]`` on a *str* (last
    character) whenever the phrase lacked the word 'hundred', so inputs
    such as 'five', 'twenty three' or 'one hundred' raised KeyError.
    Splitting into words first handles every well-formed phrase.
    """
    words = string.split()
    number = 0
    if 'hundred' in words:
        hundred_pos = words.index('hundred')
        number += num_0_19[words[hundred_pos - 1]] * 100
        words = words[hundred_pos + 1:]  # remainder after 'hundred', may be empty
    for word in words:
        if word in num_20_90:
            number += num_20_90[word]
        else:
            number += num_0_19[word]
    return number


def str_to_number(string):
    """Convert an English number phrase below one million to an int.

    Splits on the word 'thousand' (if present); each side is handled by
    str_to_n_100. A phrase ending in 'thousand' (no remainder) is also
    accepted, which the original mishandled.
    """
    words = string.split()
    if 'thousand' in words:
        thousand_pos = words.index('thousand')
        number = str_to_n_100(' '.join(words[:thousand_pos])) * 1000
        if words[thousand_pos + 1:]:
            number += str_to_n_100(' '.join(words[thousand_pos + 1:]))
        return number
    return str_to_n_100(string)
# string = 'seven hundred eighty nine thousand one hundred twenty three'
# string = 'one hundred nineteen thousand six hundred forty five'
# Read a number phrase; lower-cased so it matches the dictionary keys.
string = str(input('Input your number: ')).lower()
print(string)
print(str_to_number(string))
| true |
def heapsort(A):
    """Sort list A in place in O(n log n) using a binary max-heap."""
    heapify(A)
    end = len(A) - 1
    while end > 0:
        # Move the current maximum (root) to its final position, then
        # restore the heap property over the shrunken prefix.
        A[0], A[end] = A[end], A[0]
        end -= 1
        siftDown(A, 0, end)


def heapify(A):
    """Rearrange A into a max-heap, sifting down from the last parent.

    Uses integer division (the original used int((len(A) - 2) / 2), a
    Python-2 float-division leftover; '//' is the idiomatic equivalent).
    """
    start = (len(A) - 2) // 2  # index of the last node with a child; -1 if empty
    while start >= 0:
        siftDown(A, start, len(A) - 1)
        start -= 1


def siftDown(A, start, end):
    """Sift A[start] down until the subtree rooted there is a max-heap.

    Children of node i are 2*i+1 and 2*i+2; `end` is inclusive.
    """
    root = start
    while 2 * root + 1 <= end:
        child = 2 * root + 1
        swap = root
        if A[swap] < A[child]:
            swap = child
        if child + 1 <= end and A[swap] < A[child + 1]:
            swap = child + 1
        if swap == root:
            return  # both children are smaller; heap property holds
        A[root], A[swap] = A[swap], A[root]
        root = swap
57091dd9381b05f3a826b1616bd43107e37f473d | Python | didud1798/Algorithm_Lecture | /yoh/lec01/2442.py | UTF-8 | 96 | 3.171875 | 3 | [] | no_license | n = input()
# NOTE(review): Python 2 code - input() evaluates the typed expression
# (so n is already an int) and `print` is a statement.
for i in range(1, n+1):
    # Right-aligned pyramid: row i holds 2*i-1 stars preceded by n-i spaces.
    Star = '*'*(2*i - 1)
    Blank = ' '*(n-i)
    print Blank + Star
426d3948115a9e4a0433650eaede00a29ee5b967 | Python | manoharendla/PycharmProjects | /YourAge.py | UTF-8 | 126 | 3.359375 | 3 | [] | no_license | __author__ = '619635'
# Prompt for an age (read as a string), add one year, and echo the result.
my_age=input("Enter your age:")
print("After one year, your age will be " + str(int(my_age)+1) )
| true |
230ba8f47384a961ba702315a603ea48cdab677b | Python | wangbaorui/MHAN | /models/SRCNN.py | UTF-8 | 2,926 | 2.75 | 3 | [] | no_license | import torch
from torch import nn
from torch import autograd
from torch.autograd import Variable
from torch.nn import functional as F
import math
import pdb
import time
import numpy as np
from math import sqrt
import argparse
class Net(torch.nn.Module):
    """SRCNN-style network: 9x9 feature extraction, 1x1 non-linear
    mapping, 5x5 reconstruction; padding keeps the spatial size fixed."""

    def __init__(self, num_channels=3, base_filter=128, upscale_factor=2):
        super(Net, self).__init__()
        extract = nn.Conv2d(num_channels, base_filter, kernel_size=9,
                            stride=1, padding=4, bias=True)
        mapping = nn.Conv2d(base_filter, base_filter, kernel_size=1, bias=True)
        rebuild = nn.Conv2d(base_filter, num_channels, kernel_size=5,
                            stride=1, padding=2, bias=True)
        self.layers = torch.nn.Sequential(
            extract,
            nn.ReLU(inplace=True),
            mapping,
            nn.ReLU(inplace=True),
            rebuild,
        )

    def forward(self, x):
        """Run the three-stage pipeline; output shape equals input shape."""
        return self.layers(x)

    def weight_init(self, mean, std):
        """Apply normal_init to every direct sub-module."""
        for key in self._modules:
            normal_init(self._modules[key], mean, std)
def normal_init(m, mean, std):
    """Initialise conv/deconv weights from N(mean, std) and zero the bias.

    Modules that are not Conv2d/ConvTranspose2d are left untouched.
    """
    if not isinstance(m, (nn.ConvTranspose2d, nn.Conv2d)):
        return
    m.weight.data.normal_(mean, std)
    m.bias.data.zero_()
class Timer(object):
    """Accumulating stopwatch: tic() starts a lap, toc() ends it and
    updates the running total, call count and average."""

    def __init__(self):
        self.total_time = 0.
        self.calls = 0
        self.start_time = 0.
        self.diff = 0.
        self.average_time = 0.

    def tic(self):
        # time.time() rather than time.clock(): clock() measured per-thread
        # CPU time and did not normalise for multithreading.
        self.start_time = time.time()

    def toc(self, average=True):
        """Stop the lap; return the running average (default) or this lap."""
        self.diff = time.time() - self.start_time
        self.total_time += self.diff
        self.calls += 1
        self.average_time = self.total_time / self.calls
        return self.average_time if average else self.diff
def count_parameters(net):
    """Print and return the total number of scalar parameters in *net*.

    Args:
        net: a torch.nn.Module.

    Returns:
        The parameter count. The original printed it but returned None;
        returning the value as well is backward compatible and replaces
        the hand-rolled shape product with Tensor.numel().
    """
    total = sum(p.numel() for p in net.parameters())
    print("total parameters:" + str(total))
    return total
def runing_time(net, x):
    # Measure the average forward-pass latency of `net` on input `x`.
    # Requires a CUDA device: both the model and the input are moved to GPU.
    # (Name typo "runing" kept: renaming would break callers.)
    net = net.cuda()
    x = Variable(x.cuda())
    y = net(x)  # warm-up pass before timing
    timer = Timer()
    timer.tic()  # this outer lap is overwritten by the first in-loop tic()
    for i in range(100):
        timer.tic()
        y = net(x)
        timer.toc()
    # total_time is the sum of the 100 per-pass laps; report mean in ms.
    print('Do once forward need {:.3f}ms '.format(timer.total_time*1000/100.0))
parser = argparse.ArgumentParser(description='PyTorch Super Res Example')
parser.add_argument("--scale", type=int, default=4, help="scale size")
if __name__ == '__main__':
    # Benchmark: 30 forward passes on a random (1, 3, 100*scale, 100*scale)
    # input; reports mean wall-clock time per pass and the parameter count.
    opt = parser.parse_args()
    scale = opt.scale  # NOTE(review): unused below; the tensor reads opt.scale directly
    x = torch.rand(1,3,100*opt.scale,100*opt.scale)
    net = Net().cuda()  # requires a CUDA device
    x = x.cuda()
    t0 = time.time()
    for i in range(30):
        out = net(x)
    t = time.time() - t0
    print('average running time: ', t/30)
    count_parameters(net)
    #runing_time(net, x)
51a432390c0a689e974381d1a7b85fb912459baf | Python | berthih/Codewars | /python_kyu/kyu4/sudoku_validator.py | UTF-8 | 590 | 3.359375 | 3 | [] | no_license | from itertools import product
DIGITS = set(range(1, 10))  # every row/column/box must be exactly this set
THREES = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]  # index triplets of the 3x3 bands


def correct(groups):
    """True when every group contains each digit 1-9 exactly once."""
    for group in groups:
        if set(group) != DIGITS:
            return False
    return True


def validSolution(grid):
    """Validate a completed 9x9 sudoku.

    Returns True/False for a well-shaped 9x9 grid, None when the list is
    not 9 rows of 9 entries. Raises AssertionError for non-list input.
    """
    assert isinstance(grid, list)
    if len(grid) != 9:
        return None
    if any(len(row) != 9 for row in grid):
        return None
    boxes = []
    for row_block, col_block in product(THREES, THREES):
        boxes.append([grid[r][c] for r, c in product(row_block, col_block)])
    # Short-circuits exactly like the original rows/columns/boxes chain.
    return correct(grid) and correct(zip(*grid)) and correct(boxes)
| true |
b68be736893d3bc09570ac66352e7b9c8eed145b | Python | lukas-blecher/LaTeX-OCR | /pix2tex/dataset/postprocess.py | UTF-8 | 695 | 2.78125 | 3 | [
"MIT"
] | permissive | import argparse
from tqdm.auto import tqdm
if __name__ == '__main__':
    # Filter a LaTeX label file: keep only lines that contain at least one
    # LaTeX-ish character, rewriting the file (or --output) in place.
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input', required=True, help='input file')
    parser.add_argument('-o', '--output', default=None, help='output file')
    args = parser.parse_args()
    d = open(args.input, 'r').read().split('\n')
    # A line is kept only if it contains one of these LaTeX markers.
    reqs = ['\\', '_', '^', '(', ')', '{', '}']
    deleted = 0
    # Iterating indices in reverse makes the in-place `del` safe.
    for i in tqdm(reversed(range(len(d))), total=len(d)):
        if not any([r in d[i] for r in reqs]):
            del d[i]
            deleted += 1
    print('removed %i lines' % deleted)
    # Default to overwriting the input when no output path was given.
    f = args.output
    if f is None:
        f = args.input
    open(f, 'w').write('\n'.join(d))
| true |
e85d9100cf9d39f86acdd46de80743dbd435b1e4 | Python | sarmabhamidipati/UCD | /Specialist Certificate in Data Analytics Essentials/DataCamp/01-python-data-science-toolbox-part-1/e27_reduce_and_lambda_function.py | UTF-8 | 971 | 4.25 | 4 | [] | no_license | '''
Reduce() and lambda functions
The reduce() function is useful for performing some computation on a list and,
unlike map() and filter(), returns a single value as a result. To use reduce(), you must import it from the functools module.
In this exercise, you will use reduce() and a lambda function that concatenates strings together.
Instructions
Import the reduce function from the functools module.
In the reduce() call, pass a lambda function that takes two string arguments item1 and item2 and
concatenates them; also pass the list of strings, stark. Assign the result to result.
The first argument to reduce() should be the lambda function and the second argument is the list stark.
'''
from functools import reduce

# The Stark children, to be folded into one string.
stark = ['robb', 'sansa', 'arya', 'brandon', 'rickon']

# reduce() repeatedly applies the two-argument lambda, concatenating the
# accumulated string with the next name until a single value remains.
result = reduce(lambda left, right: left + right, stark)

print(result)
| true |
e339b99866b05d4256b240cc72d54cedb13eb5d2 | Python | I-am-Fine-1994/DeepLearningCode | /pycode/TensorFlow/Practice/simline5.py | UTF-8 | 2,881 | 2.6875 | 3 | [] | no_license | import tensorflow as tf
import numpy as np
import os.path
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
#~ Adds epoch support (num_epochs in the reader below).
#~ Note: if num_epochs is not None, this function creates local counter epochs.
#~ Use local_variables_initializer() to initialize local variables.
x = np.random.uniform(0, 5, [10, 4, 4, 2])  # 10 samples of shape (4, 4, 2)
y = 3*x + 10  # linear targets
#~ Write the data into a TFRecords file
def convert_to(x, y, name):
    # Serialises each (x[i], y[i]) pair as one tf.train.Example with two
    # raw-bytes features, written to <cwd>/<name>.tfrecords.
    #~ width = 4
    #~ height = 4
    #~ depth = 2
    #~ Build the output file name
    filename = os.path.join(os.getcwd(), name+".tfrecords")
    print("Writing", filename)
    #~ Create the writer object
    writer = tf.python_io.TFRecordWriter(filename)
    #~ Write one Example per sample
    for index in range(len(x)):
        x_data = x[index].tostring()
        y_data = y[index].tostring()
        example = tf.train.Example(features=tf.train.Features(feature={\
        #~ 'width': tf.train.Feature(int64_list=tf.train.Int64List(value=[width])),\
        #~ 'height': tf.train.Feature(int64_list=tf.train.Int64List(value=[height])),\
        #~ 'depth': tf.train.Feature(int64_list=tf.train.Int64List(value=[depth])),\
        'x_data': tf.train.Feature(bytes_list=tf.train.BytesList(value=[x_data])),\
        'y_data': tf.train.Feature(bytes_list=tf.train.BytesList(value=[y_data]))}))
        writer.write(example.SerializeToString())
    writer.close()
convert_to(x, y, "simline4")
#~ Read and parse the data back
def read_and_decode(filename):
    # Returns one decoded (x, y) sample pair of tensors, each reshaped to
    # (4, 4, 2) float64, read from the given TFRecords file.
    print("Reading", filename)
    #~ To read data the file names must first be turned into a queue
    #~ (num_epochs=5 creates a local epoch counter - see note above).
    filename_queue = tf.train.string_input_producer([filename], num_epochs=5)
    #~ Create the reader object
    reader = tf.TFRecordReader()
    #~ The reader returns (key, value) pairs; key is presumably the record
    #~ name and value the serialised Example - TODO confirm.
    _, se_exp = reader.read(filename_queue)
    features = tf.parse_single_example(se_exp, features={\
        'x_data': tf.FixedLenFeature([], tf.string),\
        'y_data': tf.FixedLenFeature([], tf.string)})
    print("Decoding", filename)
    #~ Decode the raw bytes back into float64 tensors and restore shapes
    x_data = tf.decode_raw(features['x_data'], tf.float64)
    x_data = tf.reshape(x_data, [4, 4, 2])
    y_data = tf.decode_raw(features['y_data'], tf.float64)
    y_data = tf.reshape(y_data, [4, 4, 2])
    return x_data, y_data
x_data, y_data = read_and_decode("simline4.tfrecords")
# Shuffle-batch the decoded samples, two at a time.
x_batch, y_batch = tf.train.shuffle_batch([x_data, y_data], \
    batch_size=2, capacity=10, min_after_dequeue=2)
with tf.Session() as sess:
    # num_epochs in string_input_producer uses a *local* variable, hence
    # local_variables_initializer rather than global.
    sess.run(tf.local_variables_initializer())
    #~ Create a coordinator to manage the queue threads
    coord = tf.train.Coordinator()
    #~ Start the queue runners that feed the filename queue
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    val, l = sess.run([x_batch, y_batch])
    print([val, l])
    coord.request_stop()
    coord.join(threads)
| true |
261b7e3ba722d3cc37d9cedc92edfc3a4eee73b7 | Python | kyomukyomupurin/LINE_bot | /contest_info.py | UTF-8 | 1,545 | 2.640625 | 3 | [] | no_license | import requests
import json
import datetime
import time
contest_url = 'https://codeforces.com/api/contest.list'
line_url = 'https://notify-api.line.me/api/notify'
num_future_contest = 20
def convert_time(timestamp: int) -> str:
    """Render a duration in seconds as 'D day(s) H hour(s) M minute(s).'

    Left-over seconds below one minute are discarded; each unit word is
    pluralised unless its count is exactly 1.
    """
    days, remainder = divmod(timestamp, 60 * 60 * 24)
    hours, remainder = divmod(remainder, 60 * 60)
    minutes = remainder // 60
    day_word = ' day ' if days == 1 else ' days '
    hour_word = ' hour ' if hours == 1 else ' hours '
    minute_word = ' minute.' if minutes == 1 else ' minutes.'
    return str(days) + day_word + str(hours) + hour_word + str(minutes) + minute_word
if __name__ == '__main__':
    # Fetch the Codeforces contest list and push the upcoming contests to
    # a LINE Notify channel, earliest start first.
    r_json = requests.get(contest_url).json()
    messages = []
    for contest in r_json['result'][0:num_future_contest]:
        if (contest['phase'] == 'BEFORE'):
            start_unix_time = contest['startTimeSeconds']
            start_time = datetime.datetime.fromtimestamp(start_unix_time)
            # relativeTimeSeconds is negative before a contest starts, so
            # negate it to get the time remaining.
            time_before_start = -contest['relativeTimeSeconds']
            message = contest['name'] + ' start at ' + str(start_time) + ', in ' + convert_time(time_before_start)
            messages.append(message)
    # API lists newest first; reverse so the soonest contest is sent first.
    messages.reverse()
    # The LINE Notify token is kept out of source control in token.txt.
    line_token = open('./token.txt', 'r').read()
    headers = {'Authorization': 'Bearer ' + line_token}
    payload = {'message': 'Comming contest'}
    res = requests.post(line_url, headers=headers, params=payload)
    for contest in messages:
        payload = {'message': contest}
        res = requests.post(line_url, headers=headers, params=payload)
        time.sleep(1)  # pause between notifications
35e385503f75b0f8b745d04e39301f91d9b7e565 | Python | calebespinoza/python-opp-tasks | /task_3/machine.py | UTF-8 | 2,733 | 3.859375 | 4 | [] | no_license |
from ticket import Ticket
from timer import Timer
from payment import Payment
class Machine:
    """Offers functionality to register the entrance, exit and payment
    when someone parks a vehicle.
    """
    def __init__(self):
        """Machine constructor.

        Wires together the ticket registry, the clock and the payment
        calculator (0.233333333 is the rate passed to Payment; its unit
        is undocumented here - presumably cost per minute, TODO confirm).
        """
        self.__ticket = Ticket()
        self.__timer = Timer()
        self.__payment = Payment(0.233333333)
    def register_entrance (self, car_id, time_in):
        """Create a new ticket via the ticket registry.

        Args:
            car_id (int): The ID of the vehicle (Placa).
            time_in (string): The time the vehicle entered the parking.
        """
        self.__ticket.register_ticket(car_id, time_in)
    def register_exit (self, ticket, time_exit):
        """Record the time a vehicle leaves the parking.

        Args:
            ticket (dictionary): The vehicle's ticket info.
            time_exit (string): The time the vehicle is leaving.
        """
        self.__ticket.set_time_exit(ticket, time_exit)
    def print_list_cars(self):
        """Print all tickets created so far."""
        self.__ticket.print_all_tickets()
    def payment(self, car_id, time_exit):
        """Look up the ticket; register the exit and payment, then print.

        An unknown car_id falls through to the 'Unregistered vehicle'
        ticket (search_ticket returning None).
        """
        ticket = self.__ticket.search_ticket(car_id)
        if (ticket == None):
            self.print_ticket(ticket)
        else:
            self.register_exit(ticket, time_exit)
            self.__payment.register_payment(ticket, self.__ticket)
            self.print_ticket(ticket)
    def print_ticket(self, ticket):
        """Print either an 'Unregistered vehicle' notice (ticket is None)
        or the detailed paid-parking receipt.
        """
        if (ticket == None):
            text = """
        ----------------------------
             Unregistered vehicle
        ----------------------------
        """
        else:
            today = self.__timer.get_date_today()
            text = """
        ----------------------------
             PAID PARKING
        ----------------------------
        Date: {0}
        From: {1}
        To: {2}
        Paid: Bs. {3}
        ----------------------------
        Thank you and lucky road!
        ----------------------------
        """.format(today, self.__ticket.get_time_in(ticket), self.__ticket.get_time_exit(ticket), self.__ticket.get_cost(ticket))
        print(text)
| true |
2f480cc2b4b4bd631a628aa43defbbaf17a51cad | Python | dfdf/hello-world | /Python_Codes/verify_pair.py | UTF-8 | 1,541 | 2.59375 | 3 | [] | no_license | from shutil import copyfile
from os import listdir
from os.path import isfile, join
import os
import numpy as np
# Pair-matching script: for every non-CADASTRO image whose filename
# (split on '_') shares field 1 with another file but differs in field 2,
# move the pair into HIT_POSITIVO_1 / HIT_POSITIVO_2; unmatched files are
# deleted. NOTE(review): destructive (os.rename / os.remove) and the
# final two prints use Python 2 statement syntax.
path = "C:\\Users\\dfdf\\Documents\\Imagens_DATABASE\\BENCHMARK\\faces\\BENCHMARK_1\\HIT\\"
path_dest = "C:\\Users\\dfdf\\Documents\\Imagens_DATABASE\\BENCHMARK\\faces\\BENCHMARK_1\\HIT_POSITIVO_1\\"
path_dest_2 = "C:\\Users\\dfdf\\Documents\\Imagens_DATABASE\\BENCHMARK\\faces\\BENCHMARK_1\\HIT_POSITIVO_2\\"
aux = listdir(path)
counter = 0
for mypath in aux:
    match = False
    src = path+mypath
    # If the file was already copied (moved away), skip it.
    if(not os.path.isfile(path+mypath)):
        continue
    list = mypath.split("_")  # NOTE(review): shadows the builtin `list`
    if(list[0] == "CADASTRO"):
        continue
    aux_2 = listdir(path)
    for i in aux_2:
        i = i.split("_")
        if(i[0] == "CADASTRO"):
            continue
        counter+=1
        # Copy (move) the image pair when fields match as required.
        dest = path_dest + "_".join(list)
        if(list[1] == i[1] and list[2] != i[2]):
            #copyfile(src, dest)
            print(src)
            print(dest)
            os.rename(src, dest)
            src = path+"_".join(i)
            dest = path_dest_2 + "_".join(i)
            print(src)
            print(dest)
            os.rename(src, dest)
            match = True
    if(match):
        print("OK");
    else:
        # No partner found: delete the file and re-list the directory.
        counter +=1
        os.remove(path+"_".join(list))
        aux_2 = listdir(path)
        print("FALSE")
        print list
print counter
| true |
163c0b9bfdb207e1d78f8ed1d1e83b97d61ecd0c | Python | DavidCastilloAlvarado/PPO_reinforcement_learning | /PPO_pendulum.py | UTF-8 | 5,982 | 3.015625 | 3 | [
"Apache-2.0"
] | permissive | """
A simple version of Proximal Policy Optimization (PPO)
Based on:
1. [https://arxiv.org/abs/1707.02286]
2. [https://arxiv.org/abs/1707.06347]
View more on this tutorial website: https://morvanzhou.github.io/tutorials
"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import gym
EP_MAX = 600   # number of training episodes
EP_LEN = 200   # steps per episode
GAMMA = 0.9    # reward discount factor
A_LR = 0.0001  # actor learning rate
C_LR = 0.0002  # critic learning rate
BATCH = 64     # steps collected between PPO updates
A_UPDATE_STEPS = 20  # actor gradient steps per update
C_UPDATE_STEPS = 20  # critic gradient steps per update
S_DIM, A_DIM = 3, 1  # Pendulum-v0 state and action dimensions
METHOD = dict(name='clip', epsilon=0.2) # Clipped surrogate objective, find this is better
# epsilon=0.2 is in the paper
class PPO(object):
    # TF1 (graph-mode) implementation of PPO with the clipped surrogate
    # objective: a critic estimating V(s) and two actor policies (current
    # `pi` and frozen `oldpi`) over a Gaussian action distribution.
    def __init__(self):
        self.sess = tf.Session()
        self.tfs = tf.placeholder(tf.float32, [None, S_DIM], 'state')
        self.tfa = tf.placeholder(tf.float32, [None, A_DIM], 'action')
        self.tfadv = tf.placeholder(tf.float32, [None, 1], 'advantage')
        # CRITIC #######################################
        with tf.variable_scope('critic'):
            l1 = tf.layers.dense(self.tfs, 100, tf.nn.relu, name='layer1-critic')
            self.v = tf.layers.dense(l1, 1, name = 'V_layer')
        # Critic training: minimise squared advantage (TD error vs
        # discounted return).
        with tf.variable_scope('ctrain'):
            self.tfdc_r = tf.placeholder(tf.float32, [None, 1], 'discounted_r')
            self.advantage = self.tfdc_r - self.v
            self.closs = tf.reduce_mean(tf.square(self.advantage))
            self.ctrain_op = tf.train.AdamOptimizer(C_LR).minimize(self.closs) # instead of SGD
        # ACTOR ########################################
        # Current policy
        pi, pi_params = self._build_anet('pi', trainable=True)
        with tf.variable_scope('sample_action'):
            self.sample_op = tf.squeeze(pi.sample(1), axis=0)  # choosing action
        # Hold (frozen) policy used as the PPO reference distribution
        oldpi, oldpi_params = self._build_anet('oldpi', trainable=False)
        with tf.variable_scope('update_oldpi'):  # copies the layer weights from pi into oldpi
            self.update_oldpi_op = [oldp.assign(p) for p, oldp in zip(pi_params, oldpi_params)]
        # PPO clipped-surrogate loss
        with tf.variable_scope('loss'):
            with tf.variable_scope('surrogate_pp'):
                ratio = pi.prob(self.tfa) / oldpi.prob(self.tfa)
                surr = ratio * self.tfadv
            # Negated because we maximise the surrogate via a minimiser.
            self.aloss = -tf.reduce_mean(tf.minimum(
                surr,
                tf.clip_by_value(ratio, 1.-METHOD['epsilon'], 1.+METHOD['epsilon'])*self.tfadv))
        # Actor training op
        with tf.variable_scope('atrain'):
            self.atrain_op = tf.train.AdamOptimizer(A_LR).minimize(self.aloss)
        ################################################
        # Save the graph definition into the log folder (for TensorBoard)
        tf.summary.FileWriter("log/", self.sess.graph)
        self.sess.run(tf.global_variables_initializer())
    def update(self, s, a, r):
        # Sync oldpi <- pi, then take several gradient steps on each net.
        self.sess.run(self.update_oldpi_op)
        adv = self.sess.run(self.advantage, {self.tfs: s, self.tfdc_r: r})
        # update actor
        [self.sess.run(self.atrain_op, {self.tfs: s, self.tfa: a, self.tfadv: adv}) for _ in range(A_UPDATE_STEPS)]
        # update critic
        [self.sess.run(self.ctrain_op, {self.tfs: s, self.tfdc_r: r}) for _ in range(C_UPDATE_STEPS)]
    def _build_anet(self, name, trainable):  # builds the current & hold policy networks
        with tf.variable_scope(name):
            l1 = tf.layers.dense(self.tfs, 100, tf.nn.relu, trainable=trainable)
            mu = 2 * tf.layers.dense(l1, A_DIM, tf.nn.tanh, trainable=trainable, name = 'mu_'+name)
            sigma = tf.layers.dense(l1, A_DIM, tf.nn.softplus, trainable=trainable,name ='sigma_'+name )
            norm_dist = tf.distributions.Normal(loc=mu, scale=sigma)  # loc is the mean
            params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)  # collects the weights of layers l1, mu/2, sigma
        return norm_dist, params
    def choose_action(self, s):
        s = s[np.newaxis, :]
        a = self.sess.run(self.sample_op, {self.tfs: s})[0]
        return np.clip(a, -2, 2)  # clamps every component of 'a' into [-2, 2] (Pendulum torque range)
    def get_v(self, s):
        if s.ndim < 2: s = s[np.newaxis, :]
        return self.sess.run(self.v, {self.tfs: s})[0, 0]  # critic output || V = learned state-value function
env = gym.make('Pendulum-v0').unwrapped
ppo = PPO()
all_ep_r = []
for ep in range(EP_MAX):
    s = env.reset()
    buffer_s, buffer_a, buffer_r = [], [], []
    ep_r = 0
    for t in range(EP_LEN):    # in one episode
        env.render()
        a = ppo.choose_action(s)
        s_, r, done, _ = env.step(a)  # observation, reward, done, info || 'a' is torque
        buffer_s.append(s)
        buffer_a.append(a)
        buffer_r.append((r+8)/8)    # normalize reward, found to be useful
        #print(r)
        s = s_
        ep_r += r
        # update ppo every BATCH steps (and at episode end)
        if (t+1) % BATCH == 0 or t == EP_LEN-1:
            v_s_ = ppo.get_v(s_)  # critic's value estimate for the bootstrap state s_
            # V = learned state-value function
            # Build discounted returns backwards from the bootstrap value.
            discounted_r = []
            for r in buffer_r[::-1]:
                v_s_ = r + GAMMA * v_s_
                discounted_r.append(v_s_)
            discounted_r.reverse()
            bs, ba, br = np.vstack(buffer_s), np.vstack(buffer_a), np.array(discounted_r)[:, np.newaxis]
            buffer_s, buffer_a, buffer_r = [], [], []
            ppo.update(bs, ba, br)  # train the critic and the actor (states, actions, discounted_r)
    # Exponential moving average of episode reward for smoother logging.
    if ep == 0: all_ep_r.append(ep_r)
    else: all_ep_r.append(all_ep_r[-1]*0.9 + ep_r*0.1)
    print(
        'Ep: %i' % ep,
        "|Ep_r: %i" % ep_r,
        # METHOD['lam'] only exists for the kl_pen variant; with
        # name='clip' this conditional always yields ''.
        ("|Lam: %.4f" % METHOD['lam']) if METHOD['name'] == 'kl_pen' else '',
    )
plt.plot(np.arange(len(all_ep_r)), all_ep_r)
plt.xlabel('Episode');plt.ylabel('Moving averaged episode reward');plt.show()
| true |
41d6420d1a8811be137ff133dfdf40295706cdfd | Python | pelson/namehash | /run_test.py | UTF-8 | 785 | 2.59375 | 3 | [
"BSD-3-Clause"
] | permissive | from namehash import *
def roundtrip(n, n_words=3):
    # Encode n into an n_words name-hash and decode it back; raises
    # ValueError showing the full chain (n -> hash -> decoded -> re-hash)
    # when the round trip is not the identity. Returns the hash on success.
    hash = hash = encode(n, n_words) if False else encode(n, n_words)
    actual = decode(hash)
    if n != actual:
        next_hash = encode(actual, n_words)
        raise ValueError('{} -> {} -> {} -> {}'.format(n, hash, actual, next_hash))
    return hash
if __name__ == '__main__':
    # Smoke test: round-trip a spread of values (including consecutive
    # boundary values around 42343030), then print a few known mappings.
    roundtrip(0)
    roundtrip(1)
    roundtrip(2)
    roundtrip(4519)
    roundtrip(2012000)
    roundtrip(25161700)
    roundtrip(42343029)
    roundtrip(42343030)
    roundtrip(42343031)
    print(encode(25161700))
    print(decode('cooing-smooth-logic'))
    print(adj_combo_dim_sizes)
    print(encode(25161792))
    print(decode('quiet-yellow-behest'))
    print(encode(25161793))
    print(decode('quiet-yellow-week'))
    # for i in range(1000):
    #     encode(i)
| true |
6a1507f178533e329fb6c664f49e757413a8f61f | Python | PakornChaenglew/6230403882-oop-labs | /pakorn-6230403882-lab6/Prob4.py | UTF-8 | 334 | 2.5625 | 3 | [] | no_license | with open("kku2.txt", 'w', encoding='utf8') as f:
with open("kku.txt", encoding="utf-8") as o:
pr = o.read()
g = "\nMotto: วิทยา จริยา ปัญญา\nMotto in English: Knowledge"
f.write(pr)
f.write(g)
with open("kku2.txt", encoding='utf8') as e:
p = e.read()
print(p) | true |
baa6b546cd2193e468be6006802a40d0fa8459a4 | Python | simranluthra/Movies-Trailer | /moviestrailer.py | UTF-8 | 3,037 | 2.828125 | 3 | [] | no_license | import fresh_tomatoes
import movies
# Build Movie objects (title, storyline, poster URL, YouTube trailer URL)
# and render them with fresh_tomatoes. NOTE(review): the URL string
# literals are wrapped with backslash continuations *inside* the quotes,
# which embeds spaces/newline-indent into the URLs - verify they resolve.
Beauty_and_the_beast = movies.Movie("Beauty and the beast",
                                    "A story of a bright, beautiful \
and independent young woman",
                                    "http://t2.gstatic.com/images?q=tbn:ANd9 \
GcT7w1Dj-lkTL1CooOXihJ3WBIxyt3K9H6UZ08Kt \
jv8Ba3gLgC7B",
                                    "https://www.youtube.com/watch? \
v=e3Nl_TCQXuw")
# print(Beauty_and_the_beast.storyline)
Guardians_of_the_Galaxy_2 = movies.Movie("Guardians of the Galaxy 2",
                                         "American superhero film \
based on the Marvel Comics",
                                         "http://t3.gstatic.com/images?q=tbn:A \
Nd9GcQXZE44ioeZHmwyJMeBa3rXFyOWT \
Ne3ZnoYUK0tSkdkECpX-v7P",
                                         "https://www.youtube.com/watch? \
v=2cv2ueYnKjg")
# print(Guardians_of_the_Galaxy_2.storyline)
# Guardians_of_the_Galaxy_2.show_trailer()
Annabelle_Creation = movies.Movie("Annabelle Creation",
                                  "supernatural horror film",
                                  "http://t2.gstatic.com/images?q=tbn:A \
Nd9GcSFJXO-WgGmu29wKPaaR2lPco4z3krUU_A \
qiktmz4XtxGvys3Cn",
                                  "https://www.youtube.com/watch? \
v=KisPhy7T__Q")
# print(Wonder_Woman.storyline)
# Wonder_Woman.show_trailer()
Wonder_Woman = movies.Movie("Wonder Woman",
                            "Rise of a Warrior",
                            "http://t1.gstatic.com/images?q=tbn:ANd9GcQcCAO \
mt-FsRsR8GebIzI67qSvdQ2JLYDRLxeAcbH-541fzqq1H",
                            "https://www.youtube.com/watch? \
v=VSB4wGIdDwo&t=103s")
# print(Dangal.storyline)
# Dangal.show_trailer()
Dangal = movies.Movie("Dangal",
                      "sports drama film based on true story",
                      "http://t3.gstatic.com/images?q=tbn:ANd9GcQIXnFlB \
KGWT1ByyIu3qfxX6opQX6BmeeU_qsiE3X8rX9ZRr63r",
                      "https://www.youtube.com/watch?v=x_7YlGv9u1g")
# print(Dear_zindagi.storyline)
# Dear_zindagi.show_trailer()
Dear_zindagi = movies.Movie("Dear zindagi",
                            "Love you zindagi",
                            "http://t2.gstatic.com/images?q=tbn:ANd9GcQlZ4YZ7 \
wNla7O6kQQQ83OAcEDsv1_S1a_euSbenWr_FpkJW_6D",
                            "https://www.youtube.com/watch?v=5DkO7ksXY8E")
# NOTE(review): this list rebinds the name of the imported `movies`
# module; any later movies.Movie(...) call would fail.
movies = [Beauty_and_the_beast, Guardians_of_the_Galaxy_2, Annabelle_Creation,
          Wonder_Woman, Dangal, Dear_zindagi]
fresh_tomatoes.open_movies_page(movies)
| true |
6b851c6b3a93a91d99abcbff0380c18aabc50d83 | Python | chubby-panda/shecodes-python-work | /project_2/part1/part1.py | UTF-8 | 5,590 | 4.09375 | 4 | [] | no_license | import json
from datetime import datetime
# NOTE: constant name is misspelled ("SYBMOL") but kept for compatibility.
DEGREE_SYBMOL = u"\N{DEGREE SIGN}C"


def format_temperature(temp):
    """Format a temperature with one decimal place and a °C suffix.

    Args:
        temp: A number (or numeric string) representing a temperature.

    Returns:
        A string such as '20.0°C'.
    """
    return f"{float(temp):.1f}{DEGREE_SYBMOL}"
def convert_date(iso_string):
    """Convert an ISO formatted date into a human readable format.

    Args:
        iso_string: An ISO date string, e.g. '2021-07-02T07:00:00+08:00'.

    Returns:
        A date formatted like: 'Friday 02 July 2021'.
    """
    parsed = datetime.strptime(iso_string, "%Y-%m-%dT%H:%M:%S%z")
    return parsed.strftime('%A %d %B %Y')
def convert_f_to_c(temp_in_farenheit):
    """Convert a temperature from Fahrenheit to Celsius.

    Args:
        temp_in_farenheit: a number representing a temperature in °F.

    Returns:
        The temperature in °C, rounded to one decimal place.
    """
    return round((temp_in_farenheit - 32) * 5 / 9, 1)
def calculate_mean(total, num_items):
    """Calculate the mean, rounded to one decimal place.

    Args:
        total: the sum of the numbers.
        num_items: how many numbers were counted.

    Returns:
        The mean rounded to 1 decimal place.

    Note:
        True division always produces a float in Python 3, so the
        original's ``elif type(mean) == int`` branch was unreachable and
        its implicit ``return None`` fallthrough for other numeric types
        was a silent failure; both are removed.
    """
    return round(total / num_items, 1)
def process_weather(forecast_file):
    """Converts raw weather data into meaningful text.

    Reads an AccuWeather-style JSON forecast, builds a per-day breakdown
    (min/max temperature, day/night phrases and rain chances), then
    prepends summary lines. Because summaries are prepended one at a
    time, the final order is: overview, lowest, highest, average low,
    average high, followed by the daily sections.

    Args:
        forecast_file: A string representing the file path to a file
            containing raw weather data.

    Returns:
        A string containing the processed and formatted weather data.
    """
    with open(forecast_file) as json_file:
        json_data = json.load(json_file)
    # Create empty variable to store output
    forecast_data = f""
    # Dicts keyed by formatted date, holding temps in °C for the summary.
    min_temps = {}
    max_temps = {}
    for day in json_data["DailyForecasts"]:
        # Get Date
        date = convert_date(day["Date"])
        forecast_data += f"\n-------- {date} --------\n"
        # Get Minimum Temperature (values arrive in Fahrenheit)
        min_temp = convert_f_to_c(day["Temperature"]["Minimum"]["Value"])
        min_temps.update({date: min_temp})
        min_temp = format_temperature(min_temp)
        min_temp_string = "Minimum Temperature:"
        # :<21 left-aligns the label in a 21-character column.
        forecast_data += f"{min_temp_string:<21}{min_temp}\n"
        # Get Maximum Temperature
        max_temp = convert_f_to_c(day["Temperature"]["Maximum"]["Value"])
        max_temps.update({date: max_temp})
        max_temp = format_temperature(max_temp)
        max_temp_string = "Maximum Temperature:"
        forecast_data += f"{max_temp_string:<21}{max_temp}\n"
        # Get Daytime Long Phrase
        daytime = day["Day"]["LongPhrase"]
        forecast_data += f"Daytime: {daytime}\n"
        # Get Daytime Chance of Rain
        rain_chance_day = day["Day"]["RainProbability"]
        rain_chance_day = str(rain_chance_day) + "%"
        rain_chance_day_string = " Chance of rain:"
        forecast_data += f"{rain_chance_day_string:<21}{rain_chance_day}\n"
        # Get Nighttime Long Phrase
        nighttime = day["Night"]["LongPhrase"]
        forecast_data += f"Nighttime: {nighttime}\n"
        # Get Nighttime Chance of Rain
        rain_chance_night = day["Night"]["RainProbability"]
        rain_chance_night = str(rain_chance_night) + "%"
        rain_chance_night_string = " Chance of rain:"
        forecast_data += f"{rain_chance_night_string:<21}{rain_chance_night}\n"
    # Get the average high
    max_temps_total = 0
    max_temps_days = 0
    for days, temps in max_temps.items():
        max_temps_total += temps
        max_temps_days += 1
    average_high = calculate_mean(max_temps_total, max_temps_days)
    average_high = format_temperature(average_high)
    forecast_data = f" The average high this week is {average_high}.\n" + forecast_data
    # Get the average low
    min_temps_total = 0
    min_temps_days = 0
    for days, temps in min_temps.items():
        min_temps_total += temps
        min_temps_days += 1
    average_low = calculate_mean(min_temps_total, min_temps_days)
    average_low = format_temperature(average_low)
    forecast_data = f" The average low this week is {average_low}.\n" + forecast_data
    # Get the highest temperature for summary
    # (list comprehension picks the first date whose max equals the peak)
    highest_temp = max(max_temps.values())
    highest_day = [day for day in max_temps if max_temps[day] == highest_temp][0]
    highest_temp = format_temperature(highest_temp)
    forecast_data = f" The highest temperature will be {highest_temp}, and will occur on {highest_day}.\n" + forecast_data
    # Get the lowest temperature for summary
    lowest_temp = min(min_temps.values())
    lowest_day = [day for day in min_temps if min_temps[day] == lowest_temp][0]
    lowest_temp = format_temperature(lowest_temp)
    forecast_data = f" The lowest temperature will be {lowest_temp}, and will occur on {lowest_day}.\n" + forecast_data
    # Get number of days - overview
    num_days = len(json_data["DailyForecasts"])
    forecast_data = f"{num_days} Day Overview\n" + forecast_data
    # Add an extra new line at the end
    forecast_data += "\n"
    # Final Return Statement
    return forecast_data


if __name__ == "__main__":
    print(process_weather("data/forecast_5days_b.json"))
| true |
f305a623f18a83ecfa29c707f3a9a5a6815ad83a | Python | premkashyap/PythonTrilogyPluralsight | /PythonGettingStarted/Module7/platform_specific.py | UTF-8 | 432 | 2.546875 | 3 | [] | no_license | try:
import mscvrt
def getkey():
return mscvrt.getch()
except:
import sys
import tty
import termios
def getkey():
fd =sys.stdin.fileno()
original_attributs = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, original_attributs)
return ch
| true |
8804b3a349c4d6553b17d8b53194c2a38e9a0605 | Python | bazelbuild/rules_apple | /tools/bundletool/bundletool_test.py | UTF-8 | 12,583 | 2.578125 | 3 | [
"Apache-2.0"
] | permissive | # Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Bundler."""
import io
import os
import re
import shutil
import stat
import tempfile
import unittest
import zipfile
from tools.bundletool import bundletool
def _run_bundler(control):
    """Helper function that runs Bundler with the given control struct.

    A BytesIO object is inserted under the control's "output" key before
    bundling and returned afterwards; it holds the binary contents of the
    ZIP file that was created, which callers can reopen and inspect.

    Args:
        control: The control struct to pass to Bundler. See the module doc
            for the bundletool module for a description of this format.

    Returns:
        The BytesIO object containing the binary data for a bundled ZIP file.
    """
    zip_bytes = io.BytesIO()
    control['output'] = zip_bytes
    bundletool.Bundler(control).run()
    return zip_bytes
class BundlerTest(unittest.TestCase):
    """Tests for bundletool.Bundler covering file and ZIP merging behavior."""

    def setUp(self):
        super().setUp()
        self._scratch_dir = tempfile.mkdtemp('bundlerTestScratch')

    def tearDown(self):
        super().tearDown()
        shutil.rmtree(self._scratch_dir)

    def _scratch_file(self, name, content='', executable=False):
        """Creates a scratch file with the given name.

        The scratch file's path, which is returned by this function, can then
        be passed into the bundler as one of its `bundle_merge_files`.

        Args:
            name: The name of the file.
            content: The content to write into the file. The default is empty.
            executable: True if the file should be executable, False otherwise.

        Returns:
            The absolute path to the file.
        """
        path = os.path.join(self._scratch_dir, name)
        dirname = os.path.dirname(path)
        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        with open(path, 'w') as f:
            f.write(content)
        if executable:
            st = os.stat(path)
            os.chmod(path, st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
        return path

    def _scratch_zip(self, name, *entries):
        """Creates a scratch ZIP file with the given entries.

        The scratch ZIP's path, which is returned by this function, can then
        be passed into the bundler as one of its `bundle_merge_zips` or
        `root_merge_zips`.

        Args:
            name: The name of the ZIP file.
            *entries: A list of archive-relative paths that will represent
                empty files in the ZIP. If a path entry begins with a "*", it
                will be made executable. If a path entry contains a colon, the
                text after the colon will be used as the content of the file.

        Returns:
            The absolute path to the ZIP file.
        """
        path = os.path.join(self._scratch_dir, name)
        with zipfile.ZipFile(path, 'w') as z:
            for entry in entries:
                executable = entry.startswith('*')
                entry_without_content, _, content = entry.partition(':')
                zipinfo = zipfile.ZipInfo(entry_without_content.rpartition('*')[-1])
                zipinfo.compress_type = zipfile.ZIP_STORED
                # Unix rw-r--r-- permissions and S_IFREG (regular file).
                zipinfo.external_attr = 0o100644 << 16
                if executable:
                    # NOTE: replaces (rather than ORs into) the attrs above;
                    # only the execute bits matter to _assert_zip_contains.
                    zipinfo.external_attr = 0o111 << 16
                z.writestr(zipinfo, content)
        return path

    def _assert_zip_contains(self, zip_file, entry, executable=False,
                             compressed=False):
        """Asserts that a `ZipFile` has an entry with the given path.

        This is a convenience function that catches the `KeyError` that would
        be raised if the entry was not found and turns it into a test failure.

        Args:
            zip_file: The `ZipFile` object.
            entry: The archive-relative path to verify.
            executable: The expected value of the executable bit (True or False).
            compressed: If the entry should be compressed (True or False).
        """
        try:
            zipinfo = zip_file.getinfo(entry)
            if executable:
                self.assertEqual(
                    0o111, zipinfo.external_attr >> 16 & 0o111,
                    'Expected %r to be executable, but it was not' % entry)
            else:
                self.assertEqual(
                    0, zipinfo.external_attr >> 16 & 0o111,
                    'Expected %r not to be executable, but it was' % entry)
            # BUG FIX: assertEquals is a deprecated alias removed in
            # Python 3.12; use assertEqual.
            if compressed:
                self.assertEqual(
                    zipfile.ZIP_DEFLATED, zipinfo.compress_type,
                    'Expected %r to be compressed, but it was not' % entry)
            else:
                self.assertEqual(
                    zipfile.ZIP_STORED, zipinfo.compress_type,
                    'Expected %r not to be compressed, but it was' % entry)
        except KeyError:
            self.fail('Bundled ZIP should have contained %r, but it did not' % entry)

    def test_bundle_merge_files(self):
        """Plain files land at their destination paths inside the bundle."""
        out_zip = _run_bundler({
            'bundle_path': 'Payload/foo.app',
            'bundle_merge_files': [
                {'src': self._scratch_file('foo.txt'), 'dest': 'foo.txt'},
                {'src': self._scratch_file('bar.txt'), 'dest': 'bar.txt'},
            ]
        })
        with zipfile.ZipFile(out_zip, 'r') as z:
            self._assert_zip_contains(z, 'Payload/foo.app/foo.txt')
            self._assert_zip_contains(z, 'Payload/foo.app/bar.txt')

    def test_bundle_merge_files_with_executable(self):
        """The 'executable' flag (or source mode bit) is honored per file."""
        out_zip = _run_bundler({
            'bundle_path': 'Payload/foo.app',
            'bundle_merge_files': [
                {'src': self._scratch_file('foo.exe'), 'dest': 'foo.exe',
                 'executable': True},
                {'src': self._scratch_file('bar.txt'), 'dest': 'bar.txt',
                 'executable': False},
                {'src': self._scratch_file('baz.txt', executable=True),
                 'dest': 'baz.txt', 'executable': False},
            ]
        })
        with zipfile.ZipFile(out_zip, 'r') as z:
            self._assert_zip_contains(z, 'Payload/foo.app/foo.exe', True)
            self._assert_zip_contains(z, 'Payload/foo.app/bar.txt', False)
            self._assert_zip_contains(z, 'Payload/foo.app/baz.txt', True)

    def test_bundle_merge_files_with_renaming(self):
        """A 'dest' different from the source name renames the file."""
        out_zip = _run_bundler({
            'bundle_path': 'Payload/foo.app',
            'bundle_merge_files': [
                {'src': self._scratch_file('foo.txt'), 'dest': 'renamed1'},
                {'src': self._scratch_file('bar.txt'), 'dest': 'renamed2'},
            ]
        })
        with zipfile.ZipFile(out_zip, 'r') as z:
            self._assert_zip_contains(z, 'Payload/foo.app/renamed1')
            self._assert_zip_contains(z, 'Payload/foo.app/renamed2')

    def test_bundle_merge_files_with_directories(self):
        """Merging a directory source copies its tree under 'dest'."""
        a_txt = self._scratch_file('a.txt')
        root = os.path.dirname(a_txt)
        self._scratch_file('b.txt')
        self._scratch_file('c/d.txt')
        self._scratch_file('c/e/f.txt', executable=True)
        out_zip = _run_bundler({
            'bundle_path': 'Payload/foo.app',
            'bundle_merge_files': [{'src': root, 'dest': 'x/y/z'}],
        })
        with zipfile.ZipFile(out_zip, 'r') as z:
            self._assert_zip_contains(z, 'Payload/foo.app/x/y/z/a.txt')
            self._assert_zip_contains(z, 'Payload/foo.app/x/y/z/b.txt')
            self._assert_zip_contains(z, 'Payload/foo.app/x/y/z/c/d.txt')
            self._assert_zip_contains(z, 'Payload/foo.app/x/y/z/c/e/f.txt', True)

    def test_bundle_merge_zips(self):
        """ZIP contents are merged relative to the bundle path."""
        foo_zip = self._scratch_zip('foo.zip',
                                    'foo.bundle/img.png', 'foo.bundle/strings.txt')
        bar_zip = self._scratch_zip('bar.zip',
                                    'bar.bundle/img.png', 'bar.bundle/strings.txt')
        out_zip = _run_bundler({
            'bundle_path': 'Payload/foo.app',
            'bundle_merge_zips': [
                {'src': foo_zip, 'dest': '.'},
                {'src': bar_zip, 'dest': '.'},
            ]
        })
        with zipfile.ZipFile(out_zip, 'r') as z:
            self._assert_zip_contains(z, 'Payload/foo.app/foo.bundle/img.png')
            self._assert_zip_contains(z, 'Payload/foo.app/foo.bundle/strings.txt')
            self._assert_zip_contains(z, 'Payload/foo.app/bar.bundle/img.png')
            self._assert_zip_contains(z, 'Payload/foo.app/bar.bundle/strings.txt')

    def test_bundle_merge_zips_propagates_executable(self):
        """Executable bits inside merged ZIPs survive the merge."""
        foo_zip = self._scratch_zip('foo.zip', '*foo.bundle/some.exe')
        out_zip = _run_bundler({
            'bundle_path': 'Payload/foo.app',
            'bundle_merge_zips': [{'src': foo_zip, 'dest': '.'}],
        })
        with zipfile.ZipFile(out_zip, 'r') as z:
            self._assert_zip_contains(z, 'Payload/foo.app/foo.bundle/some.exe', True)

    def test_root_merge_zips(self):
        """root_merge_zips entries land at the archive root, not the bundle."""
        support_zip = self._scratch_zip('support.zip', 'SomeSupport/some.dylib')
        out_zip = _run_bundler({
            'bundle_path': 'Payload/foo.app',
            'root_merge_zips': [{'src': support_zip, 'dest': '.'}],
        })
        with zipfile.ZipFile(out_zip, 'r') as z:
            self._assert_zip_contains(z, 'SomeSupport/some.dylib')

    def test_root_merge_zips_with_different_destination(self):
        """A non-'.' dest prefixes root-merged entries with that path."""
        support_zip = self._scratch_zip('support.zip', 'some.dylib')
        out_zip = _run_bundler({
            'bundle_path': 'Payload/foo.app',
            'root_merge_zips': [{'src': support_zip, 'dest': 'SomeSupport'}],
        })
        with zipfile.ZipFile(out_zip, 'r') as z:
            self._assert_zip_contains(z, 'SomeSupport/some.dylib')

    def test_root_merge_zips_propagates_executable(self):
        """Executable bits survive a root merge as well."""
        support_zip = self._scratch_zip('support.zip', '*SomeSupport/some.dylib')
        out_zip = _run_bundler({
            'bundle_path': 'Payload/foo.app',
            'root_merge_zips': [{'src': support_zip, 'dest': '.'}],
        })
        with zipfile.ZipFile(out_zip, 'r') as z:
            self._assert_zip_contains(z, 'SomeSupport/some.dylib', True)

    def test_duplicate_files_with_same_content_are_allowed(self):
        """Two sources mapping to the same dest are fine if contents match."""
        foo_txt = self._scratch_file('foo.txt', 'foo')
        bar_txt = self._scratch_file('bar.txt', 'foo')
        out_zip = _run_bundler({
            'bundle_path': 'Payload/foo.app',
            'bundle_merge_files': [
                {'src': foo_txt, 'dest': 'renamed'},
                {'src': bar_txt, 'dest': 'renamed'},
            ]
        })
        with zipfile.ZipFile(out_zip, 'r') as z:
            self._assert_zip_contains(z, 'Payload/foo.app/renamed')

    def test_duplicate_files_with_different_content_raise_error(self):
        """Conflicting contents for the same dest raise BundleConflictError."""
        foo_txt = self._scratch_file('foo.txt', 'foo')
        bar_txt = self._scratch_file('bar.txt', 'bar')
        with self.assertRaisesRegex(
            bundletool.BundleConflictError,
            re.escape(bundletool.BUNDLE_CONFLICT_MSG_TEMPLATE %
                      'Payload/foo.app/renamed')):
            _run_bundler({
                'bundle_path': 'Payload/foo.app',
                'bundle_merge_files': [
                    {'src': foo_txt, 'dest': 'renamed'},
                    {'src': bar_txt, 'dest': 'renamed'},
                ]
            })

    def test_zips_with_duplicate_files_but_same_content_are_allowed(self):
        """Identical entries coming from two merged ZIPs do not conflict."""
        one_zip = self._scratch_zip('one.zip', 'some.dylib:foo')
        two_zip = self._scratch_zip('two.zip', 'some.dylib:foo')
        out_zip = _run_bundler({
            'bundle_path': 'Payload/foo.app',
            'bundle_merge_zips': [
                {'src': one_zip, 'dest': '.'},
                {'src': two_zip, 'dest': '.'},
            ]
        })
        with zipfile.ZipFile(out_zip, 'r') as z:
            self._assert_zip_contains(z, 'Payload/foo.app/some.dylib')

    def test_zips_with_duplicate_files_and_different_content_raise_error(self):
        """Conflicting entries from two merged ZIPs raise BundleConflictError."""
        one_zip = self._scratch_zip('one.zip', 'some.dylib:foo')
        two_zip = self._scratch_zip('two.zip', 'some.dylib:bar')
        with self.assertRaisesRegex(
            bundletool.BundleConflictError,
            re.escape(bundletool.BUNDLE_CONFLICT_MSG_TEMPLATE %
                      'Payload/foo.app/some.dylib')):
            _run_bundler({
                'bundle_path': 'Payload/foo.app',
                'bundle_merge_zips': [
                    {'src': one_zip, 'dest': '.'},
                    {'src': two_zip, 'dest': '.'},
                ]
            })

    def test_compressed_entries(self):
        """With 'compress': True, entries are stored deflated."""
        a_txt = self._scratch_file('a.txt')
        root = os.path.dirname(a_txt)
        out_zip = _run_bundler({
            'bundle_path': 'Payload/foo.app',
            'bundle_merge_files': [{'src': root, 'dest': 'x/y/z'}],
            'compress': True,
        })
        with zipfile.ZipFile(out_zip, 'r') as z:
            self._assert_zip_contains(z, 'Payload/foo.app/x/y/z/a.txt', compressed=True)
# Allow running this test module directly: python bundletool_test.py
if __name__ == '__main__':
    unittest.main()
| true |
b00b5f36378cd82f448c3d139927d8868e3bc47b | Python | Dyr-El/advent_of_code_2017 | /dlofstrom-python/day5.py | UTF-8 | 386 | 3.03125 | 3 | [] | no_license | import sys
input = sys.stdin.read()
input = [int(r) for r in input.split('\n') if r]
#Part 1
i = 0
s = 0
l = list(input)
while i < len(input):
s += 1
l[i] += 1
i += l[i]-1
print "Part 1:", s
#Part 2
i = 0
s = 0
l = list(input)
while i < len(input):
s += 1
lt = l[i]
if l[i] >= 3:
l[i] -= 1
else:
l[i] += 1
i += lt
print "Part 2:", s
| true |
b3470f0904ff60a72fc3a85cb86aa4d7fa4ab1bf | Python | IGS/cvd-scripts | /TIGRFAM_processing/build_custom_TIGRFAM_HMM_LIB.py | UTF-8 | 2,243 | 2.828125 | 3 | [] | no_license | #!/usr/bin/python
# The purpose of this script is to build a custom TIGRFAM HMM LIB after
# extracting the desired TIGRFAM ids using extract_TIGRFAM_IDs_from_IT.py
#
# HOWTO: (python) build_custom_TIGRFAM_HMM_LIB.py path_to_extracted_ids_file path_to_tigrfam_hmm_lib_file
#
# Author: James Matsumura
import sys, os, re
idFile = str(sys.argv[1]) # let the user specify the info file
hmmLibFile = str(sys.argv[2]) # let the user specify the info file
regexForHeader = r"^HMMER"
regexForName = r"^NAME"
regexForId = r"^ACC\s+(.*)$"
regexForFooter = r"^\/\/"
# Declaring all explicitly to elucidate the nomenclature
relevantIdsFile = open(idFile, 'r')
originalHMMLIBFile = open(hmmLibFile, 'r')
outFile = open('./custom_TIGRFAMs_HMM.LIB', 'w')
headerFound = False
nameFound = False
idFound = False
footerFound = False
validEntry = False
foundHeader = ''
foundName = ''
foundId = ''
foundIdValue = ''
relevantIdsList = []
# First, build a set of target IDs to use for lookup
for line in relevantIdsFile:
line = line.rstrip('\n')
relevantIdsList.append(line)
setOfIds = set(relevantIdsList)
# The files format is such that each entry ends with //. Use this as a
# spacer of sorts and print blocks of entries until this is found.
for line in originalHMMLIBFile:
line = line.rstrip('\n')
if(validEntry == True):
if(re.search(regexForFooter, line)):
outFile.write(line+'\n')
validEntry = False
idFound = False
nameFound = False
headerFound = False
else:
outFile.write(line+'\n')
elif(headerFound==True and nameFound==True and idFound==True and validEntry==False):
if(foundIdValue in setOfIds):
outFile.write(headerValue+'\n')
outFile.write(nameValue+'\n')
outFile.write(idValue+'\n')
validEntry = True
else:
idFound = False
nameFound = False
headerFound = False
elif(headerFound==True and nameFound==True and idFound==False):
foundId = re.search(regexForId, line)
if(foundId):
idFound = True
idValue = line
foundIdValue = foundId.group(1)
elif(headerFound==True and nameFound==False):
if(re.search(regexForName, line)):
nameFound = True
nameValue = line
elif(headerFound==False):
if(re.search(regexForHeader, line)):
headerFound = True
headerValue = line
| true |
c244cf50787518aadbaca84df00c384b934c8a06 | Python | deostroll/deohttp | /main.py | UTF-8 | 1,123 | 2.890625 | 3 | [
"MIT"
] | permissive | from deohttp import HttpClient
from machine import Pin
import time
# Hardware wiring notes:
# pin 12 is output - connects led in series with
# 39E resistance
# pin 5 is input pin - in series with a 1k resistor
# in series with 10k resistor which is grounded
# 3.3V output from wemos goes to the switch
# which connects parallel to the two resistors
led12 = Pin(12, Pin.OUT)
led12.off()  # start with the LED off until the switch is sampled
iPin5 = Pin(5, Pin.IN)
def debounce_wrap(fn, p, interval=20):
    """Wrap `fn` so it only fires when pin `p` holds a stable level.

    The returned callback samples the pin `interval` times, one millisecond
    apart. Only when every sample matches the level read at entry is `fn`
    called with that level; a bouncing contact is silently ignored. Any
    arguments the interrupt system passes to the callback are discarded.
    """
    def _debounced(*args, **kwargs):
        level = p.value()
        stable_reads = 0
        for _ in range(interval):
            if p.value() == level:
                stable_reads += 1
            time.sleep(0.001)
        if stable_reads == interval:
            fn(level)
    return _debounced
def change_state(value):
    """Mirror the debounced switch level on the LED and notify the peer."""
    led12.value(value)
    # Dispatch to the matching notifier instead of an if/else branch.
    notify = signal_on if value else signal_off
    notify()
# Trigger on both edges so press and release are each debounced (20 ms)
# and mirrored onto the LED / remote peer.
iPin5.irq(trigger=Pin.IRQ_FALLING | Pin.IRQ_RISING, \
          handler=debounce_wrap(change_state, iPin5, 20) )
def signal_on():
    """Tell the peer at 192.168.4.1 to switch on (fire-and-forget GET)."""
    client = HttpClient('http://192.168.4.1/on')
    client.do_request()
def signal_off():
    """Tell the peer at 192.168.4.1 to switch off (fire-and-forget GET)."""
    client = HttpClient('http://192.168.4.1/off')
    client.do_request()
# Idle forever; all real work happens in the pin-change interrupt handler.
while True:
    time.sleep(1)
3fb51cbaa1fe8b5fa8431dda8d6246adc22aa79b | Python | alexandraback/datacollection | /solutions_5631572862566400_0/Python/mikebot/C.py | UTF-8 | 1,330 | 3.046875 | 3 | [] | no_license | from sys import argv
from os.path import expanduser
from itertools import permutations
# Import the file as a list of lines:
problem = argv[1]
path = expanduser('~/Github/codejam/2016/Round1A/')
file_in = path + problem + '.in.txt'
file_out = path + problem + '.out.txt'
with open(file_in,'rb') as fin:
lines = fin.read().splitlines()
#num_cases = int(lines[0])
with open(file_out,'wb') as fout:
i = 1
casenum = 1
while i < len(lines):
# DO STUFF
num_kids = int(lines[i])
bffs = map(int, lines[i+1].split())
for b in xrange(len(bffs)):
bffs[b] -= 1
print 'BFFS: ', bffs
# Find the kids who are noone's BFF. Two of them will be the "ends" of the circle.
if len(set(bffs)) == num_kids:
answer = num_kids
print answer
else:
answer = 0
found_any = False
for x in xrange(3,num_kids+1):
for p in permutations(range(num_kids),x):
#print p
failed = False
for j in xrange(x):
left = (j-1)%x
right = (j+1)%x
if p[left] != bffs[p[j]] and p[right] != bffs[p[j]]:
failed = True
break
if not failed:
answer = x
found_any = True
print x, p
break
if x > answer and found_any:
break
print 'case', casenum, answer
fout.write('Case #' + str(casenum) + ': ' + str(answer) + '\n')
casenum += 1
i += 2 | true |
25ef2cf4e38791dcc498bcb0b480dd110e187c49 | Python | rusinchuk/lesson_3_homework | /Lesson_3_task_5.py | UTF-8 | 727 | 3.515625 | 4 | [] | no_license | s = ' We are not what we should be! We are not what we need to be. But at least we are not what we used to be (Football Coach) '
l = s.strip()
s = l
print(l, '\n', l.count(" ") + 1)
while (l.find('.') > 0) or (l.find('!') > 0) or (l.find('(') > 0) or (l.find(')') > 0):
l1 = l.find(".")
s1 = l[0:l1]
s2 = l[l1:]
s = s1.strip('.') + s2.strip('.')
l = s
l1 = l.find('!')
s1 = l[0:l1]
s2 = l[l1:]
s = s1.strip('!') + s2.strip('!')
l = s
l1 = l.find('(')
s1 = l[0:l1]
s2 = l[l1:]
s = s1.strip('(') + s2.strip('(')
l = s
l1 = l.find(')')
s1 = l[0:l1]
s2 = l[l1:]
s = s1.strip(')') + s2.strip(')')
l = s
print(s)
print(*sorted(s.split()))
| true |
046632d0e7b4a7563a7e3c0edab92650a3d11bab | Python | GuyRobot/RL-Python | /TabularLearnBellmanEquation/FrozenLakeQL/SolveFrozenLakeUsingValueFunc.py | UTF-8 | 8,238 | 3.875 | 4 | [] | no_license | """
The central data structures in this example are as follows:
Reward table: A dictionary with the composite key "source state" +
"action" + "target state". The value is obtained from the immediate
reward.
Transitions table: A dictionary keeping counters of the experienced
transitions. The key is the composite "state" + "action" and the value is
another dictionary that maps the target state into a count of times that
we've seen it. For example, if in state 0 we execute action 1 ten times,
after three times it leads us to state 4 and after seven times to state 5.
Entry with the key (0, 1) in this table will be a dict {4: 3, 5: 7}. We
use this table to estimate the probabilities of our transitions.
Value table: A dictionary that maps a state into the calculated value of
this state.
The overall logic of our code is simple: in the loop, we play 100 random
steps from the environment, populating the reward and transition tables. After
those 100 steps, we perform a value iteration loop over all states, updating
our value table. Then we play several full episodes to check our
improvements using the updated value table. If the average reward for those
test episodes is above the 0.8 boundary, then we stop training. During test
episodes, we also update our reward and transition tables to use all data from
the environment.
Graph:
Play random to get experience (construct 2 dict tables above - transition and reward)
Value Iteration to construct values table
Play with environment:
Play episode:
Choose the best action using max value function (fun select_action)
In select action calculate values using dict values has saved
Take the reward from best action
Update 3 tables dict
        Repeat until solved!
"""
import gym
import collections
from tensorboardX import SummaryWriter
ENV_NAME = "FrozenLake-v0"  # Gym environment id to solve
GAMMA = 0.9  # discount factor used in the Bellman backup
TEST_EPISODES = 20  # episodes played per evaluation round
class Agent:
    """Tabular value-iteration agent for FrozenLake.

    Learns from experience via three tables: per-transition rewards,
    transition counts (used to estimate transition probabilities), and
    per-state values updated by value iteration.
    """

    def __init__(self):
        self.env = gym.make(ENV_NAME)
        self.state = self.env.reset()
        # {(0, 0, 0): 0.0, (0, 3, 0): 0.0, (0, 1, 1): 0.0, (1, 0, 0): 0.0,...}
        # (source_state, action, target_state) : reward
        self.rewards = collections.defaultdict(float)
        # {(0, 1): Counter({4: 146, 0: 146, 1: 124}), (4, 3): Counter({0: 31, 4: 27, 5: 22}), ...}
        # (state, action) : count num time exec {state1: num time exist, state2: num time exist, ...}
        self.transitions = collections.defaultdict(collections.Counter)
        # {4: 0.09031095088988002, 0: 0.07002148262932646, 1: 0.06382471255154518, 5: 0.0, 2: 0.07480238713555348,
        # 3: 0.056943860481139336, ...}
        # state: calc values
        self.values = collections.defaultdict(float)

    def play_n_random_steps(self, count):
        """
        This function is used to gather random experience from the environment and
        update reward and transition tables.
        :param count: number of random environment steps to take
        :return: None (tables are updated in place)
        """
        for _ in range(count):
            action = self.env.action_space.sample()
            new_state, reward, is_done, _ = self.env.step(action)
            self.rewards[(self.state, action, new_state)] = reward
            self.transitions[(self.state, action)][new_state] += 1
            # Restart the episode when it terminates; otherwise continue.
            self.state = self.env.reset() if is_done else new_state

    def calc_action_value(self, state, action):
        """
        1. We extract transition counters for the given state and action from the
        transition table. Counters in this table have a form of dict, with target
        states as key and a count of experienced transitions as value. We sum all
        counters to obtain the total count of times we've executed the action
        from the state. We will use this total value later to go from an individual
        counter to probability.
        2. Then we iterate every target state that our action has landed on and
        calculate its contribution into the total action value using the Bellman
        equation @see Theory.py. This contribution equals to immediate reward plus discounted
        value for the target state. We multiply this sum to the probability of this
        transition and add the result to the final action value.
        See images/Q_learning_transitions
        """
        target_counts = self.transitions[(state, action)]
        total = sum(target_counts.values())
        action_values = 0.0
        for tgt_state, count in target_counts.items():
            reward = self.rewards[(state, action, tgt_state)]
            # Bellman backup weighted by the empirical transition probability.
            action_values += (count / total) * (reward + GAMMA * self.values[tgt_state])
        return action_values

    def select_action(self, state):
        """
        iterates over all possible
        actions in the environment and calculates value for every action. The action
        with the largest value wins and is returned as the action to take.
        :param state: current environment state
        :return: the action with the highest estimated value
        """
        best_action, best_value = None, None
        for action in range(self.env.action_space.n):
            action_value = self.calc_action_value(state, action)
            if best_value is None or best_value < action_value:
                best_value = action_value
                best_action = action
        return best_action

    def play_episode(self, env):
        """
        The play_episode function uses select_action to find the best action to
        take and plays one full episode using the provided environment. This
        function is used to play test episodes,
        Loop over states
        accumulating reward for one episode:
        :param env: the environment to play in (kept separate from self.env)
        :return: total undiscounted reward collected during the episode
        """
        total_reward = 0.0
        state = env.reset()
        while True:
            action = self.select_action(state)
            new_state, reward, is_done, _ = env.step(action)
            # Test episodes also feed the reward/transition tables.
            self.rewards[(state, action, new_state)] = reward
            self.transitions[(state, action)][new_state] += 1
            total_reward += reward
            if is_done:
                break
            state = new_state
        return total_reward

    def value_iteration(self):
        """
        loop over all states in the environment, then for every state we calculate the
        values for the states reachable from it, obtaining candidates for the value of
        the state. Then we update the value of our current state with the maximum
        value of the action available from the state
        :return: None (self.values is updated in place)
        """
        for state in range(self.env.observation_space.n):
            state_values = [self.calc_action_value(state, action) for action in
                            range(self.env.action_space.n)]
            self.values[state] = max(state_values)
def run_main(agent, comment="-v-learning"):
    """Train `agent` until it reliably solves FrozenLake.

    Alternates random exploration, value iteration, and evaluation,
    logging the average test reward to TensorBoard until it exceeds 0.8.

    :param agent: an Agent instance to train
    :param comment: suffix for the TensorBoard run directory
    """
    test_env = gym.make(ENV_NAME)
    writer = SummaryWriter(comment=comment)
    epoch = 0
    best_reward = 0.0
    while True:
        epoch += 1
        """
        First, we perform 100 random steps to fill our reward and transition
        tables with fresh data and then we run value iteration over all states. The rest
        of the code plays test episodes using the value table as our policy, then writes
        data into TensorBoard, tracks the best average reward, and checks for the
        training loop stop condition.
        """
        agent.play_n_random_steps(100)
        agent.value_iteration()
        reward = 0.0
        for _ in range(TEST_EPISODES):
            reward += agent.play_episode(test_env)
        reward /= TEST_EPISODES
        writer.add_scalar("reward", reward, epoch)
        if reward > best_reward:
            print("Best reward updated: %.3f -> %.3f"
                  % (best_reward, reward))
            best_reward = reward
        # Stop once the average test reward clears the 0.8 threshold.
        if reward > 0.8:
            print("Solved in %d iterations!" % epoch)
            break
    writer.close()


if __name__ == '__main__':
    run_main(agent=Agent())
| true |
ef37c3012bbf9ee8111be4cd5173e31e930683be | Python | rimow/NeuroPrononciation | /neuroPro/phonemesAnalysis/featuresGeneration.py | UTF-8 | 11,511 | 2.78125 | 3 | [] | no_license | import pywt
import numpy as np
import scipy as sc
import scipy.io.wavfile
import librosa
from librosa import feature
from librosa import filters
from librosa import util
import matplotlib.pyplot as plt
import math
from numpy import shape
from Erreurs import initialisationError
from phonemesAnalysis.analyse import *
from phonemesAnalysis.utiles import *
import mlpy.wavelet as wave
# Fichier contenant les fonctions d'extraction de parametres a partir de signaux
# Specification pour toutes les fonctions:
# - path des parametres doit etre valide et etre le nom d'un fichier audio
# - la taille des fenetres doit etre inferieur a la duree des signaux
##########################################################################################################################
############################################ FOURIER TRANSFORM ###########################################################
##########################################################################################################################
def FourierTransform(signal_path, n_fft, hop_length,fmin, fmax, n_mels,affichage=False):
    '''
    Generate log mel-spectrogram (Fourier-based) features from an audio file.

    :param signal_path: path to the audio file to process
    :param n_fft: analysis window size, in samples
    :param hop_length: hop between successive windows, in samples
    :param fmin: minimum frequency
    :param fmax: maximum frequency
    :param n_mels: number of mel bands
    :param affichage: True to display the spectrogram
    :return: matrix D whose rows are time frames and whose columns hold
        the log mel energies
    '''
    #S=librosa.feature.melspectrogram(y=s1, sr=sr, S=None, n_fft=441, hop_length=221, n_mels=40)
    #D = scipy.fft(S)
    signal, sampling_rate = librosa.load(signal_path) #load the audio file
    D=librosa.feature.melspectrogram(y=signal, sr=sampling_rate, S=None, n_fft=n_fft, hop_length=hop_length, n_mels=n_mels, fmin=fmin, fmax=fmax)
    #D = np.abs(D).transpose()
    D = np.log(D)
    if affichage:
        afficherSpec(D,sampling_rate,hop_length)
    # Transpose so each row corresponds to one time frame.
    D=D.transpose()
    return D;
#Exemple de fonctionnement : Avec une fenetre de 20ms et un glissement de 10ms
#signal, sampling_rate = librosa.load('1.wav')
#FourierTransform('1.wav', int(0.02*sampling_rate), int(0.01*sampling_rate))
##########################################################################################################################
##########################################################################################################################
############################################ WAVELET TRANSFORM ###########################################################
##########################################################################################################################
def waveletsTransformContinue(signalPath, wf, wf_param, dt, dj, affichageSpectrogram):
    '''
    Compute the continuous wavelet transform (CWT) of an audio signal.

    :param signalPath: path to the audio file
    :param wf: wavelet function ('morlet', 'paul', 'dog')
    :param wf_param: wavelet parameter (8 for morlet, 2 for dog and paul)
    :param dt: time step (e.g. 10 ms)
    :param dj: scale resolution (the smaller dj, the finer the resolution)
    :param affichageSpectrogram: True to display the resulting spectrogram
    :return: the CWT magnitudes, averaged over time windows by
        moyennerMatrice into a (n_frames x n_scales) matrix
    '''
    # Load the wav file, y is the data and sr the sampling frequency
    signal, fe = librosa.load(signalPath)
    scales = wave.autoscales(len(signal), dt=dt, dj=dj, wf=wf, p=wf_param)
    spec = wave.cwt(signal, dt=dt, scales=scales, wf=wf, p=wf_param)
    spec= np.abs(spec)
    wvtransform=spec.transpose()
    wvtransform= moyennerMatrice(wvtransform) # windowed averaging; remove this line to keep the raw per-sample matrix
    if affichageSpectrogram:
        afficherSpec(wvtransform,fe,dt)
    return wvtransform
## AMELIORATION RESULTATS C.W.T
def moyennerMatrice(x, window=441, hop=221):
    '''
    Average the rows of a matrix over sliding windows.

    The defaults (441-row windows with a 221-row hop) correspond to 20 ms
    windows with a 10 ms hop at the 22050 Hz sampling rate used by
    librosa.load elsewhere in this module; they can now be overridden for
    other rates (backward-compatible generalization of the previous
    hard-coded constants).

    :param x: 2-D matrix, e.g. the continuous wavelet transform (time x scales)
    :param window: number of consecutive rows averaged into one output row
    :param hop: number of rows between the starts of two consecutive windows
    :return: numpy array with one averaged row per window position
    '''
    y = np.array(x)
    out = []
    # One output row every `hop` input rows; the final windows may cover
    # fewer than `window` rows because numpy truncates out-of-range slices.
    for start in range(0, len(y), hop):
        out.append(y[start:start + window, :].mean(0))
    return np.array(out)
##########################################################################################################################
##########################################################################################################################
############################################### MFCC TRANSFORM ###########################################################
##########################################################################################################################
def mfcc(path, taille_fenetre, hop_span, nb_mel,affichage=False):
    '''
    Compute the MFCC (mel-frequency cepstral coefficients) of an audio
    file using a sliding window.

    :param path: (string) path of the audio file on the user's machine
    :param taille_fenetre: (seconds) size of the sliding window; the
        coefficients are extracted for windows of this size
    :param hop_span: (seconds) step between two consecutive windows
    :param nb_mel: (int) number of coefficients to generate
    :param affichage: True to display the spectrogram
    :return: matrix (list of arrays) of shape nb_windows x nb_mel:
        the coefficients for each window
    '''
    # Load the signal with librosa's default sampling rate (22050 Hz).
    son, sr = librosa.core.load(path)
    duree = librosa.core.get_duration(son)
    # Signal normalisation (currently disabled).
    #son_normalized = librosa.util.normalize(son)
    # Parameter validation.
    # NOTE(review): the two try/except blocks below are inert — a bare
    # comparison never raises, and `initialisationError` is not defined
    # anywhere, so the intended window/hop-vs-duration validation never
    # actually happens.  (This is Python 2 code: `print` statements.)
    try:
        taille_fenetre<duree
    except initialisationError:
        print "la fenetre glissante doit etre plus petite que la duree de l'enregistrement"
    try:
        hop_span<duree
    except initialisationError:
        print "la duree du hop_lenght doit etre plus petite que la duree de l'enregistrement"
    # Compute the MFCC; window and hop sizes are converted from seconds
    # to sample counts.
    son_mfcc = librosa.feature.mfcc(son,sr,None,nb_mel, hop_length = int(np.floor(hop_span*sr)), n_fft=int(np.floor(taille_fenetre*sr)))
    # #save the matrix as a numpy array
    # son2 = numpy.asarray(son_mfcc)
    # numpy.save("data/mfcc" , numpy.transpose(son2))
    # #display the matrices
    # plt.figure(0)
    # librosa.display.specshow(son2, sr, overlapping, x_axis='frames', y_axis='log', n_xticks = 20, n_yticks = 20, fmin = 50, fmax = 1000)
    # plt.savefig("mfcc.jpg")
    # plt.title('MFCC')
    # plt.show()
    if affichage:
        afficherSpec(son_mfcc,sr,hop_span)
    return np.transpose(son_mfcc)
##########################################################################################################################
##########################################################################################################################
############################################## FBANK TRANSFORM ###########################################################
##########################################################################################################################
def fbank(path, fft_span, hop_span, n_mels, fmin, fmax, affichage=False):
    """
    Compute log mel filter-bank (fbank) features for an audio file.

    :param path: location of the audio file
    :param fft_span: Fourier-transform window length, in seconds
    :param hop_span: step between two frames, in seconds
    :param n_mels: number of mel frequency bands
    :param fmin: lowest frequency of the decomposition
    :param fmax: highest frequency of the decomposition
    :param affichage: True to display the spectrogram
    :return: matrix of fbank vectors: one row (of size n_mels) per
             hop_span period
    """
    wav, s_rate = librosa.load(path)
    # Convert the window and hop lengths from seconds to sample counts.
    frame_length = int(np.floor(fft_span * s_rate))
    frame_step = int(np.floor(hop_span * s_rate))
    mel_spec = feature.melspectrogram(util.normalize(wav), s_rate, S=None,
                                      n_fft=frame_length,
                                      hop_length=frame_step,
                                      n_mels=n_mels, fmin=fmin, fmax=fmax)
    log_spec = np.log(mel_spec)
    if affichage:
        afficherSpec(log_spec, s_rate, hop_span)
    # One row per frame: transpose from (n_mels, frames) to (frames, n_mels).
    return np.transpose(log_spec)
#fBank en prenant plusieurs fichiers en entree
def fbankPlus(paths_wav, paths_aligned, fft_span, hop_span, n_mels, fmin, fmax):
    """
    Compute fbank features for several audio files at once.

    :param paths_wav: paths of the audio files
    :param paths_aligned: paths of the matching alignment files
    :param fft_span: Fourier-transform window length, in seconds
    :param hop_span: step between two frames, in seconds
    :param n_mels: number of mel bands
    :param fmin: lowest frequency
    :param fmax: highest frequency
    :return: X (the feature vectors, nb_vectors x nb_features) and
             Y (the phoneme matching each vector)
    """
    features = []
    labels = []
    for wav_path, aligned_path in zip(paths_wav, paths_aligned):
        vectors = fbank(wav_path, fft_span, hop_span, n_mels, fmin, fmax)
        features.append(vectors)
        labels.append(getY(vectors, aligned_path, hop_span))
    # Stack the per-file matrices into one big matrix per output.
    return np.concatenate(np.array(features)), np.concatenate(np.array(labels))
def afficherSpec(X, s_rate, hop_span):
    """
    Display a spectrogram of the given matrix with librosa.specshow.

    :param X: matrix to display
    :param s_rate: sampling rate
    :param hop_span: step between two frames, in seconds
    """
    hop_length = int(np.floor(hop_span * s_rate))
    plt.figure()
    plt.title('Spectrogrammes : librosa.specshow')
    librosa.display.specshow(X, y_axis='mel', fmax=8000, x_axis='time',
                             sr=s_rate, hop_length=hop_length)
    plt.colorbar(format='%+2.0f dB')
    plt.show()
##########################################################################################################################
##########################################################################################################################
############################################## TESTS DES TRANSFORMATIONS #################################################
##########################################################################################################################
#Tests des fonction ci-dessus : la verification s'effectue grace aux spectrogrammes
# #Tests
# fft_span = 0.02
# hop_span = 0.01
# n_mels = 40
# fmin = 50
# fmax = 8000
# dt=0.01
# dj=0.5
# path = "./data/Bref80_L4/Bref80_L4M01.wav"
# # Fbank
# X = fbank(path,fft_span,hop_span,n_mels,fmin,fmax,affichage=True)
# # mfcc
# X = mfcc(path, fft_span, hop_span, n_mels,affichage=True)
# #FFT
# FourierTransform(path, 441,221,fmin, fmax, n_mels,affichage=True)
# #WAVELETS
# waveletsTransformContinue(path, 'paul', 2, dt, dj, affichageSpectrogram=True)
| true |
b634572cf6c875c03496992ecdd5fe275512c787 | Python | Krishnap641/ABC_Fin_App | /setupDB.py | UTF-8 | 815 | 2.71875 | 3 | [] | no_license | import sqlite3
# The database file is created in the directory the script is run from.
# This script sets up the schema used by the loan application: a single
# "loan_application" table holding the applicant data together with the
# predicted loan_status.
conn = sqlite3.connect('loan_data.db')
c = conn.cursor()
# Create table - "loan_application".
# "IF NOT EXISTS" makes the script idempotent: re-running it no longer
# fails once the table has been created.
c.execute('''
CREATE TABLE IF NOT EXISTS "loan_application" (
	"loanid"	INTEGER DEFAULT 1000 PRIMARY KEY AUTOINCREMENT,
	"name"	TEXT,
	"email"	TEXT,
	"age"	INTEGER,
	"gender"	TEXT,
	"married"	TEXT,
	"dependents"	INTEGER,
	"education"	INTEGER,
	"employment"	INTEGER,
	"appincome"	REAL,
	"coappincome"	REAL,
	"loan_term"	INTEGER,
	"loan_amount"	REAL,
	"credit_history"	INTEGER,
	"area"	TEXT,
	"loan_status"	INTEGER
)'''
)
conn.commit()
# Close the connection so the database file is released (the original
# script leaked the open connection).
conn.close()
print("DB created successfully")
639cce4ce0431e059ccb8b8a297c4b257a80ccb9 | Python | cyogita97/Project | /CoeffGenerator.py | UTF-8 | 2,596 | 2.671875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sun Jul 21 19:04:57 2019
@author: yogit
"""
import numpy as np
from sympy import *
from z3 import *
import math
import time
start_time = time.time()
def CoefficientGenerator(B,v,x0,x1,pop,ca,ga,x,yy,u,w,nml,nsamp):
    """
    Search for the coefficients of a barrier polynomial with the z3 solver.

    The candidate barrier is B(x) = p0*t0 + p1*t1 + ... where the ti are
    monomial values pre-evaluated per data sample in the matrices below
    and the pi are z3 real unknowns.  Four groups of constraints are
    added (non-negativity everywhere, <= ga on the initial set, >= 1 on
    the unsafe set, and a decrease condition per sample over the input
    modes), then z3 is asked for a satisfying assignment.

    :param B: monomial basis; only its length (number of coefficients) is used
    :param v: monomial values for samples of the entire region (rows = samples)
    :param x0: monomial values for samples of the initial set
    :param x1: monomial values for samples of the unsafe set
    :param pop: monomial values of E(B(f(x)))-B(x) terms, nml rows per sample
    :param ca: decrease threshold (condition 4 bound)
    :param ga: initial-set threshold (condition 2 bound)
    :param nml: number of input modes per sample
    :param nsamp: number of samples used for the decrease condition
    :return: (p, check, model) -- the coefficient variables, the z3 result
        (sat/unsat) and the model, or the string 'model not available'
    NOTE(review): parameters x, yy, u and w are never used in this body.
    """
    pp=len(B)
    p=[]
    for j1 in range(pp):
        p.append(Real('p{}'.format(j1))) #Define coefficients for barrier polynomial p0*1+p1*x1+... (depending on the order of terms)
    s = Solver() #Initialise z3 solver
    aa=[]
    for i in range(0,len(v)):
        aaa=0
        for kk in range(pp):
            aaa = aaa+v[i,kk]*p[kk] #aaa represents the inequality with coefficients for each data sample in the entire region
        aa.append(aaa)
    A=z3.And([aa[m]>=0 for m in range(len(aa))]) # the inequalities should be satisfied for all data samples
    s.add(A) #Condition 1: B(x)>0
# =============================================================================
    a=[] #Condition 2: B(x)<=ga for the initial condition
    for i in range(0,len(x0)):
        aaa=0
        for kk in range(pp):
            aaa = aaa+x0[i,kk]*p[kk] #aaa represents the expression for each data sample in x0
        a.append(aaa)
    Z=z3.And([a[m]<=ga for m in range(len(a))]) # the inequalities should be satisfied for all X0 data samples
    s.add(Z)
# =============================================================================
    b=[] #Condition 3: B(x)>=1 for the unsafe condition
    for i in range(0,len(x1)):
        aaa=0
        for kk in range(pp):
            aaa = aaa+x1[i,kk]*p[kk] #aaa represents the expression B(x) for each data sample in x1
        b.append(aaa)
    F=z3.And([b[m]>=1 for m in range(len(b))]) # the inequalities should be satisfied for all X1 data samples
    s.add(F)
    npo=nml
    # Condition 4: for each sample, at least one input mode must make the
    # expected decrease expression drop below the threshold ca.
    for i in range(0,nsamp):
        d1=[]
        for k in range(npo):
            d=0
            for kk in range(pp):
                d=d+pop[i*npo+k,kk]*p[kk] #d represents the expression E(B(f(x)))-B(x) for each input mode and each data sample in x
            d1.append(d)
        e=z3.Or([d1[m]<=ca for m in range(len(d1))]) # the inequalities should be satisfied for at least one of the input modes
        s.add(e)
    if s.check()==sat:
#        print("Generating Barrier Polynomial","--- %s seconds ---" % (time.time() - start_time))
        return p,s.check(),s.model()
    else:
        return p,s.check(),'model not available'
return p,s.check(),'model not available' | true |
761955364869f1d4aa5654e0e9c855a7ead0e52b | Python | papagr/everest | /everest/repositories/manager.py | UTF-8 | 4,885 | 2.640625 | 3 | [
"MIT"
] | permissive | """
The repository manager class.
This file is part of the everest project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Created on Jan 25, 2013.
"""
from everest.repositories.constants import REPOSITORY_DOMAINS
from everest.repositories.interfaces import IRepository
from everest.utils import id_generator
from pyramid.compat import itervalues_
from pyramid.threadlocal import get_current_registry
__docformat__ = 'reStructuredText en'
__all__ = ['RepositoryManager',
]
class RepositoryManager(object):
    """
    Creates, initializes and holds repositories, keyed by their name.
    """
    __repo_id_gen = id_generator()

    def __init__(self):
        self.__repositories = {}
        self.__default_repo = None

    def get(self, name):
        """
        Returns the repository registered under the given name, or None.
        """
        return self.__repositories.get(name)

    def set(self, repo):
        """
        Registers the given repository under its name.

        :raises ValueError: if an already initialized repository is
          registered under the same name.
        """
        existing = self.__repositories.get(repo.name)
        if not existing is None and existing.is_initialized:
            raise ValueError('Can not replace repositories that have been '
                             'initialized.')
        self.__repositories[repo.name] = repo

    def get_default(self):
        """
        Returns the repository marked as the default one (or None).
        """
        return self.__default_repo

    def new(self, repo_type, name=None, make_default=False,
            repository_class=None, aggregate_class=None,
            configuration=None):
        """
        Creates a new repository of the given type. If the root repository
        domain (see :class:`everest.repositories.constants.REPOSITORY_DOMAINS`)
        is passed as a repository name, the type string is used as the name;
        if no name is passed, a unique name is created automatically.
        """
        if name == REPOSITORY_DOMAINS.ROOT:
            # Unless explicitly configured differently, all root
            # repositories join the transaction and never autocommit.
            name = repo_type
            join_transaction = True
            autocommit = False
        else:
            join_transaction = False
            if name is None:
                name = "%s%d" % (repo_type, next(self.__repo_id_gen))
            # The system repository is special in that it does not join
            # the transaction but still commits all changes.
            autocommit = name == REPOSITORY_DOMAINS.SYSTEM
        if repository_class is None:
            registry = get_current_registry()
            repository_class = registry.queryUtility(IRepository,
                                                     name=repo_type)
            if repository_class is None:
                raise ValueError('Unknown repository type "%s".' % repo_type)
        new_repo = repository_class(name,
                                    aggregate_class,
                                    join_transaction=join_transaction,
                                    autocommit=autocommit)
        if not configuration is None:
            new_repo.configure(**configuration)
        if make_default:
            self.__default_repo = new_repo
        return new_repo

    def setup_system_repository(self, repository_type, reset_on_start,
                                repository_class=None):
        """
        Sets up the system repository with the given repository type.

        :param str repository_type: Repository type to use for the SYSTEM
          repository.
        :param bool reset_on_start: Flag to indicate whether stored system
          resources should be discarded on startup.
        :param repository_class: class to use for the system repository. If
          not given, the registered class for the given type will be used.
        """
        # The system entity repository does not join the transaction and
        # runs in autocommit mode (see .new()).
        config = dict(messaging_enable=True,
                      messaging_reset_on_start=reset_on_start)
        system_repo = self.new(repository_type,
                               name=REPOSITORY_DOMAINS.SYSTEM,
                               repository_class=repository_class,
                               configuration=config)
        self.set(system_repo)

    def initialize_all(self):
        """
        Convenience method to initialize all repositories that have not
        been initialized yet.
        """
        pending = (r for r in itervalues_(self.__repositories)
                   if not r.is_initialized)
        for repository in pending:
            repository.initialize()

    def reset_all(self):
        """
        Resets every repository that has been initialized.
        """
        initialized = (r for r in itervalues_(self.__repositories)
                       if r.is_initialized)
        for repository in initialized:
            repository.reset()

    def on_app_created(self, event): # pylint: disable=W0613
        """
        Callback set up by the registry configurator to initialize all
        registered repositories.
        """
        self.initialize_all()
| true |
9028322fda081f40df81691407391919184a45f7 | Python | angeeranaser/Rosalind | /Python/rosalind_perm.py | UTF-8 | 971 | 3.84375 | 4 | [
"MIT"
] | permissive | # Rosalind: Enumerating Gene Orders
# Given: A positive integer n < 7.
# Result: The total number of permutations of length n, followed by a list of all such permutations (in any order).
import itertools
def converter(items):
    """Return the elements of *items* joined by single spaces.

    The parameter was renamed from ``input``, which shadowed the builtin;
    it is only ever passed positionally, so callers are unaffected.
    """
    return " ".join(items)
def main():
    """Solve Rosalind PERM: enumerate all permutations of 1..n.

    Reads n (a positive integer < 7) from ``rosalind_perm.txt`` and
    writes the permutation count followed by one space-separated
    permutation per line to ``output.txt`` (each line is also echoed to
    stdout).  Files are opened with ``with`` so they are closed even on
    error (the original left them open on exceptions), and the digit
    string is built with ``join`` instead of quadratic concatenation.
    """
    # Read the input value n.
    with open('rosalind_perm.txt', 'r') as data_file:
        n = int(data_file.readline().strip())

    # All permutations of the digit string "12...n".
    index = "".join(str(x) for x in range(1, n + 1))
    perms = list(itertools.permutations(index))

    with open("output.txt", "w") as output_file:
        output_file.write(str(len(perms)) + "\n")  # number of permutations of length n
        for perm in perms:
            line = " ".join(perm)
            output_file.write(line + "\n")
            print(line)

if __name__ == "__main__":
    main()
main()
| true |
05be55cda0a268c2bbb356c2122a3da0d4468094 | Python | jasonthename/python-triplesec | /triplesec/test/test.py | UTF-8 | 9,856 | 2.53125 | 3 | [
"BSD-3-Clause"
] | permissive | import sys
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
from binascii import unhexlify as unhex
from binascii import hexlify
import json
import os.path
import six
import struct
import triplesec
from triplesec import TripleSec, TripleSecError
from triplesec.versions import _versions
# Load the shared test vectors that ship next to this module.
path = os.path.dirname(os.path.realpath(os.path.abspath(__file__)))
vectors = json.load(open(os.path.join(path, 'vectors.json')))
for v in vectors:
    for k in v:
        v[k] = v[k].encode('ascii') # JSON insists to decode the loaded objects
        # Values prefixed with "0x" are hex strings: decode them to bytes.
        if v[k].startswith(b'0x'): v[k] = unhex(v[k][2:])
    # 'extra' (when present) and 'ciphertext' are always hex-encoded.
    if 'extra' in v: v['extra'] = unhex(v['extra'])
    v['ciphertext'] = unhex(v['ciphertext'])

# A generic vector for various tests
VECTOR = vectors[0]
assert 'disabled' not in VECTOR
class TripleSec_tests(unittest.TestCase):
    """Unit tests for the TripleSec encryption API.

    Covers argument validation, round-trips, external test vectors,
    tamper detection and the individual cipher/MAC primitives.
    """

    # Helper: encrypt with the given callable (passing the key or not)
    # and check the ciphertext round-trips through triplesec.decrypt.
    def _test_encrypt(self, encrypt, plaintext, key, pass_key=True):
        if pass_key: ciphertext = encrypt(plaintext, key)
        else: ciphertext = encrypt(plaintext)
        self.assertEqual(plaintext, triplesec.decrypt(ciphertext, key))

    # Encrypting/decrypting without ever supplying a key must fail.
    def test_missing_key(self):
        T = TripleSec()
        regex = 'You didn\'t initialize TripleSec with a key'
        self.assertRaisesRegexp(TripleSecError, regex, lambda: T.encrypt(b'xxx'))
        self.assertRaisesRegexp(TripleSecError, regex, lambda: T.decrypt(b'xxx'))
        self.assertRaisesRegexp(TripleSecError, regex, lambda: triplesec.encrypt(b'xxx'))
        self.assertRaisesRegexp(TripleSecError, regex, lambda: triplesec.decrypt(b'xxx'))

    # A TripleSec constructed with a key can be used without re-passing it.
    def test_initialized_behavior(self):
        T = TripleSec(VECTOR['key'])
        self._test_encrypt(T.encrypt, VECTOR['plaintext'], VECTOR['key'], pass_key=False)
        self.assertEqual(T.decrypt(VECTOR['ciphertext']), VECTOR['plaintext'])

    # A per-call key works both without and with a (different) stored key.
    def test_uninitialized_behavior(self):
        T = TripleSec()
        self._test_encrypt(T.encrypt, VECTOR['plaintext'], VECTOR['key'])
        self.assertEqual(T.decrypt(VECTOR['ciphertext'], VECTOR['key']), VECTOR['plaintext'])
        T = TripleSec(b'foo')
        self._test_encrypt(T.encrypt, VECTOR['plaintext'], VECTOR['key'])
        self.assertEqual(T.decrypt(VECTOR['ciphertext'], VECTOR['key']), VECTOR['plaintext'])

    # The module-level encrypt/decrypt shortcuts behave like the class.
    def test_shortcuts(self):
        self._test_encrypt(triplesec.encrypt, VECTOR['plaintext'], VECTOR['key'])
        self.assertEqual(triplesec.decrypt(VECTOR['ciphertext'], VECTOR['key']), VECTOR['plaintext'])

    # Non-bytes plaintext/ciphertext inputs must be rejected.
    def test_data_type(self):
        T = TripleSec(VECTOR['key'])
        regex = r'The input data needs to be a binary string'
        for d in (u'xxx', 12, [12, 13]):
            self.assertRaisesRegexp(TripleSecError, regex, lambda: T.decrypt(d))
            self.assertRaisesRegexp(TripleSecError, regex, lambda: T.encrypt(d))

    # Non-bytes keys must be rejected everywhere a key is accepted.
    def test_key_type(self):
        regex = r'The key needs to be a binary string'
        for k in (u'xxx', 12, [12, 13]):
            self.assertRaisesRegexp(TripleSecError, regex, lambda: TripleSec(k))
            self.assertRaisesRegexp(TripleSecError, regex, lambda: triplesec.decrypt(b'foo', k))
            self.assertRaisesRegexp(TripleSecError, regex, lambda: triplesec.encrypt(b'foo', k))

    # Inputs that do not look like TripleSec ciphertexts must be rejected.
    def test_decrypt_invalid_data(self):
        regex = r'does not look like a TripleSec ciphertext'
        self.assertRaisesRegexp(TripleSecError, regex, lambda: triplesec.decrypt(b'foo', b'xxx'))
        self.assertRaisesRegexp(TripleSecError, regex, lambda: triplesec.decrypt(unhex(b'1c94d7de00000003abcdef'), b'xxx'))
        self.assertRaisesRegexp(TripleSecError, regex, lambda: triplesec.decrypt(b'12345678901235'*100, b'xxx'))

    # A well-formed header with an unknown version number must be rejected.
    def test_decrypt_invalid_version(self):
        regex = r'Unimplemented version'
        self.assertRaisesRegexp(TripleSecError, regex, lambda: triplesec.decrypt(unhex(b'1c94d7de01200000abcdef'), b'xxx'))

    # Empty messages and empty keys must be rejected.
    def test_zero_length(self):
        regex = r'Invalid message length - message cannot be empty'
        self.assertRaisesRegexp(TripleSecError, regex, lambda: triplesec.encrypt(b'', b'xxx'))
        regex = r'Invalid key length - key cannot be empty'
        self.assertRaisesRegexp(TripleSecError, regex, lambda: triplesec.encrypt(b'foo', b''))
        self.assertRaisesRegexp(TripleSecError, regex, lambda: triplesec.decrypt(b'foo', b''))

    # The key-stretching "extra bytes" must match the recorded vectors and
    # be exposed only after an encrypt that requested them.
    # (The loop variable deliberately shadows the module-level VECTOR.)
    def test_extra_bytes(self):
        extra_vectors = tuple(v for v in vectors if 'extra' in v)
        self.assertTrue(len(extra_vectors))
        for VECTOR in extra_vectors:
            T = TripleSec()
            self._test_encrypt(T.encrypt, VECTOR['plaintext'], VECTOR['key'])
            self.assertEqual(None, T.extra_bytes())
            data = VECTOR['ciphertext']
            header_version = struct.unpack(">I", data[4:8])[0]
            version = triplesec.versions.get_version(header_version, False)
            header, salt, macs, encrypted_material = T._split_ciphertext(data, version)
            mac_keys, cipher_keys, extra = T._key_stretching(VECTOR['key'], salt, version, len(VECTOR['extra']))
            self.assertEqual(VECTOR['extra'], extra)
            T.encrypt(VECTOR['plaintext'], VECTOR['key'], extra_bytes=len(VECTOR['extra']))
            self.assertTrue(T.extra_bytes())
            self._test_encrypt(T.encrypt, VECTOR['plaintext'], VECTOR['key'])
            self.assertEqual(None, T.extra_bytes())

    # Round-trip random plaintext/key pairs of varying lengths.
    def test_random_encryption(self):
        for i in range(500 // 20):
            p = triplesec.rndfile.read(i * 20 + 1)
            k = triplesec.rndfile.read((i * 20 - 300) % 500 + 1)
            c = triplesec.encrypt(p, k)
            self.assertEqual(p, triplesec.decrypt(c, k), i)

    # Encrypting the same plaintext twice must never produce the same
    # ciphertext (fresh randomness per call), for every version.
    def test_using_randomness(self):
        for version in _versions.keys():
            compatibility = version in {1, 3}
            T = TripleSec(key=b"YELLOW_SUBMARINE")
            pt = b"foobar"
            once = T.encrypt(pt, v=version, compatibility=compatibility)
            twice = T.encrypt(pt, v=version, compatibility=compatibility)
            self.assertNotEqual(once, twice)
            T = TripleSec(key=b"YELLOW_SUBMARINE")
            thrice = T.encrypt(pt, v=version, compatibility=compatibility)
            self.assertNotEqual(once, thrice)
            self.assertNotEqual(twice, thrice)

    # All (non-disabled) vectors from vectors.json must round-trip and decrypt.
    def test_external_vectors(self):
        for V in vectors:
            if 'disabled' in V: continue
            self._test_encrypt(triplesec.encrypt, V['plaintext'], V['key'])
            self.assertEqual(triplesec.decrypt(V['ciphertext'], V['key']), V['plaintext'])

    # Flipping one bit of the ciphertext must fail MAC authentication.
    def test_tampered_data(self):
        regex = r'Failed authentication of the data'
        c = VECTOR['ciphertext']
        c = c[:-2] + six.int2byte(six.indexbytes(c, -2) ^ 25) + six.int2byte(six.indexbytes(c, -1))
        self.assertRaisesRegexp(TripleSecError, regex, lambda: triplesec.decrypt(c, VECTOR['key']))

    # Known-answer test for the v1 MAC generation (fixed input/key/output).
    def test_signatures_v1(self):
        inp = unhex('1c94d7de000000019f1d6915ca8035e207292f3f4f88237da9876505dee100dfbda9fd1cd278d3590840109465e5ed347fdeb6fc2ca8c25fa5cf6e317d977f6c5209f46c30055f5c531c')
        key = unhex('1ee5eec12cfbf3cc311b855ddfddf913cff40b3a7dce058c4e46b5ba9026ba971a973144cbf180ceca7d35e1600048d414f7d5399b4ae46732c34d898fa68fbb0dbcea10d84201734e83c824d0f66207cf6f1b6a2ba13b9285329707facbc060')
        out = unhex('aa761d7d39c1503e3f4601f1e331787dca67794357650d76f6408fb9ea37f9eede1f45fcc741a3ec06e9d23be97eb1fbbcbe64bc6b2c010827469a8a0abbb008b11effefe95ddd558026dd2ce83838d7a087e71d8a98e5cbee59f9f788e99dbe7f9032912a4384af760c56da8d7a40ab057796ded052be17a69a6d14e703a621')
        version = triplesec.versions.get_version(1, compatibility=True)
        self.assertEqual(out, b''.join(TripleSec._generate_macs(inp, [key[:48], key[48:]], version)))

    # Each cipher primitive must round-trip, and XSalsa20 must match two
    # known-answer vectors.
    def test_ciphers(self):
        s = triplesec.rndfile.read(100)
        k = triplesec.rndfile.read(32)
        for c in (triplesec.crypto.XSalsa20, triplesec.crypto.AES, triplesec.crypto.Twofish):
            self.assertEqual(s, c.decrypt(c.encrypt(s, k, c.generate_iv_data(triplesec.rndfile)), k), c.__name__)
        ciphertext = b'24-byte nonce for xsalsa' + unhex('002d4513843fc240c401e541')
        self.assertEqual(b'Hello world!', triplesec.crypto.XSalsa20.decrypt(ciphertext,
            b'this is 32-byte key for xsalsa20'))
        ciphertext = b'24-byte nonce for xsalsa' + unhex(
            '4848297feb1fb52fb66d81609bd547fabcbe7026edc8b5e5e449d088bfa69c088f5d8da1d791267c2c195a7f8cae9c4b4050d08ce6d3a151ec265f3a58e47648')
        self.assertEqual(b'\x00' * 64, triplesec.crypto.XSalsa20.decrypt(ciphertext,
            b'this is 32-byte key for xsalsa20'))

    # Run the per-version spec files: self-consistency both ways, spec
    # decryption, and deterministic encryption from a fixed random tape.
    def test_spec(self):
        for version in _versions.keys():
            compatibility = version in {1, 3}
            path = os.path.dirname(os.path.realpath(os.path.abspath(__file__)))
            with open(os.path.join(path, "spec/triplesec_v{}.json".format(version))) as specfile:
                vectors = json.load(specfile)
            for v in vectors['vectors']:
                key = unhex(v['key'])
                pt = unhex(v['pt'])
                ct = unhex(v['ct'])
                rndstream = six.BytesIO(unhex(v['r']))

                # Self-consistency
                got_self_compat = triplesec.encrypt(pt, key, compatibility=compatibility)
                self.assertEqual(pt, triplesec.decrypt(got_self_compat, key, compatibility=compatibility))

                # Self-consistency for reverse compatibility
                got_self_rev_compat = triplesec.encrypt(pt, key, compatibility=not compatibility)
                self.assertEqual(pt, triplesec.decrypt(got_self_rev_compat, key, compatibility=not compatibility))

                # Able to decrypt spec
                self.assertEqual(pt, triplesec.decrypt(ct, key, compatibility=compatibility))

                # Correct encryption with fixed random tape
                T = TripleSec(key, rndstream=rndstream)
                got = T.encrypt(pt, v=version, compatibility=compatibility)
                self.assertEqual(hexlify(got), hexlify(ct))
| true |
c850b86511d64a5f8ad1a41841e36be1fda4b34a | Python | phong010198/multimediaGroup11 | /bai5/bai5.py | UTF-8 | 1,901 | 3.21875 | 3 | [] | no_license | import matplotlib.pyplot as plt
from numpy import zeros

# Build four demo images (an 8x8 checkerboard and three blue gradients),
# save each one as a PNG and show them together in a 2x2 figure.
fig = plt.figure(figsize=(5, 5))

# Empty RGB pixel matrix (height x width x channels).
imga = zeros([800, 800, 3])
h = len(imga)
w = len(imga[0])
R = 0
G = 1
B = 2

# --- 8x8 black-and-white checkerboard -----------------------------------
# A cell is white when its row index plus its column index is odd; this
# reproduces the original toggle-based walk (top-left cell is black).
cell_h = h // 8
cell_w = w // 8
for y in range(h):
    for x in range(w):
        shade = (y // cell_h + x // cell_w) % 2
        imga[y, x] = (shade, shade, shade)
# Save the image, then display it in the first quadrant.
plt.imsave("chessboard.png", imga, format="png")
fig.add_subplot(2, 2, 1)
plt.imshow(imga)

# --- Horizontal gradient: blue grows from left to right -----------------
for y in range(h):
    for x in range(w):
        imga[y, x] = (1, 0, x / float(w))
plt.imsave("ngang.png", imga, format="png")
fig.add_subplot(2, 2, 2)
plt.imshow(imga)

# --- Vertical gradient: blue grows from top to bottom -------------------
for y in range(h):
    for x in range(w):
        imga[y, x] = (1, 0, y / float(h))
plt.imsave("doc.png", imga, format="png")
fig.add_subplot(2, 2, 3)
plt.imshow(imga)

# --- Diagonal gradient ---------------------------------------------------
for y in range(h):
    for x in range(w):
        imga[y, x] = (1, 0, (x + y) / float(w + h))
plt.imsave("cheo.png", imga, format="png")
fig.add_subplot(2, 2, 4)
plt.imshow(imga)

# Show all four images.
plt.show()
# Demo of basic dict operations: lookup, update, insert and nested access.
osoba = {
    "imie": "Marek",
    "nazwisko": "Banach",
    "wiek": 25,
    "hobby": ["programowanie", "wycieczki"],
    "student": True,
    "telefon": {"stacjonarny": "2233", "komorkowy": "7788"},
}
print(osoba)

# Read single values.
print(osoba["imie"])
print(osoba["hobby"])

# Overwrite an existing entry.
osoba["nazwisko"] = "Nowak"
print(osoba)

# Insert a brand-new key.
osoba["plec"] = "mezczyzna"
print(osoba)

# Mutate the nested list in place.
osoba["hobby"].append("rower")
print(osoba)

# Add an entry to the nested dict.
osoba["telefon"]["sluzbowy"] = 3131
print(osoba)
6af6c1f7a1774a367acee389a23e016895b83e25 | Python | timohouben/python_scripts | /ogs_multi_folder/ogs_multi_folder.py | UTF-8 | 1,755 | 2.640625 | 3 | [] | no_license | #!/usr/bin/env python2 ### this has to be changed to your python2 executable
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 13 11:14:41 2018

@author: houben

Run multiple OGS model runs below the current working directory.

A folder is treated as an OGS model run when it contains a .gli file.
First change into the parent directory that holds one subdirectory per
model set-up, then execute this script.
"""
import os
import os.path
import datetime

# Path of the OGS executable; change this to your own installation.
OGS_EXECUTABLE = "/Users/houben/PhD/ogs5/executable/ogs5"

cwd = os.getcwd()
for dirpath, dirnames, filenames in os.walk("."):
    # A .gli file identifies a folder with an OGS model run (this
    # detection may have to be changed for OGS6).
    for filename in [f for f in filenames if f.endswith(".gli")]:
        time = datetime.datetime.now().strftime("%Y%m%d_%H:%M:%S")
        print("Start time: " + str(time))
        print("OGS Run : " + str(dirpath[1:]) + "/" + str(filename[:-4]))
        # Absolute base name of the model: cwd plus dirpath without the
        # leading "." plus the .gli file name without its extension.
        # (Previously this was rebuilt inline four times inside one
        # giant os.system expression.)
        model_base = cwd + dirpath[1:] + "/" + filename[:-4]
        # Redirect OGS output to a log file next to the model files,
        # tagged with the date only (time[:-9] strips "_HH:MM:SS").
        log_file = model_base + "_" + time[:-9] + ".log"
        os.system(OGS_EXECUTABLE + " " + model_base + " >" + log_file)
print("ogs runs finished")
| true |
536e1842757ac7a44fb249514c8373a0a225a187 | Python | simonbowly/strong-graphs | /strong_graphs/output.py | UTF-8 | 841 | 2.859375 | 3 | [
"MIT"
] | permissive |
def output(graph, sum_of_distances, output_dir="output/", source=0):
    """
    Write a graph in 'extended DIMACS format', the input format expected
    by the shortest-path algorithms in SPLib.

    DIMACS node numbering starts at 1, not 0, so every node index is
    shifted up by one on output.

    :param graph: graph exposing number_of_nodes(), number_of_arcs() and
        arcs() (the latter yielding (tail, head, weight) triples)
    :param sum_of_distances: value embedded in the output file name
    :param output_dir: directory prefix for the output file (must exist)
    :param source: 0-based index of the source node written on the "n"
        line; defaults to node 0, matching the previous hard-coded value
    """
    n = graph.number_of_nodes()
    m = graph.number_of_arcs()
    filename = f"strong-graph-{n}-{m}-{sum_of_distances}"  # Other input data required
    with open(output_dir + filename, 'w') as f:
        f.write("c Strong graph for shortest paths problem\n")
        f.write("c extended DIMACS format\nc\n")
        f.write("t (unknown)\nc\n")  # was a placeholder f-string with no fields
        # Problem line (node/arc counts), then the 1-based source node.
        f.write(f"p sp {n:10} {m:10}\nc\n")
        f.write(f"n {source+1:10}\nc\n")
        for u, v, w in graph.arcs():
            f.write(f"a {u+1:10} {v+1:10} {w:10}\n")
d34f3d53cd0db6284f250e79fca25e42b06a2a09 | Python | vivianbuan/cs3240-s15-team20 | /Standalone/decrypt.py | UTF-8 | 1,448 | 2.796875 | 3 | [
"MIT"
] | permissive | from Crypto import Random
from Crypto.Cipher import AES
import sys
#def pad(s):
# return s + b"\0" * (AES.block_size - len(s) % AES.block_size)
#def decrypt(ciphertext, key):
# iv = ciphertext[:AES.block_size]
# cipher = AES.new(key, AES.MODE_CBC, iv)
# import pdb; pdb.set_trace()
# plaintext = cipher.decrypt(ciphertext[:AES.block_size])
# print(plaintext)
# return plaintext.rstrip(b"\0")
#def decrypt_file(file_name, key):
# with open(file_name, 'rb') as fo:
# ciphertext = fo.read(16)
# print(ciphertext)
# dec = decrypt(ciphertext, key)
# print(dec)
# with open(file_name + ".dec", 'wb') as fo:
# fo.write(dec)
def decrypt_file(in_filename, out_filename, key):
    """
    Decrypt an AES-CBC encrypted file, chunk by chunk.

    The input file is expected to start with the initialization vector
    (AES.block_size bytes), immediately followed by the ciphertext.

    :param in_filename: path of the encrypted input file
    :param out_filename: path the decrypted output is written to
    :param key: AES key (bytes)
    """
    CHUNK_SIZE = 8192
    with open(in_filename, 'rb') as source:
        # The IV is stored as the first block of the file.
        cipher = AES.new(key, AES.MODE_CBC, source.read(AES.block_size))
        with open(out_filename, 'wb') as target:
            while True:
                chunk = source.read(CHUNK_SIZE)
                if not chunk:
                    break
                target.write(cipher.decrypt(chunk))
# Hard-coded demo key (must match the key used during encryption).
key = b'\x1euIg6\x1f\x0el\xc6\xd2\xcf\xc2\xf6m\xf1\x8e'

# Take the file names from the command line when given, otherwise ask.
if len(sys.argv) > 2:
    filename = sys.argv[1]
    outname = sys.argv[2]
else:
    filename = input("Please enter a file to decrypt: ")
    outname = input("Please enter an out_file name: ")

decrypt_file(filename, outname, key)
| true |
3d9573af6d1278f4f736bc710c8e2c0d7db3dea4 | Python | benbendaisy/CommunicationCodes | /python_module/examples/105_Construct_Binary_Tree_from_Preorder_and_Inorder_Traversal.py | UTF-8 | 1,186 | 3.515625 | 4 | [] | no_license | # Definition for a binary tree node.
class TreeNode:
    """A binary tree node: a value plus optional left/right children."""
    def __init__(self, val=0, left=None, right=None):
        self.val = val      # value stored at this node
        self.left = left    # left child (TreeNode or None)
        self.right = right  # right child (TreeNode or None)
from typing import List, Optional
class Solution:
    def buildTree(self, preorder: List[int], inorder: List[int]) -> Optional[TreeNode]:
        """
        Rebuild a binary tree (with unique node values) from its preorder
        and inorder traversals and return its root.
        """
        if not preorder or not inorder:
            return None
        # Position of every value in the inorder traversal, for O(1)
        # splits into left and right subtrees.
        position = {value: index for index, value in enumerate(inorder)}

        def build(pre_lo: int, pre_hi: int, in_lo: int, in_hi: int):
            # Empty range: no subtree here.
            if pre_lo > pre_hi or in_lo > in_hi:
                return None
            root_value = preorder[pre_lo]
            # Single element: a leaf.
            if pre_lo == pre_hi or in_lo == in_hi:
                return TreeNode(root_value)
            root = TreeNode(root_value)
            # Everything left of the root in the inorder range belongs to
            # the left subtree.
            left_size = position[root_value] - in_lo
            root.left = build(pre_lo + 1, pre_lo + left_size,
                              in_lo, position[root_value] - 1)
            root.right = build(pre_lo + left_size + 1, pre_hi,
                               position[root_value] + 1, in_hi)
            return root

        return build(0, len(preorder) - 1, 0, len(preorder) - 1)
if __name__ == "__main__":
    # Quick manual check: rebuild the tree for the sample traversals and
    # print the resulting root node.
    solver = Solution()
    print(solver.buildTree([1, 2, 3], [2, 3, 1]))
| true |
e2cd2c5a03f126a0d74a175334b0ea0a43f949d5 | Python | Michielvsb/automatic-environment-map | /data_sources/squarewavesequence.py | UTF-8 | 4,321 | 2.75 | 3 | [] | no_license | from torch.utils.data import Dataset
import csv
import cv2
import numpy
from math import floor
from random import uniform
from math import radians, cos, sin
class SquareWaveSequence():
    def __init__(self, file, translate=10, amount=1, crop_size=(256, 256), patch_size=(128, 128), patch_location = (128, 128)):
        """Prepare a moving/rotating crop sequence over the given image."""
        # Size of the crop window taken from the source image.
        self.crop_size = crop_size
        # Patch size/location are stored but not used in the code visible
        # here; presumably consumed elsewhere -- confirm before removing.
        self.patch_size = patch_size
        self.patch_location = patch_location
        # Pixels moved per step of the square-wave walk (see get()).
        self.translate = translate
        # Maximum random rotation increment per frame, used as
        # uniform(-amount, amount); presumably degrees for
        # cv2.getRotationMatrix2D -- confirm.
        self.amount = amount
        self.image = cv2.imread(file)
        #self.image = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
        self.i = 0    # index of the last frame served by get()
        self.h = 0    # horizontal step count along the walk
        self.v = 0    # vertical step count along the walk
        self.dir = 0  # current walk direction (0..3, see update_index)
        self.mov = 0  # steps already taken in the current direction
        # Last translation offsets applied to the four reference corners.
        self.pos = numpy.array([[0,0],[0,0],[0,0],[0,0]], dtype="float32")
        # Current random rotation matrix (identity until the first frame).
        self.rand_h = numpy.array([[1, 0,0],[0,1,0]], dtype="float32")
        self.rotation = 0  # accumulated rotation angle
def update_index(self):
if self.dir == 0 or self.dir == 2:
self.h += 1
self.mov += 1
if self.mov > 9:
self.dir += 1
self.mov = 0
elif self.dir == 3:
self.v += 1
self.mov += 1
if self.mov > 9:
self.dir = 0
self.mov = 0
elif self.dir == 1:
self.v -= 1
self.mov += 1
if self.mov > 9:
self.dir += 1
self.mov = 0
def get(self, i):
if (i > self.i):
self.pos = numpy.array(
[[self.h * self.translate, self.v * self.translate], [self.h * self.translate, self.v * self.translate],
[self.h * self.translate, self.v * self.translate], [self.h * self.translate, self.v * self.translate]], dtype="float32")
self.update_index()
src = numpy.array([[self.center[1] + self.h * self.translate, self.center[0] + self.v * self.translate],
[self.center[1] + self.h * self.translate, self.center[0] + self.v * self.translate + self.crop_size[1]],
[self.center[1] + self.h * self.translate + self.crop_size[1], self.center[0] + self.v * self.translate + self.crop_size[1]],
[self.center[1] + self.h * self.translate + self.crop_size[1], self.center[0] + self.v * self.translate]], dtype="float32")
rotation_center = (self.center[1] + self.h * self.translate + (self.crop_size[1] / 2), self.center[0] + self.v * self.translate + (self.crop_size[1] / 2))
self.rotation = self.rotation+uniform(-self.amount, self.amount)
self.rand_h = cv2.getRotationMatrix2D(rotation_center, self.rotation, 1)
self.i = i
self.center = (self.image.shape[0] / 2, self.image.shape[1] / 2)
if self.center[0] + self.v * self.translate + self.crop_size[1] > self.image.shape[0] or \
self.center[0] + self.v * self.translate < 0 or \
self.center[1] + self.h * self.translate + self.crop_size[1] > self.image.shape[1] or \
self.center[1] + self.h * self.translate < 0:
raise StopIteration
image = cv2.warpAffine(self.image, self.rand_h, (self.image.shape[1], self.image.shape[0]))
image1 = image[self.center[0] + self.v * self.translate : self.center[0] + self.v * self.translate + self.crop_size[1], self.center[1] + self.h * self.translate : self.center[1] + self.h * self.translate + self.crop_size[1]]
patch1 = image1[self.patch_location[0] - self.patch_size[0] / 2:self.patch_location[0] + self.patch_size[0] / 2,
self.patch_location[1] - self.patch_size[1] / 2:self.patch_location[1] + self.patch_size[1] / 2]
patch1 = cv2.cvtColor(patch1, cv2.COLOR_BGR2GRAY)
h4pt = numpy.array([[self.h * self.translate, self.v * self.translate], [self.h * self.translate, self.v * self.translate],
[self.h * self.translate, self.v * self.translate], [self.h * self.translate, self.v * self.translate]], dtype="float32") - self.pos
src = numpy.array([[0.0, 0.0], [0.0, self.crop_size[0]],
[self.crop_size[1],self.crop_size[0]], [self.crop_size[1], 0.0]], dtype="float32")
h = cv2.getPerspectiveTransform(src, src+h4pt)
return patch1,image1, h | true |
f2709dbe0f07b5219e443f7b55ad8c4bfef3689f | Python | CDog5/AutoUpdate | /Generators.py | UTF-8 | 1,113 | 4.15625 | 4 | [] | no_license | # generator object that splits a sentence into words
def sentence_to_words(words):
    """Yield the words of *words*, treating common punctuation as separators."""
    # Map every separator character to a space in one pass, then split on
    # whitespace (which also collapses runs of separators).
    separators = str.maketrans({ch: " " for ch in ".,?!- :;"})
    yield from words.translate(separators).split()
# generator object that splits a string into certain chars
def string_to_chars(string, mode="all"):
    """Yield selected characters of *string* according to *mode*.

    Modes are matched case-insensitively by substring, first match wins:
    "all" (every character), "alpha" (ASCII letters, either case),
    "numeric" (ASCII digits), "special" (a fixed symbol set).
    An unrecognized mode yields nothing.
    """
    lowered = mode.lower()
    if "all" in lowered:
        selected = list(string)
    elif "alpha" in lowered:
        letters = "abcdefghijklmnopqrstuvwxyz"
        selected = [ch for ch in string if ch.lower() in letters]
    elif "numeric" in lowered:
        digits = "0123456789"
        selected = [ch for ch in string if ch in digits]
    elif "special" in lowered:
        symbols = r"!£$%^&*()_+{}[]#~/*-¬`@\|"
        selected = [ch for ch in string if ch in symbols]
    else:
        selected = []
    yield from selected
# Demo: extract only the alphabetic characters from a sample sentence.
s = string_to_chars("Hello people, I am Bob!","alpha")
print([c for c in s])
| true |