blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
bf05b7ae2b7990bc479aedee1522fc5f0242929a | Python | michael-kwan/llanalysis | /src/util.py | UTF-8 | 1,469 | 2.84375 | 3 | [] | no_license | from bs4 import BeautifulSoup as bs
import requests
import json
import numpy as np
import csv
def login():
    """Create a requests Session logged in to learnedleague.com.

    Reads credentials from ./logindata.json (keys:
    logindetails['learnedleague']['username'/'password']) and POSTs them
    to the login endpoint. Returns the authenticated Session.
    """
    # Fix: the original `json.load(open(...))` never closed the file.
    with open('./logindata.json') as fh:
        logindetails = json.load(fh)
    payload = {
        'login': 'Login',
        'username': logindetails['learnedleague']['username'],
        'password': logindetails['learnedleague']['password'],
    }
    ses1 = requests.Session()
    ses1.post('https://www.learnedleague.com/ucp.php?mode=login', data=payload)
    print("created new session with login {}".format(payload['username']))
    return ses1
def get_player_ids(session=None):
    """Return a dict mapping lower-cased player name -> profile href.

    Without a session, the local cache ./data/players.csv is read.
    With a session, the remote search endpoint is scraped and the local
    cache is rewritten for future session-less calls.
    """
    players2 = {}
    if not session:
        #print("retrieving players from local")
        # Fix: close the CSV file (original left the handle open).
        with open('./data/players.csv', 'r') as cache:
            for k, v in csv.reader(cache):
                players2[k] = v
    else:
        #print("retrieving players from remote")
        url = 'https://learnedleague.com/backend-search.php?term='
        soup = bs(session.get(url).content, features='html.parser')
        players = []
        for link in soup.find_all('a', href=True):
            name = link.p.span.text.lower()
            players.append([name, link['href']])
            players2[name] = link['href']
        # Refresh the on-disk cache used by the session-less branch.
        np.savetxt("./data/players.csv", players, delimiter=',', fmt='% s')
    return players2
def url_to_bs(url, session):
    """Fetch *url* with the given session and parse it into BeautifulSoup."""
    html = session.get(url).content
    return bs(html, features='html.parser')
| true |
c8861acc1784603d97c7c2132840f988f831df39 | Python | pwdemars/projecteuler | /josh/Extras/permute.py | UTF-8 | 373 | 2.796875 | 3 | [] | no_license | def permute(num):
    # In-place "next permutation" attempt over the list `num`.
    # NOTE(review): relies on num.index(...) to locate values, so it
    # misbehaves when `num` contains duplicates, and `k` is never bound
    # when the sequence is already the last permutation — TODO confirm
    # callers guarantee distinct values and a non-final permutation.
    ay = False
    # Scan right-to-left for the first element `a` that has a larger
    # element `b` somewhere to its right (the "pivot").
    for a in num[-2::-1]:
        for b in num[:num.index(a):-1]:
            if b > a:
                k = a
                ay = True
                break
        if ay:
            break
    # Find the rightmost element greater than the pivot `k`.
    for a in num[:num.index(k):-1]:
        if a > k:
            l = a
            break
        elif a == b:
            print('uhoh')
            exit()
    # Swap pivot and successor (index-based — see duplicate caveat above),
    # then reverse the suffix after the pivot's new position.
    num[num.index(l)] = k
    num[num.index(k)] = l
    num[num.index(l)+1::1] = num[:num.index(l):-1]
    return num
# Bare `print` — a leftover no-op expression in Python 3.
print
| true |
e17fe7c9f4a4a525cfa6ff91f6bb09d9290c44ee | Python | raulgranja/Python-Course | /PythonExercicios/ex108/moeda.py | UTF-8 | 1,029 | 3.890625 | 4 | [
"MIT"
] | permissive | def moeda(valor=0, moeda='R$'):
return f'{moeda} {valor:.2f}'.replace('.', ',')
def aumentar(preco, fator, moeda=''):
    """
    Increase a value by a given percentage.

    :param preco: base value
    :param fator: percentage increase
    :param moeda: unused; kept only for call compatibility
    :return: increased value
    """
    return preco * (1 + fator / 100)
def diminuir(preco, fator):
    """
    Decrease a value by a given percentage.

    :param preco: base value
    :param fator: percentage decrease
    :return: decreased value
    """
    return preco * (1 - fator / 100)
def dobro(preco):
    """Return twice the given value."""
    return preco * 2
def metade(preco):
    """Return half of the given value."""
    return preco / 2
| true |
524a6f4f3bb8b48c35f3b813d870cb5e8bd0aacd | Python | Arunken/PythonScripts | /1_Basics/3_NumericFunctions.py | UTF-8 | 428 | 4.03125 | 4 | [
"Apache-2.0"
] | permissive |
# Demo script: built-in numeric/character helper functions.
# Return the character of the given ASCII value
a = chr(67)
# Returns the ASCII value of the character
b = ord('A')
# Returns the absolute value of the number
c = abs(-64)
# Returns both the quotient and remainder
q,r = divmod(16,5)
# returns the rounded number (note: rebinds `a`; only `a` is printed below)
a = round(4.33444333,5) # max five digits after decimal point
print(a)
import math
x = math.ceil(5.33) # 6
y = math.floor(5.87) # 5
z = math.sqrt(4)
1415c8842ced3de45288f9323abe916665e71664 | Python | geparada/my_src | /Tools/Extract_seq_from_genome.py | UTF-8 | 716 | 2.734375 | 3 | [] | no_license | import sys
import csv
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.Alphabet import generic_dna
Genome = {}
def Genomictabulator(fasta):
    # Load every chromosome record of the FASTA file into the module-level
    # Genome dict (id -> Bio.Seq sequence). Python 2 module (print >> syntax).
    print >> sys.stderr, "Cargando genoma en la memoria RAM ...",
    # Fix: use a context manager so the file is closed even if SeqIO.parse
    # raises (original closed it manually after the loop only).
    with open(fasta) as f:
        for chrfa in SeqIO.parse(f, "fasta"):
            Genome[chrfa.id] = chrfa.seq
    print >> sys.stderr, "OK"
def main (chr, start, end, strand):
""" Extrae genoma de coordenadas """
ID = chr + ":" + str(start) + strand + str(end)
seq = Genome[chr][start:end]
if strand == "-":
seq = seq.reverse_complement()
print ">" + ID
print str(seq)
# CLI: genome_fasta chrom start end strand
if __name__ == '__main__':
    Genomictabulator(sys.argv[1])
    main(sys.argv[2], int(sys.argv[3]), int(sys.argv[4]), sys.argv[5])
| true |
b9cd30d633ba689257ee29490803588208f098c5 | Python | kinpoll/python- | /mysqlpython.py | UTF-8 | 1,095 | 2.671875 | 3 | [] | no_license | # coding=utf-8
'''
mysql交互类\n
env:python 3.5\n
mysql\n
'''
from pymysql import *
class Mysqlpython:
    """Small pymysql helper: one connection per operation.

    Every call opens a fresh connection/cursor and closes it when done.
    """
    def __init__(self, database, host='localhost', user='root', password='123456', charset='utf8', port=3306):
        self.database = database
        self.host = host
        self.user = user
        self.password = password
        self.charset = charset
        self.port = port

    def open(self):
        # Open a new connection and cursor for a single operation.
        self.db = connect(host=self.host, user=self.user, password=self.password,
                          database=self.database, charset=self.charset, port=self.port)
        self.cur = self.db.cursor()

    def close(self):
        self.cur.close()
        self.db.close()

    def zhixing(self, sql, L=[]):
        # "zhixing" = execute: run a write statement and commit.
        # (L is never mutated, so the mutable default is harmless.)
        self.open()
        try:
            self.cur.execute(sql, L)
            self.db.commit()
        finally:
            # Fix: close even when execute/commit raises.
            self.close()

    def get_select(self, sql, L=[]):
        """Run a SELECT and return all rows."""
        self.open()
        try:
            self.cur.execute(sql, L)
            return self.cur.fetchall()
        finally:
            # Fix: the original never closed the connection opened here,
            # leaking one connection per query.
            self.close()
# Ad-hoc smoke test against the local "chatroom" database.
if __name__ =='__main__':
    sqlh = Mysqlpython('chatroom')
    a=sqlh.get_select('select username from user where password=987')
    print(a)
c6ef7bfc4e10af8e5ac9f626970f3594fced377c | Python | wooseok-song/Algorithm-Python- | /2021 summer/0622/미로탐색(2178).py | UTF-8 | 646 | 2.921875 | 3 | [] | no_license | import sys
from collections import deque
input=sys.stdin.readline  # fast reads for competitive programming
n,m=map(int,input().split())  # grid height and width
# Maze cells: 1 = open, 0 = wall; open cells are overwritten with distances.
s=[list(map(int,input().strip())) for _ in range(n)]
visited=[[0]*m for _ in range(n)]
ds=[(1,0),(0,1),(-1,0),(0,-1)]  # 4-neighbour offsets
def bfs(start):
    """Breadth-first flood fill from *start*; writes step counts into `s`.

    `s` doubles as the distance array: open cells hold 1 until reached,
    then 1 + distance-in-steps (the start keeps its initial value 1).
    """
    queue = deque([start])
    visited[start[0]][start[1]] = 1
    while queue:
        x, y = queue.popleft()
        for dx, dy in ds:
            nx = x + dx
            ny = y + dy
            # Fix: mark visited on enqueue. The original marked on pop,
            # which re-enqueued cells and could overwrite the distance of
            # any cell still holding 1 — notably corrupting the start cell.
            if 0 <= nx < n and 0 <= ny < m and s[nx][ny] == 1 and visited[nx][ny] == 0:
                visited[nx][ny] = 1
                s[nx][ny] = s[x][y] + 1
                queue.append((nx, ny))
res=bfs((0,0))  # mutates s in place; the return value is always None
print(s[n-1][m-1])  # path length to the bottom-right cell
| true |
35b1756cae1a2aba342df54716c487e4b3308dd2 | Python | lzbotha/aruba-tech-assessment | /aplocation/geolocation.py | UTF-8 | 940 | 2.71875 | 3 | [] | no_license | import json
import requests
_API_URL = 'https://www.googleapis.com/geolocation/v1/geolocate'
import logging
logger = logging.getLogger(__name__)
def make_geolocation_request(wifi_access_points, api_key):
    """
    POST the given wifi access points to Google's Geolocation service.

    Args:
        wifi_access_points: list of dicts describing visible access points
        api_key: the API key string

    Returns:
        dict with the decoded JSON response (which may itself describe
        an API error).
    """
    body = {
        'considerIp': 'false',  # this avoids defaulting to something stupid
        'wifiAccessPoints': wifi_access_points,
    }
    response = requests.post(
        url=_API_URL,
        params={'key': api_key},
        json=body,
    )
    return response.json()
bf3139c4acd964484612282baabc22ce30841a20 | Python | varunkumar032/lockdown-leetcode | /april2020/solutions/day28_FirstUniqueNumber.py | UTF-8 | 2,779 | 3.9375 | 4 | [] | no_license | # You have a queue of integers, you need to retrieve the first unique integer in the queue.
# Implement the FirstUnique class:
# FirstUnique(int[] nums) Initializes the object with the numbers in the queue.
# int showFirstUnique() returns the value of the first unique integer of the queue, and returns -1 if there is no such integer.
# void add(int value) insert value to the queue.
# Example 1:
# Input:
# [“FirstUnique”,”showFirstUnique”,”add”,”showFirstUnique”,”add”,”showFirstUnique”,”add”,”showFirstUnique”]
# [[[2,3,5]],[],[5],[],[2],[],[3],[]]
# Output:
# [null,2,null,2,null,3,null,-1]
# Explanation:
# FirstUnique firstUnique = new FirstUnique([2,3,5]);
# firstUnique.showFirstUnique(); // return 2
# firstUnique.add(5); // the queue is now [2,3,5,5]
# firstUnique.showFirstUnique(); // return 2
# firstUnique.add(2); // the queue is now [2,3,5,5,2]
# firstUnique.showFirstUnique(); // return 3
# firstUnique.add(3); // the queue is now [2,3,5,5,2,3]
# firstUnique.showFirstUnique(); // return -1
# Example 2:
# Input:
# [“FirstUnique”,”showFirstUnique”,”add”,”add”,”add”,”add”,”add”,”showFirstUnique”]
# [[[7,7,7,7,7,7]],[],[7],[3],[3],[7],[17],[]]
# Output:
# [null,-1,null,null,null,null,null,17]
# Explanation:
# FirstUnique firstUnique = new FirstUnique([7,7,7,7,7,7]);
# firstUnique.showFirstUnique(); // return -1
# firstUnique.add(7); // the queue is now [7,7,7,7,7,7,7]
# firstUnique.add(3); // the queue is now [7,7,7,7,7,7,7,3]
# firstUnique.add(3); // the queue is now [7,7,7,7,7,7,7,3,3]
# firstUnique.add(7); // the queue is now [7,7,7,7,7,7,7,3,3,7]
# firstUnique.add(17); // the queue is now [7,7,7,7,7,7,7,3,3,7,17]
# firstUnique.showFirstUnique(); // return 17
# Example 3:
# Input:
# [“FirstUnique”,”showFirstUnique”,”add”,”showFirstUnique”]
# [[[809]],[],[809],[]]
# Output:
# [null,809,null,-1]
# Explanation:
# FirstUnique firstUnique = new FirstUnique([809]);
# firstUnique.showFirstUnique(); // return 809
# firstUnique.add(809); // the queue is now [809,809]
# firstUnique.showFirstUnique(); // return -1
from collections import defaultdict, deque
class FirstUnique:
    """Queue that reports its first unique (count == 1) value.

    `myMap` counts occurrences; `myQueue` records insertion order of
    first occurrences and is pruned lazily.
    """
    def __init__(self, nums):
        self.myMap = defaultdict(int)
        # Fix: use a deque — the original list's pop(0) was O(n) per prune.
        self.myQueue = deque()
        for num in nums:
            self.add(num)

    def showFirstUnique(self):
        """Return the first value seen exactly once, or -1 if none."""
        # Discard queue heads that have since become duplicates.
        while self.myQueue and self.myMap[self.myQueue[0]] > 1:
            self.myQueue.popleft()
        return self.myQueue[0] if self.myQueue else -1

    def add(self, value):
        """Append *value* to the stream."""
        self.myMap[value] += 1
        if self.myMap[value] == 1:
            self.myQueue.append(value)
| true |
8016f675d1e2456b35c801491e0ec14aa68c103d | Python | rajat046/Python-programs | /product.py | UTF-8 | 2,122 | 4.375 | 4 | [] | no_license | def product():
"""calculate 10 rs"""
print("Denomination of notes(10)")
user_input_10 = input("how many notes do you have?")
value_of_10 = 10
product_of_10 = int(value_of_10) * int(user_input_10)
print(product_of_10)
"""calculate 20 rs"""
print("Denomination of notes(20)")
user_input_20 = input("how many notes do you have?")
value_of_20 = 20
product_of_20 = int(value_of_20) * int(user_input_20)
print(product_of_20)
"""calculate 50 rs"""
print("Denomination of notes(50)")
user_input_50 = input("how many notes do you have?")
value_of_50 = 50
product_of_50 = int(value_of_50) * int(user_input_50)
print(product_of_50)
"""calculate 100 rs"""
print("Denomination of notes(100)")
user_input_100 = input("how many notes do you have?")
value_of_100 = 100
product_of_100 = int(value_of_100) * int(user_input_100)
print(product_of_100)
"""calculate 200 rs"""
print("Denomination of notes(200)")
user_input_200 = input("how many notes do you have?")
value_of_200 = 200
product_of_200 = int(value_of_200) * int(user_input_200)
print(product_of_200)
"""calculate 500 rs"""
print("Denomination of notes(500)")
user_input_500 = input("how many notes do you have?")
value_of_500 = 500
product_of_500 = int(value_of_500) * int(user_input_500)
print(product_of_500)
"""calculate 2000 rs"""
print("Denomination of notes(2000)")
user_input_2000 = input("how many notes do you have?")
value_of_2000 = 2000
product_of_2000 = int(value_of_2000) * int(user_input_2000)
print(product_of_2000)
Result = [product_of_10 , product_of_20 , product_of_50 , product_of_100 , product_of_200 , product_of_500 , product_of_2000]
Result = sum(Result)
print("Result :",Result)
if Result < 10000:
print("You need to work hard or save more.")
elif Result > 10000:
print("Nice you should donate some money")
else:
print("Error, no money!")
product() | true |
a027911a44513a97818915ce34c652a3b448f81b | Python | gofflab/biolib | /src/seqlib/RIPDiff.py | UTF-8 | 1,097 | 2.578125 | 3 | [] | no_license | '''
Created on May 13, 2010
Normalizes and compares RIP vs Control (IgG or total RNA) to identify segments of transcripts that are
preferrentially enriched in RIP
@author: lgoff
'''
##################
#Imports
##################
import intervallib
import seqstats
##################
#Classes
##################
class RIPUnit(intervallib.Interval):
    """
    Can be individual transcript or some basic unit being interrogated for differential peaks (ie. chromosome)
    Extends intervallib.Interval class
    """
    def __init__(self,interval):
        """Initiate from existing instance of Interval class only"""
        assert isinstance(interval,intervallib.Interval)
        # NOTE(review): this passes `interval` as `self` to the parent
        # initializer, so attributes are (re)set on the source object,
        # not on this new RIPUnit — looks like a bug; confirm against
        # intervallib.Interval's signature.
        intervallib.Interval.__init__(interval)
    # The methods below are unimplemented stubs.
    def scan(self):
        pass
    def makebins(self,binSize):
        pass
    def binBinom(self):
        pass
    def binPois(self):
        pass
    def fetchReads(self,bamHandle):
        pass
#################
#Functions
#################
# Normalization strategies — both are unimplemented placeholders.
def globalNorm(ripUnit,totReads):
    pass
def localNorm(ripUnitA,ripUnitB):
    pass
| true |
f71198960414c533a8772396c60af34e7f70f15a | Python | svakhnyuk/opsworks-scrapper | /target/pipelines.py | UTF-8 | 941 | 2.65625 | 3 | [] | no_license | import target.model
from configparser import ConfigParser
class TargetPipeline(object):
    # Scrapy item pipeline that persists items to MongoDB unless the
    # storage config disables it.
    store_mongo_db = True
    # NOTE(review): class-level mutable dict shared by all instances;
    # nothing in this file ever populates it, so the 'mongo' check in
    # __init__ can never fire — presumably it was meant to be filled
    # from scrapy.cfg. TODO confirm.
    storage_config = {}
    def __init__(self):
        # The parsed config is read but never used — likely intended to
        # populate storage_config (see note above).
        parser = ConfigParser()
        parser.read('scrapy.cfg')
        if 'mongo' in self.storage_config and self.storage_config['mongo'].lower() == "false":
            self.store_mongo_db = False
    def open_spider(self, spider):
        # Lazily create the Mongo connection when the spider starts.
        if self.store_mongo_db:
            self.mongo_db = target.model.Model_mongo_db()
    def close_spider(self, spider):
        if self.store_mongo_db:
            self.mongo_db.close()
    def process_item(self, item, spider):
        """
        Save deals in the database.
        This method is called for every item pipeline component.
        """
        if item:
            if self.store_mongo_db is True:
                self.mongo_db.insert(collection_name=self.mongo_db.collection, data=dict(item))
        return item
| true |
38f9fbda123dd808b19001973544ffd52a18cec3 | Python | iv-kis/my_python_course | /l4_iterables/switch_via_dict.py | UTF-8 | 1,406 | 3.578125 | 4 | [] | no_license | '''
Created on 5 авг. 2018 г.
@author: ivkis
'''
class ServiceException(Exception):
    """Root of the demo service-error hierarchy."""
    pass


class SystemServiceException(ServiceException):
    pass


class BusinessServiceException(ServiceException):
    pass


# Switch/case emulation via a dict: keys are the menu choices, values are
# the handler callables (stored uncalled — calling them here would raise
# while the table is being built).
def case1():
    raise BusinessServiceException('Business Service Exception')


def case2():
    raise ServiceException('Service Exception')


def case3():
    raise Exception('Exception')


select = {"1": case1, "2": case2, "3": case3}
try:
    func=select[input()] # look up the handler for the typed key
    func() # invoke the handler (each one raises)
# Order except clauses from most-derived to base class:
except KeyError:
    print("Success!")
except BusinessServiceException as BSE:
    print('BSE: {0}'.format(BSE))
#try:
#    raise ServiceException('Service Exception')
#except ServiceException as SE:
#    print(str(SE))
except ServiceException as SE:
    print('SE: {0}'.format(SE))
except Exception as E:
    print('E: {0}'.format(E))
| true |
4f69e144508144e4f693ba8aa02d9c451091af78 | Python | Vin129/IWTL_Python | /Python/DataStructure/AVLTree.py | UTF-8 | 5,691 | 3.828125 | 4 | [] | no_license | class BTreeNode:
    # Binary tree node used by AVLTree below.
    Value = None;   # stored key
    Left = None;    # left child (BTreeNode or None)
    Right = None;   # right child (BTreeNode or None)
    Depth = None;   # height of the subtree rooted here (leaf = 0)
    def __init__(self,v:int):
        self.Value = v;
class AVLTree:
    """AVL (height-balanced) binary search tree over int keys.

    `Node` is the root BTreeNode (None when empty). Insertion rebalances
    with single/double rotations; deletion is plain BST deletion.
    TODO: Delete performs no rebalancing and no Depth maintenance, so the
    tree may violate the AVL invariant after deletions.
    """
    Node = None

    def Find(self, X: int) -> BTreeNode:
        """Return the node holding X, or None."""
        # Fix: the original discarded the result of __find and always
        # returned None.
        return self.__find(self.Node, X)

    def __find(self, Node: BTreeNode, X: int):
        if Node == None:
            return None
        if Node.Value == X:
            return Node
        if Node.Value > X:
            return self.__find(Node.Left, X)
        return self.__find(Node.Right, X)

    def Add(self, X: int):
        """Insert X, rebalancing as needed."""
        self.Node = self.__add(self.Node, X)

    def __add(self, Node: BTreeNode, X: int) -> BTreeNode:
        # Recursive insert; returns the (possibly new) subtree root.
        # Duplicate keys are silently ignored (neither branch taken).
        if Node == None:
            Node = BTreeNode(X)
            Node.Depth = 0
        else:
            if Node.Value > X:
                Node.Left = self.__add(Node.Left, X)
                if self.GetDepth(Node.Left) - self.GetDepth(Node.Right) == 2:
                    if Node.Left.Value > X:
                        Node = self.SingleRotateRight(Node)  # left-left case
                    else:
                        Node = self.DoubleRotateLeft(Node)   # left-right case
            elif Node.Value < X:
                Node.Right = self.__add(Node.Right, X)
                if self.GetDepth(Node.Right) - self.GetDepth(Node.Left) == 2:
                    if Node.Right.Value < X:
                        Node = self.SingleRotateLeft(Node)   # right-right case
                    else:
                        Node = self.DoubleRotateRight(Node)  # right-left case
        Node.Depth = (self.GetDepth(Node.Right) if self.GetDepth(Node.Left) < self.GetDepth(Node.Right) else self.GetDepth(Node.Left)) + 1
        return Node

    def GetDepth(self, Node: BTreeNode) -> int:
        # Height of a subtree; the empty subtree counts as -1.
        if Node == None:
            return -1
        return Node.Depth

    # Right-right case: left rotation. The node is replaced by its right
    # child; that child's left subtree becomes the node's right subtree,
    # and the node becomes the new root's left child.
    def SingleRotateLeft(self, Node: BTreeNode) -> BTreeNode:
        TempNode = Node
        Node = TempNode.Right
        TempNode.Right = Node.Left
        TempNode.Depth = (self.GetDepth(TempNode.Right) if self.GetDepth(TempNode.Left) < self.GetDepth(
            TempNode.Right) else self.GetDepth(TempNode.Left)) + 1
        Node.Left = TempNode
        Node.Depth = (self.GetDepth(Node.Right) if self.GetDepth(Node.Left) < self.GetDepth(
            Node.Right) else self.GetDepth(Node.Left)) + 1
        return Node

    # Left-left case: right rotation (mirror of the above).
    def SingleRotateRight(self, Node: BTreeNode) -> BTreeNode:
        TempNode = Node
        Node = TempNode.Left
        TempNode.Left = Node.Right
        TempNode.Depth = (self.GetDepth(TempNode.Right) if self.GetDepth(TempNode.Left) < self.GetDepth(
            TempNode.Right) else self.GetDepth(TempNode.Left)) + 1
        Node.Right = TempNode
        Node.Depth = (self.GetDepth(Node.Right) if self.GetDepth(Node.Left) < self.GetDepth(
            Node.Right) else self.GetDepth(Node.Left)) + 1
        return Node

    # Left-right case: left-rotate the left child, then right-rotate the node.
    def DoubleRotateLeft(self, Node: BTreeNode) -> BTreeNode:
        Node.Left = self.SingleRotateLeft(Node.Left)
        return self.SingleRotateRight(Node)

    # Right-left case: right-rotate the right child, then left-rotate the node.
    def DoubleRotateRight(self, Node: BTreeNode) -> BTreeNode:
        Node.Right = self.SingleRotateRight(Node.Right)
        return self.SingleRotateLeft(Node)

    # Delete: a leaf is removed directly; a node with one child is spliced
    # out; a node with two children takes the minimum value of its right
    # subtree and that minimum node is deleted instead.
    def Delete(self, X: int):
        # Fix: the original discarded __delete's return value, so removing
        # the root itself (with at most one child) never updated self.Node.
        self.Node = self.__delete(self.Node, X)

    def __delete(self, Node: BTreeNode, X: int) -> BTreeNode:
        if Node == None:
            return None
        if Node.Value > X:
            Node.Left = self.__delete(Node.Left, X)
        elif Node.Value < X:
            Node.Right = self.__delete(Node.Right, X)
        elif Node.Value == X:
            if Node.Left != None and Node.Right != None:
                v = self.__getMin(Node.Right)
                Node.Value = v
                # Remove the right subtree's minimum, whose value we took.
                Node.Right = self.__delete(Node.Right, v)
            else:
                if Node.Left == None:
                    Node = Node.Right
                elif Node.Right == None:
                    Node = Node.Left
        return Node

    def GetMin(self) -> int:
        """Smallest key in the tree, or None when empty."""
        return self.__getMin(self.Node)

    def __getMin(self, Node) -> int:
        if Node == None:
            return None
        if Node.Left == None:
            return Node.Value
        return self.__getMin(Node.Left)

    # The maximum is the right-most node.
    def GetMax(self) -> int:
        """Largest key in the tree, or None when empty."""
        return self.__getMax(self.Node)

    def __getMax(self, Node) -> int:
        if Node == None:
            return None
        if Node.Right == None:
            return Node.Value
        return self.__getMax(Node.Right)

    def Log(self):
        """Print the tree, one node per line, indented by depth."""
        self.__log(self.Node, 0)

    def __log(self, Node: BTreeNode, depth: int):
        empty = " "
        if (Node == None):
            return
        if (Node.Value == None):
            print(empty * depth + "N")
        else:
            print(empty * depth + str(Node.Value))
        if (Node.Left != None):
            self.__log(Node.Left, depth + 1)
        if (Node.Right != None):
            self.__log(Node.Right, depth + 1)
# Smoke test: insert a sequence that forces every rotation case, then dump.
AVL = AVLTree()
A = [3,2,1,4,5,6,7,10,9,8]
for x in A:
    AVL.Add(x);
AVL.Log()
| true |
22947c1a129d865af8496d942dc0d500e7bd6c72 | Python | chhzh123/CCF-CSP | /201403-1.py | UTF-8 | 189 | 3.09375 | 3 | [] | no_license | n = int(input())
a = list(map(int,input().split()))
# Frequency table; negative values are counted via Python's negative
# indexing into the same list (assumes |a[i]| <= ~1000 so slots don't
# collide — TODO confirm against the problem's input bounds).
cnt = [0] * 2005
for i in range(n):
    cnt[a[i]] += 1
res = 0
# Count values i where both i and -i occur exactly once.
# NOTE(review): for i == 0, cnt[-0] is cnt[0], so a single 0 in the
# input counts as a pair — verify this matches the problem statement.
for i in range(1001):
    if cnt[i] == 1 and cnt[-i] == 1:
        res += 1
print(res)
print(res) | true |
a8ab3fb96bc691176c44606786f6d2cd7e59e9eb | Python | MYMSSENDOG/leetcodes | /116. Populating Next Right Pointers in Each Node.py | UTF-8 | 626 | 2.984375 | 3 | [] | no_license |
from bNode_lib import *
class Solution:
    def connect(self, root: Node) -> Node:
        """Wire each node's `next` pointer to its right neighbour on the
        same level of a perfect binary tree; the rightmost node of each
        level points to None. Returns the root.
        """
        if not root:
            return None
        level = [root]
        while level:
            # Fix: the original guarded the link with `if i != len(q)-1`
            # inside `range(len(q)-1)` — a condition that is always true.
            for left, right in zip(level, level[1:]):
                left.next = right
            level[-1].next = None
            # Perfect tree: every node on a level has either two children
            # or none, so checking one node suffices.
            if level[0].left:
                # Fix: build the next level in one pass instead of the
                # original O(n^2) list.pop(0) loop.
                level = [kid for node in level for kid in (node.left, node.right)]
            else:
                break
        return root
# Demo: makeTree / inorder_next_print come from bNode_lib.
sol = Solution()
p = makeTree([1,2,3,4,5,6,7])
inorder_next_print(sol.connect(p))
49be9d1e488769ba0a508da49437fa95c4d4fff4 | Python | reconstruir/bes | /lib/bes/archive/archive_xz.py | UTF-8 | 652 | 2.75 | 3 | [
"Apache-2.0"
] | permissive | #-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
from .archive_tar import archive_tar
class archive_xz(archive_tar):
  'XZ archive (a tar stream compressed with xz).'

  # First six bytes of any .xz stream, per the xz file-format spec
  # (https://tukaani.org/xz/xz-file-format.txt).
  _MAGIC = b'\xfd\x37\x7a\x58\x5a\x00'

  def __init__(self, filename):
    super(archive_xz, self).__init__(filename)

  @classmethod
  #@abstractmethod
  def name(clazz, filename):
    'Name of this archive format.'
    return 'xz'

  @classmethod
  #@abstractmethod
  def file_is_valid(clazz, filename):
    'True if the file starts with the xz magic bytes.'
    with open(filename, 'rb') as stream:
      return stream.read(len(clazz._MAGIC)) == clazz._MAGIC
| true |
375006d36078c6505dd22aa569c19efbeaf5b33b | Python | ezeev/install | /app-config/WF-PCInstaller/plugin_dir/telegraf/telegraf_utils.py | UTF-8 | 1,782 | 2.625 | 3 | [
"Apache-2.0"
] | permissive | import re
import common.install_utils as utils
import common.config as config
def get_sample_config(name):
    """
    Return the sample configuration for a telegraf plugin.

    Input:
        name string: the plugin name
    Output:
        the output of `telegraf -usage <name>`
    """
    usage_cmd = 'telegraf -usage {name}'.format(name=name)
    return utils.get_command_output(usage_cmd)
def edit_conf(conf, key, value):
    """
    Return *conf* with every `key = <old>` line replaced by `key = value`.

    Input:
        conf: sample config text (one `key = value` per line)
        key: the key field to rewrite
        value: the replacement value
    Output:
        res_conf: the configuration string with the field changed
    """
    # Fix: escape the key so regex metacharacters in it ('.', '[', ...)
    # cannot corrupt the pattern, and apply re.I to the substitution as
    # well — the original searched case-insensitively but substituted
    # case-sensitively, leaving differently-cased lines unchanged.
    search_re = re.compile(
        r'{key} = '.format(key=re.escape(key)), re.I)
    # The telegraf conf is one `key = value` per line.
    conf_list = conf.split('\n')
    for index, line in enumerate(conf_list):
        if search_re.search(line) is not None:
            conf_list[index] = (
                re.sub(
                    r'{key} = .*'.format(key=re.escape(key)),
                    '{key} = {value}'.format(key=key, value=value),
                    line,
                    flags=re.I))
    res_conf = '\n'.join(conf_list)
    if config.DEBUG:
        utils.cprint('After change:')
        utils.cprint(res_conf)
    return res_conf
# Module smoke test. NOTE(review): __loader__.fullname is a Python-2-era
# pkgutil attribute — likely broken on modern Python 3; confirm.
if __name__ == '__main__':
    utils.print_step('Testing module {}'.format(__loader__.fullname))
    utils.cprint(get_sample_config('apache'))
| true |
2dc7b69ad434a7840ae37cca5e1d7daa0681c4d4 | Python | svv1viktoria1soverda1mail1ru/Python | /zajecia18_10/wystompienieliczby.py | UTF-8 | 107 | 3.34375 | 3 | [] | no_license | lista=[1,2,3,4,5,6,7,8,9]
# Python 2 script: prints the 0-based index of the first occurrence of
# the typed number; raises ValueError when the number is not in the list.
print "Podaj liczbe naturalna:"
a = input("Twoja liczba :")
print lista.index(a)
41067ff880df82cc61da1b238030312af9cf8263 | Python | akshayav1996/codekata | /sum.py | UTF-8 | 112 | 3.65625 | 4 | [] | no_license | n=int(input("enter a range"))
# Sum the integers 1..n with a counting loop.
# (The name `sum` shadows the built-in for the rest of the script.)
i=1
sum=0
while(i<=n):
    sum=sum+i
    i=i+1
print(sum)
| true |
cb32b57d815077c037d45f9206586b8d0487a477 | Python | WaleedRanaC/Prog-Fund-1 | /Lab 3/Payroll.py | UTF-8 | 233 | 3.9375 | 4 | [] | no_license | #input hours
# input hours
# base pay: $10/hour
# hours beyond 40 are paid at time-and-a-half (1.5 x $10)
# print wage
rate = 10
hours = int(input("How many hours did you work this week? "))
wage = rate * hours
if hours > 40:
    # Fix: the original added `hours * 1.5` dollars on top of full
    # straight-time pay for every hour worked; overtime pay is
    # 1.5 * rate, applied only to the hours past 40.
    wage = rate * 40 + (hours - 40) * rate * 1.5
print("Your wage is: $", wage)
| true |
8636a17afd3a3c5e1f4821786b72e2eaecfc6271 | Python | Nauman3S/SmartArmBand | /testCodes/Blink.py | UTF-8 | 165 | 2.6875 | 3 | [] | no_license | import mraa
import time
led = mraa.Gpio(13)      # on-board LED pin
led.dir(mraa.DIR_OUT)
# Blink forever at ~2.5 Hz.
while True:
    led.write(1)         # LED on
    time.sleep(0.2)
    # Fix: the original called led.write(0.2) — the sleep interval pasted
    # in by mistake; a GPIO write takes the logic level 0 or 1.
    led.write(0)         # LED off
    time.sleep(0.2)
| true |
01f8cb9a53e383c831ae9c942e5ccb21e33a6eaf | Python | lingyunfx/MayaCameraRetime | /MayaCameraRetime/retime_mod.py | UTF-8 | 4,613 | 3.171875 | 3 | [] | no_license | from operator import itemgetter
import pymel.core as pm
def get_frames_range():
    """Return the Maya playback range as an inclusive range of frames."""
    first = int(pm.playbackOptions(query=1, minTime=1))
    last = int(pm.playbackOptions(query=1, maxTime=1))
    return range(first, last + 1)
def none_type_method(*args):
    """Retime sampler 'None': return the keyed value at the whole frame,
    ignoring the fractional part entirely."""
    values, frame, _unused_decimal = args
    return values[frame]
def frame_type_method(*args):
    """Retime sampler 'Frame': snap to the nearest whole frame by
    rounding the fractional part, then return that frame's value."""
    values, frame, decimal = args
    if round(decimal):
        frame = frame + 1
    return values[frame]
def motion_type_method(*args):
    """Retime sampler 'Motion': linearly interpolate between the values
    at `frame` and `frame + 1` by the fractional part."""
    values, frame, decimal = args
    base = values[frame]
    if not decimal:
        return base
    return base + (values[frame + 1] - base) * decimal
def read_node(node_path):
    """
    Read a retime-node text file and return the frame pairs.

    :param node_path: path of a whitespace-delimited txt file.
    :return: list of (frame, linked_frame) string tuples, e.g.
        [('1001', '1001.0000000000'),
         ('1002', '1002.3257210000')]
        Blank lines are skipped.
    """
    with open(node_path, 'r') as handle:
        rows = []
        for line in handle:
            if line.isspace():
                continue
            fields = line.split()
            rows.append((fields[0], fields[1]))
        return rows
class CurvesRetime(object):
    # Retimes all animation curves in the current Maya scene according to
    # a retime-node file (see read_node). `typ` selects the sampling
    # strategy: 'Motion' (interpolated), 'Frame' (nearest), 'None' (floor).
    # NOTE: uses dict.iteritems(), i.e. written for Python 2 / old Maya.
    def __init__(self, node_path, typ='Motion'):
        # Only these anim-curve node types are retimed.
        self.allow_curve = ('animCurveTL', 'animCurveTA', 'animCurveTU')
        self.nodes = read_node(node_path)
        self.frame_range = get_frames_range()
        self.retime_method = self.get_method(typ)
        # Snapshot of every curve's per-frame values, taken BEFORE any
        # keys are rewritten.
        self.animation_data = self.get_animation_data()
    def do_retime(self):
        # Apply the retime: for each (frame, linked_frame) pair, sample
        # the cached data at linked_frame and key it at frame.
        pm.waitCursor(state=True)
        self.set_playback_range()
        for line in self.nodes:
            frame, linked_frame = line
            # Split the source frame into whole + fractional parts for
            # the sampler.
            linked_frame, decimal = divmod(float(linked_frame), 1)
            self.set_curve_keyframe(frame, linked_frame, decimal)
        self.cut_key()
        pm.waitCursor(state=False)
    def get_method(self, typ):
        # Map the type name to its sampler; unknown names yield None
        # (set_curve_keyframe would then fail — callers pass a valid typ).
        return {'Motion': motion_type_method,
                'Frame': frame_type_method,
                'None': none_type_method}.get(typ)
    def get_animation_data(self):
        """
        Cache all animation curve data.
        :return: {anim_curve_node: {frame_float: value}} for every curve
            whose type is in allow_curve.
        """
        anim_curves = pm.ls(type=self.allow_curve)
        animation_data = {curve: self.get_keyframe_data(curve)
                          for curve in anim_curves if curve.type() in self.allow_curve}
        return animation_data
    def get_keyframe_data(self, curve):
        """
        Evaluate one curve at every frame of the playback range.
        :param curve: a PyMel animCurve node.
        :return: {frame_float: evaluated_value}
        """
        return {float(frame): pm.keyframe(curve, query=True, eval=True, time=frame)[0]
                for frame in self.frame_range}
    def set_playback_range(self):
        # Clamp the playback/animation range to the retimed frame span.
        min_frame = float(self.nodes[0][0])
        max_frame = float(self.nodes[-1][0])
        pm.playbackOptions(edit=1, minTime=min_frame)
        pm.playbackOptions(edit=1, maxTime=max_frame)
        pm.playbackOptions(edit=1, animationStartTime=min_frame)
        pm.playbackOptions(edit=1, animationEndTime=max_frame)
    def set_curve_keyframe(self, frame, linked_frame, decimal):
        """
        Set keyframes for all cached animation curves at one target frame.
        """
        for curve, values in self.animation_data.iteritems():
            value = self.retime_method(values, linked_frame, decimal)
            pm.setKeyframe(curve, time=frame, value=value)
    def cut_key(self):
        """
        Delete keys that fall beyond the new playback range.
        :return: None
        """
        max_time = pm.playbackOptions(query=1, maxTime=1)
        for curve in self.animation_data.keys():
            last_frame = pm.findKeyframe(curve, which='last')
            while last_frame > max_time:
                pm.cutKey(curve, time=last_frame)
                last_frame = pm.findKeyframe(curve, which='last')
| true |
f0f8e1494d89bab0bf1e40e5c2e92b4537dd5774 | Python | lucasagerber/whatiwant | /mimic.py | UTF-8 | 5,435 | 3.109375 | 3 | [] | no_license | """
whatiwant.mimic
Lucas A. Gerber
"""
import random #, goslate
from .tools import verbosePrint, numGen
class Mimic(object):
def __init__(self, filename, verbose=True):
self.filename = filename
self.mimic_dict = make_mimic_dict(filename)
self.text = mimic_lecture(self.get_mimic_dict())
self.verbose = verbose
def __str__(self):
return str(self.get_text())
def lecture(self, count=1, starting_word=None, limit=10):
while count >= 1:
verbosePrint(self.verbose, 'Composing lecture...' + str(count))
self.new_mimic(method='lecture', starting_word=starting_word, limit=limit)
verbosePrint(self.verbose, 'Complete.')
count -= 1
return self.get_text()
def poem(self, count=1, starting_word=None, limit=10):
while count >= 1:
verbosePrint(self.verbose, 'Composing poem...' + str(count))
self.new_mimic(method='poem', starting_word=starting_word, limit=limit)
verbosePrint(self.verbose, 'Complete.')
count -= 1
return self.get_text()
def translate(self, count=1, starting_word=None, limit=10):
while count >= 1:
verbosePrint(self.verbose, 'Composing translation...' + str(count))
self.new_mimic(method='translate', starting_word=starting_word, limit=limit)
verbosePrint(self.verbose, 'Complete.')
count -= 1
return self.get_text()
def new_mimic(self, method='lecture', starting_word=None, limit=100):
if method == 'lecture':
self.text = mimic_lecture(self.get_mimic_dict(), starting_word=starting_word, limit=limit)
elif method == 'poem':
self.text = mimic_poem(self.get_mimic_dict(), starting_word=starting_word, limit=limit)
elif method == 'translate':
self.text = mimic_translate(self.get_mimic_dict(), starting_word=starting_word, limit=limit)
def get_filename(self):
return self.filename
def get_mimic_dict(self):
return self.mimic_dict
def get_text(self):
return self.text
def make_mimic_dict(filename):
    """Map each word of the file to the list of words that follow it.

    The text is lower-cased and apostrophes are stripped before
    splitting; the empty-string key holds the very first word.
    """
    with open(filename, 'r') as handle:
        words = handle.read().lower().replace("'", '').split()
    mimic_dict = {}
    prev = ''
    for word in words:
        mimic_dict.setdefault(prev, []).append(word)
        prev = word
    return mimic_dict
def mimic_lecture(mimic_dict, starting_word=None, limit=10):
    """Makes a mimic lecture from a mimic dictionary.
    Limit default is 10 lines (30 seconds)"""
    if not starting_word:
        word = ''
    else:
        word = starting_word
    # (minute, second) labels every 3 seconds, via the numGen counter.
    line_label = (divmod(x*3, 60) for x in numGen())
    text = word
    # NOTE: the assignment above is immediately overwritten here — the
    # lecture always opens with a timestamp label.
    text = str(next(line_label)) + word
    line_mark = 0
    line_words = 1
    while line_mark < limit:
        # Dead-end words fall back to the sentence-start key ''.
        if word not in mimic_dict:
            word = ""
        new_word = random.choice(mimic_dict[word])
        if line_words == 0:
            # 20% chance of a "silent" line at a line start.
            # NOTE(review): the drawn word is discarded in this branch,
            # not spoken later — presumably intentional "silence".
            rand_silence = random.randint(1,10)
            if rand_silence <= 2:
                text = text + "\n" + str(next(line_label))
                line_words = 0
                line_mark += 1
                word = new_word
                continue
        text = text + " " + new_word
        # Lines hold a random 3-5 words before a new timestamped line.
        rand = random.randint(3,5)
        if line_words >= rand:
            text = text + "\n" + str(next(line_label))
            line_words = 0
            line_mark += 1
        else:
            line_words += 1
        word = new_word
    return text
def mimic_poem(mimic_dict, starting_word=None, limit=10):
    """Make a mimic poem of *limit* lines from a mimic dictionary.

    Lines hold a random 3-12 words; at a line start there is a 20%
    chance of an empty "silent" line (the drawn word is discarded).
    """
    word = starting_word if starting_word else ''
    text = word
    lines_done = 0
    words_on_line = 1
    while lines_done < limit:
        # Dead-end words fall back to the sentence-start key ''.
        if word not in mimic_dict:
            word = ""
        follower = random.choice(mimic_dict[word])
        if words_on_line == 0 and random.randint(1, 10) <= 2:
            # Occasional blank line; the drawn word is dropped.
            text += "\n"
            lines_done += 1
            word = follower
            continue
        text += " " + follower
        if words_on_line >= random.randint(3, 12):
            text += "\n"
            words_on_line = 0
            lines_done += 1
        else:
            words_on_line += 1
        word = follower
    return text
'''
def mimic_translate(mimic_dict, starting_word=None, limit=10):
"""Makes a mimic google translation from a mimic dictionary.
Limit default is 10 lines"""
text = mimic_poem(mimic_dict=mimic_dict, starting_word=starting_word, limit=limit)
gs = goslate.Goslate()
languages = [ lang for lang in gs.get_languages().keys() ]
translation_iter = random.randint(3,10)
for i in range(1, translation_iter):
language = random.choice(languages)
text = gs.translate(text, language)
text = gs.translate(text, 'en')
return text
'''
def main():
pass
if __name__ == '__main__':
main()
| true |
73ac2bb9c64d93211fb2a8e471ed54881f43172c | Python | Yihan-Dai/Leetcode-Python | /TwoSum/Twosum1.py | UTF-8 | 1,288 | 4.15625 | 4 | [] | no_license | '''Given an array of integers, find two numbers such that they add up to a specific target number.
The function twoSum should return indices of the two numbers such that they add up to the target, where index1 must be less than index2. Please note that your returned answers (both index1 and index2) are not zero-based.
You may assume that each input would have exactly one solution.
Input: numbers={2, 7, 11, 15}, target=9
Output: index1=1, index2=2
'''
'''
Given a dictionary used to store the list's values and index
key is number, values is index. [13,12,11]-->{13:0,12:1,11:2}
for i , n in enumerate(nums):
lookup the dictionary's value via list.get(target-n) # require no duplication and no none
return the result once found
'''
#drawback: store the list into a dictionary, if a list have two or three same integer, it can just contain a latest value
#but it can work
class Solution(object):
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
list = dict([(number,index) for index, number in enumerate(nums)])
for i, n in enumerate(nums):
if list.get(target-n) !=i and list.get(target-n) != None:
return [i+1,list.get(target-n)+1] | true |
495a1970fbbb0f45fee2e9124b48902c40cc1891 | Python | wjlight/euler | /tools/docker/deepwalk-demo.py | UTF-8 | 3,076 | 2.515625 | 3 | [
"Apache-2.0",
"BSD-3-Clause",
"Zlib",
"BSD-2-Clause-Views",
"BSD-2-Clause"
] | permissive | # -*- coding: utf-8 -*-
import tensorflow as tf
import tf_euler
class DeepWalk(tf_euler.layers.Layer):
def __init__(self, node_type, edge_type, max_id, dim,
num_negs=8, walk_len=3, left_win_size=1, right_win_size=1):
super(DeepWalk, self).__init__()
self.node_type = node_type
self.edge_type = edge_type
self.max_id = max_id
self.num_negs = num_negs
self.walk_len = walk_len
self.left_win_size = left_win_size
self.right_win_size = right_win_size
self.target_encoder = tf_euler.layers.Embedding(max_id + 1, dim)
self.context_encoder = tf_euler.layers.Embedding(max_id + 1, dim)
def call(self, inputs):
src, pos, negs = self.sampler(inputs)
embedding = self.target_encoder(src)
embedding_pos = self.context_encoder(pos)
embedding_negs = self.context_encoder(negs)
loss, mrr = self.decoder(embedding, embedding_pos, embedding_negs)
embedding = self.target_encoder(inputs)
return (embedding, loss, 'mrr', mrr)
def sampler(self, inputs):
batch_size = tf.size(inputs)
path = tf_euler.random_walk(
inputs, [self.edge_type] * self.walk_len,
default_node=self.max_id + 1)
pair = tf_euler.gen_pair(path, self.left_win_size, self.right_win_size)
num_pairs = pair.shape[1]
src, pos = tf.split(pair, [1, 1], axis=-1)
negs = tf_euler.sample_node(batch_size * num_pairs * self.num_negs,
self.node_type)
src = tf.reshape(src, [batch_size * num_pairs, 1])
pos = tf.reshape(pos, [batch_size * num_pairs, 1])
negs = tf.reshape(negs, [batch_size * num_pairs, self.num_negs])
return src, pos, negs
def decoder(self, embedding, embedding_pos, embedding_negs):
logits = tf.matmul(embedding, embedding_pos, transpose_b=True)
neg_logits = tf.matmul(embedding, embedding_negs, transpose_b=True)
true_xent = tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.ones_like(logits), logits=logits)
negative_xent = tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.zeros_like(neg_logits), logits=neg_logits)
loss = tf.reduce_sum(true_xent) + tf.reduce_sum(negative_xent)
mrr = tf_euler.metrics.mrr_score(logits, neg_logits)
return loss, mrr
if __name__ == '__main__':
print("begin....")
tf_euler.initialize_embedded_graph('ppi') # 图数据目录
source = tf_euler.sample_node(128, tf_euler.ALL_NODE_TYPE)
source.set_shape([128])
model = DeepWalk(tf_euler.ALL_NODE_TYPE, [0, 1], 56944, 256)
_, loss, metric_name, metric = model(source)
global_step = tf.train.get_or_create_global_step()
train_op = tf.train.GradientDescentOptimizer(0.2).minimize(loss, global_step)
tf.logging.set_verbosity(tf.logging.INFO)
with tf.train.MonitoredTrainingSession(
hooks=[
tf.train.LoggingTensorHook({'step': global_step,
'loss': loss, metric_name: metric}, 100),
tf.train.StopAtStepHook(2000)
]) as sess:
while not sess.should_stop():
sess.run(train_op)
| true |
3724a8ed0e9fe54eb01a3bc59e1ea11934cecb12 | Python | mastersjw/ninjagame | /flask_app/models/user.py | UTF-8 | 2,863 | 2.71875 | 3 | [] | no_license | from flask_app.config.mysqlconnection import connectToMySQL
from flask_bcrypt import Bcrypt
from flask_app import app
from flask import flash
import re
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
bcrypt = Bcrypt(app)
schema = "ninjaGame"
class User:
def __init__(self,data):
self.id=data['id']
self.email = data['email']
self.password = data['password']
self.created_at = data['created_at']
self.updated_at = data['updated_at']
@classmethod
def create(cls, data):
query = """
INSERT INTO users (email,password)
VALUES (%(email)s, %(password)s);
"""
post_data={
**data,
'password': bcrypt.generate_password_hash(data['password'])
}
return connectToMySQL(schema).query_db(query,post_data)
@classmethod
def get_all(cls):
query = "SELECT * FROM users;"
results = connectToMySQL(schema).query_db(query)
all_users = [];
for result in results:
all_users.append(cls(result))
return all_users
@classmethod
def get_one(cls,data):
query = "SELECT * FROM users;"
results = connectToMySQL(schema).query_db(query,data)
return cls(results[0]);
@classmethod
def get_user_by_email(cls,data):
query = "SELECT * FROM users WHERE email= %(email)s;"
results = connectToMySQL(schema).query_db(query,data)
# print("*"*80)
# print(results)
if results == ():
return False
return cls(results[0])
@classmethod
def logUser(cls,data):
print (data)
user1 = cls.get_user_by_email(data)
return user1.id
@staticmethod
def check_cred(post_data):
if len(post_data['email']) >0 and len(post_data['password'])>0:
user1 = User.get_user_by_email(post_data)
if (user1):
print (post_data['password'])
if bcrypt.check_password_hash(user1.password, post_data['password']):
return True
return False
@staticmethod
def validate_user(post_data):
is_valid = True
if len(post_data['password'])<8:
flash("Password must be longer then 7 characters")
is_valid=False;
elif post_data['password'] != post_data['cpassword']:
flash("Password does not match Confirm Password")
is_valid=False;
if not EMAIL_REGEX.match(post_data['email']):
flash("Invalid email address!")
is_valid = False
elif User.get_user_by_email(post_data):
flash("e-mail already in use")
is_valid=False;
return is_valid
| true |
62cf32de7acd8cebdc6751a50a394ee037e59668 | Python | JeremieBou/stix_generator | /make_nodes.py | UTF-8 | 1,484 | 2.6875 | 3 | [
"MIT"
] | permissive | import sys
import os
from stix_generator.util import Util as u
from stix_generator.stix_generator import Generator
def main():
"""
example script for STIX Generator
makes random stix data using the generator with set peramaters in the script
"""
path = os.path.realpath('static/data') + "/view.json"
if(len(sys.argv) is 2):
total_num = 100
sightings_num = 0
marking_num = 0
granular_marking_num = 0
M_0_num = 2
indicator_num = 50
observed_data_num = 0
report_num = 0
print "M_0 = " + str(M_0_num)
print "Generating " + str(total_num) + " nodes"
print "Generating " + str(sightings_num) + " sightingss"
print "Generating " + str(marking_num) + " markings"
print "Generating " + str(granular_marking_num) + " granular_markings"
print "Generating " + str(indicator_num) + " indicators"
print "Generating " + str(observed_data_num) + " observed_datas"
print "Generating " + str(report_num) + " reports"
sg = Generator(total_num, sightings_num, marking_num, granular_marking_num, M_0_num, indicator_num, observed_data_num, report_num)
stix = sg.generate()
print "Done generating, making output"
u.make_output(stix, str(sys.argv[1]))
print "Complete"
# No Arguments given
else:
print "Please specify the ouput directory."
if __name__ == "__main__":
main()
| true |
c3bb790691ca6a1a0a0a894d3acb7e66d21374e7 | Python | HariData20/SmartCalculator | /Problems/Dating App/main.py | UTF-8 | 1,179 | 3.5 | 4 | [] | no_license | """potential_dates = [{"name": "Julia", "gender": "female", "age": 29,
"hobbies": ["jogging", "music"], "city": "Hamburg"},
{"name": "Sasha", "gender": "male", "age": 18,
"hobbies": ["rock music", "art"], "city": "Berlin"},
{"name": "Maria", "gender": "female", "age": 35,
"hobbies": ["art"], "city": "Berlin"},
{"name": "Daniel", "gender": "non-conforming", "age": 50,
"hobbies": ["boxing", "reading", "art"], "city": "Berlin"},
{"name": "John", "gender": "male", "age": 41,
"hobbies": ["reading", "alpinism", "museums"], "city": "Munich"}]
"""
def select_dates(potential_dates):
names = []
for person in potential_dates:
if person["age"] > 30 and person["city"] == "Berlin" and 'art' in person["hobbies"]:
names.append(person['name'])
return ', '.join(names)
"""
Dictionary comprehension
names = [person['name'] for person in potential_dates if
person["age"] > 30 and person["city"] == "Berlin" and 'art' in person["hobbies"]]
print(', '.join(names))
"""
| true |
0c3bf2f33d12e7245f4fabc5b686c5f1b5930630 | Python | sumale/myChain | /input.py | UTF-8 | 1,757 | 2.8125 | 3 | [] | no_license | from ecdsa import VerifyingKey
from flask import jsonify
class Input:
def __init__(self, block_number=-1, auth_number=-1, output_number=-1, signature=None):
self._blockNumber = block_number
self._authNumber = auth_number
self._outputNumber = output_number
self._signature = signature
def set_block_number(self, block_number):
self._blockNumber = block_number
def get_block_number(self):
return self._blockNumber
def set_auth_number(self, auth_number):
self._authNumber = auth_number
def get_auth_number(self):
return self._authNumber
def set_output_number(self, output_number):
self._outputNumber = output_number
def get_output_number(self):
return self._outputNumber
def add_signature(self, signature):
self._signature = signature
def valid(self, blockchain, message):
source_out = blockchain.get_output(self._blockNumber, self._authNumber, self._outputNumber)
if source_out is None:
return False
source_pubkey = source_out.getAddress()
source_pubkey = VerifyingKey.from_string(source_pubkey)
return source_pubkey.verify(self._signature, message.encode("utf-8"))
def to_json(self):
json = {
'blockNumber': self._blockNumber,
'authNumber': self._authNumber,
'outputNumber': self._outputNumber,
'signature': self._signature
}
return jsonify(json)
def __str__(self):
return str(self._blockNumber)+str(self._authNumber)+str(self._outputNumber)
if __name__ == '__main__':
input = Input(10, 12, 3)
print(str(input))
input.add_signature("signature")
print(str(input))
| true |
2a5faaf2474b0091c2696927af1c72316bedb473 | Python | WestonSF/ArcGISDataToolkit | /MapDocumentSummary.py | UTF-8 | 10,003 | 2.671875 | 3 | [
"Apache-2.0"
] | permissive | #-------------------------------------------------------------
# Name: Map Document Summary
# Purpose: Creates a summary for each map document in a folder, stating description information about the map document
# as well as a list of data sources used in the map documents.
# Author: Shaun Weston (shaun_weston@eagle.co.nz)
# Date Created: 27/05/2014
# Last Updated: 04/02/2015
# Copyright: (c) Eagle Technology
# ArcGIS Version: 10.1/10/.2
# Python Version: 2.7
#--------------------------------
# Import modules
import os
import sys
import logging
import smtplib
import arcpy
import csv
# Enable data to be overwritten
arcpy.env.overwriteOutput = True
# Set global variables
enableLogging = "false" # Use logger.info("Example..."), logger.warning("Example..."), logger.error("Example...")
logFile = "" # os.path.join(os.path.dirname(__file__), "Example.log")
sendErrorEmail = "false"
emailTo = ""
emailUser = ""
emailPassword = ""
emailSubject = ""
emailMessage = ""
enableProxy = "false"
requestProtocol = "http" # http or https
proxyURL = ""
output = None
# Start of main function
def mainFunction(mxdFolder,outputCSV,csvDelimiter,subDirectories): # Get parameters from ArcGIS Desktop tool by seperating by comma e.g. (var1 is 1st parameter,var2 is 2nd parameter,var3 is 3rd parameter)
try:
# --------------------------------------- Start of code --------------------------------------- #
# Create a CSV file
csvFile = open(outputCSV, 'wb')
# Setup writer
if csvDelimiter == "|":
writer = csv.writer(csvFile, delimiter="|")
if csvDelimiter == ";":
writer = csv.writer(csvFile, delimiter=";")
if csvDelimiter == ",":
writer = csv.writer(csvFile, delimiter=",")
# If including subdirectories
if subDirectories == "true":
# Loop through the folder and all subdirectories
for subDirectory, directory, mxdFiles in os.walk(mxdFolder):
for mxdFile in mxdFiles:
fullMXDPath = os.path.join(subDirectory, mxdFile)
mapDocumentSummary(writer,fullMXDPath,mxdFile)
else:
# Loop through each of the MXD files in the folder
for mxdFile in os.listdir(mxdFolder):
fullMXDPath = os.path.join(mxdFolder, mxdFile)
mapDocumentSummary(writer,fullMXDPath,mxdFile)
# --------------------------------------- End of code --------------------------------------- #
# If called from gp tool return the arcpy parameter
if __name__ == '__main__':
# Return the output if there is any
if output:
arcpy.SetParameterAsText(1, output)
# Otherwise return the result
else:
# Return the output if there is any
if output:
return output
# Logging
if (enableLogging == "true"):
# Log end of process
logger.info("Process ended.")
# Remove file handler and close log file
logging.FileHandler.close(logMessage)
logger.removeHandler(logMessage)
pass
# If arcpy error
except arcpy.ExecuteError:
# Build and show the error message
errorMessage = arcpy.GetMessages(2)
arcpy.AddError(errorMessage)
# Logging
if (enableLogging == "true"):
# Log error
logger.error(errorMessage)
# Log end of process
logger.info("Process ended.")
# Remove file handler and close log file
logging.FileHandler.close(logMessage)
logger.removeHandler(logMessage)
if (sendErrorEmail == "true"):
# Send email
sendEmail(errorMessage)
# If python error
except Exception as e:
errorMessage = ""
# Build and show the error message
for i in range(len(e.args)):
if (i == 0):
errorMessage = unicode(e.args[i]).encode('utf-8')
else:
errorMessage = errorMessage + " " + unicode(e.args[i]).encode('utf-8')
arcpy.AddError(errorMessage)
# Logging
if (enableLogging == "true"):
# Log error
logger.error(errorMessage)
# Log end of process
logger.info("Process ended.")
# Remove file handler and close log file
logging.FileHandler.close(logMessage)
logger.removeHandler(logMessage)
if (sendErrorEmail == "true"):
# Send email
sendEmail(errorMessage)
# End of main function
# Start of map document summary function
def mapDocumentSummary(writer,fullMXDPath,mxdFile):
# If a file
if os.path.isfile(fullMXDPath):
# If an mxd file
if mxdFile.lower().endswith(".mxd"):
# Reference MXD
mxd = arcpy.mapping.MapDocument(fullMXDPath)
arcpy.AddMessage("Getting information for " + fullMXDPath + "...")
# Add in map document path
row = []
row.append("Map Document")
row.append(fullMXDPath)
writer.writerow(row)
# Add in map document title
row = []
row.append("Title")
row.append(mxd.title)
writer.writerow(row)
# Add in map document summary
row = []
row.append("Summary")
row.append(mxd.summary)
writer.writerow(row)
# Reference each data frame
dataFrameList = arcpy.mapping.ListDataFrames(mxd)
# For each data frame
for dataFrame in dataFrameList:
# Add in data frame name
row = []
row.append("Data Frame")
row.append(dataFrame.name)
writer.writerow(row)
# Reference each layer in a data frame
layerList = arcpy.mapping.ListLayers(mxd, "", dataFrame)
# For each layer
for layer in layerList:
# Add in layer name
row = []
row.append("Layer")
row.append(layer.longName)
writer.writerow(row)
if layer.supports("dataSource"):
# Add in layer data source
row = []
row.append("Data Source")
row.append(layer.dataSource)
writer.writerow(row)
# Reference each table in a data frame
tableList = arcpy.mapping.ListTableViews(mxd, "", dataFrame)
# For each table
for table in tableList:
# Add in table
row = []
row.append("Table")
row.append(table.name)
writer.writerow(row)
# Add in table data source
row = []
row.append("Data Source")
row.append(table.dataSource)
writer.writerow(row)
# Add in spacer rows
row = []
writer.writerow(row)
row = []
writer.writerow(row)
# End of map document summary function
# Start of set logging function
def setLogging(logFile):
# Create a logger
logger = logging.getLogger(os.path.basename(__file__))
logger.setLevel(logging.DEBUG)
# Setup log message handler
logMessage = logging.FileHandler(logFile)
# Setup the log formatting
logFormat = logging.Formatter("%(asctime)s: %(levelname)s - %(message)s", "%d/%m/%Y - %H:%M:%S")
# Add formatter to log message handler
logMessage.setFormatter(logFormat)
# Add log message handler to logger
logger.addHandler(logMessage)
return logger, logMessage
# End of set logging function
# Start of send email function
def sendEmail(message):
# Send an email
arcpy.AddMessage("Sending email...")
# Server and port information
smtpServer = smtplib.SMTP("smtp.gmail.com",587)
smtpServer.ehlo()
smtpServer.starttls()
smtpServer.ehlo
# Login with sender email address and password
smtpServer.login(emailUser, emailPassword)
# Email content
header = 'To:' + emailTo + '\n' + 'From: ' + emailUser + '\n' + 'Subject:' + emailSubject + '\n'
body = header + '\n' + emailMessage + '\n' + '\n' + message
# Send the email and close the connection
smtpServer.sendmail(emailUser, emailTo, body)
# End of send email function
# This test allows the script to be used from the operating
# system command prompt (stand-alone), in a Python IDE,
# as a geoprocessing script tool, or as a module imported in
# another script
if __name__ == '__main__':
# Arguments are optional - If running from ArcGIS Desktop tool, parameters will be loaded into *argv
argv = tuple(arcpy.GetParameterAsText(i)
for i in range(arcpy.GetArgumentCount()))
# Logging
if (enableLogging == "true"):
# Setup logging
logger, logMessage = setLogging(logFile)
# Log start of process
logger.info("Process started.")
# Setup the use of a proxy for requests
if (enableProxy == "true"):
# Setup the proxy
proxy = urllib2.ProxyHandler({requestProtocol : proxyURL})
openURL = urllib2.build_opener(proxy)
# Install the proxy
urllib2.install_opener(openURL)
mainFunction(*argv)
| true |
6cb391dd57cd0a13d219ccc68901fb5c14b59a78 | Python | Sushobhan04/dltools | /dltools/networks.py | UTF-8 | 4,504 | 3.125 | 3 | [] | no_license | import torch
import torch.nn as nn
class LinearNormRelu(nn.Module):
"""Linear (fully connected) Normalization Relu block
"""
def __init__(self, inc, outc, relu=True, norm=None):
super().__init__()
self.linear = nn.Linear(inc, outc)
self.relu = relu
self.norm = norm
if norm is None:
self.norm_layer = None
elif norm == "bn":
self.norm_layer = nn.BatchNorm1d(outc)
elif norm == "in":
self.norm_layer = nn.InstanceNorm1d(outc)
def forward(self, x):
out = self.linear(x)
if self.norm is not None:
out = self.norm_layer(out)
if self.relu:
out = torch.relu(out)
return out
class LinearBlock(nn.Module):
"""A sequence of LinearNormRelu blocks
"""
def __init__(self, inc, outc, itmc, L, relu=False, norm=None):
super().__init__()
layers = []
layers.append(LinearNormRelu(inc, itmc, norm=norm))
for i in range(L - 2):
layers.append(LinearNormRelu(itmc, itmc, norm=norm))
layers.append(LinearNormRelu(itmc, outc, relu=relu, norm=norm))
self.layers = nn.ModuleList(layers)
self.L = L
def forward(self, x):
out = x
for i in range(self.L):
out = self.layers[i](out)
return out
class ConvNormRelu(nn.Module):
"""Comvolution block containing a nn.Conv2d, batchnormalization and relu layer
"""
def __init__(self, inc, outc, k=3, stride=1, relu=True, norm="bn", D=2):
"""Instance initialization
Args:
inc (int): number of input channels
outc (int): number of output channels
stride (int, optional): stride for convolution. Defaults to 1.
relu (bool, optional): whether to use relu activation or not. Defaults to True.
norm (string, optional): normalization to use between layers.
Options between 'bn' for batchnormalization
and 'None' for no normalization. Defaults to 'bn'.
"""
super().__init__()
if D == 2:
self.conv = nn.Conv2d(inc, outc, k, padding=k // 2, stride=stride)
if norm is None:
self.norm_layer = None
elif norm == "bn":
self.norm_layer = nn.BatchNorm2d(outc)
elif norm == "in":
self.norm_layer = nn.InstanceNorm2d(outc)
elif D == 3:
self.conv = nn.Conv3d(inc, outc, k, padding=k // 2, stride=stride)
if norm is None:
self.norm_layer = None
elif norm == "bn":
self.norm_layer = nn.BatchNorm3d(outc)
elif norm == "in":
self.norm_layer = nn.InstanceNorm3d(outc)
self.relu = relu
self.norm = norm
self.D = D
def forward(self, x):
out = self.conv(x)
if self.norm is not None:
out = self.norm_layer(out)
if self.relu:
out = torch.relu(out)
return out
class ConvBlock(nn.Module):
"""Comvolution block containing a sequence of ConvNormRelu blocks.
The stride of the last block is given by stride parameter
"""
def __init__(self, inc, outc, itmc, L, k=3, stride=1, relu=True, norm="bn", D=2):
"""Instance initialization
Args:
inc (int): number of input channels
outc (int): number of output channels
itmc (int): number of channels for the hidden layers
L (int): number of ConvNormRelu blocks in sequence
stride (int, optional): Stride of the last ConvNormRelu layer. Defaults to 1.
norm (string, optional): normalization to use between layers.
Options between 'bn' for batchnormalization
and 'ln' for layer normalization. Defaults to 'bn'.
"""
super().__init__()
layers = []
layers.append(ConvNormRelu(inc, itmc, k=k, norm=norm, D=D))
for i in range(L - 2):
layers.append(ConvNormRelu(itmc, itmc, k=k, norm=norm, D=D))
layers.append(ConvNormRelu(itmc, outc, k=k, stride=stride, norm=norm, relu=relu, D=D))
self.layers = nn.ModuleList(layers)
self.L = L
def forward(self, x):
out = x
for i in range(self.L):
out = self.layers[i](out)
return out
| true |
26bc4d759ce5edbbad52abbd5a0c393d4ce3df95 | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_148/173.py | UTF-8 | 630 | 3.078125 | 3 | [] | no_license | #!/usr/bin/python3 -t
def read_ints():
return map(int, input().split())
def solve():
n, x = read_ints()
sizes = list(reversed(sorted(read_ints())))
result = 0
cur = []
for s in sizes:
ok = False
for i, c in enumerate(cur):
if s <= c:
del cur[i]
ok = True
break
if not ok:
if x - s > 0:
cur.append(x - s)
result += 1
print(result)
if __name__ == '__main__':
for test_case in range(int(input())):
print('Case #{}: '.format(test_case + 1), end='')
solve() | true |
723fcb524c9783cb5348a7c5753db4111f8fb967 | Python | shaking54/Face-Recognition-Core | /Core/SVM-Classifier/SVM.py | UTF-8 | 842 | 2.578125 | 3 | [] | no_license | import face_recognition
from sklearn import svm
import numpy as np
import os
import pickle
encodings = []
names =[]
path_dataset= 'D:/pythonProjects/dataset/'
train_dir = os.listdir(path_dataset)
print(train_dir)
for person in train_dir:
pix =os.listdir(path_dataset+person)
for person_img in pix:
face = face_recognition.load_image_file(path_dataset+'/'+person+'/'+person_img)
face_bbox = face_recognition.face_locations(face)
if len(face_bbox)==1:
face_enc = face_recognition.face_encodings(face)[0]
encodings.append(face_enc)
names.append(person)
else:
print(person+'/'+person_img+"was skipped and can't be used for training")
clf = svm.SVC(gamma='scale',kernel='rbf',probability=True)
clf.fit(encodings,names)
pickle.dump(clf,open('svm.pkl','wb'))
| true |
aa5d9c125787b32c0b2248c64e1a6572fb9ded15 | Python | ondiekisteven/timetable | /studentdb.py | UTF-8 | 4,377 | 3.171875 | 3 | [] | no_license | import pymysql
import db
"""
@param program : Name of the program you want to search its units
@return : returns units for the program otherwise returns null
"""
def getunitsbycourse(program):
dbase = db.connect()
cursor = dbase.cursor()
cursor.execute("select * from coursedetails where coursename = '%s'" % program)
data = cursor.fetchone()
units = []
units = data[2].split(".")
cursor.close()
dbase.close()
return units
"""
@param program_name : Name of the program you want to search if it exists. it performs a wildcard search
meaning it matches programs which contain name provided
@return : returns true if any such program exists, otherwise null.
"""
def isexistprogram(program_name):
dbase = db.connect()
cursor = dbase.cursor()
cursor.execute("SELECT * FROM coursedetails WHERE coursename LIKE '%%%s%%'" % (program_name.lower()))
if(cursor.rowcount == 0):
return False
else:
return True
cursor.close()
dbase.close()
"""
@param regno : Registration number of student which you are searching for.
@return : returns True if reg number is found, otherwise False
"""
def isexistreg(regno):
dbase = db.connect()
cursor = dbase.cursor()
cursor.execute("SELECT * FROM students WHERE regnumber = '%s'" % (regno.lower()))
if(cursor.rowcount == 0):
return False
else:
return True
cursor.close()
dbase.close()
"""
@param program_name : An expression or name of program you are searching for. Can be full name
or part of name of the program you are seraching for
@return : returns list of programs which contain the specified name, otherwise retuns null
"""
def getprogramslike(program_name):
dbase = db.connect()
cursor = dbase.cursor()
cursor.execute("SELECT * FROM coursedetails WHERE coursename like '%%%s%%'" % (program_name))
result = cursor.fetchall()
cursor.close()
dbase.close()
return result
"""
@param null :
@return : returns a list of all programs saved in database
"""
def getallprogramnames():
dbase = db.connect()
cursor = dbase.cursor()
cursor.execute("SELECT coursename FROM coursedetails")
result = cursor.fetchall()
cursor.close()
dbase.close()
return result
"""
@param id : id of the program you are searching for
@return : returns name of program, otherwise null
"""
def getprogrambyid(id):
dbase = db.connect()
cursor = dbase.cursor()
cursor.execute("SELECT * FROM coursedetails WHERE id = %d" % (id))
data = cursor.fetchone()
coursename = data[1]
cursor.close()
dbase.close()
return coursename
"""
@param program_name : Name of the program you want to search
@return : returns the course id if successful, otherwise returns -1 if no such progra found
"""
def getprogramid(program_name):
dbase = db.connect()
cursor = dbase.cursor()
cursor.execute("SELECT * FROM coursedetails WHERE coursename = '%s'" % (program_name))
if(cursor.rowcount >0):
data = cursor.fetchone()
courseid = data[0]
return courseid
else:
return -1
cursor.close()
dbase.close()
"""
@param program : Registration number of student
@return : returns the registration number taken by student if student exists,
otherwise null of either student doesnt exist or no course found[however second option is unlikely]
"""
def getcoursebyregno(regno):
dbase = db.connect()
cursor = dbase.cursor()
if (isexistreg(regno)):
cursor.execute("SELECT * FROM students WHERE regnumber = '%s' " % (regno.lower()))
data = cursor.fetchone()
course = getprogrambyid(data[3])
return course
else:
pass
"""
@param regno : registration number of student you re registering
@param coursename : name of course the student is doing. must be a registered course
otherwise the insertion will fail.
@return : returns 0 if operation is successful, or 0 if any error occurs
"""
def insertcourse(regno, coursename):
dbase = db.connect()
cursor = dbase.cursor()
if(isexistprogram(coursename) and isexistreg(regno)):
cursor.execute("UPDATE TABLE students SET courseid = %d WHERE regnumber = '%s'" % (getprogramid(coursename), regno))
dbase.commit()
return 0
else:
return 1
def register(regno, fname, lname, courseid, year):
dbase = db.connect()
cursor = dbase.cursor()
cursor.execute("INSERT INTO students (regnumber, firstname, surname, courseid, year) values ('%s', '%s', '%s', %d, %d)" % (regno, fname, lname, courseid, year))
dbase.commit()
cursor.close()
dbase.close() | true |
e9d4cd4fa366d1cf5506e6a8ddafcd2a7567b14f | Python | rootAvish/TRIXIE | /OrganiseMyMusic/helpers.py | UTF-8 | 1,055 | 2.640625 | 3 | [
"MIT"
] | permissive | import shutil, os
def move(source, id3tags):
# print id3tags
#print "moving to " + + "from " + source
if (os.path.exists(source)):
if 'ALBUM' in id3tags and 'ARTIST' in id3tags:
s = id3tags['ARTIST'] + "\\"+ id3tags['ALBUM']
dest = "".join(x for x in s if x.isalnum() or x == "\\" or x == " ")
dest = movedir + dest
if not os.path.isdir(dest):
os.makedirs(dest)
print dest
if os.path.isdir(dest):
try:
shutil.move(source, dest)
except Exception, e:
return
else:
print "Could not create directories."
else:
print "skipping " + source + ", metadata not complete."
def destdir():
global movedir
movedir = raw_input('Enter directory where you want to move all your music to: ')
if movedir[-1] != "\\":
movedir += "\\"
if not os.path.exists(movedir):
os.makedirs(movedir) | true |
5120ef62404650c4e90437c5ab7debfd679db2c7 | Python | yephm/SSMN | /SSMN_rev02/model_training/plot_discussion.py | UTF-8 | 10,370 | 2.8125 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
import os
color = ['#AC5BF8', '#B4E593', '#007FC8'] # purple, green, blue
barcolor = '#B5A884'
mark = ['o', 'v', 's', 'p', '*', 'h', '8', '.', '4', '^', '+', 'x', '1', '2']
# 实心圆,正三角,正方形,五角,星星,六角,八角,点,tri_right, 倒三角...
# 都是实心的,需要设置edgecolor=..., facecolor='white'
linewidth = 2
font_label = {'family': 'Times New Roman', 'weight': 'bold', 'size': 12}
def plot_unlabel():
# ====== data ========= 5-shot case
# sample size of unlabeled data
Case_1 = [97.14, 99.56, 99.89, 99.97, 99.91, 98.74]
Case_2 = [89.98, 97.43, 98.62, 100, 97.70, 97.91]
Case_3 = [89.71, 93.40, 92.98, 95.07, 87.68, 85.25]
x = np.arange(len(Case_1))
# tick_label = ['0', '1', '3', 5, '10', '20']
tick_label = [0, 1, 3, 5, 10, 20]
x_label = 'Number of unlabeled samples'
y_label = 'Accuracy (%)'
y_max = 100 + 0.5
# ============== plot ========================
plt.rc('font', family='Times New Roman', style='normal', weight='light', size=10)
fig = plt.figure()
plt.plot(x, Case_1, color=color[0], linestyle='-', linewidth=linewidth,
marker='o', markersize=10, markerfacecolor='white',
markeredgecolor=color[0], markeredgewidth=linewidth, label='Case 1')
plt.plot(x, Case_2, color=color[1], linestyle='-', linewidth=linewidth,
marker='v', markersize=10, markerfacecolor='white',
markeredgecolor=color[1], markeredgewidth=linewidth, label='Case 2')
plt.plot(x, Case_3, color=color[2], linestyle='-', linewidth=linewidth,
marker='*', markersize=10, markerfacecolor=color[2],
markeredgecolor=color[2], markeredgewidth=linewidth, label='Case 3')
# plt.fill_between(x=[0, 3.5], y1=100, color='#9FAAB7')
plt.legend(fontsize=12)
plt.xlabel(x_label, fontdict=font_label)
plt.ylabel(y_label, fontdict=font_label)
plt.ylim([80, y_max])
plt.xticks(x, labels=tick_label)
save_f = r'C:\Users\20996\Desktop\SSMN_revision\training_model\SSMN_paper_imgs\Discussion'
name = r'unlabeled size.eps'
f = os.path.join(save_f, name)
order = input("Save the fig? Y/N\n")
if order == 'Y' or order == 'y':
plt.savefig(f, dpi=600)
print(f'Save at\n{f}')
plt.show()
def plot_refinement():
    """Plot accuracy (left axis) and average computation time (right-axis bars)
    vs. the number of prototype-refinement iterations; optional EPS save.
    """
    # ieration of refining
    Case_1 = [97.14, 98.86, 99.97, 99.77, 97.22, 95.87]
    Case_2 = [89.98, 97.78, 100, 100, 100, 99.20]
    Case_3 = [89.71, 92.53, 95.07, 93.87, 89.62, 90.73]
    avg_time = [1.06, 1.14, 1.21, 1.57, 2.34, 3.17]
    x = np.arange(len(Case_1))
    # tick_label = ['0', '1', '3', 5, '10', '20']
    tick_label = [0, 1, 3, 5, 10, 20]
    x_label = 'Iteration number of prototype refining'
    y1_label = 'Average Accuracy (%)'
    y2_label = 'Average Computional Time (s)'
    y1_max = 100 + 0.5
    plt.rc('font', family='Times New Roman', style='normal', weight='light', size=10)
    fig = plt.figure()
    ax1 = fig.add_subplot(111)
    ax1.plot(x, Case_1, color=color[0], linestyle='-', linewidth=linewidth,
             marker='o', markersize=10, markerfacecolor='white',
             markeredgecolor=color[0], markeredgewidth=linewidth, label='Case 1')
    ax1.plot(x, Case_2, color=color[1], linestyle='-', linewidth=linewidth,
             marker='v', markersize=10, markerfacecolor='white',
             markeredgecolor=color[1], markeredgewidth=linewidth, label='Case 2')
    ax1.plot(x, Case_3, color=color[2], linestyle='-', linewidth=linewidth,
             marker='*', markersize=10, markerfacecolor=color[2],
             markeredgecolor=color[2], markeredgewidth=linewidth, label='Case 3')
    plt.legend(fontsize=12)
    plt.xlabel(x_label, fontdict=font_label)
    plt.ylabel(y1_label, fontdict=font_label)
    plt.ylim([80, y1_max])
    plt.xticks(x, labels=tick_label)
    # secondary axis for the timing bars
    ax2 = ax1.twinx()
    ax2.bar(x=x, height=avg_time, color=barcolor,
            width=0.2, label='Time')
    # ax1.set_xticks(x_tick)
    # ax1.set_xticklabels(tick_label, fontdict=fontx)
    ax2.set_ylabel(y2_label, fontdict=font_label)
    ax2.set_ylim([0.5, 4])
    ax2.legend(fontsize=12)
    save_f = r'C:\Users\20996\Desktop\SSMN_revision\training_model\SSMN_paper_imgs'
    name = r'Refineing number2.eps'
    f = os.path.join(save_f, name)
    order = input("Save the fig? Y/N\n")
    if order == 'Y' or order == 'y':
        plt.savefig(f, dpi=600)
        print(f'Save at\n{f}')
    plt.show()
def plot_attentionBlock():
    """Plot accuracy (left axis) and average computation time (right-axis bars)
    vs. the number of attention blocks; optional EPS save.
    """
    # ieration of refining
    Case_1 = [95.88, 97.77, 99.97, 99.64, 99.31]
    Case_2 = [91.42, 99.53, 100, 99.45, 99.83]
    Case_3 = [88.73, 92.04, 95.07, 92.37, 92.95]
    avg_time = [1.12, 1.16, 1.21, 1.72, 2.01]
    x = np.arange(len(Case_1))
    # tick_label = ['0', '1', '3', 5, '10', '20']
    tick_label = [0, 1, 2, 3, 4]
    x_label = 'Number of attention blocks'
    y1_label = 'Average Accuracy (%)'
    y2_label = 'Average Computional Time (s)'
    y1_max = 100 + 0.5
    plt.rc('font', family='Times New Roman', style='normal', weight='light', size=10)
    fig = plt.figure()
    ax1 = fig.add_subplot(111)
    ax1.plot(x, Case_1, color=color[0], linestyle='-', linewidth=linewidth,
             marker='o', markersize=10, markerfacecolor='white',
             markeredgecolor=color[0], markeredgewidth=linewidth, label='Case 1')
    ax1.plot(x, Case_2, color=color[1], linestyle='-', linewidth=linewidth,
             marker='v', markersize=10, markerfacecolor='white',
             markeredgecolor=color[1], markeredgewidth=linewidth, label='Case 2')
    ax1.plot(x, Case_3, color=color[2], linestyle='-', linewidth=linewidth,
             marker='*', markersize=10, markerfacecolor=color[2],
             markeredgecolor=color[2], markeredgewidth=linewidth, label='Case 3')
    plt.legend(fontsize=12)
    plt.xlabel(x_label, fontdict=font_label)
    plt.ylabel(y1_label, fontdict=font_label)
    plt.ylim([80, y1_max])
    plt.xticks(x, labels=tick_label)
    # secondary axis for the timing bars
    ax2 = ax1.twinx()
    ax2.bar(x=x, height=avg_time, color=barcolor,
            width=0.2, label='Time')
    # ax1.set_xticks(x_tick)
    # ax1.set_xticklabels(tick_label, fontdict=fontx)
    ax2.set_ylabel(y2_label, fontdict=font_label)
    ax2.set_ylim([0.5, 3])
    ax2.legend(fontsize=12)
    save_f = r'C:\Users\20996\Desktop\SSMN_revision\training_model\SSMN_paper_imgs\Discussion'
    name = r'attention_block.eps'
    f = os.path.join(save_f, name)
    order = input("Save the fig? Y/N\n")
    if order == 'Y' or order == 'y':
        plt.savefig(f, dpi=600)
        print(f'Save at\n{f}')
    plt.show()
def plot_loss_1(): # SGD, Adam, exp_SGD
    """Plot the tail (steps 670+) of three training-loss curves loaded from
    hard-coded .npy files, comparing SGD, exponential-decay SGD, and Adam.
    """
    # ieration of refining
    sgd_f = r'C:\Users\20996\Desktop\SSMN_revision\training_model\SSMN\Loss_file\SGD_900.npy'
    exp_sgd = r'C:\Users\20996\Desktop\SSMN_revision\training_model\SSMN\Loss_file\ExSGD_900.npy'
    adam = r'C:\Users\20996\Desktop\SSMN_revision\training_model\SSMN\Loss_file\Adam_900.npy'
    SGD, ex_SGD, Adam = np.load(sgd_f), np.load(exp_sgd), np.load(adam)
    x = np.arange(len(SGD))
    plt.rc('font', family='Times New Roman', style='normal', weight='light', size=10)
    plt.figure()
    # NOTE(review): a constant 0.017 is added to the SGD curve --
    # presumably a manual vertical offset for visual separation; confirm.
    plt.plot(x[670:], SGD[670:] + 0.017, label='SGD') # x[670:], SGD[670:]+0.017
    plt.plot(x[670:], ex_SGD[670:], label='exp_SGD')
    plt.plot(x[670:], Adam[670:], label='Adam')
    plt.xlabel('Training Step', fontsize=12, fontweight='bold')
    plt.ylabel('Training Loss', fontsize=12, fontweight='bold')
    plt.legend()
    save_f = r'C:\Users\20996\Desktop\SSMN_revision\training_model\SSMN_paper_imgs\Discussion'
    name = r'Loss_1_1.eps'
    f = os.path.join(save_f, name)
    order = input("Save the fig? Y/N\n")
    if order == 'Y' or order == 'y':
        plt.savefig(f, dpi=600)
        print(f'Save at\n{f}')
    plt.show()
def plot_loss_2(): # exp_SGD + Adam, lr_threshold
    """Plot loss tails (steps 500-900) for the mixed exp_SGD+Adam optimizer at
    four l_skip thresholds, plus plain SGD / exp_SGD baselines, and overlay the
    mean +- std band across the four l_skip settings.

    Bug fix: the mean/std band labeled "0.005~0.50" previously concatenated
    L_05 twice and left L_005 out, so the band did not cover all four
    l_skip settings; L_005 is now included.
    """
    # ieration of refining
    ls_50 = r'C:\Users\20996\Desktop\SSMN_revision\training_model\SSMN\Loss_file\mix0.5_900.npy'
    ls_20 = r'C:\Users\20996\Desktop\SSMN_revision\training_model\SSMN\Loss_file\mix0.2_900.npy'
    ls_05 = r'C:\Users\20996\Desktop\SSMN_revision\training_model\SSMN\Loss_file\mix0.05_900.npy'
    ls_005 = r'C:\Users\20996\Desktop\SSMN_revision\training_model\SSMN\Loss_file\mix0.005_900.npy'
    ls_exsgd = r'C:\Users\20996\Desktop\SSMN_revision\training_model\SSMN\Loss_file\exSGD_lr0.2_900.npy'
    ls_sgd = r'C:\Users\20996\Desktop\SSMN_revision\training_model\SSMN\Loss_file\SGD_lr0.2_900.npy'
    L_50, L_20, L_05 = np.load(ls_50)[500:], np.load(ls_20)[500:], np.load(ls_05)[500:]
    L_005, L_exsgd, L_sgd = np.load(ls_005)[500:], np.load(ls_exsgd)[500:], np.load(ls_sgd)[500:]
    # fix: include L_005 (was L_05 duplicated), so the band spans 0.005~0.50
    new_L = np.concatenate((L_50[None, :], L_20[None, :], L_05[None, :], L_005[None, :]), axis=0)
    L_mean = np.mean(new_L, axis=0)
    L_std = np.std(new_L, axis=0)
    x = np.arange(900)[500:]
    plt.rc('font', family='Times New Roman', style='normal', weight='light', size=10)
    plt.figure(figsize=(12, 8))
    # NOTE(review): small constant offsets on the baselines -- presumably for
    # visual separation; confirm they are intentional.
    plt.plot(x, L_sgd+0.0015, label=r'SGD: lr=0.2', linewidth=3)
    plt.plot(x, L_exsgd+0.0005, label=r'exp_SGD: lr=0.2', linewidth=3)
    plt.plot(x, L_50, label=r'exp_SGD+Adam: $l_{skip}$=0.50', linewidth=1.5) # x[670:], SGD[670:]+0.017
    plt.plot(x, L_20, label=r'exp_SGD+Adam: $l_{skip}$=0.20', linewidth=1.5)
    plt.plot(x, L_05, label=r'exp_SGD+Adam: $l_{skip}$=0.05', linewidth=1.5)
    plt.plot(x, L_005, label=r'exp_SGD+Adam: $l_{skip}$=0.005', linewidth=1.5)
    plt.plot(x, L_mean, label=r'exp_SGD+Adam: 0.005~0.50', color='#99CC00', linewidth=3)
    plt.fill_between(x=x, y1=L_mean-L_std, y2=L_mean+L_std, color='#99CC00', alpha=0.15)
    plt.xlabel('Training Step', fontsize=12, fontweight='bold')
    plt.ylabel('Training Loss', fontsize=12, fontweight='bold')
    plt.xlim([500, 900])
    # font = {'family': 'Times New Roman', 'style': 'normal', 'weight': 'normal', 'size': 10}
    plt.legend()
    save_f = r'C:\Users\20996\Desktop\SSMN_revision\training_model\SSMN_paper_imgs\Discussion'
    name = r'Loss_2.eps'
    f = os.path.join(save_f, name)
    order = input("Save the fig? Y/N\n")
    if order == 'Y' or order == 'y':
        plt.savefig(f, dpi=600)
        print(f'Save at\n{f}')
    plt.show()
if __name__ == "__main__":
    # Entry point: one figure per run; uncomment the plot you want to regenerate.
    # plot_refinement()
    # plot_attentionBlock()
    # plot_loss_1()
    plot_loss_2()
| true |
89e5cd9b1b2a23059a4f0372313ca32632275e2c | Python | nixonpj/leetcode | /3Sum Closest.py | UTF-8 | 1,124 | 3.6875 | 4 | [] | no_license | """
Given an array nums of n integers and an integer target, find three integers
in nums such that the sum is closest to target. Return the sum of the three integers.
You may assume that each input would have exactly one solution.
"""
from typing import List
from math import inf
class Solution:
    def threeSumClosest(self, nums: List[int], target: int) -> int:
        """Return the sum of three integers in *nums* closest to *target*.

        Fixes over the original:
        - The previous heuristic only walked a single monotone path of
          (i, k) outer pairs, so some candidate triples were never
          considered; this is the standard O(n^2) fix-one-element /
          two-pointer scan, which covers every relevant pair.
        - Removed the leftover debug ``print`` calls.

        Assumes ``len(nums) >= 3`` (guaranteed by the problem statement).
        Sorts *nums* in place, as the original did.
        """
        nums.sort()
        best = nums[0] + nums[1] + nums[2]
        for i in range(len(nums) - 2):
            lo, hi = i + 1, len(nums) - 1
            while lo < hi:
                total = nums[i] + nums[lo] + nums[hi]
                if total == target:
                    return total  # cannot get closer than an exact match
                if abs(total - target) < abs(best - target):
                    best = total
                if total < target:
                    lo += 1      # need a larger sum
                else:
                    hi -= 1      # need a smaller sum
        return best
s = Solution()
print(s.threeSumClosest([4,0,5,-5,3,3,0,-4,-5], -2))
# print(s.threeSumClosest([-4,1,-1,2], 1))
| true |
553bb08d1ff8c28fef89d6189ba4e7308f0f1d87 | Python | weronikazak/Penguin-Diner-Bot | /bot.py | UTF-8 | 5,818 | 2.78125 | 3 | [] | no_license | import os
import pyautogui
import time
import sys
class PenguinDinterBot():
    """Screen-automation bot for the Penguin Diner flash game.

    Drives the game entirely through pyautogui screenshot matching and
    clicks; all coordinates are hard-coded for one specific window layout.
    Constructing the class immediately starts the (infinite) game loop.
    """
    # ------------------
    # INITIATE VARIABLES
    # ------------------
    def __init__(self):
        """Cache UI coordinates, build the meal->order image map, start the game."""
        ORDERS_PATH = os.getcwd() + "/orders"
        MEALS_PATH = os.getcwd() + "/meals"
        # fixed screen coordinates of game UI elements
        self.back_button = (750, 640)
        self.trash_bin = (550, 650)
        self.upgrade_button = (530, 550)
        self.new_level = (530-150, 550)
        self.ORDERS = [ORDERS_PATH + "/" + order for order in os.listdir(ORDERS_PATH)]
        self.MEALS = [MEALS_PATH + "/" + meal for meal in os.listdir(MEALS_PATH)]
        # maps a meal image path to its matching order-bubble image path
        # (relies on os.listdir returning both folders in the same order)
        self.MEAL2ORDER = dict()
        for i, meal in enumerate(self.MEALS):
            order = self.ORDERS[i]
            self.MEAL2ORDER[meal] = order
        self.start_game()
    # ------------------
    # FIRST CUTSCENE
    # ------------------
    def cutscene_start(self):
        """Locate the game window on screen and click through the intro screens."""
        input("Move mouse over bot window and press enter.")
        w = pyautogui.locateOnScreen("locate.png")
        if w is None:
            sys.exit("Couldn't find on screen. Is this game visible?")
        win_right = w[0]
        win_bottom = w[1]
        print("------GAME FOUND-------")
        pyautogui.click(win_right - 60, win_bottom - 180, interval=1)
        pyautogui.click(win_right - 280, win_bottom - 200, interval=1) # in case if game was started earlier
        pyautogui.click(self.back_button, interval=1)
        pyautogui.click(self.back_button, interval=1)
        time.sleep(6)
    # ------------------
    # LEVEL END
    # ------------------
    def end_level(self):
        """Handle the end-of-day screen: buy upgrades, then continue or restart."""
        print("------END OF THE DAY!------")
        time.sleep(3)
        # first, look for upgrades
        self.upgrade()
        # start next level if available
        self.start_level_or_restart()
        time.sleep(6)
    # ------------------
    # EQUIPMENT UPGRADES
    # ------------------
    def upgrade(self):
        """Open the upgrade screen and blindly click every slot in a 3x3 grid."""
        pyautogui.click(self.upgrade_button,interval=.3)
        upgrade_x_offset = 200
        upgrade_y_offset = 150
        upgrade_x_start = 300
        upgrade_y_start = 250
        # since it's impossible to deal with numbers, click everything
        for x in range(3):
            for y in range(2, -1, -1):
                click_x = upgrade_x_start + x*upgrade_x_offset
                click_y = upgrade_y_start + y*upgrade_y_offset
                pyautogui.click(click_x, click_y, interval=.2)
        pyautogui.click(self.back_button, interval=.3)
    # --------------------------
    # START NEW LEVEL OR RESTART
    # --------------------------
    def start_level_or_restart(self):
        """Click 'next level' if it is on screen, otherwise (re)start a day."""
        next_option = pyautogui.locateCenterOnScreen("nextlevel.png", confidence=.9)
        if next_option is None:
            # if not, start a new day or restart a day
            pyautogui.click(self.new_level, interval=.3)
        else:
            print("\n------NEXT LEVEL!------\n")
            pyautogui.click(next_option, interval=1)
            pyautogui.click(self.back_button, interval=1)
            pyautogui.click(self.back_button, interval=1)
    # --------------------------
    # LEAD CLIENT TO FREE TABLE
    # --------------------------
    def lead_to_table(self):
        """If a waiting client is visible, click them and then a free table.

        NOTE(review): if no table match is found even at confidence 0.2,
        free_table stays None and the final click raises -- confirm whether
        that is handled by the caller's loop.
        """
        client = pyautogui.locateCenterOnScreen("client.png", confidence=.9, region=(120,300, 80, 200))
        if client is not None:
            print("Found a client and led them to a table.")
            pyautogui.click(client[0], client[1])
            free_table = pyautogui.locateCenterOnScreen("empty_table.png", confidence=.8)
            conf = .8
            while free_table is None and conf > .2:
                conf -= .1
                free_table = pyautogui.locateCenterOnScreen("empty_table.png", confidence=conf)
            pyautogui.click(free_table[0], free_table[1])
    # --------------------
    # TAKE CLIENT'S ORDER
    # --------------------
    def take_order(self):
        """Click any client (normal or mad) showing an order bubble."""
        ordering_client = pyautogui.locateCenterOnScreen("client_order.png", confidence=.7)
        if ordering_client is not None:
            print("A client wants to order something!")
            pyautogui.click(ordering_client[0], ordering_client[1])
        # check for mad clients
        ordering_client = pyautogui.locateCenterOnScreen("mad_client.png", confidence=.8)
        if ordering_client is not None:
            print("A mad client wants to order something!")
            pyautogui.click(ordering_client[0], ordering_client[1])
    # ---------------------
    # SERVE MEAL TO CLIENT
    # ---------------------
    def serve_meal(self):
        """Pick up the first ready meal from the counter and deliver it to
        the table showing the matching order bubble.

        NOTE(review): the inner while lowers confidence without a floor, so
        if the order image never matches this loop may not terminate
        (confidence can go negative) -- confirm intended behavior.
        """
        for meal in self.MEALS:
            meal_centered = pyautogui.locateCenterOnScreen(meal, confidence=.9, region=(0, 600, 490, 80))
            if meal_centered is not None:
                pyautogui.click(meal_centered[0], meal_centered[1])
                # find image reference
                look_for_order = self.MEAL2ORDER[meal]
                order_centered = pyautogui.locateCenterOnScreen(look_for_order, confidence=.8)
                conf = .8
                # if program doesn't recognize order, look for it until it succeed
                while order_centered is None:
                    conf -= .1
                    order_centered = pyautogui.locateCenterOnScreen(look_for_order, confidence=conf)
                pyautogui.click(order_centered[0], order_centered[1])
                food_name = look_for_order.split("/")[-1][:-4]
                print(f"Serving the meal {food_name} to the client.")
                time.sleep(0.3)
                break
    # ---------------
    # COLLECT MONEY
    # ---------------
    def collect_money(self):
        """Click any visible money pile; nudge the penguin away if it covers one."""
        money = pyautogui.locateCenterOnScreen("money.png", confidence=.8)
        if money is not None:
            print("Money collected.")
            pyautogui.click(money[0], money[1])
        # if there are money left, but the main charater covers it, force to move
        if pyautogui.locateCenterOnScreen("pickup.png", grayscale=True, confidence=.8) is not None:
            pyautogui.click(self.trash_bin)
    # ---------------
    # GAME FLOW
    # ---------------
    def run_game(self):
        """Main loop: handle end-of-level screens, otherwise serve customers forever."""
        while True:
            # if the level is accomplished
            endgame = pyautogui.locateCenterOnScreen("end.png", grayscale=True, confidence=.9)
            if endgame is not None:
                self.end_level()
            else:
                print("Looking for a client......")
                self.lead_to_table()
                self.take_order()
                self.serve_meal()
                self.collect_money()
    # ---------------------------
    #
    # GAME START
    #
    # ---------------------------
    def start_game(self):
        """Run the intro sequence and enter the main loop (never returns)."""
        self.cutscene_start()
        self.run_game()
if __name__ == "__main__":
bot = PenguinDinterBot() | true |
7c076e8212154a0bc6a5d8ea68051f69b4266ebf | Python | naufalscofield/kuisganteng | /app.py | UTF-8 | 12,830 | 2.53125 | 3 | [] | no_license | from flask import Flask, jsonify
app = Flask(__name__)
@app.route('/<gurih>')
def hello_world(gurih):
    # NOTE(review): echoes the raw path segment straight back to the client
    # (reflected user input) -- confirm this catch-all endpoint is intentional.
    return gurih
@app.route('/post/<int:post_id>')
def show_post(post_id):
    # show the post with the given id, the id is an integer
    # (the <int:> converter guarantees post_id is an int; non-ints 404)
    return 'Post %d' % post_id
@app.route('/crot', methods=['POST'])
def login():
    """Echo back the 'anu' form field of a POST request.

    Bug fix: ``request`` was used without ever being imported (the module
    only imports Flask and jsonify), so every call raised NameError.
    The import is done locally to keep this block self-contained.
    """
    from flask import request  # fix: `request` was never imported at module level
    return request.form['anu']
@app.route('/input/mahasiswa')
def mahasiswa():
mahasiswa = [
{
'npm':1164001,
'nama':'aldi'
},
{
'npm':1164019,
'nama':'naufal'
},
{
'npm':1164026,
'nama':'rojasqi'
},
{
'npm':1164025,
'nama':'rizal'
},
{
'npm':1164012,
'nama':'farhan'
},
{
'npm':1164009,
'nama':'ikri'
},
{
'npm':1164011,
'nama':'esi'
},
{
'npm':1164013,
'nama':'ema'
},
{
'npm':1164020,
'nama':'ojack'
},
{
'npm':1164021,
'nama':'seta'
}
]
return jsonify({'daftar mahasiswa':mahasiswa}) #will return the json
@app.route('/negara/kota')
def kota():
kota = [
{
'kota':'Bandung'
},
{
'kota':'Jakarta'
},
{
'kota':'Surabaya'
},
{
'kota':'Palembang'
},
{
'kota':'Medan'
},
{
'kota':'Lampung'
},
{
'kota':'Makasar'
},
{
'kota':'Aceh'
},
{
'kota':'Semarang'
},
{
'kota':'Padang'
},
{
'kota':'Purwokerto'
}
]
return jsonify({'daftar kota':kota}) #will return the json
@app.route('/kampus/bandung', methods=['GET'])
def kampus():
kampus = [
{
'universitas':'Unpar'
},
{
'universitas':'Maranatha'
},
{
'institut':'ITB'
},
{
'politeknik':'Polban'
},
{
'universitas':'UPI'
},
{
'universitas':'Unpad'
},
{
'politeknik':'poltekpos'
},
{
'universitas':'Unisba'
},
{
'politeknik':'Polman'
},
{
'institut':'ITENAS'
}
]
return jsonify({'daftar kampus di Bandung':kampus}) #will return the json
@app.route('/brand/tas', methods=['GET'])
def merektas():
merektas = [
{
'tas':'Chanel'
},
{
'tas':'Dior'
},
{
'tas':'Papylon'
},
{
'tas':'Hermes'
},
{
'tas':'Louis Vuiton'
},
{
'tas':'YVL'
},
{
'tas':'Prada'
},
{
'tas':'Gucci'
},
{
'tas':'Givenchy'
},
{
'tas':'Dolce&Gabbana'
}
]
return jsonify({'merek tas':merektas}) #will return the json
@app.route('/binatang/farhan', methods=['GET'])
def binatang():
binatang = [
{
'nama':'Kucing',
'Tempat':'Darat'
},
{
'nama':'Anjing',
'Tempat':'Darat'
},
{
'nama':'Ikan',
'Tempat':'Air'
},
{
'nama':'Burung',
'Tempat':'Darat/Udara'
},
{
'nama':'Kijang',
'Tempat':'Darat'
},
{
'nama':'Kadal',
'Tempat':'Darat'
},
{
'nama':'Gajah',
'Tempat':'Darat'
},
{
'nama':'Jerapah',
'Tempat':'Darat'
},
{
'nama':'Hiu',
'Tempat':'Air'
},
{
'nama':'Badak',
'Tempat':'Darat'
},
{
'nama':'Gorila',
'Tempat':'Darat'
}
]
return jsonify({'Binatang':binatang})
@app.route('/makanan/daerah', methods=['GET'])
def makanandaerah():
makanandaerah = [
{
'Aceh':'Mie Aceh'
},
{
'Medan':'Bika Ambon'
},
{
'Padang':'Rendang Padang'
},
{
'Jambi':'Gulai Ikan Patin Jambi'
},
{
'Bengkulu':'Pendap Benngkulu'
},
{
'Riau':'Gulai Belacan Riau'
},
{
'Palembang':'empek empek Palembang'
},
{
'Bangka':'Mie Bangka'
},
{
'Lampung':'Seruit Lampung'
},
{
'Jakarta':'Kerak Telor Jakarta'
}
]
return jsonify({'Makanan daerah di indonesia':makanandaerah})
@app.route('/minuman/soda', methods=['GET'])
def minumansoda():
minumansoda = [
{
'soda':'Sprite'
},
{
'soda':'fanta'
},
{
'soda':'diet coke'
},
{
'soda':'pepsi'
},
{
'soda':'Aw'
},
{
'soda':'Redbull'
},
{
'soda':'Mocktail'
},
{
'soda':'Squash'
},
{
'soda':'Big Cola'
},
{
'soda':'Coca Cola'
}
]
return jsonify({'minuman':minumansoda})
@app.route('/bandung/wisata', methods=['GET'])
def wisata():
wisata = [
{
'wisata':'FarmHouse'
},
{
'wisata':'The Loudge Maribaya'
},
{
'wisata':'Puncak Bintang'
},
{
'wisata':'Floating Market'
},
{
'wisata':'Dusun Bambu'
},
{
'wisata':'Dago Dream Park'
},
{
'wisata':'Nu Art'
},
{
'wisata':'Kawah Putih'
},
{
'wisata':'Gunung Tangkuban Perahu'
},
{
'wisata':'Kebun Teh'
}
]
return jsonify({'daftar wisata di bandung':wisata})
nama=[
{
'no':'21',
'name':'Seta Permana',
'hobi':'olahraga'
},
{
'no':'22',
'name':'Miftahul Hasanah',
'hobi':'masak'
}
]
@app.route('/club/Seta',methods=['GET'])
def getAllEmp():
return jsonify({'data':nama})
@app.route('/input/daftarhargagamesteam')
def daftarhargagamesteam():
game_steam = "Daftar Harga Game Di Steam"
return game_steam
@app.route('/Septi Nurhidayah', methods=['VIEW'])
def namalengkap():
return "Septi Nurhidayah"
@app.route('/tari/tradisional')
def taritradisional():
taritradisional = [
{
'tari':'Reyog Ponorogo'
},
{
'tari':'Serimpi'
},
{
'tari':'Bedaya'
},
{
'tari':'Gambyong'
},
{
'tari':'Pendet'
},
{
'tari':'Kecak'
},
{
'tari':'Saman'
},
{
'tari':'Tortor'
},
{
'tari':'Piring'
},
{
'tari':'Jaipong'
}
]
return jsonify({'macam macam tarian tradiional':taritradisional})
@app.route('/rumah/adat')
def rumahtradisional():
rumahtradisional = [
{
'Krong Bade':'Aceh'
},
{
'Bolon':'Sumatera Utara'
},
{
'Gadang':'Sumatera Barat'
},
{
'Melayu Selaso':'Riau'
},
{
'Selaso Jatuh Kembar':'Kepulauan Riau'
},
{
'Panjang':'Jambi'
},
{
'Limas':'Sumatera Selatan'
},
{
'Rakit':'Bangka Belitung'
},
{
'Bubungan Lima':'Bengkulu'
},
]
return jsonify({'berbagai rumah tradisional di indonesia':rumahtradisional})
@app.route('/merk/hp', methods=['GET'])
def merkhp():
merkhp = [
{
'hp':'Samsung'
},
{
'hp':'Nokia'
},
{
'hp':'Oppo'
},
{
'hp':'Vivo'
},
{
'hp':'Sonny'
},
{
'hp':'Xiaomi'
},
{
'hp':'Asus'
},
{
'hp':'Huawei'
},
{
'hp':'Esia'
},
{
'hp':'Mito'
}
]
return jsonify({'macam macam merk hp':merkhp})
@app.route('/merek/merekmobil')
def merekmobil():
merekmobil = [
{
'merek':'Lamborghini'
},
{
'merek':'BMW'
},
{
'merek':'Ford'
},
{
'merek':'Audi'
},
{
'merek':'Volkswagen'
},
{
'merek':'Porsche'
},
{
'merek':'Ferrari'
},
{
'merek':'Subaru'
},
{
'merek':'Bugatti'
},
{
'merek':'Mini Copper'
}
]
return jsonify({'daftar mobil':merekmobil})
@app.route('/Aksesoris/onky', methods=['GET'])
def aksesoris():
aksesoris = [
{
'aksesoris':'Jam Tangan'
},
{
'aksesoris':'Kalung'
},
{
'aksesoris':'Gelang'
},
{
'aksesoris':'Kacamata'
},
{
'aksesoris':'Anting'
},
{
'aksesoris':'Jepitan Rambut'
},
{
'aksesoris':'Baju'
},
{
'aksesoris':'Topi'
},
{
'aksesoris':'Jacket'
},
{
'aksesoris':'Sepatu'
}
]
return jsonify({'Aksesoris kecantikan':aksesoris})
@app.route('/media/sosial', methods=['GET'])
def mediasosial():
mediasosial = [
{
'mediasosial':'Facebook'
},
{
'mediasosial':'Instagram'
},
{
'mediasosial':'Line'
},
{
'mediasosial':'BBM'
},
{
'mediasosial':'Twitter'
},
{
'mediasosial':'hatshapp'
},
{
'mediasosial':'youTube'
},
{
'mediasosial':'Pinterest'
},
{
'mediasosial':'Tumblr'
},
{
'mediasosial':'Flickr'
}
]
return jsonify({'media sosial':mediasosial})
@app.route('/pakaian/adat', methods=['GET'])
def pakaianadat():
pakaianadat = [
{
'pakaian adat':'Ulee Balang'
},
{
'pakaian adat':'Kain Ulos'
},
{
'pakaian adat':'Bundo Kanduang'
},
{
'pakaian adat':'Melayu'
},
{
'pakaian adat':'Belanga'
},
{
'pakaian adat':'Melayu Jambi'
},
{
'pakaian adat':'Aesan Gede'
},
{
'pakaian adat':'Paksian'
},
{
'pakaian adat':'Tulang Bawang'
},
{
'pakaian adat':'Betawi'
}
]
return jsonify({'Pakaian adat':pakaianadat})
@app.route('/depart/store', methods=['GET'])
def departstore():
departstore = [
{
'store':'Yogya'
},
{
'store':'Griya'
},
{
'store':'Matahari'
},
{
'store':'Giant'
},
{
'store':'Ramayana'
},
{
'store':'Suzuya'
},
{
'store':'Supreme'
},
{
'store':'Lotte Duty'
},
{
'store':'Sogo'
},
{
'store':'Giovani'
}
]
return jsonify({'departemen store':departstore})
@app.route('/maskapai/penerbangan')
def maskapai():
maskapai = [
{
'maskapai':'Lion Air'
},
{
'maskapai':'Batik Air'
},
{
'maskapai':'Air Asia'
},
{
'maskapai':'Sriwijaya Air'
},
{
'maskapai':'Citilink'
},
{
'maskapai':'Garuda Indonesia'
},
{
'maskapai':'Wings Air'
},
{
'maskapai':'Susi Air'
},
{
'maskapai':'Xpress Air'
},
{
'maskapai':'Trans Nusa'
}
]
return jsonify({'maskapai penerbangan':maskapai})
@app.route('/input/sepatu')
def sepatu():
sepatu = [
{
'merek':'adidas'
},
{
'merek':'reebok'
},
{
'merek':'ardiles'
},
{
'merek':'nike'
},
{
'merek':'wirisab'
},
{
'merek':'retmq'
},
{
'merek':'labiray'
},
{
'merek':'puma'
},
{
'merek':'snakehead'
},
{
'merek':'kerawa'
}
]
return jsonify({'sepatu':sepatu}) #will return the json
@app.route('/Atmbandung/aldi', methods=['GET'])
def atmbandung():
atmbandung = [
{
'nama':'ATM Bank BJB',
'Tempat':'Jl.Raya Sarimanah Sarijadi'
},
{
'nama':'ATM OCBC NISP',
'Tempat':'Setrasari Plaza'
},
{
'nama':'ATM BCA',
'Tempat':'Setrasari Mall Alfamart Express'
},
{
'nama':'ATM Bank BJB',
'Tempat':'Gegerkalong'
},
{
'nama':'Bank Mega',
'Tempat':'Jl Surya Sumantri'
},
{
'nama':'ATM Bank BJB',
'Tempat':'Jl.Sarijadi Raya'
},
{
'nama':'ATM Danamon',
'Tempat':'Indomaret Maranatha Surya Sumantri'
},
{
'nama':'ATM Hana Bank',
'Tempat':'Jl.Dr.Surya Sumantri'
},
{
'nama':'ATM Mandiri',
'Tempat':'Beat Family Karoke'
},
{
'nama':'ATM Bank OCBC NISP',
'Tempat':'Jl. Dr.Surya Sumantri'
}
]
return jsonify({'Atmbandung':atmbandung})
@app.route('/Barang/Elektronik', methods=['GET'])
def elektronik():
elektronik = [
{
'Barang elektronik':'Smartphone'
},
{
'Barang elektronik':'Laptop'
},
{
'Barang elektronik':'Radio'
},
{
'Barang elektronik':'Dispenser'
},
{
'Barang elektronik':'Setrika'
},
{
'Barang elektronik':'Kamera'
},
{
'Barang elektronik':'Printer'
},
{
'Barang elektronik':'Scaner'
},
{
'Barang elektronik':'Kipas angin'
},
{
'Barang elektronik':'Speaker'
}
]
return jsonify({'Barang elektronik':elektronik}) | true |
a52b94314210679c370d13ac31bcc909150897fe | Python | amsatique/BKTelegramBot | /BK/bot.py | UTF-8 | 3,121 | 3.03125 | 3 | [] | no_license | import telepot
import roburger
import mongo_interact
import os
import time
from telepot.namedtuple import ReplyKeyboardMarkup, KeyboardButton
# Emojis and cute stuff
hourglass = u'\U0000231B'
hamburger = u'\U0001F354'
okHandSign = u'\U0001F44C'
star = u'\U00002B50'
thumbsUpSign = u'\U0001F44D'
clappingHandSign = u'\U0001F44D'
button1 = hamburger+" "+hamburger
button2 = star+star+star+star+star
burgerNumber = 999
# Interaction Strings
# User-facing /start message.
# Fix: the original embedded "# - " prefixes inside the triple-quoted string
# (they look like comments in the source but were sent to users verbatim)
# and had a grammar slip ("This bot generate").
welcomeText = ("Hi! This bot generates a code for some free " + button1 + "!\n"
               "1) Press " + button1 + ", get a code!\n"
               "2) Press " + button2 + ", give your feedback!\n"
               "Bon Appetit!")
aboutText = """Over """ + str(burgerNumber) + ' ' + hamburger + """ has been generated ! """+thumbsUpSign+"""
Like our bot? please give """ + button2 + """ on StoreBot :
>> http://telegram.me/storebot?start=bkcodebot <<"""
API_TOKEN = os.environ['API_TOKEN']
bot = telepot.Bot(API_TOKEN)
def handle(message):
    """Dispatch one incoming Telegram message.

    /freebk or the burger button -> generate/serve a coupon code;
    the stars button or /about    -> show the running generated-code count;
    /start                        -> show the welcome text;
    anything else                 -> a short hint.

    Bug fix: ``aboutText`` was formatted once at import time with the
    placeholder count (999); refreshing ``burgerNumber`` never changed the
    already-built string, so users always saw 999. The message is now
    rebuilt with the current count on every request.
    """
    content_type, chat_type, chat_id = telepot.glance(message)
    print(content_type, chat_type, chat_id)

    keyboard = ReplyKeyboardMarkup(keyboard=[
        [KeyboardButton(text=button1), KeyboardButton(text=button2)]
    ])

    if content_type != 'text':
        return

    feedback = message['text'].lower()
    global burgerNumber

    if feedback in ('/freebk', button1):
        holding_burger_generation(chat_id, keyboard)
    elif feedback in (button2, '/about'):
        burgerNumber = mongo_interact.MongoInteract().countAllBurgerGenerated()
        # rebuild the about message so it reflects the fresh count
        about = ("Over " + str(burgerNumber) + ' ' + hamburger
                 + " has been generated ! " + thumbsUpSign + "\n"
                 + "Like our bot? please give " + button2 + " on StoreBot :\n"
                 + ">> http://telegram.me/storebot?start=bkcodebot <<")
        bot.sendMessage(chat_id, about, reply_markup=keyboard)
    elif feedback == '/start':
        bot.sendMessage(chat_id, welcomeText, reply_markup=keyboard)
    else:
        bot.sendMessage(chat_id, "Press " + button1 + ", get burgers!", reply_markup=keyboard)
def have_a_good_meal_string():
    """Return the closing 'Bon appetit' line, framed by burger emojis."""
    return "\n {0} Bon appetit! {0}".format(hamburger)
def holding_burger_generation(chat_id, keyboard):
    """Send one coupon code to *chat_id* and keep the Mongo-backed pool topped up.

    Strategy (g = number of pre-generated codes available):
      g == 0  -> generate a code synchronously for the user, then refill 5;
      0<g<5   -> serve a stored code and generate 2 more;
      g > 4   -> just serve a stored code.

    NOTE(review): ``g = e.codecountavailable`` reads an attribute without
    calling it -- if this is a method rather than a property, every numeric
    comparison below is wrong (``g == 0`` is always False and ``0 < g``
    raises TypeError on Python 3). Confirm against mongo_interact.
    """
    e = mongo_interact.MongoInteract()
    q = roburger
    g = e.codecountavailable
    if g == 0:
        print('g0')
        bot.sendMessage(chat_id, q.burgermain(1)[0] + have_a_good_meal_string(), reply_markup=keyboard)
        u = q.burgermain(5)
        print(u)
        e.insertANewCode(u)
        e.updateGeneratedNumber(1)
    elif 0 < g < 5:
        print('g14')
        r = e.getACode()
        bot.sendMessage(chat_id, r + have_a_good_meal_string(), reply_markup=keyboard)
        u = q.burgermain(2)
        e.insertANewCode(u)
    elif g > 4:
        print('g5or+')
        r = e.getACode()
        bot.sendMessage(chat_id, r + have_a_good_meal_string(), reply_markup=keyboard)
    else:
        print("Else? ")
        bot.sendMessage(chat_id, q.burgermain(1)[0] + have_a_good_meal_string(), reply_markup=keyboard)
        e.updateGeneratedNumber(1)
e.updateGeneratedNumber(1)
bot.message_loop(handle)
print('Ready to serve..')
while 1:
time.sleep(10) | true |
9a3bc6fedac9c26dd9080c999bf2468da0f1caac | Python | NatanLisboa/python | /exercicios-cursoemvideo/Mundo3/ex076.py | UTF-8 | 745 | 4.1875 | 4 | [] | no_license | # Mundo 3 - Aula 16 - Variáveis Compostas - Tuplas
# Exercício Python 076: Crie um programa que tenha uma tupla única com nomes de produtos e seus respectivos preços, na
# sequência. No final, mostre uma listagem de preços, organizando os dados em forma tabular.
# Tuple holding products and prices in alternating positions:
# even indexes are names, odd indexes hold the price of the preceding name.
produtos = ('Lápis', 1.75, 'Borracha', 2, 'Caderno', 15.9, 'Estojo', 25, 'Transferidor', 4.2,
            'Compasso', 9.99, 'Mochila', 120.32, 'Canetas', 22.3, 'Livro', 34.9)
print('\nExercício 76 – Lista de Preços com Tupla')
print('-' * 50)
# Fix: the header was centred in 60 columns while every rule line and data
# row is ~50 wide, leaving the title visibly off-centre; use 50 throughout.
print(f'{"TABELA DE PREÇOS":^50}')
print('-' * 50)
for posicao, produto in enumerate(produtos):
    if posicao % 2 == 0:
        print(f'{produto:.<40}', end='')   # product name, dot-padded to 40 cols
    else:
        print(f'R${produto:>7.2f}')        # price, right-aligned, 2 decimals
print('-' * 50)
| true |
9b0d0fbff587ddf76bd1c876f3ddbc8ed4954f5c | Python | gutsergey/PythonSamples | /file_reader_with_cursor.py | UTF-8 | 436 | 3.734375 | 4 | [] | no_license | try:
# работа с курсором
with open('example_text.txt', 'r') as file:
contents = file.read(10) # указываем кол-во символов для чтения
# курсор перемещается на 11 символ
rest = file.read() # читаем с 11 символа
print("10:", contents)
print("остальное:", rest)
except:
print ("Error opening file")
| true |
28b7f1fd84d369c3b0e5eb929c77eb011983f891 | Python | OnikenX/github-twitter-commits | /tests/args | UTF-8 | 240 | 2.578125 | 3 | [
"MIT"
] | permissive | #!/bin/python
import sys
import getopt
SERVER = '192.168.1.8'
INPUT = ' '
while INPUT[0].lower() != 'y' and INPUT[0].lower() != 'n' :
INPUT = input(f'is this the ip [{SERVER}][y/n]')
print(f"args[{len(sys.argv)}] = {str(sys.argv[1])}")
| true |
0d5f2cde8a475db6e8c2691e34a32e4ddcf23617 | Python | heroccccc/QtMultimediaVideo | /testmedia.py | UTF-8 | 1,736 | 2.609375 | 3 | [
"MIT"
] | permissive | from PyQt5.QtCore import QUrl
from PyQt5.QtMultimedia import QMediaContent, QMediaPlayer
from PyQt5.QtMultimediaWidgets import QVideoWidget
from PyQt5.QtWidgets import QApplication, QPushButton, QVBoxLayout, QWidget
from PyQt5.QtWidgets import QMainWindow,QWidget, QPushButton
import sys
class Window(QMainWindow):
    """Minimal video player window: a video surface with play/pause buttons."""
    def __init__(self):
        super(Window, self).__init__()
        videoWidget = QVideoWidget()
        # place the two control buttons
        btn = QPushButton("play", self)
        btn2 = QPushButton("pause", self)
        # overall layout: video area on top, buttons below
        layout = QVBoxLayout()
        layout.addWidget(videoWidget)
        layout.addWidget(btn)
        layout.addWidget(btn2)
        # host widget for the video playback area
        wid = QWidget(self)
        wid.setLayout(layout)
        self.setCentralWidget(wid)
        self.mediaPlayer = QMediaPlayer(None, QMediaPlayer.VideoSurface)
        self.mediaPlayer.setVideoOutput(videoWidget)
        # absolute path of the file to play (placeholder -- fill in a real path)
        self.mediaPlayer.setMedia(QMediaContent(QUrl.fromLocalFile("***********************")))
        # wire each button to its handler method
        btn.clicked.connect(self.play)
        btn2.clicked.connect(self.pause)
    # start playback
    def play(self):
        self.mediaPlayer.play()
    # pause playback
    def pause(self):
        self.mediaPlayer.pause()
if __name__ == '__main__':
    app = QApplication(sys.argv)
    player = Window()
    # query the display size
    screen = app.desktop()
    height = screen.height()
    width = screen.width()
    # resize the window to fill the whole screen
    player.resize(width,height)
    player.show()
    sys.exit(app.exec_())
| true |
abbdccb4e475efbf8df02b74a18b27626153f176 | Python | Harshpatel44/Pykinter | /Pykinter 3.0/singleton.py | UTF-8 | 242 | 2.671875 | 3 | [
"MIT"
] | permissive | def singleton(my_class):
instances = {}
def get_instance(*args, **kwargs):
if my_class not in instances:
instances[my_class] = my_class(*args, **kwargs)
return instances[my_class]
return get_instance
| true |
c1fa73b1bee30935d8e98d7ee02b71c949ba9ae2 | Python | psychedel/ischedule | /tests/test_cancel.py | UTF-8 | 441 | 2.78125 | 3 | [] | no_license | from math import isclose
from time import monotonic
from src.ischedule import reset, run_loop, schedule
def test_cancel_notasks():
reset()
run_loop(return_after=1)
def test_cancel_longtast():
reset()
@schedule(interval=2)
def task():
print("Doing task")
start = monotonic()
run_loop(return_after=1.5)
end = monotonic()
print(end - start)
assert isclose(end - start, 1.5, abs_tol=0.001)
| true |
b8d4d488d486afd194c374c4f0e71e48c7eeaea2 | Python | achung695/coreachord | /scripts/gen-transition-matrix-med.py | UTF-8 | 5,337 | 3.03125 | 3 | [] | no_license | import pandas as pd
print("generating transition matrix (medium chord diversity)...")
# all chord names
chord_names = ['C', 'Db', 'D', 'Eb', 'E', 'F', 'Gb', 'G', 'Ab', 'A', 'Bb', 'B']
# qualities
qualities = ['maj7', '-7', '-7b5', '7']
# all chords combining chord names and qualities
all_chords = []
# fill all_chords
for chord in chord_names:
for quality in qualities:
all_chords.append(chord + quality)
# # create enum representation with dicts
num_to_chord_name = {i: chord for i, chord in enumerate(all_chords)}
chord_name_to_num = {chord: i for i, chord in enumerate(all_chords)}
# data for our csv
t_matrix_data = []
# size of the row
row_size = len(all_chords) + 1
# initialize pandas data frame with column name including the start label
df = pd.DataFrame(columns=(['start'] + all_chords))
# use circle of fifths, sorta?
roots = len(chord_names)
quals = len(qualities)
# diatonic step functions
# Each helper transposes a pitch-class (root) index by a fixed number of
# semitones, wrapping modulo the 12 chord roots.
# NOTE(review): the names suggest scale-step motion but the offsets do not
# all follow one consistent naming scheme (e.g. down1 moves one *semitone*
# down while up1 moves a whole step up) -- confirm intended intervals.
def down5(chord_name_num):
    # +5 semitones == down a perfect 5th (equivalently up a perfect 4th)
    return (chord_name_num + 5) % roots
def up5(chord_name_num):
    # +7 semitones == up a perfect 5th
    return (chord_name_num + 7) % roots
def up1(chord_name_num):
    # +2 semitones == up a whole step
    return (chord_name_num + 2) % roots
def down2(chord_name_num):
    # +9 semitones == down 3 semitones (a minor 3rd) -- TODO confirm intent
    return (chord_name_num + 9) % roots
def down2half(chord_name_num):
    # +8 semitones == down 4 semitones (a major 3rd)
    return (chord_name_num + 8) % roots
def down1(chord_name_num):
    # +11 semitones == down a half step
    return (chord_name_num + 11) % roots
def down1half(chord_name_num):
    # +10 semitones == down a whole step
    return (chord_name_num + 10) % roots
def uphalf(chord_name_num):
    # +1 semitone == up a half step
    return (chord_name_num + 1) % roots
def up2(chord_name_num):
    # +4 semitones == up a major 3rd
    return (chord_name_num + 4) % roots
# function rules based on chord quality, add 1 to chord index bc of first label
# -7b5 rule should always go to the respective dom7
def m7b5_rule(chord_num, row):
chord_name_num = chord_num // quals
# num of chord NAME that is 5 below
d5 = down5(chord_name_num)
uh = uphalf(chord_name_num)
# dom7
row[(d5 * quals) + 3 + 1] = 0.92
# #IV-7b5 -> V
row[(uh * quals) + 3 + 1] = 0.08
# maj7 + 0, -7 + 1, -7b5 + 2, dom7 + 3
# dom7 rule should go 5th down, 1/2 down, and 1 up
def dom7_rule(chord_num, row):
chord_name_num = chord_num // quals
# num of chord NAME that is 5 below, 0.6
d5 = down5(chord_name_num)
# dom7
row[(d5 * quals) + 3 + 1] = 0.375
# -7
row[(d5 * quals) + 1 + 1] = 0.175
# maj7
row[(d5 * quals) + 1] = 0.15
d1 = down1(chord_name_num)
# dom7
row[(d1 * quals) + 3 + 1] = 0.1
# -7
row[(d1 * quals) + 1 + 1] = 0.075
# 7
row[(d1 * quals) + 1] = 0.075
u1 = up1(chord_name_num)
# maj7
row[(u1 * quals) + 1] = 0.05
def maj7_rule(chord_num, row):
chord_name_num = chord_num // quals
# to minor same
row[chord_name_num * quals + 1 + 1] = 0.055
# if we are at Cmaj7,
# F
d5 = down5(chord_name_num)
# TOTAL 0.075
# Fmaj7
row[(d5 * quals) + 1] = 0.04
# F-7
row[(d5 * quals) + 1 + 1] = 0.025
# TOTAL 0.5
# D
u1 = up1(chord_name_num)
# D-7
row[(u1 * quals) + 1 + 1] = 0.39
# D7
row[(u1 * quals) + 3 + 1] = 0.09
# 0.025
# Ab
d2h = down2half(chord_name_num)
# Abmaj7
row[(d2h * quals) + 1] = 0.02
# TOTAL 0.15
# A
d2 = down2(chord_name_num)
# A-7
row[(d2 * quals) + 1 + 1] = 0.1
# A7
row[(d2 * quals) + 3 + 1] = 0.05
# TOTAL 0.1
# G
u5 = up5(chord_name_num)
# G7
row[(u5 * quals) + 3 + 1] = 0.1
# TOTAL 0.075
# B
d1 = down1(chord_name_num)
# B-7b5
row[(d1 * quals) + 2 + 1] = 0.05
# B-7
row[(d1 * quals) + 1 + 1] = 0.01
# 0.15 left
# Bb 0.05
d1h = down1half(chord_name_num)
# Bb7
row[(d1h * quals) + 3 + 1] = 0.04
# Bbmaj7
row[(d1h * quals) + 1] = 0.01
# 0.025 left
# E-7
u2 = up2(chord_name_num)
row[(d1 * quals) + 1 + 1] = 0.02
def min7_rule(chord_num, row):
chord_name_num = chord_num // quals
# if we are at C-7,
# 0.025
# Ab
d2h = down2half(chord_name_num)
# Abmaj7
row[(d2h * quals) + 1] = 0.025
# F
d5 = down5(chord_name_num)
# TOTAL 0.6
# F-7
row[(d5 * quals) + 1 + 1] = 0.125
# F7
row[(d5 * quals) + 3 + 1] = 0.475
# B7, sub 5 0.15
d1 = down1(chord_name_num)
# B7
row[(d1 * quals) + 3 + 1] = 0.15
# TOTAL 0.1
# D
u1 = up1(chord_name_num)
# D-7b5
row[(u1 * quals) + 2 + 1] = 0.075
# TOTAL 0.1
# G
u5 = up5(chord_name_num)
# G7
row[(u5 * quals) + 3 + 1] = 0.075
# G-7
row[(u5 * quals) + 1 + 1] = 0.025
d1h = down1half(chord_name_num)
# Bb7
row[(d1h * quals) + 3 + 1] = 0.05
def generate():
for idx, chrd in enumerate(all_chords):
# chrd is the name of the chord
# num of the chord
chord_num = chord_name_to_num[chrd]
transition_row = [0] * row_size
transition_row[0] = chrd
chord_quality = chord_num % quals
if chord_quality == 0:
maj7_rule(chord_num, transition_row)
elif chord_quality == 1:
min7_rule(chord_num, transition_row)
elif chord_quality == 2:
m7b5_rule(chord_num, transition_row)
elif chord_quality == 3:
dom7_rule(chord_num, transition_row)
df.loc[idx] = transition_row
generate()
df.to_csv(r't-mat-med.csv', index=False, header=True)
| true |
39500c0104d902ab793666302e5b9519e62b8e52 | Python | DanielYe1/UriResolutions | /python_resolutions/beginner/1020.py | UTF-8 | 208 | 3.734375 | 4 | [
"Apache-2.0"
] | permissive | import math
a = int(input())
year = math.floor(a / 365)
n = a % 365
month = math.floor(n/ 30)
day = n % 30
print("{0} ano(s)".format(year))
print("{0} mes(es)".format(month))
print("{0} dia(s)".format(day)) | true |
c0367d9cc55fb586a631eb086ee40ca6dbb78b18 | Python | v-v-d/Python_client-server_apps | /messenger/server/src/middlewares.py | UTF-8 | 405 | 2.625 | 3 | [
"MIT"
] | permissive | """Middlewares for server side messenger app."""
import zlib
from functools import wraps
def compression_middleware(func):
"""Decompress request and return compression result."""
@wraps(func)
def wrapper(request, *args, **kwargs):
b_request = zlib.decompress(request)
b_response = func(b_request, *args, **kwargs)
return zlib.compress(b_response)
return wrapper
| true |
0511a5f3fd713d9982c1cfca1a81cc1f2bf0fd2b | Python | wrosko/EXDS | /Week 4/convert_to_waveraw.py | UTF-8 | 727 | 3.390625 | 3 | [] | no_license | import sound
import ??? as myfile
# ??? should be replaced with the name of
#the file which has your functions.
#Read the statements and comments below.
#You will need to make appropriate changes to the statements
#to work with different wav files and test various functions
#you have written.
#Converts the sound in grace.wav file to a Sound object.
snd = sound.Sound(filename='grace.wav')
#The function fade that you have written is called
#and the Sound object it returns is assigned to gracefade.
gracefade = myfile.fade(snd, len(snd))
#The Sound object gracefade is converted to sound in
#a wav file called grace_fade.wav.
#This wav file did not exist before but is newly created.
gracefade.save_as('grace_fade.wav')
| true |
e2bcf187713c7a3dd199872d93c377a524ddb77c | Python | SayedJPQ/Curso-Python | /Leccion4.py | UTF-8 | 681 | 3.875 | 4 | [] | no_license | #Listas
Lista1=["El pepe", "Ete Sech", "El pepeX2"]
Lista2=[1,2,3,4,5]
#Agregar elementos a las listas
Lista1.append("Sandro")
#Agregar los elementos controlando la posicion
Lista1.insert(1, "Chao")
#Agregar 2 o mas elementos
Lista1.extend(["Hallo", "Ja", "Da"])
#Eliminar elementos
Lista1.remove("El pepeX2")
#Eliminar ultimo elemento
Lista1.pop()
print(Lista1)
#Llamar indices
print(Lista1[0])
print(Lista1[2])
#Indices negativos
print(Lista1[-2])
#Exclusiones
print(Lista1[0:2])
#Encontrar si el elemento esta en lista
print("El pepe" in Lista1)
print("SS" in Lista1)
#Fusionar Listas
Lista3=Lista1+Lista2
print(Lista3)
#Repetir lista
Lista4=[12,12,13,13,14,14] * 3
print(Lista4)
| true |
cf2a84ff38b83fb1ad0f74b553c64d37d9d10c33 | Python | Lem0049/less0n3 | /3/uuu.py | UTF-8 | 473 | 3.796875 | 4 | [] | no_license | #import random
#value = random.randint(0,10)
#if value > 5:
# print(value)
#else:
# print(value)
# month_num = int(input("Введите номер месяца"))
#
# if month_num > 12
# print("noooo")
# elif month_num >= 9 and month_num <= 11:
# print("autumn")
# elif month_num >= 6 and month_num <= 8 :
# print("summer")
#
def print_nums(x):
res = 0
for i in range(x):
print(i)
res += i
return res
print(print_nums(4))
| true |
b33d2a85b90c956c0d523c8e68eaae7965e8a5b9 | Python | yanxurui/keepcoding | /python/algorithm/leetcode/541.py | UTF-8 | 472 | 3.28125 | 3 | [] | no_license | class Solution:
def reverseStr(self, s: str, k: int) -> str:
buf = []
for i in range(0, len(s), 2*k):
buf.append(s[i:i+k][::-1])
buf.append(s[i+k:i+2*k])
return ''.join(buf)
if __name__ == '__main__':
from testfunc import test
test_data = [
(
(
'abcdefg',
2
),
'bacdfeg'
)
]
test(Solution().reverseStr, test_data)
| true |
af833a2fbcbee5807ad096ac9b5bfc6886b4f48f | Python | NetSecLife/codeeval | /lettercase_percentage_ratio.py | UTF-8 | 575 | 3.375 | 3 | [] | no_license | import sys
def main():
test_cases = open(sys.argv[1], 'r')
for test in test_cases:
low_count, total, high_count = 0, 0, 0
for i in test:
if i.isupper():
high_count += 1
total += 1
elif i.islower():
low_count += 1
total += 1
high_percent = format((high_count / total * 100), '.2f')
low_percent = format((low_count / total * 100), '.2f')
print('lowercase: ' + low_percent + ' ' + "uppercase: " + high_percent)
test_cases.close()
main() | true |
5e1741efe8c59aa6c05d8ed02238f5b1414c9a64 | Python | chokosabe/sains | /settings.py | UTF-8 | 334 | 2.515625 | 3 | [] | no_license | from collections import OrderedDict
MAX_LINES = 2
ALLOWED_DAYS = ['mon', 'tue', 'wed', 'thu', 'fri']
INDEXED_DAYS = OrderedDict([
('mon', 0),
('tue', 1),
('wed', 2),
('thu', 3),
('fri', 4)
])
ACTIONS = {
'mon': 'square',
'tue': 'square',
'wed': 'square',
'thu': 'double',
'fri': 'double'
}
| true |
09eeffe5e957aa4f547008df016cc53f21a72a38 | Python | KarinaYatskevich/python | /Lesson/Lessons/Other/oop2.py | UTF-8 | 873 | 3.953125 | 4 | [] | no_license | import string
class Alphabet:
def __init__(self, land, letter):
self.land = land
self.letter = list(letter)
def print(self):
return self.letter
def letters_num(self):
len(self.letter)
class EngAlphabet(Alphabet):
__letter_num = 26
def __init__(self, ):
super(EngAlphabet, self).__init__('En', string.ascii_uppercase)
def __letters_num(self):
return EngAlphabet.__letter_num
def is_en_letter(self, let):
if let.upper() in self.letter:
return "yees"
else:
return 'nooo'
@staticmethod
def example():
print("smth")
if __name__ == '__main__':
e = EngAlphabet()
print(e.print())
print(e.letters_num())
print(e.is_en_letter('F'))
print(e.is_en_letter('Щ'))
print(e.example())
| true |
36cd031b72c047c9b0a8525b9f0577f31ea37bfb | Python | Kawser-nerd/CLCDSA | /Source Codes/AtCoder/arc077/B/3559622.py | UTF-8 | 710 | 2.828125 | 3 | [] | no_license | from collections import Counter
N = int(input())
A = list(map(int,input().split()))
MOD = 10**9+7
ctr = Counter(A)
doub = ctr.most_common()[0][0]
i1 = A.index(doub)
i2 = N - A[::-1].index(doub)
l = N - (i2-i1)
fac = [1,1] + [0]*N
finv = [1,1] + [0]*N
inv = [0,1] + [0]*N
for i in range(2,N+2):
fac[i] = fac[i-1] * i % MOD
inv[i] = -inv[MOD%i] * (MOD // i) % MOD
finv[i] = finv[i-1] * inv[i] % MOD
def ncr(n,r):
if n < r: return 0
if n < 0 or r < 0: return 0
return fac[n] * (finv[r] * finv[n-r] % MOD) % MOD
ans = []
for n in range(1,N+2):
ans.append(ncr(N+1,n))
for i in range(l+1):
ans[i] -= ncr(l,i)
ans[i] %= MOD
print(*ans, sep='\n') | true |
1d97a7cf0c59b36d3dd989067f487fc8cd6a6c0d | Python | junyi1997/Final_OIT_projet | /Steper/vendor/StepMotor.py | UTF-8 | 3,593 | 3.28125 | 3 | [] | no_license | """
使用於Python3
使用此程式前,必須先安裝好RPi.GPIO(記得在樹莓派灌),如果沒灌好一定會有錯。
想安裝RPi.GPIO,且如果你有pip的話,可打下方指令完成安裝
pip install RPi.GPIO
"""
import time
import RPi.GPIO as GPIO
class StepMotor(object):
"""
StepMotor 此類別為簡單操作兩相4線控之步進馬達用
"""
forward_seq = ['1100', '0110', '0011', '1001']
"""
forward_seq 為步進馬達正轉之輸出順序
"""
reverse_seq = ['1001', '0011', '0110', '1100']
"""
reverse_seq 為步進馬達正轉之輸出順序
"""
a1_pin = 17 #A
a2_pin = 27 #B
b1_pin = 23 #/A
b2_pin = 24 #/B
"""
上述為樹莓派pin腳定義
"""
all_pin = [a1_pin,
a2_pin,
b1_pin,
b2_pin]
"""
上述為樹莓派pin腳定義之陣列
"""
def __init__(self):
GPIO.setmode(GPIO.BCM)
GPIO.setup(StepMotor.all_pin, GPIO.OUT)
pass
def initialize(self):
self.set_step('0000')
def forward(self, delay, steps):
"""正轉用 Function
參數:
===
delay(float) : 延遲時間(sec)
(數值盡量在50/1000~4/1000,是不同步進馬達規格而定)
steps(int) : 期望走的步數
舉例:
===
forward(10/1000,100)
"""
steps /= 4
self._ward(delay,int(steps),StepMotor.forward_seq)
def backward(self,delay, steps):
"""反轉用 Function
參數:
===
delay(float) : 延遲時間(sec)
(數值盡量在50/1000~4/1000,是不同步進馬達規格而定)
steps(int) : 期望走的步數
舉例:
===
forward(10/1000,100)
"""
steps /= 4
self._ward(delay,int(steps),StepMotor.reverse_seq)
def set_step(self, step):
for i in range(4):
GPIO.output(StepMotor.all_pin[i], int(step[i]))
pass
def _ward(self, delay, steps, seq):
for _ in range(steps):
for step in seq:
self.set_step(step)
time.sleep(delay)
def clean(self):
GPIO.cleanup()
"""
以下為測試程式,直接執行該程式即可進行測試。
"""
class Test_StepMotor(StepMotor):
def __init__(self):
self.test_count = [0,0,0,0]
pass
def set_step(self, step,seq):
for i in range(4):
if step == seq[i]:
self.test_count[i] += 1
def _ward(self, delay, steps, seq):
for _ in range(steps):
for step in seq:
self.set_step(step,seq)
time.sleep(delay)
def test_step_info(self):
sum = 0
for i in range(len(self.test_count)):
sum += self.test_count[i]
# print(seq.__name__," " , seq , end="\n")
# print(self.test_count, end="\n")
# print("總共走 ", sum , " 步")
return sum
import unittest
class TestStepMotorMethods(unittest.TestCase):
def test_forward(self):
test_StepMotor = Test_StepMotor()
test_StepMotor.forward(1/1000,100)
self.assertEqual(test_StepMotor.test_step_info(),100)
def test_backward(self):
test_StepMotor2 = Test_StepMotor()
test_StepMotor2.backward(1/1000,200)
self.assertEqual(test_StepMotor2.test_step_info(),200)
def main():
unittest.main()
pass
if __name__ == '__main__':
main() | true |
de2426d2fa83cf57d06cfa1c56144b78acf4d684 | Python | rajatthosar/leetcode | /655_print_binary_tree.py | UTF-8 | 719 | 3.21875 | 3 | [] | no_license | from collections import deque
# Definition for a binary tree node.
from typing import List
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def printTree(self, root: TreeNode) -> List[List[str]]:
if not root:
return [[""]]
q = deque([root])
levels = 0
tempq = []
while q:
node = q.popleft()
if node.left:
tempq.append(node.left)
if node.right:
tempq.append(node.right)
if not q:
levels += 1
q = deque(tempq)
tempq = [] | true |
bbc9c5e090acd3a6f7761a0a3519643ea120cdee | Python | ypkoo/flow | /studylamp/state.py | UTF-8 | 1,808 | 2.875 | 3 | [] | no_license | __author__ = 'koo'
import sqlite3
from db_manager import db
# states
COVER = 0
MENU = 1
LEARNING = 2
SOLVING = 3
GRADED = 4
REVIEW = 5
PROGRESS = 6
BUFFER = 7
class StateManager:
def __init__(self):
self._state = BUFFER
self._title = None
self.cur_page = -1
self.page_count = 0
self.new_pages = []
def get_state(self):
return self._state
def set_state(self, new_state):
if new_state != self._state:
self._state = new_state
changed = True
else:
changed = False
if changed:
print 'state changed:', new_state
return changed
@property
def title(self):
return self._title
@title.setter
def title(self, new_title):
self._title = new_title
def get_current_page(self, new_page):
if new_page != self.cur_page:
pass
self.new_pages.append(new_page)
self.page_count = self.page_count + 1
if self.page_count == 3:
# recognize 3 consecutive new pages. If all 3 pages are same, change current page.
if self.new_pages[0] == self.new_pages[1] and self.new_pages[1] == self.new_pages[2]:
conn = sqlite3.connect('studylamp.db')
cursor = conn.cursor()
if db.page_state(cursor, new_page) != False:
#print 'new page', new_page
self.cur_page = new_page
cursor.close()
conn.close()
self.new_pages[0] = self.new_pages[1]
self.new_pages[1] = self.new_pages[2]
self.new_pages.pop()
self.page_count = self.page_count - 1
#print 'page', self.cur_page
return self.cur_page
state = StateManager()
| true |
626991a9caf9856b27e02a35e66fe813717f0096 | Python | hit-e304/uwb_test | /anchor.py | UTF-8 | 1,695 | 2.78125 | 3 | [] | no_license | import time
import struct
import binascii
import serial
import json
portx = 'COM9'
bps = 921600
timex = 5
self_num = 0
dis = {}
str_dis = []
ser = serial.Serial(portx, bps, timeout=timex)
def is_number(s):
try:
float(s)
return True
except ValueError:
pass
try:
import unicodedata
unicodedata.numeric(s)
return True
except (TypeError, ValueError):
pass
return False
def read_unit(b, width=6, a=0):
'''read information of one unit
output:
'''
ans = []
ans.append(b[31+a] * 65536 + b[30+a] * 256 + b[29+a])
ans.append(b[34+a] * 65536 + b[33+a] * 256 + b[32+a])
ans.append(b[37+a] * 65536 + b[36+a] * 256 + b[35+a])
for i, pos in enumerate(ans):
if pos > 16 ** (width - 1) -1:
ans[i] = pos - 16 ** width
ans[i] = ans[i] / 1000
return ans
if __name__ == '__main__':
test_id = -25
while True:
if ser.inWaiting:
data = ser.read(ser.inWaiting())
# print(data)
if len(data) < 40:
continue
if data[0] == 85 and data[1] == 0:
test = read_unit(data, a = test_id)
save_inf = json.dumps({'pos_x': test[0],
'pos_y': test[1],
'pos_z': test[2]})
with open('user_info.json', 'w', encoding='utf-8') as json_file:
json.dump(save_inf, json_file, ensure_ascii=False)
print(test[0], test[1], test[2], test_id)
# print(time.time() - start_time)
# else:
# print('Data Error')
| true |
a9526b6a7a4b747187d98e2b587d663aba832be4 | Python | ArBond/ITStep | /Python/lessons/lesson2_arithmetic/main4.py | UTF-8 | 171 | 3.4375 | 3 | [] | no_license | #Vychislit' ploshad' kruga
PI = 3.14
r = float(input("Vvedite radius kruga(sm): "))
print("Ploshad' kruga = %.2f" % (PI * r * r), "sm")
input("Press Enter to continue...") | true |
571c7e29b6a2606c71eba89ac96cdf7291ebdcf5 | Python | praveshtayal/pinception | /dp/1436_DP_findMaxSquareWithAllZeros.py | UTF-8 | 1,178 | 3.5 | 4 | [] | no_license | def findMaxSquareWithAllZeros(arr):
# Given a n*m matrix which contains only 0s and 1s, find out the size of
# maximum square sub-matrix with all 0s. You need to return the size of
# square with all 0s. */
row = len(arr)
col = len(arr[0])
# Create a storage of size row+1*col+1
storage = [[0 for i in range(col+1)] for j in range(row+1)]
for i in range(row-1,-1,-1):
for j in range(col-1,-1,-1):
storage[i][j] = max(storage[i+1][j], storage[i][j+1])
maximum = storage[i][j]
if min(row-i, col-j)<=maximum:
continue
foundOne = False
for p in range(0, maximum+1):
for q in range(0, maximum+1):
if arr[i+p][j+q]==1:
foundOne = True
break
if foundOne:
break
if foundOne==False:
storage[i][j] += 1
return storage[0][0]
# Main
m, n=(int(i) for i in input().strip().split(' '))
mat = [ [] ] * m
for i in range(m):
mat[i]=list(int(i) for i in input().strip().split(' '))
print(findMaxSquareWithAllZeros(mat))
| true |
e43c33d5d5dd4eebe8a49ab8c87e6b5bf5f14215 | Python | allanzi/truck-challenge | /app/controllers/travel_controller.py | UTF-8 | 3,235 | 2.625 | 3 | [] | no_license | from flask_restful import Resource
from flask import jsonify, make_response, request
from werkzeug.exceptions import NotFound
from models.travel_model import TravelModel
from validators.travel_validator import TravelCreateValidator, TravelUpdateValidator
from marshmallow import ValidationError
class TravelShow(Resource):
def __init__(self):
self.model = TravelModel()
self.validator = TravelUpdateValidator()
super().__init__()
def get(self, id):
response = self.model.findById(id)
if response is None:
return make_response({
'message': 'This travel does not exists!'
}, 404)
return jsonify({
'data': self.model.transform(response)
})
def delete(self, id):
response = self.model.remove(id)
if response is False:
return make_response({
'message': 'This travel does not exists!'
}, 404)
return make_response('', 204)
def put(self, id):
try:
data = request.json
travel = self.validator.load(data)
updatedTravel = self.model.update(id, travel)
if updatedTravel is False:
return make_response({
'message': 'Bad request!',
'errors': {
'user_id': [
'Must be a valid ObjectId or Not found.'
]
}
}, 400)
return make_response(
jsonify({
'data': self.model.transform(updatedTravel)
}), 200)
except ValidationError as err:
return make_response({
'message': 'Bad request!',
'errors': err.messages
}, 400)
except NotFound as err:
return make_response({
'message': 'This travel does not exists!'
}, 404)
class TravelCreateAndList(Resource):
def __init__(self):
self.validator = TravelCreateValidator()
self.model = TravelModel()
super().__init__()
def get(self):
response = []
travels = self.model.findAll()
for travel in travels:
response.append(self.model.transform(travel))
return jsonify({
'data': response
})
def post(self):
try:
data = request.json
travel = self.validator.load(data)
createdTravel = self.model.create(travel)
if createdTravel is False:
return make_response({
'message': 'Bad request!',
'errors': {
'user_id': [
'Must be a valid ObjectId or Not found.'
]
}
}, 400)
return make_response(
jsonify({
'data': self.model.transform(createdTravel)
}), 201)
except ValidationError as err:
return make_response({
'message': 'Bad request!',
'errors': err.messages
}, 400) | true |
db25119a414a33b8be9166482ad52d469b5f9e5a | Python | jungr-ait/offboard | /src/interactive_mode.py | UTF-8 | 10,113 | 2.59375 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env python
"""
Created on Thu Sep 29 09:22:58 2016
@author: dennis
"""
import rospy
from std_msgs.msg import String
from geometry_msgs.msg import Point
from geometry_msgs.msg import PoseStamped, Quaternion, TwistStamped
import threading
import sys
import time
import signal
import mavros_driver
import setpoint
import numpy as np
#import circle
import state
import pub_target
import paths
import follow
from tf.transformations import * #quaternion_from_euler, quaternion_multiply, quaternion_matrix
#from __future__ import print_function
# desire relative pose
pose_rel = [0.0,0.0,0.0,0.0]
# function to reset variable
def reset(ar):
for i,x in enumerate(ar):
ar[i] = 0.0
# checks if arg is an int
def if_isNum(arg):
try:
float(arg)
return True
except ValueError:
return False
# move thread: needs to sent position > 2 Hz
def move_pub(rate, st, run_event):
# frequency of publishing
rate = rospy.Rate(rate)
# publish desire pose
while run_event.is_set() and not rospy.is_shutdown():
st.pub.publish(st.msg)
rate.sleep()
# expected user input
def usage():
print "usage: [d [x] [y] [z] [y] | p [x] [y] [z] [y] | c [r] [ax] [bx] [cx] | mode [MODE] | arm | disarm | exit]"
# main thread
def run_tests():
### initlaization
# ros node initalization
nh = rospy.init_node('interaction', anonymous=True)
# create driver for receiving mavros msg
drv = mavros_driver.mavros_driver(nh)
# publisher for sp
target = pub_target.pub_target()
# lock for publisher thread
lock = threading.Lock()
# state: posctr, velctr
st = state.state(lock, drv)
# paths
path = paths.paths(st, target)
# modes
sp = setpoint.setpoint(st, target) #set points
#cl = circle.circle(st, target) #circle mode
# follow threads
follow_thr = follow.thread_control(st, path)
# pose publisher rate
rate = 20
# signal flag for running threads
run_event = threading.Event()
run_event.set()
# thread that sends position
#move_t = threading.Thread(target=move_pub, args=(rate, pub_pose, pose_msg, pub_twist, twist_msg, state, run_event))
move_t = threading.Thread(target=move_pub, args=(rate, st, run_event))
move_t.start()
# ctrl-c handler: aslo used to shut down during flight
def ctrlC_handler(a,b):
print 'teleop program:'
print 'disarm'
drv.arm(False)
print '> start closing all threads'
run_event.clear()
move_t.join()
follow_thr.stop_thread()
print "> threads succesfully closed. Leaving program"
sys.exit()
# catch ctrl-c
signal.signal(signal.SIGINT, ctrlC_handler)
print "set offboard"
# go into offboard mode
drv.set_mode("OFFBOARD")
pose_rel = [0, 0, 1, 0]
st.set_state("posctr")
sp.do_step(pose_rel)
# arm
drv.arm(True)
# wait some seconds until reaching hover position
time.sleep(1.0)
interactive_mode = True
# main loop in manual mode
if interactive_mode:
# show usage
usage()
do_loop = True
while do_loop:
# read input from console
user_input = sys.stdin.readline()
# split input
args = user_input.split()
# cases
if len(args) > 5:
print "too many argumets"
usage()
elif len(args) == 0:
print "no argument given"
usage()
else:
# leave program
if str(args[0]) == "exit":
print "leaving program"
#ctrlC_handler(0,0)
do_loop = False
# set position
elif str(args[0]) == "d":
# reset relative position
reset(pose_rel)
# close circle thread if running
follow_thr.stop_thread()
# set new relative position
if len(args[1:]) > 4:
print "too many arguments"
usage()
elif len(args[1:]) < 4:
print "not enough arguments"
usage()
else:
for ind, arg in enumerate(args[1:]):
if if_isNum(arg):
pose_rel[ind] = float(arg)
else:
print arg + " is not a number"
reset(pose_rel)
st.set_state("posctr")
sp.do_step(pose_rel)
# set position
elif str(args[0]) == "p":
# reset relative position
reset(pose_rel)
# close circle thread if running
follow_thr.stop_thread()
# set new relative position
if len(args[1:]) > 4:
print "too many arguments"
usage()
elif len(args[1:]) < 4:
print "not enough arguments"
usage()
else:
for ind, arg in enumerate(args[1:]):
if if_isNum(arg):
pose_rel[ind] = float(arg)
else:
print arg + " is not a number"
reset(pose_rel)
st.set_state("posctr")
sp.go_to_pose(pose_rel)
# bezier point
elif str(args[0]) == "b":
# reset relative position
reset(pose_rel)
# close circle thread if running
follow_thr.stop_thread()
# set new relative position
if len(args[1:]) > 4:
print "too many arguments"
usage()
elif len(args[1:]) < 4:
print "not enough arguments"
usage()
else:
for ind, arg in enumerate(args[1:]):
if if_isNum(arg):
pose_rel[ind] = float(arg)
else:
print arg + " is not a number"
reset(pose_rel)
st.set_state("bezier")
sp.do_step_bez(pose_rel)
# set mode
elif str(args[0]) == "mode":
mode = args[1].upper()
if str(mode) == "OFFBOARD":
# go into offboard mode
drv.set_mode("OFFBOARD")
else:
print "This mode is not yet supported"
# arm
elif str(args[0]) == "arm":
drv.arm(True)
# disarm
elif str(args[0]) == "disarm":
# start landing
print "start landing"
drv.land()
# path mode
elif str(args[0]) == "c":
# close circle thread if running
follow_thr.stop_thread()
correct_input = True
if len(args[1:]) > 4:
print "too many arguments"
usage()
elif len(args[1:]) <4:
print "too few arguments"
usage()
else:
axis = []
for ind, arg in enumerate(args[2:]):
if if_isNum(arg):
axis.append(float(arg))
else:
print arg + "not a number"
correct_input = False
radius = 0.0
if if_isNum(args[1]):
radius = float(args[1])
if correct_input:
path.circle(radius, axis, [1.0,0.0,0.0])
# start thread
follow_thr.start_thread()
else:
print "this input is not supported"
usage()
# dont waste cpu
time.sleep(1)
# start landing
print "start landing"
drv.land()
# join thread
run_event.clear()
move_t.join()
ctrlC_handler(0,0)
if __name__ == '__main__':
try:
run_tests()
sys.exit()
except rospy.ROSInterruptException:
pass
| true |
e6fb9acb32922e1c3793a6ec65c154cfd5ba9140 | Python | w1ldy0uth/netScan | /method/arp.py | UTF-8 | 1,176 | 3.265625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# -*- coding: UTF=8 -*-
from scapy.all import ARP, srp, Ether
try:
from method.sub.ipget import cidr_ip
except ImportError:
from sub.ipget import cidr_ip
class Arp:
"""A class to recieve IP and MAC addresses of hosts in current network."""
def __init__(self, verbose) -> None:
"""
Constructs all the necessary attributes.
verbose: bool
permission for output of additional info about packets
"""
self.verbose = verbose
self.ip = cidr_ip()
def scan(self) -> list:
"""Scans network and pulls out IPs and MACs from recieved packets."""
# arp packets's parts (to send)
arp = ARP(pdst=self.ip)
ether = Ether(dst="ff:ff:ff:ff:ff:ff")
ans = srp(ether/arp, timeout=4, verbose=self.verbose)[0] # sending packet to network
res = [] # storage for addresses
for snd, rcv in ans:
res.append({"IP": rcv.psrc, "MAC": rcv.hwsrc}) # pulling out IPs and MACs
return res
if __name__ == "__main__":
scanner = Arp(verbose=False)
print(scanner.scan())
| true |
911e42114a578134655fc1d3f427a67e1684f110 | Python | nisargthakkar/replicated-database-concurrency-control | /DataManager.py | UTF-8 | 2,665 | 2.75 | 3 | [] | no_license | import SiteManager
class DataManager:
def __init__(self, site):
self.data = {}
self.site = site
self.committed = {}
def keyStrKey(keyStr):
return int(keyStr[1:])
def initValue(self, key, value):
self.data[key] = [{
'transaction': '',
'value': value,
'committedTime': 0
}]
self.committed[key] = 0
def setValue(self, transaction, key, value):
lastCommittedIndex = self.committed[key]
if len(self.data[key]) > lastCommittedIndex + 1:
self.data[key][lastCommittedIndex + 1]['value'] = value
else:
self.data[key].append({
'transaction': transaction,
'value': value,
'committedTime': -1
})
def dump(self):
dumpOut = 'Site %s - ' % (self.site)
keysUnordered = self.data.keys()
keysOrdered = sorted(keysUnordered, key=DataManager.keyStrKey)
for key in keysOrdered:
lastCommittedIndex = self.committed[key]
dumpOut += '%s: %s ' % (key, self.data[key][lastCommittedIndex]['value'])
print(dumpOut.strip())
def dumpKey(self, key):
lastCommittedIndex = self.committed[key]
dumpOut = 'Site %s - ' % (self.site)
dumpOut += '%s: %s ' % (key, self.data[key][lastCommittedIndex]['value'])
print(dumpOut.strip())
def getValue(self, transaction, key):
for i in reversed(range(self.committed[key], len(self.data[key]))):
if self.data[key][i]['transaction'] == transaction:
return self.data[key][i]
return self.data[key][self.committed[key]]
def readVersionAtTime(self, transaction, key, time):
value = self.data[key][0]['value']
for valueObj in self.data[key]:
if valueObj['committedTime'] == -1 or valueObj['committedTime'] > time:
break
value = valueObj['value']
return value
def persistTransactionKey(self, transaction, key, commitTime):
lastCommittedIndex = self.committed[key]
if len(self.data[key]) > lastCommittedIndex + 1 and self.data[key][lastCommittedIndex + 1]['transaction'] == transaction:
self.committed[key] = len(self.data[key]) - 1
self.data[key][lastCommittedIndex + 1]['committedTime'] = commitTime
def revertKey(self, key):
lastCommittedIndex = self.committed[key]
self.data[key] = self.data[key][:lastCommittedIndex + 1]
def clearUncommittedData(self):
for key in self.data:
self.revertKey(key)
def getLastCommitTime(self, key):
return self.data[key][self.committed[key]]['committedTime']
def getFirstCommitTimeSinceStart(self, key):
SM = SiteManager.SiteManager
committedValuesSinceStartup = list(filter(lambda data: data['committedTime'] >= SM.sites[self.site]['startTime'], self.data[key]))
if len(committedValuesSinceStartup) == 0:
return -1
return committedValuesSinceStartup[0]['committedTime']
| true |
cd2ecb1008b185fe94b2c2cedde9dcbf8546f030 | Python | Randrews545/school-projects | /JJARS/scruml/uml_context_gui.py | UTF-8 | 13,956 | 2.828125 | 3 | [] | no_license | # ScrUML
# uml_context_gui.py
# Team JJARS
from os import path
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
import pkg_resources
import webview
from scruml import uml_filesystem_io
from scruml.uml_diagram import UMLDiagram
# ----------
# __API
class __API:
"""Provides an API to the JavaScript running in the GUI window.
Can be called from the JavaScript as such: pywebview.api.FUNCTIONNAME( ... )"""
# ----------
# Static variables
__diagram: UMLDiagram = UMLDiagram()
# ----------
# Diagram information functions
# ----------
# getAllClasses
def getAllClasses(self, params: str) -> Dict[str, Dict[str, str]]:
"""Returns a dictionary containing all class information in the diagram.
Structure: dictionary[className][attributeName] == attributeValue"""
response: Dict[str, Dict[str, str]] = {}
# Populate response dictionary with classes
for class_name in self.__diagram.get_all_class_names():
class_attributes: Optional[
Dict[str, str]
] = self.__diagram.get_class_attributes(class_name)
if class_attributes is not None:
response[class_name] = class_attributes
else:
raise Exception("Class not found in diagram: " + class_name)
return response
# ----------
# getAllRelationships
def getAllRelationships(self, params: str) -> Dict[str, Dict[str, str]]:
"""Returns a dictionary containing all relationship infromation in the diagram.
Structure: dictionary[classPair][relationshipName][attributeName] == attributeValue"""
response: Dict[str, Dict[str, str]] = {}
# Populate response dictionary with relationships
for class_pair in self.__diagram.get_all_relationship_pairs():
relationships: Optional[
Dict[Optional[str], Dict[str, str]]
] = self.__diagram.get_relationships_between(class_pair[0], class_pair[1])
if relationships is not None:
for relationship_name in relationships:
relationship_id: str = (
"["
+ class_pair[0]
+ ","
+ class_pair[1]
+ (("," + relationship_name) if relationship_name else "")
+ "]"
)
response[relationship_id] = {}
# TODO: Relationship Attributes, Sprint 3
else:
raise Exception(
"Class pair not found in diagram: ["
+ class_pair[0]
+ ","
+ class_pair[1]
+ "]"
)
return response
# ----------
# __parse_class_identifier
def __parse_class_identifier(self, ident: str) -> Optional[str]:
"""Returns valid class identifier on success, or None on failure
Valid class identifiers contain no whitespace and are not surrounded by brackets"""
ident = ident.strip()
if " " in ident:
return None
if '"' in ident:
return None
if "'" in ident:
return None
if ident.startswith("[") and ident.endswith("]"):
return None
return ident
# ----------
# __parse_relationship_identifier
def __parse_relationship_identifier(
self, ident: str
) -> Optional[Tuple[str, str, Optional[str]]]:
"""Returns valid relationship identifier on success, or None on failure
Valid relationship identifiers are surrounded by brackets, contain two valid class names
separated by a comma, and an optional relationship name (also comma separated)"""
ident = ident.strip()
# Check for start and end brackets and then shear them away
if ident.startswith("[") and ident.endswith("]"):
ident = ident[1:-1]
else:
return None
# Split up the string into a list
ident_list: List[str] = ident.split(",")
# Make sure that there were enough values provided in the identifier
if len(ident_list) <= 1 or len(ident_list) >= 4:
return None
# Pull out and validate the two class names that should be in the identifier
class_A_name: Optional[str] = self.__parse_class_identifier(ident_list[0])
class_B_name: Optional[str] = self.__parse_class_identifier(ident_list[1])
if not class_A_name or not class_B_name:
return None
# If a relationship name was provided, pull it out and validate it too
# (Relationship names follow the same rules as class names for simplicity)
relationship_name: Optional[str] = None
if len(ident_list) == 3:
relationship_name = self.__parse_class_identifier(ident_list[2])
if not relationship_name:
return None
return (str(class_A_name), str(class_B_name), relationship_name)
# ----------
# Diagram file functions
# ----------
# newDiagramFile
def newDiagramFile(self, params: str) -> None:
"""Creates a new, blank diagram."""
self.__diagram = UMLDiagram()
# ----------
# loadDiagramFile
def loadDiagramFile(self, params: str) -> None:
"""Opens a file selector dialog and loads the selected diagram file."""
# Define supported file types
file_types: Tuple[str, str] = (
"ScrUML Files (*.scruml;*.yaml)",
"All Filles (*.*)",
)
# Open load file dialog
dialog_result: Union[Tuple[str], str, None] = webview.windows[
0
].create_file_dialog(webview.OPEN_DIALOG, file_types=file_types)
file_path: str = ""
# Different platforms do different things here, why????
if not dialog_result:
return
elif isinstance(dialog_result, tuple):
file_path = dialog_result[0]
elif isinstance(dialog_result, str):
file_path = dialog_result
# Make sure the user selected a file
if len(file_path) == 0:
return
# Load the new diagram file
self.__diagram = uml_filesystem_io.load_diagram(file_path)
# ----------
# saveDiagramFile
def saveDiagramFile(self, params: str) -> str:
"""Opens a file save dialog and saves to the specified diagram file."""
file_types: Tuple[str, str] = (
"ScrUML Files (*.scruml;*.yaml)",
"All Filles (*.*)",
)
# Get OS-specific home path
home_path: str = path.abspath("~/")
# Open save file dialog
dialog_result: Union[Tuple[str], str, None] = webview.windows[
0
].create_file_dialog(
webview.SAVE_DIALOG,
file_types=file_types,
save_filename="diagram.scruml",
directory=home_path,
)
file_path: str = ""
# Different platforms do different things here, why????
if not dialog_result:
return ""
elif isinstance(dialog_result, tuple):
file_path = dialog_result[0]
elif isinstance(dialog_result, str):
file_path = dialog_result
# Make sure the user selected a file
if len(file_path) == 0:
return ""
# Save file and return status message
if uml_filesystem_io.save_diagram(self.__diagram, file_path):
return "Diagram successfully saved to: {}".format(file_path)
else:
return "Failed to save diagram to: {}".format(file_path)
# ----------
# Class functions
# ----------
# addClass
def addClass(self, class_properties: Dict[str, str]) -> str:
class_name: str = class_properties["class_name"]
x: str = class_properties["x"]
y: str = class_properties["y"]
if not self.__parse_class_identifier(class_properties["class_name"]):
return "Class name is invalid. (Cannot contain whitespace or quotes, and cannot be surrounded by brackets.)"
if not self.__diagram.add_class(class_properties["class_name"]):
return (
"Class "
+ class_properties["class_name"]
+ " already exists in the diagram."
)
self.__diagram.set_class_attribute(class_name, "[x]", x)
self.__diagram.set_class_attribute(class_name, "[y]", y)
return ""
# ----------
# removeClass
def removeClass(self, class_name: str) -> None:
if not self.__diagram.remove_class(class_name):
raise Exception("Selected class not found in diagram: " + class_name)
# ----------
# Class attribute functions
# ----------
# setClassAttribute
def setClassAttribute(self, class_attribute_properties: Dict[str, str]) -> str:
class_name: str = class_attribute_properties["class_name"]
attribute_name: str = class_attribute_properties["attribute_name"]
attribute_value: str = class_attribute_properties["attribute_value"]
if not class_attribute_properties[
"ignore_naming_rules"
] and not self.__parse_class_identifier(attribute_name):
return "Attribute name is invalid. (Cannot contain whitespace or quotes, and cannot be surrounded by brackets.)"
if not self.__diagram.set_class_attribute(
class_name, attribute_name, attribute_value
):
return (
"Class "
+ class_name
+ " does not exist in the diagram. Unable to add attribute: "
+ attribute_name
)
return ""
# ----------
# removeClassAttribute
def removeClassAttribute(self, class_name: str, attribute_name: str) -> str:
if not self.__diagram.remove_class_attribute(class_name, attribute_name):
return "Attribute " + attribute_name + " not found in Class: " + class_name
return ""
# ----------
# getClassAttributes
def getClassAttributes(self, class_name: str) -> Dict[str, str]:
attr_dict: Optional[Dict[str, str]] = self.__diagram.get_class_attributes(
class_name
)
if not attr_dict:
raise Exception("Selected class not found in diagram: " + class_name)
return attr_dict
# ----------
# Relationship functions
# ----------
# addRelationship
def addRelationship(self, relationship_properties: Dict[str, str]) -> str:
class_name_a: str = relationship_properties["class_name_a"]
class_name_b: str = relationship_properties["class_name_b"]
relationship_name: str = relationship_properties["relationship_name"]
if not class_name_a in self.__diagram.get_all_class_names():
return "Class " + class_name_a + " not found in the diagram."
if not class_name_b in self.__diagram.get_all_class_names():
return "Class " + class_name_b + " not found in the diagram."
if not self.__diagram.add_relationship(
class_name_a,
class_name_b,
relationship_name if len(relationship_name) > 0 else None,
):
return (
"Relationship already exists: ["
+ class_name_a
+ ","
+ class_name_b
+ (("," + relationship_name) if relationship_name else "")
+ "]"
)
return ""
# ----------
# removeRelationship
def removeRelationship(self, relationship_id: str) -> str:
relationship_id_tuple: Optional[
Tuple[str, str, Optional[str]]
] = self.__parse_relationship_identifier(relationship_id)
if not relationship_id_tuple:
raise Exception(
"Invalid relationship identifier provided: " + relationship_id
)
class_name_a: str = relationship_id_tuple[0]
class_name_b: str = relationship_id_tuple[1]
relationship_name: Optional[str] = relationship_id_tuple[2]
if not class_name_a in self.__diagram.get_all_class_names():
return "Class " + class_name_a + " not found in the diagram."
if not class_name_b in self.__diagram.get_all_class_names():
return "Class " + class_name_b + " not found in the diagram."
if not self.__diagram.remove_relationship(
class_name_a, class_name_b, relationship_name
):
return (
"Relationship not found in diagram: [ "
+ class_name_a
+ ","
+ class_name_b
+ (("," + relationship_name) if relationship_name else "")
+ "]"
)
return ""
# ----------
# Relationship attribute functions
# TODO: Sprint 3
# ----------
# activate
def activate(enable_debug: bool = False) -> None:
    """Create the ScrUML window and start the pywebview event loop.

    When 'enable_debug' is True the embedded browser's developer console is
    made available.
    """
    api = __API()
    html_file = pkg_resources.resource_filename("scruml", "assets/scruml.html")
    if enable_debug:
        print("Developer console enabled!")
    webview.create_window(
        "ScrUML", html_file, min_size=(640, 480), js_api=api, confirm_close=True
    )
    webview.start(debug=enable_debug, gui="cef")
| true |
caaf66f0bfd6bf781b46655ff17d1f90423e19d1 | Python | Xromocoma/Fast_api_app | /app/routers/v1/city.py | UTF-8 | 1,642 | 2.625 | 3 | [] | no_license | from typing import List
from fastapi import APIRouter, Response, status, Depends, Security
from app.core.dependencies import is_authentication, is_admin, security
from app.shemas.city import City, CityInfo
from app.core.city import city_add, city_update, city_delete, get_all_cities
router = APIRouter()
# Fetch every city
@router.get("/city",
            response_model=List[CityInfo],
            dependencies=[Depends(is_authentication), Security(security)])
def get_all_city():
    """Return every known city, or an empty list when there are none."""
    cities = get_all_cities()
    return cities if cities else []
# Create a city (admin only)
@router.post("/city",
             dependencies=[Depends(is_authentication), Depends(is_admin), Security(security)])
def add_city(city: City):
    """Create a city; respond 200 on success, 400 when creation fails."""
    created = city_add(city.name)
    if not created:
        return Response(status_code=status.HTTP_400_BAD_REQUEST)
    return Response(status_code=status.HTTP_200_OK)
# Update a city (admin only)
@router.put("/city/{city_id}",
            dependencies=[Depends(is_authentication), Depends(is_admin), Security(security)])
def update_city(city_id: int, city: City):
    """Update a city by id; respond 200 on success, 400 when the update fails."""
    updated = city_update(city_id, city.dict())
    if not updated:
        return Response(status_code=status.HTTP_400_BAD_REQUEST)
    return Response(status_code=status.HTTP_200_OK)
# Delete a city (admin only)
@router.delete("/city/{city_id}",
               dependencies=[Depends(is_authentication), Depends(is_admin), Security(security)])
def delete_city(city_id: int):
    """Delete a city by id; respond 200 on success, 400 when deletion fails."""
    deleted = city_delete(city_id)
    if not deleted:
        return Response(status_code=status.HTTP_400_BAD_REQUEST)
    return Response(status_code=status.HTTP_200_OK)
| true |
a77d7a25da0508dce9f139a8e8ca7ca02354b9a3 | Python | espiritu324/cst336 | /cst311/UDPPingClient.py | UTF-8 | 1,846 | 3.15625 | 3 | [] | no_license | # UDPPingClient.py
#Mytchell Beaton & David Espiritu
#cst311 section 01
#Programming Assignment 1 UDP_Pinger
#Mar. 03, 2019
import socket
from socket import AF_INET, SOCK_DGRAM
import time
IP_ADDRESS = ""
UDP_portNum = 12000
clientSocket = socket.socket(AF_INET,SOCK_DGRAM)
clientSocket.settimeout(1) #set timeout to 1 sec
sequence_num = 1 #keeps track of number of packets
RTT =[] #keeps track of Round Trip Time for each packet
#only prints 10 packets
while sequence_num<=10:
start=str(time.time())
message = str(sequence_num)
clientSocket.sendto(message.encode('utf-8'),(IP_ADDRESS, UDP_portNum))
EstimateRTT = 0
i = 0
try:
message, address = clientSocket.recvfrom(1024)
elapsedTime = (time.time()-start)
RTT.append(elapsedTime)
EstimateRTT = .875*EstimateRTT + .125*RTT[i]
print( 'Ping message number '+str(sequence_num)+' RTT:' + str(elapsedTime) + ' secs')
i+=1
except socket.timeout: #detect if packet is dropped and prints seq num of dropped packet
print( 'Ping message number '+str(sequence_num)+' timed out')
sequence_num+=1
print( '')
if sequence_num > 10:
print('Number of packets sent:',sequence_num-1)
print('Number of packets received:',len(RTT))
LossRate = str((10-len(RTT))*10)
print( 'Packet loss rate is:' + LossRate + ' %') #print packet loss
mean = sum(RTT, 0.0)/ len(RTT)
print( 'Maximum RTT is:' + str(max(RTT)) + ' seconds') #print max RTT
print( 'MinimumRTT is:' + str(min(RTT)) + ' seconds') #print min RTT
print( 'Average RTT is:' + str(mean)+ ' seconds') #print average RTT
print( 'Estimated RTT: '+ str(EstimateRTT) + ' secs') #print Estimate RTT
clientSocket.close() | true |
13f583259b5e38272c84f8a3f124bfdce8cb70c0 | Python | maoa20-gm/Algoritmos | /aula_01/ejercicios_listas.py | UTF-8 | 4,788 | 4.40625 | 4 | [] | no_license | from typing import List
# Crie uma função que recebe uma lista de números como argumento e
# devolve uma lista onde todos os números da lista original foram
# elevados ao quadrado.
from typing import List
def Quadrado(listas: List) -> List:
    """Return a new list with every number of `listas` squared."""
    return [valor ** 2 for valor in listas]


x = [2, 4, 5, 10]
print(Quadrado(x))
# Crie uma função maiusculas() que recebe uma lista de strings como argumento e devolve
# uma lista onde todos as letras minúsculas do string original foram transformadas em
# maiúsculas. Dica: "abc".upper() == "ABC"
def maisculas(stringis: List) -> List:
    """Return a new list with every string of `stringis` upper-cased."""
    return [texto.upper() for texto in stringis]


y = ["a", "b", "v", "dxeubdewbo", " miguel", "veronica", "oreo", "missu", "carmen"]
print(maisculas(y))
# Crie uma função que, dada uma lista de números, devolve uma lista
# contendo apenas aqueles que são múltiplos de 7.
def MultiploSeven(numeros: List) -> List:
    """Return only the numbers of `numeros` that are multiples of 7."""
    return [valor for valor in numeros if valor % 7 == 0]


numeros = [7, 1554, 14, 1566, 23]
print(MultiploSeven(numeros))
# Crie uma função que recebe uma lista de números como argumento e
# devolve a soma desses números.
def SomaLista(numeros: List) -> int:
    """Return the sum of the numbers in `numeros` (0 for an empty list)."""
    total = 0
    for valor in numeros:
        total += valor
    return total
print(SomaLista(numeros))
# Crie um programa que, dada uma lista de strings, devolve um string correspondente
# à concatenação de todos eles.
def concatenar(stringis: List[str]) -> str:
    """Return the concatenation of all strings in `stringis`."""
    return "".join(stringis)
print(concatenar(y))
# Crie uma função que recebe um string como argumento e elimina todos os
# caracteres desses string que não sejam alfanuméricos, devolvendo o
# string resultante.
def QuitAlfaNumeric(stringis: str) -> str:
    """Return `stringis` with every numeric character removed.

    NOTE(review): the exercise statement above asks to remove the
    *non*-alphanumeric characters, but this implementation (behavior kept
    as-is) drops the digits instead -- confirm which is intended.
    """
    return "".join(ch for ch in stringis if not ch.isnumeric())


z = "dhwmbsi562485fhshbd452dh1fddbud52452dfdejbfd"
print(QuitAlfaNumeric(z))
# Crie uma função que lê uma sequência de no máximo 100 números a partir do teclado,
# armazenando-os em uma lista. Quando o usuário digitar <ENTER> sem nenhum número,
# seu programa deve calcular a média de todos os números armazenados e devolver
# esse valor.
def promedio() -> float:
    """Read numbers from the keyboard until a non-numeric entry (e.g. a bare
    <ENTER>) and return their average.

    NOTE(review): like the original, this raises ZeroDivisionError when the
    very first entry already terminates the input.
    """
    valores = []
    entrada: str = input("Digite un numero por favor o presione enter para terminar: ")
    while entrada.isnumeric():
        valores.append(int(entrada))
        entrada = input("Digite un numero por favor o presione enter para terminar: ")
    return sum(valores) / len(valores)
print(promedio())
# Crie uma função diferencaListas() que recebe duas listas l1 e l2 como argumentos
# e devolve uma lista contendo todos os elementos de l1 que não são também
# elementos de l2.
def diferencaListas(l1: List, l2: List) -> List:
    """Return the elements of `l1` that are not also elements of `l2`.

    BUG FIX: the original inner loop broke out of the comparison after the
    first mismatch, so it effectively only checked an element of `l1`
    against the first element(s) of `l2`; values present later in `l2`
    were still returned (and matches were appended repeatedly).  A plain
    membership test yields the intended difference, preserving l1's order.
    """
    return [elemento for elemento in l1 if elemento not in l2]


l1 = [3, 1, 2]
l2 = [5, 2, 7]
print(diferencaListas(l1, l2))
# Ejemplo de incluir las palabras en una lista
def partir(texto: str, separador: str) -> List[str]:
    """Split `texto` on the single-character `separador`, dropping empty pieces.

    Returns [] when `separador` is not exactly one character long.
    """
    if len(separador) != 1:
        return []
    pedacos: List[str] = []
    atual = ""
    for caractere in texto:
        if caractere == separador:
            # Skip empty pieces caused by leading or repeated separators
            if atual:
                pedacos.append(atual)
            atual = ""
        else:
            atual = atual + caractere
    if atual:
        pedacos.append(atual)
    return pedacos


print(partir(" Eu sou Pythonista e Fashionista", " "))
# Hacer para mas de un separador
def partirTudo(texto: str, separador: str) -> List[str]:
    """Split `texto` on ANY character of `separador`, dropping empty pieces.

    Returns [] when `separador` is empty.
    """
    if len(separador) < 1:
        return []
    pedacos: List[str] = []
    atual = ""
    for caractere in texto:
        if ehSeparador(caractere, separador):
            if atual:
                pedacos.append(atual)
            atual = ""
        else:
            atual = atual + caractere
    if atual:
        pedacos.append(atual)
    return pedacos


def ehSeparador(c, separador):
    """Return True when the character `c` occurs in the string `separador`."""
    if len(separador) == 0:
        return False
    indice = 0
    while indice < len(separador):
        if c == separador[indice]:
            return True
        indice = indice + 1
    return False


print(partirTudo("Eu sou_Miguel Ortiz-vou trabalhar por voces ", " _-"))
| true |
382b6f28f6a42c663b0d267e142b8dca87996448 | Python | tahyuu/glove_test | /Ui/TranscellT831.py | UTF-8 | 1,124 | 2.53125 | 3 | [] | no_license | import serial
import re
import time
# Matches a signed reading of 1-5 digits that follows a digit
r_pun_data = r'\d(?P<data>[\+|-]\d{1,5})'
pattern = re.compile(r_pun_data)


def dev(i):
    """Scale a raw scale reading down by 100 (two implied decimal places)."""
    return i/100


if __name__ == '__main__':
    serial = serial.Serial('COM1', 9600)
    print(serial)
    if serial.isOpen():
        print("open success")
    else:
        print("open failed")
    try:
        while True:
            pending = serial.inWaiting()
            if pending > 30:
                # Give the device a moment to finish the burst, then drain it
                time.sleep(0.03)
                raw = serial.read(pending)
                readings = pattern.findall(raw)
                readings = map(float, readings)
                readings = map(dev, readings)
                print(readings)
    except KeyboardInterrupt:
        if serial != None:
            serial.close()
b4f5cdb6441f78701364ec5d311aa516289c809e | Python | glwhu/python_turtle | /turtle_5_snake.py | UTF-8 | 524 | 3.546875 | 4 | [] | no_license | import turtle
def _arc(t, steps, step_size, turn):
    """Draw one arc segment: `steps` moves of `step_size`, turning `turn`
    degrees after each move (negative `turn` means turn left)."""
    for _ in range(steps):
        t.forward(step_size)
        if turn >= 0:
            t.right(turn)
        else:
            t.left(-turn)


wn = turtle.Screen()
wn.bgcolor("lightgreen")

tess = turtle.Turtle()
tess.color("blue")

# The original repeated the same forward/turn loop four times with different
# parameters; the helper removes that duplication while drawing identically.
_arc(tess, 10, 20, 10)   # first body curve, bending right
_arc(tess, 6, 2, 28)     # first tight head curl
_arc(tess, 10, 20, -10)  # second body curve, bending left
_arc(tess, 6, 2, 28)     # second tight head curl

turtle.done()
596307b6ae9554963a78cd7aa2960aa0af2baa94 | Python | LeeDongGeon1996/co-te | /BOJ/14888_연산자 끼워넣기.py | UTF-8 | 923 | 3.546875 | 4 | [] | no_license | # solution: DFS, 연산자를 하나씩 소비해가며 dfs를 수행하여 모든 순열(?)을 탐색한다.
# time-complexity: O(|V|+|E|) - V=연산자순열수, E=연산자수(N-1)
# url: https://www.acmicpc.net/problem/14888
# start_input
N = int(input())
nums = list(map(int, input().split()))
opers = list(map(int, input().split()))  # remaining counts of [+, -, *, /]
# end_input

_min = 1000000000
_max = -1000000000


def calc(res, oper, depth):
    """Apply operator `oper` between the running value `res` and nums[depth],
    then recurse over the remaining numbers; updates the global extremes once
    every number has been consumed (DFS with backtracking over `opers`)."""
    global _min, _max
    if opers[oper] == 0:
        return  # this operator is exhausted on the current path
    if oper == 0:
        res = res + nums[depth]
    elif oper == 1:
        res = res - nums[depth]
    elif oper == 2:
        res = res * nums[depth]
    else:
        # The problem requires division to truncate toward zero for negatives
        res = int(res / nums[depth])
    opers[oper] -= 1  # consume the operator on this branch
    if depth == N - 1:
        _min = min(_min, res)
        _max = max(_max, res)
    else:
        for nxt in range(4):
            calc(res, nxt, depth + 1)
    opers[oper] += 1  # backtrack


for first in range(4):
    calc(nums[0], first, 1)

# start_print
print(_max)
print(_min)
# end_print
71219f241af69c6a0f0983e5313e874e6a7f5012 | Python | maximilianh/pubMunch | /cgi/pubRun/jobQueue.py | UTF-8 | 7,551 | 2.53125 | 3 | [] | no_license | from __future__ import print_function
import os, sqlite3
from cPickle import loads, dumps
from time import sleep
try:
from thread import get_ident
except ImportError:
from dummy_thread import get_ident
# awesome compact code from http://flask.pocoo.org/snippets/88/
class JobQueue(object):
    """A persistent FIFO job queue backed by SQLite.

    Jobs are pickled into the `queue` table and grouped into batches; the
    `batch` table tracks per-batch progress (total/done job counts, a
    failure flag, and the state of the final result-concatenation step).
    One connection is cached per thread so a single instance can be shared
    across threads.

    NOTE(review): written for Python 2 (`buffer`, cursor `.next()`).
    """

    # --- SQL statements -------------------------------------------------
    _create = (
        'CREATE TABLE IF NOT EXISTS queue '
        '('
        ' id INTEGER PRIMARY KEY AUTOINCREMENT,'
        ' item BLOB,'
        ' batchId text'
        ')'
    )
    _create_batch = (
        'CREATE TABLE IF NOT EXISTS batch '
        '('
        ' batchId TEXT PRIMARY KEY,'
        ' jobCount int DEFAULT 0,'
        ' hasFailed int DEFAULT 0,'
        ' jobDoneCount int DEFAULT 0,'
        ' concatStarted int DEFAULT 0,'
        ' concatDone int DEFAULT 0'
        ')'
    )
    _queue_index = ( 'CREATE INDEX IF NOT EXISTS q_idx ON queue (batchId);' )
    _batch_index = ( 'CREATE UNIQUE INDEX IF NOT EXISTS b_idx ON batch (batchId);' )
    _count = 'SELECT COUNT(*) FROM queue'
    _count_batch = 'SELECT COUNT(*) FROM queue where batchId=?'
    _concat_started= 'SELECT concatStarted FROM batch where batchId=?'
    _iterate = 'SELECT id, item FROM queue'
    _append = 'INSERT INTO queue (item, batchId) VALUES (?, ?)'
    _append_batch = 'INSERT INTO batch (batchId, jobCount) VALUES (?, ?)'
    _del_batch = 'DELETE FROM batch WHERE batchId=?'
    # BEGIN IMMEDIATE takes the write lock up front so pop is atomic
    _write_lock = 'BEGIN IMMEDIATE'
    _popleft_get = (
        'SELECT id, item, batchId FROM queue '
        'ORDER BY id LIMIT 1'
    )
    _popleft_del = 'DELETE FROM queue WHERE id = ?'
    _peek = (
        'SELECT item FROM queue '
        'ORDER BY id LIMIT 1'
    )
    _fail_batch = (
        'UPDATE batch SET hasFailed=1 WHERE batchId = ?'
    )
    _del_all_jobs = (
        'DELETE FROM queue WHERE batchId = ?'
    )
    _batch_inc_count = (
        'UPDATE batch SET jobDoneCount=jobDoneCount+1 WHERE batchId = ?'
    )
    _batch_inc_concat = (
        'UPDATE batch SET concatStarted=concatStarted+1 WHERE batchId = ?'
    )
    _batch_concat_done = (
        'UPDATE batch SET concatDone=1 WHERE batchId = ?'
    )
    _batch_status = (
        'SELECT hasFailed, jobCount, jobDoneCount, concatStarted, concatDone from batch WHERE batchId=?'
    )

    def __init__(self, path):
        """Open (or create) the queue database at `path` and ensure the schema."""
        self.path = os.path.abspath(path)
        self._connection_cache = {}
        with self._get_conn() as conn:
            conn.execute(self._create)
            conn.execute(self._create_batch)
            conn.execute(self._queue_index)
            conn.execute(self._batch_index)

    def __len__(self):
        """Return the total number of queued jobs across all batches."""
        with self._get_conn() as conn:
            l = conn.execute(self._count).next()[0]
        return l

    def __iter__(self):
        """Yield every queued job (unpickled), in insertion order."""
        with self._get_conn() as conn:
            for id, obj_buffer in conn.execute(self._iterate):
                yield loads(str(obj_buffer))

    def _get_conn(self):
        """Return the per-thread cached connection, creating it on first use."""
        id = get_ident()
        if id not in self._connection_cache:
            self._connection_cache[id] = sqlite3.Connection(self.path,
                timeout=60)
        return self._connection_cache[id]

    def appendJobs(self, objList, batchId):
        """Register a new batch and enqueue every object in `objList` for it."""
        with self._get_conn() as conn:
            conn.execute(self._append_batch, (batchId,len(objList)))
            for obj in objList:
                # Pickle protocol 2, wrapped for BLOB storage
                obj_buffer = buffer(dumps(obj, 2))
                conn.execute(self._append, (obj_buffer,batchId))

    def popleft(self, sleep_wait=True):
        """Atomically remove and return (job, batchId) from the queue head.

        When the queue is empty: if `sleep_wait` is True, poll with a
        growing backoff until a job appears; otherwise return None.
        """
        keep_pooling = True
        wait = 0.1
        max_wait = 2
        tries = 0
        with self._get_conn() as conn:
            id = None
            while keep_pooling:
                conn.execute(self._write_lock)
                cursor = conn.execute(self._popleft_get)
                try:
                    id, obj_buffer, batchId = next(cursor)
                    keep_pooling = False
                except StopIteration:
                    conn.commit() # unlock the database
                    if not sleep_wait:
                        keep_pooling = False
                        continue
                    tries += 1
                    sleep(wait)
                    wait = min(max_wait, tries/10 + wait)
            if id:
                conn.execute(self._popleft_del, (id,))
                return loads(str(obj_buffer)), batchId
            return None

    def peek(self):
        """Return the job at the queue head without removing it, or None."""
        with self._get_conn() as conn:
            cursor = conn.execute(self._peek)
            try:
                return loads(str(cursor.next()[0]))
            except StopIteration:
                return None

    def batchFailed(self, batchId):
        """Mark a batch as failed and drop all of its still-queued jobs."""
        with self._get_conn() as conn:
            conn.execute(self._fail_batch, (batchId,))
            conn.execute(self._del_all_jobs, (batchId,))

    def batchIncreaseCount(self, batchId):
        """Increment the completed-job counter for a batch."""
        with self._get_conn() as conn:
            conn.execute(self._batch_inc_count, (batchId,))

    def noJobsLeft(self, batchId):
        """Return True when no queued jobs remain for the given batch."""
        with self._get_conn() as conn:
            l = conn.execute(self._count_batch, (batchId,)).next()[0]
        return l==0

    def activeConcats(self, batchId):
        """Return how many times concatenation was started for the batch."""
        with self._get_conn() as conn:
            l = conn.execute(self._concat_started, (batchId,)).next()[0]
        return l

    def setConcatStarted(self, batchId):
        """Record that a result-concatenation step has started for the batch."""
        with self._get_conn() as conn:
            conn.execute(self._batch_inc_concat, (batchId,))

    def concatFinished(self, batchId):
        """Record that the result-concatenation step finished for the batch."""
        with self._get_conn() as conn:
            conn.execute(self._batch_concat_done, (batchId,))

    def deleteBatch(self, batchId):
        """Remove the batch's bookkeeping row (queued jobs are untouched)."""
        with self._get_conn() as conn:
            conn.execute(self._del_batch, (batchId,))

    def getStatus(self, batchId):
        """Return a human-readable status string for the batch, or None when
        the batch does not exist."""
        with self._get_conn() as conn:
            cursor = conn.execute(self._batch_status, (batchId,))
            try:
                res = next(cursor)
                #if res==0:
                #return "batch does not exist"
                hasFailed, jobCount, jobDoneCount, concatStarted, concatDone = res
                if hasFailed!=0:
                    return "error, after %d of %d jobs completed" % (jobDoneCount, jobCount)
                elif concatDone!=0:
                    return "all complete"
                elif concatStarted!=0:
                    return "jobs complete, concatting results"
                elif jobCount!=jobDoneCount:
                    return "running, %d of %d jobs completed" % (jobDoneCount, jobCount)
                else:
                    return "Somewhere between job completion and concat stage. Error?"
            except StopIteration:
                return None
def test():
    """Smoke-test JobQueue against a scratch database file ('test.db')."""
    queue = JobQueue('test.db')
    queue.appendJobs([1, 2, 3], "mybatch")
    queue.appendJobs([1, 2, 3], "batch2")
    queue.batchIncreaseCount("batch2")
    queue.batchIncreaseCount("batch2")
    # The three "mybatch" jobs must come back first, in FIFO order
    for expected in (1, 2, 3):
        entry = queue.popleft()
        print(entry)
        assert(entry == (expected, "mybatch"))
    # "batch2" jobs remain; its first job is now at the head
    assert(1 == queue.peek())
    assert(queue.noJobsLeft("mybatch") == True)
    assert(queue.activeConcats("mybatch") == 0)
    queue.setConcatStarted("mybatch")
    assert(queue.activeConcats("mybatch") == 1)
    queue.batchIncreaseCount("mybatch")
    queue.batchIncreaseCount("mybatch")
    print(queue.getStatus("batch2"))
    queue.deleteBatch("mybatch")


if __name__=="__main__":
    test()
| true |
67a4f22d6e98d2d2ff9f77e04a819ce2d1294bf0 | Python | openkamer/openkamer | /parliament/tests.py | UTF-8 | 4,713 | 2.671875 | 3 | [
"MIT"
] | permissive | import datetime
from django.test import TestCase
from person.models import Person
from parliament.models import Parliament
from parliament.models import ParliamentMember
from parliament.models import PoliticalParty
from wikidata import wikidata
class TestPoliticalParty(TestCase):
    """Tests for PoliticalParty creation and lookup, and for party-membership
    retrieval through Wikidata.

    NOTE(review): the first two tests construct WikidataItem objects for live
    Wikidata ids -- presumably they require network access and depend on the
    current state of the remote items; confirm before running offline.
    """

    def test_get_political_party_memberships_wikidata(self):
        """Mark Rutte's Wikidata item reports exactly one party membership."""
        mark_rutte_wikidata_id = 'Q57792'
        item = wikidata.WikidataItem(mark_rutte_wikidata_id)
        parties = item.get_political_party_memberships()
        self.assertEqual(len(parties), 1)

    def test_get_political_party_memberships_ignore_local_and_youth_parties(self):
        """Loes Ypma's memberships include one local and one youth party
        among three total."""
        loes_ypma_wikidata_id = 'Q1194971'
        item = wikidata.WikidataItem(loes_ypma_wikidata_id)
        parties = item.get_political_party_memberships()
        local_parties = 0
        youth_parties = 0
        for party in parties:
            if wikidata.WikidataItem(party['party_wikidata_id']).is_local_party:
                local_parties += 1
            if wikidata.WikidataItem(party['party_wikidata_id']).is_youth_party:
                youth_parties += 1
        self.assertEqual(3, len(parties))
        self.assertEqual(1, youth_parties)
        self.assertEqual(1, local_parties)

    def test_create_political_party(self):
        """A newly created party can refresh its info (Dutch language)."""
        name = 'Houwers'
        name_short = 'Houwers'
        party = PoliticalParty.objects.create(name=name, name_short=name_short)
        party.update_info(language='nl')

    def test_find_party(self):
        """find_party matches on full name, short name, case-insensitively,
        and tolerates O/Ö variation in abbreviations."""
        name = 'Socialistische Partij'
        name_short = 'SP'
        party_expected = PoliticalParty.objects.create(name=name, name_short=name_short)
        party = PoliticalParty.find_party(name)
        self.assertEqual(party, party_expected)
        party = PoliticalParty.find_party(name_short)
        self.assertEqual(party, party_expected)
        party = PoliticalParty.find_party('sp')
        self.assertEqual(party, party_expected)
        party = PoliticalParty.find_party('SocialIstische parTij')
        self.assertEqual(party, party_expected)
        name = 'Group K/Ö'
        name_short = 'GrKO'
        party_expected = PoliticalParty.objects.create(name=name, name_short=name_short)
        party = PoliticalParty.find_party('GrKÖ')
        self.assertEqual(party, party_expected)
        party = PoliticalParty.find_party('GrKO')
        self.assertEqual(party, party_expected)

    def test_find_party_dash(self):
        """find_party treats dashes and spaces in party names as equivalent."""
        party_expected = PoliticalParty.objects.create(name='Vrijzinnig Democratische Bond', name_short='VDB')
        party = PoliticalParty.find_party('Vrijzinnig Democratische Bond')
        self.assertEqual(party, party_expected)
        party = PoliticalParty.find_party('Vrijzinnig-Democratische Bond')
        self.assertEqual(party, party_expected)
class TestParliamentMembers(TestCase):
    """Tests for parliament-membership lookups at specific dates.

    Expected counts and dates are tied to the loaded fixture data.
    """

    # Fixture data these assertions depend on
    fixtures = ['person.json', 'parliament.json']

    def test_get_members_at_date(self):
        """The Tweede Kamer has 150 active members on 2016-06-01."""
        tweede_kamer = Parliament.get_or_create_tweede_kamer()
        active_members = tweede_kamer.get_members_at_date(datetime.date(year=2016, month=6, day=1))
        self.assertEqual(len(active_members), 150)
        # print(len(active_members)) # TODO: check for number if members have non null joined/left fields

    def test_get_member_for_person_at_date(self):
        """Diederik Samsom has 4 memberships total and exactly one active
        membership at each probed date, with the expected join dates."""
        person = Person.find_by_fullname('Diederik Samsom')
        members_all = ParliamentMember.objects.filter(person=person)
        self.assertEqual(members_all.count(), 4)
        members = ParliamentMember.find_at_date(person, datetime.date(year=2016, month=6, day=1))
        self.assertEqual(members[0].joined, datetime.date(year=2012, month=9, day=20))
        self.assertEqual(members.count(), 1)
        self.assertEqual(members[0].person, person)
        members = ParliamentMember.find_at_date(person, datetime.date(year=2004, month=6, day=1))
        self.assertEqual(members[0].joined, datetime.date(year=2003, month=1, day=30))
        self.assertEqual(members.count(), 1)
        self.assertEqual(members[0].person, person)

    def test_find_members(self):
        """ParliamentMember.find resolves surname+initials, optionally
        scoped to a date, to the membership active at that date."""
        person = Person.find_by_fullname('Diederik Samsom')
        member = ParliamentMember.find('Samsom', initials='D.M.')
        self.assertEqual(member.person, person)
        member = ParliamentMember.find('Samsom', initials='D.M.', date=datetime.date(year=2004, month=6, day=1))
        self.assertEqual(member.person, person)
        self.assertEqual(member.joined, datetime.date(year=2003, month=1, day=30))
        member = ParliamentMember.find('Samsom', initials='D.M.', date=datetime.date(year=2016, month=6, day=1))
        self.assertEqual(member.person, person)
        self.assertEqual(member.joined, datetime.date(year=2012, month=9, day=20))
| true |
63149fec1a2c7843bae8c976d2685ce655a8935a | Python | TrendingTechnology/cwa-qr | /cwa_qr/seed.py | UTF-8 | 341 | 2.890625 | 3 | [
"MIT"
] | permissive | import random
def construct_seed(seed) -> bytes:
    """Derive a deterministic 16-byte seed from an arbitrary value.

    A 16-byte `bytes` value passes through unchanged; `None` becomes the
    empty seed; values that are not int/float/str/bytes (checked by exact
    type, matching the original semantics) are stringified first.  The
    result is produced by a seeded `random.Random`, so equal inputs always
    yield equal outputs.
    """
    if type(seed) == bytes and len(seed) == 16:
        return seed  # already in the exact output format
    if seed is None:
        seed = b''
    if type(seed) not in [int, float, str, bytes]:
        seed = str(seed)
    rng = random.Random()
    rng.seed(seed)
    return bytes(rng.randrange(0, 256) for _ in range(16))
| true |
7e5cdbb1794fd0772162249cc4388b0cee4d6e57 | Python | stevenfrst/simple | /simple/util.py | UTF-8 | 1,659 | 3.21875 | 3 | [
"MIT"
] | permissive | from math import ceil
import re
import unicodedata
class Pagination(object):
    """Pagination helper for `total_count` items shown `per_page` at a time.

    Pages are 1-indexed; `page` is the current page number.
    """

    def __init__(self, page, per_page, total_count):
        self.page = page
        self.per_page = per_page
        self.total_count = total_count

    @property
    def pages(self):
        """Total number of pages (0 when there are no items).

        Uses exact integer ceiling division; the original
        int(ceil(a / float(b))) loses precision once counts exceed 2**53.
        """
        return -(-self.total_count // self.per_page)

    @property
    def has_prev(self):
        """True when a previous page exists."""
        return self.page > 1

    @property
    def has_next(self):
        """True when a following page exists."""
        return self.page < self.pages

    @property
    def previous(self):
        """Number of the previous page (may be 0 when on the first page)."""
        return self.page - 1

    @property
    def next(self):
        """Number of the next page (may exceed `pages` when on the last page)."""
        return self.page + 1

    @property
    def offset(self):
        """Zero-based index of the first item on the current page."""
        return (self.page - 1) * self.per_page

    def iter_pages(self):
        """Iterate over all page numbers, 1..pages inclusive."""
        return range(1, self.pages + 1)
# https://stackoverflow.com/questions/6657820/python-convert-an-iterable-to-a-stream
class iter_to_stream(object):
    """Wrap an iterable of bytes chunks in a minimal file-like reader."""

    def __init__(self, iterable):
        self.buffered = b""  # leftover bytes from a previous over-read
        self.iter = iter(iterable)

    def read(self, size):
        """Read up to `size` bytes; fewer only when the iterable is exhausted."""
        result = b""
        while size > 0:
            # Serve leftovers first, then pull the next chunk
            data = self.buffered or next(self.iter, None)
            # BUG FIX: the buffer was reset with a str ("") inside an
            # otherwise bytes-only pipeline; keep it consistently bytes.
            self.buffered = b""
            if data is None:
                break
            size -= len(data)
            if size < 0:
                # Chunk overshot the request: keep the tail for the next read()
                data, self.buffered = data[:size], data[size:]
            result += data
        return result
def slugify(string):
    """Slugify a unicode string.

    Normalizes to NFKD, strips characters that are not word characters,
    whitespace or dashes, lowercases, and collapses runs of whitespace and
    dashes into a single "-".
    """
    normalized = unicodedata.normalize('NFKD', string)
    cleaned = re.sub('[^\\w\\s-]', '', normalized).strip().lower()
    return re.sub('[-\\s]+', '-', cleaned)
271ce73b5ed8d76350cea4a5136983e7164bdf37 | Python | norashipp/ugali | /ugali/simulation/population.py | UTF-8 | 3,383 | 2.828125 | 3 | [
"MIT"
] | permissive | """
Tool to generate a population of simulated satellite properties.
"""
import numpy
import pylab
import ugali.utils.config
import ugali.utils.projector
import ugali.utils.skymap
import ugali.analysis.kernel
import ugali.observation.catalog
pylab.ion()
############################################################
def satellitePopulation(config, n,
                        range_distance_modulus=[16.5, 24.],
                        range_stellar_mass=[1.e2, 1.e5],
                        mode='mask',
                        plot=False):
    """
    Create a population of n randomly placed satellites within a survey mask or catalog specified in the config file.
    Satellites are uniformly placed in distance modulus, and uniformly generated in log(stellar_mass) (M_sol).
    The ranges can be set by the user.

    Returns the simulated area (deg^2) as well as the
    lon (deg), lat (deg), distance modulus, stellar mass (M_sol), and half-light radius (deg) for each satellite

    NOTE(review): the list default arguments are mutable (shared across
    calls if mutated), and an unrecognized `mode` leaves `input` referring
    to the builtin, which would break randomPositions -- confirm callers
    only pass 'mask' or 'catalog'.
    """
    # Accept either a path to a config file or an already-built Config object
    if type(config) == str:
        config = ugali.utils.config.Config(config)

    if mode == 'mask':
        # Footprint = pixels covered by both survey masks
        mask_1 = ugali.utils.skymap.readSparseHealpixMap(config.params['mask']['infile_1'], 'MAGLIM')
        mask_2 = ugali.utils.skymap.readSparseHealpixMap(config.params['mask']['infile_2'], 'MAGLIM')
        input = (mask_1 > 0.) * (mask_2 > 0.)
    elif mode == 'catalog':
        # Footprint inferred from the catalog object positions
        catalog = ugali.observation.catalog.Catalog(config)
        input = numpy.array([catalog.lon, catalog.lat])

    # Draw n random positions inside the footprint; also returns the area
    lon, lat, simulation_area = ugali.utils.skymap.randomPositions(input,
                                                                   config.params['coords']['nside_likelihood_segmentation'],
                                                                   n=n)
    # Uniform in distance modulus; log-uniform in stellar mass
    distance_modulus = numpy.random.uniform(range_distance_modulus[0], range_distance_modulus[1], n)
    stellar_mass = 10**numpy.random.uniform(numpy.log10(range_stellar_mass[0]), numpy.log10(range_stellar_mass[1]), n)
    half_light_radius_physical = ugali.analysis.kernel.halfLightRadius(stellar_mass) # kpc
    # Convert the physical half-light radius to an angular size at each distance
    half_light_radius = numpy.degrees(numpy.arcsin(half_light_radius_physical \
                                                   / ugali.utils.projector.distanceModulusToDistance(distance_modulus)))

    if plot:
        # Diagnostic plot: positions inside the survey footprint
        pylab.figure()
        #pylab.scatter(lon, lat, c=distance_modulus, s=500 * half_light_radius)
        #pylab.colorbar()
        pylab.scatter(lon, lat, edgecolors='none')
        xmin, xmax = pylab.xlim() # Reverse azimuthal axis
        pylab.xlim([xmax, xmin])
        pylab.title('Random Positions in Survey Footprint')
        pylab.xlabel('Longitude (deg)')
        pylab.ylabel('Latitude (deg)')

        # Diagnostic plot: stellar mass vs. distance, sized by angular radius
        pylab.figure()
        pylab.scatter(stellar_mass, ugali.utils.projector.distanceModulusToDistance(distance_modulus),
                      c=(60. * half_light_radius), s=500 * half_light_radius, edgecolors='none')
        pylab.xscale('log')
        pylab.yscale('log')
        pylab.xlim([0.5 * range_stellar_mass[0], 2. * range_stellar_mass[1]])
        pylab.colorbar()
        pylab.title('Half-light Radius (arcmin)')
        pylab.xlabel('Stellar Mass (arcmin)')
        pylab.ylabel('Distance (kpc)')

    return simulation_area, lon, lat, distance_modulus, stellar_mass, half_light_radius
############################################################
| true |
6f91e0c43110ab91cddb78e55caa15970b0680ea | Python | LiJunDa159/MYCODE | /2_VGGNet/tools.py | UTF-8 | 4,066 | 2.921875 | 3 | [] | no_license | import tensorflow as tf
def conv(layer_name, x, out_channels, kernel_size=None, stride=None, is_pretrain=True):
    """
    Convolution wrapper whose activation is ReLU.
    :param layer_name: variable-scope name, eg: conv1, conv2, ...
    :param x: input tensor of shape [batch_size, height, width, channels]
    :param out_channels: number of output channels (convolution kernels)
    :param kernel_size: convolution kernel size; defaults to [3, 3] as in VGG
    :param stride: convolution stride; defaults to [1, 1, 1, 1]
    :param is_pretrain: False freezes the layer's variables (trainable=False),
        useful when parameters were loaded from a pretrained model
    :return: 4D output tensor
    """
    k_size = kernel_size if kernel_size else [3, 3]
    strides = stride if stride else [1, 1, 1, 1]
    in_channels = int(x.get_shape()[-1])
    with tf.variable_scope(layer_name):
        weights = tf.get_variable(name="weights",
                                  shape=[k_size[0], k_size[1], in_channels, out_channels],
                                  dtype=tf.float32,
                                  initializer=tf.contrib.layers.xavier_initializer(),
                                  trainable=is_pretrain)
        biases = tf.get_variable(name='biases',
                                 shape=[out_channels],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.0),
                                 trainable=is_pretrain)
        out = tf.nn.conv2d(x, weights, strides, padding='SAME', name='conv')
        out = tf.nn.bias_add(out, biases, name='bias_add')
        out = tf.nn.relu(out, name='relu')
    return out
def pool(layer_name, x, ksize=None, stride=None, is_max_pool=True):
    """
    Pooling wrapper.
    :param layer_name: op name, eg: pool1, pool2, ...
    :param x: input tensor
    :param ksize: pooling window; defaults to [1, 2, 2, 1] (2x2, VGG)
    :param stride: stride; defaults to [1, 2, 2, 1]
    :param is_max_pool: True -> max pooling, False -> average pooling
    :return: pooled tensor
    """
    window = ksize if ksize else [1, 2, 2, 1]
    strides = stride if stride else [1, 2, 2, 1]
    pool_fn = tf.nn.max_pool if is_max_pool else tf.nn.avg_pool
    return pool_fn(x, window, strides=strides, padding='SAME', name=layer_name)
def batch_norm(x):
    """
    Batch normalization without learned offset/scale parameters.
    Normalizes using the batch mean/variance computed along axis 0.
    :param x: input tensor
    :return: normalized tensor
    """
    eps = 1e-3
    mean, variance = tf.nn.moments(x, [0])
    return tf.nn.batch_normalization(x,
                                     mean=mean,
                                     variance=variance,
                                     offset=None,
                                     scale=None,
                                     variance_epsilon=eps)
def FC_layer(layer_name, x, out_nodes):
    """
    Fully-connected layer with ReLU activation.
    :param layer_name: variable-scope name, eg: 'FC1', 'FC2', ...
    :param x: input tensor (4D conv feature maps are flattened internally)
    :param out_nodes: number of output neurons
    :return: activated output tensor
    """
    shape = x.get_shape()
    if len(shape) == 4:
        # Conv feature map: flatten H * W * C into one dimension.
        size = shape[1].value * shape[2].value * shape[3].value
    else:
        size = shape[-1].value
    with tf.variable_scope(layer_name):
        weights = tf.get_variable('weights',
                                  shape=[size, out_nodes],
                                  initializer=tf.contrib.layers.xavier_initializer())
        biases = tf.get_variable('biases',
                                 shape=[out_nodes],
                                 initializer=tf.constant_initializer(0.0))
        # flatten into 1D per sample
        flat = tf.reshape(x, [-1, size])
        out = tf.nn.relu(tf.nn.bias_add(tf.matmul(flat, weights), biases))
    return out
| true |
312ac6b5babcb20057fcdb6fa51e090df387d778 | Python | Jm-Correia/learn-Python | /SearchStringAndCreateLogs/veiculo.py | UTF-8 | 741 | 2.921875 | 3 | [] | no_license | import abc, interface_veiculo
class Veiculo(interface_veiculo.interfaceVeiculo, abc.ABC):
    """Abstract vehicle with a colour, a fuel type and a name-mangled power attribute."""

    def __init__(self, cor, tipoCombustivel, potencia):
        self.cor = cor
        self.tipoCombustivel = tipoCombustivel
        self.__potencia = potencia

    def changeColor(self, cor):
        """Replace the vehicle's colour."""
        self.cor = cor

    @property
    def potencia(self):
        # NOTE(review): reading the property first calls the private mutator,
        # overwriting the stored value — apparently a name-mangling demo; confirm.
        self.__changePotencia(" PRIVATE METHOD? ")
        return self.__potencia

    @potencia.setter
    def potencia(self, portencia):
        self.__potencia = portencia

    def __changePotencia(self, pot):
        # Private (name-mangled) helper that replaces the stored power.
        self.__potencia = pot

    def __str__(self):
        return (f"Cor:{self.cor}, Tipo de Combustivel {self.tipoCombustivel} , "
                f"Potencia {self.potencia}")
| true |
6066a36b4d72d0f243585b79006233fc267afb72 | Python | skamjadali7/Python-Programming | /OOPCOncept/HierarchialInheritance.py | UTF-8 | 257 | 3.484375 | 3 | [] | no_license | class Parent:
    def m1(self):
        """Behaviour defined once in the parent and inherited by every child."""
        print("Parent Method")
class Child1(Parent):
    """First subclass: inherits m1 from Parent and contributes m2."""

    def m2(self):
        label = "Child One"
        print(label)
class Child2(Parent):
    """Second subclass: inherits m1 from Parent and contributes m3."""

    def m3(self):
        label = "Child Two"
        print(label)
# Demonstrate hierarchical inheritance: both children share Parent's m1.
c1=Child1()
c1.m1()  # inherited from Parent
c1.m2()
c2=Child2()
c2.m1()  # inherited from Parent
c2.m3()
| true |
c737ab8f28e83e3893dd552c83bc85db674d4fb1 | Python | stephenward21/Guess-a-number | /Guess_a_number.py | UTF-8 | 932 | 4.25 | 4 | [] | no_license | import random
# Guess-a-number game (Python 2: uses raw_input).
secret_number = random.randint(1,10)
the_number = True
number_of_guesses = 5

while (number_of_guesses > 0) and (the_number == True):
    the_guessed_number = raw_input("Guess a number between 1 and 10.")
    # Use an if/elif chain: after a win the secret is regenerated, so the
    # original's independent `if`s would compare against the NEW number.
    if int(the_guessed_number) == secret_number:
        print("Yes! You win!")
        play_again = raw_input("Would you like to play again (Y / N)?")
        if (play_again == "Y"):
            the_number = True
            number_of_guesses = 5
            # BUG FIX: pick a fresh number for the new round; the original
            # kept the old secret, making every replay trivially winnable.
            secret_number = random.randint(1,10)
        else:
            the_number = False
            print("Bye!")
    elif int(the_guessed_number) > secret_number:
        print("Your guess is too high")
        number_of_guesses = number_of_guesses - 1
        print("You have " + str(number_of_guesses) + " guesses left")
    elif int(the_guessed_number) < secret_number:
        print("Your guess is too low")
        number_of_guesses = number_of_guesses - 1
        print("You have " + str(number_of_guesses) + " guesses left")

if (number_of_guesses == 0):
    print("You ran out of guesses!")
| true |
5bad2e271d03e371ba5e415bfa24ece16462cb46 | Python | F-Akinola/onlinetraining | /example7.py | UTF-8 | 74 | 2.984375 | 3 | [] | no_license |
def onetwothree(x):
    """Return x scaled by 1, 2 and 3 as a tuple (works for any type supporting *)."""
    return (x, x * 2, x * 3)

print(onetwothree(3))
| true |
68bcf4b3f5d18661d4a9f6e0f41594cf3c8f78ea | Python | Maggieeli/halloween-project | /hlw/hlw.pyde | UTF-8 | 431 | 3.484375 | 3 | [] | no_license | def setup():
size(640,480)
def draw():
    """Processing draw loop: render three groups of triangles in greyscale shades."""
    # Darkest group (grey 60), left side.
    fill(60)
    triangle(40,75,60,30,80,75)
    triangle(30,200,60,50,90,200)
    triangle(30,380,60,60,90,380)
    # Mid grey group (80), slightly right of the first.
    fill(80)
    triangle(80,105,100,40,120,95)
    triangle(70,210,100,60,130,220)
    triangle(70,400,100,70,130,390)
    # Lightest group (128) with no outline, far right.
    fill(128)
    noStroke()
    triangle(390,105,410,40,430,105)
    triangle(390,230,410,60,430,220)
    triangle(390,410,410,70,430,390)
| true |
2840e4ad1bed8ee2e5f8a7f43ccdebfdb96f0b38 | Python | PlayLife2k/WebDev | /week8/coding_bat/logic-1.py | UTF-8 | 1,536 | 3.046875 | 3 | [
"MIT"
] | permissive | #cigar_party
def cigar_party(cigars, is_weekend):
    """Party succeeds with 40..60 cigars; on weekends there is no upper bound."""
    if is_weekend:
        return cigars >= 40
    return 40 <= cigars <= 60
#date_fashion
def date_fashion(you, date):
    """0 if either stylishness <= 2, else 2 if either >= 8, else 1."""
    if min(you, date) <= 2:
        return 0
    if max(you, date) >= 8:
        return 2
    return 1
#squirrel_play
def squirrel_play(temp, is_summer):
    """Squirrels play between 60 and 90 degrees; summer raises the cap to 100."""
    upper = 100 if is_summer else 90
    return 60 <= temp <= upper
#caught_speeding
def caught_speeding(speed, is_birthday):
    """Ticket size: 0 (<=60), 1 (61..80), 2 (81+); birthdays grant 5 mph leeway."""
    effective = speed - 5 if is_birthday else speed
    if effective <= 60:
        return 0
    if effective <= 80:
        return 1
    return 2
#sorta_sum
def sorta_sum(a, b):
    """Return a+b, except a sum in the forbidden 10..19 range yields 20."""
    total = a + b
    return 20 if 10 <= total <= 19 else total
#alarm_clock
def alarm_clock(day, vacation):
    """Alarm time by day (0=Sun..6=Sat): weekdays "7:00", weekends "10:00";
    on vacation weekdays shift to "10:00" and weekends to "off"."""
    weekday = 1 <= day <= 5
    if vacation:
        return "10:00" if weekday else "off"
    return "7:00" if weekday else "10:00"
#love6
def love6(a, b):
    """True when either value is 6, or their sum or absolute difference is 6."""
    return 6 in (a, b, a + b, abs(a - b))
#in1to10
def in1to10(n, outside_mode):
    """Normally: True for 1 <= n <= 10. Outside mode: True only for n <= 1 or n >= 10."""
    if outside_mode:
        return n <= 1 or n >= 10
    return 1 <= n <= 10
#near_ten
def near_ten(num):
    """True when num is within 2 of a multiple of 10."""
    remainder = num % 10
    return remainder <= 2 or remainder >= 8
| true |
0e137ef2d021650d095527899fade03852c92e94 | Python | kamushekp/VoxCelebResearch_obsolete | /obsolete/vggvox_model.py | UTF-8 | 3,256 | 2.671875 | 3 | [] | no_license | import scipy.io as sio
import numpy as np
import keras.backend as K
from keras.layers import Input, GlobalAveragePooling2D, Reshape
from keras.layers.convolutional import Conv2D, ZeroPadding2D, MaxPooling2D, AveragePooling2D
from keras.layers.normalization import BatchNormalization
from keras.layers.core import Lambda, Activation
from keras.models import Model
import constants as c
# Block of layers: Conv --> BatchNorm --> ReLU --> Pool
def conv_bn_pool(inp_tensor, conv_filters, conv_kernel_size, conv_strides, conv_pad,
                 pool_type="", pool_size=(2, 2), pool_strides=None):
    """Block of layers: Conv -> BatchNorm -> ReLU, optionally followed by
    max or average pooling (selected via pool_type: 'max' / 'avg' / none)."""
    out = ZeroPadding2D(padding=conv_pad)(inp_tensor)
    out = Conv2D(filters=conv_filters, kernel_size=conv_kernel_size,
                 strides=conv_strides, padding='valid')(out)
    out = BatchNormalization(epsilon=1e-5, momentum=1)(out)
    out = Activation('relu')(out)
    if pool_type == 'max':
        out = MaxPooling2D(pool_size=pool_size, strides=pool_strides)(out)
    elif pool_type == 'avg':
        out = AveragePooling2D(pool_size=pool_size, strides=pool_strides)(out)
    return out
# Block of layers: Conv --> BatchNorm --> ReLU --> Dynamic average pool (fc6 -> apool6 only)
def conv_bn_dynamic_apool(inp_tensor, conv_filters, conv_kernel_size, conv_strides, conv_pad):
    """Block of layers: Conv -> BatchNorm -> ReLU -> global average pool,
    reshaped to (1, 1, conv_filters). Used for fc6 -> apool6 only, so
    variable-length inputs collapse to a fixed-size representation."""
    out = ZeroPadding2D(padding=conv_pad)(inp_tensor)
    out = Conv2D(filters=conv_filters, kernel_size=conv_kernel_size,
                 strides=conv_strides, padding='valid')(out)
    out = BatchNormalization(epsilon=1e-5, momentum=1)(out)
    out = Activation('relu')(out)
    out = GlobalAveragePooling2D()(out)
    return Reshape((1, 1, conv_filters))(out)
def vggvox_model():
    """Assemble the VGGVox speaker network as a Keras Model.

    The layer hyperparameters (filter counts, kernel sizes, strides, pools)
    are fixed; the final 1x1 conv produces a 1024-dim L2-normalized embedding.
    """
    inp = Input(c.INPUT_SHAPE,name='input')
    # conv1 + mpool1
    x = conv_bn_pool(inp,conv_filters=96,conv_kernel_size=(7,7),conv_strides=(2,2),conv_pad=(1,1),
        pool_type='max',pool_size=(3,3),pool_strides=(2,2))
    # conv2 + mpool2
    x = conv_bn_pool(x,conv_filters=256,conv_kernel_size=(5,5),conv_strides=(2,2),conv_pad=(1,1),
        pool_type='max',pool_size=(3,3),pool_strides=(2,2))
    # conv3 / conv4 (no pooling)
    x = conv_bn_pool(x,conv_filters=384,conv_kernel_size=(3,3),conv_strides=(1,1),conv_pad=(1,1))
    x = conv_bn_pool(x,conv_filters=256,conv_kernel_size=(3,3),conv_strides=(1,1),conv_pad=(1,1))
    # conv5 + mpool5
    x = conv_bn_pool(x,conv_filters=256,conv_kernel_size=(3,3),conv_strides=(1,1),conv_pad=(1,1), pool_type='max',pool_size=(5,3),pool_strides=(3,2))
    # fc6 with dynamic average pooling (handles variable-length input)
    x = conv_bn_dynamic_apool(x,conv_filters=4096,conv_kernel_size=(9,1),conv_strides=(1,1),conv_pad=(0,0))
    # fc7
    x = conv_bn_pool(x,conv_filters=1024,conv_kernel_size=(1,1),conv_strides=(1,1),conv_pad=(0,0))
    # L2-normalize, then fc8 projection to the embedding
    x = Lambda(lambda y: K.l2_normalize(y, axis=3))(x)
    x = Conv2D(filters=1024,kernel_size=(1,1), strides=(1,1), padding='valid')(x)
    m = Model(inp, x)
    return m
def test():
    """Sanity check: push random input through the model and print the
    output shape of the first 11 layers."""
    model = vggvox_model()
    num_layers = len(model.layers)
    x = np.random.randn(1,512,300,1)
    outputs = []
    for i in range(num_layers):
        # Build a backend function mapping the model input to layer i's output.
        get_ith_layer_output = K.function([model.layers[0].input, K.learning_phase()],
                            [model.layers[i].output])
        layer_output = get_ith_layer_output([x, 0])[0] # output in test mode = 0
        outputs.append(layer_output)
    for i in range(11):
        print("Shape of layer {} output:{}".format(i, outputs[i].shape))
if __name__ == '__main__':
    test()  # run the layer-shape sanity check when executed directly
| true |
adcb0a7b67bfc7d0809a38f979aa5c1d4ad4879e | Python | codeimt/pycones21-testing | /pycones21/isolation/3-patch-ok.py | UTF-8 | 623 | 2.53125 | 3 | [] | no_license | from pycones21.github_client import GithubClient
from unittest import mock
# Or you could use a decorator
# Or you could use a decorator
@mock.patch("pycones21.github_client.requests.get")
def test_get_gist_urls(m_request):
    """Patch requests.get where github_client looks it up, so no real HTTP
    request is made; the client should return just the 'url' fields."""
    response_urls = [
        {
            "url": "https://api.github.com/gists/fc04e72fc7bb4bf0a6c7c09551ad9c34",
        }
    ]
    # Fake response object whose .json() yields the canned payload above.
    m_response = mock.MagicMock()
    m_response.json.return_value = response_urls
    m_request.return_value = m_response
    urls = GithubClient.get_gists_urls()
    assert urls == [response_urls[0]["url"]]
def test_get_gist_names():
    # NOTE(review): no patch is active in this test, so GithubClient._get is
    # only a Mock if it was patched elsewhere without being undone —
    # presumably this demonstrates patch isolation/leakage; confirm whether
    # the assertion is meant to pass or to fail.
    assert isinstance(GithubClient._get, mock.Mock)
| true |
afc736b1b3a17b901a6b3de7a42b9171b45c2c12 | Python | AlexLi-98/misc | /bayesopt/gaussian_process.py | UTF-8 | 3,767 | 3.375 | 3 | [] | no_license | import numpy as np
class Kernel(object):
    """Abstract base for covariance kernels.

    Fix: the original `raise None` raised a TypeError ("exceptions must derive
    from BaseException") instead of signalling an unimplemented method; raise
    NotImplementedError as the abstract-method convention requires.
    """

    def compute(self, a, b):
        """Return the covariance matrix between rows of a and rows of b."""
        raise NotImplementedError
class SquaredDistanceKernel(Kernel):
    """Gaussian (RBF) kernel driven by pairwise squared Euclidean distance."""

    def __init__(self, kernel_param=0.1):
        # Acts as the (squared) lengthscale in the exponent.
        self.kernel_parameter = kernel_param

    def compute(self, a, b):
        """Return exp(-||a_i - b_j||^2 / (2 * kernel_parameter)) for all row pairs."""
        aa = np.sum(a ** 2, 1).reshape(-1, 1)
        bb = np.sum(b ** 2, 1)
        sq_dist = aa + bb - 2 * np.dot(a, b.T)
        return np.exp(-0.5 * sq_dist / self.kernel_parameter)
class Matern52Kernel(Kernel):
    """Matern 5/2 kernel computed from pairwise squared Euclidean distances."""

    def __init__(self, kernel_param=0.1):
        # NOTE(review): stored but never read by compute() — confirm intent.
        self.kernel_parameter = kernel_param

    def compute(self, a, b):
        """Return the Matern-5/2 covariance matrix between rows of a and b."""
        aa = np.sum(a ** 2, 1).reshape(-1, 1)
        bb = np.sum(b ** 2, 1)
        d2 = 5 * (aa + bb - 2 * np.dot(a, b.T))  # 5 * squared distance
        d = np.sqrt(d2)
        return (1 + d + d2 / 3) * np.exp(-d)
class GaussianProcess(object):
    """
    Implements a GP with mean zero and a custom kernel
    """
    def __init__(self, kernel=Matern52Kernel(), noise_variance=0.00005, x=None, y=None):
        """
        Initialize the GP with the given kernel and a noise parameter for the variance
        Optionally initialize this GP with given X and Y
        :param kernel: kernel function, has to be an instance of Kernel
        :param noise_variance: observation-noise variance added to the diagonal
        :param x: given input data
        :param y: given input label
        :return:
        """
        # NOTE(review): the default kernel instance is created once at function
        # definition time and shared by all GPs built without an explicit
        # kernel; harmless while kernels stay stateless — confirm.
        self.X = x
        self.Y = y
        self.kernel = kernel
        self.noise_variance = noise_variance
        # Prior covariance of the stored inputs (None until data is supplied).
        self.cov = None if self.X is None else kernel.compute(self.X, self.X)
        # Largest observed label so far (-99999 is the "nothing seen" sentinel).
        self.max_observed_value = -99999
    def predict(self, x, y=None):
        """
        Given data in x, give the mean and covariance of the posterior predictive distribution p(f*|X*, X, f)
        If y is given, the function gives the predicts, as well as update the GP internally
        x should have size N x d1, y of size N x d2, where N is the number of samples
        :param x: the input data
        :param y: optional. If given, the GP parameters will be updated
        :return: a tuple (mu, cov, s):
            - mu: the mean of the posterior predictive distribution, of size N x d1
            - cov: the covariance matrix of the posterior predictive distribution, of size N x N
            - s: the standard deviation vector, convenient for plotting. Of size N x 1
        """
        # covariance of the new data
        k_2star = self.kernel.compute(x, x)
        if self.cov is None:
            # if there is no data in this GP, this is equivalent to the prior distribution (zero mean, unit covariance)
            mu = np.zeros(x.shape)
            cov_posterior = k_2star + (self.noise_variance * np.eye(k_2star.shape[0]))
            if y is not None:
                # First observations: seed the stored data and covariance.
                self.X = x
                self.Y = y
                self.cov = k_2star
                self.max_observed_value = max(self.max_observed_value, self.Y.max())
        else:
            # Cholesky factor of (K + sigma^2 I) for numerically stable solves.
            l = np.linalg.cholesky(self.cov + self.noise_variance * np.eye(self.cov.shape[0]))
            k_star = self.kernel.compute(self.X, x)
            l_div_k_star = np.linalg.solve(l, k_star)
            # Posterior mean K*^T (K + sigma^2 I)^{-1} Y via two triangular solves.
            mu = np.dot(l_div_k_star.T, np.linalg.solve(l, self.Y))
            cov_posterior = k_2star + self.noise_variance * np.eye(k_2star.shape[0]) - np.dot(l_div_k_star.T,
                                                                                              l_div_k_star)
            if y is not None:
                # Append the new observations and grow the stored covariance blockwise.
                self.X = np.vstack((self.X, x))
                self.Y = np.vstack((self.Y, y))
                self.cov = np.hstack((self.cov, k_star))
                self.cov = np.vstack((self.cov, np.hstack((k_star.T, k_2star))))
                self.max_observed_value = max(self.max_observed_value, self.Y.max())
        return mu, cov_posterior, np.sqrt(np.diag(cov_posterior))
| true |
4a8e57ddcdc6ece188aa1ee8c1261e2c78bbaee5 | Python | shahriaarrr/Hello-World | /Python/examples/tkinter.py | UTF-8 | 339 | 3.203125 | 3 | [
"MIT"
] | permissive | from tkinter import *
root = Tk()
root.title("My Program")
root.geometry('200x300')

def function():
    """Button callback placeholder."""
    pass

# Fix: 'rg' was a typo for 'fg' (foreground colour) and would raise TclError.
# Note: .pack() returns None, so these names hold None rather than the widgets.
lbl_show_hello = Label(
    root,
    text = "Hello, World!",
    bg = 'red',
    fg = 'black'
    ).pack() # you can use grid to have indexable page

btn = Button(
    root,
    text = "Click Me!",  # fix: the missing comma here was a SyntaxError
    command = function
    ).pack()

root.mainloop()
| true |
149a2647b878a0ff87272b6a5dc51642ee672e46 | Python | YoupengLi/leetcode-sorting | /Solutions/0125_isPalindrome.py | UTF-8 | 1,556 | 3.984375 | 4 | [] | no_license | # -*- coding: utf-8 -*-
# @Time : 2019/7/10 9:19
# @Author : Youpeng Li
# @Site :
# @File : 0125_isPalindrome.py
# @Software: PyCharm
'''
125. Valid Palindrome
Given a string, determine if it is a palindrome, considering only alphanumeric characters and ignoring cases.
Note: For the purpose of this problem, we define empty string as valid palindrome.
Example 1:
Input: "A man, a plan, a canal: Panama"
Output: true
Example 2:
Input: "race a car"
Output: false
'''
import re
class Solution:
    def isPalindrome(self, s: 'str') -> 'bool':
        """Two-pointer scan: skip non-alphanumeric characters, compare the
        rest case-insensitively."""
        if not s:
            return True
        s = s.lower()
        left, right = 0, len(s) - 1
        while left < right:
            if not s[left].isalnum():
                left += 1
            elif not s[right].isalnum():
                right -= 1
            elif s[left] == s[right]:
                left += 1
                right -= 1
            else:
                return False
        return True

    def isPalindrome_1(self, s: 'str') -> 'bool':
        """Filter-and-compare: strip everything non-alphanumeric, then compare
        the string with its reverse."""
        if not s:
            return True
        s = s.lower()
        # Fix: \W alone keeps underscores (which str.isalnum() rejects), so the
        # two implementations disagreed on inputs like "_a"; include _ in the
        # character class so both treat underscore as a non-alphanumeric.
        s = re.sub(r'[\W_]+', '', s)
        return s == s[::-1]
if __name__ == "__main__":
    # Manual checks comparing both implementations on the LeetCode examples.
    a = Solution()
    s = "A man, a plan, a canal: Panama"
    print(a.isPalindrome(s))
    print(a.isPalindrome_1(s))
    s = "race a car"
    print(a.isPalindrome(s))
    print(a.isPalindrome_1(s))
    s = "0P"
    print(a.isPalindrome(s))
    print(a.isPalindrome_1(s))
a66cd1ec7035e0db524955a8e42d700f58b1ac8c | Python | kirtymeena/DSA | /Linked List/clone_LL.py | UTF-8 | 2,522 | 3.65625 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 20 19:52:27 2020
@author: Kirty
"""
class Node:
    """Linked-list node carrying a payload plus `next` and `random` pointers.

    Fix: the original class defined __init__ twice — the second, parameterless
    one shadowed the first, so Node(data) raised TypeError and the whole script
    below failed. It also carried an `insert` method copy-pasted from a
    separate linked-list class that relied on a never-initialised `self.head`.
    Both leftovers are removed; a single constructor remains.
    """

    def __init__(self, data):
        self.data = data    # node payload
        self.next = None    # successor in the list
        self.random = None  # arbitrary extra pointer into the list
def printList(root):
    """Walk the list printing each node's data with its random target's data,
    then print 1 + the number of nodes whose data < their random's data."""
    node = root
    count = 1
    while node:
        print(node.data, node.random.data)
        if node.data < node.random.data:
            count += 1
        node = node.next
    print(count)
def clone(originalList):
    """Deep-copy a linked list whose nodes carry `random` pointers.

    Three passes: (1) interleave a copy node right after each original node,
    (2) wire each copy's random via original.random.next, (3) unzip the two
    interleaved lists, restoring the original and returning the copy's head.
    """
    # Pass 1: insert a copy after every original node.
    node = originalList
    while node:
        copy = Node(node.data)
        copy.next = node.next
        node.next = copy
        node = copy.next
    # Pass 2: each copy's random is the node right after the original's random.
    node = originalList
    while node:
        node.next.random = node.random.next
        node = node.next.next
    # Pass 3: detach the copies, restoring the original list's links.
    head = originalList.next
    node = originalList
    while node.next:
        nxt = node.next
        node.next = node.next.next
        node = nxt
    return head
# Build the list 1 -> 2 -> 3 -> 4 -> 5
originalList = Node(1)
originalList.next = Node(2)
originalList.next.next = Node(3)
originalList.next.next.next = Node(4)
originalList.next.next.next.next = Node(5)
# 1's random points to 3
originalList.random = originalList.next.next
# 2's random points to 1
originalList.next.random = originalList
# 3's random points to 5
originalList.next.next.random = originalList.next.next.next.next
# 4's random points to 5
originalList.next.next.next.random = originalList.next.next.next.next
# 5's random points to 2
originalList.next.next.next.next.random =originalList.next
'''Print the original linked list'''
print('Original list:')
printList(originalList)
'''Create a duplicate linked list'''
cloned_list = clone(originalList)
'''Print the duplicate linked list'''
print('\nCloned list:')
printList(cloned_list)
| true |
bb0ef7a34ff528b69e4135f5cecf6acdb5d39afe | Python | labulasi211/Learning-data-visualization | /study_plot/dice/die_visual.py | UTF-8 | 712 | 2.921875 | 3 | [] | no_license | from plotly.graph_objs import Bar, Layout
from plotly import offline
from die import Die
die = Die()

# Roll the die repeatedly and collect the raw results.
results = []
for value in range(10000):
    result = die.roll()
    results.append(result)

# Analyze the results: count how often each face 1..num_sides appeared.
# Fix: range(1, die.num_sides) skipped the highest face entirely.
frequencies = []
for value in range(1, die.num_sides + 1):
    frequency = results.count(value)
    frequencies.append(frequency)

# Visualize the results.
# Fix: x labels must be the faces 1..num_sides, not 0..num_sides-1.
x_value = list(range(1, die.num_sides + 1))
y_value = frequencies
data = [Bar(x=x_value, y=y_value)]

x_axis_config = {'title': '结果'}
y_axis_config = {'title': '结果的频率'}
my_layout = Layout(title='投一个D6 10000次的结果', xaxis=x_axis_config, yaxis=y_axis_config)
offline.plot({'data': data, 'layout': my_layout}, filename='d6.html')
63d9957d6147a3d9fc7e5305c8f11b85a41a6259 | Python | franzleeyan/PCC | /favorite_languages.py | UTF-8 | 1,032 | 3.8125 | 4 | [] | no_license | # # 定义被调查者名字
# Each name maps to a list of that person's favorite languages.
# (Removed the earlier commented-out single-value version of this exercise.)
favorite_languages = {
    'jen': ['python', 'ruby'],
    'sarah': ['c'],
    'edward': ['ruby', 'go'],
    'phil': ['python', 'haskell']
}

# Iterate the dict; `languages` holds each person's list of languages in turn.
for name,languages in favorite_languages.items():
    print("\n" + name.title() + " 's favorite languages are:")
    for language in languages:
        print("\t" + language.title())
3233420f2cb14f28f62deb720debe9f381ba7ccc | Python | yanchinskiyyura432/laba01 | /laba01.py | UTF-8 | 791 | 4.09375 | 4 | [] | no_license |
#реверс
slogan = str(input ("Напишіть своє речення"))
sentence = slogan [::-1]
words = sentence.split()
sentence_rev = " ".join(reversed(words))
print ( sentence_rev)\
#хелло ворлд
("Hello world")
#калькулятор
a=int(input("Write your number"))
b=int(input("Write your number"))
c=a+b
print(c)
#шифр
i=5
while i < 15:
alpha = 'abcdefghijklmnopqrstuvwxyz1234567890QWERTYUIOPASDFGHKLZXCVBNM'
step = 1
text = input("Please write your text").strip()
res = ''
for c in text:
if c.isalpha():
res += alpha[(alpha.index(c) + step) % len(alpha)]
else:
res += c
print("Result " + res + "")
#подвоєння
word = input("Write your sentence: ")
a = "".join([x*2 for x in word])
print(a) | true |
7cd118037a52bc8398bb6bb4383bf70866a48310 | Python | madusec/firebase-scanner | /db-discovery.py | UTF-8 | 1,335 | 2.703125 | 3 | [] | no_license | import sys
import requests
from argparse import ArgumentParser, FileType
from dnsdumpster.DNSDumpsterAPI import DNSDumpsterAPI
def dnsdumpster():
    """Query DNSDumpster for hosts under firebaseio.com and return their domain names."""
    data = DNSDumpsterAPI().search('firebaseio.com')
    hosts = data['dns_records']['host']
    return [entry['domain'] for entry in hosts]
def is_firebase_project(code: str) -> bool:
    """A 404 from <code>.firebaseio.com means no such Firebase project exists."""
    response = requests.get("https://{}.firebaseio.com".format(code))
    return response.status_code != 404
def has_realtime_db(code: str) -> bool:
    """Probe the RTDB endpoint; 423 (Locked) presumably marks a deactivated database."""
    response = requests.options("https://{}.firebaseio.com/.json".format(code))
    return response.status_code != 423
def discover_dbs(args):
    """Gather candidate subdomains, probe each one, and write confirmed DBs to args.out."""
    candidates = []
    if args.type == "dnsdumpster":
        candidates = dnsdumpster()
    print("Discovered DBs:")
    for code in candidates:
        if is_firebase_project(code) and has_realtime_db(code):
            args.out.write(code + "\n")
            # Echo to the console as well when results go to a file.
            if args.out != sys.stdout:
                print(code)
def parse_args():
    """Build the CLI, parse argv, and hand the options to discover_dbs."""
    parser = ArgumentParser()
    parser.add_argument('type',
                        help="Look for potential dbs through this specified method",
                        choices=["dnsdumpster"])
    parser.add_argument('--out',
                        help="A file to dump results to",
                        nargs='?', type=FileType('w'), default=sys.stdout)
    discover_dbs(parser.parse_args())
if __name__ == '__main__':
    parse_args()  # CLI entry point: parse arguments and start discovery
e4cc61033ef11fbbe25c959842a8440631c033d4 | Python | anisg/printf_checker | /check.py | UTF-8 | 3,009 | 2.53125 | 3 | [] | no_license | #!/usr/bin/env python
import argparse
import os
version='0.1'  # checker version shown in the CLI description
default_file='examples.txt'  # default file of printf test lines
script_dir=os.path.dirname(os.path.realpath(__file__))  # directory containing this script
dir_tmp=script_dir + '/' + 'tmp'  # scratch dir for generated C sources and binaries
def shell_exec(cmd):
    """Run `cmd` through the shell and return everything it wrote to stdout.

    Fix: the original never closed the pipe returned by os.popen, leaking a
    file descriptor per call; close it once the output has been read.
    """
    pipe = os.popen(cmd)
    try:
        return pipe.read()
    finally:
        pipe.close()
def file_put_contents(filename, data):
    """Write `data` to `filename`, truncating any existing content.

    Fix: use a context manager so the handle is flushed and closed even if
    write() raises; the original left the file object open on error.
    """
    with open(filename, 'w') as f:
        f.write(data)
def exec_code(code,name, index, lib_path):
    """Write `code` to a temp .c file, compile it with gcc (optionally linking
    lib_path), run the binary, return its stdout, and delete the temp files."""
    c_file=dir_tmp + '/' + name+ '_' +'test'+str(index)+'.c'
    c_bin=dir_tmp + '/' +name+ '_' +'bin'+str(index)
    # NOTE(review): `code` is spliced into a single-quoted shell string; a
    # quote in a test line breaks the command (shell-injection risk for
    # untrusted input) — writing the file directly would be safer.
    shell_exec('echo \''+ code +'\' > '+c_file)
    shell_exec('gcc -o '+ c_bin +' '+ c_file +' '+ lib_path)
    s = shell_exec(dir_tmp + '/./' + name +'_bin'+str(index))
    os.remove(c_file)
    os.remove(c_bin)
    return s
def compile_tests(lib_path, header_path, filename):
    """Run each test line through both the real printf and ft_printf, compare
    their outputs, and report failures plus a final score.

    Fixes: Python-2-only `print` statements replaced with single-argument
    print() calls (valid in both 2 and 3); the test file is now opened with a
    context manager instead of being leaked; the bare except is narrowed.
    """
    count = 0
    c_write_true_begin = '#include <stdio.h>\nint main(void){printf('
    c_write_true_end = ');return(0);}'
    c_write_false_begin = '#include \"'+ os.path.abspath(header_path) +'\"\nint main(void){ft_printf('
    c_write_false_end = c_write_true_end
    try:
        os.mkdir(dir_tmp)
    except OSError:
        pass  # scratch dir already exists
    with open(filename) as f:
        # NOTE(review): caps the test file at 1000 bytes — confirm this limit
        # is intentional before raising it.
        contents = f.read(1000)
    lines = [line for line in contents.split('\n') if line]
    for i, line in enumerate(lines):
        s1 = exec_code(c_write_true_begin + line + c_write_true_end, 'true', i, '')
        s2 = exec_code(c_write_false_begin + line + c_write_true_end, 'false', i, lib_path)
        if s1 == s2:
            count += 1
        else:
            print('\033[91mfail for n'+ str(i) +' : output diff\033[0m')
            print('\033[94mthe content >>\033[0m' + line + '\033[94m<<\033[0m')
            print('\033[92mreal printf >>\033[0m' + s1 + '\033[92m<<\033[0m')
            print('\033[93myour printf >>\033[0m' + s2 + '\033[93m<<\033[0m')
            print('')
    print('result for file \''+filename+'\': '+ str(count) + ' / ' + str(len(lines)))
def main():
    """Parse CLI arguments, locate the library (running make if given a
    directory), validate the paths, and launch the test run.

    Fix: Python-2-only `print` statements replaced with single-argument
    print() calls, valid under both Python 2 and 3.
    """
    parser = argparse.ArgumentParser(description='printf checker v'+ version +', check your ft_printf')
    parser.add_argument('-f', '--file',required=False, default=script_dir + '/' + default_file,
        help='the name of the file you want to test (default: '+ default_file +')')
    parser.add_argument('-p', '--path',required=True,
        help='the path of your libftprintf.a (example: ~/project/ft_printf/libftprintf.a), note: if you provide just the directory, we will try to do a make')
    parser.add_argument('-head', '--header',required=True,
        help='the path of libft.h or libftprintf.h (example: ~/project/ft_printf/libftprintf.h)')
    args = parser.parse_args()
    # Verification: if a directory was given, run make and pick up the archive.
    lib_path = args.path
    if os.path.isdir(args.path):
        print(shell_exec('make -C '+args.path))
        if os.path.isfile(args.path + '/' + 'libft.a'):
            lib_path = args.path + '/' + 'libft.a'
        if os.path.isfile(args.path + '/' + 'libftprintf.a'):
            lib_path = args.path + '/' + 'libftprintf.a'
    if not os.path.isfile(lib_path) or not os.path.isfile(args.header) or not os.path.isfile(args.file):
        print('error: one the argument you provided isn\'t a file')
        return 0
    compile_tests(lib_path, args.header, args.file)
if __name__ == '__main__':
    main()  # CLI entry point
| true |
c97cf6e78f2debc60f4f05f6d31ecc3661ada2f1 | Python | AzureStarDragon/Codingame | /Puzzles/Easy/Temperatures/Temperatures.py | UTF-8 | 432 | 3.828125 | 4 | [] | no_license | n = int(input())
# Read the temperatures and convert each whitespace-separated token to an int.
# Fixes: '//' is not a Python comment (it made this line a SyntaxError) and
# raw_input() is Python 2 only; n is read from stdin on the line above.
temps = [int(x) for x in input().split()]
if n > 0:
    # Sort descending by value, then stably by absolute value: the first
    # element is the temperature closest to zero, ties resolved to positive.
    print(sorted(sorted(temps, reverse=True), key=abs)[0])
else:
    # No temperatures were provided.
    print(0)
| true |
589c797caab012df9844a5d4763e78abce6aff0e | Python | AlexSSun/GWAS_NLP | /maintext_clean_batch.py | UTF-8 | 3,186 | 2.765625 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
import os
import re
from html.parser import HTMLParser
from bs4 import BeautifulSoup
from bs4 import element
from itertools import product
import argparse
import numpy as np
import pandas as pd
import json
from utils import *
def extract_text(soup):
    """
    Convert a BeautifulSoup html document into a dict with the cleaned main text body.

    Args:
        soup: BeautifulSoup object of the html page
    Return:
        result: dict mapping the article title to a list of
                [h2 heading, h3 heading, paragraph text] triples
    """
    title = soup.find_all('h1', "content-title")[0].get_text()
    main_text = []
    # Paragraphs whose id looks like p1 / Par12 / __p3 etc.
    paragraphs = soup.find_all('p', attrs={'id': re.compile('(__|_|)(p|P|Par|par)\d+')})
    for para in paragraphs:
        section = para.find_previous('h2', 'head')
        section_text = section.get_text() if section else ''
        subsection = para.find_previous_sibling('h3', attrs={'id': re.compile('S[\d]title')})
        subsection_text = subsection.get_text() if subsection else ''
        main_text.append([section_text, subsection_text, para.get_text()])
    return {title: main_text}
if __name__=='__main__':
    parser = argparse.ArgumentParser()
    # parser.add_argument("-p", "--pbs_index", type=int, help="pbs index/the file index")
    parser.add_argument("-b", "--base_dir", help="base directory for html files")
    parser.add_argument("-t", "--target_dir", help="target directory for output")
    args = parser.parse_args()
    # pbs_index = args.pbs_index
    base_dir = args.base_dir
    target_dir = args.target_dir
    file_list = get_files(base_dir)
    # Earlier single-file (PBS array job) version kept for reference:
    # filepath = file_list[pbs_index]
    # with open(filepath,'r') as f:
    #     text = f.read()
    # soup = BeautifulSoup(text, 'html.parser')
    # for e in soup.find_all(attrs={'style':['display:none','visibility:hidden']}):
    #     e.extract()
    # # what to do with in sentence reference
    # for ref in soup.find_all(class_=['supplementary-material','figpopup','popnode','bibr']):
    #     ref.extract()
    # process_supsub(soup)
    # process_em(soup)
    # process_caption(soup)
    # process_table_figures(soup)
    # result = extract_text(soup)
    # print(filepath, len(list(result.values())[0]))
    maintext_dict = {}
    for i,filepath in enumerate(file_list):
        # NOTE(review): str.strip('.html') strips any of the characters
        # '.','h','t','m','l' from both ends, not the suffix — fine for
        # digit-suffixed PMC ids but fragile; consider slicing off the suffix.
        pmc = filepath.split('/')[-1].strip('.html')
        with open(filepath,'r') as f:
            text = f.read()
        soup = BeautifulSoup(text, 'html.parser')
        # Drop elements hidden via inline style.
        for e in soup.find_all(attrs={'style':['display:none','visibility:hidden']}):
            e.extract()
        # what to do with in sentence reference
        for ref in soup.find_all(class_=['supplementary-material','figpopup','popnode','bibr']):
            ref.extract()
        process_supsub(soup)
        process_em(soup)
        result = extract_text(soup)
        maintext_dict[pmc] = result
    # target_dir = '../output/maintext/'
    # One JSON file per article, named <pmc>_maintext.json.
    for k,v in maintext_dict.items():
        with open(os.path.join(target_dir,"{}_maintext.json".format(k)), "w") as outfile:
            json.dump(v, outfile,ensure_ascii=False)
| true |