blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
00c947838ccf102767e4876990a0244ceb7f2d46 | Python | thaisribeiro/heimdall | /heimdall_bank_validate/bank_validate.py | UTF-8 | 919 | 2.8125 | 3 | [
"MIT"
] | permissive | import re
from heimdall_bank_validate.base_validate_error import InvalidCodeBank
class BankValidate():
    """Validate a bank code against a table of well-known Brazilian banks,
    falling back to a generic alphanumeric pattern check.
    """

    def __init__(self, **kwargs):
        # Code under validation; None when the caller omits 'bank_code'.
        self.bank_code = kwargs.get('bank_code')

    def start(self):
        """Return True when the code is a known bank or passes the generic
        check; otherwise InvalidCodeBank is raised by valid_bank_generic.
        """
        switcher = {
            '001': 'Banco do Brasil',
            '237': 'Bradesco',
            '341': 'Itaú',
            '033': 'Santander',
            '745': 'Citibank',
            '399': 'HSBC',
            '041': 'Banrisul',
            '260': 'Nubank'
        }
        if switcher.get(self.bank_code):
            return True
        # Unknown code: fall back to the generic format validation.
        return self.valid_bank_generic()

    def valid_bank_generic(self):
        """
        Validate generic banks: 3 to 5 alphanumeric characters.

        Raises:
            InvalidCodeBank: when the code does not match the pattern.
        """
        # The original class '[0-9A-Za-x]' was a typo for '[0-9A-Za-z]';
        # behaviour was only saved by re.I. Use the clear, equivalent class.
        regex = re.compile(r'^[0-9A-Za-z]{3,5}$')
        if not regex.match(self.bank_code):
            raise InvalidCodeBank()
        return True
| true |
8d4e296c9fa74a71f70ada0b77ddf9997c9a86b4 | Python | NLPProjectGroup34/CSCI-544-GROUP-34 | /WordNet_Similarity/similaritytoscore.py | UTF-8 | 1,553 | 2.859375 | 3 | [] | no_license | #!/usr/bin/env python
from __future__ import division
import numpy as np
import pandas as pd
from sklearn import datasets, linear_model
from math import ceil
# Module-level containers filled in by the script below.
similarity_score = {}  # key -> [[similarity], gold score], loaded from CSV
train_data = {}        # training split (every key except files "11"/"12")
test_data = {}         # test split: key -> similarity value only
check_data = {}        # gold scores for the test split, used for accuracy
def get_data(file_name):
    """Load a CSV with Key/Similarity/Score columns into a dict of
    key -> [[similarity], score], both values coerced to float."""
    frame = pd.read_csv(file_name)
    return {
        key: [[float(sim)], float(score)]
        for key, sim, score in zip(frame['Key'], frame['Similarity'], frame['Score'])
    }
# Load the similarity/score table from a local CSV (hardcoded path).
similarity_score = get_data('/home/hmallyah/Desktop/SimilarityToGrades/lch.csv')
# Keys look like "<file>.<item>"; files 11 and 12 form the held-out test set.
for key in similarity_score:
    if key[0: key.find('.')] not in ["11", "12"]:
        train_data[key] = similarity_score[key]
    else:
        test_data[key] = similarity_score[key][0][0]
        check_data[key] = similarity_score[key][1]
def linear_model_main(X_parameters=None, Y_parameters=None, predicts=None):
    """Fit an ordinary least-squares model on (X, Y) and predict a value
    for every entry of *predicts*.

    The former mutable default arguments ([] / {}) are replaced by None
    sentinels to avoid the shared-mutable-default pitfall; callers passing
    all three arguments (as this script does) are unaffected.
    """
    X_parameters = [] if X_parameters is None else X_parameters
    Y_parameters = [] if Y_parameters is None else Y_parameters
    predicts = {} if predicts is None else predicts
    predictions = {}
    regr = linear_model.LinearRegression()
    regr.fit(X_parameters, Y_parameters)
    for key in predicts:
        # predict() returns an array; keep only the scalar prediction.
        predictions[key] = regr.predict(predicts[key])[0]
    return predictions
# Assemble regression inputs: x holds [similarity] rows, y the gold scores.
x = []
y = []
z = {}
for key in train_data:
    x.append(train_data[key][0])
    y.append(train_data[key][1])
# z maps each test key to its similarity value for prediction.
for key in test_data:
    z[key] = test_data[key]
predictions = {}
predictions = linear_model_main(x, y, z)
def accuracy(predictions=None, check_data=None):
    """Return the percentage of predictions whose ceiling is within half a
    grade of the gold score.

    The former mutable default arguments ({}) are replaced by None
    sentinels to avoid the shared-mutable-default pitfall.
    """
    predictions = {} if predictions is None else predictions
    check_data = {} if check_data is None else check_data
    count = 0
    for key, value in predictions.items():
        score = ceil(value)
        # Correct if it matches the gold score exactly or misses by 0.5
        # in either direction.
        if score in (check_data[key], check_data[key] - 0.5, check_data[key] + 0.5):
            count += 1
    return (float(count) / len(predictions)) * 100
print(str(accuracy(predictions, check_data)))
| true |
e81413d8d762901b138cac49489069fb31a33513 | Python | dpradhan25/Robotics-Tasks-2021 | /Ravindra-Nag/Python-task/task-1.py | UTF-8 | 3,732 | 4.03125 | 4 | [] | no_license | import random
def generate():
    """Return a random 4-digit number whose digits are all in 1-9.

    Returns:
        int: random 4-digit number (no zero digits, so always >= 1111).
    """
    # Draw digits in the same order as before so seeded runs match.
    digits = [random.randint(1, 9) for _ in range(4)]
    value = 0
    for digit in digits:
        value = value * 10 + digit
    return value
def convert_to_list(num):
    """Split the string *num* into a list of its individual characters.

    Args:
        num (str): string entered by the user

    Returns:
        list: one-character strings, in order
    """
    return [character for character in num]
def check_position(guess, target, correct):
    """Count how many correctly guessed digits sit in the matching slot.

    Args:
        guess (list): the player's guess, as characters
        target (list): the secret number, as characters
        correct (list): digits known to appear in both lists

    Returns:
        list: [digits in the right place, digits in the wrong place]
    """
    in_place = sum(
        1 for digit in correct
        if guess.index(str(digit)) == target.index(str(digit))
    )
    return [in_place, len(correct) - in_place]
def get_correct_list(guess, target):
    """Collect the target digits present in *guess*.

    Each matched digit is consumed from *guess* in place, so repeated
    digits are only credited as many times as they were guessed.

    Args:
        guess (list): guess characters (mutated: matches are removed)
        target (list): the 4 secret digits

    Returns:
        list: digits of *target* found in *guess*, in target order
    """
    matched = []
    for position in range(4):
        digit = target[position]
        if digit in guess:
            matched.append(digit)
            guess.remove(digit)
    return matched
def get_unique_guesses(correct, already_guessed):
    """Count digits in *correct* that were not guessed before.

    Previously guessed matches are consumed from *already_guessed* in
    place, so duplicates are only discounted once each.

    Args:
        correct (list): digits guessed correctly this turn
        already_guessed (list): digits credited earlier (mutated)

    Returns:
        int: number of newly guessed digits
    """
    fresh = 0
    for digit in correct:
        try:
            already_guessed.remove(digit)
        except ValueError:
            fresh += 1
    return fresh
def score_update(score, unique, correct):
    """Return the new score: +5 per newly found digit, -2 per digit of the
    4-digit target still missing."""
    bonus = unique * 5
    penalty = (4 - correct) * 2
    return score + bonus - penalty
def game():
    # Run one full round of the guessing game; returns True to play again.
    num = str(generate())
    target = convert_to_list(num)
    score = 0
    final = 0 # the score once all 4 digits are guessed
    won = False
    already_guessed = []
    # The player gets at most 10 turns.
    for i in range(10):
        print('Turns remaining:', 10-i)
        x = str(input('Guess the number : '))
        guess = convert_to_list(x)
        while(len(guess) != 4): # checks 4 digit or not
            print('Enter a 4-digit number only')
            x = str(input('Guess the number : '))
            guess = convert_to_list(x)
        # Work on a copy: get_correct_list consumes digits from its input.
        g_clone = guess.copy()
        if guess==target:
            print('All digits in the correct place.\nYou have won the game!!')
            won = True
        correct = get_correct_list(g_clone, target)
        unique_guess = get_unique_guesses(correct.copy(), already_guessed)
        already_guessed = correct.copy()
        score = score_update(score, unique_guess, len(already_guessed))
        if won:
            # Prefer the score frozen when all digits were first found.
            print('Your Score:', final if final > 0 else score)
            break
        else:
            position = check_position(guess, target, correct)
            if position[0]==4:
                # All digits known (just misplaced): freeze the score now.
                final = score
            print(len(correct),'digits:', correct, 'guessed correctly.')
            if position[0] >= position[1]:
                print(position[0], 'in the correct position.')
            else:
                print(position[1], 'in the wrong position.')
    if not won:
        print('You lost :(\nCorrect answer was', num)
    ch = str(input('Play Again? (y/n) '))
    if ch == 'y':
        return True
    else:
        return False
# Keep starting new rounds until the player declines a replay.
choice = True
while choice:
    choice = game()
print('Thanks for playing!')
8b4bcffe2f26337b6b55da86ad4780859ee88236 | Python | RussellMoore1987/resources | /python/MIS-5400/mod_3/mod_3_hmwk.py | UTF-8 | 3,557 | 4.28125 | 4 | [] | no_license | ######################################################
# MIS 5400
# Module 3 Homework
#
# INSTRUCTIONS
# 1) Write code to to complete exercises below.
# 2) Save the file and submit it using Canvas.
######################################################
'''
MIS 5400 Module 3 Homework
'''
###############
# Exercise 1 #
###############
'''
Write code to analyze each number between 2000 and 6500 and do the following (HINT use the range function)
1.) If the number is divisible by 5 then print out [Fitty].
2.) If the number is divisible by 7 then print out [Sevvy].
3.) If the number is divisible by BOTH 5 and 7 print out ["Winner's win", said Bob] (quotes included)
'''
# WRITE YOUR CODE HERE
# Check the combined (5 and 7) case first; otherwise the single-divisor
# branches below would shadow it.
for number in range(2000, 6501):
    # print(number)
    if number % 5 == 0 and number % 7 == 0:
        print('"Winner\'s win", said Bob')
    elif number % 5 == 0:
        print('Fitty')
    elif number % 7 == 0:
        print('Sevvy')
###############
# Exercise 2 #
###############
'''
Using the file "access.log", write code that does the following:
1) Read the file into a list.
2) Print out the following information:
How many Total logs are there?
How many logs have a status code of 404? (Hint: Membership Checking)
How many logs have a status code of 200?
How many of the logs contain the text "mis"?
3) Write some code that replaces all instances of "redflag" with "greenlight" (Hint: string replace method)
4) Put all logs with the replaced values in a new list
5) Create a file named "mis5400.log" and write out the list with the replaced values.
'''
# WRITE YOUR CODE HERE
# get path (hardcoded to this machine's checkout of the course repo)
path = r'C:\Users\truth\Desktop\code and resources\projects\resources\python\MIS-5400\mod_3\access.log'
with open(path) as f:
    # # Read the file into a list.
    lines = f.readlines()
log_404 = []
log_200 = []
log_mis = []
# Loop over and extract information
for line in lines:
    # How many logs have a status code of 404?
    # The status code follows the HTTP version inside each log line.
    if 'HTTP/1.1" 404' in line or 'HTTP/1.0" 404' in line:
        log_404.append(line)
    # How many logs have a status code of 200?
    if 'HTTP/1.1" 200' in line or 'HTTP/1.0" 200' in line:
        log_200.append(line)
    # How many of the logs contain the text "mis"
    if 'mis' in line:
        log_mis.append(line)
# # Print out the following information:
print(f'How many Total logs are there? {len(lines)}')
print(f'How many logs have a status code of 404? {len(log_404)}')
print(f'How many logs have a status code of 200? {len(log_200)}')
print(f'How many of the logs contain the text "mis"? {len(log_mis)}')
# # Write some code that replaces all instances of "redflag" with "greenlight" (Hint: string replace method)
# this could have been done above in the for loop, but for the sake of answering this question specifically
lines2 = [line.replace('redflag', 'greenlight') for line in lines]
# # Put all logs with the replaced values in a new list
# this could have been done above in the for loop, but for the sake of answering this question specifically
lines_greenlight = [line.replace('redflag', 'greenlight') for line in lines if 'redflag' in line]
# # Create a file named "mis5400.log" and write out the list with the replaced values.
# set new path
path = r'C:\Users\truth\Desktop\code and resources\projects\resources\python\MIS-5400\mod_3\mis5400.log'
# create and write to file, if it exists it will overwrite it
with open(path, 'w') as f:
    # they already have a '\n' so don't add one
    f.write(''.join(lines_greenlight))
dac84a11afac59adcb1e01fc40df517af573a943 | Python | vstarman/python_codes | /20day/mini_web.py | UTF-8 | 3,609 | 2.640625 | 3 | [] | no_license | import sys, socket, re, multiprocessing
g_static_document_root = "./static"
g_dynamic_document_root = "./dynamic"
class WSGIServer(object):
    """A minimal WSGI-style web server: serves static files from
    g_static_document_root and routes *.html requests to the app."""

    def __init__(self, port, app):
        # The WSGI application callable used for dynamic (.html) requests.
        self.app = app
        self.web_server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Allow quick restarts on the same port.
        self.web_server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.web_server_socket.bind(("", port))
        self.web_server_socket.listen(128)
        self.response_header = ""  # response header saved by the dynamic-request callback

    def run_forever(self):
        # Accept-and-handle loop; each request is served sequentially.
        while True:
            client_socket, client_addr = self.web_server_socket.accept()
            self.handle_request(client_socket)
            client_socket.close()

    def handle_request(self, client_socket):
        """Parse one HTTP request and send back a response."""
        request_list = client_socket.recv(4096).decode("utf-8").splitlines()
        # Extract the path from the request line, e.g. "GET /index.html HTTP/1.1".
        path_info = re.match(r"([^/]*)([^ ]+)", request_list[0]).group(2)
        if path_info == "/":
            path_info = "/test.html"
        if re.search(r"\.html", path_info):
            # Dynamic request: delegate to the WSGI app.
            env = {"file_name": path_info}
            print('动态请求路径:', path_info)
            response_body = self.app(env, self.handle_dynamic_request)
            send_data = self.response_header.encode() + response_body
            should_send_len = len(send_data)
            had_send_len = 0
            # Loop until the whole payload is sent (send may be partial).
            while had_send_len < should_send_len:
                had_send_len += client_socket.send(send_data)
            print("发送data:", send_data)
            print("动态页面发送完成,发送大小:", had_send_len)
            client_socket.close()
        else:
            # Static request: serve the file from disk.
            try:
                f = open(g_static_document_root + path_info, "rb")
                print('静态请求路径:', g_static_document_root + path_info)
            # NOTE(review): `FileNotFoundError or OSError` evaluates to just
            # FileNotFoundError — plain OSErrors are NOT caught here; confirm intent.
            except FileNotFoundError or OSError as e:
                print("请求的静态文件本地没有...", e)
                response_header = "HTTP/1.1 404 not found\r\n"
                response_header += "\r\n"
                response_body = "404 Not Found".encode()
            else:
                response_header = "HTTP/1.1 200 OK\r\n"
                response_header += "\r\n"
                response_body = f.read()
                f.close()
            finally:
                send_data = response_header.encode() + response_body
                should_send_len = len(send_data)
                had_send_len = 0
                while had_send_len < should_send_len:
                    had_send_len += client_socket.send(send_data)
                print("静态页面发送完成,发送大小:", had_send_len)
                client_socket.close()

    def handle_dynamic_request(self, state, header_list):
        # WSGI start_response callback: record status line and headers for
        # handle_request to prepend to the body.
        self.response_header = "HTTP/1.1 %s\r\n" % state
        for key, value in header_list:
            self.response_header += "%s: %s\r\n" % (key, value)
        self.response_header += "\r\n"
def main():
    """Entry point: parse CLI args and run the web server."""
    # Expected invocation: python3 mini_web.py 7878 App:app
    if len(sys.argv) == 3 and sys.argv[1].isdigit() and ":" in sys.argv[2]:
        port = int(sys.argv[1])
        module_name, app_name = sys.argv[2].split(":")
        print(sys.argv)
    else:
        print("输入方式: python3 mini_web.py 7878 App:app")
        return
    # Import the framework module and look up the app callable by name.
    web_fram_module = __import__(module_name)
    app = getattr(web_fram_module, app_name)
    http_server = WSGIServer(port, app)
    http_server.run_forever()

if __name__ == '__main__':
    main()
1857206bc95b5baf9cb4d35a3f841ad38de1022d | Python | caohaitao/PythonTest | /pytorch/net.py | UTF-8 | 816 | 3.234375 | 3 | [] | no_license | __author__ = 'ck_ch'
# -*- coding: utf-8 -*-
import torch
import torch.nn.functional as F # 激励函数都在这
class Net(torch.nn.Module):
    """Two-layer fully connected network: Linear -> ReLU -> Linear."""

    def __init__(self, n_feature, n_hidden, n_output):
        super(Net, self).__init__()
        # Layers are created in this order so seeded parameter
        # initialisation stays reproducible.
        self.hidden = torch.nn.Linear(n_feature, n_hidden)
        self.predict = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        """Forward pass: hidden affine layer, ReLU, then output layer."""
        activated = F.relu(self.hidden(x))
        return self.predict(activated)
| true |
7ad9df338a87d057042c116e88daf02b2268d916 | Python | tmu-nlp/100knock2016 | /yui/chapter03/knock22.py | UTF-8 | 329 | 3.234375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# カテゴリ名の抽出
# 記事のカテゴリ名を(行単位ではなく名前で)抽出せよ.
import json
import re
# Extract the category name (not the whole line) from each Wikipedia
# category markup line, e.g. "[[Category:Name]]".
for line in open("wiki_uk_category.txt", "r"):
    extract_name = re.search('Category:(?P<name>.*)]]',line)
    if extract_name:
        print(extract_name.group('name'))
| true |
f5c6cbc93a215c050063e22cb84b553c4263a78d | Python | brianchiang-tw/leetcode | /No_0700_Search in a Binary Search Tree/search_in_a_binary_search_tree_iterative.py | UTF-8 | 1,996 | 4.5 | 4 | [
"MIT"
] | permissive | '''
Description:
Given the root node of a binary search tree (BST) and a value. You need to find the node in the BST that the node's value equals the given value. Return the subtree rooted with that node. If such node doesn't exist, you should return NULL.
For example,
Given the tree:
4
/ \
2 7
/ \
1 3
And the value to search: 2
You should return this subtree:
2
/ \
1 3
In the example above, if we want to search the value 5, since there is no node with value 5, we should return NULL.
Note that an empty tree is represented by NULL, therefore you would see the expected output (serialized tree format) as [], not null.
'''
# Definition for a binary tree node.
class TreeNode:
    """Binary-tree node holding a value and left/right child links."""

    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None


class Solution:
    def searchBST(self, root: TreeNode, val: int) -> TreeNode:
        """Iteratively walk a BST from *root*; return the node whose value
        equals *val*, or None when no such node exists.

        Runs in O(h) time (h = tree height) and O(1) extra space.
        """
        node = root
        while node is not None:
            if val == node.val:
                return node
            # BST invariant: smaller values live in the left subtree.
            node = node.left if val < node.val else node.right
        # Value not present anywhere in the tree.
        return None
# n : the number of nodes in binary tree
## Time Complexity: O(h), worst case down to O( n )
#
# Average case is of order tree height = O( h ) = O ( log n )
# Worst case is of order tree length = O( n ) when tree is degraded to a linked list
## Space Complexity: O(1)
#
# The overhead in space is to maintain looping variable cur, which is of fixed size O( 1 )
def test_bench():
    # Build the example tree from the problem statement and search for 2.
    root = TreeNode(4)
    root.left = TreeNode(2)
    root.right = TreeNode(7)
    root.left.left = TreeNode(1)
    root.left.right = TreeNode(3)
    target = 2
    # expected output:
    '''
    2
    '''
    print( Solution().searchBST(root, val = target ).val )
    return

if __name__ == '__main__':
    test_bench()
4b257fb9e1b2af17087d822cbaa0f4f90d9f8f7a | Python | Andrey0563/Kolocvium | /№ 24.py | UTF-8 | 555 | 3.59375 | 4 | [] | no_license | '''
№24
Знайти суму елементів масиву цілих чисел, які діляться на 5 і на 8
одночасно. Розмірність масиву - 30. Заповнення масиву здійснити випадковими
числами від 500 до 1000.
Дужак Андрій 122-Г
'''
import random
import numpy as np

# Fill a 30-element array with random integers in [500, 1000] and sum the
# elements divisible by both 5 and 8 (i.e. by 40).
a = np.zeros(30, dtype=int)
s = 0
for i in range(len(a)):
    a[i] = (random.randint(500, 1000))
    if (a[i] % 5 == 0) and (a[i] % 8 == 0):  # check the divisibility condition
        s += a[i]
print(s)
| true |
3389c6d311d8c25736df0caf83665d4d06f8f05b | Python | hanguyen0/MITx-6.00.1x | /hangman3.py | UTF-8 | 794 | 4.09375 | 4 | [
"Giftware"
] | permissive | '''
>>> lettersGuessed = ['e', 'i', 'k', 'p', 'r', 's']
>>> print(getAvailableLetters(lettersGuessed))
abcdfghjlmnoqtuvwxyz
Hint: You might consider using string.ascii_lowercase, which is a string comprised of all lowercase letters:
>>> import string
>>> print(string.ascii_lowercase)
abcdefghijklmnopqrstuvwxyz
'''
import string
def getAvailableLetters(lettersGuessed):
    """Return the lowercase letters that have not been guessed yet.

    lettersGuessed: list of single-character strings guessed so far.
    Returns a string of the remaining letters in alphabetical order.
    """
    guessed = set(lettersGuessed)
    return ''.join(ch for ch in string.ascii_lowercase if ch not in guessed)
# Demo run matching the docstring example above.
lettersGuessed = ['e', 'i', 'k', 'p', 'r', 's']
print(getAvailableLetters(lettersGuessed))
| true |
aeea9a3d315ccdf474870f32510702ebc804de5f | Python | harshitandro/Python-Instrumentation | /utils/callbacks/base_callbacks.py | UTF-8 | 2,314 | 3.21875 | 3 | [
"Apache-2.0"
] | permissive | import threading
def start_callback(source, handler_callback, *args, **kwargs):
    """Hook fired before an instrumented callable runs.

    Forwards the instrumented call's arguments, plus the current thread id,
    to *handler_callback*; falls back to a debug print when no handler is set.
    """
    thread_id = threading.current_thread().ident
    if handler_callback is None:
        # TODO: Remove this to make the hook look effectively absent when handler set to None
        print(
            "StartCallback for {} :: threadID : {} :: args : {} :: kwargs : {} :: handler : {}".format(source, thread_id,
                                                                                                       args, kwargs,
                                                                                                       handler_callback))
    else:
        handler_callback(source, thread_id, *args, **kwargs)
def end_callback(source, handler_callback, *ret_val):
    """Hook fired after an instrumented callable returns.

    Forwards the return value(s), plus the current thread id, to
    *handler_callback*; falls back to a debug print when no handler is set.
    """
    thread_id = threading.current_thread().ident
    if handler_callback is None:
        # TODO: Remove this to make the hook look effectively absent when handler set to None
        print("EndCallback for {} :: return val : {} :: threadID : {} :: handler : {}".format(source, ret_val, thread_id,
                                                                                              handler_callback))
    else:
        handler_callback(source, thread_id, *ret_val)
def error_callback(source, handler_callback, type, value, traceback):
    """Hook fired when an instrumented callable raises an unhandled error.

    Forwards the exception triple, plus the current thread id, to
    *handler_callback*; falls back to a debug print when no handler is set.
    """
    thread_id = threading.current_thread().ident
    if handler_callback is None:
        # TODO: Remove this to make the hook look effectively absent when handler set to None
        print(
            "ErrorCallback for {} :: threadID : {} :: type : {} :: value : {} :: traceback : {} :: handler : {}".format(
                source, thread_id, type, value, traceback, handler_callback))
    else:
        handler_callback(source, thread_id, type, value, traceback)
ef506526bb69643df462e446ce40925762085b0e | Python | jsong00505/CodingStudy | /coursera/algorithms/part1/week3/mergesort/bottom_up_mergesort.py | UTF-8 | 1,191 | 3.0625 | 3 | [
"MIT"
] | permissive | class BottomUpMergesort:
def sort(self, a):
size = 1
res = []
while size < len(a):
lo = 0
while lo <= len(a):
mid = min(lo + size, len(a))
hi = min(lo + 2 * size, len(a))
left = a[lo:mid]
right = a[mid:hi]
if not left:
res.extend(right)
elif not right:
res.extend(left)
elif left[-1] < right[0]:
res.extend(left)
res.extend(right)
else:
while left or right:
if not left:
res.extend(right)
right = []
elif not right:
res.extend(left)
left = []
elif left[0] > right[0]:
res.append(right.pop(0))
else:
res.append(left.pop(0))
lo += size + size
size += size
a = res.copy()
res = []
return a
| true |
76df1fc3b7e0dfa57bbc89abbfdfaba56bbc8085 | Python | mutater/euclid | /proposition 03.02.py | UTF-8 | 653 | 3.03125 | 3 | [] | no_license | import pygame, sys, math
from pygame.locals import *
from euclidMath import Math
pygame.init()
Math = Math()
screen = pygame.display.set_mode((600, 600))
screen.fill((255, 255, 255))
# If two point be taken on the edge of a circle, a line connecting those points will always be inside the circle
a = (250, 350)
b = (350, 350)
c = (300, 300)
# Radius: distance from the centre c to the point a on the circumference.
distCB = Math.distance(a[0], c[0], a[1], c[1])
# Draw the chord and the circle it lies within.
pygame.draw.line(screen, (0, 0, 0), (c[0]-distCB, c[1]), (c[0], c[1]+distCB), 2)
pygame.draw.circle(screen, (0, 0, 0), c, distCB, 2)
pygame.display.flip()
# Event loop: keep the window open until the user closes it.
while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT: sys.exit()
| true |
1f1d8556eb23c3d30ec6b6566fa31093032a1e38 | Python | Jinmin-Goh/Codeforces | /#635_Div_2/C.py | UTF-8 | 1,023 | 3.109375 | 3 | [] | no_license | # Contest No.: 635
# Problem No.: C
# Solver: JEMINI
# Date: 20200416
import sys
def main():
    """Codeforces #635 Div2 C.

    Reads a tree of n vertices rooted at 1 and an integer k, then picks
    the k vertices maximising total (depth - number of children), which
    is computed per vertex during a BFS.
    """
    n, k = map(int, input().split())
    graph = {}
    for _ in range(n - 1):
        a, b = map(int, sys.stdin.readline().split())
        # Undirected adjacency list.
        graph.setdefault(a, []).append(b)
        graph.setdefault(b, []).append(a)
    # BFS from the root, counting depth and each vertex's child count.
    visited = set([1])
    frontier = [1]
    cnt = 1  # 1-based depth of the current BFS level
    costList = []
    while frontier:
        nextList = []
        for vertex in frontier:
            visited.add(vertex)
            for neighbour in graph[vertex]:
                if neighbour not in visited:
                    nextList.append(neighbour)
            # depth - 1 (edges above) minus number of children
            # (degree - 1, except the root where all neighbours are children).
            costList.append(cnt - 1 - (len(graph[vertex]) - 1))
        cnt += 1
        frontier = nextList[:]
    costList.sort()
    # BUGFIX: removed a leftover debug `print(costList)` that would have
    # corrupted the judged output.
    ans = 0
    for i in range(k):
        ans += costList[-i - 1]
    print(ans)
    return

if __name__ == "__main__":
    main()
645b489167c5bb7081abe3eb08ce17650751a9f6 | Python | brainmentorspvtltd/MSIT_CorePython | /web-crawling.py | UTF-8 | 917 | 2.734375 | 3 | [] | no_license | # pip install bs4
# pip install lxml
# import bs4
from bs4 import BeautifulSoup as BS
from urllib.request import urlopen
# import urllib.request as req
# Scrape the first job card from an Indeed search results page.
URL = "https://www.indeed.co.in/jobs?q=python&l="
response = urlopen(URL)
# print(response)
# htmlSourceCode = bs4.BeautifulSoup(response, "lxml")
htmlSourceCode = BS(response, "lxml")
# print(htmlSourceCode)
# find(tag, class_name): first match only, so this reports one listing.
heading = htmlSourceCode.find('h2', 'title')
print(heading.text.strip())
companyName = htmlSourceCode.find('span', 'company')
print(companyName.text.strip())
jobLocation = htmlSourceCode.find('div', 'location')
print(jobLocation.text.strip())
salary = htmlSourceCode.find('span', 'salaryText')
print(salary.text.strip())
summary = htmlSourceCode.find('div', 'summary')
jobDetailsList = summary.find('ul')
# li = jobDetailsList.find('li')
# print(li.text.strip())
# Print every bullet point of the job summary.
listOfLIs = jobDetailsList.find_all('li')
for li in listOfLIs:
    print(li.text.strip())
bfbe221dbf6d5ceddba342558a8ff4f11d595f0f | Python | tehamalab/tarakimu | /tests/test_cli.py | UTF-8 | 1,092 | 2.703125 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `tarakimu` CLI."""
from click.testing import CliRunner
from tarakimu import cli
def test_command_line_interface():
    """Smoke-test the top-level CLI group and its --help output."""
    runner = CliRunner()
    outcome = runner.invoke(cli.cli)
    assert outcome.exit_code == 0
    # The group listing should mention itself and the subcommand.
    for expected in ('cli', 'numtowords'):
        assert expected in outcome.output
    help_outcome = runner.invoke(cli.cli, ['--help'])
    assert help_outcome.exit_code == 0
    assert '--help' in help_outcome.output
def test_numtowords(numwords):
    """Every sample number converts to the expected words per language."""
    runner = CliRunner()
    for lang, samples in numwords.items():
        for number, words in samples.items():
            outcome = runner.invoke(cli.cli, ['numtowords', '-l', lang, '--', number])
            assert outcome.exit_code == 0
            assert words == outcome.output.strip()
def test_invalid_numtowords(invalid_nums):
    """Invalid numbers must fail with a non-zero exit code and no output."""
    runner = CliRunner()
    for bad_number in invalid_nums:
        outcome = runner.invoke(cli.cli, ['numtowords', bad_number])
        assert outcome.exit_code != 0
        assert outcome.output == ''
| true |
df034fff35b41d8eeb19befd49d3d49c747ed782 | Python | daydreamer2023/Healing-POEs-ICML | /Code/bayesian_benchmarks_modular/bayesian_benchmarks/tasks/classification.py | UTF-8 | 4,981 | 2.53125 | 3 | [] | no_license | """
A classification task, which can be either binary or multiclass.
Metrics reported are test loglikelihood, classification accuracy. Also the predictions are stored for
analysis of calibration etc.
"""
import sys
sys.path.append('../')
import argparse
import numpy as np
from scipy.stats import multinomial
from bayesian_benchmarks.data import get_classification_data
from bayesian_benchmarks.models.get_model import get_classification_model
from bayesian_benchmarks.database_utils import Database
import tensorflow as tf
import math
from tqdm import tqdm
def parse_args():  # pragma: no cover
    """Build and parse the benchmark's command-line arguments."""
    parser = argparse.ArgumentParser()
    # (flag, default, type) for every supported option.
    options = (
        ("--model", 'variationally_sparse_gp', str),
        ("--dataset", 'statlog-german-credit', str),
        ("--split", 0, int),
        ("--seed", 0, int),
        ("--database_path", '', str),
    )
    for flag, default, arg_type in options:
        parser.add_argument(flag, default=default, nargs='?', type=arg_type)
    return parser.parse_args()
def top_n_accuracy(preds, truths, n):
    """Fraction of rows whose true class is among the n highest-scored classes.

    preds: (N, K) class scores; truths: (N, K) one-hot labels.
    """
    top_n = np.argsort(preds, axis=1)[:, -n:]
    labels = np.argmax(truths, axis=1)
    hits = sum(1 for row in range(labels.shape[0]) if labels[row] in top_n[row, :])
    return float(hits) / labels.shape[0]
def onehot(Y, K):
    """One-hot encode integer labels; the trailing size-1 axis of Y is
    replaced by a size-K axis."""
    encoded = np.eye(K)[Y.flatten().astype(int)]
    return encoded.reshape(Y.shape[:-1] + (K,))
def run(ARGS, data=None, model=None, is_test=False):
    """Fit a classification model, evaluate loglik/top-k accuracy on the
    test split and (unless is_test) persist results to the database."""
    powers = [10]
    # Aggregation schemes and the weightings each supports.
    dict_models={'bar':['variance'],'gPoE':['uniform','variance'],'rBCM':['diff_entr','variance'],'BCM':['no_weights'],'PoE':['no_weights']}
    data = data or get_classification_data(ARGS.dataset, split=ARGS.split)
    model = model or get_classification_model(ARGS.model)(data.K, is_test,seed=ARGS.seed)
    Y_oh = onehot(data.Y_test, data.K)[None, :, :]  # 1, N_test, K
    print('model fitting')
    model.fit(data.X_train, data.Y_train)
    if 'expert' in ARGS.model:
        # Expert models: gather per-expert predictions once, then sweep
        # every aggregation scheme / weighting / power combination.
        print('gathering predictions')
        mu_s, var_s = model.gather_predictions(data.X_test)
        print('prediction aggregation')
        for model_name in dict_models.keys():
            for weighting in dict_models[model_name]:
                for power in powers:
                    model.power = power
                    model.model = model_name
                    model.weighting = weighting
                    p = model.predict(data.X_test, mu_s, var_s)
                    res = {}
                    # clip very large and small probs
                    eps = 1e-12
                    p = np.clip(p, eps, 1 - eps)
                    # Renormalise so each row sums to 1 after clipping.
                    p = p / np.expand_dims(np.sum(p, -1), -1)
                    logp = multinomial.logpmf(Y_oh, n=1, p=p)
                    res['test_loglik'] = np.average(logp)
                    res['top_1_acc'] = top_n_accuracy(p,np.reshape(Y_oh, (-1,data.K)), 1)
                    res['top_2_acc'] = top_n_accuracy(p,np.reshape(Y_oh, (-1,data.K)), 2)
                    res['top_3_acc'] = top_n_accuracy(p,np.reshape(Y_oh, (-1,data.K)), 3)
                    pred = np.argmax(p, axis=-1)
                    res.update(ARGS.__dict__)
                    res['model']=model_name+'_'+str(power)+'_'+ARGS.model.split('_')[1]+'_'+ARGS.model.split('_')[2]+'_'+weighting
                    print('end', res)
                    if not is_test:  # pragma: no cover
                        with Database(ARGS.database_path) as db:
                            db.write('classification', res)
                    # Power-insensitive weightings only need a single power.
                    if weighting in [ 'no_weights','uniform','diff_entr']:
                        break
    else:
        p = model.predict(data.X_test)  # N_test,
        # clip very large and small probs
        eps = 1e-12
        p = np.clip(p, eps, 1 - eps)
        p = p / np.expand_dims(np.sum(p, -1), -1)
        # evaluation metrics
        res = {}
        logp = multinomial.logpmf(Y_oh, n=1, p=p)
        res['test_loglik'] = np.average(logp)
        res['top_1_acc'] = top_n_accuracy(p,np.reshape(Y_oh,(-1,data.K)),1)
        res['top_2_acc'] = top_n_accuracy(p,np.reshape(Y_oh,(-1,data.K)),2)
        res['top_3_acc'] = top_n_accuracy(p,np.reshape(Y_oh,(-1,data.K)),3)
        pred = np.argmax(p, axis=-1)
        res.update(ARGS.__dict__)
        if not is_test:  # pragma: no cover
            with Database(ARGS.database_path) as db:
                db.write('classification', res)
    return res

if __name__ == '__main__':
    run(parse_args())
7f9307a3ec6dd67aef0561be876f4bea67163063 | Python | RobWillison/RiverLeveML | /MapCoordConversion/MeteoxConversion.py | UTF-8 | 1,892 | 2.96875 | 3 | [] | no_license | from convertbng.util import convert_bng, convert_lonlat
import db_config
import math
def getMapPixelData():
    """Map-calibration constants: origin pixel plus x/y pixels-per-unit."""
    origin = [320, 350]
    x_scale = 680 / 70
    y_scale = 1100 / 130
    return [origin, x_scale, y_scale]

def eastNorthToPixel(easting, northing):
    """Convert easting/northing (map units) to pixel coordinates."""
    origin, x_scale, y_scale = getMapPixelData()
    return [x_scale * easting + origin[0], y_scale * northing + origin[1]]

def pixelToEastNorth(pixelX, pixelY):
    """Inverse of eastNorthToPixel: pixel coordinates -> easting, northing."""
    origin, x_scale, y_scale = getMapPixelData()
    return (pixelX - origin[0]) / x_scale, (pixelY - origin[1]) / y_scale
def getEastingNorthing(lat, long):
    # Thin wrapper over convertbng; the library takes (lon, lat) order and
    # returns ([eastings], [northings]) lists.
    return convert_bng(long, lat)
def getRainArea(mapCoords):
    """Map a pixel coordinate to an index in the 40-column grid of
    50x50-pixel rain-radar cells."""
    column = math.floor(mapCoords[0] / 50)
    # Pixel y grows downwards; flip it against the 1450-pixel map height.
    row = math.floor((1450 - mapCoords[1]) / 50)
    return row * 40 + column
def convertToLatLong(areaId):
    # Invert getRainArea: recover the cell's pixel origin from its index,
    # then convert to easting/northing and finally to lon/lat (printed).
    yCord = (math.floor(areaId / 40)) * 50
    xCord = (areaId - ((yCord / 50) * 40)) * 50
    easting, northing = pixelToEastNorth(xCord, yCord)
    print(easting, northing)
    result = convert_lonlat(easting, northing)
    print(result)
def getRiverInfo():
    # Fetch rivers that have coordinates but no rain-radar area assigned yet.
    cursor = db_config.cnx.cursor()
    sql = "SELECT * FROM rivers WHERE rain_radar_area_id IS NULL AND lat IS NOT NULL"
    cursor.execute(sql)
    result = cursor.fetchall()
    return result

def updateRainArea(id, rainArea):
    # Persist the computed rain-radar area for one river (parameterised SQL).
    cursor = db_config.cnx.cursor()
    sql = "UPDATE rivers SET rain_radar_area_id = %s WHERE id = %s"
    cursor.execute(sql, (rainArea, id))
    db_config.cnx.commit()

def updateRivers():
    # For each unassigned river: lat/long -> grid ref -> pixel -> area id.
    for river in getRiverInfo():
        gridref = getEastingNorthing(river['lat'], river['long'])
        # convertbng returns metres; scale to the 10 km map units used here.
        easting = gridref[0][0] / 10000
        northing = gridref[1][0] / 10000
        mapCoords = eastNorthToPixel(easting, northing)
        areaId = getRainArea(mapCoords)
        print(areaId)
        updateRainArea(river['id'], areaId)

# Ad-hoc check: print the lon/lat of one known rain-radar cell.
convertToLatLong(811)
13d21d38d5c7db2e4154f6cdef6f8fb711d19c99 | Python | cfrancois7/pynom2rdf | /pyio2rdf/isic2rdf.py | UTF-8 | 6,468 | 2.53125 | 3 | [
"BSD-3-Clause"
] | permissive | # python3
"""Transform ISIC classifications into RDF and JSON-LD
Notes:
-----
This package allows to transform ISIC classification (*.txt) into RDF and JSON-LD.
It can transform the ISIC classification into centrally registered identifier
(CRID) and into classes.
The package is compatible with the IEO ontology[1].
[1]: https://github.com/cfrancois7/IEO-ontology
"""
import argparse
from os.path import splitext, abspath
from rdflib import Graph, Literal, Namespace, RDF, RDFS
from pandas import read_csv, DataFrame
from ieo_types import nom_graph
ISIC = Namespace('https://unstats.un.org/unsd/cr/registry/')
BFO = OBI = IAO = Namespace('http://purl.obolibrary.org/obo/')
IEO = Namespace('http://www.isterre.fr/ieo/')
IAO.denotes = IAO.IAO_0000219
BFO.has_part = BFO.BFO_0000051
BFO.part_of = BFO.BFO_0000050
# Industrial activity classification (IAC)
REGISTRY_VERSION = IEO.IEO_0000043
REF_ACTIVITY = IEO.IEO_0000065
ACTITIVY_CRID = IEO.IEO_0000066
STAT_REGISTRY = IEO.IEO_0000071
def sup_spe_charact(text: str) -> str:
    """Sanitise *text* for use as an identifier fragment.

    Problematic punctuation and whitespace are mapped to '_'; bracket
    characters are removed entirely.

    Note: the original `elif` branch was unreachable because the bracket
    characters were never part of the iterated list, so brackets were
    silently kept; they are now actually stripped as intended.
    """
    for char in ['\\', '`', '*', ' ', '>', '#', '+', '-', '.', '!', '$', '\'']:
        if char in text:
            text = text.replace(char, "_")
    for char in ['{', '}', '[', ']', '(', ')']:
        if char in text:
            text = text.replace(char, "")
    return text
def verify_version() -> int:
    """Prompt until the user enters an integer ISIC revision number."""
    try:
        version_number = int(input('What is the version number of the ISIC Rev classification? :'))
    # NOTE(review): bare except also swallows KeyboardInterrupt/EOFError;
    # a narrower `except ValueError` would be safer — confirm intent.
    except:
        print('The version has to be an integer')
        # Recurse until valid input is given.
        version_number = verify_version()
    return version_number
def isic2crid(data: Graph if False else DataFrame) -> Graph:  # type: ignore[valid-type]
    """Build an RDF graph of ISIC CRIDs from a (code -> label) DataFrame.

    *data* is indexed by ISIC code with the activity label in column 0.
    The user is prompted for the ISIC revision number.
    """
    graph = nom_graph()
    crid_reg = 'ISIC'
    crid_reg_label = 'International Standard Industrial Classification'
    version = str(verify_version())
    # Registry-level triples: the ISIC registry and its revision.
    graph.add((STAT_REGISTRY, RDFS.label,
               Literal('statistical classification registry', lang='en')))
    graph.add((ISIC[crid_reg], RDF.type, STAT_REGISTRY))
    graph.add((ISIC[crid_reg], RDFS.label, Literal(crid_reg_label)))
    database_id = 'ISIC_Rev'+version
    database_label = 'International Standard Industrial Classification (ISIC) Rev'+version
    graph.add((REGISTRY_VERSION, RDFS.label,
               Literal('registry version', lang='en')))
    graph.add((ISIC[database_id], RDF.type, REGISTRY_VERSION))
    graph.add((ISIC[database_id], RDFS.label,
               Literal(database_label)))
    graph.add((ISIC[database_id], IAO.denotes, ISIC[crid_reg]))
    graph.add((IAO.denotes, RDFS.label,
               Literal('denotes', lang='en')))
    # Class-level triples: the identifier class and its label class.
    classification_label = f'ISIC Rev{version} identifier'
    graph.add((ISIC.classification, RDFS.label, Literal(classification_label, lang='en')))
    graph.add((ISIC.classification, RDFS.subClassOf, ACTITIVY_CRID))
    ind_sector_label = classification_label+' label'
    graph.add((ISIC.industrial_sector, RDFS.subClassOf, REF_ACTIVITY))
    graph.add((ISIC.industrial_sector, RDFS.label, Literal(ind_sector_label, lang='en')))
    # One CRID + activity individual per ISIC code in the table.
    for code in data.index:
        activity_label = data.loc[code][0]
        crid = f'{database_id}_{code}'
        crid_label = f'{database_id}:{code} {activity_label}'
        # Sanitised label doubles as the activity's IRI fragment.
        activity_id = sup_spe_charact(activity_label)
        graph.add((ISIC[activity_id], RDF.type, ISIC.industrial_sector))
        graph.add((ISIC[activity_id], RDFS.label, Literal(activity_label, lang='en')))
        graph.add((ISIC[crid], RDFS.label, Literal(crid_label, lang='en')))
        graph.add((ISIC[crid], RDF.type, ISIC.classification))
        graph.add((ISIC[crid], RDFS.label, Literal(crid_label, lang='en')))
        # The CRID is linked in both directions to the registry version
        # and to the activity it identifies.
        graph.add((ISIC[crid], BFO.has_part, ISIC[database_id]))
        graph.add((ISIC[database_id], BFO.part_of, ISIC[crid]))
        graph.add((ISIC[crid], BFO.has_part, ISIC[activity_id]))
        graph.add((ISIC[activity_id], BFO.part_of, ISIC[crid]))
    return graph
def avoid_overwrite(output_path: str) -> str:
    """Ask for confirmation before the output overwrites the input file.

    Returns *output_path* when the user confirms, or a user-supplied
    replacement path when they decline.  Any other answer re-asks.

    Fixes: the original message started with ``\"\"\"\"`` (four quotes), which
    put a stray ``"`` at the start of the prompt; the original also recursed
    on invalid answers, which a loop handles without growing the stack.
    """
    message = """ The path for the output and the input file is the same.
    The input file is going to be overwritten. Are you sure to overwrite
    the input file? (Yes/No): """
    while True:
        answer = input(message).lower()
        if answer in ('yes', 'y'):
            return output_path
        if answer in ('no', 'n'):
            message_new = " What is the new path? (absolute or relative path): "
            return input(message_new)
        print('Error. The expected answer is Yes or No.')
def main():
    """CLI entry point: parse arguments, build the ISIC graph, serialise it."""
    description = """Transform ISIC classification registry into Graph and export
    it to the proper format."""
    usage = """ Usage:
    -----
    Command in shell:
    $ python3 isic2rdf.py [OPTION] file1.xml
    Arguments:
    file1.xml: the Ecoinvent's MasterData file to transforme. It has to
    respect the Ecospold2 format for MasterData.
    Options:
    -output, -o path of the output file
    --format, -f format of the output"""
    # create the parser
    parser = argparse.ArgumentParser(
        description=description,
        usage=usage)
    parser.add_argument(
        "--format", '-f',
        nargs=1,
        choices=['json-ld', 'xml', 'n3', 'nt'],
        default=['xml'],
        help='the output format of the file (default: Xml)')
    parser.add_argument(
        "input_path",
        metavar='path_to_input_file',
        nargs=1,
        type=str,
        help="the ISIC's file to transforme.")
    parser.add_argument(
        "output_path",
        metavar='path_to_output_file',
        nargs='?',
        type=str,
        default=False,
        help="the path of the output (default: input_name.format)")
    args = parser.parse_args()
    input_path = args.input_path[0]
    try:
        data = read_csv(input_path, index_col=0)
    except Exception as err:
        # BUGFIX: the original did `raise('...')`, which raises
        # "TypeError: exceptions must derive from BaseException" instead of
        # the intended message.  Raise a real exception and chain the cause.
        raise ValueError('Error in the input file. Impossible to open it. '
                         'The format expected is [code][label]') from err
    graph = isic2crid(data)
    if not args.output_path:
        # No output path given: derive it from the input name and the format.
        path = abspath(args.input_path[0])
        name_file = splitext(path)[0]
        new_ext = {'json-ld': '.json', 'xml': '.rdf', 'n3': '.n3',
                   'nt': '.nt'}
        new_ext = new_ext[args.format[0]]
        output_path = name_file+new_ext
    else:
        output_path = args.output_path
    if input_path == output_path:
        # Never silently clobber the input file.
        output_path = avoid_overwrite(output_path)
    graph.serialize(output_path, format=args.format[0])


if __name__ == "__main__":
    main()
| true |
3af246a8bacf037c63799f8ef54b66d1b98f5ac3 | Python | Luoyer-ly/Bj_pm2.5_predict | /build_network.py | UTF-8 | 4,087 | 2.796875 | 3 | [] | no_license | import numpy as np
def initialize_coef_deep(layers):
    """Randomly initialise the parameters of a fully connected network.

    layers[i] is the width of layer i; for each transition i-1 -> i a small
    random weight matrix W{i} of shape (layers[i], layers[i-1]) and a zero
    bias column b{i} of shape (layers[i], 1) are created.
    """
    parameters = {}
    for idx in range(1, len(layers)):
        parameters["W%d" % idx] = np.random.randn(layers[idx], layers[idx - 1]) * 0.01
        parameters["b%d" % idx] = np.zeros((layers[idx], 1))
    return parameters
def forward_propagation(X, parameters):
    """Full forward pass: ReLU hidden layers, sigmoid output layer.

    Returns the final activation AL and a cache dict holding every A{i}/Z{i}
    (with A0 = X) for use by back-propagation.
    """
    num_layers = len(parameters) // 2
    caches = {"A0": X}
    activation = X
    # Hidden layers 1 .. L-1 use ReLU.
    for layer in range(1, num_layers):
        activation, pre_activation = linear_activation_forward(
            activation, parameters["W%d" % layer], parameters["b%d" % layer], "relu")
        caches["A%d" % layer] = activation
        caches["Z%d" % layer] = pre_activation
    # The output layer uses a sigmoid.
    AL, ZL = linear_activation_forward(
        activation, parameters["W%d" % num_layers],
        parameters["b%d" % num_layers], "sigmoid")
    caches["A%d" % num_layers] = AL
    caches["Z%d" % num_layers] = ZL
    return AL, caches
def L_layer_model(X, Y, layer_dims, iteration=2000, learning_rate=0.009, print_cost=True):
    """Train the network with plain batch gradient descent.

    X: inputs (features x examples); Y: labels (outputs x examples);
    layer_dims: layer widths including input and output.
    Returns the trained parameter dict.
    """
    num_examples = Y.shape[1]
    parameters = initialize_coef_deep(layer_dims)
    for step in range(iteration):
        AL, caches = forward_propagation(X, parameters)
        # Report the cost every 100 iterations when requested.
        if print_cost and step % 100 == 0:
            cost = compute_cost(num_examples, AL, Y)
            print("Cost after %i iterations: %f" % (step, cost))
        grads = backward_propagation(parameters, AL, caches, Y)
        parameters = update_parameters(parameters, grads, learning_rate)
    return parameters
def compute_cost(m, AL, Y):
    """Binary cross-entropy summed over the batch and divided by m.

    Returned as a (n_output, 1) array (one cost entry per output unit).
    """
    elementwise = Y * np.log(AL) + (1 - Y) * np.log(1 - AL)
    return -np.sum(elementwise, axis=1, keepdims=True) / m
def backward_propagation(parameters, AL, caches, Y):
    """Full backward pass; returns a dict of gradients dW{i}/db{i}.

    BUGFIX: the original summed dAL over the batch axis and divided by m
    before back-propagating.  That collapsed the per-example gradients into
    one column and applied the 1/m factor twice (linear_backward already
    divides dW and db by m).  dAL must stay element-wise.
    """
    Y = Y.reshape(AL.shape)
    # Derivative of the binary cross-entropy w.r.t. AL, element-wise.
    dAL = -np.divide(Y, AL) + np.divide(1 - Y, 1 - AL)
    L = len(parameters) // 2
    grads = {}
    # Output layer (sigmoid activation).
    dA_previous, dW, db = linear_activation_backward(dAL, caches["A" + str(L - 1)],
                                                     caches["Z" + str(L)],
                                                     parameters["W" + str(L)],
                                                     parameters["b" + str(L)], "sigmoid")
    grads["dW" + str(L)] = dW
    grads["db" + str(L)] = db
    # Hidden layers (ReLU), walking backwards from L-1 down to 1.
    for i in reversed(range(1, L)):
        dA_previous, dW, db = linear_activation_backward(dA_previous, caches["A" + str(i - 1)],
                                                         caches["Z" + str(i)],
                                                         parameters["W" + str(i)],
                                                         parameters["b" + str(i)], "relu")
        grads["dW" + str(i)] = dW
        grads["db" + str(i)] = db
    return grads
def linear_forward(A, W, b):
    """Affine transform of one layer: W @ A + b (b broadcasts over columns)."""
    return W @ A + b
def linear_activation_forward(A_prev, W, b, activation):
    """One layer forward: Z = W.A_prev + b followed by the chosen activation.

    Returns (A, Z).  Raises ValueError for an unknown activation name
    (the original silently hit an UnboundLocalError instead).
    """
    Z = linear_forward(A_prev, W, b)
    if activation == 'sigmoid':
        A = sigmoid(Z)
    elif activation == 'relu':
        A = relu(Z)
    else:
        raise ValueError("unknown activation: %r" % (activation,))
    return A, Z
def linear_activation_backward(dA, A_previous, Z, W, b, activation):
    """One layer backward: undo the activation, then the linear step.

    Returns (dA_previous, dW, db) as produced by linear_backward.
    """
    if activation == "sigmoid":
        dZ = sigmoid_backward(dA, Z)
    elif activation == "relu":
        dZ = relu_backward(dA, Z)
    return linear_backward(dZ, A_previous, W, b)
def update_parameters(parameters, grads, learning_rate):
    """One gradient-descent step: theta <- theta - learning_rate * d_theta.

    Mutates and returns the parameter dict.
    """
    num_layers = len(parameters) // 2
    for layer in range(1, num_layers + 1):
        w_key = "W%d" % layer
        b_key = "b%d" % layer
        parameters[w_key] = parameters[w_key] - learning_rate * grads["d" + w_key]
        parameters[b_key] = parameters[b_key] - learning_rate * grads["d" + b_key]
    return parameters
def linear_backward(dZ, A_previous, W, b):
    """Gradients of the linear step Z = W.A_prev + b, averaged over the batch.

    Returns (dA_previous, dW, db); dW and db carry the 1/m factor.
    """
    batch = dZ.shape[1]
    dW = np.dot(dZ, A_previous.T) / batch
    db = np.sum(dZ, axis=1, keepdims=True) / batch
    dA_previous = np.dot(W.T, dZ)
    return dA_previous, dW, db
def sigmoid(A):
    """Element-wise logistic function 1 / (1 + e^-A)."""
    return np.divide(1, 1 + np.exp(-A))
def sigmoid_backward(dA, Z):
    """Chain rule through the logistic activation: dZ = dA * s(Z) * (1 - s(Z))."""
    # The sigmoid is inlined here so the derivative is self-contained.
    s = 1 / (1 + np.exp(-Z))
    return dA * s * (1 - s)
def relu(A):
    """Element-wise rectifier: max(0, A)."""
    return np.maximum(A, 0)
def relu_backward(dA, Z):
    """Gradient gate for ReLU: upstream gradient flows only where Z > 0."""
    return np.where(Z > 0, dA, 0)
| true |
c21d65456ef72f0bd762d6c8230d2c12f20a6008 | Python | y-oksaku/Competitive-Programming | /AtCoder/abc/154e_2.py | UTF-8 | 408 | 2.796875 | 3 | [] | no_license | from functools import lru_cache
# Digit DP: count integers in [1, N] whose decimal representation contains
# exactly K non-zero digits.
N = input()  # upper bound, kept as a digit string
K = int(input())
@lru_cache(maxsize=None)
def search(d, cnt, isLess):
    # d: index of the digit being placed; cnt: non-zero digits used so far;
    # isLess: True once the prefix is already strictly below N's prefix.
    if d == len(N):
        # A completed number counts iff it used exactly K non-zero digits
        # (the bool result is summed as 0/1 by the callers).
        return cnt == K
    a = int(N[d])
    if isLess:
        # Remaining digits are unconstrained: 9 non-zero choices or a zero.
        return search(d + 1, cnt + 1, isLess) * 9 + search(d + 1, cnt, isLess)
    ret = 0
    for i in range(a + 1):
        # Placing i < a frees the rest of the digits; i == a stays tight.
        ret += search(d + 1, cnt + (i != 0), i < a)
    return ret
print(search(0, 0, False))
db875462e0e0f488bb9774ae633c369a264a7895 | Python | poojataksande9211/python_data | /python_tutorial/excercise_3/demo.py | UTF-8 | 1,868 | 4.53125 | 5 | [] | no_license | #list chapter summery
# A list is a data structure that can hold values of any type.
# Creating a list:
words=["word1","word2"]
# A list may store anything, including other lists.
#----------------------------------------
# mixed=[1,2,3,[4,5,6],"seven",8.0,None] #None is a special value
# #list is a ordered collection of items
# print(mixed[0])
# print(mixed[3])
#-------------------------------------------
# Adding a single element to a list with append():
mixed=[1,2,3,[4,5,6],"seven",8.0,None] #None is a special "no value" marker
mixed.append("10")
print(mixed)
#------------------------------------------
# append() with a list argument adds the whole list as ONE nested element.
mixed=[1,2,3,[4,5,6],"seven",8.0,None]
mixed.append([10,20,30])
mixed.append([40,50,60])
print(mixed)
#-----------------------------------------
mixed=[1,2,3,[4,5,6],"seven",8.0,None]
mixed.extend([10,20,30]) # extend() adds each element individually, not as a nested list
print(mixed)
#----------------------------------------
# Joining two lists with the + operator:
word1=["a,b,c,d"]
word2=["e,f,g,h"]
c=word1+word2
print(c)
#-------------------------------------
# insert() places an element at a specific index:
mixed=[1,2,3,[4,5,6],"seven",8.0,None]
mixed.insert(1,"pooja") # insert at index 1
print(mixed)
#--------------------------------------
# Removing elements with pop():
mixed=[1,2,3,[4,5,6],"seven",8.0,None]
pop_item=mixed.pop() # no argument: removes the last element
print(pop_item) # pop() returns the removed element
print(mixed)
mixed.pop(2) # removes the element at index 2
print(mixed)
#------------------------------------
# remove() deletes by value, useful when the index is unknown:
mixed=[1,2,3,[4,5,6],"seven",8.0,None]
mixed.remove("seven")
print(mixed)
#-------------------------------------
# The del statement deletes by index:
mixed=[1,2,3,[4,5,6],"seven",8.0,None]
del mixed[3] # delete the element at index 3
print(mixed)
#-------------------------------------
# Looping over a list:
for i in mixed:
    print(i)
| true |
34fdf72375de1f3098254062039a9133d5ffa3f4 | Python | wsdhrqqc/Machine-learning | /aml_nn.py | UTF-8 | 3,039 | 2.734375 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 8 19:31:28 2020
@author: qingn
"""
import tensorflow as tf
import pandas as pd
import numpy as np
import pickle
import timeit
import matplotlib.pyplot as plt
from tensorflow import keras
import os
import time
# from IPython.core.interactiveshell import InteractiveShell
# InteractiveShell.ast_node_interactivity = "all"
# Tensorflow 2.0 way of doing things
from tensorflow.keras.layers import InputLayer, Dense
from tensorflow.keras.models import Sequential
import netCDF4
# Default plotting parameters
FONTSIZE = 18
plt.rcParams['figure.figsize'] = (10, 6)
plt.rcParams['font.size'] = FONTSIZE
# Build a Keras MLP with FIVE hidden layers of width
# [n_hidden, n_hidden, n_hidden-1, n_hidden, n_hidden]; every layer
# (including the output) uses `activation` (default elu); loss is MSE.
def build_model(n_inputs, n_hidden, n_output, activation='elu', lrate=0.001):
    # n_inputs: number of input features; n_hidden: base hidden-layer width;
    # n_output: output dimension; lrate: Adam learning rate.
    model = Sequential();
    model.add(InputLayer(input_shape=(n_inputs,)))
    model.add(Dense(n_hidden, use_bias=True, name="hidden_1", activation=activation))
    model.add(Dense(n_hidden, use_bias=True, name="hidden_2", activation=activation))
    model.add(Dense(n_hidden-1, use_bias=True, name="hidden_3", activation=activation))
    model.add(Dense(n_hidden, use_bias=True, name="hidden_4", activation=activation))
    model.add(Dense(n_hidden, use_bias=True, name="hidden_5", activation=activation))
    # model.add(Dense(n_hidden, use_bias=True, name="hidden_6", activation=activation))
    model.add(Dense(n_output, use_bias=True, name="output", activation=activation))
    # Adam optimiser; `lr=` is the legacy keyword (newer TF spells it learning_rate).
    opt = tf.keras.optimizers.Adam(lr=lrate, beta_1=0.9, beta_2=0.999,
                                epsilon=None, decay=0.0, amsgrad=False)
    model.compile(loss='mse', optimizer=opt)
    print(model.summary())
    return model
#%%
# Load the training data from a NetCDF file.
#fp = pd.read_csv("inputs.csv", "rb")
#foo = pickle.load(fp)
#fp.close()
data = netCDF4.Dataset('X_y.nc')
ins = data['X'][:]
outts = np.array(data['y'])
outs = outts.reshape(2250,1)
#%%
# NOTE(review): `model` is not defined until the loop in the next cell
# (line with build_model below) — running this file top-to-bottom raises
# NameError here; this cell only works when executed after the one below.
start = timeit.default_timer()
history= model.fit(x=ins, y=outs, epochs=80, verbose=False)
end = timeit.default_timer()
print(str(end-start))
#%%
# Seven independent training runs; collect loss curves and per-sample errors.
a = []
error = []
plt.figure()
for i in np.arange(7):
    start = timeit.default_timer() #Timing one run
    model = build_model(ins.shape[1], 8, outs.shape[1],activation='tanh')#, activation='sigmoid' # setup model
    history = model.fit(x=ins, y=outs, epochs=180, verbose=False) # run the model
    end = timeit.default_timer()
    print(str(end-start)) # How long each independent run took
    # history = model.fit(x=ins, y=outs, epochs=8000, verbose=False,
    #                     validation_data=(ins, outs),
    #                     callbacks=[checkpoint_cb, early_stopping_cb])
    a.append(history.history['loss'])
    error_each = np.abs((outs-model.predict(ins))[:,0]) # absolute per-sample error
    error.append(error_each)
    # Display the loss curve for this run.
    plt.plot(history.history['loss'],label = 'independent_learning_run'+str(i))
    plt.legend()
    plt.ylabel('MSE')
    plt.xlabel('epochs')
1b27a6c98ff849f9aa98ea945492c06d60fa5ccb | Python | sarvparteek/Data-structures-And-Algorithms | /fastQueue.py | UTF-8 | 2,571 | 3.984375 | 4 | [] | no_license | __author__ = 'sarvps'
'''
Author: Sarv Parteek Singh
Course: CISC 610
Term: Late Summer
Data Structures & Algorithms by Miller
Quiz 1, Problem 4
Brief: Implements a queue such that both enqueue and dequeue have O(1) performance on average.
In this case, it means that most of the time enqueue and dequeue will be O(1) except in one particular
circumstance, where dequeue will be O(n)
References: https://stackoverflow.com/questions/69192/how-to-implement-a-queue-using-two-stacks
'''
class Stack:
    """Minimal LIFO stack backed by a Python list (top = end of the list)."""

    def __init__(self):
        self.items = []

    def isEmpty(self):
        """True when the stack holds no items."""
        return self.items == []

    def push(self, item):
        """Place *item* on top of the stack."""
        self.items.append(item)

    def pop(self):
        """Remove and return the top item (IndexError when empty)."""
        return self.items.pop()

    def peek(self):
        """Return the top item without removing it (IndexError when empty)."""
        return self.items[-1]

    def size(self):
        """Number of items currently stored."""
        return len(self.items)

    def getStack(self):
        """Expose the backing list (callers should treat it as read-only)."""
        return self.items
class Queue:
    """FIFO queue built from two stacks.

    Enqueue pushes onto the input stack; dequeue pops from the output stack,
    refilling it (which reverses the order) only when it runs dry — giving
    amortised O(1) per operation.
    """

    def __init__(self):
        self.inputStack = Stack()
        self.outputStack = Stack()

    def enqueue(self, item):
        """Add *item* to the back of the queue."""
        self.inputStack.push(item)

    def dequeue(self):
        """Remove and return the front item, or None when the queue is empty."""
        # Move everything across only when the output side is exhausted;
        # this is the single O(n) step that makes the average O(1).
        if self.outputStack.isEmpty():
            while not self.inputStack.isEmpty():
                self.outputStack.push(self.inputStack.pop())
        if self.outputStack.isEmpty():
            return None
        return self.outputStack.pop()

    def size(self):
        """Total number of queued items."""
        return self.inputStack.size() + self.outputStack.size()

    def isEmpty(self):
        """True when no items are queued."""
        return self.inputStack.isEmpty() and self.outputStack.isEmpty()

    def getQueue(self):
        """Return the queued items front-to-back as a new plain list."""
        # The output stack holds the front of the queue in reversed order.
        front = self.outputStack.getStack()[::-1]
        return front + self.inputStack.getStack()
def main():
    """Smoke-test driver: interleave enqueues and dequeues, printing state."""
    q = Queue()
    q.enqueue(1)
    q.enqueue(2)
    q.enqueue(3)
    print("After enqueueing 1,2,3")
    print(q.getQueue())
    q.dequeue()
    print("After first dequeue")
    print(q.getQueue())
    q.enqueue(4)
    q.enqueue(5)
    print("After enqueueing 4,5")
    print(q.getQueue())
    q.dequeue()
    print("After dequeueing")
    print(q.getQueue())
    q.enqueue(6)
    q.enqueue(7)
    print("After enqueueing 6,7")
    print(q.getQueue())
    q.dequeue()
    print("After dequeueing")
    print(q.getQueue())
#main() #Test
| true |
229de51935ad7e61acb817bb509fa458436673dd | Python | jasontwright/coursera | /pythonlearn/week06/sandbox.py | UTF-8 | 643 | 3.078125 | 3 | [] | no_license | fruit = 'banana'
# NOTE: this is Python 2 code (print statements, not function calls).
letter = fruit[1]  # indexing: second character of the string
print letter
n = 3
w = fruit[n - 1]  # index may be any integer expression
print w
print len(fruit)
# Walking a string by index with a while loop:
index = 0
while index < len(fruit) :
    letter = fruit[index]
    print index,letter
    index = index + 1
# The idiomatic way: iterate the string directly.
for letter in fruit :
    print letter
# Counting occurrences of a character:
word = 'banana'
count = 0
for letter in word :
    if letter == 'a' :
        count = count + 1
print count
# Substring membership test with `in`:
if 'nan' in word :
    print 'Found it!'
# startswith() is case sensitive: only the 'Please' check prints.
line = 'Please have a nice day'
if line.startswith('Please') :
    print "Yes!"
if line.startswith('p') :
    print "Yes!"
print len('banana') * 7
greet = 'Hello Bob'
print greet.upper()
# find() returns the index of the first match.
data = 'From stephen.marquard@uct.ac.za Sat Jan'
pos = data.find('.')
print data[pos:pos+3]
| true |
cf807423201df091b6c63486b19ce446b7fd57ad | Python | Brendan-Bu/Awesome-Note | /about_window.py | UTF-8 | 3,677 | 2.59375 | 3 | [] | no_license | from PyQt5.Qt import *
from html_window import HtmlWindow
import os
class About(QWidget):
    """PyQt5 "About" window: app info labels plus guide/Markdown buttons."""

    def __init__(self, parent=None):
        """Build the whole window: info bar on top, button row below."""
        super(About, self).__init__(parent)
        self.setWindowTitle('About')
        self.setObjectName("About")
        self.setWindowIcon(QIcon('image/app.png'))
        self.resize(400, 300)
        self.setStyleSheet("#About{background-color:white}")
        self.whole_v_layout = QVBoxLayout()
        # Information bar: app name, version and developer labels (rich text).
        self.information_bar = QWidget()
        self.information_bar_v_layout = QVBoxLayout(self.information_bar)
        self.app_name = QLabel(self.information_bar)
        self.app_name.setText("<p style='font-family:Trebuchet MS;font-size:15px'>"
                              "<img src='image/about/app.png' align='top' width='25' height='25' />"
                              "  软件名称: Awesome Note</p>")
        self.information_bar_v_layout.addWidget(self.app_name)
        self.app_version = QLabel(self.information_bar)
        self.app_version.setText("<p style='font-family:Trebuchet MS;font-size:15px'>"
                                 "<img src='image/about/version.png' align='top' width='25' height='25' />"
                                 "  软件版本: 1.0.8</p>")
        self.information_bar_v_layout.addWidget(self.app_version)
        self.developer_name = QLabel(self.information_bar)
        self.developer_name.setText("<p style='font-family:Trebuchet MS;font-size:15px'>"
                                    "<img src='image/about/developer.png' align='top' width='25' height='25' />"
                                    "  开发人员: </p>"
                                    "<p>        "
                                    "<img src='image/about/1.png' align='top' width='25' height='25' />李志豪    "
                                    "<img src='image/about/2.png' align='top' width='25' height='25' />伍文征</p>"
                                    "<p>        "
                                    "<img src='image/about/3.png' align='top' width='25' height='25' />卜贤达    "
                                    "<img src='image/about/4.png' align='top' width='25' height='25' />谢玮勋</p>")
        self.information_bar_v_layout.addWidget(self.developer_name)
        # Button row: user guide (left) and Markdown syntax (right).
        self.button_box = QWidget()
        self.button_box_h_layout = QHBoxLayout(self.button_box)
        self.guide_book_button = QPushButton()
        self.guide_book_button.setObjectName("guide_button")
        self.guide_book_button.setStyleSheet("#guide_button{background-color:white}")
        self.guide_book_button.setText("用户手册")
        self.guide_book_button.setIcon(QIcon('image/about/guide.png'))
        self.guide_book_button.clicked.connect(self.open_guide_book)
        self.button_box_h_layout.addWidget(self.guide_book_button, 0, Qt.AlignLeft)
        self.md_button = QPushButton()
        self.md_button.setText("MD语法")
        self.md_button.setObjectName("md_button")
        self.md_button.setStyleSheet("#md_button{background-color:white}")
        self.md_button.setIcon(QIcon('image/about/md.png'))
        self.md_button.clicked.connect(self.open_md_book)
        self.button_box_h_layout.addWidget(self.md_button, 0, Qt.AlignRight)
        self.whole_v_layout.addWidget(self.information_bar)
        self.whole_v_layout.addWidget(self.button_box)
        self.setLayout(self.whole_v_layout)

    def open_guide_book(self):
        """Open the user guide.  Not implemented yet."""
        pass

    def open_md_book(self):
        """Open the online Markdown syntax reference in an HtmlWindow."""
        md_book_path = "http://wow.kuapp.com/markdown/basic.html"
        self.md_book = HtmlWindow(md_book_path, title="Markdown语法", icon="image/about/md.png")
        self.md_book.show()
| true |
b6b782eb51db1d16135f2729f1e91a3e6e42a794 | Python | vladopp/Programming101 | /week2/2/generate_numbers.py | UTF-8 | 263 | 2.8125 | 3 | [] | no_license | from sys import argv
from random import randint
def main():
    """Write N random integers in [0, 999], space separated, to a file.

    Usage: python generate_numbers.py <filename> <count>
    (reads both arguments from sys.argv).

    Fixes: the original opened the file without ever closing it; `with`
    guarantees the handle is released.
    """
    import sys
    from random import randint
    script, filename, n = sys.argv
    count = int(n)
    with open(filename, 'w') as f:
        for _ in range(count):
            f.write(str(randint(0, 999)))
            f.write(" ")


if __name__ == '__main__':
    main()
| true |
cd86d1d003e3c0efabfb2d6175b443c5d297f96d | Python | anju-netty/pylearning | /hands_on_exercise.py | UTF-8 | 1,263 | 4.75 | 5 | [] | no_license | """Intro to Python - Part 1 - Hands-On Exercise."""
import math
import random
# TODO: Write a print statement that displays both the type and value of `pi`
pi = math.pi
print("Type of pi is {} and value of pi is {}".format(type(pi),pi))

# TODO: Write a conditional to print out if `i` is less than or greater than 50
# Note: randint(0, 100) is inclusive, and neither branch covers i == 50,
# so nothing is printed in that case.
i = random.randint(0, 100)
if i < 50 :
    print(i," is less than 50")
elif i > 50:
    print(i," is greater than 50")

# TODO: Write a conditional that prints the color of the picked fruit
picked_fruit = random.choice(['orange', 'strawberry', 'banana'])
print("You picked fruit ",picked_fruit)
if picked_fruit == "orange":
    print("color of {} is Orange".format(picked_fruit))
elif picked_fruit == "strawberry":
    print("color of {} is Red".format(picked_fruit))
elif picked_fruit == "banana":
    print("color of {} is Yellow".format(picked_fruit))
else:
    # Unreachable: random.choice always returns one of the three fruits above.
    print("no color")
# TODO: Write a function that multiplies two numbers and returns the result
# Define the function here.
def multiply(num1, num2):
    """Return the product of *num1* and *num2*."""
    product = num1 * num2
    return product


# TODO: Now call the function a few times to calculate the following answers
print("12 x 96 =", multiply(12, 96))
print("48 x 17 =", multiply(48, 17))
print("196523 x 87323 =", multiply(196523, 87323))
| true |
1a1f5f6a577b073164e66375c4543f44aee18c57 | Python | elenaisnanocat/Algorithm | /SWEA/algorithm수업_1/swea_12166_NUMBER OF INVERSION.py | UTF-8 | 675 | 2.875 | 3 | [] | no_license | def merge_sort(s, e):
global A, result
if s == e - 1:
return
mid = (s + e) // 2
l = s
r = mid
merge_sort(s, mid)
merge_sort(mid, e)
merged_arr = []
while l < mid and r < e:
if A[l] > A[r]:
merged_arr.append(A[r])
r += 1
result += mid - l
else:
merged_arr.append(A[l])
l += 1
merged_arr.extend(A[l:mid])
merged_arr.extend(A[r:e])
A[s:e] = merged_arr[:]
# Driver: for each test case read N and the array, then count inversions.
T = int(input())
for case in range(1, T + 1):
    N = int(input())
    A = list(map(int, input().split()))
    result = 0  # inversion counter, updated inside merge_sort
    merge_sort(0, N)
    print('#{} {}'.format(case, result))
51eadb13ff6e6bf3585c4d1a3de1b77959230490 | Python | elpenor23/SotaStats | /utils/database.py | UTF-8 | 1,813 | 2.9375 | 3 | [] | no_license | #!/usr/bin/python3
import mysql.connector
from mysql.connector import errorcode
def connect_to_db():
    """Open a connection to the local `sota_stats` MySQL database.

    Returns the connection object on success.  On a mysql.connector.Error
    the problem is printed and the function falls through, implicitly
    returning None -- NOTE(review): callers do not check for None and would
    fail on `.cursor()`; consider raising instead.  The password here is a
    placeholder and presumably replaced at deploy time -- confirm.
    """
    try:
        db = mysql.connector.connect(user='user',
                                     password='*****',
                                     host='localhost',
                                     database='sota_stats')
    except mysql.connector.Error as err:
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
    else:
        return db
def add_activation_record(activation_date,
                          activation_callsign,
                          summit_association_code,
                          summit_region_code,
                          summit_code,
                          summit_name,
                          summit_points,
                          activation_number_of_qso):
    """Insert one raw SOTA activator row into `raw_activator_data`.

    MySQL errors are printed (best-effort logging) rather than raised,
    matching the original behaviour.

    Fixes: the cursor and connection are now closed in a `finally` block,
    so they are released even when an unexpected (non-mysql) exception
    escapes `execute`/`commit` -- previously they leaked in that case.
    """
    query = ("INSERT INTO raw_activator_data "
             "(activation_date, activation_callsign, summit_association_code, summit_region_code, summit_code, summit_name, summit_points, activation_number_of_qso) "
             "VALUES (%s, %s, %s, %s, %s, %s, %s, %s)")
    data = (activation_date, activation_callsign, summit_association_code, summit_region_code, summit_code, summit_name, summit_points, activation_number_of_qso)

    db = connect_to_db()
    cursor = db.cursor()
    try:
        cursor.execute(query, data)
        db.commit()
    except mysql.connector.Error as err:
        print("Something went wrong: {}".format(err))
        print("Query: " + query)
        print("Data: " + str(data))
    finally:
        # Always release the cursor and the connection.
        cursor.close()
        db.close()
| true |
07e462e863a7c18b4b9b40a8d83ae668d01b31c0 | Python | juliafox8/cm-codes | /Lab_2/q1.py | UTF-8 | 832 | 3.375 | 3 | [] | no_license | import tkinter
from tkinter import Canvas
def olympic_rings():
    """Draw a small Olympic-rings figure in a Tkinter window (blocks on mainloop)."""
    window = tkinter.Tk()
    c = Canvas(window, width=600, height=400)
    # (outer bbox, inner white bbox, ring colour, explicit outline or None)
    rings = [
        ((10, 10, 50, 50), (15, 15, 45, 45), "blue", None),
        ((50, 10, 90, 50), (55, 15, 85, 45), "black", None),
        ((90, 10, 130, 50), (95, 15, 125, 45), "red", "black"),
        ((30, 45, 70, 85), (35, 50, 65, 80), "yellow", None),
        ((70, 45, 110, 85), (75, 50, 105, 80), "green", None),
    ]
    for outer, inner, colour, outline in rings:
        # Each "ring" is a coloured disc with a smaller white disc on top.
        if outline is None:
            c.create_oval(*outer, fill=colour)
            c.create_oval(*inner, fill="white")
        else:
            c.create_oval(*outer, fill=colour, outline=outline)
            c.create_oval(*inner, fill="white", outline=outline)
    c.pack()
    window.mainloop()


olympic_rings()
9c10ba82d4e6dafc369a6394b60c66fc584bab53 | Python | ErmantrautJoel/Python | /Funciones de Tkinter.py | UTF-8 | 973 | 3.515625 | 4 | [] | no_license | # Funciones de la libreria GUI Tkinter
# FUNCIONES DEL BOTON
import sys # Funcion sys.exit que cierra el programa
from Tkinter import * # Importa todas la funciones de la libreria
button = Button(None, text='Hello World', command=sys.exit) # arg1:ventana, arg2:texto arg3:funcion
button.pack() # Empaquetado
button.mainloop()
root = Tk() # Se crea ventana
Button(root, text='press', command=root.quit).pack(side=LEFT) # Alineacion del boton a la izquierda
root.mainloop()
root = Tk() # Se crea ventana
Button(root, text='press', command=root.quit).pack(side=LEFT, expand=YES, fill=X) # fill=X,Y,BOTH - Expande el boton
root.mainloop()
def hello(event):
print "Press twice to exit"
def quit(event):
print "Hello. i must be going"
import sys; sys.exit()
button = Button(None, text="Hello World")
button.pack()
button.bind('<Button-l>', hello)
button.bind('<Double-l>', quit)
button.mainloop()
# FUNCIONES DEL LABEL
| true |
57192b4770035ed1b081672d62a8fc501b760637 | Python | somjeat/pythainlp | /pythainlp/romanization/royin.py | UTF-8 | 21,893 | 2.59375 | 3 | [
"Apache-2.0",
"Swift-exception"
] | permissive | # -*- coding: utf-8 -*-
from __future__ import absolute_import,division,unicode_literals,print_function
'''
โมดูลถอดเสียงไทยเป็นอังกฤษ
พัฒนาต่อจาก new-thai.py
พัฒนาโดย นาย วรรณพงษ์ ภัททิยไพบูลย์
เริ่มพัฒนา 20 มิ.ย. 2560
'''
from pythainlp.tokenize import word_tokenize
from pythainlp.tokenize import tcc
#from pythainlp.tokenize import etcc
import re
# Romanization table for Thai consonants following the Royal Institute
# system: each entry maps a consonant to [initial-position, final-position]
# Latin transcriptions.
consonants = { # consonant: [as initial, as final]
    'ก':['k','k'],
    'ข':['kh','k'],
    'ฃ':['kh','k'],
    'ค':['kh','k'],
    'ฅ':['kh','k'],
    'ฆ':['kh','k'],
    'ง':['ng','ng'],
    'จ':['ch','t'],
    'ฉ':['ch','t'],
    'ช':['ch','t'],
    'ซ':['s','t'],
    'ฌ':['ch','t'],
    'ญ':['y','n'],
    'ฎ':['d','t'],
    'ฏ':['t','t'],
    'ฐ':['th','t'],
    'ฑ':['th','t'], #* as an initial this letter may also be romanized 'd'
    'ฒ':['th','t'],
    'ณ':['n','n'],
    'ด':['d','t'],
    'ต':['t','t'],
    'ถ':['th','t'],
    'ท':['th','t'],
    'ธ':['th','t'],
    'น':['n','n'],
    'บ':['b','p'],
    'ป':['p','p'],
    'ผ':['ph','p'],
    'ฝ':['f','p'],
    'พ':['ph','p'],
    'ฟ':['f','p'],
    'ภ':['ph','p'],
    'ม':['m','m'],
    'ย':['y',''],
    'ร':['r','n'],
    'ล':['l','n'],
    'ว':['w',''],
    'ศ':['s','t'],
    'ษ':['s','t'],
    'ส':['s','t'],
    'ห':['h',''],
    'ฬ':['l','n'],
    'อ':['',''],
    'ฮ':['h','']
}
# Character class matching any Thai consonant (used in the regexes below).
consonants_thai= u'[กขฃคฅฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรลวศษสหฬฮ]'
def deletetone(data):
    '''Strip Thai tone marks and karan-silenced letters from *data*.'''
    # Remove all four tone marks in a single character-class substitution.
    data = re.sub(u'[่้๊๋]', '', data)
    # Drop every letter silenced by thanthakhat (the karan mark), together
    # with the mark itself.
    data = re.sub(u'\\w์', '', data, flags=re.U)
    return data
def romanization(text):
'''
romanization(str)
'''
text=deletetone(text)
text1=word_tokenize(text,engine='newmm')
textdata=[]
#print(text1)
for text in text1:
#a1=etcc.etcc(text)
a2=tcc.tcc(text)
text=re.sub('//','/',a2)
if re.search(u'เ\w'+'ี'+'ย/ว',text, re.U):
'''
จัดการกับ เอียว
'''
#print('เอียว')
search=re.findall(u'เ\w'+'ี'+'ย/ว',text, re.U)
for i in search:
text=re.sub(i,list(i)[1]+'iao',text,flags=re.U)
if re.search(u'แ\w'+'็'+'ว',text, re.U):
'''
จัดการกับ แอ็ว
'''
#print('แอ็ว')
search=re.findall(u'แ\w'+'็'+'ว',text, re.U)
for i in search:
text=re.sub(i,list(i)[1]+'aeo',text,flags=re.U)
if re.search(u'แ\w/\w'+'็/'+'ว',text, re.U):
'''
จัดการกับ แออ็ว
'''
#print('แออ็ว')
search=re.findall(u'แ\w/\w'+'็/'+'ว',text, re.U)
for i in search:
text=re.sub(i,list(i)[1]+list(i)[3]+'aeo',text,flags=re.U)
if re.search(u'แ\w/'+'ว',text, re.U):
'''
จัดการกับ แอว
'''
#print('แอว')
search=re.findall(u'แ\w/'+'ว',text, re.U)
for i in search:
text=re.sub(i,list(i)[1]+'aeo',text,flags=re.U)
if re.search(u'เ\w/ว',text, re.U):
'''
จัดการกับ เอว
'''
#print('เอว')
search=re.findall(u'เ\w/ว',text, re.U)
for i in search:
text=re.sub(i,list(i)[1]+'eo',text,flags=re.U)
if re.search(u'เ\w็ว',text, re.U):
'''
จัดการกับ เอ็ว
'''
#print('เอ็ว')
search=re.findall(u'เ\w็ว',text, re.U)
for i in search:
text=re.sub(i,list(i)[1]+'eo',text,flags=re.U)
if re.search(u'เ\wียะ',text, re.U):
'''
จัดการกับ เอียะ
'''
#print('เอียะ')
search=re.findall(u'เ\wียะ',text, re.U)
for i in search:
text=re.sub(i,list(i)[1]+'ia',text,flags=re.U)
if re.search(u'เ\wีย',text, re.U):
'''
จัดการกับ เอีย (1)
'''
#print('เอีย 1')
search=re.findall(u'เ\wีย',text, re.U)
for i in search:
text=re.sub(i,list(i)[1]+'ia',text,flags=re.U)
if re.search(u'เ\w/ีย',text, re.U):
'''
จัดการกับ เอีย (2)
'''
#print('เอีย 2')
search=re.findall(u'เ\w/ีย',text, re.U)
for i in search:
text=re.sub(i,list(i)[1]+'ia',text,flags=re.U)
if re.search(u'เ\wือ/ย',text, re.U):
'''
จัดการกับ เอือย
'''
#print('เอือย')
search=re.findall(u'เ\wือ/ย',text, re.U)
for i in search:
text=re.sub(i,list(i)[1]+'ueai',text,flags=re.U)
if re.search(u'เ\wือะ',text, re.U):
'''
จัดการกับ เอือะ
'''
#print('เอือะ')
search=re.findall(u'เ\wือะ',text, re.U)
for i in search:
text=re.sub(i,list(i)[1]+'uea',text,flags=re.U)
if re.search(u'เ\wือ',text, re.U):
'''
จัดการกับ เอือ
'''
#print('เอือ')
search=re.findall(u'เ\wือ',text, re.U)
for i in search:
text=re.sub(i,list(i)[1]+'uea',text,flags=re.U)
if re.search(u'โ\w/ย',text, re.U):
'''
จัดการกับ โอย
'''
#print('โอย')
search=re.findall(u'โ\w/ย',text, re.U)
for i in search:
text=re.sub(i,list(i)[1]+'oi',text,flags=re.U)
if re.search(u'\w/อ/ย',text, re.U):
'''
จัดการกับ ออย
'''
#print('ออย')
search=re.findall(u'\w/อ/ย',text, re.U)
for i in search:
text=re.sub(i,list(i)[0]+'oi',text,flags=re.U)
if re.search(u'โ\wะ',text, re.U):
'''
จัดการกับ โอะ
'''
#print('โอะ')
search=re.findall(u'โ\wะ',text, re.U)
for i in search:
text=re.sub(i,list(i)[1]+'o',text,flags=re.U)
if re.search(u'โ\w',text, re.U):
'''
จัดการกับ โอ
'''
#print('โอ')
search=re.findall(u'โ\w',text, re.U)
for i in search:
text=re.sub(i,list(i)[1]+'o',text,flags=re.U)
if re.search(u'เ/\wา/ะ/',text, re.U):
'''
จัดการกับ เอาะ (1)
'''
#print('เอาะ 1')
search=re.findall(u'เ/\wา/ะ/',text, re.U)
for i in search:
text=re.sub(i,list(i)[2]+'o',text,flags=re.U)
if re.search(u'เ\wาะ',text, re.U):
'''
จัดการกับ เอาะ (2)
'''
#print('เอาะ 2')
search=re.findall(u'เ\wาะ',text, re.U)
for i in search:
text=re.sub(i,list(i)[1]+'o',text,flags=re.U)
if re.search(u'อำ',text, re.U):
'''
จัดการกับ อำ
'''
#print('อำ')
search=re.findall(u'อำ',text, re.U)
for i in search:
text=re.sub(i,'am',text,flags=re.U)
if re.search(u'อี',text, re.U):
'''
จัดการกับ อี
'''
#print('"อี"')
search=re.findall(u'อี',text, re.U)
for i in search:
text=re.sub(i,'i',text,flags=re.U)
# เออ
if re.search(u'เ\w/อ',text, re.U):
'''
จัดการกับ เออ
'''
#print('เออ')
search=re.findall(u'เ\w/อ',text, re.U)
for i in search:
text=re.sub(i,list(i)[1]+'oe',text,flags=re.U)
if re.search(u'\w/อ',text, re.U):
'''
จัดการกับ ออ
'''
#print('ออ')
search=re.findall(u'\w/อ',text, re.U)
for i in search:
text=re.sub(i,list(i)[0]+'o',text,flags=re.U)
if re.search(u'\wัวะ',text, re.U):
'''
จัดการกับ อัวะ
'''
#print('อัวะ')
search=re.findall(u'\wัวะ',text, re.U)
for i in search:
text=re.sub(i,list(i)[0]+'ua',text,flags=re.U)
if re.search(u'\wัว',text, re.U):
'''
จัดการกับ อัว
'''
#print('อัว')
search=re.findall(u'\wัว',text, re.U)
for i in search:
text=re.sub(i,list(i)[0]+'ua',text,flags=re.U)
# ใอ,อัย , อาย
if re.search(u'ใ\w',text, re.U):
'''
จัดการกับ ใอ
'''
#print('ใอ')
search=re.findall(u'ใ\w',text, re.U)
for i in search:
text=re.sub(i,list(i)[1]+'ai',text,flags=re.U)
if re.search(u'\wัย',text, re.U):
'''
จัดการกับ อัย
'''
#print('อัย')
search=re.findall(u'\wัย',text, re.U)
for i in search:
text=re.sub(i,list(i)[0]+'ai',text,flags=re.U)
if re.search(u'\wา/ย',text, re.U):
'''
จัดการกับ อาย
'''
#print('อาย')
search=re.findall(u'\wา/ย',text, re.U)
for i in search:
text=re.sub(i,list(i)[0]+'ai',text,flags=re.U)
#เอา, อาว
if re.search(u'เ\wา',text, re.U):
'''
จัดการกับ เอา
'''
#print('เอา')
search=re.findall(u'เ\wา',text, re.U)
for i in search:
text=re.sub(i,list(i)[1]+'ao',text,flags=re.U)
if re.search(u'\wา/ว',text, re.U):
'''
จัดการกับ อาว
'''
#print('อาว')
search=re.findall(u'\wา/ว',text, re.U)
for i in search:
text=re.sub(i,list(i)[0]+'ao',text,flags=re.U)
#อุย
if re.search(u'\wุ/ย',text, re.U):
'''
จัดการกับ อุย
'''
#print('อุย')
search=re.findall(u'\wุ/ย',text, re.U)
for i in search:
text=re.sub(i,list(i)[0]+'ui',text,flags=re.U)
#เอย
if re.search(u'เ\w/ย',text, re.U):
'''
จัดการกับ เอย
'''
#print('เอย')
search=re.findall(u'เ\w/ย',text, re.U)
for i in search:
text=re.sub(i,list(i)[1]+'oei',text,flags=re.U)
# แอะ, แอ
if re.search(u'แ\wะ',text, re.U):
'''
จัดการกับ แอะ
'''
#print('แอะ')
search=re.findall(u'แ\wะ',text, re.U)
for i in search:
text=re.sub(i,list(i)[1]+'ae',text,flags=re.U)
if re.search(u'แ\w',text, re.U):
'''
จัดการกับ แอ
'''
#print('แอ')
search=re.findall(u'แ\w',text, re.U)
for i in search:
text=re.sub(i,list(i)[1]+'ae',text,flags=re.U)
# เอะ
if re.search(u'เ\wะ',text, re.U):
'''
จัดการกับ เอะ
'''
#print('เอะ')
search=re.findall(u'เ\wะ',text, re.U)
for i in search:
text=re.sub(i,list(i)[1]+'e',text,flags=re.U)
# อิว
if re.search(u'\wิ/ว',text, re.U):
'''
จัดการกับ อิว
'''
#print('อิว')
search=re.findall(u'\wิ/ว',text, re.U)
for i in search:
text=re.sub(i,list(i)[0]+'io',text,flags=re.U)
# อวย
if re.search(u'\w/ว/ย',text, re.U):
'''
จัดการกับ อวย
'''
#print('อวย')
search=re.findall(u'\w/ว/ย',text, re.U)
for i in search:
text=re.sub(i,list(i)[0]+'uai',text,flags=re.U)
# -ว-
if re.search(u'\w/ว/\w',text, re.U):
'''
จัดการกับ -ว-
'''
#print('-ว-')
search=re.findall(u'\w/ว/\w',text, re.U)
for i in search:
text=re.sub(i,list(i)[0]+'ua'+list(i)[4],text,flags=re.U)
# เ–็,เอ
if re.search(u'เ\w'+'็',text, re.U):
'''
จัดการกับ เ–็
'''
#print('เ–็')
search=re.findall(u'เ\w'+'็',text, re.U)
for i in search:
text=re.sub(i,list(i)[1]+'e',text,flags=re.U)
if re.search(u'เ\w/',text, re.U):
'''
จัดการกับ เอ
'''
#print('เอ')
search=re.findall(u'เ\w/',text, re.U)
for i in search:
text=re.sub(i,list(i)[1]+'e',text,flags=re.U)
#ไอย
if re.search(u'ไ\w/ย',text, re.U):
'''
จัดการกับ ไอย
'''
#print('ไอย')
search=re.findall(u'ไ\w/ย',text, re.U)
for i in search:
text=re.sub(i,list(i)[1]+'ai',text,flags=re.U)
#ไอ
if re.search(u'ไ\w',text, re.U):
'''
จัดการกับ ไอ
'''
#print('ไอ')
search=re.findall(u'ไ\w',text, re.U)
for i in search:
text=re.sub(i,list(i)[1]+'ai',text,flags=re.U)
#อะ
if re.search(u'\wะ',text, re.U):
'''
จัดการกับ อะ
'''
#print('อะ')
search=re.findall(u'\wะ',text, re.U)
for i in search:
text=re.sub(i,list(i)[0]+'a',text,flags=re.U)
# –ั
if re.search(u'\wั',text, re.U):
'''
จัดการกับ –ั
'''
#print('–ั ')
search=re.findall(u'\wั',text, re.U)
for i in search:
text=re.sub(i,list(i)[0]+'a',text,flags=re.U)
# รร
if re.search(u'\w/ร/ร/\w[^ก-ฮ]',text, re.U):
'''
จัดการกับ -รร-
'''
#print('-รร- 1')
search=re.findall(u'\w/ร/ร/\w[^ก-ฮ]',text, re.U)
for i in search:
text=re.sub(i,list(i)[0]+'an'+list(i)[6]+list(i)[7],text,flags=re.U)
if re.search(u'\w/ร/ร/',text, re.U):
'''
จัดการกับ -รร-
'''
#print('-รร- 2')
search=re.findall(u'\w/ร/ร/',text, re.U)
for i in search:
text=re.sub(i,list(i)[0]+'a',text,flags=re.U)
#อา
if re.search(u'อา',text, re.U):
'''
จัดการกับ อา 1
'''
#print('อา 1')
search=re.findall(u'อา',text, re.U)
for i in search:
text=re.sub(i,'a',text,flags=re.U)
if re.search(u'\wา',text, re.U):
'''
จัดการกับ อา 2
'''
#print('อา 2')
search=re.findall(u'\wา',text, re.U)
for i in search:
text=re.sub(i,list(i)[0]+'a',text,flags=re.U)
#อำ
if re.search(u'\wำ',text, re.U):
'''
จัดการกับ อำ 1
'''
#print('อำ 1')
search=re.findall(u'\wำ',text, re.U)
for i in search:
text=re.sub(i,list(i)[0]+'am',text,flags=re.U)
#อิ , อี
if re.search(u'\wิ',text, re.U):
'''
จัดการกับ อิ
'''
#print('อิ')
search=re.findall(u'\wิ',text, re.U)
for i in search:
text=re.sub(i,list(i)[0]+'i'+'/',text,flags=re.U)
if re.search(u'\wี',text, re.U):
'''
จัดการกับ อี
'''
#print('อี')
search=re.findall(u'\wี',text, re.U)
for i in search:
text=re.sub(i,list(i)[0]+'i'+'/',text,flags=re.U)
#อึ , อื
if re.search(u'\wึ',text, re.U):
'''
จัดการกับ อึ
'''
#print('อึ')
search=re.findall(u'\wึ',text, re.U)
for i in search:
text=re.sub(i,list(i)[0]+'ue'+'/',text,flags=re.U)
if re.search(u'\wื',text, re.U):
'''
จัดการกับ อื
'''
#print('อื')
search=re.findall(u'\wื',text, re.U)
for i in search:
text=re.sub(i,list(i)[0]+'ue'+'/',text,flags=re.U)
#อุ , อู
if re.search(u'\wุ',text, re.U):
'''
จัดการกับ อุ
'''
#print('อุ')
search=re.findall(u'\wุ',text, re.U)
for i in search:
text=re.sub(i,list(i)[0]+'u'+'/',text,flags=re.U)
if re.search(u'\wู',text, re.U):
'''
จัดการกับ อู
'''
#print('อู')
search=re.findall(u'\wู',text, re.U)
for i in search:
text=re.sub(i,list(i)[0]+'u'+'/',text,flags=re.U)
if re.search(r'[^กขฃคฅฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรลวศษสหฬฮ]',text, re.U):
'''
ใช้ในกรณีคำนั้นมีสระด้วย จะได้เอาพยัญชนะตัวแรกไปเทียบ
'''
d=re.search(consonants_thai,text,re.U)
text=re.sub(d.group(0),consonants[d.group(0)][0],text,flags=re.U)
listtext=list(text)
#print(listtext,0)
if re.search(consonants_thai,listtext[0], re.U):
'''
จัดการกับพยัญชนะต้น
'''
listtext[0]=consonants[listtext[0]][0]
two=False
#print(listtext,1)
if len(listtext)==2:
if re.search(consonants_thai,listtext[1], re.U):
'''
จัดการกับพยัญชนะ 2 ตัว และมีแค่ 2 ตั และมีแค่ 2 ตัวติดกันในคำ
'''
listtext.append(consonants[listtext[1]][1])
listtext[1]='o'
two=True
elif (len(listtext)==3 and listtext[1]=='/'):
#print(listtext,2)
if re.search(consonants_thai,listtext[2], re.U) and re.search(r'[ก-ฮ]',listtext[2], re.U):
'''
กร
ผ่าน tcc เป็น ก/ร แก้ไขในกรณีนี้
'''
listtext[1]='o'
listtext[2]=consonants[listtext[2]][1]
two=True
else:
two=False
i=0
while i<len(listtext) and two==False:
if re.search(consonants_thai,listtext[i], re.U):
'''
ถ้าหากเป็นพยัญชนะ
'''
listtext[i]=consonants[listtext[i]][1]
i+=1
text=''.join(listtext) # คืนค่ากลับสู่ str
#print(text)
textdata.append(re.sub('/','',text))
return ''.join(textdata)
if __name__ == '__main__':
    # Smoke test: romanize word-by-word (syllable-by-syllable) and join the
    # parts.  The repeated ('สรร', 'หา') entry checks that calling the
    # function twice with the same input yields the same output.
    demo_words = [('วัน', 'นะ', 'พง'),
                  ('นัด', 'ชะ', 'โนน'),
                  ('สรรพ',),
                  ('สรร', 'หา'),
                  ('สรร', 'หา'),
                  ('แมว',)]
    for syllables in demo_words:
        print(''.join(romanization(s) for s in syllables))
    # Compare the romanization of 'กร' with that of 'กอน'.
    print(romanization('กร') == romanization('กอน'))
| true |
b6f7b5ad8a2610a2d9794e84cc8e1fe8a60d25ec | Python | madhavchekka/SortingAlgos | /randomquicksort.py | UTF-8 | 1,623 | 4.0625 | 4 | [] | no_license | import random as r
def randomquicksort(a):
    """Sort the list ``a`` in place with randomized quicksort and return it.

    A pivot is chosen uniformly at random in every recursion step, which
    makes the expected running time O(n log n) independent of the input
    order.  The per-comparison debug ``print`` calls of the original
    implementation have been removed; the partition logic is unchanged.

    Args:
        a: list of mutually comparable items; it is modified in place.
    Returns:
        The same list object, sorted ascending (returned for convenience).
    """
    def _sort(lo, hi):
        # Recursion bottoms out on sub-ranges of length 0 or 1.
        if lo >= hi:
            return
        # Move a randomly chosen pivot to the front of the range.
        pivot_pos = r.randint(lo, hi)
        a[lo], a[pivot_pos] = a[pivot_pos], a[lo]
        pivot = a[lo]
        i = lo + 1
        j = hi
        # Partition: after the loop, a[lo+1..j] <= pivot and a[j+1..hi] >= pivot.
        while i <= j:
            if a[i] <= pivot:
                i += 1
            elif a[j] >= pivot:
                j -= 1
            else:
                a[i], a[j] = a[j], a[i]
        # Put the pivot between the two halves; it is now in final position.
        a[lo], a[j] = a[j], a[lo]
        _sort(lo, j - 1)
        _sort(j + 1, hi)

    _sort(0, len(a) - 1)
    return a
if __name__ == '__main__':
    # Demo run on a fixed sample array; randomquicksort sorts it in place
    # and also returns the list, so the second print shows the sorted data.
    arrA = [3,44,38,5,5,47,15,36,26,27,2,46,4,19,50,48]
    print(f'Input array: {arrA}')
    print(randomquicksort(arrA))
    #sortedA = quicksort(arrA)
#print(sortedA) | true |
c697463449ec2f84c259cffef637e55564efc0ae | Python | testpushkarchauhan92/bharath_python_core_advanced | /Lesson08Functions/P012KeywordArguments.py | UTF-8 | 263 | 4.0625 | 4 | [] | no_license | def average(a,b):
print('a : ', a)
print('b : ', b)
return (a+b)/2
# Old way: positional arguments (kept here only for comparison).
# print(average(10,90))
# New way: keyword arguments.
print(average(a=10,b=90))
# With keyword arguments the argument order does not matter; these are
# called 'keyword arguments'.
print(average(b=10,a=90))
| true |
8f39213348160ab0c475ea808f8d162d90f37c59 | Python | BaronJake/python4biologists | /chapt_6/chapt6.py | UTF-8 | 2,299 | 4.03125 | 4 | [] | no_license | """
various conditional statement practice problems with input coming from .csv file
"""
# function to return AT content
def at_content(seq):
    """Return the fraction of A/T bases in ``seq`` (case-insensitive)."""
    bases = seq.upper()
    return (bases.count("A") + bases.count("T")) / len(bases)
# Read all lines of the input CSV (columns: species,sequence,gene,expression).
with open("input/data.csv") as file:
    data = file.readlines()
# Exercise 1: print gene names if the species is D. melanogaster or D. simulans.
# The print("\n") after each loop separates the outputs of the exercises.
for line in data:
    spec_name, sequence, gene_name, expression_level = line.split(",")
    expression_level = int(expression_level.strip())
    if "Drosophila melanogaster" in spec_name or "Drosophila simulans" in spec_name:
        print(gene_name)
print("\n")
# Exercise 2: print the gene name if the sequence length is strictly between 90 and 110.
for line in data:
    spec_name, sequence, gene_name, expression_level = line.split(",")
    expression_level = int(expression_level.strip())
    length = len(sequence)
    if 90 < length < 110:
        print(gene_name)
print("\n")
# Exercise 3: print gene names with AT content above 0.5 and expression level above 200.
for line in data:
    spec_name, sequence, gene_name, expression_level = line.split(",")
    expression_level = int(expression_level.strip())
    if at_content(sequence) > 0.5 and expression_level > 200:
        print(gene_name)
print("\n")
# Exercise 4: print gene names for species other than D. melanogaster
# whose name starts with "k" or "h".
for line in data:
    spec_name, sequence, gene_name, expression_level = line.split(",")
    expression_level = int(expression_level.strip())
    if "Drosophila melanogaster" != spec_name and (
        gene_name.startswith("k") or gene_name.startswith("h")
    ):
        print(gene_name)
print("\n")
# Exercise 5: classify each gene's AT content as High (>0.65), Low (<0.45) or Medium.
for line in data:
    spec_name, sequence, gene_name, expression_level = line.split(",")
    expression_level = int(expression_level.strip())
    qual_at_content = None
    if at_content(sequence) > 0.65:
        qual_at_content = "High"
    elif at_content(sequence) < 0.45:
        qual_at_content = "Low"
    else:
        qual_at_content = "Medium"
    print(f"The AT content for {gene_name} is {qual_at_content}")
| true |
94a794900f35ef10cfff035b3fd0d4206f8f72ce | Python | ccharlesgb/mapt | /backend/app/core/config.py | UTF-8 | 2,331 | 2.796875 | 3 | [] | no_license | import logging
import os
from typing import Any, Dict, List, Type, Union
from pydantic import BaseSettings, validator
_env_prefix = "mapt_"
class Config(BaseSettings):
    """Base application settings, populated from MAPT_*-prefixed env variables."""
    # Name of the deployment environment (e.g. "local"); its default on each
    # subclass is used as the lookup key when selecting a Config subclass.
    app_env: str
    root_log_level: int = logging.INFO
    title: str = "Mapt"
    description: str = "Shape file uploader/sharing application"
    @validator("root_log_level")
    def is_logging_level(
        cls, v: Union[str, int], values: List[str], **kwargs: Dict[str, Any]
    ) -> int:
        """Coerce a level name ("DEBUG") or a numeric level to its int value."""
        # Weird function it returns the integer repr if it is valid.
        # If you supply it a valid integer like logging.DEBUG it returns the string "DEBUG"
        # Otherwise it returns f"Level {v}" if it isn't valid
        level = logging.getLevelName(v)
        if isinstance(level, int):
            # v was a known level *name* -> getLevelName returned its number.
            return level
        elif isinstance(level, str):
            if not level.startswith("Level "):
                # v was a known numeric level -> keep it as an int.
                return int(v)
        raise ValueError(f"Level name is {level}")
    database_uri: str
    redis_host: str
    redis_db: int
    class Config:
        # pydantic model config: read environment variables with the
        # "mapt_" prefix.
        env_prefix = _env_prefix
class ConfigLocal(Config):
    """
    This config is for when you are in the local development environment
    """
    app_env: str = "local"
    # Defaults for the local setup; presumably docker-compose service
    # names -- confirm against the deployment files.
    redis_host = "redis"
    redis_db = 0
# Registry mapping environment name -> Config subclass, keyed by each
# class's default "app_env" value (currently only "local" -> ConfigLocal).
_configs: Dict[str, Type[Config]] = {
    cfg.__fields__["app_env"].default: cfg for cfg in [ConfigLocal]
}
def get_config_from_environment() -> Config:
    """Select and instantiate the Config subclass named by the environment.

    Reads the ``MAPT_APP_ENV`` environment variable and looks its value up
    in the ``_configs`` registry.

    Returns:
        An instance of the matching Config subclass.
    Raises:
        RuntimeError: when the variable is unset or names an unknown
            environment.
    """
    env_key = f"{_env_prefix.upper()}APP_ENV"
    try:
        app_env = os.environ[env_key]
    except KeyError:
        raise RuntimeError(
            f"FATAL!!! Could not determine configuration class as {env_key} is not defined in the environment"
        )
    try:
        config_klass = _configs[app_env]
    except KeyError:
        # Typo fix: the original message read "Could ont determine".
        raise RuntimeError(
            f"FATAL!!! Could not determine configuration from {env_key}={app_env}"
        )
    return config_klass()
def setup_logging(config: Config) -> None:
    """Reset the root logger and attach a single stream handler.

    Both the handler and the root logger use ``config.root_log_level``;
    previously attached handlers are discarded, so repeated calls do not
    duplicate log output.
    """
    root_logger = logging.getLogger()
    root_logger.handlers = []
    log_format = "%(asctime)s - %(process)s - %(name)s - %(levelname)s - %(message)s"
    handler = logging.StreamHandler()
    handler.setLevel(config.root_log_level)
    handler.setFormatter(logging.Formatter(log_format))
    root_logger.addHandler(handler)
    root_logger.setLevel(config.root_log_level)
| true |
dc62863a9c9f7d3655f1685ab433e19edac3eb05 | Python | souravsaraf2000/Linear-Search | /python37-linear-search/python37-linear-search.py | UTF-8 | 359 | 3.96875 | 4 | [
"MIT"
] | permissive | n=int(input("Enter size of array : "))
arr = []
flag = 0
print('Enter array elements : ')
for i in range(n):
    # BUG FIX: the original wrote "n=arr.append(...)", clobbering n with
    # None (list.append returns None).  The loop only kept working because
    # range(n) had already been evaluated.
    arr.append(int(input()))
key = int(input("Enter element to be searched for : "))
# Linear scan; flag records whether the key was seen.
for x in arr:
    if x == key:
        print('Found at index : ', arr.index(x))
        flag = 1
        break
if flag == 0:
    print('Not Found!')
| true |
9acd762fc1d5e9de92582a5e56a4a1d8260afdf1 | Python | Noonayah/Projet_Fil_Rouge | /ServerWiki.py | UTF-8 | 397 | 2.8125 | 3 | [] | no_license | # coding: utf-8
import socket

# Bind the listening socket under its own name so the `socket` module is
# not shadowed (the original rebound the module name, which is also why
# its final `stock.close()` could never have worked).
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(('', 8080))
while True:
    server.listen(5)
    client, address = server.accept()
    print("{} se connecte".format(address))
    response = client.recv(255)
    # recv() returns bytes; the original compared against the str "" which
    # is never equal to bytes, so even empty payloads were printed.
    if response:
        print(response)
    # Release the per-client connection (the original never closed it).
    client.close()
print("Fermeture du serveur")  # NOTE: unreachable -- the accept loop never exits
server.close()
| true |
bae61eaa2c1cc330ab82d74b39e1dcb8b792be09 | Python | ochinchina/my-tools | /kube-tmpl.py | UTF-8 | 6,135 | 2.578125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/python
import functools
import jinja2
import json
import argparse
import os
import requests
import tempfile
import yaml
class NameItem:
    """One component of a dotted value path, e.g. ``name`` or ``name[2]``.

    A trailing ``[idx]`` marks the component as an array element; the
    bracket part is stripped from ``self.name`` and its integer kept in
    ``self.index`` (``-1`` means "not an array element").
    """

    def __init__(self, name):
        self.name = name
        self.index = -1
        if not name.endswith(']'):
            return
        bracket = name.rfind('[')
        if bracket == -1:
            return
        self.index = int(name[bracket + 1:-1].strip())
        self.name = name[:bracket]

    def is_array(self):
        """Return True when this component carries an ``[index]`` suffix."""
        return self.index >= 0
def is_json_file(filename):
    """Return True for filenames whose extension marks them as JSON (.json or .js)."""
    return filename.endswith((".json", ".js"))
def load_value_file( value_file ):
    """
    Load one configuration file, either from a web server (http/https URL)
    or from the local file system.

    JSON parsing is used for *.json / *.js files, YAML for everything else.

    Returns:
        The parsed content, or None when an HTTP download does not answer
        with a 2xx status code (silent, as in the original).
    """
    if value_file.startswith("http://") or value_file.startswith("https://"):
        r = requests.get(value_file)
        # Floor division keeps the 2xx check correct under both Python 2
        # and Python 3 (the original "/" breaks with true division, e.g.
        # 204 / 100 == 2.04).
        if r.status_code // 100 == 2:
            if is_json_file(value_file):
                return json.loads(r.content)
            return yaml.safe_load(r.content)
    else:
        with open(value_file) as fp:
            if is_json_file(value_file):
                return json.load(fp)
            return yaml.safe_load(fp)
def load_value_files( value_files ):
    """
    Load and merge several .json/.yaml configuration files.

    If the same item appears in multiple files, the value from the later
    file overwrites the earlier one.

    Args:
        value_files: list of configuration file paths/URLs, each in JSON
                     or YAML format
    Returns:
        dict with the merged configuration items
    """
    result = {}
    for value_file in value_files:
        # The original also printed the parsed content here (a leftover
        # Python-2 debug statement), which loaded every file twice.
        result.update(load_value_file(value_file))
    return result
def parse_values( values ):
    """
    Parse "key=value" assignments from the command line into a nested dict.

    Args:
        values: a list of strings, each in one of the forms
            name1.name2.name3=value
            name1[0].name2.name3=value
        (":" is accepted as the separator when no "=" is present)
    Returns:
        a (possibly nested) dictionary of the parsed values
    """
    result = {}
    for value in values:
        pos = value.find('=')
        if pos == -1:
            pos = value.find(':')
        if pos == -1:
            continue  # no separator: silently skip the entry, as before
        key = value[0:pos].strip()
        v = value[pos + 1:].strip()
        items = [NameItem(w) for w in key.split(".")]
        cur_result = result
        for i, item in enumerate(items):
            last = (i == len(items) - 1)
            if item.is_array():
                # Ensure the list exists and is long enough for the index.
                if item.name not in cur_result:
                    cur_result[item.name] = []
                while len(cur_result[item.name]) <= item.index:
                    cur_result[item.name].append({})
                if last:
                    cur_result[item.name][item.index] = v
                else:
                    cur_result = cur_result[item.name][item.index]
            else:
                if item.name not in cur_result:
                    cur_result[item.name] = {}
                if last:
                    cur_result[item.name] = v
                else:
                    # BUG FIX: descend into the intermediate dict.  The
                    # original never advanced cur_result here, so
                    # "a.b=1" produced {"a": {}, "b": "1"} instead of
                    # {"a": {"b": "1"}}.
                    cur_result = cur_result[item.name]
    return result
def load_template( templateEnv, template_path ):
    """
    Load a jinja2 template from the local file system, an http(s) server
    or s3 storage (fetched via the external "s3cmd" tool).

    Args:
        templateEnv: the jinja2.Environment used to build the template
        template_path: local path, http(s) URL or s3:// URL
    Returns:
        the loaded jinja2 template, or None when an HTTP download does not
        answer with a 2xx status code (silent, as in the original)
    """
    if template_path.startswith("http://") or template_path.startswith("https://"):
        r = requests.get(template_path)
        # Floor division keeps the 2xx check correct under Python 2 and 3.
        if r.status_code // 100 == 2:
            return templateEnv.from_string(r.content)
    elif template_path.startswith("s3://"):
        f, temp_file = tempfile.mkstemp()
        os.close(f)
        # NOTE: template_path is interpolated into a shell command line;
        # only trusted paths should reach this point.
        os.system("s3cmd get -f %s %s" % (template_path, temp_file))
        content = ""
        with open(temp_file) as fp:
            content = templateEnv.from_string(fp.read())
        os.remove(temp_file)
        return content
    else:
        return templateEnv.get_template(os.path.abspath(template_path))
def change_deployment( args, action ):
    """
    Render the kubernetes template and create/delete the deployment.

    Args:
        args: the parsed command line arguments
        action: the kubectl verb, must be "create" or "delete"
    Returns:
        None.  With --dry-run the rendered template is only printed,
        otherwise kubectl is invoked on a temporary copy of it.
    """
    template_loader = jinja2.FileSystemLoader(searchpath="/")
    template_env = jinja2.Environment(loader=template_loader)
    template = load_template(template_env, args.template)
    # Merge configuration: value files first, then explicit --values overrides.
    config = {}
    if args.value_files:
        config.update(load_value_files(args.value_files))
    if args.values:
        config.update(parse_values(args.values))
    if args.dry_run:
        # Single-argument print(...) behaves identically as a Python 2
        # statement and a Python 3 function call.
        print(template.render(config))
    else:
        with tempfile.NamedTemporaryFile(suffix=".yaml", delete=False) as fp:
            fp.write(template.render(config))
            filename = fp.name
        try:
            os.system("kubectl %s -f %s" % (action, filename))
        except Exception:
            # Best effort, as before -- but the narrowed handler no longer
            # swallows KeyboardInterrupt/SystemExit like the bare except did.
            pass
        os.remove(filename)
def parse_args():
    """Build the command line parser and parse sys.argv.

    Two sub-commands are exposed: "install" creates the rendered
    deployment via kubectl and "delete" removes it.  The chosen handler
    is stored on the returned namespace as ``func``.
    """
    parser = argparse.ArgumentParser(description="generate template")
    parser.add_argument("--template", help="the kubernetes .yaml template file", required=True)
    parser.add_argument("--value-files", help="the configuration files", nargs="*", required=False)
    parser.add_argument("--dry-run", help="run without real action", action="store_true")
    parser.add_argument("--values", help="the values", nargs="*", required=False)
    subparsers = parser.add_subparsers(help="install a project")
    # Each sub-command binds change_deployment with its kubectl action.
    for command, kubectl_action, help_text in (
            ("install", "create", "install a project"),
            ("delete", "delete", "delete a project")):
        sub = subparsers.add_parser(command, help=help_text)
        sub.set_defaults(func=functools.partial(change_deployment, action=kubectl_action))
    return parser.parse_args()
def main():
    # Parse the CLI and dispatch to the handler selected by the
    # sub-command ("install" -> create, "delete" -> delete).
    args = parse_args()
    args.func( args )
if __name__ == "__main__":
main()
| true |
b98b17f03d18bf4d624cd3e9887a93b703e22919 | Python | christopher-burke/programs | /file_searcher/main.py | UTF-8 | 2,024 | 3.453125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
"""File Searcher App."""
import os
import glob
from collections import namedtuple
SearchResult = namedtuple('SearchResult',
'file, line, text')
def main():
    """File Searcher App main function entry."""
    folder = get_folder_from_user()
    if not folder:
        # get_folder_from_user returns None for blank or non-directory input.
        print(f'Can\'t find {folder}.')
        return
    text = get_search_text_from_user()
    if not text:
        print(f'No text, no results.')
        return
    # search_folders is a generator; count the matches as they stream in.
    matches = search_folders(folder, text)
    match_count = 0
    for m in matches:
        match_count += 1
        print(f'{m.file}, line {m.line}>> {m.text}')
    print(f"Found {match_count} matches")
def get_folder_from_user():
    """Get the search folder from the user; None when blank or not a directory."""
    folder = input('What folder do you want to search? ')
    if not folder or not folder.strip():
        return None
    if not os.path.isdir(folder):
        return None
    return os.path.abspath(folder)
def get_search_text_from_user():
    """Get the search text from the user, lower-cased for case-insensitive matching."""
    text = input('Search for [single phrases only]: ')
    return text.lower()
def search_folders(folder, text):
    """Recursively yield SearchResult matches for ``text`` under ``folder``."""
    # glob already returns paths joined with `folder`; the original then
    # re-joined them (os.path.join(folder, item)), which breaks for
    # relative folders, and inconsistently tested isdir(item) while
    # recursing on full_item.  Use the glob result directly.
    items = glob.glob(os.path.join(folder, '*'))
    # Windows alternative (returns bare names, would need an explicit join):
    # items = os.listdir(folder)
    print(f'Searching {folder} for {text}')
    for item in items:
        if os.path.isdir(item):
            yield from search_folders(item, text)
        else:
            yield from search_file(item, text)
def search_file(filename, search_text):
    """Yield a SearchResult for every line of ``filename`` containing ``search_text``."""
    with open(filename, 'r', encoding='utf-8') as fin:
        # 1-based line numbers, matching editor conventions.
        for line_number, line in enumerate(fin, start=1):
            if search_text in line.lower():
                yield SearchResult(line=line_number,
                                   file=filename,
                                   text=line)
if __name__ == "__main__":
main()
| true |
af53d5ff30f8b51c8734b4468b36cbb7b11cf713 | Python | Gaurab6003/sbttk | /src/main/python/test_database.py | UTF-8 | 3,295 | 2.671875 | 3 | [] | no_license | import unittest
from decimal import Decimal
from sqlalchemy import exc
from database import engine, Base, Session, Member, RinLagani, SawaAsuli
class TestDatabase(unittest.TestCase):
    """Integration tests for the Member/RinLagani/SawaAsuli ORM mappings.

    Every test runs against a freshly created schema holding one fixture
    member ("Gaurab") with two rin laganis and three sawa asulis, plus a
    second member ("Sameer") used for the unique-constraint test.
    """

    def setUp(self):
        # Recreate the schema so each test starts from the same fixture.
        Base.metadata.drop_all(engine)
        Base.metadata.create_all(engine)
        member1 = Member(account_no=1, name='Gaurab')
        member2 = Member(account_no=2, name='Sameer')
        rin_lagani1 = RinLagani(date='01/01/2077', amount=Decimal(1000))
        rin_lagani2 = RinLagani(date='01/02/2077', amount=Decimal(2000))
        sawa_asuli1 = SawaAsuli(date='02/01/2077', amount=Decimal(1000),
                                byaj=Decimal(100))
        sawa_asuli2 = SawaAsuli(date='05/02/2077', amount=Decimal(1000),
                                byaj=Decimal(100))
        sawa_asuli3 = SawaAsuli(date='10/02/2077', amount=Decimal(1000),
                                byaj=Decimal(100))
        rin_lagani1.sawa_asulis = [sawa_asuli1, sawa_asuli2]
        rin_lagani2.sawa_asulis = [sawa_asuli3]
        member1.rin_laganis = [rin_lagani1, rin_lagani2]
        with Session.begin() as session:
            session.add(member1)
            session.add(member2)

    def tearDown(self):
        # Drop everything so state cannot leak between tests.
        Base.metadata.drop_all(engine)

    def test_unique_account_no(self):
        """A duplicate account_no must violate the unique constraint."""
        with Session() as session:
            duplicate = Member(account_no=2, name='Duplicate')
            session.add(duplicate)
            self.assertRaises(exc.IntegrityError, session.commit)

    def test_member_rin_lagani_relationship(self):
        """Deleting a member must cascade to its rin laganis."""
        with Session.begin() as session:
            member1 = session.query(Member).filter(Member.name == 'Gaurab') \
                .first()
            self.assertIsNotNone(member1)
            self.assertEqual(len(member1.transactions), 2)
            # Renamed from "id" to avoid shadowing the builtin.
            member_id = member1.id
            session.delete(member1)
        with Session.begin() as session:
            no_of_rin_laganis = session.query(RinLagani).filter(
                RinLagani.member_id == member_id).count()
            self.assertEqual(no_of_rin_laganis, 0)

    def test_member_sawa_asuli_relationship(self):
        """Deleting a member must cascade to its sawa asulis."""
        with Session.begin() as session:
            member1 = session.query(Member).filter(Member.name == 'Gaurab') \
                .first()
            self.assertIsNotNone(member1)
            member_id = member1.id
            session.delete(member1)
        with Session.begin() as session:
            no_of_sawa_asulis = session.query(SawaAsuli).filter(
                SawaAsuli.member_id == member_id).count()
            self.assertEqual(no_of_sawa_asulis, 0)

    def test_rin_lagani_sawa_asuli_relationship(self):
        """Deleting a rin lagani must cascade to its sawa asulis."""
        with Session.begin() as session:
            member1 = session.query(Member).filter(Member.name == 'Gaurab') \
                .first()
            member_id = member1.id
            rin_lagani = session.query(RinLagani).filter(
                RinLagani.member_id == member1.id).first()
            self.assertEqual(len(rin_lagani.transactions), 2)
            session.delete(rin_lagani)
        with Session.begin() as session:
            no_of_sawa_asulis = session.query(SawaAsuli).filter(
                SawaAsuli.member_id == member_id).count()
            self.assertEqual(no_of_sawa_asulis, 0)
| true |
c79eba5db67a5c98ebe6da22024fa0a29d850b9f | Python | Aasthaengg/IBMdataset | /Python_codes/p03241/s599321379.py | UTF-8 | 338 | 2.765625 | 3 | [] | no_license | from math import sqrt
from bisect import bisect_left
N, M = map(int, input().split())
# Find the smallest divisor t of M with t >= N; the complementary divisor
# M // t is printed below as the answer.
t = M
# Enumerate divisor pairs (i, M // i) up to sqrt(M).  The original had a
# duplicated "if M % i == 0" and an unused "count" variable here; both
# removed.
for i in range(1, int(sqrt(M)) + 2, 1):
    if M % i == 0:
        if N <= i and i <= t:
            t = i
        if N <= M // i and M // i <= t:
            t = M // i
print(M//t) | true |
3fe5737dcea48671a2c28dbab57da0f25e6720d8 | Python | zhinan18/Python3 | /code/base/lesson16/listing_16-6.py | UTF-8 | 368 | 2.796875 | 3 | [] | no_license | # Listing_22-6.py
# Copyright Warren & Csrter Sande, 2013
# Released under MIT license http://www.opensource.org/licenses/mit-license.php
# Version $version ----------------------------
# Using pickle to store a list to a file
import pickle
my_list = ['Fred', 73, 'Hello there']
# "with" guarantees the file is closed even if pickle.dump raises
# (the original used explicit open()/close()).
with open('my_p.pkl', 'wb') as pickle_file:
    pickle.dump(my_list, pickle_file)
| true |
36a293245802a115ea44d14ede771e1fa3a80155 | Python | konradmaleckipl/python_bootcamp_20180825 | /zjazd4/praca_z_json/p1.py | UTF-8 | 455 | 3.46875 | 3 | [] | no_license | import json
# Serialize a nested list to a JSON string (json.dumps returns str).
obj1 = ['AAA', 2, 3, ['Konrad', 'Magda']]
print(json.dumps(obj1))
print(type(json.dumps(obj1)))
# Write the list to a file as JSON.
with open('example.json', 'w', encoding='utf-8') as f:
    json.dump(obj1, f)
# Read the file back into a Python list.
with open('example.json', 'r', encoding='utf-8') as f:
    data = json.load(f)
print(data)
print(type(data))
data.append('cos tam')
print(data)
with open('example.json', 'w', encoding='utf-8') as f:
json.dump(data, f) | true |
77e0490149f5f140fe4efe5a537a3680cf1fc836 | Python | toowzh/coalition-3 | /coalition3/visualisation/TRTcells.py | UTF-8 | 25,511 | 2.53125 | 3 | [] | no_license | """ [COALITION3] Plotting locations of TRT cells within training dictionary
and histograms of different TRT statistics"""
from __future__ import division
from __future__ import print_function
import os
import datetime
import shapefile
import numpy as np
import pandas as pd
import matplotlib.pylab as plt
import matplotlib.ticker as ticker
import matplotlib.colors as colors
import matplotlib.patches as patches
import matplotlib.patheffects as pe
import scipy.ndimage.morphology as morph
from PIL import Image
from scipy import ndimage
## =============================================================================
## FUNCTIONS:
## Function that trunctates cmap to a certain range:
def truncate_cmap(cmap, minval=0.0, maxval=1.0, n=100):
    """Return a new colormap restricted to the [minval, maxval] slice of ``cmap``,
    sampled at ``n`` points."""
    subset = cmap(np.linspace(minval, maxval, n))
    cmap_name = 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval)
    return colors.LinearSegmentedColormap.from_list(cmap_name, subset)
## Plot contour lines in 2D histogram, showing the fraction of points within contour line:
def contour_of_2dHist(hist2d_1_data, percentiles=[0,40,60,80,95,100], smooth=True):
    """Transform 2D histogram counts for fraction-enclosing contour lines.

    Each bin is replaced by ``100 - (cumulative share in % of all counts in
    bins with at most this bin's count)``, so contouring the returned array
    at ``levels`` draws lines that enclose the corresponding fractions of
    the points.  The dead ``if True:/else:`` scaffolding of the original
    (an unused percentile-bucket code path) has been removed.

    Args:
        hist2d_1_data: 2D array of histogram counts.
        percentiles: contour levels; the first and last entry are dropped.
            (Default list is never mutated, so the shared default is safe.)
        smooth: apply a light Gaussian filter before contouring.

    Returns:
        (hist_2d_perc, levels): the transposed percentage array and the
        contour levels to use with it.
    """
    flat = hist2d_1_data.flatten()
    order = np.argsort(flat)
    # Cumulative count totals, written back to the original bin positions.
    cumsum_flat = np.zeros(flat.size)
    cumsum_flat[order] = np.cumsum(flat[order])
    counts_total = np.sum(hist2d_1_data)
    hist2d_1_data = cumsum_flat.reshape(hist2d_1_data.shape)
    hist2d_1_data = 100 - hist2d_1_data / counts_total * 100.
    if smooth:
        sigma = hist2d_1_data.shape[0] // 100
        hist2d_1_data_smooth = ndimage.gaussian_filter(hist2d_1_data, sigma)
        hist2d_1_data_smooth[hist2d_1_data == 0] = 0
        hist2d_1_data = hist2d_1_data_smooth
    hist_2d_perc = hist2d_1_data
    levels = percentiles[1:-1]
    return hist_2d_perc.T, levels
def plot_band_TRT_col(axes,TRT_Rank_arr,y_loc_low,bandwidth,arrow_start=None):
    """Draw the white/green/yellow/red TRT severity band with cell counts.

    A horizontal colour band of height `bandwidth` is placed at y_loc_low
    on `axes` (x axis in TRT-rank units 1.2-4.0), and the number of cells
    per severity class is annotated onto the band.  Returns the modified
    axes.  NOTE(review): TRT_Rank_arr appears to hold ranks scaled by 10
    (12 <=> rank 1.2, cf. the /10 rescaling in the caller) -- confirm.
    """
    ## Analyse distribution of ranks
    nw = np.sum(np.logical_and(TRT_Rank_arr>=12, TRT_Rank_arr<15))
    ng = np.sum(np.logical_and(TRT_Rank_arr>=15, TRT_Rank_arr<25))
    ny = np.sum(np.logical_and(TRT_Rank_arr>=25, TRT_Rank_arr<35))
    nr = np.sum(np.logical_and(TRT_Rank_arr>=35, TRT_Rank_arr<=40))
    pw = patches.Rectangle((1.2, y_loc_low), 0.3, bandwidth, facecolor='w')
    pg = patches.Rectangle((1.5, y_loc_low), 1, bandwidth, facecolor='g')
    py = patches.Rectangle((2.5, y_loc_low), 1, bandwidth, facecolor='y')
    pr = patches.Rectangle((3.5, y_loc_low), 0.5, bandwidth, facecolor='r')
    axes.add_patch(pw); axes.add_patch(pg); axes.add_patch(py); axes.add_patch(pr)
    text_loc = y_loc_low+bandwidth/2
    if arrow_start is None:
        arrow_start = y_loc_low+bandwidth*1.5
    # The white-class count gets an arrow annotation (its band segment is
    # too narrow for the label); the other counts are drawn onto the band.
    axes.annotate(str(nw),(1.35,text_loc),(1.25,arrow_start),ha='center',va='center',color='k',arrowprops={'arrowstyle':'->'}) #,arrowprops={arrowstyle='simple'}
    axes.annotate(str(ng),(2,text_loc),ha='center',va='center',color='w')
    axes.annotate(str(ny),(3,text_loc),ha='center',va='center',color='w')
    axes.annotate(str(nr),(3.75,text_loc),ha='center',va='center',color='w')
    return axes
## Print histogram of TRT cell values:
def print_TRT_cell_histograms(samples_df,cfg_set_tds):
    """Print histograms of TRT cell information.

    Produces a 3x2 figure (rank distribution with severity band, cell
    area, monthly/daily cell counts, cells per time step) and saves it as
    TRT_Histogram.pdf under cfg_set_tds["fig_output_path"].
    NOTE(review): samples_df is modified in place ("RANKr" is divided by
    10 and "date" is cast to datetime64) -- callers relying on the
    original columns should pass a copy.
    """
    fig_hist, axes = plt.subplots(3, 2)
    fig_hist.set_size_inches(12, 15)
    ## Analyse distribution of ranks
    """
    nw = np.sum(np.logical_and(samples_df["RANKr"]>=12, samples_df["RANKr"]<15))
    ng = np.sum(np.logical_and(samples_df["RANKr"]>=15, samples_df["RANKr"]<25))
    ny = np.sum(np.logical_and(samples_df["RANKr"]>=25, samples_df["RANKr"]<35))
    nr = np.sum(np.logical_and(samples_df["RANKr"]>=35, samples_df["RANKr"]<=40))
    print(" The number of Cells with TRT Rank w is: %s" % nw)
    print(" The number of Cells with TRT Rank g is: %s" % ng)
    print(" The number of Cells with TRT Rank y is: %s" % ny)
    print(" The number of Cells with TRT Rank r is: %s" % nr)
    pw = patches.Rectangle((1.2, 65000), 0.3, 10000, facecolor='w')
    pg = patches.Rectangle((1.5, 65000), 1, 10000, facecolor='g')
    py = patches.Rectangle((2.5, 65000), 1, 10000, facecolor='y')
    pr = patches.Rectangle((3.5, 65000), 0.5, 10000, facecolor='r')
    axes[0,0].add_patch(pw); axes[0,0].add_patch(pg); axes[0,0].add_patch(py); axes[0,0].add_patch(pr)
    axes[0,0].annotate(str(nw),(1.35,70000),(1.25,90500),ha='center',va='center',color='k',arrowprops={'arrowstyle':'->'}) #,arrowprops={arrowstyle='simple'}
    axes[0,0].annotate(str(ng),(2,70000),ha='center',va='center',color='w')
    axes[0,0].annotate(str(ny),(3,70000),ha='center',va='center',color='w')
    axes[0,0].annotate(str(nr),(3.75,70000),ha='center',va='center',color='w')
    """
    # Severity band (counts use the raw, x10-scaled ranks), then rescale
    # ranks to 0-4 for the histogram below.
    axes[0,0] = plot_band_TRT_col(axes[0,0],samples_df["RANKr"],65000,10000,arrow_start=90500)
    samples_df["RANKr"] = samples_df["RANKr"]/10.
    samples_df["RANKr"].hist(ax=axes[0,0],bins=np.arange(0,4.25,0.25),facecolor=(.7,.7,.7),alpha=0.75,grid=True)
    axes[0,0].set_xlabel("TRT rank")
    axes[0,0].set_title("TRT Rank Distribution")
    samples_df["area"].hist(ax=axes[0,1],bins=np.arange(0,650,50),facecolor=(.7,.7,.7),alpha=0.75,grid=True)
    axes[0,1].set_xlabel("Cell Area [km$^2$]")
    axes[0,1].set_title("Cell Size Distribution")
    # Temporal distributions: per month, per day, and per time step.
    samples_df["date"] = samples_df["date"].astype(np.datetime64)
    samples_df["date"].groupby(samples_df["date"].dt.month).count().plot(kind="bar",ax=axes[1,0],facecolor=(.7,.7,.7),
                                alpha=0.75,grid=True)
    #axes[1,0].set_xlabel("Months")
    axes[1,0].set_xlabel("")
    axes[1,0].set_xticklabels(["Apr","May","Jun","Jul","Aug","Sep"],rotation=45)
    axes[1,0].set_title("Monthly Number of Cells")
    samples_df["date"].groupby([samples_df["date"].dt.month,
                                samples_df["date"].dt.day]).count().plot(kind="bar",
                                ax=axes[1,1],facecolor=(.7,.7,.7),alpha=0.75,edgecolor=(.7,.7,.7),grid=True)
    axes[1,1].get_xaxis().set_ticks([])
    axes[1,1].set_xlabel("Days over period")
    axes[1,1].set_title("Daily Number of Cells")
    samples_df["date"].groupby(samples_df["date"]).count().hist(ax=axes[2,0],bins=np.arange(0,150,10),
                                facecolor=(.7,.7,.7),alpha=0.75,grid=True)
    axes[2,0].set_xlabel("Number of cells")
    axes[2,0].set_title("Number of cells per time step")
    #samples_df["date"].loc[samples_df["RANKr"]>=1].groupby(samples_df["date"]).count().hist(ax=axes[2,1],bins=np.arange(0,65,5),
    #                            facecolor=(.7,.7,.7),alpha=0.75,grid=True)
    #axes[2,1].set_xlabel("Number of cells")
    #axes[2,1].set_title("Number of cells (TRT Rank >= 1)\n per time step")
    axes[2,1].axis('off')
    fig_hist.savefig(os.path.join(cfg_set_tds["fig_output_path"],u"TRT_Histogram.pdf"))
## Print map of TRT cells:
def print_TRT_cell_map(samples_df,cfg_set_tds):
    """Plot TRT cell locations on the CCS4 map, colour-coded by severity,
    and save the figure as TRT_Map.pdf under cfg_set_tds["fig_output_path"]."""
    fig, axes, extent = ccs4_map(cfg_set_tds)
    # Severity categories with their marker colour and size (ascending
    # severity plotted last, so severe cells are drawn on top).
    category_style = (("DEVELOPING",  'w', 18),
                      ("MODERATE",    'g', 22),
                      ("SEVERE",      'y', 26),
                      ("VERY SEVERE", 'r', 30))
    for category, colour, size in category_style:
        in_cat = samples_df["category"] == category
        axes.scatter(samples_df["LV03_x"].loc[in_cat],
                     samples_df["LV03_y"].loc[in_cat],
                     c=colour, edgecolor=(.7,.7,.7), s=size)
    fig.savefig(os.path.join(cfg_set_tds["fig_output_path"],u"TRT_Map.pdf"))
## Print map of TRT cells:
def ccs4_map(cfg_set_tds,figsize_x=12,figsize_y=12,hillshade=True,radar_loc=True,radar_vis=True):
"""Print map of TRT cells."""
## Load DEM and Swiss borders
shp_path_CH = os.path.join(cfg_set_tds["root_path"],u"data/shapefile/swissBOUNDARIES3D_1_3_TLM_LANDESGEBIET.shp")
shp_path_Kantone = os.path.join(cfg_set_tds["root_path"],u"data/shapefile/swissBOUNDARIES3D_1_3_TLM_KANTONSGEBIET.shp")
shp_path_count = os.path.join(cfg_set_tds["root_path"],u"data/shapefile/CCS4_merged_proj_clip_G05_countries.shp")
dem_path = os.path.join(cfg_set_tds["root_path"],u"data/DEM/ccs4.png")
visi_path = os.path.join(cfg_set_tds["root_path"],u"data/radar/radar_composite_visibility.npy")
dem = Image.open(dem_path)
dem = np.array(dem.convert('P'))
sf_CH = shapefile.Reader(shp_path_CH)
sf_KT = shapefile.Reader(shp_path_Kantone)
sf_ct = shapefile.Reader(shp_path_count)
## Setup figure
fig_extent = (255000,965000,-160000,480000)
fig, axes = plt.subplots(1, 1)
fig.set_size_inches(figsize_x, figsize_y)
## Plot altitude / hillshading
if hillshade:
ls = colors.LightSource(azdeg=315, altdeg=45)
axes.imshow(ls.hillshade(-dem, vert_exag=0.05),
extent=fig_extent, cmap='gray', alpha=0.5)
else:
axes.imshow(dem*0.6, extent=fig_extent, cmap='gray', alpha=0.5)
## Get borders of Cantons
try:
shapes_KT = sf_KT.shapes()
except UnicodeDecodeError:
print(" *** Warning: No country shape plotted (UnicodeDecodeErrror)")
else:
for KT_i, shape in enumerate(shapes_KT):
x = np.array([i[0] for i in shape.points[:]])
y = np.array([i[1] for i in shape.points[:]])
endpoint = np.where(x==x[0])[0][1]
x = x[:endpoint]
y = y[:endpoint]
axes.plot(x,y,color='darkred',linewidth=0.5,zorder=5)
## Get borders of neighbouring countries
try:
shapes_ct = sf_ct.shapes()
except UnicodeDecodeError:
print(" *** Warning: No country shape plotted (UnicodeDecodeErrror)")
else:
for ct_i, shape in enumerate(shapes_ct):
if ct_i in [0,1]:
continue
x = np.array([i[0] for i in shape.points[:]])
y = np.array([i[1] for i in shape.points[:]])
x[x<=255000] = 245000
x[x>=965000] = 975000
y[y<=-159000] = -170000
y[y>=480000] = 490000
if ct_i in [3]:
axes.plot(x[20:170],y[20:170],color='black',linewidth=0.5)
if ct_i in [2]:
## Delete common border of FR and CH:
x_south = x[y<=86000]; y_south = y[y<=86000]
x_north = x[np.logical_and(np.logical_and(y>=270577,y<=491000),x>510444)]
#x_north = x[np.logical_and(y>=270577,y<=491000)]
y_north = y[np.logical_and(np.logical_and(y>=270577,y<=491000),x>510444)]
#y_north = y[np.logical_and(y>=270577,y<=491000)]
axes.plot(x_south,y_south,color='black',linewidth=0.5,zorder=4)
axes.plot(x_north,y_north,color='black',linewidth=0.5,zorder=4)
if ct_i in [4]:
## Delete common border of AT and CH:
x_south = x[np.logical_and(x>=831155,y<235000)]
y_south = y[np.logical_and(x>=831155,y<235000)]
#x_north1 = x[np.logical_and(x>=756622,y>=260466)]
x_north1 = x[np.logical_and(np.logical_and(x>=758622,y>=262466),x<=794261)]
#y_north1 = y[np.logical_and(x>=756622,y>=260466)]
y_north1 = y[np.logical_and(np.logical_and(x>=758622,y>=262466),x<=794261)]
y_north2 = y[np.logical_and(np.logical_and(x>=774261,y>=229333),x<=967000)]
x_north2 = x[np.logical_and(np.logical_and(x>=774261,y>=229333),x<=967000)]
y_north2 = np.concatenate([y_north2[np.argmin(x_north2):],y_north2[:np.argmin(x_north2)]])
x_north2 = np.concatenate([x_north2[np.argmin(x_north2):],x_north2[:np.argmin(x_north2)]])
x_LI = x[np.logical_and(np.logical_and(x<=773555,y>=214400),y<=238555)]
y_LI = y[np.logical_and(np.logical_and(x<=773555,y>=214400),y<=238555)]
axes.plot(x_south,y_south,color='black',linewidth=0.5,zorder=4)
axes.plot(x_north1,y_north1,color='black',linewidth=0.5,zorder=4)
axes.plot(x_north2,y_north2,color='black',linewidth=0.5,zorder=4)
axes.plot(x_LI,y_LI,color='black',linewidth=0.5,zorder=4)
else:
continue
#axes.plot(x,y,color='black',linewidth=1,zorder=4)
## Get Swiss borders
try:
#shp_records = sf_CH.shapeRecords()
shapes_CH = sf_CH.shapes()
except UnicodeDecodeError:
print(" *** Warning: No country shape plotted (UnicodeDecodeErrror)")
else:
for ct_i, shape in enumerate(shapes_CH): #sf_CH.shapeRecords():
if ct_i!=0: continue
x = np.array([i[0]-2000000 for i in shape.points[:]])
y = np.array([i[1]-1000000 for i in shape.points[:]])
endpoint = np.where(x==x[0])[0][1]
x = x[:endpoint]
y = y[:endpoint]
## Convert to swiss coordinates
#x,y = lonlat2xy(lon, lat)
axes.plot(x,y,color='darkred',linewidth=1,zorder=3)
## Add weather radar locations:
if radar_loc:
weather_radar_y = [237000,142000,100000,135000,190000]
weather_radar_x = [681000,497000,708000,604000,780000]
axes.scatter(weather_radar_x,weather_radar_y,marker="D",#s=2,
color='orange',edgecolor='black',zorder=10)
## Add radar visibility:
if radar_vis:
arr_visi = np.load(visi_path)
arr_visi[arr_visi<9000] = 0
arr_visi2 = morph.binary_opening(morph.binary_erosion(arr_visi, structure=np.ones((4,4))), structure=np.ones((4,4)))
arr_visi[arr_visi<9000] = np.nan
axes.imshow(arr_visi, cmap="gray", alpha=0.2, extent=fig_extent)
arr_visi[np.isnan(arr_visi)] = 1
#axes.contour(arr_visi[::-1,:], levels=[2], cmap="gray", linewidths=2,
# linestyle="solid", alpha=0.5, extent=fig_extent)
#arr_visi = arr_visi[::4, ::4]
#ys, xs = np.mgrid[arr_visi.shape[0]:0:-1,
# 0:arr_visi.shape[1]]
#axes.scatter(xs.flatten(), ys.flatten(), s=4,
# c=arr_visi.flatten().reshape(-1, 3), edgecolor='face')
## Add further elements:
axes.set_xlim([255000,965000])
axes.set_ylim([-160000,480000])
axes.grid()
axes.set_ylabel("CH1903 Northing")
axes.set_xlabel("CH1903 Easting")
axes.get_xaxis().set_major_formatter( \
ticker.FuncFormatter(lambda x, p: format(int(x), ",").replace(',', "'")))
axes.get_yaxis().set_major_formatter( \
ticker.FuncFormatter(lambda x, p: format(int(x), ",").replace(',', "'")))
plt.yticks(rotation=90, verticalalignment="center")
return fig, axes, fig_extent
## Convert lat/lon-values in decimals to values in seconds:
def dec2sec(angles):
    """Convert lat/lon values in decimal degrees to arc seconds.

    Parameters
    ----------
    angles : sequence of floats
        Location coordinates in decimal degrees.

    Returns
    -------
    Array (via ``np.zeros_like``) of the same coordinates in arc seconds
    (degrees*3600 + minutes*60 + seconds).
    """
    angles_ = np.zeros_like(angles)
    for i in range(len(angles)):
        angle = angles[i]
        ## Extract degrees / minutes / seconds.
        ## int() truncates toward zero exactly like the original
        ## str(angle).split(".") trick, but without the fragile string
        ## round-trip; the local was also renamed from `min`, which
        ## shadowed the builtin.
        deg = float(int(angle))
        minute = float(int((angle - deg) * 60.))
        sec = (((angle - deg) * 60.) - minute) * 60.
        angles_[i] = sec + minute * 60. + deg * 3600.
    return angles_
## Convert lat/lon-values (in seconds) into LV03 coordinates:
def lonlat2xy(s_lon, s_lat): # x: easting, y: northing
    """Convert lon/lat values (decimal degrees) into CH1903/LV03 coordinates.

    The inputs are converted to arc seconds internally (via dec2sec) and
    fed into the swisstopo polynomial approximation.

    Parameters
    ----------
    s_lon, s_lat : array-like
        Longitude / latitude in decimal degrees.

    Returns
    -------
    s_x, s_y : easting and northing in the Swiss LV03 system [m].
    """
    # The approximation formulas expect arc seconds.
    s_lon = dec2sec(s_lon)
    s_lat = dec2sec(s_lat)

    ## Auxiliary values: offsets from Bern in units of 10000 arc seconds.
    aux_lon = (s_lon - 26782.5) / 10000.
    aux_lat = (s_lat - 169028.66) / 10000.

    # Easting polynomial.
    s_x = (600072.37
           + 211455.93 * aux_lon
           - 10938.51 * aux_lon * aux_lat
           - 0.36 * aux_lon * (aux_lat ** 2)
           - 44.54 * (aux_lon ** 3))
    # Northing polynomial.
    s_y = (200147.07
           + 308807.95 * aux_lat
           + 3745.25 * (aux_lon ** 2)
           + 76.63 * (aux_lat ** 2)
           - 194.56 * (aux_lon ** 2) * aux_lat
           + 119.79 * (aux_lat ** 3))
    return s_x, s_y
## Plot key Radar, SEVIRI, COSMO and THX variables for specific TRT ID:
def plot_var_time_series_dt0_multiquant(TRT_ID_sel, df_nonnan, cfg_tds):
    """ Plot different Radar, SEVIRI.. variables (several quantiles) of specific TRT ID.

    Writes three PDF figures into cfg_tds["fig_output_path"]:
    SEVIRI_series_<ID>.pdf, RADAR_series_<ID>.pdf and
    COSMO_THX_series_<ID>.pdf.

    TRT_ID_sel : pandas dataframe
        Pandas dataframe with TRT IDs in 1st column and count how often it appears in df_nonnan in 2nd.
    df_nonnan : pandas dataframe
        2D pandas Dataframe with training data.
    cfg_tds : dict
        Configuration; only "fig_output_path" is read here.
    """
    date_of_cell = datetime.datetime.strptime(TRT_ID_sel["TRT_ID"][:12], "%Y%m%d%H%M")

    ## Find cells where the there are loads of similar TRT Ranks:
    # Index entries carry the TRT ID after position 13; keep matching rows.
    DTI_sel = [dti for dti in df_nonnan.index.values if dti[13:] in TRT_ID_sel["TRT_ID"]]
    cell_sel = df_nonnan.loc[DTI_sel]
    # Re-index the selection by the timestamp encoded in the first 12
    # characters of each index entry ("%Y%m%d%H%M").
    cell_sel.set_index(pd.to_datetime([datetime.datetime.strptime(date[:12],"%Y%m%d%H%M") for date in cell_sel.index]),
                       drop=True,append=False,inplace=True)

    ## Figure 1: SEVIRI satellite indicators, three quantiles per panel.
    fig, axes = plt.subplots(2,2)
    fig.set_size_inches(10,8)
    cmap_3_quant = truncate_cmap(plt.get_cmap('afmhot'), 0.2, 0.6)
    legend_entries = []
    cell_sel[["IR_108_stat|0|MIN","IR_108_stat|0|PERC05","IR_108_stat|0|PERC25"]].plot(ax=axes[0,0],cmap=cmap_3_quant,linewidth=1,style='-',alpha=0.8)
    axes[0,0].set_title(r"Brightness Temperatures T$_B$")
    axes[0,0].set_ylabel(r"IR 10.8$\mu$m [K]")
    legend_entries.append(["Min","5%", "25%"])
    cell_sel[["CG3_stat|0|PERC99","CG3_stat|0|PERC95","CG3_stat|0|PERC75"]].plot(ax=axes[0,1],cmap=cmap_3_quant,linewidth=1,style='-',alpha=0.8)
    axes[0,1].set_title("Glaciation indicator (GI)")
    axes[0,1].set_ylabel(r"IR 12.0$\mu$m - IR 10.8$\mu$m [K]")
    legend_entries.append(["99%","95%", "75%"])
    cell_sel[["CD5_stat|0|MAX","CD5_stat|0|PERC95","CD5_stat|0|PERC75"]].plot(ax=axes[1,0],cmap=cmap_3_quant,linewidth=1,style='-',alpha=0.8)
    axes[1,0].set_title("Cloud optical depth indicator (COD)")
    axes[1,0].set_ylabel(r"WV 6.2$\mu$m - IR 10.8$\mu$m [K]")
    legend_entries.append(["Max","95%", "75%"])
    cell_sel[["IR_108_stat|-15|PERC25","IR_108_stat|-15|PERC50","IR_108_stat|-15|PERC75"]].plot(ax=axes[1,1],cmap=cmap_3_quant,linewidth=1,style='-',alpha=0.8)
    axes[1,1].set_title(r"Updraft strength indicator ($w_{T}$)")
    axes[1,1].set_ylabel(r"IR 10.8$\mu$m (t$_0$) - IR 10.8$\mu$m (t$_{-15}$) [K]")
    legend_entries.append(["25%","50%", "75%"])
    for ax, leg_ent in zip(axes.flat,legend_entries):
        ax.grid()
        ax.legend(leg_ent, fontsize="small", loc="upper right") #, title_fontsize="small", title ="Quantiles"
    plt.tight_layout()
    plt.savefig(os.path.join(cfg_tds["fig_output_path"],"SEVIRI_series_%s.pdf" % (TRT_ID_sel["TRT_ID"])))
    plt.close()

    ## Figure 2: radar products plus lightning densities; the dashed
    ## secondary axis shows the covered areal fraction derived from the
    ## non-minimum pixel count (divided by 4.21 -- presumably the cell
    ## area scaling; TODO confirm).
    fig, axes = plt.subplots(3,2)
    fig.set_size_inches(10,8)
    legend_entries = []
    cell_sel[["RZC_stat_nonmin|0|PERC50","RZC_stat_nonmin|0|PERC75","RZC_stat_nonmin|0|MAX"]].plot(ax=axes[0,0],cmap=cmap_3_quant,linewidth=1,style='-',alpha=0.8)
    ax_pixc=(100-cell_sel[["RZC_pixc_NONMIN|0|SUM"]]/4.21).plot(ax=axes[0,0],color="black",linewidth=0.5,style='--',alpha=0.8, secondary_y=True)
    axes[0,0].set_title(r"Rain Rate (RR)")
    axes[0,0].set_ylabel(r"Rain Rate [mm h$^{-1}$]")
    ax_pixc.set_ylabel("Covered areal fraction [%]")
    legend_entries.append(["50%","75%", "MAX"])
    cell_sel[["LZC_stat_nonmin|0|PERC50","LZC_stat_nonmin|0|PERC75","LZC_stat_nonmin|0|MAX"]].plot(ax=axes[0,1],cmap=cmap_3_quant,linewidth=1,style='-',alpha=0.8)
    ax_pixc=(100-cell_sel[["LZC_pixc_NONMIN|0|SUM"]]/4.21).plot(ax=axes[0,1],color="black",linewidth=0.5,style='--',alpha=0.8, secondary_y=True)
    axes[0,1].set_title("Vertically Integrated Liquid (VIL)")
    axes[0,1].set_ylabel(r"VIL [kg m$^{-2}$]")
    ax_pixc.set_ylabel("Covered areal fraction [%]")
    legend_entries.append(["50%","95%", "MAX"])
    cell_sel[["MZC_stat_nonmin|0|PERC50","MZC_stat_nonmin|0|PERC75","MZC_stat_nonmin|0|MAX"]].plot(ax=axes[1,0],cmap=cmap_3_quant,linewidth=1,style='-',alpha=0.8)
    ax_pixc=(100-cell_sel[["MZC_pixc_NONMIN|0|SUM"]]/4.21).plot(ax=axes[1,0],color="black",linewidth=0.5,style='--',alpha=0.8, secondary_y=True)
    axes[1,0].set_title("Maximum Expected Severe Hail Size (MESHS)")
    axes[1,0].set_ylabel("MESHS [cm]")
    ax_pixc.set_ylabel("Covered areal fraction [%]")
    legend_entries.append(["25%","50%", "75%"])
    cell_sel[["BZC_stat_nonmin|0|PERC50","BZC_stat_nonmin|0|PERC75","BZC_stat_nonmin|0|MAX"]].plot(ax=axes[1,1],cmap=cmap_3_quant,linewidth=1,style='-',alpha=0.8)
    ax_pixc=(100-cell_sel[["BZC_pixc_NONMIN|0|SUM"]]/4.21).plot(ax=axes[1,1],color="black",linewidth=0.5,style='--',alpha=0.8, secondary_y=True)
    axes[1,1].set_title("Probability of Hail (POH)")
    axes[1,1].set_ylabel(r"POH [%]")
    ax_pixc.set_ylabel("Covered areal fraction [%]")
    legend_entries.append(["50%","75%", "MAX"])
    # Echo tops at two reflectivity thresholds share one panel, hence
    # explicit colors instead of the quantile colormap.
    cell_sel[["EZC15_stat_nonmin|0|PERC75","EZC15_stat_nonmin|0|MAX","EZC45_stat_nonmin|0|PERC75","EZC45_stat_nonmin|0|MAX"]].plot(ax=axes[2,0],color=["#fdbf6f","#ff7f00","#fb9a99","#e31a1c"],linewidth=1,style='-',alpha=0.8)
    ax_pixc=(100-cell_sel[["EZC45_pixc_NONMIN|0|SUM"]]/4.21).plot(ax=axes[2,0],color="black",linewidth=0.5,style='--',alpha=0.8, secondary_y=True)
    axes[2,0].set_title("Echo Top (ET)")
    axes[2,0].set_ylabel("Altitude a.s.l. [km]")
    ax_pixc.set_ylabel("Pixel count")
    legend_entries.append(["75% (15dBZ)","Max (15dBZ)", "75% (45dBZ)", "Max (45dBZ)"])
    cell_sel[["THX_dens_stat|0|MEAN","THX_densIC_stat|0|MEAN","THX_densCG_stat|0|MEAN"]].plot(ax=axes[2,1],cmap=cmap_3_quant,linewidth=1,style='-',alpha=0.8)
    axes[2,1].set_title("Mean lightning Density (THX)")
    axes[2,1].set_ylabel("Lightning density [km$^{-2}$]")
    ax_pixc.set_ylabel("Pixel count")
    legend_entries.append(["Total","IC", "CG"])
    for ax, leg_ent in zip(axes.flat,legend_entries):
        ax.grid()
        ax.legend(leg_ent, fontsize="small", loc="upper left") #) #, title_fontsize="small", title ="Quantiles"
    plt.tight_layout()
    plt.savefig(os.path.join(cfg_tds["fig_output_path"],"RADAR_series_%s.pdf" % (TRT_ID_sel["TRT_ID"])))
    plt.close()

    ## Figure 3: COSMO model fields (two quantiles per panel).
    fig, axes = plt.subplots(2,2)
    fig.set_size_inches(10,8)
    legend_entries = []
    cell_sel[["CAPE_ML_stat|0|PERC50","CAPE_ML_stat|0|MAX"]].plot(ax=axes[0,0],cmap=cmap_3_quant,linewidth=1,style='-',alpha=0.8)
    axes[0,0].set_title(r"CAPE (mean surface layer parcel)")
    axes[0,0].set_ylabel(r"CAPE [J kg$^{-1}$]")
    legend_entries.append(["75%", "MAX"])
    cell_sel[["CIN_ML_stat|0|PERC50","CIN_ML_stat|0|MAX"]].plot(ax=axes[0,1],cmap=cmap_3_quant,linewidth=1,style='-',alpha=0.8)
    axes[0,1].set_title(r"CIN (mean surface layer parcel)")
    axes[0,1].set_ylabel(r"CIN [J kg$^{-1}$]")
    legend_entries.append(["75%", "MAX"])
    cell_sel[["WSHEAR_0-3km_stat|0|PERC50","WSHEAR_0-3km_stat|0|MAX"]].plot(ax=axes[1,0],cmap=cmap_3_quant,linewidth=1,style='-',alpha=0.8)
    axes[1,0].set_title(r"Wind shear (0km - 3km)")
    axes[1,0].set_ylabel(r"Wind shear [m s$^{-1}$]")
    legend_entries.append(["75%", "MAX"])
    cell_sel[["POT_VORTIC_30000_stat|0|PERC50","POT_VORTIC_30000_stat|0|MAX"]].plot(ax=axes[1,1],cmap=cmap_3_quant,linewidth=1,style='-',alpha=0.8)
    axes[1,1].set_title(r"Potential vorticity (300hPa)")
    axes[1,1].set_ylabel(r"PV [K m$^{2}$ kg$^{-1}$ s$^{-1}$]")
    legend_entries.append(["75%", "MAX"])
    for ax, leg_ent in zip(axes.flat,legend_entries):
        ax.grid()
        ax.legend(leg_ent, fontsize="small", loc="upper left") #) #, title_fontsize="small", title ="Quantiles"
    plt.tight_layout()
    plt.savefig(os.path.join(cfg_tds["fig_output_path"],"COSMO_THX_series_%s.pdf" % (TRT_ID_sel["TRT_ID"])))
    plt.close()
| true |
class Rectangle:
    """Axis-aligned rectangle with mutable width and height."""

    def __init__(self, width, height):
        self.height = height
        self.width = width

    def set_width(self, width):
        """Update the width."""
        self.width = width

    def set_height(self, height):
        """Update the height."""
        self.height = height

    def get_area(self):
        """Return width * height."""
        return self.width * self.height

    def get_perimeter(self):
        """Return the perimeter (twice width plus twice height)."""
        return 2 * self.width + 2 * self.height

    def get_diagonal(self):
        """Return the diagonal length via Pythagoras."""
        return (self.width ** 2 + self.height ** 2) ** 0.5

    def get_picture(self):
        """Return an ASCII picture made of '*' rows, one per unit height."""
        if self.width > 50 or self.height > 50:
            return "Too big for picture."
        row = "*" * self.width + "\n"
        return row * self.height

    def get_amount_inside(self, otra_figura):
        """Return how many copies of otra_figura tile inside this rectangle."""
        across = self.width // otra_figura.width if self.width >= otra_figura.width else 0
        down = self.height // otra_figura.height if self.height >= otra_figura.height else 0
        return across * down

    def __repr__(self):
        return f"Rectangle(width={self.width}, height={self.height})"

    def __str__(self):
        return repr(self)
class Square(Rectangle):
    """A Rectangle constrained to equal width and height."""

    def __init__(self, side):
        # Delegate to Rectangle.__init__ instead of re-setting the
        # attributes by hand, so base-class initialization is not bypassed.
        super().__init__(side, side)

    def set_side(self, side):
        """Set both dimensions to `side`."""
        self.width = side
        self.height = side

    def set_width(self, width):
        # A square must stay square: changing one dimension changes both.
        self.set_side(width)

    def set_height(self, height):
        # Same invariant as set_width.
        self.set_side(height)

    def __repr__(self):
        return f"Square(side={self.width})"

    def __str__(self):
        return repr(self)
# Demo / smoke test: exercise the Rectangle and Square API and print results.
rect = Rectangle(10, 5)
print(rect.get_area())
rect.set_height(3)
print(rect.get_perimeter())
print(rect)
print(rect.get_picture())
sq = Square(9)
print(sq.get_area())
sq.set_side(4)
print(sq.get_diagonal())
print(sq)
print(sq.get_picture())
rect.set_height(8)
rect.set_width(16)
# How many 4x4 squares fit inside a 16x8 rectangle (expected: 8).
print(rect.get_amount_inside(sq))
| true |
def add(a, b):
    """Return the sum of a and b."""
    return a + b
def sub(a, b):
    """Return a minus b."""
    return a - b
import sys

# Simple interactive calculator: repeats until a choice other than 1 or 2
# is entered.  ("while 1" replaced with the idiomatic "while True", and the
# misspelled prompt "Enter vhoice" fixed.)
while True:
    print(" 1.Addition \n 2. subtraction ")
    choice = int(input("Enter choice: "))
    # Both operands are read before the choice is dispatched, matching the
    # original flow (they are read even when the user is about to exit).
    a = int(input("Enter 1st no: "))
    b = int(input("Enter 2nd no: "))
    if choice == 1:
        print(add(a, b))
    elif choice == 2:
        print(sub(a, b))
    else:
        sys.exit()
| true |
217476e044b1e563a11df3fdeceeaad86ca1a23d | Python | xuetinga/python- | /程序设计大赛习题/程序设计大赛第五题.py | UTF-8 | 1,024 | 3.84375 | 4 | [] | no_license | # 众所知周,毛学姐是一只学渣,只能代表软件学院的最低水平,有一天,他在研究《高等数论》的时候,发现了一个很神奇的现象,于是毛学姐发明了一个有趣的游戏:两人各说一个数字分别为a和b,如果a能包含b的所有质数因子,那么A就获胜。于是毛学姐找来两个好基友让他们进行人肉debug,但是当数字太大的时候,两个朋友的脑算速度就有点跟不上了。聪明的你已经识破了这个游戏的内容,请你写出这个程序,帮毛学姐debug。如果A获胜输出“Yes”,否则输出“No”。
# Input
# 输入一行,有两个用空格隔开的整数,分别为n和m(1 <= n, m <= 105)。
#
# Output
# 每行输出“Yes”或 “No”。
#
# Sample Input
# 120 75
# Sample Output
# Yes
def g(n, m):
    """Return the greatest common divisor of n and m (Euclid's algorithm)."""
    # Iterative form of the original recursive Euclid step.
    while n % m != 0:
        n, m = m, n % m
    return m
n, m = [int(x) for x in input().split()]
# n "contains" all prime factors of m iff repeatedly dividing m by
# gcd(n, m) reduces m to 1.  The original performed only a single
# division, which fails e.g. for n=2, m=8: gcd=2, m becomes 4, and
# 2 % 4 != 0 printed "No" even though 2 covers all prime factors of 8.
while m != 1:
    tmp = g(n, m)
    if tmp == 1:
        # m still has a prime factor that n lacks.
        break
    m //= tmp
print("Yes" if m == 1 else "No")
98f16273686006a29df2664b80b121ec132f7415 | Python | rsakh/qbb2019-answers | /day1-homework/day1-exercise-#4.py | UTF-8 | 510 | 3.28125 | 3 | [] | no_license | #!/usr/bin/env python3
#count number of alignments
import sys

# Read SAM records from the file given as the first CLI argument, or from
# stdin when no argument is supplied.
if len(sys.argv) > 1:
    f = open(sys.argv[1])
else:
    f = sys.stdin

chromosome = []
for line in f:
    # Header lines begin with "@": skip them.
    if line.startswith("@"):
        continue
    # Column 3 (index 2) of a SAM record holds the reference name;
    # "*" marks an unmapped read.
    fields = line.split("\t")
    if fields[2] == "*":
        continue
    chromosome.append(fields[2])
    # Stop after the first 10 mapped alignments.
    if len(chromosome) >= 10:
        break

# Close the handle we opened ourselves (never close stdin); the original
# leaked the file object.
if f is not sys.stdin:
    f.close()
print(chromosome)
| true |
541bf3d8d6ebd8de3bbe251ff16506ffdab14701 | Python | jacobfelknor/practice_interview_questions | /2019-12/bank/justify_text.py | UTF-8 | 1,058 | 4.15625 | 4 | [] | no_license | # This problem was asked by Palantir.
# Write an algorithm to justify text. Given a sequence of words
# and an integer line length k, return a list of strings which
# represents each line, fully justified.
# More specifically, you should have as many words as possible
# in each line. There should be at least one space between each
# word. Pad extra spaces when necessary so that each line has
# exactly length k. Spaces should be distributed as equally as
# possible, with the extra spaces, if any, distributed starting
# from the left.
# If you can only fit one word on a line, then you should pad
# the right-hand side with spaces.
# Each word is guaranteed not to be longer than k.
# For example, given the list of words
# ["the", "quick", "brown", "fox", "jumps", "over", "the", "lazy", "dog"]
# and k = 16, you should return the following:
# ["the quick brown", # 1 extra space on the left
# "fox jumps over", # 2 extra spaces distributed evenly
# "the lazy dog"] # 4 extra spaces distributed evenly
# Entry point placeholder: the justification algorithm described above has
# not been implemented yet.
if __name__ == "__main__":
    pass
| true |
31c149f659a8340cc0bd51f40dfd226f9ec80ed4 | Python | Aasthaengg/IBMdataset | /Python_codes/p02984/s175304233.py | UTF-8 | 225 | 2.8125 | 3 | [] | no_license | N = int(input())
A = list(map(int, input().split()))
R = []
tmp = 0
pm = -1
# First pass: R[i] accumulates the alternating sum
# A[0] - A[1] + A[2] - ... (+/- A[i]); pm flips sign each iteration.
for i in range(N):
    pm *= -1
    tmp += A[i] * pm
    R.append(tmp)
# Second pass: extends R via the recurrence tmp = 2*A[i-1] - R[i-1].
# NOTE(review): R grows to 2N-1 entries and all of them are printed;
# confirm this matches the expected output format of the task.
for i in range(1, N):
    tmp = 2 * A[i-1] - R[i-1]
    R.append(tmp)
print(*R) | true |
ff1ae4cb5e687cd223b14355a2720d0af7387149 | Python | dwblair/packing | /brown5.py | UTF-8 | 2,523 | 2.84375 | 3 | [] | no_license | from numpy import *
import random

# NOTE(review): this is a Python 2 script (print statements below); all
# quantities are in arbitrary simulation units.

#### general brownian dynamics params #####
L=250 #system size, compatible with size of display in processing
N=20 #number of particles
r=20 #particle radius
t=0 #time
maxt=10000 #max time
stepSize=r/10. #random step size
dt=.1 #timestep
gamma=5. #friction coeff
kbt=2.5 # KbT
MAXFORCE=10000000. #in case something goes wrong
timeGapForPrintout=10 #number of timesteps to skip before printing out coordinates

##### lennard jones params #####
epsilon=100. #depth of lennard jones potential
s=r # 'radius' of potential well
s6=s**6
s12=s6**2

#### gaussian random number parameters ######
sigma=1.
mu=0.

#coordinates array
coords=zeros((N,2))+L/2. #start all particles in the center

####### initially put coordinates in a grid #######
# Particles are laid out left-to-right with spacing factor*2*r,
# wrapping to a new row when the right margin is reached.
factor = 1.1
x=factor*2*r
y=factor*2*r
for i in range(0,N):
    coords[i][0]=x;
    coords[i][1]=y;
    x=x+factor*2*r
    if x>(L-factor*2*r):
        x=factor*2*r
        y=y+factor*2*r
############ run the simulation #########
# Each timestep: sum pairwise Lennard-Jones forces on every particle,
# then move it by the deterministic drift plus a Gaussian random kick.
for t in range(0,maxt): #time loop
    for i in range(0,N): #loop over all particles
        x=coords[i][0]
        y=coords[i][1]
        LJx=0.
        LJy=0.
        for j in range(0,N): #loop over all particles except the ith particle
            if j!=i:
                ##### get the distance between particles
                dx=coords[j][0]-coords[i][0]
                dy=coords[j][1]-coords[i][1]
                dr=sqrt(dx**2+dy**2)
                dr7=dr**7
                dr13=dr**13
                ###### calculate the LJ force in x and y
                ## note -- the neighbors need to be changed to reflect periodic boundaries (if not on compact surface)
                LJ=-24*epsilon*(2*s12/dr13 - s6/dr7)
                if (LJ>MAXFORCE):
                    LJ=MAXFORCE
                LJx=LJx+(dx/dr)*LJ
                LJy=LJy+(dy/dr)*LJ
        #### update the particle positions
        x=x+(dt/gamma)*LJx+sqrt(2*kbt*dt/gamma)*random.gauss(mu,sigma)
        y=y+(dt/gamma)*LJy+sqrt(2*kbt*dt/gamma)*random.gauss(mu,sigma)
        ##### enforce periodic boundaries (not on periodic surface)
        x=x%L
        y=y%L
        #put value back into coordinate array
        coords[i][0]=x
        coords[i][1]=y
        #output the particle positions
        if t%timeGapForPrintout==0:
            print coords[i][0],",",coords[i][1]
    #mark end of timestep
    if t%timeGapForPrintout==0:
        print "@", t
| true |
8a9d4e2286df6557f4f8752696b94a4b32b40f81 | Python | Ishan2K1/Class-XII | /Q3.py | UTF-8 | 565 | 3.828125 | 4 | [] | no_license | n=int(input("Enter number here: "))
def factors(n):
    """Return every positive divisor of n, in ascending order."""
    return [i for i in range(1, n + 1) if n % i == 0]

# All divisors of n, including n itself (n is read from input above).
factor = factors(n)
print(factor)

def isPrimeNo(n):
    """Print whether n is prime (exactly two divisors: 1 and itself).

    The original read a module-level list instead of using its parameter;
    now the function computes from its own argument.
    """
    if len(factors(n)) == 2:
        print("It is a prime number")
    else:
        print("It is not a prime number")

isPrimeNo(n)

# Keep only proper divisors before the perfect-number check, mirroring the
# original behaviour: the list is trimmed only when it has more than two
# entries.
if len(factor) > 2:
    factor = factor[:-1]

def isPerfectNo(factor):
    """Print whether n equals the sum of the given divisors."""
    if sum(factor) == n:
        print("It is a perfect Number")
    else:
        print("It is not a perfect Number")

isPerfectNo(factor)
| true |
370e29763b3eace9ef4a0d458f039fcf8a2e567a | Python | swernerx/konstrukteur | /konstrukteur/HtmlParser.py | UTF-8 | 860 | 2.71875 | 3 | [
"MIT"
] | permissive | #
# Konstrukteur - Static Site Generator
# Copyright 2013-2014 Sebastian Fastner
# Copyright 2014 Sebastian Werner
#
__all__ = ["parse"]
from bs4 import BeautifulSoup
def parse(filename):
    """Parse an HTML file into a page dict.

    Returns a dict with "content" (serialized children of <body>), "title",
    "summary" (text of the first <p>, or "") plus one lower-cased entry per
    <meta name=... content=...> element.

    Raises RuntimeError when a <meta> element lacks a name or content
    attribute.
    """
    page = {}
    # Use a context manager: the original open(...).read() leaked the handle.
    with open(filename, "rt") as fileHandle:
        parsedContent = BeautifulSoup(fileHandle.read())
    body = parsedContent.find("body")
    page["content"] = "".join([str(tag) for tag in body.contents])
    page["title"] = parsedContent.title.string
    firstP = body.p
    if firstP:
        page["summary"] = firstP.get_text()
    else:
        page["summary"] = ""
    for meta in parsedContent.find_all("meta"):
        # Look up HTML attributes with Tag.get(): the original
        # hasattr(meta, "name") guard never fired, because Tag.name is the
        # tag's own name ("meta"), so missing attributes raised KeyError
        # instead of the intended RuntimeError.
        name = meta.get("name")
        content = meta.get("content")
        if name is None or content is None:
            raise RuntimeError("Meta elements must have attributes name and content : %s" % filename)
        page[name.lower()] = content
    return page
| true |
289f2785c64848326896e88053c75b8b35ff878d | Python | ajayrot/Python7to8am | /FileHandling/Demo8.py | UTF-8 | 161 | 3.171875 | 3 | [] | no_license |
import os.path as pa

# Print the contents of a user-named file, if it exists.
fname = input("File Name with ext : ")
if pa.exists(fname):
    # A context manager guarantees the handle is closed; the original
    # open(fname).read() leaked the file object.
    with open(fname) as fh:
        print(fh.read())
else:
    print("File not Available")
| true |
16d47ae39c50ec3da246e7d4bd09dbef4f39f962 | Python | MyloTuT/IntroToPython | /Excercise/Ex3/ex3.12.py | UTF-8 | 360 | 2.953125 | 3 | [] | no_license | #!/usr/bin/env python3
counter = 0
# Scan the data file for 1928 records: the year is the first 4 characters
# of each line, the value of interest is whitespace column 2.
# `with` closes the file even if a line fails to parse; the original's
# manual close() was skipped on any exception.
with open('../python_data/FF_abbreviated.txt') as t_file:
    for line in t_file:
        year = line[0:4]
        if year == '1928':
            lines = line.split()
            ymd = lines[1]
            counter += 1
            convert_ymd = float(ymd)
            double_ymd = convert_ymd * 2
            print(counter, convert_ymd, double_ymd)
| true |
ce5ae88f1b0444b5e28b8b9f4d8d8098e6036009 | Python | pietrotortella/py_ml_exercises | /linear regression mv.py | UTF-8 | 1,968 | 2.953125 | 3 | [] | no_license | import numpy as np
import os
import matplotlib.pyplot as plt
def get_data(filename):
    """Load a comma-separated data file.

    All columns except the last are returned as the feature matrix X;
    the last column is the target vector Y.
    """
    data = np.genfromtxt(filename, delimiter=',')
    n_features = data.shape[1] - 1
    return data[:, :n_features], data[:, n_features]
def normalize_data(Z):
    """Return Z standardized column-wise to zero mean and unit variance."""
    return (Z - np.mean(Z, 0)) / np.std(Z, 0)
def add_bias(X):
    """Prepend a column of ones (the bias term) to the feature matrix X."""
    biased = np.ones([X.shape[0], X.shape[1] + 1])
    biased[:, 1:] = X
    return biased
def hypothesis(t, X):
    """Linear model prediction: X dotted with the parameter vector t."""
    return X.dot(t.T)
def cost(t, X, Y):
    """Mean squared error of the linear model's predictions against Y."""
    residual = X.dot(t.T) - Y
    return sum(residual ** 2) / len(X)
def dJdt(t, X, Y, j):
    """Partial derivative of the cost with respect to parameter j."""
    residual = X.dot(t.T) - Y
    return residual.T.dot(X[:, j]) * 2 / len(X)
def DJ(t, X, Y):
    """Gradient of the cost with respect to all parameters."""
    residual = X.dot(t.T) - Y
    return residual.T.dot(X) * 2 / len(X)
def gradient_descent_step(t, X, Y, alpha):
    """Perform one gradient-descent update of the parameter vector t."""
    return t - alpha * DJ(t, X, Y)
def gradient_descent(t, X, Y, alpha, steps):
    """Run `steps` gradient-descent iterations.

    Returns the final parameters together with the full parameter and
    cost histories (initial state included).
    """
    t_history = [t]
    cost_history = [cost(t, X, Y)]
    for _ in range(steps):
        t = gradient_descent_step(t, X, Y, alpha)
        t_history.append(t)
        cost_history.append(cost(t, X, Y))
    return t, t_history, cost_history
def plot_history(vector):
    """Plot every column of a 2-D history array against iteration index."""
    iterations = np.arange(len(vector))
    for col in range(vector.shape[1]):
        plt.plot(iterations, vector[:, col])
    plt.show()
def plot_c_history(vector):
    """Plot a 1-D history vector against iteration index."""
    plt.plot(np.arange(len(vector)), vector)
    plt.show()
def prevision(t, X):
    """Predict the target for a single raw (unnormalized) feature sample X.

    Relies on the module-level `mu` and `sigma` computed by the driver
    script below.
    """
    global mu, sigma
    # Apply the training normalization, then prepend the bias term.
    X = (X - mu) / sigma
    biased = np.ones(len(X) + 1)
    biased[1:] = X
    return hypothesis(t, biased)
# --- Driver script ---
# NOTE(review): hard-coded Windows working directory; the data file
# ex1data1.txt is expected inside it.
os.chdir('C:/PyExercises/ml-ex1/ex1')
X, Y = get_data('ex1data1.txt')
N_var = X.shape[1]
# Normalization statistics; also consumed later by prevision().
mu = np.mean(X,0)
sigma = np.std(X,0)
Xn = add_bias(normalize_data(X))
# Random initial parameters: one per feature plus the bias term.
Theta = np.random.rand(N_var + 1)
#Theta = np.array([1000000, 10, -100000])
alpha = 0.02
steps = 300
Theta, Theta_H, cost_H = gradient_descent(Theta, Xn, Y, alpha, steps)
Theta_H = np.array(Theta_H)
cost_H = np.array(cost_H)
print(Theta)
plot_history(Theta_H)
plot_c_history(cost_H)
| true |
c0075a0af39249d59748613123d8dcb02242413f | Python | thabo-div2/python_EOMP | /TestLotto.py | UTF-8 | 277 | 2.546875 | 3 | [] | no_license | import unittest, lotto_page
import random
class TestLotto(unittest.TestCase):
    """Sanity checks for lotto number generation."""

    def testLotto(self):
        # Draw 3 distinct numbers from the population 1..5.  The original
        # passed the two-element tuple (1, 5) as the population, which makes
        # random.sample raise ValueError ("sample larger than population"),
        # and then compared the int 5 against the returned *list*, which can
        # never be equal.  Intent inferred from the message -- confirm
        # against lotto_page's real range.
        lotto = random.sample(range(1, 6), 3)
        self.assertEqual(3, len(lotto), "Generate random numbers")
        self.assertTrue(all(1 <= n <= 5 for n in lotto))
# Standard unittest entry point.  The original compared __name__ against
# '__lotto_page__', which is never true, so the tests could not be launched
# by running this file directly.
if __name__ == '__main__':
    unittest.main()
| true |
class Solution(object):
    """LeetCode 34: first and last position of a target in a sorted array."""

    def searchRange(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: List[int] -- [first, last] indices, or [-1, -1] if absent
        """
        first = -1
        last = -1
        # Scan forward for the first occurrence.
        for i in range(len(nums)):
            if nums[i] == target:
                first = i
                break
        if first == -1:
            return [-1, -1]
        # Scan backward for the last occurrence; stopping at `first`
        # is safe because at least that index matches.  (Leftover debug
        # print statements removed.)
        for j in range(len(nums) - 1, first - 1, -1):
            if nums[j] == target:
                last = j
                break
        return [first, last]
| true |
0a6c7d235ff29b46369c8bfe327cccd9e5a5e271 | Python | XAlearnerer/PythonLearning | /GAME_AlienInvasion/game_functions.py | UTF-8 | 3,997 | 2.734375 | 3 | [] | no_license | import sys;
import pygame;
from bullet import Bullet
from alien import Alien
def check_keydown(event, ai_setting, screen, ship, bullets):
    """Handle key presses: arrow keys steer the ship, space fires a bullet."""
    if event.key == pygame.K_RIGHT:
        ship.moving_right = True
    elif event.key == pygame.K_LEFT:
        ship.moving_left = True
    elif event.key == pygame.K_SPACE:
        # Respect the configured cap on simultaneous bullets.
        if len(bullets) < ai_setting.bullet_allowed:
            bullets.add(Bullet(ai_setting, screen, ship))
def check_keyup(event, ship):
    """Stop the ship's horizontal motion when an arrow key is released."""
    if event.key == pygame.K_RIGHT:
        ship.moving_right = False
    elif event.key == pygame.K_LEFT:
        ship.moving_left = False
def check_events(ai_setting, screen, ship, bullets):
    """Dispatch every pending pygame event to the matching handler."""
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
        elif event.type == pygame.KEYDOWN:
            check_keydown(event, ai_setting, screen, ship, bullets)
        elif event.type == pygame.KEYUP:
            check_keyup(event, ship)
def update_screen(ai_setting, screen, ship, bullets, aliens):
    """Redraw the background, ship, aliens and bullets, then flip the display."""
    screen.fill(ai_setting.bg_color)
    ship.blitme()
    aliens.draw(screen)
    for bullet in bullets:
        bullet.draw_bullet()
    # Make the most recently drawn frame visible.
    pygame.display.flip()
def update_bullet(ai_setting, screen, ship, bullets, aliens):
    """Advance all bullets, cull off-screen ones, and handle alien hits."""
    # Group.update() calls update() on every bullet sprite.
    bullets.update()
    # Iterate over a copy: the original removed sprites from `bullets`
    # while iterating the same group, which can skip elements.
    for bull in bullets.copy():
        if bull.rect.bottom <= 0:
            bullets.remove(bull)
    # Delete any bullets and aliens that overlap (both True flags).
    pygame.sprite.groupcollide(bullets, aliens, True, True)
    # Fleet destroyed: clear remaining bullets and spawn a fresh fleet.
    if len(aliens) == 0:
        bullets.empty()
        create_fleet(ai_setting, screen, ship, aliens)
def get_number_rows(ai_settings, ship_height, alien_height):
    """Return how many alien rows fit between the top margin and the ship."""
    free_height = ai_settings.screen_height - (5 * alien_height) - ship_height
    return int(free_height / (2 * alien_height))
def get_number_aliens_x(ai_settings, alien_width):
    """Return how many aliens fit across the screen, leaving edge margins."""
    free_width = ai_settings.screen_width - 2 * alien_width
    return int(free_width / (2 * alien_width))
def create_alien(ai_settings, screen, aliens, ali_number, row_number):
    """Create one alien, place it at its grid slot, and add it to the group."""
    alien = Alien(ai_settings, screen)
    width = alien.rect.width
    # Each alien occupies one width, separated by one width of spacing.
    alien.x = width + 2 * width * ali_number
    alien.rect.x = alien.x
    alien.rect.y = alien.rect.height + 2 * alien.rect.height * row_number
    aliens.add(alien)
def create_fleet(ai_settings, screen, ship, aliens):
    """Fill the upper part of the screen with a grid of aliens."""
    # One throwaway alien just to measure sprite dimensions.
    template = Alien(ai_settings, screen)
    per_row = get_number_aliens_x(ai_settings, template.rect.width)
    rows = get_number_rows(ai_settings, ship.rect.height, template.rect.height)
    for row_number in range(rows):
        for ali_number in range(per_row):
            create_alien(ai_settings, screen, aliens, ali_number, row_number)
def check_fleet_edges(ai_settings, aliens):
    """If any alien touches a screen edge, drop and reverse the whole fleet."""
    for alien in aliens.sprites():
        if alien.check_edges():
            change_fleet_edges(ai_settings, aliens)
            break
def change_fleet_edges(ai_settings, aliens):
    """Drop every alien one step and flip the fleet's horizontal direction."""
    for alien in aliens.sprites():
        alien.rect.y += ai_settings.alien_drop_speed
    ai_settings.fleet_direction *= -1
def update_aliens(ai_settings, aliens):
    """Advance the fleet: handle edge bounces first, then move every alien."""
    check_fleet_edges(ai_settings, aliens)
    aliens.update()
| true |
2b725962ed2184c9cb5c37fe4c52f33b213fe25d | Python | ra2003/CustomTkinter | /customtkinter/customtkinter_entry.py | UTF-8 | 4,738 | 2.765625 | 3 | [
"CC0-1.0"
] | permissive | import tkinter
from .customtkinter_frame import CTkFrame
from .appearance_mode_tracker import AppearanceModeTracker
from .customtkinter_color_manager import CTkColorManager
class CTkEntry(tkinter.Frame):
    """Entry widget with a rounded background drawn on an internal canvas.

    A tkinter.Canvas paints the rounded rectangle (four corner ovals plus
    two overlapping rectangles) and a plain tkinter.Entry is placed on top.
    Color options accept either a single value or a (light, dark) tuple,
    selected by the current appearance mode.
    """
    def __init__(self,
                 master=None,
                 bg_color=None,
                 fg_color=CTkColorManager.ENTRY,
                 text_color=CTkColorManager.TEXT,
                 corner_radius=10,
                 width=120,
                 height=25,
                 *args,
                 **kwargs):
        super().__init__(master=master)

        # Register so the widget re-renders when the global mode changes.
        AppearanceModeTracker.add(self.change_appearance_mode)

        # Default background: inherit from a CTkFrame parent, else from Tk.
        if bg_color is None:
            if isinstance(self.master, CTkFrame):
                self.bg_color = self.master.fg_color
            else:
                self.bg_color = self.master.cget("bg")
        else:
            self.bg_color = bg_color

        self.fg_color = fg_color
        self.text_color = text_color

        self.appearance_mode = AppearanceModeTracker.get_mode()  # 0: "Light" 1: "Dark"

        self.width = width
        self.height = height
        self.corner_radius = corner_radius
        self.configure(width=self.width, height=self.height)

        # NOTE(review): "highlightthicknes" relies on Tk's option-name
        # abbreviation; the full option name is "highlightthickness".
        self.canvas = tkinter.Canvas(master=self,
                                     highlightthicknes=0,
                                     width=self.width,
                                     height=self.height)
        self.canvas.place(x=0, y=0)

        # Extra args/kwargs are forwarded to the wrapped tkinter.Entry.
        self.entry = tkinter.Entry(master=self,
                                   bd=0,
                                   highlightthicknes=0,
                                   *args, **kwargs)
        self.entry.place(relx=0.5, rely=0.5, relwidth=0.8, anchor=tkinter.CENTER)

        # Canvas item ids of the rounded-background shapes.
        self.fg_parts = []

        self.draw()

    def draw(self):
        # Rebuild the rounded background from scratch.
        self.canvas.delete("all")
        self.fg_parts = []

        # frame_border: four corner circles...
        self.fg_parts.append(self.canvas.create_oval(0, 0,
                                                     self.corner_radius*2, self.corner_radius*2))
        self.fg_parts.append(self.canvas.create_oval(self.width-self.corner_radius*2, 0,
                                                     self.width, self.corner_radius*2))
        self.fg_parts.append(self.canvas.create_oval(0, self.height-self.corner_radius*2,
                                                     self.corner_radius*2, self.height))
        self.fg_parts.append(self.canvas.create_oval(self.width-self.corner_radius*2, self.height-self.corner_radius*2,
                                                     self.width, self.height))
        # ...plus two overlapping rectangles filling the middle.
        self.fg_parts.append(self.canvas.create_rectangle(0, self.corner_radius,
                                                          self.width, self.height-self.corner_radius))
        self.fg_parts.append(self.canvas.create_rectangle(self.corner_radius, 0,
                                                          self.width-self.corner_radius, self.height))

        # Fill every shape; tuple colors select by appearance mode.
        for part in self.fg_parts:
            if type(self.fg_color) == tuple:
                self.canvas.itemconfig(part, fill=self.fg_color[self.appearance_mode], width=0)
            else:
                self.canvas.itemconfig(part, fill=self.fg_color, width=0)

        if type(self.bg_color) == tuple:
            self.canvas.configure(bg=self.bg_color[self.appearance_mode])
        else:
            self.canvas.configure(bg=self.bg_color)

        # Entry background matches the drawn foreground so it blends in.
        if type(self.fg_color) == tuple:
            self.entry.configure(bg=self.fg_color[self.appearance_mode],
                                 highlightcolor=self.fg_color[self.appearance_mode])
        else:
            self.entry.configure(bg=self.fg_color,
                                 highlightcolor=self.fg_color)

        # Text and insertion-cursor color.
        if type(self.text_color) == tuple:
            self.entry.configure(fg=self.text_color[self.appearance_mode],
                                 insertbackground=self.text_color[self.appearance_mode])
        else:
            self.entry.configure(fg=self.text_color,
                                 insertbackground=self.text_color)

    def change_appearance_mode(self, mode_string):
        # Callback invoked by AppearanceModeTracker with "Dark"/"Light".
        if mode_string.lower() == "dark":
            self.appearance_mode = 1
        elif mode_string.lower() == "light":
            self.appearance_mode = 0

        # The parent's background may differ per mode, so refresh it too.
        if isinstance(self.master, CTkFrame):
            self.bg_color = self.master.fg_color
        else:
            self.bg_color = self.master.cget("bg")

        self.draw()

    def delete(self, *args, **kwargs):
        # Delegate to the wrapped tkinter.Entry.
        return self.entry.delete(*args, **kwargs)

    def insert(self, *args, **kwargs):
        # Delegate to the wrapped tkinter.Entry.
        return self.entry.insert(*args, **kwargs)

    def get(self):
        # Return the current text of the wrapped tkinter.Entry.
        return self.entry.get()
| true |
bf0d37ba5d32db0fa62516fd803003dd4e6466a5 | Python | gwillz/epevents | /tests/event.py | UTF-8 | 1,257 | 3.03125 | 3 | [
"CC-BY-4.0"
] | permissive | import unittest, threading
from epevents import Event
class Event_test(unittest.TestCase):
    """Behavioural tests for the Event container."""

    def setUp(self):
        self.event = Event()

    def tearDown(self):
        self.event = None

    def test_regular(self):
        # Handlers registered via add() all fire; results are collected.
        self.event.add(lambda sender, arg: arg)
        self.event.add(lambda sender, arg: arg)
        self.assertEqual(self.event.fire(self, "a"), ('a', 'a'))

    def test_magic(self):
        # += registers handlers, and calling the event fires them.
        self.event += lambda sender, arg: arg
        self.event += lambda sender, arg: arg
        self.assertEqual(self.event(self, "a"), ('a', 'a'))

    def test_clear(self):
        # Membership tracks add/remove/clear.
        handler_a = lambda arg: arg
        handler_b = lambda arg: arg
        self.event += handler_a
        self.event += handler_b
        self.assertIn(handler_a, self.event)
        self.assertIn(handler_b, self.event)
        self.event.remove(handler_a)
        self.assertNotIn(handler_a, self.event)
        self.assertIn(handler_b, self.event)
        self.event.clear()
        self.assertNotIn(handler_a, self.event)
        self.assertNotIn(handler_b, self.event)
| true |
557707a17eadc7f8aa1bea685c771cd1da96077d | Python | huizhang-zhang/mytools | /app/wechatmessage2.py | UTF-8 | 2,944 | 2.609375 | 3 | [] | no_license | """
Version: Python3.5
Author: OniOn
Site: http://www.cnblogs.com/TM0831/
Time: 2018/12/27 14:49
微信定时推送消息(非网页版微信登陆的方式)
"""
import json,datetime
import requests,sxtwl,itchat
from wxpy import TEXT
import time
class WechatMessage:
    """Helpers that build daily-message content (date, quote, weather) for WeChat pushes."""

    def __init__(self):
        # Currently unused placeholder for a recipient name.
        self.name = ""
    # Build the Chinese lunar-calendar representation of today's date.
    def getYMD(self):
        """Return today's date as a formatted string: Gregorian date, weekday,
        and the traditional Chinese lunar date (via the `sxtwl` library)."""
        # Lunar month names (index 0 is the 11th month in sxtwl's numbering).
        ymc = [u"十一", u"十二", u"正", u"二", u"三", u"四", u"五", u"六", u"七", u"八", u"九", u"十"]
        # Lunar day names (1st .. 31st).
        rmc = [u"初一", u"初二", u"初三", u"初四", u"初五", u"初六", u"初七", u"初八", u"初九", u"初十",
               u"十一", u"十二", u"十三", u"十四", u"十五", u"十六", u"十七", u"十八", u"十九",
               u"二十", u"廿一", u"廿二", u"廿三", u"廿四", u"廿五", u"廿六", u"廿七", u"廿八", u"廿九", u"三十", u"卅一"]
        # Heavenly Stems and Earthly Branches for the sexagenary cycle.
        Gan = ["甲", "乙", "丙", "丁", "戊", "己", "庚", "辛", "壬", "癸"]
        Zhi = ["子", "丑", "寅", "卯", "辰", "巳", "午", "未", "申", "酉", "戌", "亥"]
        # Zodiac animals (defined here but not used below).
        ShX = ["鼠", "牛", "虎", "兔", "龙", "蛇", "马", "羊", "猴", "鸡", "狗", "猪"]
        # Chinese numerals used for weekday names ("天" = Sunday).
        numCn = ["天", "一", "二", "三", "四", "五", "六", "七", "八", "九", "十"]
        lunar = sxtwl.Lunar()
        year = datetime.datetime.now().year
        month = datetime.datetime.now().month
        rday = datetime.datetime.now().day
        day = lunar.getDayBySolar(year, month, rday)
        # Gregorian "YYYY年M月D日" string.
        d = str(day.y) + "年" + str(day.m) + "月" + str(day.d) + "日"
        if day.Lleap:
            # Leap lunar month gets the "润" prefix.
            a = "润" + ymc[day.Lmc] + "月" + rmc[day.Ldi] + "日"
        else:
            a = ymc[day.Lmc] + "月" + rmc[day.Ldi] + "日"
        b = "星期" + numCn[day.week]
        # Sexagenary (stem-branch) year/month/day, e.g. "戊戌年甲子月乙丑日".
        c = Gan[day.Lyear2.tg] + Zhi[day.Lyear2.dz] + "年" + Gan[day.Lmonth2.tg] + Zhi[day.Lmonth2.dz] + "月" + Gan[
            day.Lday2.tg] + Zhi[day.Lday2.dz] + "日"
        txt = '今天日期:'+d + ', ' + b + '\n'+'中华农历: ' + a + ', ' + c
        return txt # the assembled date information for today
    # Fetch iCIBA's "sentence of the day".
    def get_iciba_everyday_chicken_soup(self):
        """Return today's iCIBA daily sentence as "<Chinese>\\n<English>\\n"."""
        # iCIBA open API endpoint.
        url = 'http://open.iciba.com/dsapi/'
        r = requests.get(url)
        all = json.loads(r.text)
        Englis = all['content']
        Chinese = all['note']
        everyday_soup = Chinese+'\n'+Englis+'\n'
        # Chinese translation first, then the English original.
        return everyday_soup
    # Fetch the weather for a city.
    def get_sentence(self, number):
        """Return the sojson weather API response (parsed JSON) for city id `number`.

        `number` must be a string city code; it is concatenated into the URL.
        """
        url = "http://t.weather.sojson.com/api/weather/city/"+ number
        # `number` is appended straight onto the endpoint path.
        santence = requests.get(url)
        return santence.json()
    # Send a message.
    def send_message(self,message,name):
        """Stub: currently only prints; `url` is assigned but never used.

        NOTE(review): looks like an unfinished WeChat OpenAI-sign integration.
        """
        url = "https://openai.weixin.qq.com/openapi/sign/"
        print(message,name)
if __name__ == '__main__':
    # Demo entry point: fetch and print the weather JSON for a hard-coded
    # city id ("101190201" -- presumably a sojson city code; verify).
    wm = WechatMessage()
    weather = wm.get_sentence("101190201")
    print(weather)
| true |
4b844bbc6df3d81f65b3cb17604d99d09f19e693 | Python | opportunity356/interview-preparation | /data_structures/array/cycle_shift.py | UTF-8 | 747 | 3.484375 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'opportunity356'
def cycle_shift_right(a, n, k):
    """Rotate list `a` of length `n` right by `k` positions, in place.

    Uses the cycle-following ("juggling") technique: each value is dropped
    into its target slot while the displaced value is carried forward.
    When a cycle closes before all elements moved (gcd(n, k) > 1), the walk
    restarts one index later. O(n) time, O(1) extra space; returns `a`.
    """
    moved = 0
    cycle_start = 0
    pos = cycle_start
    carried = a[pos]
    while moved < n:
        target = (pos + k) % n
        # Drop the carried value into its slot, pick up what was there.
        carried, a[target] = a[target], carried
        pos = target
        moved += 1
        if target == cycle_start:
            # Cycle closed: continue from the next index.
            # (Modulus keeps the index valid when k is a multiple of n.)
            cycle_start = (cycle_start + 1) % n
            pos = cycle_start
            carried = a[pos]
    return a
if __name__ == '__main__':
    # Demo: shifting 1..12 right by 12 is the identity rotation.
    # list(...) is required: Python 3's range is immutable and the function
    # mutates `a` in place.  (The original also used Python-2 print
    # statements, which are a syntax error under Python 3.)
    a = list(range(1, 13))
    n = len(a)
    k = 12
    print(a)
    print(cycle_shift_right(a, n, k))
20eeff7fca119b7f1e88a287deb7c47e071f532f | Python | Abdulbasith1211/100-days-of-code-python-edition- | /day_002.py | UTF-8 | 5,533 | 4.40625 | 4 | [] | no_license |
# Python program to illustrate
# while loop
count = 0
while (count < 5):
    count = count + 1
    print("COYG")
#Python program to illustrate
# combining else with while
# (the else branch runs because the loop exits normally, without break)
count = 0
while (count <= 5):
    count = count + 1
    print("COYG")
else:
    print("COYG AGAIN")
# Python program to illustrate for loop
# Iterating over range 0 to n-1
n = 7
for i in range(0, n):
    print(i)
# Python program to illustrate
# Iterating over a list
print("List Iteration eg")
l = ["Come ", "on", "you", "gooner"]
for i in l:
    print(i)
# Iterating over a tuple (immutable)
print("\nTuple Iteration")
t = ("Come", "on", "you", "gooner")
for i in t:
    print(i)
# Iterating over a String (yields one character at a time)
print("\nString Iteration")
s = "Gooner"
for i in s :
    print(i)
# Iterating over dictionary (iterates the keys, in insertion order)
print("\nDictionary Iteration")
d = dict()
d['xyz'] = 231
d['abc'] = 211
for i in d :
    print("%s %d" %(i, d[i]))
# Python program to illustrate
# Iterating by index
# NOTE(review): the name `list` shadows the built-in list type here.
list = ["Gonner", "for", "life"]
for index in range(len(list)):
    print (list[index])
# Python program to illustrate
# combining else with for loop
# (else runs because the loop completes without break)
list = ["Gooner", "for", "life"]
for index in range(len(list)):
    print (list[index])
else:
    print("Not givin up")
# Python program to illustrate
# nested for loops in Python
for i in range(2, 10):
    for j in range(i):
        print(i, end=' ')
    print()
# Prints all letters except 'e' and 's'
for letter in 'gooner for sev':
    if letter == 'e' or letter == 's':
        continue
    print ('Current Letter :', letter)
# Python program for an empty loop
# (the loop variable keeps its last value after the loop ends)
for letter in 'server':
    pass
print ('Last Letter :', letter)
# A simple for loop example
giv_fruits = ["apricot", "guava", "passionfruit"]
for fruit in giv_fruits:
    print(fruit)
# python3 code to
# illustrate the
# difference between
# == and is operator
# == compares values; `is` compares object identity.
# [] is an empty list
list1 = []
list2 = []
list3=list1
if (list1 == list2):
    print("True")
else:
    print("False")
if (list1 is list2):
    print("True")
else:
    print("False")
if (list1 is list3):
    print("True")
else:
    print("False")
# Concatenation builds a NEW list object, so identity no longer holds.
list3 = list3 + list2
if (list1 is list3):
    print("True")
else:
    print("False")
fruits = ["apple", "orange", "kiwi"]
# Creating an iterator object
# from that iterable i.e fruits
iter_obj = iter(fruits)
# Infinite while loop -- this is what a for loop does under the hood
while True:
    try:
        # getting the next item
        fruit = next(iter_obj)
        print(fruit)
    except StopIteration:
        # if StopIteration is raised,
        # break from loop
        break
# A C-style way of accessing list elements
players = ["Ronaldo", "Messi", "Auba"]
i = 0
while (i < len(players)):
    print (players[i])
    i += 1
#Accessing items using for-in loop
players=["auba", "saka", "odegaard"]
for i in players:
    print(i)
#Accessing items using indexes and for-in
players=["auba", "saka", "odegaard"]
for i in range(len(players)):
    print(players[i])
#Accessing items using enumerate() (index discarded, value printed)
players=["laca", "saka", "xhaka"]
for i,y in enumerate(players):
    print(y)
# Accessing items and indexes enumerate() -- x is an (index, value) tuple
cars = ["Aston" , "Audi", "McLaren "]
for x in enumerate(cars):
    print (x[0], x[1])
# demonstrating the use of start in enumerate
cars = ["Aston" , "Audi", "McLaren "]
for x in enumerate(cars, start=1):
    print (x[0], x[1])
# Two separate lists
cars = ["Aston", "Audi", "McLaren"]
accessories = ["GPS kit", "Car repair-tool kit"]
# Single dictionary holds prices of cars and
# its accessories.
# First three items store prices of cars and
# next two items store prices of accessories.
prices = {1:"570000$", 2:"68000$", 3:"450000$",
          4:"8900$", 5:"4500$"}
# Printing prices of cars
for index, c in enumerate(cars, start=1):
    print ("Car: %s Price: %s"%(c, prices[index]))
# Printing prices of accessories (offset past the car entries)
for index, a in enumerate(accessories,start=1):
    print ("Accessory: %s Price: %s"\
        %(a,prices[index+len(cars)]))
# Python program to demonstrate the working of zip
# Two separate lists
cars = ["Aston", "Audi", "McLaren"]
accessories = ["GPS", "Car Repair Kit",
            "Dolby sound kit"]
# Combining lists and printing
for c, a in zip(cars, accessories):
    print ("Car: %s, Accessory required: %s"\
        %(c, a))
# Python program to demonstrate unzip (reverse
# of zip)using * with zip function
# Unzip lists
l1,l2 = zip(*[('Aston', 'GPS'),
            ('Audi', 'Car Repair'),
            ('McLaren', 'Dolby sound kit')
            ])
# Printing unzipped lists
print(l1)
print(l2)
# Python 3.x program to check if an array consists
# of even number
def even_number(l):
    """Print whether the sequence `l` contains at least one even number.

    (Rewritten with an explicit flag instead of the original for-else.)
    """
    has_even = False
    for num in l:
        if num % 2 == 0:
            has_even = True
            break
    if has_even:
        print ("list contains an even number")
    else:
        print ("list does not contain an even number")
# Driver code
print ("For List 1:")
even_number([1, 9, 8])
print (" \nFor List 2:")
even_number([1, 3, 5])
# while-else with break: the break below skips the else branch entirely.
count = 0
while (count < 1):
    count = count+1
    print(count)
    break
else:
    print("No Break")
#
# Python code to demonstrate range() vs xrange()
# on basis of operations usage
# initializing a with range()
a = range(1,6)
# testing usage of slice operation on range()
# prints without error
# NOTE: in Python 3 slicing a range yields another range object, so this
# prints "range(3, 6)" rather than a list of values.
print ("The list after slicing using range is : ")
print (a[2:5])
#end of day 2 code
| true |
845f0196db724cf24ad4eb09d31a7a7f70b0b2e4 | Python | acdaly/LED-controller | /archive/OStest.py | UTF-8 | 738 | 2.546875 | 3 | [] | no_license | import os
import os
def listFiles(path):
    """Recursively collect the absolute paths of every file under `path`.

    :param path: a file or directory path (relative or absolute)
    :return: list of absolute file paths

    Fixes vs. the original:
    - the base case returned os.path.abspath('.') + "/" + path, which is
      wrong whenever `path` is already absolute; os.path.abspath(path)
      handles both cases.
    - paths are joined with os.path.join instead of "/" so the function
      also works on Windows.
    """
    if not os.path.isdir(path):
        # Base case: a plain file -- singleton list with its absolute path.
        return [os.path.abspath(path)]
    # Recursive case: descend into the directory and gather every file.
    files = []
    for filename in os.listdir(path):
        files += listFiles(os.path.join(path, filename))
    return files
# file = listFiles("Files")
# file1 = open(file[0])
# contents = file1.read()
# print(file)
# file1.close()
#print(contents)
# Write via a context manager so the handle is closed even if write() raises.
with open('Save9', 'w') as save2:
    save2.write("hi")
# file2 = open('/Users/rollingstudent/Desktop/TP3/Save2')
# contents2 = file2.read()
# print(contents2)
| true |
6c3d56a24f69c43bcbaf6ed629946fa6fa1aeaaa | Python | sojunhwi/Python | /2920 음계.py | UTF-8 | 186 | 3.453125 | 3 | [] | no_license | # https://www.acmicpc.net/problem/2920
# Read the eight notes; with digits 1-8 lexicographic order equals numeric order.
notes = input().split()
if notes == sorted(notes):
    print('ascending')
elif notes == sorted(notes, reverse=True):
    print('descending')
else:
    print('mixed')
6707f403e78678bfddc73e3ad80a82c934947c2f | Python | 2dvodcast/Data-Science-1 | /TrueCar/diff.py | UTF-8 | 2,200 | 3.1875 | 3 | [] | no_license | '''This script reads in 2 data files and outputs the differences between the two files to a
CSV file.'''
import pandas as pd
def report_diff(x):
    """Collapse an (old, new) pair: the value itself when unchanged,
    otherwise the string "old | new"."""
    old, new = x[0], x[1]
    if old == new:
        return old
    return '{} | {}'.format(old, new)
def main():
    """Diff two bike-data snapshots on '@ID' and write changes.csv.

    Rows are classified as Added / Deleted / Modified / Unchanged; modified
    rows show per-cell diffs produced by report_diff.

    NOTE(review): this code targets an old pandas.  `pd.Panel` was removed
    in pandas 0.25, `DataFrame.sort` is now `sort_values`, `.any(1)` /
    `.drop(col, 1)` positional axis args are deprecated, and several of the
    assignments below trigger SettingWithCopy warnings on modern versions.
    """
    old_df = pd.read_csv('bike_data_20110921.csv')
    new_df = pd.read_csv('bike_data_20140821.csv')
    # find new IDs added between 09212011 and 08212014
    added_df = new_df[~new_df['@ID'].isin(old_df['@ID'])]
    added_df['Action'] = 'Added'
    # find IDs that were removed between 09212011 and 08212014
    deleted_df = old_df[~old_df['@ID'].isin(new_df['@ID'])]
    deleted_df['Action'] = 'Deleted'
    # create 2 data frames that have IDs that existed on 09212011 and 08212014
    # one data frame will contain data from 09212011, the other from 08212014
    inBoth2011_df = old_df[old_df['@ID'].isin(new_df['@ID'])]
    inBoth2014_df = new_df[new_df['@ID'].isin(old_df['@ID'])]
    #Make the indices equal on both data frames so they align row-by-row
    # (assumes both snapshots list common IDs in the same order -- TODO confirm)
    inBoth2011_df.index = range(len(inBoth2011_df))
    inBoth2014_df.index = range(len(inBoth2014_df))
    # Find the rows that have no changes and put them into a dataframe
    ne = (inBoth2011_df != inBoth2014_df).any(1)
    inBoth2011_df['hasChange'] = ne
    inBoth2014_df['hasChange'] = ne
    noChanges_df = inBoth2011_df[~inBoth2011_df['hasChange']]
    noChanges_df = noChanges_df.drop('hasChange', 1)
    noChanges_df['Action'] = 'Unchanged'
    # Find the rows that have changes and show the diffs in a dataframe
    data2011 = inBoth2011_df[inBoth2011_df['hasChange']]
    data2014 = inBoth2014_df[inBoth2014_df['hasChange']]
    # Panel.apply(report_diff, axis=0) folds each (old, new) cell pair.
    my_panel = pd.Panel(dict(df1=data2011,df2=data2014))
    modify_df = my_panel.apply(report_diff, axis=0)
    modify_df = modify_df.drop('hasChange', 1)
    modify_df['Action'] = 'Modified'
    # Create the final data frame and export to .csv
    final_df = pd.concat([modify_df, noChanges_df, deleted_df, added_df])
    # Move the Action column to the front.
    cols = final_df.columns.tolist()
    cols = cols[-1:] + cols[:-1]
    final_df = final_df[cols]
    final_df['@ID'] = final_df['@ID'].astype(int)
    final_df = final_df.sort(columns='@ID')
    final_df.to_csv('changes.csv', index=False)
if __name__ == "__main__": main()
8af4f5b1068912a9284f90d8d8b4d59a0aaf8b0e | Python | dclegalhackers/regulations-parser | /regparser/notice/diff.py | UTF-8 | 6,796 | 2.53125 | 3 | [] | no_license | #vim: set encoding=utf-8
from itertools import takewhile
import re
from lxml import etree
from regparser.grammar import amdpar, tokens
from regparser.tree import struct
from regparser.tree.xml_parser.reg_text import build_section
def clear_between(xml_node, start_char, end_char):
    """Strip everything (text and nested XML) between the two delimiter
    characters, returning a re-parsed node."""
    serialized = etree.tostring(xml_node, encoding=unicode)
    start_esc, end_esc = re.escape(start_char), re.escape(end_char)
    # Match "start ... end" spans (no nested end chars inside), across lines.
    span_re = re.compile(start_esc + '[^' + end_esc + ']*' + end_esc,
                         re.M + re.S + re.U)
    return etree.fromstring(span_re.sub('', serialized))
def remove_char(xml_node, char):
    """Delete every occurrence of `char` from this node and all its children,
    returning a re-parsed node."""
    serialized = etree.tostring(xml_node, encoding=unicode)
    return etree.fromstring(serialized.replace(char, ''))
def find_diffs(xml_tree, cfr_part):
"""Find the XML nodes that are needed to determine diffs"""
last_context = []
diffs = []
# Only final notices have this format
for section in xml_tree.xpath('//REGTEXT//SECTION'):
section = clear_between(section, '[', ']')
section = remove_char(remove_char(section, u'▸'), u'◂')
node = build_section(cfr_part, section)
if node:
def per_node(node):
if node_is_empty(node):
for c in node.children:
per_node(c)
else:
print node.label, node.text
per_node(node)
def node_is_empty(node):
    """Handle different ways the regulation represents no content: a node is
    empty when its text is blank or whitespace-only."""
    return not node.text.strip()
def parse_amdpar(par, initial_context):
    """Turn one AMDPAR XML node into (amendments, updated context).

    Pipeline: scan the serialized node text for grammar tokens, normalize
    them (passive verbs flipped to active, contexts promoted to paragraphs,
    token lists flattened, contexts folded into paragraph labels), then
    build the amendment tuples.
    """
    text = etree.tostring(par, encoding=unicode)
    stream = [match[0] for match, _, _ in amdpar.token_patterns.scanString(text)]
    stream = switch_passive(stream)
    stream = context_to_paragraph(stream)
    stream = separate_tokenlist(stream)
    stream, final_context = compress_context(stream, initial_context)
    return make_amendments(stream), final_context
def switch_passive(tokenized):
    """Passive verbs are modifying the phrase before them rather than the
    phrase following. For consistency, we flip the order of such verbs.

    Returns the input unchanged when every verb is already active.
    Otherwise walks the token stream in chunks of
    [non-verbs..., verb]; a passive verb is moved to the front of its chunk
    and flagged active.  NOTE: mutates the Verb tokens in place
    (verb.active = True).
    """
    if all(not isinstance(t, tokens.Verb) or t.active for t in tokenized):
        return tokenized
    converted, remaining = [], tokenized
    while remaining:
        # Everything up to (but excluding) the next verb.
        to_add = list(takewhile(
            lambda t: not isinstance(t, tokens.Verb), remaining))
        if len(to_add) < len(remaining):
            #also take the verb
            verb = remaining[len(to_add)]
            to_add.append(verb)
            if not verb.active:
                #switch it to the beginning
                to_add = to_add[-1:] + to_add[:-1]
                verb.active = True
        converted.extend(to_add)
        remaining = remaining[len(to_add):]
    return converted
def context_to_paragraph(tokenized):
    """Generally, section numbers, subparts, etc. are good contextual clues,
    but sometimes they are the object of manipulation.

    If the stream already contains Paragraph tokens (directly or inside a
    TokenList) it is returned unchanged; otherwise every uncertain Context
    that follows a verb is converted into a Paragraph with the same label.
    """
    # Don't modify anything if there are already paragraphs or no verbs
    for token in tokenized:
        if isinstance(token, tokens.Paragraph):
            return tokenized
        elif (isinstance(token, tokens.TokenList) and
              any(isinstance(t, tokens.Paragraph) for t in token.tokens)):
            return tokenized
    #copy, so the caller's list is never mutated
    converted = list(tokenized)
    verb_seen = False
    for i in range(len(converted)):
        token = converted[i]
        if isinstance(token, tokens.Verb):
            verb_seen = True
        elif (verb_seen and isinstance(token, tokens.Context)
              and not token.certain):
            # Only *uncertain* contexts after a verb become paragraphs.
            converted[i] = tokens.Paragraph(token.label)
    return converted
def separate_tokenlist(tokenized):
    """When we come across a token list, separate it out into individual
    tokens (one-level flattening; other tokens pass through untouched)."""
    flattened = []
    for token in tokenized:
        if isinstance(token, tokens.TokenList):
            flattened += token.tokens
        else:
            flattened += [token]
    return flattened
def compress(lhs_label, rhs_label):
    """Combine two labels where the rhs replaces the lhs; falsy rhs
    components fall back to the lhs component at the same position.
    If the rhs is empty, the lhs takes precedent (returned as-is).
    The result always has len(rhs_label) components."""
    if not rhs_label:
        return lhs_label
    merged = []
    for i, rhs_part in enumerate(rhs_label):
        lhs_part = lhs_label[i] if i < len(lhs_label) else None
        merged.append(rhs_part or lhs_part)
    return merged
def compress_context(tokenized, initial_context):
    """Add context to each of the paragraphs (removing context tokens).

    Threads a running context (starting from `initial_context`) through the
    stream: Context tokens update it and are dropped; Paragraph tokens get
    their label replaced by the merged context.  Returns
    (converted_tokens, final_context).  NOTE: mutates Paragraph tokens in
    place (token.label is overwritten).
    """
    #copy, so the caller's context list is never mutated
    context = list(initial_context)
    converted = []
    for token in tokenized:
        if isinstance(token, tokens.Context):
            # One corner case: interpretations of appendices -- the appendix
            # component is slotted into position 2, keeping the section slot open.
            if (len(context) > 1 and len(token.label) > 1
                    and context[1] == 'Interpretations'
                    and token.label[1]
                    and token.label[1].startswith('Appendix')):
                context = compress(
                    context,
                    [token.label[0], None, token.label[1]] + token.label[2:])
            else:
                context = compress(context, token.label)
            continue
        # Another corner case: a "paragraph" indicates interp context --
        # its sub-paragraph components are collapsed into one "(a)(b)..." string.
        elif (
                isinstance(token, tokens.Paragraph) and len(context) > 1
                and len(token.label) > 3 and context[1] == 'Interpretations'
                and token.label[1] != 'Interpretations'):
            context = compress(
                context,
                [token.label[0], None, token.label[2], '(' + ')('.join(
                    p for p in token.label[3:] if p) + ')'])
            continue
        elif isinstance(token, tokens.Paragraph):
            context = compress(context, token.label)
            token.label = context
        converted.append(token)
    return converted, context
def make_amendments(tokenized):
    """Convert a sequence of (normalized) tokens into a list of amendments.

    Each Verb sets the current operation; each following Paragraph emits
    (verb, label) -- except MOVE, which pairs the previous Paragraph with
    the current one as (verb, (from_label, to_label)).  Assumes
    switch_passive has already run (all verbs active).
    """
    verb = None
    amends = []
    for i in range(len(tokenized)):
        token = tokenized[i]
        if isinstance(token, tokens.Verb):
            assert token.active
            verb = token.verb
        elif isinstance(token, tokens.Paragraph):
            if verb == tokens.Verb.MOVE:
                # MOVE needs a source: the immediately preceding Paragraph.
                if isinstance(tokenized[i-1], tokens.Paragraph):
                    amends.append((
                        verb,
                        (tokenized[i-1].label_text(), token.label_text())))
            elif verb:
                amends.append((verb, token.label_text()))
    return amends
| true |
42963a16ade44800f9d0c694d4dea90a07756598 | Python | TracyCuiCan/ULMFIT-in-Tensorflow | /layers/mixture_of_softmaxes.py | UTF-8 | 1,875 | 2.609375 | 3 | [] | no_license | import tensorflow as tf
class MixtureOfSoftmaxes():
    """Mixture-of-Softmaxes output layer (TF1 graph style).

    Combines `k` softmax components over the vocabulary, weighted by a
    hidden-state-dependent prior pi, to overcome the softmax bottleneck.
    """

    def __init__(self, k, h_size, embeddings):
        # k: number of mixture components; h_size: hidden-state width.
        self.k = k
        self.h_size = h_size
        # assumes embeddings is (vocab_size, embed_size) -- TODO confirm
        self.embed_size = embeddings.shape[1]
        self.embeddings = embeddings
        self.build()
    def build(self):
        """Create the trainable projection variables."""
        # Per-component projection from hidden state to embedding space.
        self.Whk = tf.Variable(tf.random_normal((self.k, self.h_size, self.embed_size)))
        # Projection producing the k mixture (prior) logits.
        self.Wpk = tf.Variable(tf.random_normal((self.h_size, self.k)))
        self._trainable_weights = [self.Whk, self.Wpk, self.embeddings]
    def compute_k_softmaxes(self, k_hct, embeddings):
        """Apply a vocabulary softmax to each of the k projected states."""
        return tf.map_fn(lambda hct : tf.nn.softmax(tf.matmul(hct,
                                                    tf.transpose(embeddings))),
                         k_hct)
    def forward(self, ht, embeddings):
        """Return the mixture distribution over the vocabulary for batch `ht`."""
        # Compute the pi weights
        pi_k = tf.nn.softmax(tf.matmul(ht, self.Wpk))
        # Make the size of the hidden outputs as (b_size, K, 1, hidden_dim)
        ht = tf.expand_dims(ht, axis=1)
        ht = tf.expand_dims(ht, axis=1)
        ht = tf.tile(ht, [1,self.k,1,1])
        # Compute MoS over a batch. This has shape (b_size, k, voc_dim)
        batch_of_sm = tf.squeeze(
            tf.map_fn(
                lambda ht_b: self.compute_k_softmaxes(tf.nn.tanh(tf.matmul(ht_b, self.Whk)),
                                                      embeddings),
                ht))
        # Prepare the prior to be broadcasted, shape is (b_size,k,1)
        # broadcasted to (b_size, k, voc_dim)
        # output after reduce is (b_size, voc_dim)
        pi_k = tf.expand_dims(pi_k, axis=-1)
        output = tf.reduce_sum(batch_of_sm * pi_k, axis=1)
        return output
    def get_trainable_weights(self):
        """Return the variables (and embeddings) to optimize."""
        return self._trainable_weights
| true |
9f6990c98aa26853d168bd92a17e70c23225c162 | Python | FXXDEV/CalculatorRMI-REST | /python/client.py | UTF-8 | 1,028 | 3.5625 | 4 | [] | no_license | # -*- coding: utf-8 -*-
import Pyro.util
import Pyro.core
# Interactive RMI calculator client: looks up the remote "simple" object
# via the Pyro name server and forwards one arithmetic operation to it.
Pyro.core.initClient()
calc = Pyro.core.getProxyForURI("PYRONAME://simple")
# Menu (Portuguese): add / subtract / multiply / divide / exponentiate.
print("Selecione a operação.")
print("1.Adição")
print("2.Subtração")
print("3.Multiplicação")
print("4.Divisão")
print("5.Potenciação")
while True:
    choiceList = [1,2,3,4,5]
    choice = int(input("Digite uma opção(1/2/3/4/5): "))
    if choice in choiceList:
        num1 = float(input("Primeiro número: "))
        num2 = float(input("Segundo número: "))
        # Dispatch the chosen operation to the remote calculator object.
        if choice == 1:
            print(num1, "+", num2, "=", calc.add(num1, num2))
        elif choice == 2:
            print(num1, "-", num2, "=", calc.sub(num1, num2))
        elif choice == 3:
            print(num1, "*", num2, "=", calc.mult(num1, num2))
        elif choice == 4:
            print(num1, "/", num2, "=", calc.div(num1, num2))
        elif choice == 5:
            print(num1, '^', num2, "=", calc.pow(num1, num2))
        break
    else:
        # Invalid option: prompt again.
        print("Operação inválida")
| true |
b117a1292900a91fdffa931de930570bd851c5d3 | Python | RicardoBalderas/algoritmosaleatorios | /collector/python/collector.py | UTF-8 | 902 | 3.46875 | 3 | [] | no_license | import random
# Coupon-collector simulation: how many boxes must be opened, on average,
# to collect all `ncoupons` distinct coupons?  Compares the empirical mean
# against the analytic expectation n * H(n).
tries = 1000     # Times the algorithm will be ran.
ncoupons = 50    # Number of coupons.
boxeslist = []   # List of opened boxes per try.
expected = 0.0   # Expected number of boxes to get all coupons: n * H(n).
mean = 0.0       # Sum (then printed as mean) of boxes opened over all tries.
for i in range(1, ncoupons + 1):
    expected += 1.0 / float(i)
expected *= ncoupons
for t in range(0, tries):
    boxes = 0  # Number of opened boxes this run.
    # list(...) because Python 3's range cannot be mutated with .remove().
    coupons = list(range(0, ncoupons))
    while coupons != []:
        boxes += 1
        # randrange(ncoupons) draws uniformly from 0..ncoupons-1.  The
        # original randint(0, ncoupons) also drew the nonexistent coupon
        # `ncoupons` (randint is inclusive), biasing the mean upwards.
        coupon = random.randrange(ncoupons)
        if coupon in coupons:
            coupons.remove(coupon)
    boxeslist.append(boxes)
for b in boxeslist:
    mean += b
print("La cantidad media de intentos fue " + str(int(mean / tries)) + ".")
print("La cantidad de intentos esperada era " + str(int(expected)) + ".")
| true |
5cb85614f88bf0cb0f81c860925f7c9adee5a8fb | Python | Glaceon31/NMTPhraseDecoding | /thumt/scripts/src2null_prob.py | UTF-8 | 1,858 | 2.65625 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env python
# coding=utf-8
# Copyright 2018 The THUMT Authors
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import operator
import os
import json
def parseargs():
    """Build and parse the command-line arguments of this script.

    --source and --alignment are required file paths; --output is optional.
    """
    description = "get probability table for source word to null"
    usage = "src2null_prob.py [<args>] [-h | --help]"
    parser = argparse.ArgumentParser(description=description, usage=usage)
    parser.add_argument("--source", type=str, required=True,
                        help="source corpus")
    parser.add_argument("--alignment", type=str, required=True,
                        help="alignment file")
    parser.add_argument("--output", type=str, help="output path")
    return parser.parse_args()
if __name__ == "__main__":
    args = parseargs()
    # Read both corpora; a trailing newline leaves an empty final line -- drop it.
    # (Files are now opened via `with` so handles are closed deterministically.)
    with open(args.source, 'r') as f:
        lines_source = f.read().split('\n')
    if lines_source[-1].strip() == '':
        del lines_source[-1]
    with open(args.alignment, 'r') as f:
        lines_align = f.read().split('\n')
    if lines_align[-1].strip() == '':
        del lines_align[-1]
    # result: {'word': [aligned_count, total_count]} while counting; a third
    # component (the source->null probability) is appended afterwards.
    result = {}
    for i in range(len(lines_source)):
        if (i+1) % 10000 == 0:
            print(i+1)  # progress indicator
        s = lines_source[i]
        a = lines_align[i]
        words = s.split(' ')
        aligned = [0] * len(words)
        # Alignment entries look like "srcpos-trgpos"; mark aligned source words.
        for tmp in a.split(' '):
            srcpos, trgpos = tmp.split('-')
            aligned[int(srcpos)] = 1
        for j in range(len(words)):
            # `in` replaces dict.has_key, which was removed in Python 3.
            if words[j] not in result:
                result[words[j]] = [0, 0]
            result[words[j]][0] += aligned[j]
            result[words[j]][1] += 1
    # P(word -> null) = 1 - aligned_count / total_count
    result = {w: [result[w][0], result[w][1], 1 - 1.0*result[w][0]/result[w][1]] for w in result}
    with open(args.output, 'w') as f:
        json.dump(result, f)
| true |
f76b76010801a5726eee49f212aabeb0da877e23 | Python | MinistereSupRecherche/bso | /scripts/process_publications.py | UTF-8 | 3,135 | 2.5625 | 3 | [
"MIT"
] | permissive | import requests
import math
import datetime
from joblib import Parallel, delayed
# Local API endpoints (the data service also runs on the same host/port).
APP_URL = "http://0.0.0.0:5000/publications"
APP_URL_DATA = "http://0.0.0.0:5000/publications"
# Inclusive range of publication years to process.
YEAR_START = 2013
YEAR_END = 2013
# SECURITY NOTE(review): hard-coded Basic-auth credentials committed to
# source; these should come from the environment or a secrets store.
header = {'Authorization': 'Basic YWRtaW46ZGF0YUVTUjIwMTk='}
NB_JOBS = 10 # number of DOIs processed in parallel
def update_unpaywall_dump(elt, etag):
    """Mark one DOI record as treated in the unpaywall dump collection.

    Sends a PATCH with the record's etag (optimistic concurrency via
    If-Match).  Mutates `elt` by setting elt['treated'] = True.
    """
    patch_url = APP_URL_DATA
    patch_url += "/dumps/unpaywall/{}".format(elt['doi'])
    patch_headers = header.copy()
    patch_headers['If-Match'] = etag
    elt['treated'] = True
    response = requests.patch(patch_url, headers=patch_headers, json=elt)
    if not response.ok:
        print("MAJ unpaywall_dump ERREUR pour le doi {}".format(elt['doi']))
def process_doi_unpaywall(elt):
    """Send one dump record to the analyzer service, then flag it as treated.

    Mutates `elt`: datastore bookkeeping fields are stripped before
    submission (the etag is kept aside for the follow-up PATCH).
    """
    etag = elt['etag']
    # Strip datastore bookkeeping fields before re-submitting the record.
    for bookkeeping in ('modified_at', 'created_at', '_id', 'etag'):
        elt.pop(bookkeeping, None)
    # Hand the record to the unpaywall analyzer service.
    analyzer_url = APP_URL + "/analyzers/unpaywall_publication"
    response = requests.post(analyzer_url, json=elt, headers=header)
    if not response.ok:
        print("MAJ publication ERREUR pour le doi {}".format(elt['doi']))
    # Flag the source record so it is not picked up again.
    update_unpaywall_dump(elt, etag)
def keep_updating(year):
    """Count remaining untreated dump pages for `year`.

    Returns {'nb_pages': pages left (1000 records per page),
             'data': the first page of untreated records}.
    """
    page_size = 1000
    page = 0
    url = APP_URL_DATA + "/dumps/unpaywall/?where={\"treated\":false,\"year\":"
    url += str(year) + "}&max_results=" + str(page_size) + "&page=" + str(page)
    r = requests.get(url, headers=header)
    payload = r.json()
    total = payload['meta']['total']
    pages_left = math.ceil(total / page_size)
    print("Still {} pages to process for year {}".format(pages_left, year))
    return {'nb_pages': pages_left, 'data': payload['data']}
def process_year(year):
    """Drain all untreated dump pages for `year`, NB_JOBS DOIs at a time.

    Re-queries the remaining count after every batch; an iteration cap
    (initial page count + 2) guards against looping forever if records
    fail to flip to treated.
    """
    status = keep_updating(year)
    iteration_cap = status['nb_pages'] + 2
    iteration = 0
    while status['nb_pages'] > 0 and iteration < iteration_cap:
        batch_started = datetime.datetime.now()
        Parallel(n_jobs=NB_JOBS)(
            delayed(process_doi_unpaywall)(elt) for elt in status['data'])
        batch_ended = datetime.datetime.now()
        print("{}: {}".format(
            status['nb_pages'], batch_ended - batch_started), end=" -- ")
        status = keep_updating(year)
        iteration += 1
def test():
    """Smoke test: process a single known DOI end-to-end, if it is still
    untreated in the dump collection; otherwise just report and return."""
    url = APP_URL_DATA + "/dumps/unpaywall/?where={\"doi\":\""
    url += "10.4000/rechercheformation.2839\",\"treated\":false}"
    try:
        # Take the first matching record; IndexError/network errors mean
        # the record is absent or already processed.
        test_json = requests.get(url, headers=header).json()['data'][0]
        print(test_json)
    except Exception:
        print("The test element is not in the unpaywall dump collection \
              or it has already be processed")
        return
    process_doi_unpaywall(test_json)
# test()  # one-off smoke test, left disabled
# Entry point: process every configured year (inclusive range).
for year in range(YEAR_START, YEAR_END + 1):
    process_year(year)
| true |
a39a5dca097ecfee0f9ea8426d9a3ede94d95fc1 | Python | xiphodon/ML_demo | /ML_demo_03.py | UTF-8 | 5,342 | 3.375 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/2/15 19:04
# @Author : GuoChang
# @Site : https://github.com/xiphodon
# @File : ML_demo_03.py
# @Software: PyCharm Community Edition
# 梯度下降
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from mpl_toolkits.mplot3d import Axes3D
def init():
    '''
    Load the PGA tour dataset (driving distance vs accuracy) from disk.
    :return: pandas DataFrame read from data/pga.csv
    '''
    return pd.read_csv(r"data/pga.csv")
def ML_01(data):
    '''
    Gradient descent demo.

    Normalizes the PGA columns, fits a reference sklearn LinearRegression,
    then hand-implements the cost function, its partial derivatives and a
    batch gradient-descent loop, plotting intermediate results.

    :param data: DataFrame with "distance" and "accuracy" columns
                 (mutated in place by the normalization below)
    :return: None (prints and plots only)
    '''
    # Normalize both columns to zero mean / unit variance (z-scores).
    data["distance"] = (data["distance"] - data["distance"].mean()) / data["distance"].std()
    data["accuracy"] = (data["accuracy"] - data["accuracy"].mean()) / data["accuracy"].std()
    # data.distance = (data.distance - data.distance.mean()) / data.distance.std()
    # data.accuracy = (data.accuracy - data.accuracy.mean()) / data.accuracy.std()
    print(data.head())
    # plt.scatter(data["distance"], data["accuracy"])
    # plt.xlabel("normalized distance")
    # plt.ylabel("normalized accuracy")
    # plt.show()
    print("shape of the series:", data["distance"].shape)
    # NOTE(review): [:, np.newaxis] on a pandas Series relies on old pandas
    # behaviour; modern versions need .values[:, None].  TODO confirm target version.
    print("shape with newaxis:", data["distance"][:, np.newaxis].shape)  # add an axis: (m,) -> (m, 1) column for sklearn
    lr = LinearRegression()
    lr.fit(data["distance"][:, np.newaxis], data["accuracy"])
    theta_1 = lr.coef_[0]
    print(theta_1)
    # A minimal implementation of the cost function.
    def cost(theta_0, theta_1, x, y):
        '''
        Cost function (half mean squared error).
        :param theta_0: intercept
        :param theta_1: slope
        :param x: inputs
        :param y: targets
        :return: prediction cost J
        '''
        J = 0
        m = len(x) # number of samples
        for i in range(m):
            h = theta_1 * x[i] + theta_0 # regression prediction
            J += (h - y[i]) ** 2 # accumulate squared error
        J /= (2 * m) # average (halved), i.e. the cost
        return J
    print(cost(0, 1, data["distance"], data["accuracy"]))
    theta_0 = 100
    theta_1_list = np.linspace(-3, 2, 100)
    costs = []
    for theta_1 in theta_1_list:
        costs.append(cost(theta_0, theta_1, data["distance"], data["accuracy"]))
    plt.plot(theta_1_list, costs) # cost as a function of theta_1
    plt.show()
    # Example surface: cost-like bowl over (theta_0, theta_1).
    x = np.linspace(-10, 10, 100)
    y = np.linspace(-10, 10, 100)
    # build the grid of sample points
    X, Y = np.meshgrid(x, y)
    Z = X ** 2 + Y ** 2
    fig = plt.figure()
    # NOTE(review): fig.gca(projection=...) is removed in recent Matplotlib;
    # use fig.add_subplot(projection="3d") there.
    ax = fig.gca(projection="3d")
    ax.plot_surface(X=X, Y=Y, Z=Z)
    plt.show()
    # Partial derivatives of the cost w.r.t. theta_1 and theta_0.
    def partial_cost_theta_1(theta_0, theta_1, x, y):
        '''
        Partial derivative of the cost w.r.t. theta_1 (vectorized).
        :param theta_0: intercept
        :param theta_1: slope
        :param x: inputs (array-like)
        :param y: targets (array-like)
        :return: dJ/dtheta_1
        '''
        h = theta_0 + theta_1 * x
        diff = (h - y) * x
        partial = diff.sum() / (x.shape[0])
        return partial
    def partial_cost_theta_0(theta_0, theta_1, x, y):
        '''
        Partial derivative of the cost w.r.t. theta_0 (vectorized).
        :param theta_0: intercept
        :param theta_1: slope
        :param x: inputs (array-like)
        :param y: targets (array-like)
        :return: dJ/dtheta_0
        '''
        h = theta_0 + theta_1 * x
        diff = h - y
        partial = diff.sum() / (x.shape[0])
        return partial
    partial_1 = partial_cost_theta_1(0, 5, data["distance"], data["accuracy"])
    print("partail_1 = ", partial_1)
    partial_0 = partial_cost_theta_0(0, 5, data["distance"], data["accuracy"])
    print("partail_0 = ", partial_0)
    def gradient_descent(x, y, alpha=0.1, theta_0=0, theta_1=0):
        '''
        Batch gradient descent.
        :param x: inputs
        :param y: targets
        :param alpha: learning rate
        :param theta_0: initial intercept
        :param theta_1: initial slope
        :return: dict with the final thetas and the cost history
        '''
        max_epochs = 1000 # maximum number of iterations
        counter = 0 # iteration counter
        c = cost(theta_0, theta_1, x, y) # initial cost
        costs = [c] # cost history
        convergence_thres = 0.000001 # convergence threshold (stop condition)
        cprev = c + 10
        # Parameter histories (collected but not returned / unused).
        theta_0_list = [theta_0]
        theta_1_list = [theta_1]
        while (np.abs(cprev - c) > convergence_thres) and (counter < max_epochs):
            # Stop once successive costs differ by less than the threshold,
            # or the epoch cap is reached.
            cprev = c
            updata_0 = alpha * partial_cost_theta_0(theta_0, theta_1, x, y)
            updata_1 = alpha * partial_cost_theta_1(theta_0, theta_1, x, y)
            theta_0 -= updata_0
            theta_1 -= updata_1
            theta_0_list.append(theta_0)
            theta_1_list.append(theta_1)
            c = cost(theta_0, theta_1, x, y)
            costs.append(c)
            counter += 1
        return {"theta_0":theta_0, "theta_1":theta_1, "costs":costs}
    print("Theta_1 = ", gradient_descent(data["distance"], data["accuracy"])["theta_1"])
    descend = gradient_descent(data["distance"], data["accuracy"], alpha=0.01)
    plt.scatter(range(len(descend["costs"])), descend["costs"]) # x: iteration, y: cost
    plt.show()
if __name__ == "__main__":
data = init()
ML_01(data) | true |
50916c279cf170b471609bfc23efb70022917812 | Python | gjkood/analyze_this | /gen_test_data.py | UTF-8 | 877 | 3.109375 | 3 | [] | no_license | import string
import random
import argparse
MAX_COL_SIZE = 65535  # hard upper bound on any generated column width
col_data = ''         # shared zero-filled template, built lazily by gen_line
def gen_column_data(col_size):
    """Return a string of zero characters, `col_size` wide (capped at
    MAX_COL_SIZE).

    Uses str.zfill: the Python-2-only string.zfill module function was
    removed in Python 3.  Note that for col_size <= 1 the result is still
    the single seed character '0' (zfill pads, it never truncates),
    matching the original behaviour.
    """
    if col_size > MAX_COL_SIZE:
        col_size = MAX_COL_SIZE
    return '0'.zfill(col_size)
def gen_line(num_cols, col_size, delimiter):
    """Build one delimited row of `num_cols` columns, each a random-width
    (0..col_size) slice of the shared zero-filled template."""
    global col_data
    if not col_data:  # build the shared template only once
        col_data = gen_column_data(col_size)
    cells = []
    for _ in range(num_cols):
        width = random.randint(0, col_size)
        cells.append(col_data[:width])
    return delimiter.join(cells)
def gen_test_data(num_rows, num_cols, col_size, delimiter):
    """Print `num_rows` rows of random delimited test data to stdout,
    each prefixed with "Line <row-number>"."""
    for i in range(num_rows):
        row_data = gen_line(num_cols, col_size, delimiter)
        # print() call: the original used the Python-2 print statement,
        # which is a syntax error under Python 3.
        print('Line %s %s' % (i, row_data))
if __name__ =='__main__':
    gen_test_data(100, 5, 20, '|')
| true |
629120037844197c2f4749e324cc135036d18eee | Python | manoj2509/Python-Practice | /CLRS/2.1-4 Array Int Sum.py | UTF-8 | 384 | 3.59375 | 4 | [] | no_license | __author__ = 'Mj'
#Sum of 2 n-digit numbers. Numbers are stored in list
a = input().strip()
b = input().strip()
c = list()
n = len(a)
carry = 0
for i in range(n-1, -1, -1):
temp = int(a[i]) + int(b[i]) + carry
if(temp > 10 ):
carry = 1
c.insert(0, temp - 10)
else:
carry = 0
c.insert(0, temp)
if(carry == 1):
c.insert(0, carry)
print(c) | true |
c329deb202536ea1b23d010fc21c75bc132c2944 | Python | bufan77/InterviewQuestion | /1.py | UTF-8 | 990 | 3.40625 | 3 | [] | no_license | # i = 1
# while i < 6:
# j = 0
# while j < i:
# print('*', end='')
# j += 1
# print('')
# i += 1
# i = 1
# while i <=9:
# j = 1
# while j <= i:
# print('%d * %d = %d'%(i, j, i*j), end='\t')
# j += 1
# i += 1
# print('')
# dict = {'name':'xiaomin','age':'22',"gender":'male'}
# for value in dict:
# print(value, dict[value])
# for i in range(1, 10):
# for j in range(1, i+1):
# print('%d * %d = %d'%(i, j, i*j), end='\t')
# print('')
# o = 0
# for i in range(101):
# if i%2 == 1:
# o += i
# i += 1
# print(i)
# print(o)
# o = 0
# for i in range(1,5):
# for j in range(1,5):
# if j == i:
# continue
# else:
# for h in range(1,5):
# if h == i or h == j:
# continue
# else:
# print(str(i)+str(j)+str(h))
# o += 1
# print(o)
| true |
517dce0cbeedd6cb5c1cfdd41856955a72ee977a | Python | life-efficient/The-Month-of-ML | /day_10-pre-trained_networks.py | UTF-8 | 3,335 | 2.875 | 3 | [] | no_license | import torch
import pandas as pd
import torchvision.models as models
from PIL import Image
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
import matplotlib.pyplot as plt
import numpy as np
id_to_classname = {574:'golf ball', 471:'cannon', 455:'bottlecap'}
class ClassificationDataset(Dataset):
    """Image-classification dataset backed by a CSV of (Filename, Label) rows."""

    def __init__(self, images_root='day_10-example_data/images/', csv='day_10-example_data/labels.csv', transform=None):
        table = pd.read_csv(csv)
        self.csv = table
        self.images_root = images_root
        self.fnames = table['Filename'].tolist()
        self.labels = table['Label'].tolist()
        self.transform = transform

    def __len__(self):
        return len(self.fnames)

    def __getitem__(self, idx):
        # Plain string concatenation, so images_root must keep its trailing slash.
        img = Image.open(self.images_root + self.fnames[idx])
        label = self.labels[idx]
        if self.transform:
            img, label = self.transform((img, label))
        return img, label
class SquareResize():
    """Adjust the aspect ratio of an image to make it a square of side
    ``output_size``: scale so the shorter side equals ``output_size``,
    then crop the overhang from the top-left corner.
    """

    def __init__(self, output_size):
        # BUG FIX: the old assert also accepted a tuple, but __call__ uses
        # output_size as a scalar (scale factors and the crop box), so a tuple
        # crashed later with a confusing error. Require an int up front.
        assert isinstance(output_size, int)
        self.output_size = output_size

    def __call__(self, sample):
        image, label = sample
        # NOTE: PIL's Image.size is (width, height); the original unpacked it
        # as (h, w), which worked only because the math is symmetric.
        width, height = image.size
        if width > height:
            new_height = self.output_size
            scale = new_height / height
            new_width = scale * width
        elif height > width:
            new_width = self.output_size
            scale = new_width / width
            new_height = scale * height
        else:
            new_height, new_width = self.output_size, self.output_size
        # int() truncates any fractional part of the computed dimensions.
        new_height, new_width = int(new_height), int(new_width)
        image = image.resize((new_width, new_height))
        image = image.crop((0, 0, self.output_size, self.output_size))
        return image, label
class ToTensor():
    """Convert an (image, label) sample into a (CHW float tensor, label) pair."""

    def __init__(self):
        pass

    def __call__(self, sample):
        image, label = sample
        # Scale pixel values to [0, 1] and reorder HWC -> CHW for PyTorch.
        scaled = np.array(image) / 255
        chw = scaled.transpose((2, 0, 1))
        return torch.Tensor(chw), label
def test():
    """Evaluate `mymodel` over the whole test set, visualising each prediction.

    Uses module globals mymodel, test_samples, test_data and id_to_classname.
    Returns the accuracy rounded to 4 decimal places.
    """
    fig = plt.figure(figsize=(5, 5))
    ax = fig.add_subplot(111)
    img_label_text = ax.text(0, -5, '', fontsize=15)
    print('Started evaluation...')
    mymodel.eval()  # put model into evaluation mode
    # calculate the accuracy of our model over the whole test set in batches
    correct = 0
    # FIX: run inference under no_grad so no autograd graph is built.
    with torch.no_grad():
        for x, y in test_samples:
            h = mymodel.forward(x)
            pred = h.data.max(1)[1]
            correct += pred.eq(y).sum().item()
            y_ind = 0  # visualise the first item of the batch
            # FIX: the original computed np.array(x[y_ind]) twice; only the
            # transposed (H, W, C) version is used by imshow.
            im = np.array(x[y_ind]).transpose(1, 2, 0)
            # NOTE(review): id_to_classname maps only 3 ImageNet ids; any other
            # prediction raises KeyError — confirm that is intended for the demo.
            predicted_class = id_to_classname[h.max(1)[1][y_ind].item()]
            ax.imshow(im)
            img_label_text.set_text('Predicted class: '+ str(predicted_class))
            fig.canvas.draw()
            plt.pause(1)
    acc = round(correct/len(test_data), 4)
    print('Test accuracy', acc)
    return acc
# Preprocessing pipeline: square-crop to 224x224, then convert to a CHW tensor.
mytransforms = []
mytransforms.append(SquareResize(224))
mytransforms.append(ToTensor())
mytransforms = transforms.Compose(mytransforms)
# One image per batch so test() can visualise each prediction individually.
batch_size=1
test_data = ClassificationDataset(transform=mytransforms)
test_samples = DataLoader(test_data, batch_size=batch_size, shuffle=True)
# Pretrained ImageNet ResNet-18; no fine-tuning is performed here.
mymodel = models.resnet18(pretrained=True)
test()
| true |
5d0db1b5d7c31d202f8f264df633a9b80849ed1c | Python | zeno17/LessIsMore | /run_measure_loss.py | UTF-8 | 4,419 | 2.75 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 30 14:53:43 2021
"""
import argparse
import os
import pickle
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForMaskedLM
from transformers import BertTokenizer
from transformers import DataCollatorForWholeWordMask
from dataset.dataset import StrategizedTokenizerDataset, DefaultTokenizerDataset
def run_loss_benchmark(dataloader, model):
    """Sum the batch-size-weighted loss of `model` over `dataloader`.

    No parameters are updated; the summed loss is returned.
    """
    total_loss = 0
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    model.to(device)
    # NOTE(review): train() keeps dropout active, making the benchmark noisy;
    # eval() would give a deterministic measurement — confirm which is wanted.
    model.train()
    # FIX: run the forward passes under no_grad so no autograd graph is
    # retained. This is what caused the memory errors previously worked
    # around with `del outputs`.
    with torch.no_grad():
        for batch in tqdm(dataloader):
            inputs = {k: v.to(device) for k, v in batch.items()}
            outputs = model.forward(**inputs)
            loss = outputs.loss.item()
            # NOTE(review): weighting by the nominal batch_size over-counts a
            # final partial batch — TODO confirm this is acceptable.
            total_loss += loss * dataloader.batch_size
    return total_loss
def main():
    """CLI entry point: build the chosen benchmark dataloader, compute the
    summed MLM loss of the pretrained model over it, and pickle the result
    next to the model checkpoint.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    # NOTE(review): `default=` on required arguments is ignored by argparse.
    parser.add_argument("--model-dir", required=True,
                        default="test_experiment/model2/",
                        help='Where the models are')
    parser.add_argument("--model-name", required=True,
                        default="test_experiment/model2/",
                        help='Which pretrained model to finetune')
    parser.add_argument("--cache-dir", required=True,
                        help="Location of pre-made files")
    parser.add_argument("--data-dir", required=True,
                        help="Location of saved pytorch tensors")
    parser.add_argument('--run-mode', required=True,
                        type=str,
                        default='full',
                        help="Whether to run a 1/100 sample or full version of the finetuning.")
    parser.add_argument("--batch_size", required=False,
                        type=int, default=32,
                        help="Desired batch size")
    parser.add_argument("--dataset", required=True,
                        type=str,
                        default='StrategizedMasking',
                        help='Whether to select the RandomMasking or StrategizedMasking')
    args = parser.parse_args()

    model_dir = args.model_dir
    model_name = args.model_name
    cache_dir = args.cache_dir
    data_dir = args.data_dir
    dataset = args.dataset
    batch_size = args.batch_size
    run_mode = args.run_mode

    # Select the book subset matching the run mode.
    if run_mode == 'full':
        book_file = 'subset_meta_ratio_100M.pkl'
    elif run_mode == 'test':
        book_file = 'subset_meta_ratio_100K.pkl'
    else:
        raise ValueError('Invalid value for argument --run-mode. Needs to be "full" or "test"')

    with open(os.path.join(cache_dir, book_file), 'rb') as f:
        book_list = pickle.load(f)['subset_booklist']
    print('Loaded book_list')

    print('Creating dataset object')
    # NOTE(review): any other --dataset value leaves `dataloader` undefined and
    # fails later with NameError — consider validating via argparse choices.
    if dataset == 'StrategizedMasking':
        benchmark_dataset = StrategizedTokenizerDataset(datadir=data_dir, max_seq_length=128)
        benchmark_dataset.populate(book_list=book_list)
        dataloader = DataLoader(benchmark_dataset,
                                batch_size=batch_size)
    elif dataset == 'RandomMasking':
        train_dataset_og_bert = DefaultTokenizerDataset(datadir=data_dir, max_seq_length=128)
        train_dataset_og_bert.populate(book_list=book_list)
        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer,
                                                     mlm=True,
                                                     mlm_probability=0.15)
        dataloader = DataLoader(train_dataset_og_bert, batch_size=batch_size, collate_fn=data_collator)
    print('Created dataloader object with populated dataset')

    model = AutoModelForMaskedLM.from_pretrained(os.path.join(model_dir, model_name, '0'))
    print('Loaded model')

    total_loss = run_loss_benchmark(dataloader, model)
    print('Computing loss complete: {}'.format(total_loss))

    out_path = os.path.join(model_dir, model_name, '0', '{}_benchmark_result.pkl'.format(dataset))
    with open(out_path, 'wb') as f:
        pickle.dump(total_loss, f)
    # BUG FIX: the original printed `format(f)` — the closed file object's
    # repr — instead of the destination path.
    print('Saved loss to {}'.format(out_path))


if __name__ == "__main__":
    main()
| true |
3a136fd7649bc67fc8d16b4273efc65c9886d1e8 | Python | AkihikoWatanabe/ApproxAP | /libs/updater.py | UTF-8 | 1,974 | 2.796875 | 3 | [] | no_license | # coding=utf-8
"""
A python implementation of ApproxAP.
"""
import numpy as np
import scipy.sparse as sp
from joblib import Parallel, delayed
from tqdm import tqdm
from update_func_approxap import approx_ap
class Updater():
    """ApproxAP weight updater (listwise learning-to-rank)."""

    def __init__(self, eta=0.01, alpha=10, beta=1):
        """
        Params:
            eta(float): learning rate
            alpha(int): scaling constant for approximated position function
            beta(int): scaling constant for approximated truncation function
        """
        self.eta = eta
        self.alpha = alpha
        self.beta = beta

    def __get_shuffled_qids(self, x_dict, y_dict, epoch):
        """
        Params:
            x_dict(dict): dict of csr_matrix of feature vectors.
            y_dict(dict): dict of np.ndarray of labels corresponding to each feature vector
            epoch(int): current epoch number (used as the random seed)
        Returns:
            qids(np.ndarray): shuffled qids
        """
        # BUG FIX: on Python 3, dict.keys() is a view and np.asarray(view)
        # produces a 0-d object array that cannot be indexed by a permutation;
        # materialise the keys as a list first.
        qids = np.asarray(list(x_dict.keys()))
        N = len(qids)  # number of qids
        np.random.seed(epoch)  # deterministic shuffle per epoch
        perm = np.random.permutation(N)
        return qids[perm]

    def update(self, x_dict, y_dict, weight):
        """Update the weight parameter using ApproxAP.

        Params:
            x_dict(dict): dict of csr_matrix of feature vectors.
            y_dict(dict): dict of np.ndarray of labels corresponding to each feature vector
            weight(Weight): class of weight
        """
        assert len(x_dict) == len(y_dict), "invalid # of qids"
        qids = self.__get_shuffled_qids(x_dict, y_dict, weight.epoch)
        w = weight.get_dense_weight()
        for qid in tqdm(qids):
            w = approx_ap(x_dict[qid].toarray(), y_dict[qid], w, self.eta, self.alpha, self.beta)
        weight.set_weight(sp.csr_matrix(w.reshape((1, weight.dims))))
        weight.epoch += 1
| true |
2092f12fa3c47b9c2bb809ff9a53161d21ffb130 | Python | cat4er/cl-srv-app | /chat/Lesson2/task2.py | UTF-8 | 1,807 | 3.46875 | 3 | [] | no_license | # ### 2. Задание на закрепление знаний по модулю json.
# Есть файл orders в формате JSON с информацией о заказах. Написать скрипт, автоматизирующий его заполнение данными.
# Для этого:
# Создать функцию write_order_to_json(), в которую передается 5 параметров — товар (item), количество (quantity),
# цена (price), покупатель (buyer), дата (date). Функция должна предусматривать запись данных в виде словаря
# в файл orders.json. При записи данных указать величину отступа в 4 пробельных символа;
# Проверить работу программы через вызов функции write_order_to_json() с передачей в нее значений каждого параметра.
import json
# Sample order used by the demo call at the bottom of the script.
i = 'Macbook Pro m1 16Gb 1TB'  # item
q = 1  # quantity
p = 199990  # price
b = 'Victor Pavlyuk'  # buyer
d = '17.12.2021'  # date
def write_order_to_json(item, quantity, price, buyer, date):
    """Store the given order fields under the 'orders' key of orders.json.

    Only the 'orders' section is replaced; every other key already present
    in the file is preserved. The file is re-written with a 4-space indent.
    """
    with open('orders.json') as src:
        document = json.loads(src.read())
    document.update({'orders': [
        {'item': item},
        {'quantity': quantity},
        {'price': price},
        {'buyer': buyer},
        {'date': date}
    ]})
    with open('orders.json', 'w') as dst:
        json.dump(document, dst, indent=4)
write_order_to_json(i, q, p, b, d) | true |
323a98e466671ee8d7437ce3498daa5b765d6893 | Python | SebastianKuhn/OOPBallers | /Loser_Groups/Group_2/SourceFiles/MainProject/Calculation.py | UTF-8 | 2,279 | 3.578125 | 4 | [] | no_license | # -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from MainProject.priceCollection import priceCollection
from MainProject.tweetCollection import tweetCollection
class Calculation:
    """Linear-regression helper used to predict crypto prices from tweet volume.

    Model: Y = b1*X + b0, where X is the number of tweets per day
    (independent) and Y is the currency value (dependent).
    """

    def __init__(self):
        # Kept for compatibility with the original class; otherwise unused.
        self.Test = None

    def compute_coef(self, xNb, yNb):
        """Fit Y = b1*X + b0 by least squares and return (b1, b0).

        slope     b1 = (E[x]*E[y] - E[x*y]) / (E[x]^2 - E[x^2])
        intercept b0 = E[y] - E[x]*b1
        Both are rounded to two decimals; note b0 is computed from the
        already-rounded b1, exactly as in the original implementation.
        """
        xs = np.array(xNb)
        ys = np.array(yNb)
        mean_x = np.mean(xs)
        mean_y = np.mean(ys)
        slope = round((mean_x * mean_y - np.mean(xs * ys)) /
                      (mean_x * mean_x - np.mean(xs * xs)), 2)
        intercept = round(mean_y - mean_x * slope, 2)
        return slope, intercept

    def regressionCrypto(self, currency, cryptoVal, cryptoNum):
        """Plot the tweet/price regression and print tomorrow's prediction."""
        prices = priceCollection().getPrice(cryptoVal)
        tweet_counts = tweetCollection().getAnalysis(cryptoVal)
        latest_tweets = tweetCollection().get_last_tweets(cryptoVal)
        slope, intercept = self.compute_coef(tweet_counts, prices)
        current_price = priceCollection().getActualPrice(cryptoNum)
        print(slope, intercept)
        # Fitted value for every observed tweet count, plus the prediction
        # for the most recent tweet volume.
        fitted = [(slope * t) + intercept for t in tweet_counts]
        predicted_price = slope * latest_tweets + intercept
        print(predicted_price)
        print(fitted)
        # Scatter the observations, draw the fitted line and show the result.
        figure = plt.figure()
        figure.canvas.set_window_title('%s Prediction' % currency)
        plt.scatter(tweet_counts, prices, color="red")
        plt.plot(tweet_counts, fitted)
        plt.ylabel("%s value in $" % currency)
        plt.xlabel("Tweets per day")
        plt.title('''Now 1 %s costs $ %s.-
        We predict it will be worth $ %s .- tomorrow.''' % (currency, current_price, predicted_price))
        plt.show()
| true |
17cd27e9b65eb07e70c57d7eb4e792b99c59af64 | Python | luliyucoordinate/Leetcode | /src/0239-Sliding-Window-Maximum/0239.py | UTF-8 | 576 | 3.0625 | 3 | [] | no_license | class Solution:
def maxSlidingWindow(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: List[int]
"""
if not nums:
return list()
res, stack = list(), list()
for i, val in enumerate(nums):
while stack and nums[stack[-1]] < val:
stack.pop()
stack.append(i)
if i - stack[0] >= k:
stack.pop(0)
if i >= k - 1:
res.append(nums[stack[0]])
return res | true |
37aec3222fd48326e27d87cfb35c0df7757fdce6 | Python | sentimentinvestor/sentipy | /tests/sentipy_tests.py | UTF-8 | 5,363 | 2.890625 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | """Tests various methods of the SentiPy module."""
import os
import unittest
# vcrpy is untyped
# Therefore, ignore all vcr decorators
import vcr # type: ignore[import]
from beartype import beartype
from sentipy._typing_imports import ListType
from sentipy.sentipy import Sentipy
class SentipyTestCase(unittest.TestCase):
    """Testing class for the SentiPy module.

    Network interactions are replayed from pre-recorded VCR cassettes
    (vcr_cassettes/*.yml), so the tests do not hit the live API.
    """

    # Client under test; created in setUp from environment credentials.
    sentipy: Sentipy

    @beartype
    def setUp(self) -> None:
        """Checks whether the key and token have been defined, and then authenticates."""
        sentipy_key = os.getenv("API_SENTIMENTINVESTOR_KEY")
        sentipy_token = os.getenv("API_SENTIMENTINVESTOR_TOKEN")

        # Makes the sentipy args str rather than Optional[str]
        if sentipy_key is None or sentipy_token is None:
            self.fail(
                "API_SENTIMENTINVESTOR_KEY or API_SENTIMENTINVESTOR_TOKEN is not set"
            )

        self.sentipy = Sentipy(
            key=sentipy_key,
            token=sentipy_token,
        )

    @beartype
    def assertHasAttr(self, object: object, attr: str) -> None:
        """Checks whether a dictionary has a certain attribute or not.

        Args:
            object: The dictionary to search for the attribute
            attr: Which attribute to search for in the object

        Raises:
            AssertionError: If the attribute is not in the object
        """
        # `object` shadows the builtin; kept as-is to match the public helper name.
        self.assertTrue(
            hasattr(object, attr), f"{object!r} does not have attribute {attr!r}"
        )

    @beartype
    def assertHasAttrs(self, object: object, attrs: ListType[str]) -> None:
        """Checks whether a dictionary has certain attributes or not.

        Args:
            object: The dictionary to search for the attribute
            attrs: Which attributes to search for in the object

        Raises:
            AssertionError: If any of the attributes aren't in the object
        """
        for attr in attrs:
            self.assertHasAttr(object, attr)

    @beartype
    def check_basics(self, data: object) -> None:
        """Checks whether the api response was successful.

        Args:
            data: The response from the Sentiment Investor API

        Raises:
            AssertionError: If the response was not successful.
        """
        # The data will have a success attribute
        self.assertTrue(data.success)  # type: ignore[attr-defined]
        self.assertHasAttr(data, "symbol")

    @vcr.use_cassette("vcr_cassettes/parsed.yml")  # type: ignore[misc]
    @beartype
    def test_parsed(self) -> None:
        """Tests SentiPy's `parsed` method."""
        data = self.sentipy.parsed("AAPL")
        self.check_basics(data)
        self.assertHasAttrs(data, ["sentiment", "AHI", "RHI", "SGP"])

    @vcr.use_cassette("vcr_cassettes/raw.yml")  # type: ignore[misc]
    @beartype
    def test_raw(self) -> None:
        """Tests SentiPy's `raw` method."""
        data = self.sentipy.raw("AAPL")
        self.check_basics(data)
        self.assertHasAttrs(
            data,
            [
                "reddit_comment_mentions",
                "reddit_comment_sentiment",
                "reddit_post_mentions",
                "reddit_post_sentiment",
                "tweet_mentions",
                "tweet_sentiment",
                "stocktwits_post_mentions",
                "stocktwits_post_sentiment",
                "yahoo_finance_comment_mentions",
                "yahoo_finance_comment_sentiment",
            ],
        )

    # NOTE(review): unlike the other tests this one has no @beartype decorator —
    # confirm whether that omission is intentional.
    @vcr.use_cassette("vcr_cassettes/quote.yml")  # type: ignore[misc]
    def test_quote(self) -> None:
        """Tests SentiPy's `quote` method."""
        data = self.sentipy.quote("AAPL")
        self.check_basics(data)
        self.assertHasAttrs(
            data,
            [
                "sentiment",
                "AHI",
                "RHI",
                "SGP",
                "reddit_comment_mentions",
                "reddit_comment_sentiment",
                "reddit_post_mentions",
                "reddit_post_sentiment",
                "tweet_mentions",
                "tweet_sentiment",
                "stocktwits_post_mentions",
                "stocktwits_post_sentiment",
                "yahoo_finance_comment_mentions",
                "yahoo_finance_comment_sentiment",
            ],
        )

    @vcr.use_cassette("vcr_cassettes/bulk.yml")  # type: ignore[misc]
    @beartype
    def test_bulk(self) -> None:
        """Tests SentiPy's `bulk` method."""
        data = self.sentipy.bulk(["AAPL", "TSLA", "PYPL"])
        # One result object per requested ticker.
        self.assertEqual(len(data), 3)
        for stock in data:
            self.assertHasAttrs(
                stock,
                [
                    "sentiment",
                    "AHI",
                    "RHI",
                    "SGP",
                    "reddit_comment_mentions",
                    "reddit_comment_sentiment",
                    "reddit_post_mentions",
                    "reddit_post_sentiment",
                    "tweet_mentions",
                    "tweet_sentiment",
                    "stocktwits_post_mentions",
                    "stocktwits_post_sentiment",
                    "yahoo_finance_comment_mentions",
                    "yahoo_finance_comment_sentiment",
                ],
            )
],
)
if __name__ == "__main__":
    # Allow running this test module directly: python sentipy_tests.py
    unittest.main()
| true |
7cf245f2204839793e0edd98c66afd1c4e0f8743 | Python | bsadoski/entra21 | /aula3/poo.py | UTF-8 | 582 | 4.34375 | 4 | [] | no_license | # Criando uma classe
class Cachorro:
# atributo de classe
especie = "Canis familiaris"
# inicialização da classe
def __init__(self, nome, idade):
# atributos de instancia
self.nome = nome
self.idade = idade
# alterando a descrição
#def __str__(self):
# return f"{self.nome} tem {self.idade} anos de idade"
def emitir_som(self):
print("Woof Woof")
if __name__ == "__main__":
    c = Cachorro("Bilu", 10)
    print(c)  # default object repr (the custom __str__ is commented out)
    # Is `c` an instance of Cachorro?
    print(isinstance(c, Cachorro))
c.emitir_som() | true |
e2b809fe02db0631f9bfd0e1f42f457592a90c97 | Python | SuperLouV/CS559A | /HW03/HW03P2FLD.py | UTF-8 | 2,221 | 3.25 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project -> File :CS559A -> HW03P2FLD
@IDE :PyCharm
@Author :Yilin Lou
@Date :5/2/20 4:11 下午
@Group :Stevens Institute of technology
'''
import numpy as np
import matplotlib.pyplot as plt
# Class-1 training samples (5 points, 2-D).
D1 = np.array([[-2, 1],
               [-5, -4],
               [-3, 1],
               [0, -3],
               [-8, -1]]);
# Class-2 training samples (5 points, 2-D).
D2 = np.array([[2, 5], [1, 0], [5, -1], [-1, -3], [6, 1]]);
# print(D1)
# print(D2)
# Full dataset: rows 0-4 are class 1, rows 5-9 are class 2.
D = np.concatenate((D1, D2), axis=0)
# Compute the mean vector and the within-class scatter matrix of the samples.
def cal_cov_and_avg(samples):
    """Return (scatter_matrix, mean) for an (n, d) sample array.

    The scatter matrix is the sum over samples of the outer product
    (s - mean)(s - mean)^T.
    """
    mean = np.mean(samples, axis=0)
    cov_m = np.zeros((samples.shape[1], samples.shape[1]))
    for s in samples:
        t = s - mean
        # BUG FIX: reshape(2, 1) hard-coded two features even though the
        # matrix above is allocated as (d, d); reshape(-1, 1) works for any d.
        cov_m += t * t.reshape(-1, 1)
    return cov_m, mean
# Fisher's linear discriminant for the two classes c_1 and c_2.
def fisher(c_1, c_2):
    """Return the FLD projection vector for classes c_1 and c_2.

    The pooled within-class scatter matrix is inverted through its SVD,
    and the vector S_w^-1 (mean_1 - mean_2) is returned. The class means
    are printed, as in the original implementation.
    """
    scatter_a, mean_a = cal_cov_and_avg(c_1)
    print(mean_a)
    scatter_b, mean_b = cal_cov_and_avg(c_2)
    print(mean_b)
    within_class = scatter_a + scatter_b
    u, s, v = np.linalg.svd(within_class)
    inverse = np.dot(np.dot(v.T, np.linalg.inv(np.diag(s))), u.T)
    return np.dot(inverse, mean_a - mean_b)
# Returns True when the sample is classified as class 1, False for class 2.
def judge(sample, w, c_1, c_2):
    """Project `sample` with `w` and compare it to the projected class means.

    The sample belongs to whichever class has the nearer projected centre.
    """
    proj_mean_1 = np.dot(w.T, np.mean(c_1, axis=0))
    proj_mean_2 = np.dot(w.T, np.mean(c_2, axis=0))
    proj_sample = np.dot(w.T, sample)
    return abs(proj_sample - proj_mean_1) < abs(proj_sample - proj_mean_2)
w = fisher(D1, D2)  # learn the FLD projection vector
print(w)  # print the projection vector

# Classify every sample and count how many land on the correct side.
acc = 0
size = len(D)
for i in range(len(D)):
    out = judge(D[i], w, D1, D2)  # True -> classified as class 1
    if i < 5:  # rows 0-4 belong to D1
        if out:
            # BUG FIX: message typo "Corrtct " corrected to match the D2 branch.
            print("Correct ", D[i])
            acc += 1
        else:
            print("Uncorrect", D[i])
    else:  # rows 5-9 belong to D2
        if not out:
            print("Correct ", D[i])
            acc += 1
        else:
            print("Uncorrect ", D[i])
print("Accuracy rate is : ", acc / size)

# Draw the samples and the decision line w0*x + w1*y = 0.
plt.scatter(D1[:, 0], D1[:, 1], c='#99CC99')
plt.scatter(D2[:, 0], D2[:, 1], c='#FFCC00')
line_x = np.arange(min(np.min(D1[:, 0]), np.min(D2[:, 0])),
                   max(np.max(D1[:, 0]), np.max(D2[:, 0])),
                   step=1)
line_y = - (w[0] * line_x) / w[1]
plt.plot(line_x, line_y)
plt.show()
| true |
2be07d537c39a9b1a1e139f2018d18cb04d77949 | Python | rg-github-hub/TaskManager2049 | /test.py | UTF-8 | 32 | 2.84375 | 3 | [] | no_license |
# Tiny slicing demo. FIX: the original had a bare `l[:10:2]` expression whose
# result was discarded (a no-op); the behavior it illustrated — a stop index
# past the end is clamped, so l[:10:2] == [1, 3, 5] — is noted here instead.
l = [1, 2, 3, 4, 5]
print(l)
bfdf0b4ac72fe7cb54a0ed40d445e1eff9c6daa2 | Python | shraddhalokhande/PythonProject | /test/Python-SQL-Project-CodeBase-DS-DE/Python-SQL-Project-CodeBase-DS-DE/Python-SQL-Project-CodeBase-DS-DE/Python_SQL_Project_CodeBase-DS-DE.py | UTF-8 | 7,245 | 2.625 | 3 | [] | no_license | import argparse as agp
import getpass
import os
from myTools import MSSQL_DBConnector as mssql
from myTools import DBConnector as dbc
import myTools.ContentObfuscation as ce
try:
    import pandas as pd
except:
    # NOTE(review): `mi` is never imported or defined in this module, so this
    # fallback raises NameError instead of installing pandas — TODO import the
    # installer helper (and narrow the bare except to ImportError).
    mi.installModule("pandas")
    import pandas as pd
def printSplashScreen():
    """Print the introductory banner describing what this script does."""
    banner_lines = (
        "*************************************************************************************************",
        "\t THIS SCRIPT ALLOWS TO EXTRACT SURVEY DATA FROM THE SAMPLE SEEN IN SQL CLASS",
        "\t IT REPLICATES THE BEHAVIOUR OF A STORED PROCEDURE & TRIGGER IN A PROGRAMMATIC WAY",
        "\t COMMAND LINE OPTIONS ARE:",
        "\t\t -h or --help: print the help content on the console",
        "*************************************************************************************************\n\n",
    )
    for line in banner_lines:
        print(line)
def processCLIArguments()-> dict:
    """Parse the command-line arguments into a dict (None on failure).

    Skeleton: only the --DSN argument is registered so far; the remaining
    arguments referenced below are part of the TODO.
    """
    retParametersDictionary:dict = None
    dbpassword:str = ''
    obfuscator: ce.ContentObfuscation = ce.ContentObfuscation()
    try:
        argParser:agp.ArgumentParser = agp.ArgumentParser(add_help=True)
        argParser.add_argument("-n", "--DSN", dest="dsn", \
            action='store', default= None, help="Sets the SQL Server DSN descriptor file - Take precedence over all access parameters", type=str)

        #TODO
        # NOTE(review): `argParsingResults` is never assigned (parse_args() is
        # part of the TODO above), so building this dict raises NameError,
        # which the broad except below converts into the generic error message.
        retParametersDictionary = {
            "dsn" : argParsingResults.dsn,
            "dbserver" : argParsingResults.dbserver,
            "dbname" : argParsingResults.dbname,
            "dbusername" : argParsingResults.dbusername,
            "dbuserpassword" : dbpassword,
            "trustedmode" : argParsingResults.trustedmode,
            "viewname" : argParsingResults.viewname,
            "persistencefilepath": argParsingResults.persistencefilepath,
            "resultsfilepath" : argParsingResults.resultsfilepath
        }
    except Exception as e:
        print("Command Line arguments processing error: " + str(e))
    return retParametersDictionary
def getSurveyStructure(connector: mssql.MSSQL_DBConnector) -> pd.DataFrame:
    """Return the survey structure as a DataFrame (TODO: query not yet written).

    Currently a skeleton that always returns None.
    """
    surveyStructResults = None
    #TODO
    return surveyStructResults
def doesPersistenceFileExist(persistenceFilePath: str)-> bool:
    """Return True when the persistence (pickle) file already exists.

    Implements the TODO: the previous stub unconditionally returned True,
    which made main() always take the "compare with existing file" branch.
    """
    return os.path.isfile(persistenceFilePath)
def isPersistenceFileDirectoryWritable(persistenceFilePath: str)-> bool:
    """Return True when the directory that would hold the persistence file is writable.

    Implements the TODO: the previous stub unconditionally returned True.
    A bare filename (no directory part) is checked against the current directory.
    """
    directory = os.path.dirname(persistenceFilePath) or '.'
    return os.access(directory, os.W_OK)
def compareDBSurveyStructureToPersistenceFile(surveyStructResults:pd.DataFrame, persistenceFilePath: str) -> bool:
    """Return True when the pickled survey structure equals `surveyStructResults`.

    Implements the TODO: the previous stub unconditionally returned False.
    A missing, unreadable or corrupt persistence file counts as "different".
    """
    try:
        persisted = pd.read_pickle(persistenceFilePath)
        same_file = bool(surveyStructResults.equals(persisted))
    except Exception:
        same_file = False
    return same_file
def getAllSurveyDataQuery(connector: dbc.DBConnector) -> str:
    """Build and return the dynamic UNION query string over all surveys.

    Skeleton: the templates below mirror the T-SQL getAllSurveyData procedure;
    the loops filling them in are still TODO, so an empty string is returned.
    """
    #IN THIS FUNCTION YOU MUST STRICTLY CONVERT THE CODE OF getAllSurveyData written in T-SQL, available in Survey_Sample_A19 and seen in class
    # Below is the beginning of the conversion
    # The Python version must return the string containing the dynamic query (as we cannot use sp_executesql in Python!)

    # Per-question column: the user's answer, or -1 when no answer exists.
    strQueryTemplateForAnswerColumn: str = """COALESCE(
                (
                    SELECT a.Answer_Value
                    FROM Answer as a
                    WHERE
                        a.UserId = u.UserId
                        AND a.SurveyId = <SURVEY_ID>
                        AND a.QuestionId = <QUESTION_ID>
                ), -1) AS ANS_Q<QUESTION_ID> """

    # Placeholder column for questions not present in a given survey.
    strQueryTemplateForNullColumnn: str = ' NULL AS ANS_Q<QUESTION_ID> '

    # One SELECT per survey; the blocks are meant to be joined by UNION.
    strQueryTemplateOuterUnionQuery: str = """
            SELECT
                    UserId
                    , <SURVEY_ID> as SurveyId
                    , <DYNAMIC_QUESTION_ANSWERS>
            FROM
                [User] as u
            WHERE EXISTS
            ( \
                    SELECT *
                    FROM Answer as a
                    WHERE u.UserId = a.UserId
                    AND a.SurveyId = <SURVEY_ID>
            )
    """

    # NOTE(review): strCurrentUnionQueryBlock is currently unused — it belongs
    # to the TODO loop below.
    strCurrentUnionQueryBlock: str = ''
    strFinalQuery: str = ''

    #MAIN LOOP, OVER ALL THE SURVEYS
    # FOR EACH SURVEY, IN currentSurveyId, WE NEED TO CONSTRUCT THE ANSWER COLUMN QUERIES
    #inner loop, over the questions of the survey

    # Cursors are replaced by a query retrived in a pandas df
    surveyQuery:str = 'SELECT SurveyId FROM Survey ORDER BY SurveyId'
    surveyQueryDF:pd.DataFrame = connector.ExecuteQuery_withRS(surveyQuery)

    #CARRY ON THE CONVERSION
    #TODO

    return strFinalQuery
def refreshViewInDB(connector: dbc.DBConnector, baseViewQuery:str, viewName:str)->None:
    """(Re)create the database view `viewName` from `baseViewQuery`.

    Skeleton: the CREATE/ALTER VIEW execution is still TODO; nothing happens yet.
    """
    if(connector.IsConnected == True):
        #TODO
        pass
def surveyResultsToDF(connector: dbc.DBConnector, viewName:str)->pd.DataFrame:
    """Read the survey-results view into a DataFrame.

    NOTE(review): skeleton — `results` is never populated and the function
    falls off the end, implicitly returning None; the TODO should end with
    `return results`.
    """
    results:pd.DataFrame = None
    #TODO
def main():
    """Script entry point: parse the CLI, connect to SQL Server, and keep the
    pickled survey structure plus the exported survey results up to date.
    Large parts of the workflow are still TODO placeholders.
    """
    cliArguments:dict = None
    printSplashScreen()
    try:
        cliArguments = processCLIArguments()
    # BUG FIX: the original caught `Except`, an undefined name, so any raised
    # exception turned into a NameError instead of the intended handling.
    except Exception as excp:
        print("Exiting")
        return

    if(cliArguments is not None):
        #if you are using the Visual Studio Solution, you can set the command line parameters within VS (it's done in this example)
        #For setting your own values in VS, please make sure to open the VS Project Properties (Menu "Project, bottom choice), tab "Debug", textbox "Script arguments"
        #If you are trying this script outside VS, you must provide command line parameters yourself, i.e. on Windows
        #python.exe Python_SQL_Project_Sample_Solution --DBServer <YOUR_MSSQL> -d <DBName> -t True
        #See the processCLIArguments() function for accepted parameters
        try:
            connector = mssql.MSSQL_DBConnector(DSN = cliArguments["dsn"], dbserver = cliArguments["dbserver"], \
                dbname = cliArguments["dbname"], dbusername = cliArguments["dbusername"], \
                dbpassword = cliArguments["dbuserpassword"], trustedmode = cliArguments["trustedmode"], \
                viewname = cliArguments["viewname"])
            connector.Open()
            surveyStructureDF:pd.DataFrame = getSurveyStructure(connector)
            if(doesPersistenceFileExist(cliArguments["persistencefilepath"]) == False):
                if(isPersistenceFileDirectoryWritable(cliArguments["persistencefilepath"]) == True):
                    #pickle the dataframe in the path given by persistencefilepath
                    #TODO
                    print("\nINFO - Content of SurveyResults table pickled in " + cliArguments["persistencefilepath"] + "\n")
                    #refresh the view using the function written for this purpose
                    #TODO
            else:
                #Compare the existing pickled SurveyStructure file with surveyStructureDF
                # What do you need to do if the dataframe and the pickled file are different?
                #TODO
                pass #pass only written here for not creating a syntax error, to be removed

            #get your survey results from the view in a dataframe and save it to a CSV file in the path given by resultsfilepath
            #TODO
            print("\nDONE - Results exported in " + cliArguments["resultsfilepath"] + "\n")
            connector.Close()
        except Exception as excp:
            print(excp)
    else:
        print("Inconsistency: CLI argument dictionary is None. Exiting")
        return


if __name__ == '__main__':
    main()
291a7e4622a9d0f7f232faea93d50d1f5fae1bdf | Python | PratishtaRao/Big-_Data_Analysis | /HW_08/HW_08_Rao_Pratishta.py | UTF-8 | 6,977 | 3.390625 | 3 | [] | no_license | """
Title: HW_08_Rao_Pratishta.py
Course: CSCI 720
Date: 03/31/2019
Author: Pratishta Prakash Rao, Srikanth Lakshminarayan
Description: Code to implement the agglomeration clustering
"""
from haversine import haversine
from geopy.geocoders import Nominatim
import pandas
from geopy.extra.rate_limiter import RateLimiter
import matplotlib.pyplot as plt
import geopandas
from shapely.geometry import Point
import scipy.cluster.hierarchy as sci
def mergeCluster(all_clusters, cluster_1, cluster_2):
    """Fold cluster ``cluster_2`` into cluster ``cluster_1`` in place.

    The points of ``all_clusters[cluster_2]`` are appended to
    ``all_clusters[cluster_1]`` and the now-redundant entry at index
    ``cluster_2`` is removed from the list.
    """
    absorbed = all_clusters[cluster_2]
    all_clusters[cluster_1].extend(absorbed)
    del all_clusters[cluster_2]
def getDistance(list_i_cluster, list_j_cluster):
    """Single-linkage distance between two clusters.

    Returns the smallest pairwise haversine distance between any point of
    the first cluster and any point of the second.
    """
    best = float('inf')
    for point_a in list_i_cluster:
        for point_b in list_j_cluster:
            pair_dist = haversine(point_a, point_b)
            if pair_dist < best:
                best = pair_dist
    return best
def agglomeration(all_cluster_latitude_longitude, target_clusters=12):
    """Single-linkage agglomerative clustering over (lat, lon) point clusters.

    Repeatedly merges the two closest clusters (per getDistance) until only
    ``target_clusters`` remain, then prints a per-cluster size summary.

    :param all_cluster_latitude_longitude: list of clusters, each a list of
        coordinate tuples; mutated in place.
    :param target_clusters: number of clusters to stop at (generalises the
        previously hard-coded value 12; the default preserves old behavior).
    :return: the mutated cluster list.
    """
    while len(all_cluster_latitude_longitude) > target_clusters:
        best_dist = float('inf')
        best_i = None
        best_j = None
        # Scan every unordered cluster pair for the current closest pair.
        for cluster1_idx in range(len(all_cluster_latitude_longitude) - 1):
            for cluster2_idx in range(cluster1_idx + 1, len(all_cluster_latitude_longitude)):
                dist = getDistance(all_cluster_latitude_longitude[cluster1_idx],
                                   all_cluster_latitude_longitude[cluster2_idx])
                if dist < best_dist:
                    best_dist = dist
                    best_i = cluster1_idx
                    best_j = cluster2_idx
        mergeCluster(all_cluster_latitude_longitude, best_i, best_j)
    print("Process complete")
    total_count_in_cluster = []
    for h in all_cluster_latitude_longitude:
        total_count_in_cluster.append(len(h))
        print(len(h))
    print("Total count in cluster", sum(total_count_in_cluster))
    return all_cluster_latitude_longitude
def get_gps_points(df):
    """
    Function to get the latitude and longitude of the cities.
    Primary path: geocode each row via Nominatim and cache to lat_lon1.csv.
    Fallback: on any error, read pre-computed points from lat_long.csv.
    :param df: data frame with cities
    :return: list of single-point clusters, each [(longitude, latitude)]
    """
    geolocator = Nominatim()
    # NOTE(review): `geocode` (the rate-limited wrapper) is created but never
    # used — the raw geolocator.geocode below bypasses the rate limiter.
    geocode = RateLimiter(geolocator.geocode, min_delay_seconds=1)
    rows, columns = df.shape
    my_data = df.values
    all_cluster_latitude_longitude=[]
    try:
        with open('lat_lon1.csv', mode='w') as myfile:
            if rows is not None:
                # NOTE(review): the loop variable shadows the `rows` count above.
                for rows in my_data:
                    location_obj = geolocator.geocode(rows)
                    myfile.write(str(location_obj.latitude)+","+str(location_obj.longitude))
                    myfile.write('\n')
                    all_cluster_latitude_longitude.append([(location_obj.longitude, location_obj.latitude)])
    except:
        # Bare except is deliberate here: any API failure switches to the
        # pre-computed CSV shipped alongside this script.
        print("An error occured while getting values for latitudes and longitudes.Reading from lat_long.csv. "
              "Pls make sure this python file and csv files are kept together.")
        all_cluster_latitude_longitude = []
        """
        The points which we were getting from api were sometimes wrong and were not consistent.
        hence a file is made from which all points are read.
        """
        all_cluster_latitude_longitude=[]
        dt = pandas.read_csv('lat_long.csv', header=None)
        my_data = dt.values
        # CSV rows are (latitude, longitude); stored tuples are (lon, lat).
        for dat in my_data:
            all_cluster_latitude_longitude.append([(float(dat[1]), float(dat[0]))])
    return all_cluster_latitude_longitude
def plotting(all_cluster_latitude_longitude,list_colors):
    '''
    Method to plot the given clusters onto the world map.
    :param all_cluster_latitude_longitude: List of List of clusters
    :param list_colors: list of colours for different clusters (must have at
        least as many entries as there are clusters)
    :return: void
    '''
    f, ax = plt.subplots(1, figsize=(12, 6))
    ax.set_title('Clusters')
    # Background world map from geopandas' bundled low-res dataset.
    world = geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres'))
    world.plot(ax=ax, facecolor='lightgray', edgecolor='gray')
    ax.set_ylim([-90, 90])
    ax.set_axis_off()
    plt.axis('equal')
    index = 0
    for clusters in all_cluster_latitude_longitude:
        latitude = []
        longitude = []
        # NOTE(review): cluster points are stored as (longitude, latitude)
        # tuples upstream, yet clstr[0] goes into `latitude` and clstr[1] into
        # `longitude` — the column names look swapped; confirm against the map.
        for clstr in clusters:
            longitude.append(clstr[1])
            latitude.append(clstr[0])
        dataframe = pandas.DataFrame({'Latitude': latitude, 'Longitude': longitude})
        dataframe['Coordinates'] = list(zip(dataframe.Longitude, dataframe.Latitude))
        dataframe['Coordinates'] = dataframe['Coordinates'].apply(Point)
        crs = {'init': 'epsg:4326'}
        gdf = geopandas.GeoDataFrame(dataframe, crs=crs, geometry='Coordinates')
        # NOTE(review): this bare attribute access is a no-op statement.
        gdf.crs
        gdf.plot(ax=ax, marker='o', color=list_colors[index], markersize=.5, linewidth="5")
        index += 1
    plt.show()
def dendogram(city_country_csv,data):
    '''
    Method to create a dendrogram of the first 50 points.
    (Function name keeps the original "dendogram" spelling for compatibility.)
    :param city_country_csv: dataframe of city_country values ('City', 'Country' columns)
    :param data: dataframe of coordinate data, row-aligned with city_country_csv
    :return: None (shows the plot)
    '''
    city_country_csv = city_country_csv.head(50)
    city_vals = city_country_csv['City'].tolist()
    country_vals = city_country_csv['Country'].tolist()
    # One "City Country" label per leaf of the dendrogram.
    labels = [str(x) + " " + str(y) for x, y in zip(city_vals, country_vals)]
    # Single-linkage hierarchy over the first 50 coordinate rows.
    points = sci.linkage(data.head(50), method='single')
    sci.dendrogram(points, truncate_mode='lastp', p=50, labels=labels)
    plt.show()
def main(city_country_csv_path="/Users/srinivaslakshminarayan/PycharmProjects/bda/CS_720_City_Country.csv"):
    """Run the full pipeline: geocode cities, cluster them down to 12 groups,
    plot the clusters on a world map, and draw a dendrogram of the first 50.

    :param city_country_csv_path: path to the City/Country CSV. The input path
        was previously hard-coded to one user's machine; it is now a parameter
        whose default preserves the original behavior.
    """
    list_colors = ['blue', 'green', 'red', 'yellow', 'cyan', 'magenta', 'white', 'black', 'orange', 'pink', 'purple',
                   'gray']
    # To read the city and country into a data frame
    city_country_csv = pandas.read_csv(city_country_csv_path)
    all_cluster_latitude_longitude = get_gps_points(city_country_csv)
    all_cluster_latitude_longitude = agglomeration(all_cluster_latitude_longitude)
    plotting(all_cluster_latitude_longitude, list_colors)
    data = pandas.read_csv('lat_long.csv', header=None)
    dendogram(city_country_csv, data)


if __name__ == '__main__':
    main()
| true |
935cbadee487b30ef52a219ac0f542b71dc7bf9f | Python | liuweiping2020/pyml | /src/modeler/birnnmodel.py | UTF-8 | 2,264 | 2.90625 | 3 | [
"Apache-2.0"
] | permissive | from modeler.tfmodel import TFModel
import tensorflow as tf
class BiRNNModel(TFModel):
    """Bidirectional LSTM classifier for MNIST (TF1 static-graph style)."""

    def __init__(self):
        self.learning_rate = 0.01
        self.batch_size = 128
        self.display_step = 10
        self.n_input = 28  # MNIST data input (img shape: 28*28)
        self.n_steps = 28  # timesteps
        self.n_hidden = 256  # hidden layer num of features
        self.n_classes = 10  # MNIST total classes (0-9 digits)

    def add_placeholder(self):
        # tf Graph input: (batch, timesteps, features) and one-hot labels.
        self.x = tf.placeholder("float", [None, self.n_steps, self.n_input])
        self.y = tf.placeholder("float", [None, self.n_classes])

    def build(self):
        # Hidden layer weights => 2*n_hidden because of forward + backward cells
        weights = {
            'out': tf.Variable(tf.random_normal([2 * self.n_hidden, self.n_classes]))
        }
        biases = {
            'out': tf.Variable(tf.random_normal([self.n_classes]))
        }
        pred = self.BiRNN(self.x, weights, biases)
        # Define loss and optimizer
        self.cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=self.y))
        # BUG FIX: the original minimized the undefined local `cost`
        # (NameError at graph-build time) instead of self.cost.
        self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.cost)
        # Evaluate model
        # BUG FIX: the original compared against the undefined local `y`;
        # the labels placeholder is self.y.
        correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(self.y, 1))
        self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    def BiRNN(self, x, weights, biases):
        # Reshape (batch, steps, input) into a list of `n_steps` tensors of
        # shape (batch, n_input), as required by static_bidirectional_rnn.
        x = tf.transpose(x, [1, 0, 2])
        x = tf.reshape(x, [-1, self.n_input])
        x = tf.split(x, self.n_steps)
        # Define lstm cells with tensorflow
        # Forward direction cell
        lstm_fw_cell = tf.contrib.rnn.BasicLSTMCell(self.n_hidden, forget_bias=1.0)
        # Backward direction cell
        lstm_bw_cell = tf.contrib.rnn.BasicLSTMCell(self.n_hidden, forget_bias=1.0)
        # Get lstm cell output
        outputs, _, _ = tf.contrib.rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                                                dtype=tf.float32)
        # Linear activation, using rnn inner loop last output
        return tf.matmul(outputs[-1], weights['out']) + biases['out']
| true |
b68493d09c05690a30127f0126d168e1928ba893 | Python | icebale-coder/pyneng | /exercises/06_control_structures/task_6_2.py | UTF-8 | 1,506 | 3.6875 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Задание 6.2
Запросить у пользователя ввод IP-адреса в формате 10.0.1.1
В зависимости от типа адреса (описаны ниже), вывести на стандартный поток вывода:
'unicast' - если первый байт в диапазоне 1-223
'multicast' - если первый байт в диапазоне 224-239
'local broadcast' - если IP-адрес равен 255.255.255.255
'unassigned' - если IP-адрес равен 0.0.0.0
'unused' - во всех остальных случаях
Ограничение: Все задания надо выполнять используя только пройденные темы.
"""
# Classify an IPv4 address entered by the user.
ip = input('Введите ip адрес в формате x.x.x.x: ')
octet_list = ip.split('.')
first_octet = int(octet_list[0])
# Bug fix: the task statement defines INCLUSIVE ranges (unicast 1-223,
# multicast 224-239); the original strict comparisons misclassified the
# endpoints 223, 224 and 239.
if 1 <= first_octet <= 223:
    print('{} - Это unicast'.format(ip))
elif 224 <= first_octet <= 239:
    print('{} - Это multicast'.format(ip))
elif [int(octet) for octet in octet_list] == [255, 255, 255, 255]:
    print('{} - Это local broadcast'.format(ip))
elif [int(octet) for octet in octet_list] == [0, 0, 0, 0]:
    # Bug fix: 0.0.0.0 is 'unassigned' per the task statement; the
    # original printed 'local broadcast' here.
    print('{} - Это unassigned'.format(ip))
else:
    print('{} - Это unused'.format(ip))
| true |
03c417b1ac4373aefb9d93e33145f5d375c16800 | Python | alifahriander/ethz-clustering | /findAssignments.py | UTF-8 | 1,443 | 2.59375 | 3 | [] | no_license | import os
import argparse
import scipy.stats as stats
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from collections import Counter
from matplotlib.pyplot import rcParams
# CLI: --path points at the directory containing the result CSVs.
parser = argparse.ArgumentParser()
parser.add_argument("--path", type=str)
args = parser.parse_args()
PATH = args.path
DIRPATH = os.path.dirname(PATH)
# Read data
# Observed values: first row of y_observed.csv, one column per point.
y = pd.read_csv(os.path.join(PATH,"y_observed.csv"), header=None)
y = np.array(y.values[0])
# Points falling in the band [-1, 1] where the clusters overlap.
overlapRegion = y [(y[:]>=-1)&(y[:]<=1)]
print(overlapRegion)
print(len(overlapRegion))
# Ground-truth cluster assignments (first row of assignments.csv).
assignments = pd.read_csv(os.path.join(PATH,"assignments.csv"), header=None)
assignments = assignments.values[0]
# Last-iteration values from s_z.csv and z.csv.
s_z = pd.read_csv(os.path.join(PATH,"s_z.csv"), header=None)
s_z = s_z.values[-1]
z = pd.read_csv(os.path.join(PATH,"z.csv"), header=None)
z = z.values[-1]
# Split the interleaved values into two per-cluster lists (even indices
# vs odd indices) -- presumably one variance per cluster per point;
# TODO confirm the column layout of s_z.csv.
zVariances = [list(s_z[0::2]) , list(s_z[1::2])]
# # zVariances = [list(z[0::2]) , list(z[1::2])]
def findMinVariance(variances):
    """Assign each index to the cluster (0 or 1) whose variance has the
    smaller absolute value.

    :param variances: pair of per-cluster variance sequences; entry i of
        each sequence describes point i.
    :return: list of 0/1 cluster labels, one per point.
    """
    cluster_zero, cluster_one = variances[0], variances[1]
    # Pick cluster 0 only on a strictly smaller |variance|; ties go to 1.
    return [0 if abs(v) < abs(cluster_one[i]) else 1
            for i, v in enumerate(cluster_zero)]
# Estimate each point's cluster from the variances, then compare with the
# ground-truth assignments.
estimateAssignments = findMinVariance(zVariances)
# 1 where the estimate disagrees with the true assignment, 0 elsewhere.
mistakes = abs(assignments - estimateAssignments)
print(mistakes)
# Indices (and observed values) of the misclassified points.
index = np.where(mistakes==1)
print(index)
print(y[index[0]])
print(len(y[index[0]]))
67b05986df7fd5fff69dc6988ab3e4154b210ea2 | Python | minrivertea/kungfupeople | /newsletter/multipart_email.py | UTF-8 | 1,468 | 2.65625 | 3 | [] | no_license | ## Taken from http://www.rossp.org/blog/2007/oct/25/easy-multi-part-e-mails-django/
## but butchered a bit
from django.core.mail import EmailMultiAlternatives
from django.conf import settings
def send_multipart_mail(text_part, html_part, subject, recipients,
                        sender=None, fail_silently=False, bcc=None):
    """
    Send a multi-part e-mail with both plain-text and HTML alternatives.

    (The docstring previously described a template-based API that this
    function no longer has; the parts are passed in pre-rendered.)

    text_part -- plain-text body of the message.
    html_part -- HTML body, attached as a "text/html" alternative.
    subject -- message subject line.
    recipients -- a single address string, eg 'a@b.com', or a list such
        as ['a@b.com', 'c@d.com']; a lone string is wrapped in a list.
    sender -- an e-mail address or 'Name <email>'; falls back to
        settings.DEFAULT_FROM_EMAIL when falsy.
    fail_silently -- forwarded to Django's send(); suppresses delivery
        errors when True.
    bcc -- optional list of BCC addresses.

    Returns the number of messages sent (from EmailMultiAlternatives.send).
    """
    if not sender:
        sender = settings.DEFAULT_FROM_EMAIL
    # Allow callers to pass a single address as a plain string.
    # (isinstance instead of the non-idiomatic `type(x) != list`.)
    if not isinstance(recipients, list):
        recipients = [recipients]
    msg = EmailMultiAlternatives(subject, text_part, sender, recipients,
                                 bcc=bcc)
    msg.attach_alternative(html_part, "text/html")
    return msg.send(fail_silently)
| true |
8092e03fdf68949cd7b45be7de50647a67d91eb6 | Python | AMALj248/Wine_Quality | /Wine_qlty.py | UTF-8 | 2,432 | 3.609375 | 4 | [] | no_license | #IMPORTING THE REQUIRED LIBRARIES
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
import seaborn as sns
# Load the red-wine dataset; expects winequality-red.csv in the cwd.
wine = pd.read_csv('winequality-red.csv')
#seeing a few values of the csv files
wine.head()
wine.info()
# Check for missing values before modelling.
print(wine.isnull())
#since we find there is no null values we can proceed
#now plotting the data to find some good asssumptions as to best fitfig = plt.figure(figsize = (10,6))
fig = plt.figure(figsize = (10,10))
sns.barplot(x = 'quality', y = 'fixed acidity', data = wine)
plt.show()
#assigning input values to x and y
# Features: all physico-chemical columns; target: the 'quality' score.
x = wine[['fixed acidity','volatile acidity','citric acid','residual sugar','chlorides','free sulfur dioxide','total sulfur dioxide','density','pH','sulphates','alcohol']]
y = wine['quality'].values
#y=y*10
# showing the wine dataset in tabular cloumn
wine.describe()
#information about the wine datatypes
wine.info()
#TRAIN AND TEST SPLIT
#SPLITTING THE DATA USING SIMPLE TEST TRAIN SLIT OF DATA
from sklearn.model_selection import train_test_split
# 80/20 split with a fixed seed for reproducibility.
x_train , x_test , y_train , y_test = train_test_split(x ,y , test_size = 0.2, random_state=0)
#printing the dimensions of splitted data
print("x_train shape :", x_train.shape)
print("x_test shape : ", x_test.shape)
print("y_train shape :",y_train.shape)
print("y_test shape :", y_test.shape)
#applying linear regression model to the dataset
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(x_train, y_train)
# predicting the test results
y_pred = regressor.predict(x_test)
#plotting the scatter plot between y_test and y_predicited
plt.scatter(y_test, y_pred, c='green')
plt.xlabel("Input parameters")
plt.ylabel("Wine quality /10 ")
plt.title("True value vs predicted value : Linear Regression ")
plt.show()
#Result from the MULTI LINEAR REGRESSION MODEL
from sklearn.metrics import mean_squared_error
mse = mean_squared_error(y_test, y_pred)
print(" Mean Square Error : ", mse)
m =math.sqrt(mse)
print(" SQUARE ROOT OF MEAN SQUARED ERROR")
print (m)
print(y_pred)
# NOTE(review): 'quality' is on a 0-10 scale (see the plot label above),
# so `y_test[x] >= 70` can never be true and every sample prints 'Bad'.
# The commented-out `y=y*10` suggests a rescale was once intended; the
# threshold is probably meant to be 7 (or y rescaled) -- confirm.
# Also: the loop variable `x` shadows the feature dataframe `x`, and the
# Good/Bad label is taken from y_test, not the prediction y_pred.
for x in range(len(y_pred)):
    if y_test[x] >= 70:
        print ('Good')
    else:
        print('Bad')
#print (" Model Accuracy :", 100-mse)
#Mean absolute error
print("test accuracy: {} %".format(100 - np.mean(np.abs(y_pred- y_test))))
#predictiing quality via giving lables
| true |
ce44097b0789984a65b44b9b8eff2241b567f325 | Python | lisisis/stars | /eg | UTF-8 | 1,507 | 2.984375 | 3 | [] | no_license | import struct
import binascii
import time
def ReadFloat(*args):
    """Decode (high_word, low_word) register pairs as a big-endian
    IEEE-754 float, rounded to 6 decimals.

    Each arg is an (n, m) tuple of two 16-bit register values. As in the
    original, all pairs are processed and the LAST pair's value is
    returned; calling with no args raises an UnboundLocalError.
    """
    for n, m in args:
        # Join the two 16-bit words into one 4-byte big-endian buffer.
        # Bug fix: str.decode('hex') is Python-2 only; binascii.unhexlify
        # works on both Python 2 and 3.
        v = '%04x%04x' % (n, m)
        y = round(struct.unpack('!f', binascii.unhexlify(v))[0], 6)
    return y
def WriteFloat(value):
    """Encode a float as two 16-bit register values [high_word, low_word]
    in big-endian IEEE-754 order (inverse of ReadFloat)."""
    # Bug fix: bytes.encode('hex') is Python-2 only; binascii.hexlify
    # works on both Python 2 and 3.
    y_hex = binascii.hexlify(struct.pack('!f', value))
    n, m = int(y_hex[:-4], 16), int(y_hex[-4:], 16)
    return [n, m]
def ReadDint(*args):
    """Decode (high_word, low_word) register pairs as a big-endian signed
    32-bit integer.

    As in the original, all pairs are processed and the LAST pair's value
    is returned; calling with no args raises an UnboundLocalError.
    """
    for n, m in args:
        # Bug fix: str.decode('hex') is Python-2 only; binascii.unhexlify
        # works on both Python 2 and 3.
        v = '%04x%04x' % (n, m)
        y = struct.unpack('!i', binascii.unhexlify(v))[0]
    return y
def WriteDint(value):
    """Encode a signed 32-bit integer as two 16-bit register values
    [high_word, low_word], big-endian (inverse of ReadDint)."""
    # Bug fix: bytes.encode('hex') is Python-2 only; binascii.hexlify
    # works on both Python 2 and 3.
    y_hex = binascii.hexlify(struct.pack('!i', value))
    n, m = int(y_hex[:-4], 16), int(y_hex[-4:], 16)
    return [n, m]
def ReadInt(*args):
    """Round-trip register values through '%d' formatting and return the
    LAST argument as an int (mirrors ReadFloat/ReadDint's loop shape)."""
    for value in args:
        text = '%d' % value
    return int(text)
def WriteInt(value):
    """Wrap a single register value in a one-element list (the payload
    shape the Write* helpers all return)."""
    return [value]
#print(ReadFloat((15729, 16458)))
# print(WriteFloat(3.16))
#print(ReadDint((1734, 6970)))
# print(WriteDint(456787654))
def str_to_hex(s):
    """Return the characters of *s* as space-separated, zero-padded,
    uppercase hex codes (e.g. 'AB' -> '41 42')."""
    return ' '.join('%02X' % ord(ch) for ch in s)
def hex_to_str(s):
    """Inverse of str_to_hex: parse space-separated hex character codes
    back into a string (e.g. '41 42' -> 'AB')."""
    return ''.join(chr(int(code, 16)) for code in s.split(' '))
def str_to_bin(s):
    """Return the characters of *s* as space-separated binary codes with
    no leading zeros (matching bin(), e.g. 'A' -> '1000001')."""
    return ' '.join(format(ord(ch), 'b') for ch in s)
def bin_to_str(s):
    """Inverse of str_to_bin: parse space-separated binary character
    codes back into a string (e.g. '1000001' -> 'A')."""
    return ''.join(chr(int(code, 2)) for code in s.split(' '))
def currtime():
    """Current local time formatted as zero-padded 'HH:MM:SS'."""
    hours, minutes, seconds = time.localtime()[3:6]
    return '%02d:%02d:%02d' % (hours, minutes, seconds)
| true |
3805a5fd64d88298bc446af23c1950b2b4229bb6 | Python | Corkster919/GabScraper | /scrape_posts.py | UTF-8 | 4,526 | 2.703125 | 3 | [] | no_license | """ Scrapes Gab.ai posts. """
# pylint: disable=unsubscriptable-object
import argparse
import json
import os
import random
import sys
import time
import traceback
import mechanize
def shuffle_posts(min_num, max_num):
    """Return the post IDs in [min_num, max_num) in random order.

    Fix: random.shuffle requires a mutable sequence; shuffling a bare
    range object fails on Python 3, so materialize a list first (a no-op
    change on Python 2, where range() already returns a list).
    """
    post_numbers = list(range(min_num, max_num))
    random.shuffle(post_numbers)
    return post_numbers
def login(username="", password=""):
""" Login to gab.ai. """
if not len(username) or not len(password):
auth_data = json.load(open("auth.json"))
try:
username = auth_data["username"]
except:
print "No username specified."
return
try:
password = auth_data["password"]
except:
print "No password specified."
return
browser = mechanize.Browser()
browser.set_handle_robots(False)
browser.set_handle_refresh(False)
browser.addheaders = [("User-agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36")]
r = browser.open("https://gab.ai/auth/login")
browser.select_form(nr=0)
browser["username"] = username
browser["password"] = password
r = browser.submit()
# Debug output post-login
print r.read()[0:500]
return browser
def process_posts(browser, post_numbers):
    """Scrape each post ID in post_numbers and save it to disk.

    Posts are sharded into posts/<last1>/<last2>/<last3>/<id>.json using
    the last one, two and three digits of the ID. Random pauses and
    periodic re-logins throttle the scraping; HTTP 429 aborts the whole
    process.
    """
    # fail counts consecutive-ish 404/400 misses; j and k count scraped
    # posts since the last medium/long break respectively.
    fail = 0
    j = 0
    k = 0
    for i in post_numbers:
        # Check if the post already exists.
        num = str(i)
        ones = num[-1]
        tens = num[-2:]
        hundreds = num[-3:]
        if os.path.isfile("posts/" + ones + "/" + tens + "/" + hundreds + "/" + str(i) + ".json"):
            # Log only ~10% of skips to keep output readable.
            if random.randint(1, 10) == 10:
                print "Skipping " + str(i)
            continue
        # Make directory structure if necessary.
        if not os.path.exists("posts"):
            os.makedirs("posts")
        if not os.path.exists("posts/" + ones):
            os.makedirs("posts/" + ones)
        if not os.path.exists("posts/" + ones + "/" + tens):
            os.makedirs("posts/" + ones + "/" + tens)
        if not os.path.exists("posts/" + ones + "/" + tens + "/" + hundreds):
            os.makedirs("posts/" + ones + "/" + tens + "/" + hundreds)
        # Read the post
        try:
            r = browser.open("https://gab.ai/posts/" + str(i))
            data = r.read()
            with open("posts/" + ones + "/" + tens + "/" + hundreds + "/" + str(i) + ".json", "w") as f:
                f.write(data)
            print data
            print i
            print ""
        # Error handling.
        except mechanize.HTTPError as error_data:
            # 429 = rate limited: abort the whole run immediately.
            if isinstance(error_data.code, int) and error_data.code == 429:
                print "ALERT TOO MANY REQUESTS SHUT DOWN"
                print i
                sys.exit(-1)
                # NOTE(review): unreachable -- sys.exit raises SystemExit.
                return
            elif isinstance(error_data.code, int) and error_data.code == 404:
                print "Gab post deleted or ID not allocated"
                print i
                fail = fail + 1
            elif isinstance(error_data.code, int) and error_data.code == 400:
                print "Invalid request -- possibly a private Gab post?"
                print i
                fail = fail + 1
            else:
                print error_data.code
                print traceback.format_exc()
                print "ERROR: DID NOT WORK"
                print i
        except:
            print traceback.format_exc()
            print "ERROR: STILL DID NOT WORK"
            print i
        # Pausing between jobs.
        # ~10% chance of a 2-3s wait, ~20% chance of a 0.1s wait.
        pause_timer = random.randint(1, 10)
        if pause_timer == 10:
            print "Waiting..."
            time.sleep(random.randint(2, 3))
        elif pause_timer == 1 or pause_timer == 2:
            time.sleep(0.1)
        # After many misses, assume the session went stale and re-login.
        if fail > 1000:
            del browser
            browser = login()
            fail = 0
        k = k + 1
        j = j + 1
        # Medium break + fresh session every 5000 posts.
        if j >= 5000:
            print "Medium length break."
            time.sleep(random.randint(10, 20))
            j = 0
            del browser
            browser = login()
        # Long break every 51000 posts.
        if k >= 51000:
            print "Long break."
            time.sleep(random.randint(60, 90))
            k = 0
def process_args():
    """Parse CLI arguments, build the scraping order, log in and scrape.

    Positional args: minimum and maximum post ID. Optional -u/-p override
    the credentials from auth.json.
    """
    parser = argparse.ArgumentParser(description="Gab.ai scraper.")
    parser.add_argument("-u", "--username", action="store", dest="username", help="Specify a username", default="")
    parser.add_argument("-p", "--password", action="store", dest="password", help="Specify a password", default="")
    parser.add_argument("num_limits", nargs="*", help="Minimum and maximum post numbers.")
    args = vars(parser.parse_args())
    # Fall back to a default ID range when the limits are missing/invalid.
    # NOTE(review): the two fallbacks differ (1000000 vs 10000) -- confirm
    # whether that is intentional.
    if len(args["num_limits"]) != 2:
        min_num = 1
        max_num = 1000000
        print "Failed to get post number limits."
    else:
        try:
            min_num = int(args["num_limits"][0])
            max_num = int(args["num_limits"][1])
        except:
            print "Failed to get post number limits."
            min_num = 1
            max_num = 10000
    post_order = shuffle_posts(min_num, max_num)
    browser = login(args["username"], args["password"])
    if browser is not None:
        process_posts(browser, post_order)
    else:
        print "Failed login."
if __name__ == "__main__":
process_args()
| true |
d3c8559fe38e82755f03c6d5e9a157a31777ba88 | Python | sanapagarkar/Advertisement-Optimizer | /ts.py | UTF-8 | 854 | 3.0625 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Click data: one row per round, one 0/1 column per ad.
dataset= pd.read_csv('Ads_CTR_Optimisation.csv')
#Implementing Thompson Sampling
import random
# d = number of ads (arms); N = number of rounds.
d=10
N=10000
ads_selected=[]
# Per-ad counts of rewarded (1) and unrewarded (0) selections.
noOfRewards1 = [0]*d
noOfRewards0 = [0]*d
totalReward=0
for n in range(0,N):
    max_random = 0
    ad = 0
    # Draw a Beta(successes+1, failures+1) sample for each ad and pick
    # the ad with the highest draw (Thompson sampling).
    for i in range (0,d):
        random_beta = random.betavariate(noOfRewards1[i]+1,noOfRewards0[i]+1)
        if(random_beta>max_random):
            max_random = random_beta
            ad=i
    ads_selected.append(ad)
    # Reward is the recorded click for this round/ad pair.
    reward = dataset.values[n,ad]
    # Update the chosen ad's success/failure counts.
    if(reward == 1):
        noOfRewards1[ad]+=1
    else:
        noOfRewards0[ad]+=1
    totalReward+=reward
# Visualising the results - Histogram
plt.hist(ads_selected)
plt.title('Histogram of ads selections')
plt.xlabel('Ads')
plt.ylabel('Number of times each ad was selected')
plt.show()
b2bbd64925e9a727fac50f7f7da5f8f9d71d7a9c | Python | datairahub/dscompass-back | /src/protection_defenders/defenders_auth/middlewares.py | UTF-8 | 597 | 2.578125 | 3 | [] | no_license | from django.conf import settings
class CookieJWTMiddleware:
    """Middleware that surfaces the JWT refresh-token cookie.

    When the incoming request carries the refresh-token cookie (name
    taken from settings.SIMPLE_JWT['COOKIE_REFRESH_KEY']), its value is
    copied onto ``request.refresh`` for later handling.
    """

    def __init__(self, get_response):
        # Resolve the cookie name once, at middleware construction time.
        self.cookie_name = settings.SIMPLE_JWT['COOKIE_REFRESH_KEY']
        self.get_response = get_response

    def __call__(self, request):
        if hasattr(request, 'COOKIES'):
            token = request.COOKIES.get(self.cookie_name, None)
            if token:
                # Expose the refresh token for downstream code.
                request.refresh = token
        return self.get_response(request)
| true |