blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
701651504cac9f502d94c0fbc8b99063904d0a5b | Python | maikiperin/estudo-python | /pythonbasico/aula11-tratamento-de-erros.py | UTF-8 | 725 | 3.140625 | 3 | [] | no_license | import time
# Demonstration 1: a bare except catches every exception type
# (even KeyboardInterrupt/SystemExit) -- shown here for teaching purposes.
try:
    a = 1200 / 0
except:
    print('Erro! Divisão por zero.')
print('o programa continua...')
# Demonstration 2: catch only the specific exception type.
try:
    a = 1200 / 0
except ZeroDivisionError:
    print('Erro! Divisão por zero.')
# Demonstration 3: multiple except clauses -- only the matching one
# runs (the undefined name raises NameError here).
try:
    funcaoquenaoexiste()
except ZeroDivisionError:
    print('Erro! Divisão por zero.')
except NameError:
    print('Você digitou alguma coisa errada.')
# Demonstration 4: catch-all with access to the exception object.
try:
    funcaoquenaoexiste()
except Exception as erro:
    print('Erro:', erro)
def abre_arquivo():
    """Try to open the target file.

    Returns:
        True if the file could be opened, False otherwise (the error is
        printed).

    The original version leaked the file handle on success; a ``with``
    block guarantees it is closed on every path.
    """
    try:
        with open('arquivoquenaoexiste.txt'):
            return True
    except Exception as erro:
        print('Erro:', erro)
        return False
# Keep retrying every 5 seconds until the file can finally be opened.
# Note: this loops forever if the file never appears.
while not abre_arquivo():
    print('Tentando abrir o arquivo...')
    time.sleep(5)
print('Abriu o arquivo') | true |
a20c737d917077761e80bea51b455f116e3f7010 | Python | UW-COSMOS/Cosmos | /cosmos/api/cosmos/embeddings.py | UTF-8 | 663 | 2.515625 | 3 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | from flask import (
Blueprint, request, current_app, abort, jsonify
)
bp = Blueprint('embeddings', __name__, url_prefix='')
@bp.route('/api/v1/word2vec', endpoint='word2vec', methods=['GET'])
def word2vec():
    """GET /api/v1/word2vec?word=<w>&n=<k>

    Return the ``n`` (default 10) nearest neighbors of ``word`` from the
    application's word-embeddings model as a JSON list of
    ``[word, score]`` pairs.

    Aborts with 500 if the model was never attached to the app, and with
    400 if the ``word`` parameter is missing/empty.
    """
    query_word= request.values.get('word')
    n_responses= int(request.values.get('n', '10'))
    # The model is expected to be attached to the Flask app at startup.
    if not hasattr(current_app, 'word_embeddings_model'):
        abort(500)
    if not query_word:
        abort(400)
    results = current_app.word_embeddings_model.get_nearest_neighbors(query_word, k=n_responses)
    # The model yields (score, word) pairs; flip them to (word, score)
    # for the JSON response.
    scores, words = zip(*results)
    results = list(zip(words, scores))
    return jsonify(results)
| true |
6eaabd402b768616dc005a67fa7d575e7985261c | Python | Krzyzaku21/Git_Folder | /_python_base_code/plotly/data.py | UTF-8 | 6,756 | 3.125 | 3 | [] | no_license | # %%
# ? making line graph
from plotly.graph_objs import Scatter
from plotly import offline
#define the data
x_values = list(range(11))
squares = [x**2 for x in x_values]
#pass the data to a graph object, and store it in a list
data = [Scatter(x=x_values, y=squares)]
# data = [Scatter(x=x_values, y=squares, mode='markers')]
#pass the data and a filename to plot()
offline.plot(data, filename='squares.html')
# %%
# ? Making bar graph
from plotly.graph_objs import Bar
from plotly import offline
#define the data
x_values = list(range(11))
squares = [x**2 for x in x_values]
data = [Bar(x=x_values, y=squares)]
#pass data and filename to plot
offline.plot(data, filename='squares.html')
# %%
# ? Using layout objects
from plotly.graph_objs import Scatter, Layout
from plotly import offline
#define the data
x_values = list(range(11))
squares = [x**2 for x in x_values]
#pass the data to a graph object, and store it in a list
data = [Scatter(x=x_values, y=squares)]
title = 'Square Numbers'
x_axis_config = {
"title" : 'x'
}
y_axis_config ={
'title' : 'Square of x'
}
my_layout = Layout(title=title, xaxis=x_axis_config, yaxis=y_axis_config)
offline.plot({
'data' : data,
'layout' : my_layout
}, filename='squares.html')
# %%
# ? data as dictionary
from plotly.graph_objs import Scatter
from plotly import offline
#define the data
x_values = list(range(11))
squares = [x**2 for x in x_values]
#pass the data to a graph object, and store it in a list
data = [{
'type' : 'scatter',
'x' : x_values,
'y' : squares,
'mode' : 'markers',
}]
offline.plot(data, filename='squares.html')
# %%
# ? squares and cubes
from plotly.graph_objs import Scatter
from plotly import offline
#define the data
x_values = list(range(11))
squares = [x**2 for x in x_values]
cubes = [x**3 for x in x_values]
#pass the data to a graph object, and store it in a list
data = [{
#trace 1 squares
'type' : 'scatter',
'x' : x_values,
'y' : squares,
'name' : 'Squares',
},
{
#trace 2 cubes
'type' : 'scatter',
'x' : x_values,
'y' : cubes,
'name' : 'Cubes',
}
]
offline.plot(data, filename='squares_cubes.html')
# %%
# ? layout as dictionary
from plotly.graph_objs import Scatter
from plotly import offline
#define the data
x_values = list(range(11))
squares = [x**2 for x in x_values]
#pass the data to a graph object, and store it in a list
data = [{
#trace 1 squares
'type' : 'scatter',
'x' : x_values,
'y' : squares,
'mode' : 'markers',
'marker' : {
'size' : 10,
'color' : '#6688dd'
}
},
]
my_layout = {
'title' : 'Square Numbers',
'xaxis' : {
'title' : 'x',
'titlefont' : {
'family' : 'monospace'
}
},
'yaxis' : {
'title' : 'Square of x',
'titlefont' : {
'family' : 'monospace'
}
}
}
offline.plot({
'data' : data,
'layout' : my_layout
}, filename='squares.html')
# %%
# ? using colorscale
from plotly.graph_objs import Scatter
from plotly import offline
#define the data
x_values = list(range(11))
squares = [x**2 for x in x_values]
#pass the data to a graph object, and store it in a list
data = [{
#trace 1 squares
'type' : 'scatter',
'x' : x_values,
'y' : squares,
'mode' : 'markers',
'marker' : {
'colorscale' : "Viridis",
'color' : squares,
'colorbar' : {
'title' : 'Value'
}
}
},
]
my_layout = {
'title' : 'Square Numbers',
'xaxis' : {
'title' : 'x',
'titlefont' : {
'family' : 'monospace'
}
},
'yaxis' : {
'title' : 'Square of x',
'titlefont' : {
'family' : 'monospace'
}
}
}
offline.plot({
'data' : data,
'layout' : my_layout
}, filename='squares.html')
# %%
# ? Adding subplots to a figure
from plotly.subplots import make_subplots
from plotly.graph_objs import Scatter
from plotly import offline
# define the data
x_values = list(range(11))
squares = [x**2 for x in x_values]
cubes = [x**3 for x in x_values]
fig = make_subplots(rows=1, cols=2, shared_xaxes=True)
# first subplot: the squares
data = {
    'type' : 'scatter',
    'x' : x_values,
    'y' : squares,
}
fig.add_trace(data, row=1, col=1)
# second subplot: the cubes (the original accidentally re-plotted
# `squares` here, leaving `cubes` computed but unused)
data = {
    'type' : 'scatter',
    'x' : x_values,
    'y' : cubes,
}
fig.add_trace(data, row=1, col=2)
offline.plot(fig, filename='subplots.html')
# %%
# ? Plotting global datasets, scattergeo chart type
from plotly import offline
#Points in (lat, lon) format.
peak_coords = [
(63.069, -151.0063),
(60.5671, -140.4055),
(46.8529, -121.7604)
]
#make matching lists of lats, lons, and labels
lats = [pc[0] for pc in peak_coords]
lons = [pc[1] for pc in peak_coords]
peak_names = ['Denali', 'Mt logan', 'Mt Rainier']
data = [{
'type' : 'scattergeo',
'lon' : lons,
'lat' : lats,
'marker' : {
'size' : 20,
'color' : '#227722'
},
'text' : peak_names
}]
my_layout1 = {
'title' : 'Selected High Peaks',
'geo' : {
'scope' : 'north america',
'showland' : True,
'showocean' : True,
'showlakes' : True,
'showrivers' : True,
}
}
offline.plot({
'data' : data,
'layout' : my_layout1
}, filename='peaks.html')
# %%
# ? Plotting global datasets, scattergeo chart type
from plotly import offline
#Points in (lat, lon) format.
peak_coords = [
(54.423459, 18.483281),
(53.821852, 22.368968),
(50.399905, 18.893328)
]
#make matching lists of lats, lons, and labels
lats = [pc[0] for pc in peak_coords]
lons = [pc[1] for pc in peak_coords]
peak_names = ['Nike', 'Ełk', 'Radzionków']
data = [{
'type' : 'scattergeo',
'lon' : lons,
'lat' : lats,
'marker' : {
'size' : 20,
'color' : '#227722'
},
'text' : peak_names
}]
my_layout1 = {
'title' : 'Polska',
'geo' : {
'scope' : 'europe',
'showland' : True,
'showocean' : True,
'showlakes' : True,
'showrivers' : True,
'showlegend' : True,
}
}
offline.plot({
'data' : data,
'layout' : my_layout1
}, filename='polska.html')
# %%
| true |
18b3dc5150fd3d2cec2e74693f7492739634b775 | Python | chenxu0602/LeetCode | /1282.group-the-people-given-the-group-size-they-belong-to.py | UTF-8 | 1,522 | 3.140625 | 3 | [] | no_license | #
# @lc app=leetcode id=1282 lang=python3
#
# [1282] Group the People Given the Group Size They Belong To
#
# https://leetcode.com/problems/group-the-people-given-the-group-size-they-belong-to/description/
#
# algorithms
# Medium (83.78%)
# Likes: 125
# Dislikes: 71
# Total Accepted: 14K
# Total Submissions: 16.7K
# Testcase Example: '[3,3,3,3,3,1,3]'
#
# There are n people whose IDs go from 0 to n - 1 and each person belongs
# exactly to one group. Given the array groupSizes of length n telling the
# group size each person belongs to, return the groups there are and the
# people's IDs each group includes.
#
# You can return any solution in any order and the same applies for IDs. Also,
# it is guaranteed that there exists at least one solution.
#
#
# Example 1:
#
#
# Input: groupSizes = [3,3,3,3,3,1,3]
# Output: [[5],[0,1,2],[3,4,6]]
# Explanation:
# Other possible solutions are [[2,1,6],[5],[0,4,3]] and
# [[5],[0,6,2],[4,3,1]].
#
#
# Example 2:
#
#
# Input: groupSizes = [2,1,3,3,3,2]
# Output: [[1],[0,5],[2,3,4]]
#
#
#
# Constraints:
#
#
# groupSizes.length == n
# 1 <= n <= 500
# 1 <= groupSizes[i] <= n
#
#
#
# @lc code=start
from collections import defaultdict
class Solution:
    def groupThePeople(self, groupSizes: List[int]) -> List[List[int]]:
        """Partition person ids 0..n-1 into groups of their declared sizes.

        People who declared the same group size are pooled together in id
        order, then each pool is chopped into consecutive chunks of that
        size.  Any valid grouping is accepted by the problem, so this
        greedy pooling always works.
        """
        # Pool the indices of everyone who requested a given group size.
        pools = defaultdict(list)
        for person, size in enumerate(groupSizes):
            pools[size].append(person)
        # Slice every pool into chunks of exactly `size` members.
        groups = []
        for size, members in pools.items():
            for start in range(0, len(members), size):
                groups.append(members[start:start + size])
        return groups
# @lc code=end
| true |
41809b5ec9adbf77d1344c04a965a22812694e80 | Python | trallala9/curly_potato | /excersize_four.py | UTF-8 | 462 | 2.984375 | 3 | [] | no_license | # shapes and texts
# Draw shapes and text on a blank canvas with OpenCV.
import cv2
import numpy as np
# 512x512 black image, 3 channels, 8 bits per channel.
img = np.zeros((512, 512, 3), np.uint8)
#print(img)
#img[200:300, 100:200] = 255,0,0
# Line from the top-left corner to (300, 300), thickness 3.
cv2.line(img,(0, 0),(300,300),(0, 255, 255),3)
# Line across the whole canvas (shape[1] = width, shape[0] = height).
cv2.line(img,(0, 0),(img.shape[1], img.shape[0]),(0, 255, 255),3)
cv2.rectangle(img,(0,0),(250,350),(0,0, 255),2)
cv2.circle(img,(400,50),30,(255,255,0),5)
cv2.putText(img, "Moj tekst",(200,100), cv2.FONT_HERSHEY_TRIPLEX,1,(0,0,255),1)
# Display the result and block until a key is pressed.
cv2.imshow("Image", img)
cv2.waitKey(0)
8c94286359b851dc619a70ac707d63229ab56489 | Python | jepebe/aoc2018 | /aoc2020/day7/day7.py | UTF-8 | 2,663 | 3.203125 | 3 | [] | no_license | import intcode as ic
tester = ic.Tester('Handy Haversacks')
def read_file():
    """Read the puzzle input file and return it as a list of lines."""
    with open('input') as handle:
        contents = handle.read()
    return contents.split('\n')
def parse_lines(lines):
    """Parse bag rules into a mapping: outer colour -> {inner colour: count}.

    Each input line has the form
    "<colour> bags contain <n> <colour> bag(s), ... ."  A bag that
    contains nothing maps to an empty dict.
    """
    rules = {}
    for raw in lines:
        rule = raw.replace('.', '')
        outer, contents = rule.split(' bags contain ')
        if outer in rules:
            print('What?')
        rules[outer] = {}
        for item in contents.split(','):
            item = item.strip()
            # "no other bags" terminates the rule.
            if item.startswith('no'):
                break
            space = item.find(' ')
            qty = int(item[:space])
            colour = item[space:item.find('bag')].strip()
            rules[outer][colour] = qty
    return rules
def is_golden(bags, bag, prefix='-'):
    """Return True if `bag` is, or can eventually contain, a shiny gold bag."""
    if bag == 'shiny gold':
        return True
    if not bags[bag]:
        # Empty bag: dead end.
        return False
    # Recurse into every bag this one contains.
    return any(is_golden(bags, inner, prefix + '-') for inner in bags[bag])
def count_golden(bags):
    """Count the bags (excluding shiny gold itself) that can reach a
    shiny gold bag through their contents."""
    total = 0
    for name in bags:
        if name != 'shiny gold' and is_golden(bags, name):
            total += 1
    return total
def bag_size(bags, bag, prefix='-'):
    """Total number of bags inside (and including) `bag`; the shiny gold
    bag itself is not counted."""
    total = 0 if bag == 'shiny gold' else 1
    for inner, qty in bags[bag].items():
        total += qty * bag_size(bags, inner, prefix + '-')
    return total
lines = """light red bags contain 1 bright white bag, 2 muted yellow bags.
dark orange bags contain 3 bright white bags, 4 muted yellow bags.
bright white bags contain 1 shiny gold bag.
muted yellow bags contain 2 shiny gold bags, 9 faded blue bags.
shiny gold bags contain 1 dark olive bag, 2 vibrant plum bags.
dark olive bags contain 3 faded blue bags, 4 dotted black bags.
vibrant plum bags contain 5 faded blue bags, 6 dotted black bags.
faded blue bags contain no other bags.
dotted black bags contain no other bags.""".split("\n")
bags = parse_lines(lines)
tester.test_value(count_golden(bags), 4)
tester.test_value(bag_size(bags, 'shiny gold'), 32)
lines = """shiny gold bags contain 2 dark red bags.
dark red bags contain 2 dark orange bags.
dark orange bags contain 2 dark yellow bags.
dark yellow bags contain 2 dark green bags.
dark green bags contain 2 dark blue bags.
dark blue bags contain 2 dark violet bags.
dark violet bags contain no other bags.""".split("\n")
bags = parse_lines(lines)
tester.test_value(count_golden(bags), 0)
tester.test_value(bag_size(bags, 'shiny gold'), 126)
lines = read_file()
bags = parse_lines(lines)
tester.test_value(count_golden(bags), 259, 'solution to exercise 1=%s')
tester.test_value(bag_size(bags, 'shiny gold'), 45018, 'solution to exercise 2=%s')
| true |
36546074b6ecd7c57b4ebbb859ee15c19146cc37 | Python | jeepcambo/ctf | /projecteuler/problem20.py | UTF-8 | 671 | 4.03125 | 4 | [] | no_license | # PROBLEM 20
# and the sum of the digits in the number 10! is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
#
# Find the sum of the digits in the number 100!
import sys
def factorial(num):
    """Recursively compute num! (the input is coerced with int())."""
    x = int(num)
    if x < 0:
        # NOTE(review): this branch only prints a warning and then falls
        # through to the recursion below, which never reaches a base case
        # for negative x -- almost certainly unintended.
        print 'Invalid Input!'
    if x == 0:
        return 1
    elif x == 1:
        return 1
    else:
        return x * factorial(x - 1)
def sum_digits(num):
    """Return the sum of the decimal digits of `num`."""
    return sum(int(digit) for digit in str(num))
def expand(num):
    """Render `num` in its decimal string form."""
    return '%s' % (num,)
def solve(num):
    """Digit sum of num! (Project Euler problem 20)."""
    fact = factorial(num)
    return sum_digits(fact)
# Command-line entry point: the factorial argument is the first CLI arg.
num = sys.argv[1]
print ('The sum of all digits of the factorial result is: ' + str(solve(num)))
| true |
9ec4fe1d83b579b0d7707aff924b3773230c3d5d | Python | Aasthaengg/IBMdataset | /Python_codes/p02708/s188360411.py | UTF-8 | 184 | 2.515625 | 3 | [] | no_license | N, K = map(int, input().split())
# For each pick-count k from K to N+1, count how many distinct sums are
# reachable by choosing k values from 0..N, modulo 1e9+7.
kMod = 10**9+7
ans = 0
for k in range(K, N+2):
    # Smallest possible sum of k values: 0 + 1 + ... + (k-1).
    lb = (k-1) * k // 2
    # Largest possible sum of k values: (N-k+1) + ... + N.
    ub = (N+1-k + N) * k // 2
    # Every integer between the extremes is attainable.
    ans += (ub-lb+1)
    ans %= kMod
print(ans)
d9a5ef866cd90eca9d6492f3847e8bddf717f2df | Python | Jihong-Tang/computational-genomics | /genome-assembly-mapping/infection_investigator.py | UTF-8 | 6,094 | 2.734375 | 3 | [] | no_license | #coding:utf-8
from bwt_structures import *
from read_aligner import *
from compsci260lib import *
def reverse_complement(seq):
    """
    Returns the reverse complement of the input string.
    """
    pairing = {'A': 'T',
               'C': 'G',
               'G': 'C',
               'T': 'A'}
    # Walk the sequence backwards, complementing each base as we go.
    return ''.join(pairing[base] for base in reversed(seq))
def align_patient_reads():
    """Estimate microbe prevalences for three patients and locate the
    longest internal unmapped region of the Vibrio cholerae genome.

    Each patient read is reverse-complemented and searched against ten
    reference genomes via the FM-index built by make_all/find; reads that
    match exactly one genome count toward that genome's prevalence.

    NOTE(review): Python 2 code (print statements, list-returning
    dict.keys()) with heavy file I/O; code left byte-identical.
    """
    """Create a dictionary called ref_dict to input all reference bacterial fasta file
    """
    ref_dict = {}
    ref_name = ['Bacteroides_ovatus','Bacteroides_thetaiotaomicron','Bifidobacterium_longum',
                'Eubacterium_rectale', 'Lactobacillus_acidophilus', 'Peptoniphilus_timonensis',
                'Prevotella_copri', 'Roseburia_intestinalis', 'Ruminococcus_bromii','Vibrio_cholerae']
    # Load every reference genome into one dict keyed by sequence name.
    for item in ref_name:
        read_dict = get_fasta_dict('reference_genomes/%s.fasta'% item)
        ref_dict.update(read_dict)
    ref_keys = ref_dict.keys()
    ref_keys.sort()
    # for key in ref_keys:
    #     ref_dict[key] = reverse_complement(ref_dict[key])
    """Create a dictionary called ref_fm_dict to store all data structure needed
    to make the fm-index procedure for all ten reference genomes
    """
    ref_fm_dict = {}
    for key in ref_keys:
        ref_fm_dict[key] = make_all(ref_dict[key])
    patient1_dict = get_fasta_dict('patients/patient1.fasta')
    patient2_dict = get_fasta_dict('patients/patient2.fasta')
    patient3_dict = get_fasta_dict('patients/patient3.fasta')
    def find_prevalence(patient_dict):
        # Count, per genome, the reads that map uniquely to that genome.
        pkey = patient_dict.keys()
        result_dict ={}
        for key in ref_keys:
            result_dict[key] = 0
        for i in pkey:
            p_dict = {}
            p_dict[i] = []
            re_seq = reverse_complement(patient_dict[i])
            # re_seq = patient_dict[i]
            for j in ref_keys:
                result = find(re_seq, ref_fm_dict[j])
                if result != []:
                    p_dict[i].append(j)
            # Only reads that hit exactly one genome are counted.
            if len(p_dict[i]) == 1:
                result_dict[p_dict[i][0]] += 1
        return result_dict
    def cal_print_pre(patient_name, result_dict):
        # Print each genome's share of the uniquely-mapped reads.
        total = 0
        for key in ref_keys:
            total += result_dict[key]
        for key in ref_keys:
            pre = float(result_dict[key]) / float(total)
            print "The estimated prevalence of microbe %s for %s is %.2f%%." % (key, patient_name, pre*100)
        return 0
    print 'The estimated microbe prevalences for patient1 are shown as following:'
    cal_print_pre('patient1', find_prevalence(patient1_dict))
    print 'The estimated microbe prevalences for patient2 are shown as following:'
    cal_print_pre('patient2', find_prevalence(patient2_dict))
    print 'The estimated microbe prevalences for patient3 are shown as following:'
    cal_print_pre('patient3', find_prevalence(patient3_dict))
    def find_count(patient_dict, ref_genome_name):
        # Per-base coverage of `ref_genome_name` by this patient's
        # uniquely-mapping reads.
        count_list = [0] * len(ref_dict[ref_genome_name])
        pkey = patient_dict.keys()
        read_length = len(patient_dict[pkey[0]])
        start_list = []
        for key in pkey:
            re_seq = reverse_complement(patient_dict[key])
            p_dict = {}
            p_dict[key] = []
            for j in ref_keys:
                result = find(re_seq, ref_fm_dict[j])
                if result != []:
                    p_dict[key].append(j)
            if p_dict[key] == [ref_genome_name]:
                start_list += find(re_seq, ref_fm_dict[ref_genome_name])
        # Increment coverage along the full read length at each hit.
        for e in start_list:
            for i in range(read_length):
                count_list[e+i] += 1
        return count_list
    # print find_count(patient1_dict, 'Vibrio cholerae')[]
    def find_zeros(count_list):
        """
        In order to use the re function, I convert the count_list to count_str.
        However, it will be an error if original mutation is taken since some number
        in the count_list has more than one digit position, this will cause trouble
        in the determination of position in string. Therefore, I firstly change all
        number which are not equal to zero to be one for convenience.
        """
        mut_list = [1 if e != 0 else 0 for e in count_list]
        count_str = ''.join(str(e) for e in mut_list)
        # print count_list
        # print mut_list
        # print count_str
        # Longest run of zero coverage that is strictly internal
        # (does not touch either end of the genome).
        zeros = re.finditer('0+', count_str)
        max, opt = 0, 0
        for m in zeros:
            if m.start() !=0 and m.end()!= len(count_list):
                interval = m.end() - m.start()
                if interval > max:
                    max = interval
                    opt = m
        if opt != 0:
            return opt.start(), opt.end()
        else:
            return 'None continuous internal zeros can be found from the given information.','You may check your data.'
    start1, end1 = find_zeros(find_count(patient1_dict, 'Vibrio cholerae'))
    a = find_count(patient1_dict, 'Vibrio cholerae')[start1:end1]
    print 'After using the new hunch method on patient1, the 0-indexed start position of the longest internal string of 0s in genome Vibrio cholerae will be %d, and the 1-indexed end position will be %d.' %(start1, end1)
    start2, end2 = find_zeros(find_count(patient3_dict, 'Vibrio cholerae'))
    print 'The information can be worked out for patient3 is: %s %s' %(start2, end2)
    # print 'After using the new hunch method on patient3, the 0-indexed start position of the longest internal string of 0s in genome Vibrio cholerae will be %, and the 1-indexed end position will be % .' %(start2, end2)
    target_str = ref_dict['Vibrio cholerae'][start1:end1]
    print 'The target string found is: %s' % target_str
if __name__ == '__main__':
align_patient_reads()
| true |
8cdd2a2454bd78842d07c5395e5e662e9e40d95d | Python | ZRiddle/SantaK2015 | /Fine_Tuning_v2.py | UTF-8 | 27,936 | 2.765625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
@author: zach.riddle
"""
import pandas as pd
import numpy as np
import time
from matplotlib import pyplot as plt
from matplotlib.pylab import cm
import seaborn
from sklearn.cluster import KMeans
import pickle
from util import *
lat_long = ['Latitude','Longitude']
# Read in Data
# Save files as pkl
with open('trip_metrics_v1.pkl','r') as f:
trip_metrics = pickle.load(f)
with open('trip_list_v1.pkl','r') as f:
T = pickle.load(f)
with open('gifts_v1.pkl','r') as f:
gifts = pickle.load(f)
def add_NorthPole(df):
    """Return a copy of `df` restricted to the Latitude/Longitude/Weight/LB
    columns, with a North Pole row (lat 90, weight 0, index 0) prepended
    and a North Pole row (lat 90, weight 10, index 100001) appended.

    Uses ``pd.concat`` because ``DataFrame.append`` was deprecated in
    pandas 1.4 and removed in 2.0.
    """
    h = ['Latitude', 'Longitude', 'Weight', 'LB']
    # Insert start and end rows for the sleigh's round trip.
    start = pd.DataFrame(data=[[90, 0, 0, 0]], index=[0], columns=h)
    end = pd.DataFrame(data=[[90, 0, 10, 0]], index=[100001], columns=h)
    return pd.concat([start, df[h].copy(), end])
def get_distances(trip):
    """Per-leg distances for a trip: 0 for the first row, then the
    haversine distance from each stop to the stop before it."""
    legs = [0]
    for row in range(1, trip.shape[0]):
        prev_pt = tuple(trip.iloc[row - 1][lat_long])
        curr_pt = tuple(trip.iloc[row][lat_long])
        legs.append(haversine(prev_pt, curr_pt))
    return legs
def WRW_trip(trip, d=None):
    """Weighted Reindeer Weariness for a single trip.

    Parameters
    ----------
    trip : DataFrame with `Weight` and `Distance` columns.
    d : optional sequence of per-leg distances.  It is used only when its
        length matches the number of rows in `trip`; otherwise the trip's
        own `Distance` column is used.

    Returns the sum over stops of (cumulative distance to the stop) times
    (weight delivered at the stop).

    The original default was a mutable ``[]``; ``None`` avoids that
    pitfall with identical behavior.
    """
    if d is not None and len(d) == trip.shape[0]:
        dist = np.array(d)
    else:
        dist = np.array(trip.Distance)
    return (dist.cumsum() * trip.Weight).sum()
def anneal_update(trip, ind, d):
    """Reorder `trip` to the index-label order `ind` and overwrite its
    `Distance` column with `d`; returns the reordered frame.

    Uses label-based ``.loc`` -- the original ``.ix`` indexer was
    deprecated in pandas 0.20 and removed in 1.0.
    """
    trip = trip.loc[ind]
    trip['Distance'] = d
    return trip
def anneal_trip(trip,G=500,alpha=0.03,beta=10,verbose=True):
    """Simulated annealing over the stop order of a single trip.

    Each iteration swaps a random pair of adjacent stops; improvements
    are always kept, regressions are kept with a probability that decays
    over the G iterations (controlled by alpha and beta).

    Returns (best_trip without the North Pole rows, WRW history list,
    swap-acceptance flags, X snapshots of coordinates every 100 steps).

    NOTE(review): Python 2 code -- `G/100` and `g/100` rely on integer
    division, the prints use statement syntax, and `.ix` indexing was
    removed in pandas 1.0.  Left byte-identical.
    """
    # Add North Pole to start and end
    trip = add_NorthPole(trip)
    # Create column with previous distance
    trip['Distance'] = get_distances(trip)
    best_trip = trip.copy()
    ###############################
    # Simulated Annealing
    # Constants
    WRW_pre = WRW_trip(trip) # Prior WRW
    WRW_post = WRW_pre # Post-Swap WRW
    WRW_low = WRW_pre
    ind = np.array(trip.index) # Copy index
    # Get the distances
    d = get_distances(trip.ix[ind])
    # Records
    WRW = [WRW_pre]
    swaps = []
    X = np.zeros((G/100,trip.shape[0]-2,2))
    for g in range(G):
        # Choose a random position to swap
        rand = np.random.randint(trip.shape[0]-3)+1 # Cannot swap first or last
        #rand2 = np.random.randint(trip.shape[0]-2)+1 # Cannot swap first or last
        # Swap the index with the one after it
        ind = np.array(trip.index) # Copy index
        a = ind[rand] # copy i
        ind[rand] = ind[rand+1] # copy j to i
        ind[rand+1] = a # copy i to j
        # Calculate the new distances for only 3 paths, involving 4 points
        d_temp = get_distances(trip.ix[ind[rand-1:rand+3]])
        # Get the distances
        d[rand:rand+3] = d_temp[1:]
        '''
        # Calculate the new distances for only 2 paths, involving 3 points
        d_temp = get_distances(trip.ix[ind[rand2-1:rand2+2]])
        # Get the distances
        d[rand2:rand2+2] = d_temp[1:]
        '''
        # Calculate the new WRW
        WRW_post = WRW_trip(trip.ix[ind],d)
        if WRW_post < WRW_pre:
            # If it's better update
            trip = anneal_update(trip,ind,d)
            WRW_pre = WRW_post
        else:
            # If it's worse, upate with some probability
            p = max((min(WRW_pre/WRW_post,1)-alpha-(.2*g/G)),0)**beta
            # Generate a random number and swap
            if p > np.random.rand():
                trip = anneal_update(trip,ind,d)
                WRW_pre = WRW_post
                swaps.append(1)
            else:
                # Do nothing
                swaps.append(0)
        WRW.append(WRW_pre)
        # Track the best ordering seen so far.
        if WRW_pre < WRW_low:
            WRW_low = WRW_pre
            best_trip = trip.copy()
        # Print outputs
        if verbose and (g+1)%100==0:
            print '%.0f Iteration Complete...'%(g+1)
            print '  -Best WRW = %.0f'%(WRW_low)
            X[g/100,:] = np.array(trip.ix[ind][lat_long])[1:-1]
    return best_trip.iloc[1:trip.shape[0]-1],WRW,swaps,X
# Plot Trips
def plot_trip(T,tripid,newplot=True,color='green'):
temp = T[tripid]
if newplot:
plt.figure('Trip',figsize=(11,8))
plt.scatter(temp['Longitude'], temp['Latitude'], s=temp['Weight'], color='red')
plt.plot(temp['Longitude'],temp['Latitude'], color=color)
def plot_all_presents(T):
    """Plot every trip in `T` on one figure: gifts as scatter points
    (size = weight, one shuffled colormap colour per trip) connected by
    red route lines.  The first and last rows of each trip are skipped
    (they are the North Pole anchors)."""
    # define the colormap
    cmap = plt.cm.RdYlGn
    # extract all colors from the .jet map
    cmaplist = [cmap(i) for i in range(len(T))]
    np.random.shuffle(cmaplist)
    # Plot Figure
    plt.figure("SANTA!!!!!!",figsize=(12,9))
    for tripid in range(len(T)):
        temp = T[tripid].iloc[1:-1]
        plt.scatter(temp['Longitude'], temp['Latitude'], s=temp['Weight'], color=cmaplist[tripid])
        plt.plot(temp['Longitude'],temp['Latitude'], color='red')
'''
#####################################################################
#####################################################################
#####################################################################
## Plots
############################################
# Find Worst Trips
headroom = trip_metrics.WRW - trip_metrics.LB
headroom.sort_values(inplace=True,ascending=False)
#plot_trip(T,headroom.index[0])
#plot_all_presents(T)
n = 33
# create the new map
cmap = cm.get_cmap('winter')
colors = [cmap(1.*i/n) for i in range(n)]
# create the new map
cmap = cm.get_cmap('winter', 33)
plt.figure('Worst Trips',figsize=(17,11))
plt.scatter(gifts.Longitude,gifts.Latitude,color = 'gray')
c=0
coords = [360,0,180,0]
for tripid in headroom.index[:15]:
#range(510,530):#
plot_trip(T,tripid,newplot=False,color=colors[c+1])
c+=1
# Get boundries
if T[tripid].iloc[1:-1].Longitude.min() + 180 < coords[0]:
coords[0] = T[tripid].iloc[1:-1].Longitude.min() + 180
if T[tripid].iloc[1:-1].Longitude.max() + 180 > coords[1]:
coords[1] = T[tripid].iloc[1:-1].Longitude.max() + 180
if T[tripid].iloc[1:-1].Latitude.min() + 90 < coords[2]:
coords[2] = T[tripid].iloc[1:-1].Latitude.min() + 90
if T[tripid].iloc[1:-1].Latitude.max() + 90 > coords[3]:
coords[3] = T[tripid].iloc[1:-1].Latitude.max() + 90
band = .1
plt.xlim(coords[0]*(1-band)-180,coords[1]*(1+band)-180)
plt.ylim(coords[2]*(1-band)-90,coords[3]*(1+band)-90)
plt.title('Worst Trips - Most Wasted Movement',fontsize=20)
plt.tight_layout()
print 'Calculated Lower Bound for these trips =',trip_metrics.LB.sum()
'''
'''
####################################################
# Find Lightest Trips
headroom = trip_metrics.Weight.copy()
headroom.sort_values(inplace=True)#,ascending=False)
#plot_trip(T,headroom.index[0])
#plot_all_presents(T)
def plot_trips(trips,title=''):
n = len(trips)+2
# create the new map
cmap = cm.get_cmap('winter')
colors = [cmap(1.*i/n) for i in range(n)]
plt.figure('Smallest Trips',figsize=(17,11))
plt.scatter(gifts.Longitude,gifts.Latitude,color = 'gray')
c=0
coords = [360,0,180,0]
for tripid in trips:
#range(510,530):#
plot_trip(T,tripid,newplot=False,color=colors[c+1])
c += 1
# Get boundries
if T[tripid].iloc[1:-1].Longitude.min() + 180 < coords[0]:
coords[0] = T[tripid].iloc[1:-1].Longitude.min() + 180
if T[tripid].iloc[1:-1].Longitude.max() + 180 > coords[1]:
coords[1] = T[tripid].iloc[1:-1].Longitude.max() + 180
if T[tripid].iloc[1:-1].Latitude.min() + 90 < coords[2]:
coords[2] = T[tripid].iloc[1:-1].Latitude.min() + 90
if T[tripid].iloc[1:-1].Latitude.max() + 90 > coords[3]:
coords[3] = T[tripid].iloc[1:-1].Latitude.max() + 90
band = .1
plt.xlim(coords[0]*(1-band)-180,coords[1]*(1+band)-180)
plt.ylim(coords[2]*(1-band)-90,coords[3]*(1+band)-90)
plt.title(title,fontsize=20)
plt.tight_layout()
plot_trips(headroom.index[:24],title='Lightest Trips - Total Weight < 400')
print 'Lightest Trips:\n',trip_metrics.sort_values('Weight').head()
'''
#### Notes on updating ###
# Adding Gift:
# If gift i is inserted into trip T, then T_i and T_{i+1} Distances need to be updated
# Then for T, all the trip metrics will need to be recomputed
# Removing Gift
# If gift i is removed from trip T, then T_{i+1} Distance needs to be updated
# Then for T, all the trip metrics will need to be recomputed
class Trips:
'''
Trips Class for optimizing Santa's routes
Properties:
T : list of pandas dfs
Each df is a single trip
trip_metrics : pandas df
1 row per trip
gifts : pandas df
gifts dataset
wrw : float
Weighted Reindeer Weariness - Loss Function
https://www.kaggle.com/c/santas-stolen-sleigh/details/evaluation
'''
lat_long = ['Latitude','Longitude']
def __init__(self,T=None,trip_metrics=None,gifts=None,path=''):
# Set initial tables
if T == None:
with open('trip_list'+path+'.pkl','r') as f:
self.T = pickle.load(f)
else:
self.T = T
if trip_metrics == None:
with open('trip_metrics'+path+'.pkl','r') as f:
self.trip_metrics = pickle.load(f)
else:
self.trip_metrics = trip_metrics
if gifts == None:
with open('gifts'+path+'.pkl','r') as f:
self.gifts = pickle.load(f)
else:
self.gifts = gifts
if 'SPF' not in self.trip_metrics.columns:
# Add SouthPoleFlag
self.trip_metrics['SPF'] = 0
for tr in range(len(self.T)):
if self.T[tr].Latitude.mean() < -60:
self.trip_metrics.iloc[tr,5] = 1
# Set wrw
self.wrw = self.trip_metrics.WRW.sum()
def haversine(self,v1,v2):
# calculate haversine
lat = np.array(np.radians(v1['Latitude'])) - np.array(np.radians(v2['Latitude']))
lng = np.array(np.radians(v1['Longitude'])) - np.array(np.radians(v2['Longitude']))
d = np.sin(lat / 2) ** 2 + np.array(np.cos(np.radians(v1['Latitude']))) *\
np.array(np.cos(np.radians(v2['Latitude']))) * np.sin(lng / 2) ** 2
h = 2 * AVG_EARTH_RADIUS * np.arcsin(np.sqrt(d))
return h # in kilometers
def haversine_NP(self,v1):
# calculate haversine
lat = np.radians(v1['Latitude']) - np.radians(north_pole[0])
lng = np.radians(v1['Longitude']) - np.radians(north_pole[1])
d = np.sin(lat / 2) ** 2 + np.cos(np.radians(v1['Latitude'])) * np.cos(np.radians(north_pole[0])) * np.sin(lng / 2) ** 2
h = 2 * AVG_EARTH_RADIUS * np.arcsin(np.sqrt(d))
return h # in kilometers
def remove_gift(self,tripid,giftid,update=True):
'''0
Removes a gift from a trip
Updates the trip
Returns the gift to add into another trip
'''
# Copy gift
loose_gift = self.T[tripid].ix[giftid].copy()
# Record index
ind = self.T[tripid].index.get_loc(giftid)
# Delete gift
self.T[tripid].drop(giftid,inplace=True)
if update:
# Recalculate the Distance for the gift that is now at the index
if ind == 0:
self.T[tripid].iloc[ind,4] = self.haversine_NP(self.T[tripid].iloc[ind][lat_long])
elif ind < self.T[tripid].shape[0]:
self.T[tripid].iloc[ind,4] = self.haversine(self.T[tripid].iloc[ind-1][lat_long],self.T[tripid].iloc[ind][lat_long])
# Update metrics for trip id
self.update_trip_metrics(tripid)
# Return gift
return loose_gift
def update_trip_metrics(self,tripid):
'''
Updates the trip_metrics dataframe
'''
# Don't change South Pole Flag
SPF = self.trip_metrics.ix[tripid].SPF
# Updates Weight, Count, LB, WRW, and AvgLong for a trip
# Weight - sum weights
weight = self.T[tripid].Weight.sum()
# Count - number of presents
ct = self.T[tripid].shape[0]
# Lower Bound for trip
# Sum the Lower Bounds * Weights + the largest LB Distances*2*10 for roundtrip sleigh
LB = (self.T[tripid].LB*self.T[tripid].Weight).sum() + 2*self.T[tripid].LB.max()*10
# WRW for trip
# Sum actual cumulative distances * Weights + sled weight including trip home
wrw = np.sum((self.T[tripid].Distance).cumsum()*self.T[tripid].Weight)
wrw += (self.T[tripid].iloc[-1]['LB']+self.T[tripid].Distance.sum())*10
# Compute Average Longitude
avg_long = np.mean(self.T[tripid]['Longitude'])
# Update row
self.trip_metrics.ix[tripid] = [weight,ct,LB,wrw,avg_long,SPF]
# Update overall wrw
self.wrw = self.trip_metrics.WRW.sum()
def add_gift(self,tripid,loose_gift):
'''
Removes a gift from a trip
Updates the trip
Returns the gift to add into another trip
'''
# Add to trip
self.T[tripid] = self.T[tripid].append(loose_gift)
# Put into the correct spot based on Latitude
g_id = self.T[tripid].index[-1]
self.T[tripid].sort_values(by='Latitude',inplace=True,ascending=False)
ind = self.T[tripid].index.get_loc(g_id)
# Recalculate the Distance for the gift that is now at the index and the one after it
if ind == 0:
self.T[tripid].iloc[ind,4] = self.T[tripid].iloc[ind]['LB']
else:
self.T[tripid].iloc[ind,4] = self.haversine(self.T[tripid].iloc[ind-1][lat_long],self.T[tripid].iloc[ind][lat_long])
if ind+1 < self.T[tripid].shape[0]:
self.T[tripid].iloc[ind+1,4] = self.haversine(self.T[tripid].iloc[ind][lat_long],self.T[tripid].iloc[ind+1][lat_long])
# Update metrics for trip id
self.update_trip_metrics(tripid)
########################################
#Plotting
# Plot Trips
def plot_trip(self,tripid,newplot=True,color='green'):
temp = self.T[tripid].copy()
if newplot:
plt.figure('Trip',figsize=(8,5))
plt.scatter(temp['Longitude'], temp['Latitude'], s=temp['Weight'], color='red')
plt.plot(temp['Longitude'],temp['Latitude'], color=color)
def plot_trips(self,trips,title='Trips',newplot=True):
n = len(trips)+2
# create the new map
cmap = cm.get_cmap('winter')
colors = [cmap(1.*i/n) for i in range(n)]
if newplot:
plt.figure(title,figsize=(12,8))
plt.subplot(211)
else:
plt.subplot(212)
plt.scatter(self.gifts.Longitude,gifts.Latitude,color = 'gray')
c=0
coords = [360,0,180,0]
for tripid in trips:
#range(510,530):#
self.plot_trip(tripid,newplot=False,color=colors[c+1])
c += 1
# Get boundries
if self.T[tripid].Longitude.min() + 180 < coords[0]:
coords[0] = self.T[tripid].Longitude.min() + 180
if self.T[tripid].Longitude.max() + 180 > coords[1]:
coords[1] = self.T[tripid].Longitude.max() + 180
if self.T[tripid].Latitude.min() + 90 < coords[2]:
coords[2] = self.T[tripid].Latitude.min() + 90
if self.T[tripid].Latitude.max() + 90 > coords[3]:
coords[3] = self.T[tripid].Latitude.max() + 90
band = .1
plt.xlim(coords[0]*(1-band)-180,coords[1]*(1+band)-180)
plt.ylim(coords[2]*(1-band)-90,coords[3]*(1+band)-90)
plt.title(title,fontsize=20)
plt.tight_layout()
def write_sub(self,filename):
sub = pd.DataFrame(columns=T[0].columns)
for tID in self.trip_metrics.index:
# Add trip to DF
temp = self.T[tID].copy()
temp['LB'] = tID
sub = pd.concat([sub,temp])
sub.index.names = ['GiftId']
sub['TripId'] = sub['LB'].astype(int)
sub['TripId'].to_csv(filename,header = ['TripId'])
def save_data(self,path=''):
# Save files as pkl
with open('trip_metrics'+path+'.pkl','w') as f:
pickle.dump(self.trip_metrics,f)
with open('trip_list'+path+'.pkl','w') as f:
pickle.dump(self.T,f)
with open('gifts'+path+'.pkl','w') as f:
pickle.dump(self.gifts,f)
    def destroy_trip(self,tripid):
        '''
        Destroy a trip, dropping it from the metrics table and returning
        the list of loose presents that were assigned to it.
        '''
        # Pull every gift off the trip; update=False defers per-removal
        # metric recomputation since the whole trip row is dropped below.
        # NOTE(review): assumes remove_gift(update=False) does not mutate
        # self.T[tripid].index while it is being iterated -- confirm.
        loose_gifts = []
        for g in self.T[tripid].index:
            l = self.remove_gift(tripid,g,update=False)
            loose_gifts.append(l)
        # Remove the trip's row from the metrics table entirely.
        self.trip_metrics.drop(tripid,inplace=True)
        return loose_gifts
    def diffuse_gifts(self,loose_gifts):
        """Reassign loose gifts to their nearest trips (by average
        longitude), then rebalance overweight trips east/west."""
        # Keep a south pole flag for this algorithm: gifts below -60 latitude
        # are only mixed with other south-pole trips (SPF column).
        # NOTE(review): SP is decided by the *last* loose gift only; this
        # assumes a destroyed trip lies entirely in one hemisphere -- confirm.
        SP = 0
        if loose_gifts[-1].Latitude < -60:
            #print 'SP = 1'
            SP = 1
        # Step 1 - Add all gifts to the closest trip (same-hemisphere only).
        for g in loose_gifts:
            # Find Nearest Trip by squared longitude distance
            new_trip = ((self.trip_metrics[self.trip_metrics.SPF == SP].AvgLong - g.Longitude)**2).sort_values().index[0]
            # Add present to trip
            self.add_gift(new_trip,g)
        # Step 2 - rebalance; randomize the direction order to avoid a
        # systematic east/west bias across calls.
        if np.random.rand() > .5:
            self.diffuse_east(SP)
            self.diffuse_west(SP)
        else:
            self.diffuse_west(SP)
            self.diffuse_east(SP)
    def diffuse_east(self,SP,cap=1000):
        """Walk trips west-to-east in hemisphere SP, pushing each trip's
        east-most gifts onto neighbours until its weight is <= cap."""
        # Start from the west-most trip in this hemisphere.
        # NOTE(review): the original comment said "heaviest trip", but this
        # selects by longitude, not weight -- confirm intent.
        curr_trip = self.trip_metrics[self.trip_metrics.SPF == SP].sort_values('AvgLong',ascending=True).index[0]
        # Overweight trips in this hemisphere.
        # NOTE(review): if no trip exceeds cap, the .index[0] below raises
        # IndexError -- callers appear to guarantee an overweight trip; confirm.
        mask = np.logical_and(self.trip_metrics.SPF == SP,self.trip_metrics.Weight>cap)
        eastmost_trip = self.trip_metrics[mask].sort_values('AvgLong',ascending=False).index[0]
        # Keep going east until every visited trip is within the cap.
        while curr_trip != eastmost_trip and mask.sum():
            # Keep removing presents until this trip satisfies the cap.
            print ' -Eastward - trip #'+str(curr_trip)
            while self.trip_metrics.ix[curr_trip].Weight > cap:
                # Take the east-most gift of the current trip.
                east_gift = self.T[curr_trip].sort_values('Longitude',ascending=False).index[0]
                loose = self.remove_gift(curr_trip,east_gift)
                # Add it to its nearest trip (never back onto itself).
                new_trip = ((self.trip_metrics[self.trip_metrics.SPF == SP].AvgLong - loose.Longitude)**2).sort_values().index[:2]
                if new_trip[0]==curr_trip:
                    new_trip = new_trip[1]
                else:
                    new_trip = new_trip[0]
                self.add_gift(new_trip,loose)
            # Advance to the next eastward trip that is still over the cap.
            mask = np.logical_and(self.trip_metrics.AvgLong > self.trip_metrics.ix[curr_trip].AvgLong,self.trip_metrics.Weight > cap)
            mask = np.logical_and(mask,self.trip_metrics.SPF == SP)
            if mask.sum():
                curr_trip = self.trip_metrics[mask].sort_values('AvgLong').index[0]
    def diffuse_west(self,SP,cap=1000):
        """Mirror of diffuse_east: walk trips east-to-west in hemisphere SP,
        pushing each trip's west-most gifts onto neighbours until <= cap."""
        # Start from the east-most trip in this hemisphere.
        curr_trip = self.trip_metrics[self.trip_metrics.SPF == SP].sort_values('AvgLong',ascending=False).index[0]
        # Overweight trips in this hemisphere.
        # NOTE(review): despite its name, with ascending=True this variable
        # holds the WEST-most overweight trip (the stopping point) -- confirm.
        mask = np.logical_and(self.trip_metrics.SPF == SP,self.trip_metrics.Weight>cap)
        eastmost_trip = self.trip_metrics[mask].sort_values('AvgLong',ascending=True).index[0]
        # Keep going west until every visited trip is within the cap.
        while curr_trip != eastmost_trip and mask.sum():
            # Keep removing presents until this trip satisfies the cap.
            print ' -Westward - trip #'+str(curr_trip)
            while self.trip_metrics.ix[curr_trip].Weight > cap:
                # Take the west-most gift (ascending sort despite the name).
                east_gift = self.T[curr_trip].sort_values('Longitude',ascending=True).index[0]
                loose = self.remove_gift(curr_trip,east_gift)
                # Add it to its nearest trip (never back onto itself).
                new_trip = ((self.trip_metrics[self.trip_metrics.SPF == SP].AvgLong - loose.Longitude)**2).sort_values().index[:2]
                if new_trip[0]==curr_trip:
                    new_trip = new_trip[1]
                else:
                    new_trip = new_trip[0]
                self.add_gift(new_trip,loose)
            # Advance to the next westward trip that is still over the cap.
            mask = np.logical_and(self.trip_metrics.AvgLong < self.trip_metrics.ix[curr_trip].AvgLong,self.trip_metrics.Weight > cap)
            mask = np.logical_and(mask,self.trip_metrics.SPF == SP)
            if mask.sum():
                curr_trip = self.trip_metrics[mask].sort_values('AvgLong',ascending = False).index[0]
def destroy_diffuse(self,tripid):
loose_gifts = self.destroy_trip(tripid)
self.diffuse_gifts(loose_gifts)
'''
# Plot the Average Longitude
plt.figure('Trip Longitudes',figsize=(11,9))
temp_metrics = Santa.trip_metrics.copy()
temp_metrics.AvgLong = np.round(temp_metrics.AvgLong,-1).astype(int)
temp_metrics.groupby('AvgLong')['Weight'].sum().plot(kind='bar')
'''
##############################
def swap_worst_trip(Santa,ntry=10,plot_it = True,greedOfTheNoob=True,SPF = True,verbose=1):
    """Local search: repeatedly pick a high-headroom trip (stochastically),
    move its worst longitudinal-outlier present to the nearest other trip,
    and undo the move if WRW got worse (when greedOfTheNoob).  Returns Santa.
    """
    start_wrw = Santa.wrw
    stime = time.time()
    for k in range(ntry):
        pp= '~'*33+'\nLoop #'+str(k)
        # Headroom = how far each trip's WRW is above its lower bound.
        headroom = Santa.trip_metrics.WRW - Santa.trip_metrics.LB
        headroom.sort_values(inplace=True,ascending=False)
        # Plot single worst trip
        #plot_trip(T,headroom.index[0],newplot=False,color=colors[c+1])
        # Choose a "worst" trip stochastically, weighted by headroom
        # among the 500 worst (the +10 keeps all probabilities positive).
        p = headroom.head(500)+10
        p /= p.sum()
        w = np.random.choice(np.arange(p.shape[0]),p=p)
        worst_trip = headroom.index[w]
        #Santa.trip_metrics.ix[worst_trip]
        #Santa.T[worst_trip].head()
        # Rank the trip's presents by squared longitude distance from the
        # trip's average longitude, then pick one of the 20 worst.
        dis = (Santa.T[worst_trip].Longitude - Santa.trip_metrics.ix[worst_trip].AvgLong)**2
        dis.sort_values(inplace=True,ascending=False)
        # Choose worst Present stochastically
        p = dis.head(20)
        p /= p.sum()
        w = np.random.choice(np.arange(p.shape[0]),p=p)
        outlier = Santa.T[worst_trip].ix[dis.index[w]]
        worst_present = dis.index[w]
        # Find trip with closest average longitude; optionally restrict the
        # search to the same hemisphere (south-pole flag).
        if SPF:
            SP_mask = Santa.trip_metrics.SPF == Santa.trip_metrics.ix[worst_trip].SPF
            closest_trip = (outlier.Longitude - Santa.trip_metrics[SP_mask].AvgLong)**2
        else:
            closest_trip = (outlier.Longitude - Santa.trip_metrics.AvgLong)**2
        closest_trip.sort_values(inplace=True)
        # Nearest trip, skipping the source trip itself.
        new_trip = closest_trip.head(2).index
        if new_trip[0]==worst_trip:
            new_trip = new_trip[1]
        else:
            new_trip = new_trip[0]
        # Respect the 1000-unit sleigh capacity.
        if Santa.trip_metrics.ix[new_trip].Weight + Santa.T[worst_trip].ix[worst_present].Weight > 1000:
            if verbose:
                print pp
                print 'Too Heavy'
        else:
            # NOTE(review): the line below computes a value and discards it
            # -- dead code?
            Santa.trip_metrics.ix[closest_trip.head().index]
            if verbose:
                print ' -Worst Trip',worst_trip
                print ' -Worst Present',worst_present
                print ' -New Trip',new_trip
            if plot_it:
                Santa.plot_trips([worst_trip,new_trip],title='Before '+str(k))
            # Combined WRW of the two trips before the swap.
            wrw0 = Santa.trip_metrics.ix[[worst_trip,new_trip]].WRW.sum()
            # Remove it
            pres = Santa.remove_gift(worst_trip,worst_present)
            # Add it
            Santa.add_gift(new_trip,pres)
            wrw2 = Santa.trip_metrics.ix[[worst_trip,new_trip]].WRW.sum()
            if verbose:
                print ' -WRW reduction after swap =',1-wrw2/wrw0
                print ' -Absolute WRW Reduction =',wrw0-wrw2
            # NOTE(review): this elif chains off "if verbose", so the success
            # message only prints when verbose is falsy -- confirm intent.
            elif wrw2 < wrw0 and (k+1)%1000==0:
                print pp
                print ' -Sucess! Reduced by',wrw0-wrw2
            if plot_it:
                Santa.plot_trips([worst_trip,new_trip],title='After '+str(k),newplot=False)
            if wrw0 < wrw2 and greedOfTheNoob:
                # Swap made things worse: put the present back.
                pres = Santa.remove_gift(new_trip,worst_present)
                Santa.add_gift(worst_trip,pres)
    # Fraction of the gap to the lower bound that was closed.
    perc_gained_lb = (start_wrw-Santa.wrw) / (start_wrw - Santa.trip_metrics.LB.sum())
    print '\nStarting WRW = %0f\nNew WRW = %.0f\nWRW %% Gained = %.2f%%'%(start_wrw,Santa.wrw,perc_gained_lb*100)
    print 'Runtime (minutes) = %.2f'%((time.time()-stime)/60.0)
    return Santa
# Driver script: load state, repeatedly destroy the lightest trip and
# diffuse its gifts, then shake down capacities and greedily optimize.
# Initialize Class
Santa = Trips(path='_v1')
# Bookkeeping for which destroy-diffuse moves helped (good) or hurt (bad).
good_trips = []
bad_trips = []
gain = 1
total_g = 0
total_l = 0
beg_wrw = Santa.wrw
best_wrw = beg_wrw
wrw_all = []
#while gain > 0 and len(good_trips) < 10:
for k in range(200):
    # Find the lightest trip
    light_trip = Santa.trip_metrics[Santa.trip_metrics.Weight > 0].sort_values('Weight').index[0]
    # Record pre WRW
    wrw_start = Santa.wrw
    w = Santa.trip_metrics.ix[light_trip].Weight
    # Destroy it
    print 'Loop %.0f\n -Destroying trip %.0f\n -Weight = %.0f'%(k,light_trip,w)
    Santa.destroy_diffuse(light_trip)
    # Check new WRW
    wrw_end = Santa.wrw
    gain = wrw_start - wrw_end
    print ' -Gain = %.0f'%(gain)
    wrw_all.append(wrw_end/1000000)
    if gain > 0:
        good_trips.append(light_trip)
        total_g += gain
    else:
        #print 'Trip',light_trip,'Was a bad move! STOPPPPPPP!!!!!'
        bad_trips.append(light_trip)
        total_l += gain
    if wrw_end < best_wrw:
        best_wrw = wrw_end
print 'Total trips killed =',len(good_trips)+len(bad_trips)
print 'New WRW =',wrw_end
# NOTE(review): '%0f' below is likely a typo for '%.0f'.
print 'Total WRW Gain = %0f'%(beg_wrw - wrw_end)
print 'Best WRW = %.0f'%best_wrw
Santa.save_data(path='_v3')
# Check the average capacity of the two hemisphere groups (SPF 0/1).
w_top = Santa.trip_metrics[Santa.trip_metrics.SPF==0].Weight.sum()
w_bottom = Santa.trip_metrics[Santa.trip_metrics.SPF==1].Weight.sum()
ct_top = Santa.trip_metrics[Santa.trip_metrics.SPF==0].shape[0]
ct_bottom = Santa.trip_metrics[Santa.trip_metrics.SPF==1].shape[0]
print 'Top Avg Capacity = %.1f'%(w_top/ct_top)
print 'Bottom Avg Capacity = %.1f'%(w_bottom/ct_bottom)
# Look for groups of 4 trips, break them into 5 optimally
wrw_all = np.array(wrw_all)
plt.plot(wrw_all)
#####################################
# Shake Down: progressively tighten the weight cap from 990 to 950,
# diffusing both hemispheres east then west at each step.
wrw_before = Santa.wrw
cap = 990
while cap >= 950:
    Santa.diffuse_east(1,cap+5)
    Santa.diffuse_west(1,cap)
    Santa.diffuse_east(0,cap+5)
    Santa.diffuse_west(0,cap)
    cap -= 10
    # Calc new WRW
    gain = wrw_before - Santa.wrw
    wrw_before = Santa.wrw
    print 'WRW Gain = %.0f'%gain
Santa.save_data(path='_v3')
#####################################
# Greedy optimize: 18 rounds of 30k stochastic present swaps, saving after each.
for omg in range(18):
    Santa = swap_worst_trip(Santa,30000,
                            plot_it=False,
                            greedOfTheNoob=True,
                            SPF=True,
                            verbose=0)
    Santa.save_data(path='_v3')
| true |
17973f995008cfb1d909ce8b76f9a7f305c970b4 | Python | sjcoope/drdb-base | /src/function-check-emr-status/function.py | UTF-8 | 1,915 | 2.546875 | 3 | [] | no_license | import sys
import traceback
import boto3
import logging
import json
# Setup logging for lambda and local development.
# In AWS Lambda the root logger already has a handler installed, so only the
# level needs adjusting; locally, basicConfig attaches a default handler.
logger = logging.getLogger()
if len(logging.getLogger().handlers) > 0:
    logging.getLogger().setLevel(logging.INFO)
else:
    logging.basicConfig(level=logging.INFO)
def handler(event, context):
    """Lambda entry point: report the active DRDB-Job-Processor EMR cluster.

    Returns {"Id": ..., "Status": ...}; both values are empty strings when
    no single matching active cluster exists.  On unexpected failure the
    error is logged as JSON and None is returned (original behaviour).
    """
    try:
        logger.info("Starting EMR check status operation")
        client = boto3.client("emr")
        cluster_name = "DRDB-Job-Processor"
        # TODO: Fix paging of this (returns max of 50) and we need to accomodate that.
        clusters = client.list_clusters(
            ClusterStates=["RUNNING", "WAITING", "STARTING", "BOOTSTRAPPING"]
        )
        logger.info(f"Active Clusters Found: {len(clusters['Clusters'])}")
        matched_clusters = [i for i in clusters["Clusters"] if i["Name"] == cluster_name]
        logger.info(f"Matched Clusters Found: {len(matched_clusters)}")
        cluster_id = ""
        cluster_status = ""
        if len(matched_clusters) > 1:
            # logger.warn() is deprecated; warning() is the supported spelling.
            logger.warning(f"Number of active clusters is {len(matched_clusters)} when it should be 1")
        # Report the cluster only when exactly one match exists.
        # NOTE(review): with more than one match, empty strings are returned
        # even though a cluster is running -- confirm this is intended.
        if len(matched_clusters) == 1:
            cluster = matched_clusters[0]
            cluster_id = cluster["Id"]
            cluster_status = cluster["Status"]["State"]
        return {
            "Id": cluster_id,
            "Status": cluster_status
        }
    except Exception:
        # Serialise the full traceback as JSON for CloudWatch.
        exception_type, exception_value, exception_traceback = sys.exc_info()
        traceback_string = traceback.format_exception(exception_type, exception_value, exception_traceback)
        err_msg = json.dumps({
            "errorType": exception_type.__name__,
            "errorMessage": str(exception_value),
            "stackTrace": traceback_string
        })
        logger.error(err_msg)
# Allow invoking the handler directly for local debugging.
if __name__ == '__main__':
    handler(None, None)
05bd14811cc17775e18efcbaa6dc1b4bb69af3d4 | Python | dominik31415/GenotypeTable2Fasta | /GenotypeTable2Fasta.py | UTF-8 | 3,514 | 2.90625 | 3 | [] | no_license | # GenotypeTable2Fasta.py
# version 1.0
#
# July 23, 2016
#
# Authors: Dominik Geissler & Hai D.T. Nguyen
# Correspondence: geissler_dominik@hotmail.com, hai.nguyen.1984@gmail.com
# Acknowledgements: Benjamin Furman for inspiration
#
# This script will read in an SNP (single nucleotide polymorphism) genotype table called
# file1.txt and convert it to a fasta file called file2.fasta.
#
# Where there is a heterozygous site, the IUPAC code will be used (A/G = R; C/T = Y; etc.)
# Missing and other calls will be substituted with a gap (./. = -; C/* = -; etc.)
#
# The genotype table can have n individuals and the script will output n sequences in
# a multi fasta file.
# This resulting multi fasta file is useful for genotyping with phylogenetic analysis
#
# REQUIREMENTS
#
# 1. Pandas has to be installed.
# In Ubuntu, you can install it by typing "sudo apt-get install python-pandas"
# If using MacOS X, you can try this command "sudo easy_install pandas"
#
# 2. Generate a genotype table (file1.txt) from a VCF file with bcftools.
# First make sure bcftools is installed (https://samtools.github.io/bcftools/)
# This script was tested with bcftools 1.3 (using htslib 1.3)
#
# The VCF file tested was generated by GATK 3.6 (https://www.broadinstitute.org/gatk/).
# First use HaplotypeCaller on each sample or individual with the flag -ERC GVCF to
# generate one GVCF file per sample or individual.
#
# Then merge the GVCF files into a single VCF file with GenotypeGVCFs
#
# java -jar GenomeAnalysisTK.jar -T GenotypeGVCFs \
# -R reference_genome.fasta \
# -V individual_1.haplotypecaller.vcf \
# -V individual_2.haplotypecaller.vcf \
# -V individual_3.haplotypecaller.vcf \
# -V individual_4.haplotypecaller.vcf \
# -o all_individuals.joint.haplotypecaller.vcf
#
# Then select out the SNP's using SelectVariants
#
# java -jar GenomeAnalysisTK.jar -T SelectVariants \
# -R reference_genome.fasta \
# -V all_individuals.joint.haplotypecaller.vcf \
# -selectType SNP \
# -o all_individuals.joint.haplotypecaller.snps.vcf
#
# Optional: filter out false positives with manual filtering or other methods recommended
# by the makers of GATK
#
# Then process the final VCF file and run bcftools index:
#
# bgzip all_individuals.joint.haplotypecaller.snps.vcf
# bcftools index all_individuals.joint.haplotypecaller.snps.vcf.gz
#
# Output the table called file1.txt with this command:
#
# bcftools view all_individuals.joint.haplotypecaller.snps.vcf.gz | bcftools query -f '[%TGT\t]\n' > file1.txt
#
# Make sure this script and file1.txt are in the same folder. Then execute the script by typing:
#
# python GenotypeTable2Fasta.py
#
#import pandas as pd
from pandas import DataFrame
import csv
import os
path = 'file1.txt' #name of input file
# Read the tab-separated genotype table produced by `bcftools query`.
# Bug fix: the original left the input file handle open; the context
# manager guarantees it is closed.
with open(path, 'r') as infile:
    data0 = list(csv.reader(infile, delimiter='\t'))
data = DataFrame(data0)
#data = data.drop(4,axis=1) #this removes the 4th column because it starts with a tab
# Replacement table: heterozygous calls -> IUPAC ambiguity codes,
# homozygous calls -> single base, missing/other calls -> gap '-'.
ep0 = [['T/A', 'W'],['A/T', 'W'],['C/G', 'S'],['G/C', 'S'],['A/G', 'R'],['G/A', 'R'],['A/C', 'M'],['C/A', 'M'],['G/T', 'K'],['T/G', 'K']]
ep1 =[['C/T', 'Y'],['T/C', 'Y'],['G/*', '-'],['A/*', '-'],['C/*', '-'],['T/*', '-'],['G/G', 'G'],['A/A', 'A'],['C/C', 'C'],['T/T', 'T'],['./.', '-']]
ep2 = ep0+ep1
for x in ep2:
    data = data.replace(x[0],x[1])
##outputs
pp = 'file2.fasta' #name of output file
# One fasta record per individual: transpose so each table column (one
# individual's calls) becomes a row.
data = data.transpose()
n = 0
with open(pp, 'w') as ff:
    for row in data.iterrows():
        tmp = list(row[:][1])
        tmp2 = ''.join(tmp)
        if tmp2 != '': #skip over empty columns
            n += 1
            ff.write('>individual_'+str(n)+'\n') #>individual_1
            ff.write(tmp2+'\n')
| true |
7566669aa3edc5e80e30db8e2cfe811cd6b730af | Python | papibenjie/TaskQueue | /taskQueue/queue/queue_creator.py | UTF-8 | 836 | 3.28125 | 3 | [] | no_license | from .func_node import FuncNode
from .base_node import BaseNode
from .base_queue import BaseQueue
def queue_from_list(funcs):
    """Build a BaseQueue whose nodes wrap *funcs* in order.

    An empty list yields a queue rooted at a bare BaseNode; otherwise each
    function is chained as a FuncNode, head first.
    """
    _validate_func_list(funcs)
    if not funcs:
        return BaseQueue(BaseNode())
    head = FuncNode(funcs[0])
    tail = head
    for func in funcs[1:]:
        tail.child = FuncNode(func)
        tail = tail.child
    return BaseQueue(head)
def _validate_func_list(funcs):
if not isinstance(funcs, list):
raise ValueError("A list of functions is needed to create a queue, '{0}' was passed.".format(funcs))
for f in funcs:
if not callable(f):
raise ValueError("All elements in list '{0}' must be callable, '{1}' is not.".format(funcs, f))
| true |
305af71177c0e78c565cfb06dd54ab051f115b2c | Python | techiemilin/SeleniumWithPython | /com/seleniumpython/Cookies.py | UTF-8 | 482 | 2.875 | 3 | [] | no_license | '''
Created on Apr. 11, 2019
@author: milinpatel
'''
from selenium import webdriver
# Bug fix: the original path had a trailing space, which prevented the
# chromedriver binary from being located.
driver = webdriver.Chrome("/Users/milinpatel/Documents/workspace/SeleniumWithPython/drivers/chromedriver")
driver.get("https://www.amazon.ca/")
# Read all cookies set by the page.
cookies = driver.get_cookies()
print(cookies)
print(len(cookies))
# adding cookie -- selenium requires both 'name' and 'value' keys.
cookie = {"name": "My cookie", "value": "my value"}
driver.add_cookie(cookie)
# Re-read the cookies.  Bug fix: the original omitted the call parentheses,
# printing the bound method and raising TypeError on len(method).
cookies = driver.get_cookies()
print(cookies)
print(len(cookies))
driver.delete_all_cookies()
| true |
d89dd567f9008ff1a89c101306f58124860b6af5 | Python | limz10/NLP | /pset4/pset4.py | UTF-8 | 10,069 | 3.09375 | 3 | [] | no_license | import sys, re
import nltk
from nltk.corpus import treebank
from collections import defaultdict
from nltk import induce_pcfg
from nltk.grammar import Nonterminal
from nltk.tree import Tree
from math import exp, pow
unknown_token = "<UNK>" # unknown word token.
""" Removes all function tags e.g., turns NP-SBJ into NP.
"""
def RemoveFunctionTags(tree):
    """Strip function tags from every nonterminal, e.g. NP-SBJ -> NP."""
    for node in tree.subtrees():
        # Trace preterminals keep their special "-NONE-" label untouched.
        if node.height() == 2 and node.label() == "-NONE-":
            continue
        parts = re.split("[-=]", node.label())
        # More than one piece means the label carried a function tag;
        # keep only the base category, e.g. "NP".
        if len(parts) > 1:
            node.set_label(parts[0])
""" Return true if node is a trace node.
"""
def IsTraceNode(node):
    """Return True iff *node* is a preterminal trace node (one "-NONE-" child)."""
    is_preterminal = node.height() == 2 and len(node) == 1
    return is_preterminal and node.label() == "-NONE-"
""" Deletes any trace node children and returns true
if all children were deleted.
"""
def RemoveTraces(node):
    """Recursively delete trace-node children; return True iff *node*
    ended up with no children at all (so its parent should delete it too)."""
    if node.height() == 2: # if the node is a preterminal node
        return False # already a preterminal, cannot have a trace node child.
    i = 0
    # Manual index loop because children are deleted in place while scanning.
    while i < len(node): # iterate over the children, node[i]
        # if the child is a trace node or it is a node whose children were deleted
        if IsTraceNode(node[i]) or RemoveTraces(node[i]):
            del node[i] # then delete the child (do not advance i)
        else: i += 1
    return len(node) == 0 # return true if all children were deleted
""" Preprocessing of the Penn treebank.
"""
def TreebankNoTraces():
    """Load the Penn treebank, keep only S-rooted trees, strip function
    tags and traces, and convert each tree to Chomsky normal form."""
    cleaned = []
    for tree in treebank.parsed_sents():
        if tree.label() != "S":
            continue
        RemoveFunctionTags(tree)
        RemoveTraces(tree)
        tree.collapse_unary(collapsePOS=True, collapseRoot=True)
        tree.chomsky_normal_form()
        cleaned.append(tree)
    return cleaned
""" Enumerate all preterminal nodes of the tree.
"""
def PreterminalNodes(tree):
    """Yield every preterminal (height-2) node of *tree*."""
    for candidate in tree.subtrees():
        if candidate.height() != 2:
            continue
        yield candidate
""" Print the tree in one line no matter how big it is
e.g., (VP (VB Book) (NP (DT that) (NN flight)))
"""
def PrintTree(tree):
    """Render *tree* on one line, e.g. (VP (VB Book) (NP (DT that) (NN flight)))."""
    if tree.height() == 2:
        # Preterminal: emit "(TAG word)".
        return "(%s %s)" % (tree.label(), tree[0])
    rendered_children = " ".join(PrintTree(child) for child in tree)
    return "(%s %s)" % (tree.label(), rendered_children)
""" Initialize vocabulary from the training set
"""
def init_vocab(training_set):
    """Build the vocabulary: every word that occurs more than once.

    Words seen only once are later mapped to the unknown token.  The
    original kept a set and a count dict in parallel; a single frequency
    count expresses the same rule more directly.
    """
    counts = defaultdict(int)
    for sentence in training_set:
        for word in sentence.leaves():
            counts[word] += 1
    return set(word for word, c in counts.items() if c > 1)
""" As usual, build a static vocabulary from the training set,
treating every word that occurs not more than once as an unknown token.
"""
def PreprocessText(text_set, vocab):
    """Replace every out-of-vocabulary leaf with the unknown token.

    Trees are modified in place; the same trees are returned as a list.
    """
    processed = []
    for tree in text_set:
        for preterminal in PreterminalNodes(tree):
            if preterminal[0] not in vocab:
                preterminal[0] = unknown_token
        processed.append(tree)
    return processed
""" Learning a PCFG from dataset
"""
def learn_PCFG(text_set, start_token):
    """Induce a PCFG from the productions of every tree in *text_set*."""
    start_symbol = Nonterminal(start_token)
    productions = []
    for tree in text_set:
        productions.extend(tree.productions())
    return induce_pcfg(start_symbol, productions)
class InvertedGrammar:
    """PCFG wrapper with inverted (RHS -> productions) indexes, used to run
    the probabilistic CKY parser over test sentences."""
    def __init__(self, pcfg):
        self._pcfg = pcfg
        self._r2l = defaultdict(list) # maps RHSs to list of LHSs
        self._r2l_lex = defaultdict(list) # maps lexical items to list of LHSs
        self.BuildIndex() # populates self._r2l and self._r2l_lex according to pcfg
    def PrintIndex(self, filename):
        """Dump both indexes to *filename* for inspection/debugging."""
        f = open(filename, "w")
        for rhs, prods in self._r2l.iteritems():
            f.write("%s\n" %str(rhs))
            for prod in prods:
                f.write("\t%s\n" %str(prod))
        f.write("---\n")
        for rhs, prods in self._r2l_lex.iteritems():
            f.write("%s\n" %str(rhs))
            for prod in prods:
                f.write("\t%s\n" %str(prod))
        f.write("---\n")
        f.close()
    def BuildIndex(self):
        """ Build an inverted index of your grammar that maps right hand sides of all
        productions to their left hands sides.
        """
        # Lexical (A -> word) and binary (A -> B C) productions are indexed
        # separately: they serve the CKY base case and recursive case.
        for production in self._pcfg.productions():
            if production.is_lexical():
                self._r2l_lex[production.rhs()].append(production)
            else:
                self._r2l[production.rhs()].append(production)
        self.PrintIndex("index")
    def Parse(self, sent):
        """ Implement the CKY algorithm for PCFGs,
        populating the dynamic programming table with log probabilities of
        every constituent spanning a sub-span of a given
        test sentence (i, j) and storing the appropriate back-pointers.
        """
        table = defaultdict(dict)
        backpointers = defaultdict(dict)
        for j in xrange(1, len(sent) + 1):
            # Base case: preterminals covering the single word sent[j-1].
            for A in self._r2l_lex[tuple([sent[j - 1]])]:
                table[(j - 1, j)][A.lhs()] = A.logprob()
            if j >= 2:
                # Recursive case: combine adjacent sub-spans (i,k) and (k,j),
                # keeping the highest-log-probability derivation per LHS.
                for i in reversed(xrange(j - 1)):
                    for k in xrange(i + 1, j):
                        for B in table[(i, k)]:
                            for C in table[(k, j)]:
                                for A in self._r2l[(B, C)]:
                                    temp = A.logprob() + table[(i, k)][B] + \
                                    table[(k, j)][C]
                                    if A.lhs() not in table[(i, j)]:
                                        table[(i, j)][A.lhs()] = temp
                                        backpointers[(i, j)][A.lhs()] = (k, B, C)
                                    elif table[(i, j)][A.lhs()] < temp:
                                        table[(i, j)][A.lhs()] = temp
                                        backpointers[(i, j)][A.lhs()] = (k, B, C)
        return table, backpointers
    @staticmethod
    def BuildTree(cky_table, sent):
        """ Build a tree by following the back-pointers starting from the largest span
        (0, len(sent)) and recursing from larger spans (i, j) to smaller sub-spans
        (i, k), (k, j) and eventually bottoming out at the preterminal level (i, i+1).
        """
        # NOTE(review): callers pass the *backpointer* table here; a one-word
        # sentence has no backpointer entry for the top span -- confirm.
        if Nonterminal('S') not in cky_table[(0, len(sent))]:
            return None
        else:
            return InvertedGrammar.recursive_build(cky_table, sent, Nonterminal("S"), 0, len(sent))
    @staticmethod
    def recursive_build(cky_back, sent, nt, i, j):
        # A span of length one bottoms out at a preterminal over sent[i].
        if j - i == 1:
            TreeOut = Tree(nt.symbol(), [sent[i]])
        else:
            (k, B, C) = cky_back[(i, j)][nt]
            TreeOut = Tree(nt.symbol(), [
                InvertedGrammar.recursive_build(cky_back, sent, B, i, k),
                InvertedGrammar.recursive_build(cky_back, sent, C, k, j)])
        return TreeOut
def bucketing(test_set_prep):
    """Split sentences into five buckets by leaf count:
    1-9, 10-19, 20-29, 30-39, and 40+.  Empty sentences are dropped.
    """
    buckets = ([], [], [], [], [])
    for sent in test_set_prep:
        length = len(sent.leaves())
        if length > 0:
            # length // 10 maps 1-9 -> 0, 10-19 -> 1, ...; cap at bucket 4.
            buckets[min(length // 10, 4)].append(sent)
    return buckets
def main():
    # Split the treebank into a 3000-tree training set and the remainder.
    treebank_parsed_sents = TreebankNoTraces()
    training_set = treebank_parsed_sents[:3000]
    test_set = treebank_parsed_sents[3000:]
    """ Transform the data sets by eliminating unknown words.
    """
    vocabulary = init_vocab(training_set)
    training_set_prep = PreprocessText(training_set, vocabulary)
    test_set_prep = PreprocessText(test_set, vocabulary)
    print PrintTree(training_set_prep[0])
    print PrintTree(test_set_prep[0])
    """ Implement your solutions to problems 2-4.
    """
    """ Training a PCFG
    """
    pcfg = learn_PCFG(training_set_prep, "S")
    # Collect every NP production with its probability.
    NP_dict = {}
    for production in pcfg.productions():
        if str(production.lhs()) == "NP":
            NP_dict[production] = production.prob()
    print "Total number for NP nonterminal: ", len(NP_dict), " \n"
    print "The most probable 10 productions for the NP nonterminal: \n"
    # NOTE(review): [:9] prints only nine productions although the message
    # says ten -- confirm whether [:10] was intended.
    print sorted(NP_dict, key=NP_dict.get, reverse=True)[:9], " \n"
    """ Testing: Implement the probabilistic CKY algorithm for parsing a test
    sentence using your learned PCFG.
    """
    ig = InvertedGrammar(pcfg)
    sample_sentence = ['Terms', 'were', "n't", 'disclosed', '.']
    table, tree = ig.Parse(sample_sentence)
    print 'The log probability of the 5-token sentence: ', \
        table[(0, len(sample_sentence))][Nonterminal('S')]
    print 'The parse tree for the 5-token sentence:\n', \
        ig.BuildTree(tree, sample_sentence)
    """ Bucketing
    """
    bucket1, bucket2, bucket3, bucket4, bucket5 = bucketing(test_set_prep)
    print "Number of sentences in each bucket: ", \
        len(bucket1), len(bucket2), len(bucket3), len(bucket4), len(bucket5)
    # Parse every sentence of bucket 4 (30-39 tokens), writing predicted
    # parses to test_4 and gold parses to gold_4 (blank line = parse failure).
    test_bucket = bucket4
    test_file = open('test_4', "w")
    gold_file = open('gold_4', "w")
    count = 0
    for sent in test_bucket:
        count += 1
        temp_tree = ig.BuildTree(ig.Parse(sent.leaves())[1], sent.leaves())
        sent.un_chomsky_normal_form()
        if temp_tree is None:
            test_file.write('\n')
        else:
            temp_tree.un_chomsky_normal_form()
            test_file.write(PrintTree(temp_tree) + '\n')
        gold_file.write(PrintTree(sent) + '\n')
        print count
    test_file.close()
    gold_file.close()
# Standard script entry point.
if __name__ == "__main__":
    main()
| true |
92bc0cf87413578bb46db54da29e3a6787b2f380 | Python | rjkviegas/fruit-machine | /lib/Player.py | UTF-8 | 611 | 3.1875 | 3 | [] | no_license | class Player:
    def __init__(self, balance):
        # Starting cash balance available for play fees.
        self.balance = balance
    def get_balance(self):
        """Return the player's current cash balance."""
        return self.balance
    def play(self, game_machine):
        """Pay the machine's fee, then take one turn on it."""
        self.pay_fee_for(game_machine)
        game_machine.play(self)
def pay_fee_for(self, game_machine):
if self.get_balance() < game_machine.fee:
raise InsufficientBalance('Insufficient balance to play')
self.balance -= game_machine.get_fee()
game_machine.balance += game_machine.get_fee()
class InsufficientBalance(ZeroDivisionError):
    """Raised when a player cannot afford a game machine's fee.

    NOTE(review): inheriting from ZeroDivisionError looks accidental
    (Exception would be the natural base), but the base class is kept so
    any existing `except` clauses keep working.
    """

    def __init__(self, msg):
        # Forward to the base class so str(exc) and exc.args carry the
        # message (the original left args empty).
        super().__init__(msg)
        self.msg = msg
93652bf93884a5448ea39ebe70e86780403b1fc8 | Python | jeaninebeckle/raterproject-server | /raterprojectreports/views/ratings/bottomgamesbyrating.py | UTF-8 | 1,658 | 2.921875 | 3 | [] | no_license | """Module for generating games by user report"""
import sqlite3
from django.shortcuts import render
from raterprojectapi.models import Game
from raterprojectreports.views import Connection
def bottomgamerating_list(request):
    """Build an HTML report of the five lowest-rated games."""
    # NOTE(review): only GET is handled; any other method falls through and
    # the view returns None, which Django rejects -- confirm intent.
    if request.method == 'GET':
        # Connect to project database
        with sqlite3.connect(Connection.db_path) as conn:
            # Row factory gives name-based access to result columns.
            conn.row_factory = sqlite3.Row
            db_cursor = conn.cursor()
            # Query for all games, with related rating info.
            db_cursor.execute("""
            SELECT
                g.id,
                g.title,
                AVG(r.value) AS average_rating
            FROM
                raterprojectapi_game g
            JOIN
                raterprojectapi_rating r ON r.game_id = g.id
            GROUP BY g.title
            ORDER BY average_rating ASC
            LIMIT 5
            """)
            dataset = db_cursor.fetchall()
            bottom_games_by_rating = []
            for row in dataset:
                # Create a Game instance and set its properties. String in brackets matches the SQL results
                game = Game()
                game.title = row["title"]
                # `rating` is not a model field; it is attached ad hoc for the template.
                game.rating = row["average_rating"]
                bottom_games_by_rating.append(game)
        # Specify the Django template and provide data context
        template = 'ratings/list_with_lowest_ratings.html'
        context = {
            'bottomgamerating_list': bottom_games_by_rating
        }
        return render(request, template, context)
| true |
2259528e78ea92e3b074f54438e62b0ca30c4b97 | Python | rodrigojgrande/python-mundo | /desafios/desafio-037.py | UTF-8 | 1,020 | 4.625 | 5 | [] | no_license | #Exercício Python 37: Escreva um programa em Python que leia um número inteiro qualquer e peça para o usuário escolher qual será a base de conversão: 1 para binário, 2 para octal e 3 para hexadecimal.
# Exercise 37: read an integer and convert it to the base chosen by the
# user: 1 = binary, 2 = octal, 3 = hexadecimal.
numero = int(input('Digite um número inteiro:'))
print('Escolha uma das bases para conversão:')
print('[ \033[1;33m1\033[m ] Converter para Binário')
print('[ \033[1;33m2\033[m ] Converter para Octal')
print('[ \033[1;33m3\033[m ] Converter para Hexadecimal')
escolha = int(input('Sua opção:'))
if escolha == 1:
    resultado = bin(numero)[2:]  # strip the '0b' prefix
    print('\033[1;32m{} convertido para Binário é igual a {}.\033[m'.format(numero, resultado))
elif escolha == 2:
    resultado = oct(numero)[2:]  # strip the '0o' prefix
    # Bug fix: the original escape '\033[1;312' was malformed (missing the
    # terminating 'm'), corrupting the terminal output for this branch.
    print('\033[1;32m{} convertido para Octal é igual a {}.\033[m'.format(numero, resultado))
elif escolha == 3:
    resultado = hex(numero)[2:]  # strip the '0x' prefix
    print('\033[1;32m{} convertido para Hexadecimal é igual a {}.\033[m'.format(numero, resultado))
else:
    print('\033[1;31mComando inválido!\033[m')
| true |
ebca65502675702e552840eaf4f3603670a3ac93 | Python | Cuadernin/BotESFM | /MaestrosESFM.py | UTF-8 | 775 | 3.3125 | 3 | [] | no_license | import pandas as pd
def buscador(texto):
    """Find a professor's full name from a "first-name surname" query.

    Returns the first matching name upper-cased, or None when nothing
    matches.  (Cleanup: removed the unused `lista` accumulator and replaced
    the `find(...) >= 0` pair with the `in` operator.)
    """
    nombres = texto.split(" ")
    nombre = nombres[0].lower()
    apellido = nombres[1].lower()
    df = pd.read_excel("ProfesoresESFMV2.xlsx")
    df = df["PROFESOR"]
    df = df.dropna()
    # First row containing both the first name and the surname wins.
    for txt in df:
        pal = txt.lower()
        if nombre in pal and apellido in pal:
            return pal.upper()
def cap(row):
    """Title-case every string in *row* in place; returns the same row."""
    for position, text in enumerate(row):
        row[position] = text.title()
    return row
def consulta():
    """Load the professors spreadsheet and return the title-cased PROFESOR column."""
    table = pd.read_excel("ProfesoresESFMV2.xlsx", index_col=None)
    table = table[["PROFESOR"]]
    table = table.apply(lambda row: cap(row))
    return table
| true |
8dab6ad2e3debe83cd540bac5aa029c6514ca979 | Python | dpawlows/enrollment | /plot_enrolled.py | UTF-8 | 2,035 | 2.765625 | 3 | [] | no_license | from matplotlib import pyplot as pp
from matplotlib import gridspec
from enrolled import *
plotsdir = 'plots/'
def plotMajorHist(students):
    """Bar-chart enrollment per major per term (top panel) plus a
    total-students line (bottom panel); saves the figure and prints the
    percentage difference of each term from the peak term."""
    data = DataLoader()
    terms = data.getUnique(students,'termID')
    terms.sort()
    numbers = []
    # NOTE(review): `fig, ax` from subplots() are unused; the GridSpec
    # subplots below are what is actually drawn into -- confirm.
    fig, ax = pp.subplots()
    width = 0.2
    ind = arange(len(terms))
    i=0
    colors = ['b','y','g']
    rects = []
    # Two stacked panels with a 3:1 height ratio.
    gs = gridspec.GridSpec(2,1,height_ratios=[3,1])
    ax0 = pp.subplot(gs[0])
    for major in majors:
        # Count students in this major (either major1 or major2) per term.
        nmajor = []
        for term in terms:
            theseStudents = data.filter(students,termID=term)
            theseStudents = data.get(theseStudents,major1=major,major2=major)
            nmajor.append(len(theseStudents))
        rects.append(ax0.bar(ind+i*width,nmajor,width,color=colors[i]))
        i+=1
        numbers.append(nmajor)
    #Plot a histogram for each major over each term
    ax0.set_xticks(ind + (i*width)/2.)
    ax0.set_xticklabels(["" for term in terms])
    ax0.set_ylabel('Number of Students')
    ax0.legend(rects,majors)
    #Line plot for total students
    ax1 = pp.subplot(gs[1])
    termnumbers = [sum(inum) for inum in zip(*numbers)]
    sterms = [codeToTerm(s) for s in terms]
    ax1.plot(ind+(i*width)/2.,termnumbers,lw=2,color='m')
    ax1.set_xlim([0,max(ind)+1])
    ax1.set_xticks(ind + (i*width/2.))
    ax1.set_xticklabels(sterms)
    ax1.locator_params(axis='y',nbins=4)
    ax1.set_ylabel('Total Students')
    pp.savefig(plotsdir+'plot.png')
    # Per-term fractional difference from the peak enrollment term.
    maxstudents = max(termnumbers)
    diff = [(nterm - maxstudents)/float(maxstudents) for nterm in termnumbers]
    print 'Difference from max:\n'
    for iterm in range(len(terms)):
        print '{}: {}%'.format(terms[iterm],round(float(diff[iterm])*100))
# Load both enrollment CSVs, plot the per-major histogram, then
# de-duplicate the student records by id.
inFiles = ['data/2011_2014.csv','data/2015_2016.csv']
students = readStudentFile(inFiles)
plotMajorHist(students)
data = DataLoader()
students = data.getUnique(students,'id')
# students = data.get(students,major1='PHY',major2='PHY',major1='ENGR',major2='ENGR',
# major1='PHYR',major2='PHYR')
| true |
9f79f49c56af4f99a747d168928163d302d67c1e | Python | tushar-rishav/Algorithms | /Archive/Contests/HackerEarth/May_Hem/baseline.py | UTF-8 | 449 | 2.734375 | 3 | [] | no_license | from sys import stdin,exit
def med(x):
    """Return the median of *x* (mean of the two middle values when even)."""
    half, odd = divmod(len(x), 2)
    ordered = sorted(x)
    if odd:
        return ordered[half]
    return sum(ordered[half - 1:half + 1]) / 2
def main():
    # Number of test cases on the first line.
    t=input()
    while t:
        # n strings follow; k is read but unused here.
        # NOTE(review): k never used -- confirm the problem statement.
        n,k=map(int,raw_input().split())
        c=n
        s=list()
        # Read the n strings, each stored as a list of character codes
        # (trailing newline stripped).
        while c:
            s.append(map(ord,stdin.readline()[:-1]))
            c-=1
        # Transpose: group the i-th characters of every string together.
        s=zip(*s)
        for i in range(len(s)):
            s[i]=list(s[i])
        st=str()
        # Output character per column = median of that column's codes.
        for i in s:
            st+=chr(int(med(i)))
        print st
        t-=1
    print
    exit(0)
# Script entry point.
if __name__=="__main__":
    main()
292dbfe01663d001049426479d4ac029dbe5122f | Python | shivaallani7/BFS-1 | /LevelOrdertraversal.py | UTF-8 | 1,165 | 3.5625 | 4 | [] | no_license | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
# Time Complexity: O(n)
# Space Complexity: O(2^h), i.e. O(n) in the worst case
# Did it run on LeetCode: Yes
# Loop until the queue is empty.
# Find the length of the queue, loop through that many nodes, and add their values to the current level's list.
# For each dequeued node, add its left and right children to the queue.
#
class Solution(object):
    def levelOrder(self, root):
        """Return the tree's values grouped level by level (BFS).

        :type root: TreeNode
        :rtype: List[List[int]]
        """
        if not root:
            return
        result = []
        frontier = deque([root])
        while frontier:
            current_level = []
            # Drain exactly the nodes that were queued for this depth.
            for _ in range(len(frontier)):
                node = frontier.popleft()
                current_level.append(node.val)
                if node.left:
                    frontier.append(node.left)
                if node.right:
                    frontier.append(node.right)
            result.append(current_level)
        return result
| true |
e404b4bd91345ad0a4e37c0604dbb4ed0f6ad8cc | Python | pythongenuis/SMT_project | /SMT_Project/test_plot.py | UTF-8 | 355 | 3.453125 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
# t = np.arange(0.0, 2.0, 0.01)
# print(t)
# s = 1 + np.sin(2*np.pi*t)
#print(s)
# Minimal matplotlib demo: plot a small hand-written series, save it to
# disk, then display it interactively.
xs = [1, 2, 3, 4, 5, 6, 7, 8, 9]
ys = [10, 100, 10, 110, 120, 130, 140, 150, 170]
plt.plot(xs, ys)
plt.xlabel('time (s)')
plt.ylabel('voltage (mV)')
plt.title('About as simple as it gets, folks')
plt.grid(True)
plt.savefig("test.png")
plt.show()
b64f9025bdc24b84a8e886a5f6c339b6d39bd731 | Python | franklinshe/youtube-analytics | /src/utils/charts.py | UTF-8 | 1,032 | 2.671875 | 3 | [] | no_license | import pandas as pd
import plotly.graph_objects as go
import matplotlib.pyplot as plt
# from io import BytesIO
# import base64
# def get_image():
# buffer = BytesIO()
# plt.savefig(buffer, format='png')
# buffer.seek(0)
# image_png = buffer.getvalue()
# graph = base64.b64encode(image_png)
# graph = graph.decode('utf-8')
# buffer.close()
# return graph
def get_time_series_graph(x, data, labels):
    """Build a stacked time-series line chart and return it as embeddable HTML.

    Args:
        x: Shared x-axis values (e.g. timestamps), used for every trace.
        data: Iterable of y-value series, one per trace.
        labels: Trace names, parallel to ``data``.

    Returns:
        An HTML fragment (``full_html=False``, so no <html>/<body> wrapper)
        rendering the figure at 1300x800.
    """
    # Removed a leftover debug `print(data)` that spammed stdout on every call.
    fig = go.Figure()
    for y, label in zip(data, labels):
        fig.add_trace(go.Scatter(
            x=x, y=y,
            name=label,
            hoverinfo='name+y',
            mode='lines',
            stackgroup='one'  # stack all traces into one cumulative area
        ))
    return fig.to_html(full_html=False, default_height=800, default_width=1300)
def get_pie_chart(labels, sizes):
    """Render a pie chart of ``sizes`` labeled by ``labels`` as embeddable HTML."""
    pie_trace = go.Pie(labels=labels, values=sizes)
    figure = go.Figure(data=[pie_trace])
    return figure.to_html(full_html=False, default_height=800, default_width=1000)
e8c624db981d2b9d0d4cccfc7c30bbd0e66ba4b3 | Python | munQueen/trans-twitter-classification | /code/retrieve_tweets.py | UTF-8 | 912 | 2.6875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 20 00:08:41 2019
@author: jwulz
"""
from twitterscraper import query_tweets
import pandas as pd
import datetime
import os
# set the search term here:
query_term = 'cisgender'

# Bug fix: the query previously hard-coded "cisgender" instead of using
# query_term, so changing the variable above silently had no effect on the
# scrape (only on the output filename).
tweets = query_tweets(query=query_term, begindate=datetime.date(2019, 6, 1), enddate=datetime.date(2019, 10, 1), limit=2000, lang='en')
tweet_list = (t.__dict__ for t in tweets)
tweet_df = pd.DataFrame(tweet_list)

# Output file is named after the search term, written into the csv folder.
filename = query_term + ".csv"
os.chdir('C:/Learning/ML/trans_twitter/csv_files')

# Keep only tweets whose raw HTML is tagged as English.
language_regex = 'lang="en"'

# use regular expressions to remove URLS and images
tweet_df = tweet_df[tweet_df.html.str.contains(language_regex)]
tweet_df.text.replace('http\S+', '', regex=True, inplace=True)
tweet_df.text.replace('pic.twitter\S+', '', regex=True, inplace=True)
tweet_df = tweet_df.drop_duplicates(subset='text')
tweet_df.to_csv(filename)
6188091d9028d8bf0e146736a497653244d15f6c | Python | yiboyang/LDA | /LDA/slda.py | UTF-8 | 10,027 | 2.84375 | 3 | [] | no_license | """
supervised LDA
Based on the sLDA paper http://www.cs.columbia.edu/~blei/papers/BleiMcAuliffe2007.pdf
For more details see https://arxiv.org/pdf/1003.0783.pdf
'eta' denotes the vector of weights in the exponential linear component of the GLM
'var' denotes the dispersion parameter of the GLM; sigma squared for the Gaussian case
Here we use a normal linear model for regression.
"""
import numpy as np
import scipy.optimize
import scipy.stats
from scipy.special import gammaln, digamma
def calc_E_ATA_per_doc(d_len, Phi_d, Phi_sum_d):
    """
    Per-document contribution to the expected second moment of the mean topic
    assignment; equivalent to equation (5) in the sLDA paper but computed
    without an explicit quadratic loop over word pairs.
    :param d_len: number of words in the document
    :param Phi_d: Phi[d.beg_w_pos:d.end_w_pos]
    :param Phi_sum_d: Phi[d.beg_w_pos:d.end_w_pos].sum(axis=0)
    :return: K x K matrix
    """
    # Sum of outer products over every (n, m) word pair, n == m included.
    pair_sum = np.einsum('i,kj->ij', Phi_sum_d, Phi_d)
    # Replace the n == m terms: subtract sum_n outer(Phi_n, Phi_n)
    # (= Phi_d.T @ Phi_d) and add the correct diagonal contribution.
    self_term_fix = np.diag(Phi_sum_d) - np.dot(Phi_d.T, Phi_d)
    return (pair_sum + self_term_fix) / d_len ** 2
def expected_moments(docs, Phi, njobs=1):
    """
    Expected sufficient statistics (first/second) moments under variational posterior distribution
    :param docs: document objects exposing .len, .beg_w_pos and .end_w_pos
    :param Phi: W x K variational topic-assignment parameters for the whole corpus
    :param njobs: number of processes to launch for parallel processing
    :return: (E_A, E_ATA) -- the D x K matrix of per-document mean topic
        proportions and the K x K sum of per-document second moments
    """
    Phi_sums = np.array([np.sum(Phi[d.beg_w_pos:d.end_w_pos], axis=0) for d in docs])  # DxK; not worth paralleling
    # First moment: average of the word-level Phi rows within each document.
    E_A = Phi_sums / np.array([d.len for d in docs])[:, np.newaxis]
    if njobs > 1:
        # Parallel path: one task per document, summed afterwards.
        from joblib import Parallel, delayed
        parallelizer = Parallel(n_jobs=njobs)
        tasks_iterator = (delayed(calc_E_ATA_per_doc)(d.len, Phi[d.beg_w_pos:d.end_w_pos], Phi_sums[i]) for i, d in
                          enumerate(docs))
        partial_results = parallelizer(tasks_iterator)
        E_ATA = np.sum(partial_results, axis=0)
    else: # vanilla for loop; faster for smaller corpus (< 1M) because of parallelization overhead
        K = Phi.shape[1]
        E_ATA = np.zeros((K, K))
        for i, d in enumerate(docs):
            E_ATA += calc_E_ATA_per_doc(d.len, Phi[d.beg_w_pos:d.end_w_pos], Phi_sums[i])
    return E_A, E_ATA
def vem_estep(docs, resps, K, alpha, Beta, eta, var, stats, prev_params=None, tol=0.01, max_try=20):
    """
    Variational Bayesian inference for sLDA. We adopt a semi-Bayesian approach and maintain independent variational
    distributions over the latent variables of the model, thetas and zs.
    Gamma: D x K matrix of parameters for the variational Dirichlet distribution for all the documents, where
    the dth row parametrizes document d, a Dirichlet distribution over topics
    Phi: W x K matrix parameters for the variational categorical distribution for all the topic assignments,
    where W is the total number of words in corpus, and the wth row encodes a probability vector over the
    topic assignment for word w.
    All the expectations in the code are taken wrt to the variational distribution q. Equations referenced are from
    the LDA paper http://www.cs.columbia.edu/~blei/papers/BleiNgJordan2003.pdf
    :param docs: document objects whose distributions are to be approximated
    :param K: # topics
    :param alpha: float, symmetric Dirichlet prior hyperparam for the topic mixing proportion for each document
    :param Beta: 2d array, KxV categorical parameters for topics
    :param eta: GLM weight param
    :param var: GLM variance param
    :param stats: (E_A, E_ATA) expected moments, as produced by expected_moments
    :param prev_params: variational parameters from previous iteration, (Gamma, Phi); if provided will generally
        make vem more efficient
    :param tol: float, error tolerance, the minimum percentage increase of elbo, below which the algorithm is
        considered to have converged
    :param max_try: int; if elbo has not increased by more than tol for max_try iterations, return
    :return: (Gamma, Phi) updated variational parameters
    """
    def elbo(E_log_Theta, log_Beta, Gamma, Phi):
        """
        Calculate the variational lower bound on log evidence.
        We use the equation L = E_q[log p(X,Z)] + H[log q(Z)], where H is the entropy; for LDA it can be decomposed
        L = E_q[log p(theta|alpha)] + E_q[log p(z,w|theta,beta)] + E_q[log p(y|z,eta,sigma)] +
        H[q(theta|gamma)] + H[q(z|phi)]
        The unspecified parameters are constant global variables.
        :param E_log_Theta: expectation of log theta wrt q(theta|Gamma), calculated in e step for efficiency.
        :param log_Beta: elementwise log of Beta (with stability offset)
        :param Gamma: current variational Dirichlet parameters
        :param Phi: current variational categorical parameters
        :return: variational lower bound for naive mean field
        """
        E_log_p_theta = (alpha - 1) * E_log_Theta.sum() + D * (gammaln(alpha * K) -
                                                               K * gammaln(alpha))  # line 1 of eq (15)
        E_log_p_zw = 0
        for i, d in enumerate(docs):
            E_log_p_zw += np.sum((E_log_Theta[i] + log_Beta[:, d.words].T) *
                                 Phi[d.beg_w_pos: d.end_w_pos])  # line 2,3 of eq (15) combined
        E_log_p_y = -0.5 * np.log(2 * np.pi * var) - 1 / (2 * var) * \
                    (resps_norm_sq - 2 * np.dot(resps, np.dot(E_A, eta)) +
                     np.dot(np.dot(eta, E_ATA), eta))  # sLDA eq (9)
        H_q_theta = sum(scipy.stats.dirichlet.entropy(g) for g in Gamma)
        H_q_z = -(Phi * np.log(Phi)).sum()
        lb = E_log_p_theta + E_log_p_zw + E_log_p_y + H_q_theta + H_q_z
        return lb
    D = len(docs)
    W = sum(d.len for d in docs)  # total # of words in corpus
    eta_prod = eta * eta  # Hadamard product
    resps_norm_sq = np.dot(resps, resps)  # resps L2 norm squared
    E_A, E_ATA = stats
    if prev_params is None:
        # random initialization based on Blei's paper figure 6
        Gamma = np.random.rand(D, K) + alpha + np.array([d.len / K for d in docs])[:, np.newaxis]
        Phi = np.random.dirichlet(alpha=np.ones(K) * alpha, size=W)
    else:
        Gamma, Phi = prev_params
    lb_prev = float("inf")  # ELBO from previous iteration
    while True:
        # do some preliminary calculations for lower bound computation as well as parameter updates;
        # try to operate on large matrices for efficiency on PC; may take forever :)
        E_log_Theta = digamma(Gamma) - digamma(Gamma.sum(axis=1))[:, np.newaxis]  # eq (8), for all thetas
        log_Beta = np.log(Beta + 1e-32)  # for numeric stability
        lb = elbo(E_log_Theta, log_Beta, Gamma, Phi)
        print(lb)
        if lb_prev != float("inf") and abs((lb - lb_prev) / lb_prev) < tol:  # if no improvement in elbo
            # Allow max_try consecutive stagnant rounds before giving up.
            if num_try > 0:
                num_try -= 1
            else:  # num_try == 0
                break
        else:  # if there was improvement in elbo
            num_try = max_try
        lb_prev = lb
        for i, d in enumerate(docs):  # unfortunately un-parallelizable, has to be done sequentially
            Phi_sum = np.sum(Phi[d.beg_w_pos: d.end_w_pos], axis=0)
            for j, w in zip(range(d.beg_w_pos, d.end_w_pos), d.words):
                y = resps[i]
                # Exclude the current word from the running sum, then update
                # its Phi row with the coordinate-ascent rule and re-include it.
                Phi_sum -= Phi[j]
                log_Phi_new_j = E_log_Theta[i] + log_Beta[:, w] + (y / d.len / var) * eta - \
                                (2 * np.dot(eta, Phi_sum) * eta + eta_prod) / (
                                        2 * (d.len) ** 2 * var)  # eq (7) of sLDA paper
                Phi_new_j = np.exp(log_Phi_new_j)
                Phi[j] = Phi_new_j / np.sum(Phi_new_j)
                Phi_sum += Phi[j]
            Gamma[i] = alpha + Phi_sum  # update distributions gamma_d over document; re-use Phi_sum calculation
    return Gamma, Phi
def vem_mstep(docs, resps, K, V, Phi, stats):
    """
    M-step of variational EM to estimate hyperparameters using maximum likelihood based on expected sufficient stats
    under approximate posterior; same as maximizing ELBO wrt to Beta, eta, and var
    :param docs: document objects exposing .words
    :param resps: per-document real-valued responses
    :param K: number of topics
    :param V: vocabulary size
    :param Phi: W x K variational parameters, rows aligned with the corpus word stream
    :param stats: (E_A, E_ATA) from expected_moments
    :return: (Beta, eta, var) maximum-likelihood parameter estimates
    """
    D = len(docs)
    Beta = np.zeros((K, V))  # categorical param for topics
    # Accumulate expected topic-word counts; the enumerate index j pairs each
    # corpus word with its corresponding Phi row.
    for j, w in enumerate(w for d in docs for w in d.words):  # loop through all words
        Beta[:, w] += Phi[j]
    Beta /= np.sum(Beta, axis=1)[:, np.newaxis]  # eq (9), MLE for Beta
    # MLE for the GLM params
    E_A, E_ATA = stats
    E_AT_y = np.dot(E_A.T, resps)
    eta = np.linalg.solve(E_ATA, E_AT_y)
    var = (1 / D) * (np.dot(resps, resps) - np.dot(E_AT_y.T, eta))
    print('MSE:', np.sum((np.dot(E_A, eta) - resps) ** 2) / D)  # should decrease
    return Beta, eta, var
def vem(docs, resps, alpha, K, V, niter, njobs=1):
    """Variational EM for sLDA: alternate M and E steps for ``niter`` rounds.

    :param docs: document objects exposing .len, .beg_w_pos, .end_w_pos, .words
    :param resps: per-document responses
    :param alpha: symmetric Dirichlet hyperparameter
    :param K: number of topics
    :param V: vocabulary size
    :param niter: number of EM iterations
    :param njobs: processes used when computing expected moments
    :return: (Beta, eta, var) optimized model parameters
    """
    # model params
    Beta = np.random.dirichlet(np.ones(V), K)
    eta = np.linspace(start=-1, stop=1, num=K)  # initialization based on bottom of page 7 of sLDA paper
    var = np.var(resps)
    # variational params
    Gamma = np.random.rand(len(docs), K) + alpha + np.array([d.len / K for d in docs])[:, np.newaxis]
    Phi = np.random.dirichlet(alpha=np.ones(K) * alpha, size=sum(d.len for d in docs))
    for it in range(niter):
        stats = expected_moments(docs, Phi, njobs)
        Beta, eta, var = vem_mstep(docs, resps, K, V, Phi, stats)
        Gamma, Phi = vem_estep(docs, resps, K, alpha, Beta, eta, var, stats,
                               prev_params=(Gamma, Phi), tol=0.1, max_try=5)
    return Beta, eta, var  # optimized model parameters
def predict(docs, alpha, K, Beta, eta, tol=0.001, max_try=20):
    """
    Label a new set of documents with responses
    :param docs: document objects exposing .len, .beg_w_pos and .end_w_pos
    :param alpha: symmetric Dirichlet hyperparameter
    :param K: number of topics
    :param Beta: KxV topic-word parameters learned by vem
    :param eta: GLM weight vector learned by vem
    :param tol: convergence tolerance forwarded to the LDA E-step
    :param max_try: stagnation budget forwarded to the LDA E-step
    :return: 1d array of predicted responses, one per document
    """
    # run inference in the original LDA model to obtain the covariates (phi bars for each doc)
    from . import lda
    _, Phi = lda.vem_estep(docs=docs, K=K, alpha=alpha, Beta=Beta, prev_params=None, tol=tol, max_try=max_try)
    E_A = np.empty((len(docs), K))
    for i, d in enumerate(docs):
        # Mean topic proportion per document (phi bar).
        E_A[i] = np.sum(Phi[d.beg_w_pos:d.end_w_pos], axis=0) / d.len
    # Linear GLM prediction: y_hat = phi_bar . eta
    return np.dot(E_A, eta)
| true |
6e5813cc1c19911b7594a02887a29cf20b4a404a | Python | statsonice/statsonice-public | /scripts/old/combine_skaters.py | UTF-8 | 2,253 | 2.90625 | 3 | [] | no_license | """
This script combines skaters and skater pairs into a single skater
"""
import os
import sys
# Make the project's util/ directory importable so get_settings resolves.
parent_path = os.path.dirname(os.path.realpath(__file__))+'/../../util/'
sys.path.append(parent_path)
from get_settings import load_settings
# Configure settings (from CLI args) before any models are imported below.
load_settings(sys.argv)
from statsonice.models import *
def combine_skaters(skaters):
correct_skater = skaters[0]
skaters_to_combine = skaters[1:]
# Check that skaters are all pairs or all singles
skater_type = type(correct_skater)
for skater in skaters_to_combine:
if type(skater) != skater_type:
raise ValueError("Cannot combine pairs and singles competitors")
# Get competitors
competitors = []
for skater in list(skaters_to_combine)+[correct_skater]:
competitors += skater.competitor_set.all()
correct_competitor = competitors.pop()
print "Using", correct_competitor
if type(correct_skater) == Skater:
correct_competitor.skater = correct_skater
elif type(correct_skater) == SkaterPair:
correct_competitor.skater_pair = correct_skater
# Copy over skaterresults
for competitor in competitors:
for skaterresult in competitor.skaterresult_set.all():
skaterresult.competitor = correct_competitor
print "Changing ", skaterresult
skaterresult.save()
# Delete competitor
for competitor in competitors:
print "deleting", competitor
competitor.delete()
correct_competitor.save()
if type(correct_skater) == Skater:
# Merge skater pair values
for skater_to_combine in skaters_to_combine:
for coach in skater_to_combine.coach.all():
if coach not in correct_skater.coach.all():
skater.coach.add(coach)
for choreographer in skater_to_combine.choreographer.all():
if choreographer not in correct_skater.coach.all():
correct_skater.choreographer.add(choreographer)
# TODO
elif type(correct_skater) == SkaterPair:
# Merge skater values
# TODO
pass
# Delete extra skaters
for skater in skaters_to_combine:
print "Deleting", skater
skater.delete()
correct_skater.save()
| true |
36c4429a336cf9d7d9166a3c1aea634fca6d406e | Python | abelloma/project-2 | /working folders/database.py | UTF-8 | 699 | 2.546875 | 3 | [] | no_license | import pymongo
import os
import pandas as pd
import csv
from sqlalchemy import create_engine
# Source CSV produced by the data-combination step.
combined_data = 'data/combined_data.csv'
# Connect to the local MongoDB instance and select the beer_db.states collection.
conn = 'mongodb://localhost:27017'
client = pymongo.MongoClient(conn)
db = client.beer_db
collection = db.states
# def csv_to_dict():
# reader = csv.DictReader(open(combined_data))
# result = {}
# for row in reader:
# key = row.pop('First_value')
# result[key] = row
# return query
# # Final insert statement
# db.collection.insert_one(csv_to_dict())
def csv_to_json(filename, header=None):
    """Read a CSV file and return its rows as a list of record dicts.

    Args:
        filename: Path to the CSV file to load.
        header: Row number to use as column names, forwarded to
            ``pd.read_csv``; ``None`` (the default) means the file has no
            header row.  Bug fix: this default was previously the undefined
            name ``none``, which raised NameError the moment the module was
            imported.

    Returns:
        A list of dicts, one per row, keyed by column label.
    """
    data = pd.read_csv(filename, header=header)
    return data.to_dict('records')
# Load every CSV record into the `states` collection in one bulk insert.
collection.insert_many(csv_to_json(combined_data))
| true |
6da82372bc21f82f28e1e5c783895ee5d220ed27 | Python | hafrei/advent | /day3/run5.py | UTF-8 | 1,075 | 2.796875 | 3 | [] | no_license | # elf: ?, x:y, top_left: top_right, bottom_left: bottom_right
def get_specs(deets):
    """Parse one fabric claim line such as "#123 @ 3,2: 5x4".

    Returns [elf_id, x, y, length, width].  The elf id stays a string; the
    four geometry fields are parsed to ints -- the original returned all
    strings, which made the `idea[1] + idea[3]` arithmetic below concatenate
    text instead of adding coordinates.
    """
    elf = deets[deets.find("#")+1 : deets.find("@")].strip()
    x_axis = int(deets[deets.find("@")+1 : deets.find(",")].strip())
    y_axis = int(deets[deets.find(",")+1 : deets.find(":")].strip())
    length = int(deets[deets.find(":")+1 : deets.find("x")].strip())
    width = int(deets[deets.find("x")+1:].strip())
    return [elf, x_axis, y_axis, length, width]
def stake_claim(idea,owner):
    # TODO(review): unimplemented stub -- always returns an empty string and
    # ignores both arguments; the only call site below is commented out.
    #if owner is not int:
    bluh = ''
    return bluh
plan = []
# 1000x1000 grid of fabric squares; each cell will hold a claiming elf's id.
fabric = [['' for x in range(1000)] for y in range(1000)]
with open('input.txt') as f:
    claims = f.read().splitlines()
# One parsed spec list per claim line.
plan = [get_specs(x) for x in claims]
#     0      1      2     3     4
#elf, x axis, y axis, wide, tall
for idea in plan:
    owner = idea[0]
    # NOTE(review): if get_specs returns strings, these two lines perform
    # string concatenation, not coordinate arithmetic -- confirm intent.
    stop_x = idea[1] + idea[3]
    stop_y = idea[2] + idea[4]
    #for x in range(idea[1],idea[i])
    print("idea[3] is {}, idea[4] is {}".format(idea[3],idea[4]))
    # NOTE(review): indexes the grid by the claim's size fields (3, 4) rather
    # than its position fields (1, 2) -- looks like unfinished work.
    fabric[int(idea[3])][int(idea[4])] = owner
    #if fabric[int(idea[4]),int(idea[5])]:
    #    owner = "X"
    #stake_claim(idea,owner)
| true |
6f983c0c9cbc6c5446384d42979a68fe9e832e48 | Python | ViiSkor/ML-From-Scratch | /utils/clusterization.py | UTF-8 | 1,387 | 3.265625 | 3 | [] | no_license | import numpy as np
def init_centroids(data, n_centroids, mode="random_sample"):
    """ Initialize the centroids.

    Has two modes: take n random samples of the data as the centroids, or
    naive sharding.

    Read about naive sharding:
    https://www.kdnuggets.com/2017/03/naive-sharding-centroid-initialization-method.html

    Args:
      data: numpy array
        2d dataset array where the horizontal axis is the number of features
        and the vertical axis is the number of dataset samples.
      n_centroids: int
        The number of clusters the algorithm will form.
      mode: str
        Either "random_sample" or "sharding_init".

    Returns:
      centroids: numpy array
        An (n_centroids, n_features) array of initial centroid coordinates.

    Raises:
      Exception: if ``mode`` is not a recognized initialization name.
    """
    if mode == "random_sample":
        index = np.random.choice(data.shape[0], n_centroids, replace=False)
        return data[index]
    elif mode == "sharding_init":
        # Bug fix: the original sorted the attribute sums but then split the
        # *unsorted* row indices, so shards were consecutive rows in input
        # order and the sort had no effect.  Naive sharding requires the rows
        # to be grouped after ordering them by attribute sum (argsort).
        order = np.argsort(np.sum(data, axis=1))
        shards = np.array_split(order, n_centroids)
        centroids = []
        for shard in shards:
            centroids.append(np.mean(data[shard], axis=0))
        return np.array(centroids)
    else:
        raise Exception('No such init!')
f4c78f6be6839bf4a6a3fa74c91a18cc0bb24879 | Python | battyone/qtrio | /qtrio/_core.py | UTF-8 | 24,109 | 2.703125 | 3 | [
"MIT",
"Apache-2.0"
] | permissive | """The module holding the core features of QTrio.
Attributes:
_reenter_event_type: The event type enumerator for our reenter events.
"""
import contextlib
import functools
import math
import sys
import traceback
import typing
import typing_extensions
import async_generator
import attr
import outcome
from qtpy import QtCore
from qtpy import QtGui
from qtpy import QtWidgets
import trio
import trio.abc
import qtrio
import qtrio._qt
import qtrio._util
# Module-level registration state: None until register_event_type() (or the
# requested-value variant) stores the Qt event type used for reenter events.
_reenter_event_type: typing.Optional[QtCore.QEvent.Type] = None
def registered_event_type() -> typing.Optional[QtCore.QEvent.Type]:
    """Report which Qt event type, if any, is registered for reenter events.

    Returns:
        The event type previously registered via :func:`register_event_type`
        or :func:`register_requested_event_type`, or :obj:`None` when nothing
        has been registered yet.
    """
    return _reenter_event_type
def register_event_type() -> None:
    """Ask Qt for a fresh event type and record it for Trio reenter events.

    Raises:
        qtrio.EventTypeAlreadyRegisteredError: if an event type has already been
            registered.
        qtrio.EventTypeRegistrationFailedError: if a type was not able to be
            registered.
    """
    global _reenter_event_type

    if _reenter_event_type is not None:
        raise qtrio.EventTypeAlreadyRegisteredError()

    hint = QtCore.QEvent.registerEventType()
    if hint == -1:
        # Qt signals registration failure with -1.
        raise qtrio.EventTypeRegistrationFailedError()

    # Store the granted value in the module-level global.
    _reenter_event_type = QtCore.QEvent.Type(hint)
def register_requested_event_type(
    requested_value: typing.Union[int, QtCore.QEvent.Type]
) -> None:
    """Register a specific Qt event type value for Trio reenter events.

    Arguments:
        requested_value: The value to ask Qt to use for the event type being registered.

    Raises:
        qtrio.EventTypeAlreadyRegisteredError: if an event type has already been
            registered.
        qtrio.EventTypeRegistrationFailedError: if a type was not able to be registered.
        qtrio.RequestedEventTypeUnavailableError: if the type returned by Qt does not
            match the requested type.
    """
    global _reenter_event_type

    if _reenter_event_type is not None:
        raise qtrio.EventTypeAlreadyRegisteredError()

    granted = QtCore.QEvent.registerEventType(requested_value)
    if granted == -1:
        # -1 means Qt could not register any type at all.
        raise qtrio.EventTypeRegistrationFailedError()
    if granted != requested_value:
        # Qt handed back a different value than the one we asked for.
        raise qtrio.RequestedEventTypeUnavailableError(
            requested_type=requested_value, returned_type=granted
        )

    # Store the granted value in the module-level global.
    _reenter_event_type = QtCore.QEvent.Type(granted)
class ReenterEvent(QtCore.QEvent):
    """A proper ``ReenterEvent`` for reentering into the Qt host loop."""
    def __init__(self, fn: typing.Callable[[], object]):
        # Tag this event with the globally registered reenter type so that
        # :class:`Reenter` recognizes and handles it.
        super().__init__(_reenter_event_type)
        # The zero-argument callable to execute back in the Qt thread.
        self.fn = fn
class Reenter(QtCore.QObject):
    """A ``QtCore.QObject`` for handling reenter events."""
    def event(self, event: QtCore.QEvent) -> bool:
        """Qt calls this when the object receives an event.

        Executes the callable carried by the reenter event.
        """
        # Bug fix: the cast previously targeted ``Reenter``; the received
        # object is a :class:`ReenterEvent` -- that is the type carrying the
        # ``.fn`` attribute accessed below.  ``typing.cast`` is a runtime
        # no-op, so observable behavior is unchanged.
        reenter_event = typing.cast(ReenterEvent, event)
        reenter_event.fn()
        return False
async def wait_signal(signal: qtrio._util.SignalInstance) -> typing.Tuple[object, ...]:
    """Block for the next emission of ``signal`` and return the emitted arguments.
    Warning:
        In many cases this can result in a race condition since you are unable to
        first connect the signal and then wait for it.
    Args:
        signal: The signal instance to wait for emission of.
    Returns:
        A tuple containing the values emitted by the signal.
    """
    event = trio.Event()
    result: typing.Tuple[object, ...] = ()
    def slot(*args: object) -> None:
        """Receive and store the emitted arguments and set the event so we can continue.
        Args:
            args: The arguments emitted from the signal.
        """
        nonlocal result
        result = args
        event.set()
    # The connection is only alive inside this context, so emissions after
    # the wait completes are not captured.
    with qtrio._qt.connection(signal, slot):
        await event.wait()
    return result
@attr.s(auto_attribs=True, frozen=True, slots=True, eq=False)
class Emission:
    """Stores the emission of a signal including the emitted arguments. Can be
    compared against a signal instance to check the source. Do not construct this class
    directly. Instead, instances will be received through a channel created by
    :func:`qtrio.enter_emissions_channel`.
    Note:
        Each time you access a signal such as ``a_qobject.some_signal`` you get a
        different signal instance object so the ``signal`` attribute generally will not
        be the same object. A signal instance is a ``QtCore.SignalInstance`` in
        PySide2 or ``QtCore.pyqtBoundSignal`` in PyQt5.
    """
    signal: qtrio._util.SignalInstance
    """An instance of the original signal."""
    args: typing.Tuple[object, ...]
    """A tuple of the arguments emitted by the signal."""
    def is_from(self, signal: qtrio._util.SignalInstance) -> bool:
        """Check if this emission came from ``signal``.
        Args:
            signal: The signal instance to check for being the source.
        Returns:
            Whether the passed signal was the source of this emission.
        """
        # bool() to accommodate SignalInstance being typed Any right now...
        return bool(self.signal == signal)
    def __eq__(self, other: object) -> bool:
        # Manual equality because attrs-generated eq is disabled (eq=False above).
        if type(other) != type(self):
            return False
        # TODO: workaround for https://github.com/python/mypy/issues/4445
        if not isinstance(other, type(self)):  # pragma: no cover
            return False
        return self.is_from(signal=other.signal) and self.args == other.args
@attr.s(auto_attribs=True, frozen=True)
class EmissionsChannelSlot:
    """Adapts a Qt slot call into an :class:`Emission` pushed onto a channel."""
    # The signal instance recorded as the source of each emission.
    internal_signal: qtrio._util.SignalInstance
    # The channel that receives the wrapped emissions.
    send_channel: trio.MemorySendChannel
    def slot(
        self,
        *args: object,
    ) -> None:
        """Receive a signal emission and forward it without blocking.

        Emissions are silently dropped when the channel is full or closed.
        """
        try:
            self.send_channel.send_nowait(
                Emission(signal=self.internal_signal, args=args)
            )
        except (trio.WouldBlock, trio.ClosedResourceError):
            # TODO: log this or... ?
            pass
@attr.s(auto_attribs=True)
class Emissions:
    """Hold elements useful for the application to work with emissions from signals.
    Do not construct this class directly. Instead, use
    :func:`qtrio.enter_emissions_channel`.
    """
    channel: trio.MemoryReceiveChannel
    """A memory receive channel to be fed by signal emissions."""
    send_channel: trio.MemorySendChannel
    """A memory send channel collecting signal emissions."""
    async def aclose(self) -> None:
        """Asynchronously close the send channel when signal emissions are no longer of
        interest.
        """
        # Closing the send side lets a receiver drain and then finish cleanly.
        await self.send_channel.aclose()
@async_generator.asynccontextmanager
async def open_emissions_channel(
    signals: typing.Collection[qtrio._util.SignalInstance],
    max_buffer_size: typing.Union[int, float] = math.inf,
) -> typing.AsyncGenerator[Emissions, None]:
    """Create a memory channel fed by the emissions of the signals. Each signal
    emission will be converted to a :class:`qtrio.Emission` object. On exit the send
    channel is closed. Management of the receive channel is left to the caller.
    Note:
        Use this only if you need to process emissions *after* exiting the context
        manager. Otherwise use :func:`qtrio.enter_emissions_channel`.
    Args:
        signals: A collection of signals which will be monitored for emissions.
        max_buffer_size: When the number of unhandled emissions in the channel reaches
            this limit then additional emissions will be silently thrown out the window.
    Returns:
        The emissions manager with the signals connected to it.
    """
    # Infinite buffer because I don't think there's any use in storing the emission
    # info in a `slot()` stack frame rather than in the memory channel. Perhaps in the
    # future we can implement a limit beyond which events are thrown away to avoid
    # infinite queueing. Maybe trio.MemorySendChannel.send_nowait() instead.
    send_channel, receive_channel = trio.open_memory_channel[Emission](
        max_buffer_size=max_buffer_size
    )
    async with send_channel:
        with contextlib.ExitStack() as stack:
            emissions = Emissions(channel=receive_channel, send_channel=send_channel)
            for signal in signals:
                # One slot object per signal so each emission records its source.
                slot = EmissionsChannelSlot(
                    internal_signal=signal, send_channel=send_channel
                )
                # The exit stack disconnects every signal when this context ends.
                stack.enter_context(qtrio._qt.connection(signal, slot.slot))
            yield emissions
@async_generator.asynccontextmanager
async def enter_emissions_channel(
    signals: typing.Collection[qtrio._util.SignalInstance],
    max_buffer_size: typing.Union[int, float] = math.inf,
) -> typing.AsyncGenerator[Emissions, None]:
    """Create a memory channel fed by the emissions of the signals and enter both the
    send and receive channels' context managers.
    Args:
        signals: A collection of signals which will be monitored for emissions.
        max_buffer_size: When the number of unhandled emissions in the channel reaches
            this limit then additional emissions will be silently thrown out the window.
    Returns:
        The emissions manager.
    """
    # Unlike open_emissions_channel(), both channel ends are owned here and
    # closed automatically when the caller exits the context.
    async with open_emissions_channel(
        signals=signals, max_buffer_size=max_buffer_size
    ) as emissions:
        async with emissions.channel:
            async with emissions.send_channel:
                yield emissions
class StarterProtocol(typing_extensions.Protocol):
    """Structural type for objects whose bound ``start`` launches a slot for a signal."""
    def start(self, *args: object) -> None:
        """Handle a signal emission by scheduling the associated slot."""
        ...
@attr.s(auto_attribs=True, frozen=True)
class DirectStarter:
    """Starts the async slot in the nursery with no wrapper around it."""
    # The async slot to run when the connected signal fires.
    slot: typing.Callable[..., typing.Awaitable[object]]
    # The nursery that executes the slot.
    nursery: trio.Nursery
    def start(self, *args: object) -> None:
        # Launch the slot with the signal's emitted arguments.
        self.nursery.start_soon(self.slot, *args)
@attr.s(auto_attribs=True, frozen=True)
class WrappedStarter:
    """Starts the async slot in the nursery via a caller-supplied wrapper."""
    # The async slot to run when the connected signal fires.
    slot: typing.Callable[..., typing.Awaitable[object]]
    # Awaitable wrapper applied around the slot (e.g. for exception handling).
    wrapper: typing.Callable[
        [typing.Callable[..., typing.Awaitable[object]]], typing.Awaitable[object]
    ]
    # The nursery that executes the wrapped slot.
    nursery: trio.Nursery
    def start(self, *args: object) -> None:
        # The wrapper receives the slot followed by the emitted arguments.
        self.nursery.start_soon(self.wrapper, self.slot, *args)
@attr.s(auto_attribs=True)
class EmissionsNursery:
    """Holds the nursery, exit stack, and wrapper needed to support connecting signals
    to both async and sync slots in the nursery.
    """
    nursery: trio.Nursery
    """The Trio nursery that will handle execution of the slots."""
    exit_stack: contextlib.ExitStack
    """The exit stack that will manage the connections so they get disconnected."""
    wrapper: typing.Optional[
        typing.Callable[
            [typing.Callable[..., typing.Awaitable[object]]],
            typing.Awaitable[object],
        ]
    ] = None
    """The wrapper for handling the slots. This could, for example, handle exceptions
    and present a dialog to avoid cancelling the entire nursery.
    """
    def connect(
        self,
        signal: qtrio._util.SignalInstance,
        slot: typing.Callable[..., typing.Awaitable[object]],
    ) -> None:
        """Connect an async signal to this emissions nursery so when called the slot
        will be run in the nursery.
        """
        starter: StarterProtocol
        if self.wrapper is None:
            starter = DirectStarter(slot=slot, nursery=self.nursery)
        else:
            starter = WrappedStarter(
                slot=slot, wrapper=self.wrapper, nursery=self.nursery
            )
        # Register on the exit stack so the connection is torn down when the
        # nursery context exits.
        self.exit_stack.enter_context(qtrio._qt.connection(signal, starter.start))
    def connect_sync(
        self, signal: qtrio._util.SignalInstance, slot: typing.Callable[..., object]
    ) -> None:
        """Connect to a sync slot to this emissions nursery so when called the slot will
        be run in the nursery.
        """
        # Minimal async shim so the sync slot can run through connect().
        async def async_slot(*args: object) -> None:
            slot(*args)
        self.connect(signal=signal, slot=async_slot)
@async_generator.asynccontextmanager
async def open_emissions_nursery(
    until: typing.Optional[qtrio._util.SignalInstance] = None,
    wrapper: typing.Optional[typing.Callable[..., typing.Awaitable[object]]] = None,
) -> typing.AsyncGenerator[EmissionsNursery, None]:
    """Open a nursery for handling callbacks triggered by signal emissions. This allows
    a 'normal' Qt callback structure while still executing the callbacks within a Trio
    nursery such that errors have a place to go. Both async and sync callbacks can be
    connected. Sync callbacks will be wrapped in an async call to allow execution in
    the nursery.
    Arguments:
        until: Keep the nursery open until this signal is emitted.
        wrapper: A wrapper for the callbacks such as to process exceptions.
    Returns:
        The emissions manager.
    """
    async with trio.open_nursery() as nursery:
        with contextlib.ExitStack() as exit_stack:
            emissions_nursery = EmissionsNursery(
                nursery=nursery,
                exit_stack=exit_stack,
                wrapper=wrapper,
            )
            if until is not None:
                # Stay open until `until` fires; the context blocks on exit.
                async with wait_signal_context(until):
                    yield emissions_nursery
            else:
                yield emissions_nursery
@async_generator.asynccontextmanager
async def wait_signal_context(
    signal: qtrio._util.SignalInstance,
) -> typing.AsyncGenerator[None, None]:
    """Connect a signal during the context and wait for it on exit. Presently no
    mechanism is provided for retrieving the emitted arguments.
    Args:
        signal: The signal to connect to and wait for.
    """
    event = trio.Event()
    def slot(*args: object, **kwargs: object) -> None:
        # Emitted arguments are discarded; only the fact of emission matters.
        event.set()
    with qtrio._qt.connection(signal=signal, slot=slot):
        yield
        # The body runs first; on exit we block until the signal has fired.
        await event.wait()
@attr.s(auto_attribs=True, frozen=True, slots=True)
class Outcomes:
    """This class holds an :class:`outcome.Outcome` from each of the Trio and the Qt
    application execution. Do not construct instances directly. Instead, an instance
    will be returned from :func:`qtrio.run` or available on instances of
    :attr:`qtrio.Runner.outcomes`.
    """
    qt: typing.Optional[outcome.Outcome] = None
    """The Qt application :class:`outcome.Outcome`"""
    trio: typing.Optional[outcome.Outcome] = None
    """The Trio async function :class:`outcome.Outcome`"""
    def unwrap(self) -> object:
        """Unwrap either the Trio or Qt outcome. First, errors are given priority over
        success values. Second, the Trio outcome gets priority over the Qt outcome.
        Returns:
            Whatever captured value was selected.
        Raises:
            Exception: Whatever captured exception was selected.
            qtrio.NoOutcomesError: if no value or exception has been captured.
        """
        if self.trio is not None:
            # highest priority to the Trio outcome, if it is an error we are done
            result = self.trio.unwrap()
            # since a Trio result is higher priority, we only care if Qt gave an error
            # (unwrap() raises for an Error outcome and is otherwise ignored here)
            if self.qt is not None:
                self.qt.unwrap()
            # no Qt error so go ahead and return the Trio result
            return result
        elif self.qt is not None:
            # either it is a value that gets returned or an error that gets raised
            return self.qt.unwrap()
        # neither Trio nor Qt outcomes have been set so we have nothing to unwrap()
        raise qtrio.NoOutcomesError()
def run(
    async_fn: typing.Callable[..., typing.Awaitable[object]],
    *args: object,
    done_callback: typing.Optional[typing.Callable[[Outcomes], None]] = None,
    clock: typing.Optional[trio.abc.Clock] = None,
    instruments: typing.Sequence[trio.abc.Instrument] = (),
) -> object:
    """Run a Trio-flavored async function in guest mode on a Qt host application, and
    return the result.
    Args:
        async_fn: The async function to run.
        args: Positional arguments to pass to `async_fn`.
        done_callback: See :class:`qtrio.Runner.done_callback`.
        clock: See :class:`qtrio.Runner.clock`.
        instruments: See :class:`qtrio.Runner.instruments`.
    Returns:
        The object returned by ``async_fn``.
    """
    runner = Runner(
        done_callback=done_callback, clock=clock, instruments=list(instruments)
    )
    # NOTE(review): presumably blocks until the host loop completes and the
    # outcomes are populated -- Runner.run is defined further down this file.
    runner.run(async_fn, *args)
    return runner.outcomes.unwrap()
def outcome_from_application_return_code(return_code: int) -> outcome.Outcome:
    """Wrap a Qt application exit code in an :class:`outcome.Outcome`.

    A return code of 0 is success and becomes an :class:`outcome.Value`;
    any other value becomes an :class:`outcome.Error` carrying a
    :class:`qtrio.ReturnCodeError`.

    Args:
        return_code: The return code to be processed.

    Returns:
        The outcome wrapping the passed in return code.
    """
    return (
        outcome.Value(return_code)
        if return_code == 0
        else outcome.Error(qtrio.ReturnCodeError(return_code))
    )
def maybe_build_application() -> QtGui.QGuiApplication:
    """Return the process-wide Qt application, creating one if none exists yet.

    Returns:
        The Qt application object.
    """
    existing = QtWidgets.QApplication.instance()
    application = (
        existing if existing is not None else QtWidgets.QApplication(sys.argv[1:])
    )
    # Keep the event loop alive even after every window has closed; the async
    # function drives the application's lifetime instead.
    application.setQuitOnLastWindowClosed(False)
    return application
@attr.s(auto_attribs=True, slots=True)
class Runner:
    """This class helps run Trio in guest mode on a Qt host application."""

    application: QtGui.QGuiApplication = attr.ib(factory=maybe_build_application)
    """The Qt application object to run as the host. If not set before calling
    :meth:`run` the application will be created as
    ``QtWidgets.QApplication(sys.argv[1:])`` and ``.setQuitOnLastWindowClosed(False)``
    will be called on it to allow the application to continue throughout the lifetime
    of the async function passed to :meth:`qtrio.Runner.run`.
    """

    quit_application: bool = True
    """When true, the :meth:`done_callback` method will quit the application when the
    async function passed to :meth:`qtrio.Runner.run` has completed.
    """

    clock: typing.Optional[trio.abc.Clock] = None
    """The clock to use for this run. This is primarily used to speed up tests that
    include timeouts. The value will be passed on to
    :func:`trio.lowlevel.start_guest_run`.
    """

    instruments: typing.Sequence[trio.abc.Instrument] = ()
    """The instruments to use for this run. The value will be passed on to
    :func:`trio.lowlevel.start_guest_run`.
    """

    reenter: Reenter = attr.ib(factory=Reenter)
    """The :class:`QtCore.QObject` instance which will receive the events requesting
    execution of the needed Trio and user code in the host's event loop and thread.
    """

    done_callback: typing.Optional[typing.Callable[[Outcomes], None]] = attr.ib(
        default=None
    )
    """The builtin :meth:`done_callback` will be passed to
    :func:`trio.lowlevel.start_guest_run` but will call the callback passed here before
    (maybe) quitting the application. The :class:`outcome.Outcome` from the completion
    of the async function passed to :meth:`run` will be passed to this callback.
    """

    outcomes: Outcomes = attr.ib(factory=Outcomes, init=False)
    """The outcomes from the Qt and Trio runs."""

    cancel_scope: trio.CancelScope = attr.ib(default=None, init=False)
    """An all encompassing cancellation scope for the Trio execution."""

    _done: bool = attr.ib(default=False, init=False)
    """Just an indicator that the run is done. Presently used only for a test."""

    def run(
        self,
        async_fn: typing.Callable[..., typing.Awaitable[object]],
        *args: object,
        execute_application: bool = True,
    ) -> Outcomes:
        """Start the guest loop executing ``async_fn``.

        Args:
            async_fn: The async function to be run in the Qt host loop by the Trio
                guest.
            args: Arguments to pass when calling ``async_fn``.
            execute_application: If True, the Qt application will be executed and this
                call will block until it finishes.

        Returns:
            If ``execute_application`` is true, a :class:`qtrio.Outcomes` containing
            outcomes from the Qt application and ``async_fn`` will be returned.
            Otherwise, an empty :class:`qtrio.Outcomes`.
        """
        # Lazily register the custom Qt event type used to re-enter the Trio
        # guest from inside the Qt event loop.
        if _reenter_event_type is None:
            register_event_type()
        trio.lowlevel.start_guest_run(
            self.trio_main,
            async_fn,
            args,
            run_sync_soon_threadsafe=self.run_sync_soon_threadsafe,
            done_callback=self.trio_done,
            clock=self.clock,  # type: ignore[arg-type]
            instruments=self.instruments,
        )
        if execute_application:
            # Blocks here until the application quits; trio_done() requests the
            # quit (when quit_application is set) once the async function ends.
            return_code = self.application.exec_()
            self.outcomes = attr.evolve(
                self.outcomes,
                qt=outcome_from_application_return_code(return_code),
            )
        return self.outcomes

    def run_sync_soon_threadsafe(self, fn: typing.Callable[[], object]) -> None:
        """Helper for the Trio guest to execute a sync function in the Qt host
        thread when called from the Trio guest thread. This call will not block waiting
        for completion of ``fn`` nor will it return the result of calling ``fn``.

        Args:
            fn: A no parameter callable.
        """
        # postEvent is documented thread-safe, so this may be called from the
        # Trio guest thread; the Reenter object runs ``fn`` on the Qt thread.
        event = ReenterEvent(fn=fn)
        self.application.postEvent(self.reenter, event)

    async def trio_main(
        self,
        async_fn: typing.Callable[..., typing.Awaitable[object]],
        args: typing.Tuple[object, ...],
    ) -> object:
        """Will be run as the main async function by the Trio guest. It creates a
        cancellation scope to be cancelled when
        :meth:`QtGui.QGuiApplication.lastWindowClosed` is emitted. Within this scope
        the application's ``async_fn`` will be run and passed ``args``.

        Args:
            async_fn: The application's main async function to be run by Trio in the Qt
                host's thread.
            args: Positional arguments to be passed to ``async_fn``

        Returns:
            The result returned by `async_fn`.
        """
        result: object = None
        with trio.CancelScope() as self.cancel_scope:
            with contextlib.ExitStack() as exit_stack:
                # Only wire lastWindowClosed -> cancel when the application is
                # configured to quit on last window close.
                if self.application.quitOnLastWindowClosed():
                    exit_stack.enter_context(
                        qtrio._qt.connection(
                            signal=self.application.lastWindowClosed,
                            slot=self.cancel_scope.cancel,
                        )
                    )
                result = await async_fn(*args)
        return result

    def trio_done(self, run_outcome: outcome.Outcome) -> None:
        """Will be called after the Trio guest run has finished. This allows collection
        of the :class:`outcome.Outcome` and execution of any application provided done
        callback. Finally, if :attr:`qtrio.Runner.quit_application` was set when
        creating the instance then the Qt application will be requested to quit.

        Actions such as outputting error information or unwrapping the outcomes need
        to be further considered.

        Arguments:
            run_outcome: The outcome of the Trio guest run.
        """
        # Record the Trio outcome first so the user callback sees it.
        self.outcomes = attr.evolve(self.outcomes, trio=run_outcome)
        if self.done_callback is not None:
            self.done_callback(self.outcomes)
        if self.quit_application:
            self.application.quit()
        self._done = True
| true |
af6330da7f286211f309b5e7eaddc41f1ae09b55 | Python | mcrobertw/python | /usandosimplejson.py | UTF-8 | 355 | 3.921875 | 4 | [] | no_license | #Formar un json de un diccionario en python
import json

# Build a Python dictionary from a JSON string.
person = '{"name": "Bob", "languages": ["English", "French"]}'
person_dict = json.loads(person)
# Output: {'name': 'Bob', 'languages': ['English', 'French']}
print(person_dict)
# Output: ['English', 'French']
print(person_dict['languages'])
# Source: https://www.programiz.com/python-programming/json
65c67d58280b06ece76f3020a127dd6a92e5bc19 | Python | thanhtranna/python-algo | /39_back_track/regex.py | UTF-8 | 1,134 | 3.40625 | 3 | [] | no_license | #!/usr/bin/python
# -*- coding: UTF-8 -*-
is_match = False
def rmatch(r_idx: int, m_idx: int, regex: str, main: str):
global is_match
if is_match:
return
if r_idx >= len(regex): # The regular strings are all matched
is_match = True
return
# The regular string has not been matched, but the main string has not matched
if m_idx >= len(main) and r_idx < len(regex):
is_match = False
return
if regex[r_idx] == '*': # * Match 1 or more arbitrary characters, recursively search each case
for i in range(m_idx, len(main)):
rmatch(r_idx+1, i+1, regex, main)
elif regex[r_idx] == '?': # ? Match 0 or 1 arbitrary character, in two cases
rmatch(r_idx+1, m_idx+1, regex, main)
rmatch(r_idx+1, m_idx, regex, main)
else: # Non-special characters need to match exactly
if regex[r_idx] == main[m_idx]:
rmatch(r_idx+1, m_idx+1, regex, main)
if __name__ == '__main__':
regex = 'ab*eee?d'
main = 'abcdsadfkjlekjoiwjiojieeecd'
rmatch(0, 0, regex, main)
print(is_match)
| true |
9916a1de554ce818a1709f89bc488028fc2eb98a | Python | Cristiantorre/Python-TorrentsCristian | /M14UF1E03.py | UTF-8 | 133 | 2.546875 | 3 | [] | no_license | si,no=True,False
edat=input("Ets major d’edat?' (True/False):")
if:Si=input("es major de edat")
if:No=input("no es major de edat")
| true |
1780f64ae5e5f98fa8335fb056a4ddd61a9934c2 | Python | Coderu2058/Yet_Another_Algorithms_Repository | /Algorithms/binary_search/python-binary_search-O(log(n)).py | UTF-8 | 960 | 4 | 4 | [
"MIT"
] | permissive | def binary_search(list_, val, asc=1):
'''
list_: sorted list with unique values
val: value to search
asc: asc==1 list is sorted in ascending order
Searches for a given element in a list sorted in ascending order. For searching in a list sorted in descending order, pass the argument value for asc as '0'.
'''
lo = 0
hi = (len(list_)-1)
while (lo <= hi):
mid = int(lo + (hi-lo)/2)
#print("mid", mid)
if (list_[mid] == val):
return mid
elif (list_[mid] > val):
if (asc == 1): # increasing
hi = mid-1
else :
lo = mid+1
elif (list_[mid] < val):
if (asc == 1): # increasing
lo = mid+1
else :
hi = mid-1
return -1
a = [14,15,16,17,18,19]
a_= [19,18,17,16,15,14]
print(binary_search(a, 16))
print(binary_search(a, 19))
print(binary_search(a, 18))
print(binary_search(a, 11))
print(binary_search(a_, 16, 0))
print(binary_search(a_, 19, 0))
print(binary_search(a_, 18, 0))
print(binary_search(a_, 11, 0))
| true |
a47a717b66b07a12aa5a2d771834b5cd3cd8cb6c | Python | JadoBleu/Adventure-Game-Project | /equipment.py | UTF-8 | 12,770 | 3.796875 | 4 | [] | no_license | '''Generates new weapons, armor, ring, and relevant functions '''
# Imports
import random
import math
from housekeeping import rng
def new_rarity(common=80, uncommon=15, rare=5):
'''Returns a rarity value based off the chances. Default to 80:15:5'''
if rng(common):
rarity = "common"
elif rng(uncommon, (uncommon + rare)):
rarity = "uncommon"
else:
rarity = "rare"
return rarity
def new_method_type():
'''Selects a random damage type out of the 2 available:"single", "splash"'''
damage_type = "single", "splash"
return damage_type[random.randrange(0, len(damage_type))]
def new_ability():
'''Generates a new ability based on the type and method
returns dict ability'''
ability = {}
# generate new method and damage type
ability["method"] = new_method_type()
ability["type"] = new_damage_type()
# generate a new name for the ability
if ability["method"] == "single":
ability["energy"] = 10
if ability["type"] == "slash":
name = "Rising Fury"
elif ability["type"] == "impact":
name = "Heavy Blow"
elif ability["type"] == "magic":
name = "Fire Ball"
elif ability["type"] == "spirit":
name = "Nether Smite"
elif ability["method"] == "splash":
ability["energy"] = 15
if ability["type"] == "slash":
name = "Whirling Slash"
elif ability["type"] == "impact":
name = "Meteor Strike"
elif ability["type"] == "magic":
name = "Chain Lightning"
elif ability["type"] == "spirit":
name = "Arcane Blast"
ability["name"] = name
ability["level"] = 1
ability["experience"] = 0
return ability
def print_ability_info(ability):
print("\tAbility:\t"+ability["name"])
print("\t Level:\t\t", ability["level"])
print("\t Energy Cost:\t", ability["energy"])
if ability["method"] == "single":
print("\t Description:\t This ability deals a large amount of\n\t\t\t",
ability["type"],"damage to a single target\n")
elif ability["method"] == "splash":
print("\t Description:\t This ability deals an increased amount of\n\t\t\t",
ability["type"],"damage to a single enemy, \n\t\t\t"
" and reduced damage to all other enemies\n")
# Generate Weapon
def new_damage_type():
'''Selects a random damage type out of the 4 available:"slash", "impact", "magic", "spirit"'''
damage_type = "slash", "impact", "magic", "spirit"
return damage_type[random.randrange(0, len(damage_type))]
def new_weapon_name(weapon_rarity, damage_type):
'''Generates a weapon name based on the damage type'''
name = weapon_rarity.capitalize()+" "
# Damage type weapon names
slash_names = "Great Sword", "Hunting Knife", "Long Sword"
impact_names = "Spiked Mace", "War Hammer", "Heavy Flail"
magic_names = "Engraved Wand", "Wizard Staff", "Enchanted Sword"
spirit_names = "Divine Scepter", "Scrying Ball", "Sacred Chime"
if damage_type == "slash":
name += slash_names[random.randrange(0, len(slash_names))]
elif damage_type == "impact":
name += impact_names[random.randrange(0, len(impact_names))]
elif damage_type == "magic":
name += magic_names[random.randrange(0, len(magic_names))]
elif damage_type == "spirit":
name += spirit_names[random.randrange(0, len(spirit_names))]
return name
def new_weapon(common=80, uncommon=15, rare=5, level=1):
'''Generates a new weapon based on the player's level'''
weapon_rarity = new_rarity(common, uncommon, rare)
weapon_type = new_damage_type()
weapon_name = new_weapon_name(weapon_rarity, weapon_type)
# Generate Attack values with a minimum range of 2
try:
weapon_attack_min = int((10 + math.pow(level, 0.91) * 2)
+ (level * (random.randrange(-100, 100, 1)/1000)))
except:
weapon_attack_min = int((12) + (random.randrange(-1, 2, 1)))
weapon_attack_max = int((weapon_attack_min * (random.randrange(100, 110)/100)) + 5)
weapon_energy_regen = int(weapon_attack_min * (random.randrange(50, 60)/100))
# Calculate weapon cost
weapon_value = math.floor(level/10)*3 + 50 + random.randrange(-3, 3)
# Packs the data into a dictionary to return
weapon_data = {
"item": "weapon",
"level": level,
"rarity": weapon_rarity,
"type": weapon_type,
"name": weapon_name,
"min": weapon_attack_min,
"max": weapon_attack_max,
"energy": weapon_energy_regen,
"value": weapon_value
}
return weapon_data
def print_weapon_info(weapon):
    '''Takes the data returned from new_weapon() and prints it in a readable format'''
    if weapon["name"] == "":
        print("\tWeapon:\t\t", "None Equipped")
    else:
        # NOTE(review): indentation was lost in this copy; the detail lines are
        # placed in the else-branch so an unequipped weapon (empty name, likely
        # no stat keys) prints only "None Equipped" -- confirm against upstream.
        print("\tWeapon:\t\t", weapon["name"], "("+str(weapon["value"])+"g)")
        print("\t Attack:\t\t ", weapon["min"], "-", weapon["max"])
        print("\t Energy Regen:\t\t ", weapon["energy"])
# Generate Armor
def new_armor_type():
    '''Return a random armor type: "plate", "leather", "chain" or "robe".'''
    # random.choice is the idiomatic form of indexing with randrange(len(...)).
    return random.choice(("plate", "leather", "chain", "robe"))
def new_armor_name(armor_rarity, armor_type):
    '''Generate an armor name: capitalized rarity + a random name for the
    armor type.  Unknown armor types yield just the rarity prefix, matching
    the original if/elif chain.'''
    # Armor names per type; a table replaces the repetitive if/elif chain and
    # random.choice replaces manual randrange indexing.
    names_by_type = {
        "plate": ("Dragonscale Plate", "Dragon Breastplate", "Gladiator Plate"),
        "leather": ("Dragonskin Vest", "Leather Jerkin", "Thief's Garb"),
        "chain": ("Crusader Chainmail", "Battle Lamellar", "Scale Hauberk"),
        "robe": ("Blessed Robe", "Enchanted Regalia", "Holy Silks"),
    }
    name = armor_rarity.capitalize() + " "
    if armor_type in names_by_type:
        name += random.choice(names_by_type[armor_type])
    return name
def new_armor(common=80, uncommon=15, rare=5, level=1):
    '''Generates a new armor based on the player's level'''
    armor_rarity = new_rarity(common, uncommon, rare)
    armor_type = new_armor_type()
    armor_name = new_armor_name(armor_rarity, armor_type)
    # Base resistance grows with level plus a small random jitter.
    armor_resistance = int((15 + level * 0.5)
                           + (level * (random.randrange(-50, 50, 1) / 1000)))
    # Rarer armor rolls more bonus stats.
    if armor_rarity == "common":
        stat_amount = 1
    elif armor_rarity == "uncommon":
        stat_amount = random.randint(2, 3)
    elif armor_rarity == "rare":
        stat_amount = 4
    bonus_health = bonus_energy = bonus_dexterity = 0
    for _ in range(stat_amount):
        if rng(1, 3):
            bonus_health += int(math.sqrt(level) * 5) + 10
        elif rng(1, 2):
            bonus_energy += int(math.sqrt(level) * 1.5) + 1
        else:
            bonus_dexterity += int(math.sqrt(level)) + 5
    # Calculate cost of armor.
    armor_value = math.floor(level / 10) * 3 + 50 + random.randrange(-3, 3)
    # Pack the data into a dictionary to return.
    return {
        "item": "armor",
        "level": level,
        "rarity": armor_rarity,
        "type": armor_type,
        "name": armor_name,
        "resistance": armor_resistance,
        "health": bonus_health,
        "energy": bonus_energy,
        "dexterity": bonus_dexterity,
        "value": armor_value
    }
def print_armor_info(armor):
    '''Takes the data returned from new_armor() and prints it in a readable format'''
    if armor["name"] == "":
        print("\tArmor:\t\t", "None Equipped")
    else:
        # NOTE(review): indentation was lost in this copy; detail lines are
        # placed in the else-branch so an unequipped armor (empty name) prints
        # only "None Equipped" -- confirm against upstream.
        print("\tArmor:\t\t", armor["name"], "("+str(armor["value"])+"g)")
        print("\t Type:\t\t\t ", armor["type"].capitalize())
        print("\t Resistance:\t\t ", armor["resistance"])
        # Bonus stats are only shown when non-zero.
        if armor["health"] != 0:
            print("\t Health:\t\t ", armor["health"])
        if armor["energy"] != 0:
            print("\t Energy:\t\t ", armor["energy"])
        if armor["dexterity"] != 0:
            print("\t Dexterity:\t\t ", armor["dexterity"])
# Generate Rings
def new_ring_name(ring_rarity):
    '''Generate a ring name: capitalized rarity + random gem + " Ring".'''
    # random.choice is the idiomatic form of indexing with randrange(len(...)).
    gems = (
        "Coral",
        "Sapphire",
        "Opal",
        "Amethyst",
        "Topaz",
        "Ruby",
        "Diamond",
        "Gold",
        "Platinum",
        "Silver",
        "Emerald"
    )
    return ring_rarity.capitalize() + " " + random.choice(gems) + " Ring"
def new_ring(common=70, uncommon=20, rare=10, level=1):
    '''Generates a new ring based on the player's level'''
    ring_rarity = new_rarity(common, uncommon, rare)
    ring_name = new_ring_name(ring_rarity)
    # Rarer rings roll more bonus stats.
    if ring_rarity == "common":
        stat_amount = 2
    elif ring_rarity == "uncommon":
        stat_amount = random.randint(3, 4)
    elif ring_rarity == "rare":
        stat_amount = random.randint(5, 6)
    bonus_attack = bonus_health = bonus_energy = bonus_dexterity = 0
    for _ in range(stat_amount):
        if rng(1, 4):
            bonus_attack += int(math.sqrt(level)) + 2
        elif rng(1, 3):
            bonus_health += int(math.sqrt(level) * 3) + 1
        elif rng(1, 2):
            bonus_energy += int(math.sqrt(level) * 1.5) + 1
        else:
            bonus_dexterity += int(math.sqrt(level)) + 1
    # Calculate cost of the ring.
    ring_value = math.floor(level / 10) * 3 + 50 + random.randrange(-3, 3)
    # Pack the data into a dictionary to return.
    return {
        "item": "ring",
        "level": level,
        "rarity": ring_rarity,
        "name": ring_name,
        "health": bonus_health,
        "energy": bonus_energy,
        "dexterity": bonus_dexterity,
        "attack": bonus_attack,
        "value": ring_value
    }
def print_ring_info(ring):
    '''Takes the data returned from new_ring() and prints it in a readable format'''
    if ring["name"] == "":
        print("\tRing:\t\t", "None Equipped")
    else:
        # NOTE(review): indentation was lost in this copy; detail lines are
        # placed in the else-branch so an unequipped ring (empty name) prints
        # only "None Equipped" -- confirm against upstream.
        print("\tRing:\t\t", ring["name"], "("+str(ring["value"])+"g)")
        # Bonus stats are only shown when non-zero.
        if ring["attack"] != 0:
            print("\t Attack:\t\t ", ring["attack"])
        if ring["health"] != 0:
            print("\t Health:\t\t ", ring["health"])
        if ring["energy"] != 0:
            print("\t Energy:\t\t ", ring["energy"])
        if ring["dexterity"] != 0:
            print("\t Dexterity:\t\t ", ring["dexterity"])
def new_enemy_name(enemy_data, boss=False):
    '''Generate an enemy name: a race chosen from the armor type plus either
    " Boss" or a job chosen from the weapon type.

    Replaces the repetitive if/elif chains with lookup tables and removes the
    `rng(1, 2) == True` / `boss == True` comparisons (the file relies on
    rng()'s truthiness elsewhere, e.g. in new_rarity()).
    '''
    # Race pair per armor type: first entry when rng(1, 2) fires, else second.
    races = {
        "plate": ("Ogre", "Troll"),
        "leather": ("Goblin", "Bandit"),
        "chain": ("Gremlin", "Kobold"),
        "robe": ("Vampire", "Draconian"),
    }
    # Job pair per weapon type, with the leading space of the original.
    jobs = {
        "slash": (" Scout", " Hunter"),
        "impact": (" Brute", " Warrior"),
        "magic": (" Sorcerer", " Mage"),
        "spirit": (" Witch Doctor", " Shaman"),
    }
    name = ""
    race_pair = races.get(enemy_data["armor"]["type"])
    if race_pair is not None:
        name += race_pair[0] if rng(1, 2) else race_pair[1]
    if boss:
        name += " Boss"
    else:
        job_pair = jobs.get(enemy_data["weapon"]["type"])
        if job_pair is not None:
            name += job_pair[0] if rng(1, 2) else job_pair[1]
    return name
007876f653dbd0314e93352dcbd204824e977e42 | Python | wiktorm3n/Data-Visualization | /Chapter 17/TryIt17_1.py | UTF-8 | 5,493 | 3.328125 | 3 | [] | no_license | import requests
'''Query the GitHub search API for the most-starred repositories in several
languages and print a summary plus per-repository details for each.

Rewritten to loop over the languages: the original repeated the same
request/print code six times with inconsistent and misspelled labels
(e.g. "heskell").
'''

# Languages to survey, in report order.
LANGUAGES = ('haskell', 'javascript', 'ruby', 'c', 'java', 'go')
# Ask for the current (v3) version of the GitHub REST API.
headers = {'Accept': 'application/vnd.github.v3+json'}


def fetch_language(language):
    '''Return the API response for the top-starred *language* repositories.'''
    url = ('https://api.github.com/search/repositories'
           f'?q=language:{language}&sort=stars')
    return requests.get(url, headers=headers)


# Make an API call per language and store the responses.
responses = {language: fetch_language(language) for language in LANGUAGES}
for language, response in responses.items():
    print(f"Status code {language}: {response.status_code}")

# Store each API response body.
response_dicts = {language: response.json()
                  for language, response in responses.items()}
for language, response_dict in response_dicts.items():
    print(f"Total {language} repositories: {response_dict['total_count']}")

# Explore information about the repositories.
repo_dicts = {language: response_dict['items']
              for language, response_dict in response_dicts.items()}
for language, repos in repo_dicts.items():
    print(f"{language} repositories returned: {len(repos)}")

print("\nSelected information about each repository:")
for language, repos in repo_dicts.items():
    for repo_dict in repos:
        print(f"\nSelected information about {language} repository: ")
        print(f"Name: {repo_dict['name']}")
        print(f"Owner: {repo_dict['owner']['login']}")
        print(f"Stars: {repo_dict['stargazers_count']}")
        print(f"Repository: {repo_dict['html_url']}")
        print(f"Created: {repo_dict['created_at']}")
        print(f"Updated: {repo_dict['updated_at']}")
        print(f"Description: {repo_dict['description']}")
| true |
6e928b91364c3f7ad75b63f12f81fd235c0c493d | Python | BoLu2019/luB | /10_occupy_flask_st/utils/reader.py | UTF-8 | 1,368 | 3.609375 | 4 | [] | no_license | import csv
from csv import reader
occDict = {}  # job class -> percentage of the workforce (floats after readcsv)
occList = []  # jobs repeated proportionally to their percentage (see percentages)


# returns a dictionary of jobs/percentage pairs (occDict)
def readcsv():
    '''Populate and return occDict from data/occupations.csv.

    The summary 'Total' row is removed and the percentage strings are
    converted to floats.
    '''
    # DictReader parses the header row and handles commas inside
    # double-quoted fields.
    with open('data/occupations.csv', 'r') as infile:
        for row in csv.DictReader(infile):
            occDict[row['Job Class']] = row['Percentage']
    # Remove the dictionary key/value pair of 'Total'.
    occDict.pop('Total')
    # Convert the percentage strings to floats.  The original used eval(),
    # which executes arbitrary code embedded in the CSV; float() is safe.
    for job in occDict:
        occDict[job] = float(occDict[job])
    return occDict
# returns a list of jobs, with the amount of each job proportional to the
# percentage of the workforce employed in that area (occList)
def percentages():
    '''Fill occList with each job repeated int(percentage * 10) times.'''
    # With the percentages summing to roughly 100 this produces a list of
    # about 1000 entries, so a uniform random pick from occList reflects the
    # workforce distribution.
    for job, percentage in occDict.items():
        occList.extend([job] * int(percentage * 10))
    return occList
| true |
daed5f54f4902d90c7562db580416a65cbda47c2 | Python | iam-abbas/cs-algorithms | /Sorting Algorithm/Selection Sort/Python/selection_sort.py | UTF-8 | 559 | 4.03125 | 4 | [
"MIT"
] | permissive | # Function for selection sort
def selection_sort(array):
    '''Sort *array* in place in ascending order using selection sort.'''
    size = len(array)
    for fill in range(size - 1):
        # Locate the first smallest element in the unsorted tail and swap it
        # into position.
        smallest = min(range(fill, size), key=array.__getitem__)
        array[fill], array[smallest] = array[smallest], array[fill]
# Function to print list
def print_list(array):
    '''Print each element followed by a space, then end the line.'''
    print("".join(f"{item} " for item in array))
# Read `num` integers from stdin, sort them, and display the result.
num = int(input())
array = [int(input()) for _ in range(num)]
selection_sort(array)
print_list(array)
| true |
1a3333e774134085f11cc6d07d4d689bc9d5635e | Python | mehrdadn/ray | /python/ray/serve/metric/client.py | UTF-8 | 5,181 | 2.625 | 3 | [
"Apache-2.0",
"MIT"
] | permissive | import asyncio
from typing import Dict, Optional, Tuple, List
from ray.serve.metric.types import (MetricType, convert_event_type_to_class,
MetricMetadata, MetricRecord)
from ray.serve.utils import _get_logger
from ray.serve.constants import METRIC_PUSH_INTERVAL_S
logger = _get_logger()  # module-level logger shared by MetricClient
class MetricClient:
    """Registers metrics and buffers their records locally, pushing the
    buffered batch to a metric exporter actor on a fixed interval.
    """

    def __init__(
        self,
        metric_exporter_actor,
        push_interval: float = METRIC_PUSH_INTERVAL_S,
        default_labels: Optional[Dict[str, str]] = None,
    ):
        """Initialize a client to push metrics to the exporter actor.

        Args:
            metric_exporter_actor: The actor to push metrics to.
            push_interval(float): Seconds to wait between pushes to the
                exporter.
            default_labels(dict): The set of labels to apply for all metrics
                created by this actor. For example, {"source": "worker"}.
        """
        self.exporter = metric_exporter_actor
        self.default_labels = default_labels or dict()
        # Registered metric metadata keyed by hash(metadata); see _new_metric.
        self.registered_metrics: Dict[int, MetricMetadata] = dict()
        # Records buffered locally until the next push.
        self.metric_records: List[MetricRecord] = []
        # The periodic push task can only be scheduled on a running loop.
        assert asyncio.get_event_loop().is_running()
        self.push_task = asyncio.get_event_loop().create_task(
            self.push_to_exporter_forever(push_interval))
        logger.debug("Initialized client")

    def new_counter(self,
                    name: str,
                    *,
                    description: Optional[str] = "",
                    label_names: Optional[Tuple[str]] = ()):
        """Create a new counter.

        Counters are used to capture changes in running sums. An essential
        property of Counter instruments is that two events add(m) and add(n)
        are semantically equivalent to one event add(m+n). This property means
        that Counter events can be combined.

        Args:
            name(str): The unique name for the counter.
            description(Optional[str]): The description for the counter.
            label_names(Optional[Tuple[str]]): The set of label names to be
                added when recording the metrics.

        Usage:
            >>> client = MetricClient(...)
            >>> counter = client.new_counter(
                    "http_counter",
                    description="This is a simple counter for HTTP status",
                    label_names=("route", "status_code"))
            >>> counter.labels(route="/hi", status_code=200).add()
        """
        return self._new_metric(name, MetricType.COUNTER, description,
                                label_names)

    def new_measure(self,
                    name,
                    *,
                    description: Optional[str] = "",
                    label_names: Optional[Tuple[str]] = ()):
        """Create a new measure.

        Measure instruments are independent. They cannot be combined as with
        counters. Measures can be aggregated after recording to compute
        statistics about the distribution along selected dimension.

        Args:
            name(str): The unique name for the measure.
            description(Optional[str]): The description for the measure.
            label_names(Optional[Tuple[str]]): The set of label names to be
                added when recording the metrics.

        Usage:
            >>> client = MetricClient(...)
            >>> measure = client.new_measure(
                    "latency_measure",
                    description="This is a simple measure for latency in ms",
                    label_names=("route"))
            >>> measure.labels(route="/hi").record(42)
        """
        return self._new_metric(name, MetricType.MEASURE, description,
                                label_names)

    def _new_metric(
        self,
        name,
        metric_type: MetricType,
        description: str,
        label_names: Tuple[str] = (),
    ):
        """Create, register, and return a metric object of *metric_type*."""
        if not isinstance(label_names, tuple):
            raise ValueError("label_names need to be a tuple, it is {}".format(
                type(label_names)))
        metric_metadata = MetricMetadata(
            name=name,
            type=metric_type,
            description=description,
            label_names=label_names,
            default_labels=self.default_labels.copy(),
        )
        # The metadata hash doubles as the registration key, so re-creating a
        # metric with identical name/type/labels is rejected below.
        key = hash(metric_metadata)
        if key in self.registered_metrics:
            raise ValueError("Metric named {} and associated metadata "
                             "is already registered.".format(name))
        self.registered_metrics[key] = metric_metadata
        metric_class = convert_event_type_to_class(metric_type)
        metric_object = metric_class(
            client=self, key=key, label_names=label_names)
        return metric_object

    async def _push_to_exporter_once(self):
        """Send the currently buffered records (if any) to the exporter."""
        if len(self.metric_records) == 0:
            return
        # Swap the buffer out first so records arriving while the remote call
        # is in flight accumulate in a fresh batch.
        old_batch, self.metric_records = self.metric_records, []
        logger.debug("Pushing metric batch {}".format(old_batch))
        await self.exporter.ingest.remote(self.registered_metrics, old_batch)

    async def push_to_exporter_forever(self, interval_s):
        """Loop forever, pushing the buffer every *interval_s* seconds."""
        while True:
            await self._push_to_exporter_once()
            await asyncio.sleep(interval_s)
| true |
9f648d114d3ace475e6f7d53e6d8a4475b9b36dc | Python | kothamanideep/iprimedpython | /day9tasks/decarators.py | UTF-8 | 589 | 3.734375 | 4 | [] | no_license | # def simple(a,b):
# return a+b
# # print(simple(2,3))
# x=simple
# print(x(2,3))
# class decarator:
# def __init__(self,a,b):
# self.a=a
# self.b=b
# print("decarator is working")
# obj=decarator
# obj(1,2)
# def sum(a,b):
# return a+b
# def difference(a,b):
# return a-b
# def operation(op,x,y):
# result=op(x,y)
# return result
# print(operation(sum,10,5))
# print(operation(difference,10,5))
def hello(fn):
    '''Decorator: the wrapped callable prints "hello" and then delegates to *fn*.

    Improvements over the original: arguments and the return value of *fn* are
    now passed through, and functools.wraps preserves fn's metadata.
    '''
    import functools

    @functools.wraps(fn)
    def sayhello(*args, **kwargs):
        print("hello")
        return fn(*args, **kwargs)
    return sayhello
@hello
def sayhi():
    # Decorated by hello, so calling sayhi() prints "hello" and then "hi".
    print("hi")
sayhi()  # demo call
| true |
a61fd5a1d0c2318afa5631233499397305c05c6f | Python | DeniseIvy/DiegoTheVoiceAssistant | /main.py | UTF-8 | 2,524 | 2.84375 | 3 | [] | no_license | import speech_recognition as sr
import pywhatkit as kit
import datetime
import webbrowser
import pyttsx3
import time
import subprocess
r = sr.Recognizer()  # shared speech recognizer used by record_audio()
engine = pyttsx3.init()  # text-to-speech engine used by talk()
voices = engine.getProperty('voices',)  # available TTS voices (currently unused)
def talk(text):
    '''Speak *text* aloud via the module-level text-to-speech engine.'''
    engine.say(text)
    engine.runAndWait()
def record_audio(ask=False):
    '''Listen on the microphone and return the recognized text ('' on failure).

    If *ask* is a non-empty string it is spoken as a prompt first.

    Bug fixed: the original called r.listen() twice (the second result was
    bound to an unused ``voice`` variable), forcing the user to speak while
    an extra capture ran; a single listen now happens inside the try block.
    '''
    with sr.Microphone() as source:
        if ask:
            talk(ask)
        voice_data = ''
        try:
            audio = r.listen(source)
            voice_data = r.recognize_google(audio)
        except sr.UnknownValueError:
            talk('Sorry I did not get that')
        except sr.RequestError:
            talk('Sorry, my speech service is down')
        return voice_data
def respond(voice_data):
    """Dispatch on keywords in *voice_data* and perform the matching action.

    Several keywords may match in one utterance; each matching branch runs.
    """
    if 'name' in voice_data:
        print('Hi, my name is Diego the Digital Assistant.')
        talk('Hi, my name is Diego the Digital Assistant.')
    if 'time' in voice_data:
        # Renamed from `time`: the old local shadowed the imported time module.
        current_time = datetime.datetime.now().strftime('%I:%M %p')
        talk('The current time is' + current_time)
        print('The current time is' + current_time)
    if 'search' in voice_data:
        search = record_audio('What do you want to search for?')
        url = 'https://google.com/search?q=' + search
        webbrowser.get().open(url)
        talk('Here is what I found' + search)
    if 'location' in voice_data:
        location = record_audio('What is the location?')
        url = 'https://google.nl/maps/place/' + location + '/&:'
        webbrowser.get().open(url)
        talk('Here is the location of' + location)
    if 'play' in voice_data:
        song = voice_data.replace('play', '')
        talk('playing' + song)
        print('playing' + song)
        kit.playonyt(song)
    if 'open google' in voice_data:
        url = "http://google.com"
        webbrowser.open(url, new=2)
    # Opening the Desktop Application
    if 'notepad' in voice_data:
        subprocess.Popen(['notepad.exe'])
        talk('Opening the notepad')
    if 'calculator' in voice_data:
        # The Windows calculator binary is calc.exe; 'calculator.exe' does
        # not exist, so Popen raised FileNotFoundError here.
        subprocess.Popen(['calc.exe'])
        talk('Opening the calculator')
    if 'paint' in voice_data:
        subprocess.Popen(['mspaint.exe'])
        talk('Opening the paint')
    # CLosing the app
    if 'exit' in voice_data:
        exit()
    # NOTE(review): returns the function object itself; callers ignore the
    # return value, so this is kept only for compatibility.
    return respond
# Greet the user once, then loop forever: listen, then act on what was said.
time.sleep(1)
print('Hi, I am Diego the Digital Assistant. How can I help you?')
talk('Hi, I am Diego the Digital Assistant. How can I help you?')
while 1:
    voice_data = record_audio()
    respond(voice_data)
| true |
1b1458c3ca4de9418e3cb068ba674919e7d10be2 | Python | jeongleo/Cansat_Terminal | /python_terminal2.py | UTF-8 | 821 | 2.8125 | 3 | [] | no_license | import decoder
import plot
import numpy as np
import matplotlib.pyplot as plt
# Read the ground-station hex dump, decode it packet by packet, and plot
# every recovered GPS fix on the map.
input_file_name = "./입력파일/output_static_home.txt"
with open(input_file_name, 'r') as f: # read the input file
    input_stream = f.read()
a = []
for i in input_stream:
    a.append(int(i, 16)) # convert each hex-digit character to a decimal integer
length = len(a)
input_stream = np.zeros(length//2, dtype=np.uint8)
for i in range(length//2):
    input_stream[i] = a[2*i]*16 + a[2*i + 1] # pack pairs of hex digits into one byte
myDecoder = decoder.Decoder("./출력파일/flight_data_py.csv")
map_plot = plot.PlotGPS("./지도/하빈.png")
for i in input_stream:
    done = myDecoder.check(i) # feed one byte to the packet decoder
    if done:
        # A full packet was decoded; plot its GPS fix.
        # NOTE(review): assumes gps[2]=longitude, gps[1]=latitude,
        # gps[3]=altitude — confirm against decoder.Decoder.
        map_plot.gps_tracking(myDecoder.gps[2], myDecoder.gps[1], myDecoder.gps[3])
plt.show()
| true |
b1f00ef4a9f1dd97c4d03ed35440f766146c3fa9 | Python | shmilee/gdpy3 | /src/cores/converter.py | UTF-8 | 3,322 | 2.515625 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
# Copyright (c) 2019-2020 shmilee
'''
Contains Converter core class.
'''
import re
from .base import BaseCore, AppendDocstringMeta
from ..glogger import getGLogger
__all__ = ['Converter']
clog = getGLogger('C')
class Converter(BaseCore, metaclass=AppendDocstringMeta):
    '''
    Convert raw data in files to pickled data.
    Return results in a dict.
    Attributes
    ----------
    rawloader: rawloader object to get raw data
    files: str or list
        matched file name(s) of raw data
    group: str
        group name of pickled data
    short_files: str
        short files if :attr:`files` list is too long
    '''
    __slots__ = ['_files', '_group']
    @property
    def rawloader(self):
        '''Alias for the underlying loader (a rawloader instance).'''
        return self.loader
    @property
    def files(self):
        '''Matched raw-data file name (str) or names (list).'''
        return self._files
    @property
    def group(self):
        '''Group name of the pickled data, joined from section parts.'''
        return self._group
    @property
    def groupnote(self):
        '''Note for the group; currently identical to :attr:`group`.'''
        return self._group
    # Shape of _short_files_subs, when set by a subclass:
    # None,
    # tuple ('items index 0', 'pat','repl'),
    # etc
    _short_files_subs = None
    @property
    def short_files(self):
        '''A compact, human-readable summary of :attr:`files`.'''
        if self.nitems == '?':
            return self.items[0]
        # else nitems == '+'
        if self._short_files_subs is None:
            # default re sub, preserve section, filter all items:
            # temporarily replace each section pattern with a placeholder,
            # collapse digits to '*', then restore the section patterns.
            items = self.items
            for idx, sect in enumerate(self.section, 65):
                items = [re.sub(sect, '#ST%s#' % chr(idx), i) for i in items]
            items = list({re.sub('\d', '*', i) for i in items})
            res = items
            for idx, sect in enumerate(self.section, 65):
                res = [re.sub('#ST%s#' % chr(idx), sect, i) for i in res]
            if len(res) == 1:
                return res[0]
            else:
                return str(res)
        else:
            # use specified _short_files_subs
            if self._short_files_subs[0] == 0:
                pat, repl = self._short_files_subs[1:]
                return re.sub(pat, repl, self.items[0])
            # etc
            # NOTE(review): any other _short_files_subs[0] value falls
            # through and implicitly returns None — confirm intended.
    @classmethod
    def generate_cores(cls, rawloader):
        '''Return generated Core instances for *rawloader*.'''
        ccs = super(Converter, cls).generate_cores(
            rawloader, rawloader.filenames)
        if ccs:
            # Log which (group, files) pairs were discovered, for debugging.
            group_files = []
            for cc in ccs:
                group_files.append((cc.group, cc.short_files))
            clog.debug("%s: loader, %s; %d group and files, %s."
                       % (ccs[0].clsname, rawloader.path,
                          len(group_files), group_files))
        return ccs
    def __init__(self, rawloader, section, items, common):
        super(Converter, self).__init__(rawloader, section, items, common)
        # nitems == '?' means exactly one matched file; keep a plain str then.
        if self.nitems == '?':
            self._files = self.items[0]
        else:
            self._files = self.items
        self._group = '/'.join(self.section)
    def _convert(self):
        '''Convert raw data. Subclasses must override this hook.'''
        raise NotImplementedError()
    def convert(self):
        '''Read raw data, convert them. Return a dict.

        On failure the exception is logged (with traceback) and None is
        returned implicitly.
        '''
        try:
            clog.info('Converting raw data in %s ...' % self.short_files)
            return self._convert()
        except Exception:
            clog.error('Failed to convert raw data in %s.' % self.short_files,
                       exc_info=1)
| true |
280af3a32eb7b533c7e8457c9cc2a0c70b85b9a4 | Python | okisker/g_voice_its | /excel.py | UTF-8 | 2,129 | 2.71875 | 3 | [] | no_license | import xlrd
# Python 2 script: read an .xls of users whose NetID password expires today
# and text each of them a reminder through Google Voice.
book = xlrd.open_workbook(raw_input("File name: ")) #test.xls
myname = raw_input('Your name: ')
sh = book.sheet_by_index(0)
#print("Cell D30 is {0}".format(sh.cell_value(rowx=29, colx=3)))
#for rx in range(sh.nrows):
#print(sh.row(rx))
# Print all values, iterating through rows and columns
#
num_cols = sh.ncols # Number of columns
for row_idx in range(1, sh.nrows): # Iterate through rows (row 0 is the header)
    print ('-'*40)
    #print ('Row: %s' % row_idx) # Print row number
    date=sh.cell_value(row_idx, colx=0)
    y, m, d, h, i, s = xlrd.xldate_as_tuple(date, book.datemode)
    date = ("{0}/{1}/{2}".format(m, d, y))
    #print("Date: ", date)
    #for col_idx in range(0, num_cols): # Iterate through columns
    #cell_obj = sh.cell(row_idx, col_idx) # Get cell object by row, col
    fullname = ("{0}".format(sh.cell_value(row_idx, colx=1)))
    print("Full name: ", fullname)
    firstname= fullname.split()[0]
    #print("First name: ", firstname)
    # Normalise the cell-phone number: strip separators and a trailing '.0'
    # left over from Excel treating the number as a float.
    cell=(("{0}".format(sh.cell_value(row_idx, colx=5))))
    cell=cell.replace("-","")
    cell=cell.replace("/","")
    cell=cell.replace(".0","")
    #print("Cell: ", cell)
    home=("{0}".format(sh.cell_value(row_idx, colx=6)))
    home=home.replace("-","")
    home=home.replace("/","")
    home=home.replace(".0","")
    #print ('Column: [%s] text: [%s]' % (col_idx, cell_obj))
    messagetext=("Hello %s, my name is %s from the ITS Service Center at Syracuse University, letting you know that your NetID password will expire at the end of the day today, %s. Please change your password at netid.syr.edu before your account is locked tonight. If you have any questions, please call us at 315-443-2677. Thanks!" % (firstname, myname, date))
    #print(messagetext)
    #import subprocess
    #subprocess.call('./g_voice.py', shell=True)
    # NOTE(review): messagetext is built above but a hard-coded test string
    # is sent instead — confirm whether the real message should be used.
    try:
        from googlevoice import Voice
        from googlevoice.util import input
        voice = Voice()
        voice.login()
        phoneNumber = cell
        text = 'testingtesting123'
        voice.send_sms(phoneNumber, text)
    except:
        # NOTE(review): bare except swallows every failure (login, network,
        # bad number), not just rate limiting as the message suggests.
        print("You have sent too many texts!")
| true |
aad4c800c9f2043d315d794480230b87874ca3df | Python | joose1983/answer-for-python-crush-course-2ndE | /Chapter 10/10-11-1.py | UTF-8 | 246 | 3.28125 | 3 | [] | no_license | import json
# Load the stored favorite number (JSON) and report it, or say the file
# is missing.
filename = 'favorite_number.txt'
try:
    with open(filename) as f:
        favorite_num = json.load(f)
except FileNotFoundError:
    # Report which file is missing; the old message printed the literal
    # placeholder "(unknown)" instead of the filename.
    print(f"{filename} not found.")
else:
    print(f"I know your favorite number, it is {favorite_num}")
| true |
3213f9038f3e39e43df26abb72cafa96cff62e2f | Python | stecd/Frequencies-Gradients | /gradient.py | UTF-8 | 1,203 | 2.625 | 3 | [] | no_license | import numpy as np
from scipy import sparse
import cv2 as cv
import matplotlib.pyplot as plt
from utils import Profiler
def computeGradient(im):
    """Reconstruct *im* from its x/y finite-difference gradients.

    Builds a sparse least-squares system with one equation per horizontal
    gradient, one per vertical gradient, plus a single anchor equation
    pinning the top-left pixel, then solves it with LSQR.

    Args:
        im: 2-D grayscale image (H x W array-like of floats).

    Returns:
        H x W numpy array reproducing *im* up to solver tolerance.
    """
    # Import explicitly: scipy.sparse.linalg is a submodule that is not
    # guaranteed to be populated by `from scipy import sparse` alone.
    from scipy.sparse import linalg as sparse_linalg

    im2var = np.arange(im.shape[0] * im.shape[1]).reshape(*im.shape[0:2])
    numPx = im.shape[0] * im.shape[1]
    numEq = 2 * numPx + 1
    # Build directly in LIL format (efficient incremental assignment);
    # the old csr->lil round-trip triggered a sparse-efficiency warning.
    A = sparse.lil_matrix((numEq, numPx))
    b = np.zeros((numEq))

    # Horizontal equations: v(i, j+1) - v(i, j) = im(i, j+1) - im(i, j)
    i = np.repeat(np.arange(im.shape[0]), im.shape[1] - 1)
    j = np.tile(np.arange(im.shape[1] - 1), im.shape[0])
    e = np.arange(im.shape[0] * (im.shape[1] - 1))
    A[e, im2var[i, j + 1]] = 1
    A[e, im2var[i, j]] = -1
    b[e] = im[i, j + 1] - im[i, j]

    # Vertical equations: v(i+1, j) - v(i, j) = im(i+1, j) - im(i, j)
    i = np.repeat(np.arange(im.shape[0] - 1), im.shape[1])
    j = np.tile(np.arange(im.shape[1]), im.shape[0] - 1)
    e = np.arange((im.shape[0] - 1) * im.shape[1]) + len(e)
    A[e, im2var[i + 1, j]] = 1
    A[e, im2var[i, j]] = -1
    b[e] = im[i + 1, j] - im[i, j]

    # Anchor: fix the absolute intensity of the top-left pixel.
    A[-1, im2var[0, 0]] = 1
    b[-1] = im[0, 0]

    V = sparse_linalg.lsqr(A.tocsr(), b)
    V = np.array(V[0]).reshape(*im.shape)
    return V
def init():
    """Demo driver: run the gradient-domain reconstruction and display it."""
    # NOTE(review): computeGradient expects a single-channel (H x W) image —
    # confirm toy_problem.png is grayscale, since imread may return H x W x C.
    im = plt.imread('inputs/toy_problem.png')
    V = computeGradient(im)
    plt.imshow(V, cmap='Greys_r', aspect='equal')
    plt.show()
| true |
1eaadaf9ef53037bc275ce356f4a20187bb2bddd | Python | danmandel/CodeWars | /7kyu/numerical-palindrome/solution.py | UTF-8 | 112 | 3.21875 | 3 | [] | no_license | def palindrome(num):
return str(num)[::-1] == str(num) if isinstance(num, int) and num > 0 else 'Not valid'
| true |
340d148714dec03c93db467dca072afdcf0cc805 | Python | Margarita-Sergienko/codewars-python | /5 kyu/Simple Pig Latin.py | UTF-8 | 450 | 4.21875 | 4 | [] | no_license | # 5 kyu
# Simple Pig Latin
# https://www.codewars.com/kata/520b9d2ad5c005041100000f
# Move the first letter of each word to the end of it, then add "ay" to the end of the word. Leave punctuation marks untouched.
# Examples
# pig_it('Pig latin is cool') # igPay atinlay siay oolcay
# pig_it('Hello world !') # elloHay orldway !
def pig_it(text):
    """Pig-latinise *text*: move each word's first letter to the end and
    append 'ay'. Single punctuation marks (!?.,) pass through unchanged."""
    def convert(word):
        if word in "!?.,":
            return word
        return word[1:] + word[0] + "ay"
    return " ".join(convert(word) for word in text.split())
2f68a10442f622d3a174487c7b41901162d24648 | Python | MansourM61/Blodiator | /blodiator/etc/cntsheetcanavs.py | UTF-8 | 12,923 | 3.046875 | 3 | [
"MIT"
] | permissive | '''
********************************************************************************
Python Script: cntsheetcanvas Module
Writter: Mojtaba Mansour Abadi
Date: 20 Januarry 2019
This Python script is compatible with Python 3.x.
The script is used to define CntSheetCanvas class the container
Blodiator. This module provides required canvas information, panning,
converting coordinates, drawing grids.
CntSheetCanvas
|
|____tk.Canvas
Histoty:
Ver 0.0.10: 26 Feburary 2019;
first code
Ver 0.0.11: 8 March 2019;
1- Snapping coordinate is added
Ver 0.0.31: 24 June 2019;
1- logging is added.
********************************************************************************
'''
import tkinter as tk
from . import coloredtext
from ..grafix import gfxline
# Default text attributes used when printing log messages via ColoredText.
style = 'normal'
fg = 'purple'
bg = 'black'
src = 'CanvasSheet: '
#################################################
WIDTH = 300 # canvas width
HEIGHT = 500 # canvas height
BACKGROUND_COLOR = 'white' # canvas background color
GRID_STATE = True # default grid state
GRID_X_STEP = 50 # default grid x spacing
GRID_Y_STEP = 50 # default grid y spacing
GRID_X_BRUSH = ('black', 1.0, (2, )) # default colour, thickness and dash style for grid x
GRID_Y_BRUSH = ('red', 1.0, (2, )) # default colour, thickness and dash style for grid y
MODE = ('normal', 'disabled', 'selected', 'erroneous') # states of the object
#################################################
# CntSheetCanvas class: this is the sheet canvas class
# {
class CntSheetCanvas(tk.Canvas):
    """
    Wrapper class for tk.Canvas.
    Define an instance of 'CntSheetCanvas' with appropriate arguments:
    master = root widget
    size = (width, height) of diagram canvas
    std = standard output which is an instance of 'ColoredText' class
    The class creates a canvas and also deals with converting window coordinate
    to canvas coordinate. The grids and guidelines are also managed in this class
    as well as required functions for panning and scrolling.
    """
    version = '0.0.31' # version of the class
    # < class functions section >
    # < class functions section >
    # < inherited functions section >
    # __init__ func: initialiser dunar
    # {
    def __init__(self, master, size=(WIDTH, HEIGHT), std=None):
        """
        Construct a CntSheetCanvas
        input:
            master = root widget
            size = (width, height) of diagram canvas
            std = standard output which is an instance of 'ColoredText' class
        output: none
        """
        if std is None:
            print('\n' + src + ': Please specify a standard output for messages!')
            exit()
        else:
            self.std = std
        self.std.Print('Initialising SheetCanvas', fg, bg, style, src)
        self.__width_neg = 0 # minimum x coordinate
        self.__height_neg = 0 # minimum y coordinate
        self.__width_pos = size[0] # maximum x coordinate
        self.__height_pos = size[1] # minimum y coordinate
        self.__background = BACKGROUND_COLOR
        self.__master = master
        self.__scale = 1
        self.__x_org_zoom = 0
        self.__y_org_zoom = 0
        self.__grid_state = GRID_STATE
        self.__grid_x_step = GRID_X_STEP
        self.__grid_y_step = GRID_Y_STEP
        # NOTE(review): these per-instance brushes are stored but never read;
        # updateGrids() below uses the module-level GRID_X_BRUSH/GRID_Y_BRUSH
        # constants instead — confirm which should win.
        self.__grid_x_brush = GRID_X_BRUSH
        self.__grid_y_brush = GRID_Y_BRUSH
        self.__grid_x = []
        self.__grid_y = []
        super(CntSheetCanvas, self).__init__(master=master,
                                             width=self.__width_pos,
                                             height=self.__height_pos,
                                             background=BACKGROUND_COLOR)
        self.updateGrids()
    # } __init__ func
    # < inherited functions section >
    # < class functions section >
    # convert_coords func: convert coordinates
    # {
    def convert_coords(self, coords_raw, snap_mode, conversion_mode=True):
        """
        Converts windows coordinate to desired coordinate. It considers snapping
        mode (on/off) and type of coversion (canvas to canvas/windows to canvas)
        input:
            coords_raw = raw input coordinate
            snap_mode = if snapping to the grid is considered (True) or not (False)
            conversion_mode = if it is a windows to canvas (True) or canvas to
            canvas (False) conversion
        output:
            x, y = converted coordinates
        """
        if(conversion_mode == True):
            coords = [self.canvasx(coords_raw[0]), self.canvasy(coords_raw[1])]
            pass
        else:
            coords = coords_raw
            pass
        if snap_mode == False:
            return coords
        else:
            # round to the nearest grid intersection
            return round(coords[0]/self.__grid_x_step)*self.__grid_x_step,\
                   round(coords[1]/self.__grid_y_step)*self.__grid_y_step
    # } convert_coords func
    # update_region func: update the canvas region
    # {
    def update_region(self, bbox):
        """
        Updates canvas region based on the input region
        input:
            bbox = region to be updated (x_min, y_min, x_max, y_max)
        output: none
        """
        BBOX_raw = bbox
        BBOX_org = (self.__width_neg, self.__height_neg, self.__width_pos, self.__height_pos)
        # rebuild grids and scroll region only when the bounds actually change
        if(BBOX_raw != BBOX_org):
            self.__width_neg = BBOX_raw[0]
            self.__height_neg = BBOX_raw[1]
            self.__width_pos = BBOX_raw[2]
            self.__height_pos = BBOX_raw[3]
            self.updateGrids()
            self.configure(scrollregion=BBOX_raw)
            pass
        # print(BBOX_raw)
        # print(BBOX_org)
        # if (BBOX_org != BBOX_raw):
        # pass
    # } update_region func
    # draw_grids func: draw the object
    # {
    def draw_grids(self):
        """
        Draws the grids on the canvas, pushing each grid line to the
        bottom of the stacking order so diagram items stay on top.
        input: none
        output: none
        """
        for obj in self.__grid_x:
            obj.draw()
            item = self.find_withtag(obj.tag)
            self.tag_lower(item)
        for obj in self.__grid_y:
            obj.draw()
            item = self.find_withtag(obj.tag)
            self.tag_lower(item)
    # } draw_grids func
    # updateGrids func: update the grid
    # {
    def updateGrids(self):
        """
        Updates the grids on the canvas: erases the old grid lines and,
        when the grid is enabled, rebuilds vertical and horizontal
        GfxLine objects covering the current canvas region.
        input: none
        output: none
        """
        for h_grid_x in self.__grid_x:
            h_grid_x.erase()
        for h_grid_y in self.__grid_y:
            h_grid_y.erase()
        self.__grid_x = []
        self.__grid_y = []
        if self.__grid_state == True:
            # one colour/brush entry per object state in MODE
            colorList = [[GRID_X_BRUSH[0]]*2, [GRID_X_BRUSH[0]]*2, [GRID_X_BRUSH[0]]*2, [GRID_X_BRUSH[0]]*2]
            brushList = [GRID_X_BRUSH[1:3], GRID_X_BRUSH[1:3], GRID_X_BRUSH[1:3], GRID_X_BRUSH[1:3]]
            CS = dict(zip(MODE, colorList))
            BS = dict(zip(MODE, brushList))
            y0 = self.__height_neg
            y1 = self.__height_pos
            dummy = []
            # vertical lines on the positive-x side
            for i in range(0, int(self.__width_pos/self.__grid_x_step)):
                tag = 'grid-x-pos-' + str(i)
                x = (i + 1)*self.__grid_x_step
                pts = [ [x, y0], [x, y1] ]
                obj = gfxline.GfxLine(sheetCanvas=self, points=pts, arrow=(False, (1,1,1)),
                                      std=self.std, tag=tag)
                obj.colorset = CS
                obj.brushset = BS
                dummy.append(obj)
                pass
            # vertical lines on the negative-x side (includes x = 0)
            for i in range(0, int(abs(self.__width_neg)/self.__grid_x_step) + 1):
                tag = 'grid-x-neg-' + str(i)
                x = -(i)*self.__grid_x_step
                pts = [ [x, y0], [x, y1] ]
                obj = gfxline.GfxLine(sheetCanvas=self, points=pts, arrow=(False, (1,1,1)),
                                      std=self.std, tag=tag)
                obj.colorset = CS
                obj.brushset = BS
                dummy.append(obj)
                pass
            self.__grid_x = dummy
            colorList = [[GRID_Y_BRUSH[0]]*2, [GRID_Y_BRUSH[0]]*2, [GRID_Y_BRUSH[0]]*2, [GRID_Y_BRUSH[0]]*2]
            brushList = [GRID_Y_BRUSH[1:3], GRID_Y_BRUSH[1:3], GRID_Y_BRUSH[1:3], GRID_Y_BRUSH[1:3]]
            CS = dict(zip(MODE, colorList))
            BS = dict(zip(MODE, brushList))
            x0 = self.__width_neg
            x1 = self.__width_pos
            dummy = []
            # horizontal lines on the positive-y side
            # NOTE(review): y tags are 'grid-pos-y-*'/'grid-neg-y-*' while x
            # tags are 'grid-x-pos-*'/'grid-x-neg-*' — inconsistent naming.
            for i in range(0, int(self.__height_pos/self.__grid_y_step)):
                tag = 'grid-pos-y-' + str(i)
                y = (i + 1)*self.__grid_y_step
                pts = [ [x0, y], [x1, y] ]
                obj = gfxline.GfxLine(sheetCanvas=self, points=pts, arrow=(False, (1,1,1)),
                                      std=self.std, tag=tag)
                obj.colorset = CS
                obj.brushset = BS
                above_tag = self.find_above(tag)
                if above_tag:
                    self.tag_lower(above_tag, tag)
                dummy.append(obj)
                pass
            # horizontal lines on the negative-y side (includes y = 0)
            for i in range(0, int(abs(self.__height_neg)/self.__grid_y_step) + 1):
                tag = 'grid-neg-y-' + str(i)
                y = -(i)*self.__grid_y_step
                pts = [ [x0, y], [x1, y] ]
                obj = gfxline.GfxLine(sheetCanvas=self, points=pts, arrow=(False, (1,1,1)),
                                      std=self.std, tag=tag)
                obj.colorset = CS
                obj.brushset = BS
                above_tag = self.find_above(tag)
                if above_tag:
                    self.tag_lower(above_tag, tag)
                dummy.append(obj)
                pass
            self.__grid_y = dummy
        self.draw_grids()
    # } updateGrids func
    # zoom func: zoom the sheet
    # {
    def zoom(self):
        """
        Zooms in/out the canvas. (under development)
        NOTE(review): tk.Canvas.scale applies the factor cumulatively, so
        each call multiplies the current item coordinates again.
        input: none
        output: none
        """
        self.scale("all", self.__x_org_zoom, self.__y_org_zoom, self.__scale, self.__scale)
    # } zoom func
    # start_pan func: start pan the sheet
    # {
    def start_pan(self, point):
        """
        Starts panning function based on input point
        input:
            point: start point coordinate
        output: none
        """
        self.scan_mark(point[0], point[1])
    # } start_pan func
    # stop_pan func: stop pan the sheet
    # {
    def stop_pan(self, point):
        """
        Stops panning function based on input point
        input:
            point: stop point coordinate
        output: none
        """
        self.scan_dragto(point[0], point[1], 1)
    # } stop_pan func
    # < class functions section >
    # < getter and setter functions section >
    # property: grid_zoom_org
    # grid_zoom_org getter func: grid zoom org getter
    # {
    @property
    def grid_zoom_org(self):
        """
        Class property getter: origin point of zoom operation
        """
        return self.__x_org_zoom, self.__y_org_zoom
    # } grid_zoom_org getter func
    # grid_scale setter func: grid zoom org setter
    # {
    @grid_zoom_org.setter
    def grid_zoom_org(self, grid_org_zoom):
        """
        Class property setter: origin point of zoom operation
        """
        self.__x_org_zoom, self.__y_org_zoom = grid_org_zoom
    # } grid_zoom_org setter func
    # property: grid_scale
    # grid_scale getter func: grid scale getter
    # {
    @property
    def grid_scale(self):
        """
        Class property getter: scale of zoom operation
        """
        return self.__scale
    # } grid_scale getter func
    # grid_scale setter func: grid scale setter
    # {
    @grid_scale.setter
    def grid_scale(self, grid_scale):
        """
        Class property setter: scale of zoom operation
        (setting it immediately applies the zoom)
        """
        self.__scale = grid_scale
        self.zoom()
    # } grid_scale setter func
    # property: grid_state
    # grid_state getter func: grid state getter
    # {
    @property
    def grid_state(self):
        """
        Class property getter: grid on/off state
        """
        return self.__grid_state
    # } grid_state getter func
    # grid_state setter func: grid state setter
    # {
    @grid_state.setter
    def grid_state(self, grid_state):
        """
        Class property setter: grid on/off state
        (setting it rebuilds the grid lines)
        """
        self.__grid_state = grid_state
        self.updateGrids()
    # } grid_state setter func
    # property: grid_x_step
    # grid_x_step getter func: grid x step getter
    # {
    @property
    def grid_x_step(self):
        """
        Class property getter: grid x direction step
        """
        return self.__grid_x_step
    # } grid_x_step getter func
    # grid_x_step setter func: grid x step setter
    # {
    @grid_x_step.setter
    def grid_x_step(self, x_step):
        """
        Class property setter: grid x direction step
        (setting it rebuilds the grid lines)
        """
        self.__grid_x_step = x_step
        self.updateGrids()
    # } grid_x_step setter func
    # property: grid_y_step
    # grid_y_step getter func: grid y step getter
    # {
    @property
    def grid_y_step(self):
        """
        Class property getter: grid y direction step
        """
        return self.__grid_y_step
    # } grid_y_step getter func
    # grid_y_step setter func: grid y step setter
    # {
    @grid_y_step.setter
    def grid_y_step(self, y_step):
        """
        Class property setter: grid y direction step
        (setting it rebuilds the grid lines)
        """
        self.__grid_y_step = y_step
        self.updateGrids()
    # } grid_y_step setter func
    # < getter and setter functions section >
# } CntSheetCanvas class
# main func: contains code to test CntSheetCanvas class
# {
def main():
    """Manual smoke test: build a sheet, draw a rectangle, zoom, run Tk loop."""
    CT = coloredtext.ColoredText()
    root = tk.Tk()
    TestApp = CntSheetCanvas(master=root, std=CT, size=(WIDTH, HEIGHT))
    TestApp.create_rectangle([50,50, 200, 200])
    TestApp.pack()
    TestApp.grid_zoom_org = (10, 30)
    # NOTE(review): the grid_scale setter already calls zoom(); the explicit
    # zoom() below scales the items a second time (net 4x) — confirm intent.
    TestApp.grid_scale = 2
    TestApp.zoom()
    # TestApp.start_pan((50, 50))
    # TestApp.stop_pan((10, 50))
    root.mainloop()
    pass
# } main func
if __name__ == '__main__':
    main()
| true |
f98b90c091d3e73c6a6b53ac79fc51a8cd046fb9 | Python | ashwani608/pythonScripts | /SampleFileInputScrapper/scrapper.py | UTF-8 | 719 | 3.234375 | 3 | [] | no_license | import requests
from BeautifulSoup import BeautifulSoup
def myfun (arg):
    # Python 2: fetch *arg* (a URL) and print every row of the
    # 'tbldata14 bdrtpg' table as a list of cell strings.
    url = arg
    response = requests.get(url.strip())
    #add .strip() to remove \n from begining and end
    html = response.content
    soup = BeautifulSoup(html)
    # NOTE(review): if the page has no matching table, `table` is None and
    # the findAll call below raises AttributeError.
    table = soup.find('table', attrs={'class': 'tbldata14 bdrtpg'})
    for row in table.findAll('tr'):
        list_of_cells = []
        for cell in row.findAll('td'):
            text = cell.text.replace(' ', '')
            list_of_cells.append(text)
        print list_of_cells
# Each line of cpu.txt is one URL; whitespace is stripped inside myfun.
with open('cpu.txt') as f:
    for line in f:
        myfun(line);
print 'helloooooooooo'
# Save each URL in the file without quotes, and do not leave a blank line
# between two consecutive URLs.
| true |
179c555c607a3cdb0722408c44469ac50828b54f | Python | dhenriquedba/code-combat | /Masmorra-Kithgard/ingredient-identification.py | UTF-8 | 501 | 4.0625 | 4 | [] | no_license | #Variables are like labeled bottles that hold data.
# A variable is a container with a label that holds data.
# This variable is named `someVariableName`
# It contains the value `"a string"`
someVariableName = "a string"
# This variable is named `lolol`
# It contains the number `42`
lolol = 42
# Create 2 more variables and assign values to them:
# You can name them whatever you want, and hold any value in them!
# Use `=` to assign a value to a variable.
poteCheio = "a string"  # 'full jar' — holds a string value
poteVazio = 42  # 'empty jar' — holds a number
| true |
a966ab32671627d2fca0d9bbb3dd939eb1fc53b6 | Python | thom974/the-green-reaper | /data/scripts/effects.py | UTF-8 | 3,862 | 2.921875 | 3 | [] | no_license | import pygame
import random
#
pygame.init()  # initialise all pygame modules before any Surface work below
# screen = pygame.display.set_mode((500,500))
# char = pygame.image.load('char.png').convert()
# char.set_colorkey((255,255,255))
# char = pygame.transform.scale(char,(100,100))
def create_glitch_effect(size_len,**kwargs):
    """Build the layered 'glitch' surfaces: a translucent tint, a layer of
    big colour blocks, and a layer of thin horizontal slices.

    Args:
        size_len: output width (and default height) in pixels.
        **kwargs: optional 'frame' (Surface, returned scaled to 1.5x
            size_len) and 'height' (overrides the output height).

    Returns:
        [tint, block layer, slice layer] (+ scaled frame when 'frame' was
        given). Rectangle positions/colours are random on every call.
    """
    glitch_colours = [(16, 26, 86), (22, 45, 118), (36, 86, 196), (195, 20, 118), (51, 7, 57), (28, 93, 129),(163, 127, 241), (99, 24, 79), (69, 173, 204)]
    bn = 30   # number of big blocks
    sn = 100  # number of thin slices
    height = size_len
    # Draw everything on fixed 600x600 work surfaces, scale at the end.
    glitch_bg_sl = pygame.Surface((600, 600))
    glitch_bg_fl = pygame.Surface((600, 600))
    glitch_bg = pygame.Surface((600, 600))
    glitch_bg.fill((10, 7, 44))
    glitch_bg.set_alpha(50)
    glitch_bg_fl.set_colorkey((0, 0, 0))
    frame_bg = None
    if 'frame' in kwargs:
        frame_bg = kwargs['frame']
        frame_bg = pygame.transform.scale(frame_bg, (int(size_len * 1.5), int(size_len * 1.5)))
    # large random colour blocks
    for _ in range(bn):
        colour = random.choice(glitch_colours)
        w, h = random.randint(300, 400), random.randint(75, 100)
        x, y = random.randint(-50, 550), random.randint(0, 550)
        pygame.draw.rect(glitch_bg_sl, colour, (x, y, w, h), 0)
    # thin horizontal slice rectangles
    for _ in range(sn):
        colour = random.choice(glitch_colours)
        w, h = random.randint(100, 220), random.randint(4, 7)
        x, y = random.randint(-50, 550), random.randint(0, 550)
        pygame.draw.rect(glitch_bg_fl, colour, (x, y, w, h), 0)
    if 'height' in kwargs:
        height = kwargs['height']
    glitch_bg = pygame.transform.scale(glitch_bg, (size_len,height))
    glitch_bg_sl = pygame.transform.scale(glitch_bg_sl, (size_len, height))
    glitch_bg_fl = pygame.transform.scale(glitch_bg_fl, (size_len, height))
    if frame_bg is not None:
        return [glitch_bg,glitch_bg_sl,glitch_bg_fl,frame_bg]
    else:
        return [glitch_bg,glitch_bg_sl,glitch_bg_fl]
def create_death_screen(num,char):
    """Build the glitched death-screen animation frames for *char*.

    Args:
        num: number of horizontal slices to cut the 100x100 base into.
        char: pygame Surface of the character sprite.

    Returns:
        List of 100x100 frames; each distinct glitched frame is repeated
        50 times so the animation lingers on it.
    """
    # set up base frame: tint + blocks + character + slice layer
    s1, s2, s3 = create_glitch_effect(100)
    s1.set_alpha(255)
    s1.blit(s2,(0,0))
    s1.blit(char, (0, 0))
    s1.blit(s3,(0,0))
    # code for enhanced glitch effect: cut the base into `num` strips,
    # three independent sets of strips
    glitch_frames = []
    char_frames = []
    for _ in range(3):
        temp = []
        for i in range(num):
            # NOTE(review): 100/num is a float in Python 3 — pygame.Rect /
            # Surface sizes get truncated; confirm num divides 100 evenly.
            new_rect = pygame.Rect(0,i*100/num,100,100/num)
            new_surf = pygame.Surface((100,100/num))
            new_surf.blit(s1,(0,0),new_rect)
            temp.append(new_surf)
        glitch_frames.append(temp)
    # for _ in range(2):
    #     temp = []
    #     for i in range(cs_num):
    #         new_rect = pygame.Rect(0, i * cs.get_height() / num, cs.get_width(), cs.get_height() / num)
    #         new_surf = pygame.Surface((cs.get_width(), cs.get_height() / num))
    #         new_surf.blit(s1, (0, 0), new_rect)
    #         temp.append(new_surf)
    #     glitch_frames.append(temp)
    # Reassemble each strip set with a random horizontal jitter per strip.
    for glitch_frame_list in glitch_frames:
        char_surf = pygame.Surface((100,100))
        char_surf.fill((255,255,255))
        char_surf.set_colorkey((255,255,255))
        for j, glitch_frame in enumerate(glitch_frame_list):
            offset = random.randint(-20,20)
            # NOTE(review): vertical placement uses a hard-coded /10 while the
            # strip height is 100/num — strips misalign when num != 10.
            char_surf.blit(glitch_frame,(0 + offset,0+j*100/10))
        for _ in range(50):
            char_frames.append(char_surf)
    return char_frames
def create_glitch_screen(current_screen,num):
    """Return 3 glitched copies of *current_screen*, each made by slicing
    the screen into horizontal strips of height screen_height/num and
    re-blitting every strip with a random horizontal offset.

    Args:
        current_screen: pygame Surface to distort.
        num: strip height divisor (strip height = screen height / num).
    """
    screen_frames = []
    for _ in range(3):
        cs_frame = pygame.Surface((current_screen.get_width(),current_screen.get_height()))
        for i in range(current_screen.get_height()//num):
            offset = random.randint(-100,100)
            # NOTE(review): the random offset shifts the *source* rect's x,
            # not the destination — confirm that is the intended effect.
            new_rect = pygame.Rect(0 + offset, i * current_screen.get_height() / num, current_screen.get_width(), current_screen.get_height() / num)
            cs_frame.blit(current_screen, (0, i * current_screen.get_height() // num), new_rect)
        screen_frames.append(cs_frame)
    return screen_frames
| true |
c4d5020ee3bb6ad4d07b323f1118d158af830316 | Python | hihiworld/pymoo | /pymoo/operators/survival/fitness_survival.py | UTF-8 | 928 | 2.84375 | 3 | [
"MIT"
] | permissive | import numpy as np
from pymoo.model.survival import Survival
from pymop.problem import Problem
class FitnessSurvival(Survival):
    """
    This survival method is just for single-objective algorithm.
    Simply sort by first constraint violation and then fitness value and truncate the worst individuals.
    """
    def _do(self, pop, off, size, **kwargs):
        # Merge parents and offspring, then keep the `size` best individuals.
        pop.merge(off)
        if pop.F.shape[1] != 1:
            raise ValueError("FitnessSurvival can only used for single objective problems!")
        if pop.G is None or len(pop.G) == 0:
            # Unconstrained problem: treat every individual as feasible.
            CV = np.zeros(pop.F.shape[0])
        else:
            CV = Problem.calc_constraint_violation(pop.G)
            # Negative values mean the constraint is satisfied — clamp to 0.
            CV[CV < 0] = 0.0
        # sort by cv and fitness (feasibility first, then objective value)
        sorted_idx = sorted(range(pop.size()), key=lambda x: (CV[x], pop.F[x]))
        # now truncate the population
        sorted_idx = sorted_idx[:size]
        pop.filter(sorted_idx)
| true |
7a6ba8c53ba23bd30c9e5ca61695b921aa0d3070 | Python | plutoese/mars | /application/DataWarehouse/database/class_admindatabase.py | UTF-8 | 2,429 | 3.046875 | 3 | [] | no_license | # coding=UTF-8
# -----------------------------------------------------------------------------------------
# @author: plutoese
# @date: 2015.10.10
# @class: AdminDatabase
# @introduction: 类AdminDatabase表示行政区划数据库。
# @property:
# - period: 数据库覆盖的年份
# @method:
# - find(self,**conds):查询数据,参数conds是一系列参数。返回值是pymongo.cursor。
# - version(year):数据库行政区划的版本,参数year是年份,默认参数None,表示所有年份。返回值
# 是版本的列表。
# -----------------------------------------------------------------------------------------
from DataWarehouse.database.class_database import Database
from pymongo import ASCENDING
class AdminDatabase(Database):
    '''
    AdminDatabase connects to the administrative-division (region) database.
    '''
    # constructor
    def __init__(self):
        # connect to the AdminCode collection of regionDB
        Database.__init__(self)
        self._connect('regionDB','AdminCode')
    # query
    def find(self,**conds):
        '''Query AdminCode records.

        Keyword arguments may include 'projection' (a Mongo projection
        dict), 'sorts' (a list of (field, direction) pairs) and any other
        field filters; list-valued filters become Mongo $in queries.
        Returns a pymongo cursor.
        '''
        # resolve the projection (default: the common region fields)
        projection = conds.get('projection')
        if projection is None:
            projection = {'region':1,'version':1,'adminlevel':1,'acode':1,'_id':1,'parent':1}
        else:
            conds.pop('projection')
        # resolve the sort order (default: by year, then acode, ascending)
        sorts = conds.get('sorts')
        if sorts is None:
            sorts= [('year',ASCENDING),('acode',ASCENDING)]
        else:
            conds.pop('sorts')
        # build the query condition; list values turn into $in queries
        condition = dict()
        for key in conds:
            if isinstance(conds[key],list):
                condition[key] = {'$in':conds[key]}
            else:
                condition[key] = conds[key]
        # run the query and return the sorted cursor
        return self.collection.find(condition,projection).sort(sorts)
    # years covered by the database
    @property
    def period(self):
        return sorted(self.find().distinct('year'))
    # division-version identifiers
    def version(self,year=None):
        '''Return the sorted division versions; for one year when given.'''
        if year is None:
            return sorted(self.find().distinct('version'))
        else:
            return sorted(self.find(year=str(year)).distinct('version'))
if __name__ == '__main__':
    # Ad-hoc smoke test: exercise the query helpers against the live DB.
    db = AdminDatabase()
    print(db.collection)
    print(db.period)
    print(db.version(year=2004))
    print(list(db.find(year='2010',projection={'region':1,'_id':0})))
    print(list(db.find(adminlevel=2,version='2004_12_31')))
| true |
5f7addf07ee791a3b5d2919473f705edb5f4ecae | Python | shreykuntal/My-other-Python-Programs | /programs/2 greatest 10 digit(1).py | UTF-8 | 520 | 3.640625 | 4 | [] | no_license | for_odds =[1,2,3,4,5,6,7,8,9,10] #your list here #random.sample(range(150), 100) # 100 random numbers b/w 0,150
# Scan for the largest odd value in for_odds (defined above).
biggest_odd = float('-inf')
for value in for_odds:
    if value % 2 != 0 and value > biggest_odd:
        biggest_odd = value
print("The greatest odd number is ", biggest_odd)
# Remove the winner from the list, then repeat the scan for the runner-up.
del for_odds[for_odds.index(biggest_odd)]
biggest_odds = float('-inf')
for value in for_odds:
    if value % 2 != 0 and value > biggest_odds:
        biggest_odds = value
print("The second greatest odd number is ", biggest_odds)
| true |
ccd836a98fe7914c0bb4d1e9e17f8a2ce928326b | Python | xiangpengm/algorithm | /dynamic/leetcode_121_easy.py | UTF-8 | 1,381 | 4.15625 | 4 | [
"MIT"
] | permissive | """
给定一个数组,它的第 i 个元素是一支给定股票第 i 天的价格。
如果你最多只允许完成一笔交易(即买入和卖出一支股票),设计
一个算法来计算你所能获取的最大利润。
注意你不能在买入股票前卖出股票。
示例 1:
输入: [7,1,5,3,6,4]
输出: 5
解释: 在第 2 天(股票价格 = 1)的时候买入,在第 5 天(股票价格 = 6)的时候卖出,最大利润 = 6-1 = 5 。
注意利润不能是 7-1 = 6, 因为卖出价格需要大于买入价格。
示例 2:
输入: [7,6,4,3,1]
输出: 0
解释: 在这种情况下, 没有交易完成, 所以最大利润为 0。
"""
from typing import List
class Solution:
    def maxProfit(self, prices: List[int]) -> int:
        """Return the maximum profit from one buy-sell transaction (0 if
        no profitable transaction exists).

        Single pass: track the cheapest price seen so far and the best
        profit achievable by selling at the current price.
        Recurrences:
        # max_profit[i] = max(price[i] - min_buy_price[i-1], max_profit[i-1])
        # min_buy_price[i] = min(price[i], min_buy_price[i-1])
        """
        # Guard first: the original read prices[0] before this length
        # check, so an empty list raised IndexError.
        if len(prices) < 2:
            return 0
        max_profit = 0
        min_buy_price = prices[0]
        for price in prices:
            if price > min_buy_price:
                max_profit = max(price - min_buy_price, max_profit)
            else:
                min_buy_price = min(min_buy_price, price)
        return max_profit
def main():
    """Quick demo: print the max profit for the sample price series."""
    s = Solution()
    prices = [7, 1, 5, 3, 6, 4]
    print(s.maxProfit(prices))
if __name__ == "__main__":
    main()
| true |
229d0295c262c82efdcd04d1ee39b3ddc2ac974c | Python | zch0803/mooc | /Python/numtri.py | UTF-8 | 917 | 2.984375 | 3 | [] | no_license | import numpy as np
def main():
n = int(raw_input())
the_max = n * (n+1) / 2
a = np.zeros((n, n), dtype=int)
length = 0
number = 1
while number <= the_max :
if number == the_max:
a[length*2][length] = number
break
for i in range(0, n-1-length*3):
a[i+length*2][length] = number
number += 1
if number > the_max:
break
for i in range(0, n-1-length*3):
a[n-1-length][i+length] = number
number += 1
if number > the_max:
break
for i in range(0, n-1-length*3):
a[n-1-length-i][n-1-length*2-i] = number
number += 1
length += 1
for i in range(0, n):
for j in range(0,i+1):
if a[i][j] < 10:
print "",
print a[i][j],
print "\n"
if __name__ == '__main__':
main()
| true |
6abb95e9b028cec43b2473fa63a04d5dfc24431d | Python | anthonyz15/MCRSS-Final | /OdinAPI/handler/dao/event_dao.py | UTF-8 | 14,461 | 3.046875 | 3 | [] | no_license | from .config.sqlconfig import db_config
from flask import jsonify
import psycopg2
from datetime import datetime
class EventDAO:
    def __init__(self):
        """Open a psycopg2 connection using the shared db_config settings."""
        connection_url = "dbname={} user={} password={} host ={} ".format(
            db_config['database'],
            db_config['username'],
            db_config['password'],
            db_config['host']
        )
        self.conn = psycopg2.connect(connection_url)#Establish a connection with the relational database.
    def getAllEvents(self):
        """
        Gets all events in the database.
        Performs a query on the database in order to get all
        valid events in the database. It returns a list of the
        events with their information including their
        sport and branch. It will also include the scores of an
        event if they exist.
        Returns:
            A list containing all the valid events in the
            database with their information; None when no valid
            event exists; an error message string if the query fails.
        """
        cursor = self.conn.cursor()
        # Join events to their team/sport/branch and (optionally) final score;
        # filter out invalidated events, teams and scores.
        query = """select E.id,E.event_date,E.is_local,E.venue,E.team_id,E.opponent_name,E.event_summary,S.name,S.sport_image_url,B.name,T.season_year,F.local_score,F.opponent_score
                from (event as E inner join ((sport as S inner join branch as B on S.branch_id=B.id) inner join team as T on S.id=T.sport_id) on E.team_id=T.id) full outer join final_score as F on F.event_id=E.id
                where E.is_invalid=false
                and T.is_invalid=false
                and (F.is_invalid=false
                or F.is_invalid is null)
                """
        result = None
        try:
            cursor.execute(query)
            result = []#Will contain the event records
            for row in cursor:
                result.append(row)
            if not result:#No valid event exist
                # NOTE(review): returns before cursor.close(); the connection
                # is still closed in the finally block.
                return None
            cursor.close()
        except Exception as e:
            print(e)
            return "Occurrió un error interno tratando de buscar todos los eventos."
        finally:
            if self.conn is not None:
                self._closeConnection()
        return result
def get_events_in_24_hours(self):
cursor = self.conn.cursor()
moment = datetime.now()
query = """select id, event_date, is_local, venue, team_id, opponent_name, event_summary
from event
where is_invalid=false
and event_date > %s::timestamp + '1 day'::INTERVAL
and event_date < %s::timestamp + '1 day'::INTERVAL + '32 minutes'::INTERVAL
"""
result = None
try:
cursor.execute(query, (moment, moment))
result = [] # Will contain the event records
for row in cursor:
result.append(row)
if not result: # No valid event exist
return None
cursor.close()
except Exception as e:
print(e)
return "Occurrió un error interno tratando de buscar todos los eventos."
finally:
if self.conn is not None:
self._closeConnection()
return result
def getEventsByTeam(self,tID):
"""
Gets all the events in the database of a
team by their id.
Performs a query on the database in order to fetch
all valid events of a team by their id. It returns
a list containing all the valid events of the team.
Args:
tID: The id of the team participating in the events.
Returns:
A list containing the all of the valid events in
the database that have the team identified by their
id as a participant.
"""
cursor = self.conn.cursor()
query = """select E.id,E.event_date,E.is_local,E.venue,E.team_id,E.opponent_name,E.event_summary,S.name,S.sport_image_url,B.name,T.season_year,F.local_score,F.opponent_score
from (event as E inner join ((sport as S inner join branch as B on S.branch_id=B.id) inner join team as T on S.id=T.sport_id) on E.team_id=T.id) full outer join final_score as F on F.event_id=E.id
where E.is_invalid=false
and T.is_invalid=false
and E.team_id=%s
and (F.is_invalid=false
or F.is_invalid is null)
"""
result = None
try:
cursor.execute(query,(tID,))
result = []#Will contain the event records
for row in cursor:
result.append(row)
if not result:#No valid event exist
return "Occurrió un error interno tratando de buscar los eventos de un equipo.\n Al parecer no hay eventos para ese equipo."
cursor.close()
except Exception as e:
print(e)
return "Occurrió un error interno tratando de buscar los eventos de un equipo."
finally:
if self.conn is not None:
self._closeConnection()
return result
def getEventByID(self,eID):
"""
Gets a single event in the database by the
id of the event given.
Performs a query on the database in order to fetch
a valid event in the database by the id given.
Args:
eID: The id of the event to be fetched.
Returns:
A list containing the information of the event in
the database that has the id given.
"""
cursor = self.conn.cursor()
query = """select E.id,E.event_date,E.is_local,E.venue,E.team_id,E.opponent_name,E.event_summary,S.name,S.id,S.sport_image_url,B.name,T.season_year
from (event as E inner join ((sport as S inner join branch as B on S.branch_id=B.id) inner join team as T on S.id=T.sport_id) on E.team_id=T.id)
where E.is_invalid=false
and T.is_invalid=false
and E.id=%s
"""
result = None
try:
cursor.execute(query,(eID,))
result = cursor.fetchone()
cursor.close()
except Exception as e:
print(e)
return "Occurrió un error interno tratando de buscar un evento por su identificador."
finally:
if self.conn is not None:
self._closeConnection()
return result
def getEventTeamByID(self,eID):
"""
Returns the team id of an existing event.
Peforms a query in the database in order to
get the id of the team participating in the
event identified by the eID given. Then it
returns the id of the team if the event is
valid and it exists.
Args:
eID: The id of the event.
Returns:
The id of the team participating in the
event.
"""
cursor =self.conn.cursor()
query = """select T.id
from (event as E inner join ((sport as S inner join branch as B on S.branch_id=B.id) inner join team as T on S.id=T.sport_id) on E.team_id=T.id)
where E.is_invalid=false
and T.is_invalid=false
and E.id=%s
"""
tID = None
try:
cursor.execute(query,(eID,))
tID = cursor.fetchone()[0]
cursor.close()
except Exception as e:
print(e)
tID = None
finally:
if self.conn is not None:
self._closeConnection()
return tID
def addEvent(self,tID,eventDate,isLocal,venue,opponentName,eventSummary):
"""
Adds a new event into the database with the information given.
Uses the arguments given in order to perform an insert query on
the database.Then it returns the id of the newly added event in
the datase.
Args:
tID: The id of the team participating in the event.
eventDate: Date of the event.
isLocal: Designates if the game is local.
venue: The venue for the event.
opponentName: The name of the opponent team.
eventSummary: A summary of the event.
Returns:
The id of the newly added event.
"""
cursor = self.conn.cursor()
query = """insert into event(team_id,event_date,is_local,venue,opponent_name,event_summary,is_invalid)
values(%s,%s,%s,%s,%s,%s,false)
returning id;
"""
eID = None
try:
cursor.execute(query,(tID,eventDate,isLocal,venue,opponentName,eventSummary,))
eID = cursor.fetchone()[0]
if not eID:
return "Occurrió un error interno tratando de añadir un evento."
self.commitChanges()
cursor.close()
except Exception as e:
print(e)
return "Occurrió un error interno tratando de añadir un evento."
return eID
def editEvent(self,eID,eventDate,isLocal,venue,opponentName,eventSummary):
"""
Updates an existing event in the database.
Uses the arguments given in order to perform an update query on
the database with the id of the edit given.Then it returns
the id of the newly updated event in the datase.
Args:
eID: The id of event to be updated.
eventDate: Date of the event.
isLocal: Designates if the game is local.
venue: The venue for the event.
opponentName: The name of the opponent team.
eventSummary: A summary of the event.
Returns:
The id of the newly updated event.
"""
cursor = self.conn.cursor()
query = """update event
set event_date=%s,
is_local=%s,
venue=%s,
opponent_name=%s,
event_summary=%s
where id=%s
and is_invalid=false
returning id;
"""
eid = None
try:
cursor.execute(query,(eventDate,isLocal,venue,opponentName,eventSummary,eID,))
eid = cursor.fetchone()[0]
if not eid:
return "Occurrió un error interno tratando de eliminar un evento existente."
self.commitChanges()
cursor.close()
except Exception as e:
print(e)
return "Occurrió un error interno tratando de actualizar un evento existente."
return eid
def removeEvent(self,eID):
"""
Invalidates an event on the database.
This method accepts the id of the event in order
to set the is_invalid field to true in the database.
This effectively acts as a removal of the event in
from the system.
Args:
eID: The id of the event to invalidate.
Returns
The id of the updated event record.
"""
cursor = self.conn.cursor()
query = """update event
set is_invalid=true
where id=%s
returning id;
"""
eid = None
try:
cursor.execute(query,(eID,))
eid = cursor.fetchone()[0]
if not eid:
return "Occurrió un error interno tratando de eliminar un evento existente."
self.commitChanges()
cursor.close()
except Exception as e:
print(e)
return "Occurrió un error interno tratando de eliminar un evento existente."
finally:
if self.conn is not None:
self.conn.close()
return eid
def commitChanges(self):
"""
Commits the changes done on the database after
insertion and update queries have been done on the
database.
Uses the connection created when this EventDAO was
instantiated to commit the changes performed on the datasase
after insertion and update queries.
"""
self.conn.commit()
def teamExists(self,tID):
"""
Confirms the existance of a team by the team id
given.
Performs a simple fetch query to determine if
the team given exists.
Args:
tID: The id of the team being confirmed
Returns:
True if the team exists in the database,
false otherwise.
"""
cursor = self.conn.cursor()
exists = True
query = """select id
from team
where id=%s
and is_invalid=false
"""
try:
cursor.execute(query,(tID,))
if not cursor.fetchone():
exists = False
except Exception as e:
print(e)
exists = False
return exists
def eventExists(self,eID):
"""
Confirms the existance of a event by the event id
given.
Performs a simple fetch query to determine if
the event given exists.
Args:
eID: The id of the event being confirmed
Returns:
True if the event exists in the database,
false otherwise.
"""
cursor = self.conn.cursor()
exists = True
query = """select id
from event
where id=%s
and is_invalid=false
"""
try:
cursor.execute(query,(eID,))
if not cursor.fetchone():
exists = False
except Exception as e:
print(e)
exists = False
return exists
def _closeConnection(self):
"""
Closes the connection to the database.
"""
if self.conn is not None:
self.conn.close()
| true |
bbf7f7018c4ba3dcac5f68b164606225903df87c | Python | vrlambert/project_euler | /45_triangle_pentagonal_hexagonal.py | UTF-8 | 486 | 4.15625 | 4 | [] | no_license | # Find the next number after 40755 that is triangular, hexagonal, and pentagonal
# Turns out all hexagonal numbers are triangular, so just check those
def main():
    """Print the triangle-pentagonal-hexagonal numbers after 40755.

    Every hexagonal number H(n) = n(2n - 1) is also triangular, so it is
    enough to scan hexagonal numbers and test each for pentagonality.
    H(143) = 40755 is the known previous example, hence the scan starts
    at n = 144; P(165) = 40755, hence the pentagonal counter starts at 165.

    Fixed for Python 3: the Python 2 ``print`` statement was a SyntaxError
    and ``/`` produced a float; ``//`` keeps the arithmetic in integers.
    """
    n_hex = 144    # index of the first hexagonal number to test
    n_pent = 165   # pentagonal index; advances monotonically, never resets
    pentag = 1     # most recently generated pentagonal number
    while n_hex < 100000:
        hexag = n_hex * (2 * n_hex - 1)
        # Advance the pentagonal sequence until it reaches or passes hexag.
        # Both sequences are increasing, so n_pent never needs resetting.
        while pentag < hexag:
            pentag = n_pent * (3 * n_pent - 1) // 2
            n_pent += 1
        if pentag == hexag:
            print(hexag, pentag)
        n_hex += 1
main()
| true |
989389fee9b6ad75ae78e3fffbb4da9e66bb9bd9 | Python | InkaTriss/InkaTriss | /wizzair.py | UTF-8 | 3,825 | 2.71875 | 3 | [] | no_license | #!/usr/bin/python3
from selenium import webdriver
import unittest
from time import sleep
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# Registration test fixtures.
# NOTE(review): "adres1" is a deliberately malformed e-mail address (no "@"),
# used to trigger the validation error asserted at the end of the test.
name='Paulina'
surname='Kozak'
valid_phone_number='697909233'
adres1="kozanioagmail.pl"
password= "1887gTy3"
valid_country="Polska"
class WizzairRegistration(unittest.TestCase):
    """Selenium end-to-end test of the Wizzair registration form.

    NOTE(review): there is no tearDown, so the Chrome driver is never quit
    after the test finishes — consider adding one.
    """
    def setUp(self):
        # Open a fresh Chrome session on the Polish Wizzair landing page.
        self.driver = webdriver.Chrome()
        self.driver.get('https://wizzair.com/pl-pl#/')
        #self.driver.implicitly_wait(15)
        self.driver.maximize_window()
    def testCorrectRegistration(self):
        """Fill the registration form with an invalid e-mail and assert that
        exactly one validation error ("Nieprawidłowy adres e-mail") appears.

        NOTE(review): the method name suggests a successful registration,
        but the assertions check the invalid-e-mail path.
        """
        driver=self.driver
        # Locate the sign-in button
        sign_in=WebDriverWait(driver, 45).until(EC.element_to_be_clickable((By.XPATH,'//button[@data-test="navigation-menu-signin"]')))
        sign_in.click()
        rejestracja=WebDriverWait(driver, 45).until(EC.element_to_be_clickable((By.XPATH, '//button[text()= " Rejestracja "]')))
        rejestracja.click()
        imie = WebDriverWait(driver, 15).until(EC.element_to_be_clickable((By.NAME, 'firstName')))
        imie.send_keys(name)
        nazwisko = WebDriverWait(driver, 15).until(EC.element_to_be_clickable((By.NAME, 'lastName')))
        nazwisko.send_keys(surname)
        f = driver.find_element_by_xpath('//label[@data-test="register-genderfemale"]').click()
        nr_kraju = driver.find_element_by_xpath('//div[@data-test="booking-register-country-code"]').click()
        # TODO(review): replace this fixed sleep with an explicit WebDriverWait
        sleep(4)
        polska = WebDriverWait(driver, 15).until(EC.element_to_be_clickable((By.XPATH, '//li[@data-test="PL"]')))
        polska.click()
        nr_telefonu=driver.find_element_by_name('phoneNumberValidDigits').send_keys(valid_phone_number)
        email_input=driver.find_element_by_name('email')
        email_input.send_keys(adres1)
        password_input=driver.find_element_by_name('password')
        password_input.send_keys(password)
        country_field = driver.find_element_by_xpath('//input[@data-test="booking-register-country"]')
        country_field.click()
        # Look up the country list container
        country_to_choose = driver.find_element_by_xpath("//div[@class='register-form__country-container__locations']")
        # Find the "label" elements inside the "countries" container
        countries = country_to_choose.find_elements_by_tag_name("label")
        # Iterate over every element in the "countries" list
        for label in countries:
            option=label.find_element_by_tag_name('strong')
            if option.get_attribute("innerText") == valid_country:
                # Accessing this property scrolls the option into view.
                option.location_once_scrolled_into_view
                option.click()
                break
        akceptacja=driver.find_element_by_xpath('//label[@for="registration-privacy-policy-checkbox"][@class="rf-checkbox__label"]')
        akceptacja.click()
        zarejestruj=driver.find_element_by_xpath('//button[@data-test="booking-register-submit"]')
        zarejestruj.click()
        error_notices = driver.find_elements_by_xpath('//span[@class="rf-input__error__message"]/span')
        # Collect the visible errors into visible_error_notices
        visible_error_notices = []
        for error in error_notices:
            # If the error is displayed, add it to the list
            if error.is_displayed():
                visible_error_notices.append(error)
        # Check that exactly one error is visible
        assert len(visible_error_notices) == 1
        # Check the text of the visible error
        error_text = visible_error_notices[0].get_attribute("innerText")
        assert error_text == "Nieprawidłowy adres e-mail"
# Run the suite with verbose unittest output when executed directly.
if __name__=='__main__':
    unittest.main(verbosity=2)
| true |
dc145d3e6a42f86b1711a49b3d5bf25b2de1631b | Python | void-trinity/Calorie-Counter-Flask | /routes/users.py | UTF-8 | 1,972 | 2.75 | 3 | [] | no_license | from flask_restful import Resource, reqparse
from models.users import UserModel
class Users(Resource):
    """Flask-RESTful resource for looking up (GET) and registering (POST) users."""

    def get(self):
        """Return the user matching the supplied username/password pair.

        Responds with the user's JSON representation, or a 404 payload when
        no matching user exists.

        NOTE(review): credentials arrive as plain request parameters on a
        GET and are matched verbatim by the model lookup — confirm that the
        model layer hashes passwords rather than storing them in plain text.
        """
        parser = reqparse.RequestParser()
        parser.add_argument('username', type=str, help='This field cannot be blank', required=True)
        parser.add_argument('password', type=str, help='This field cannot be blank', required=True)
        data = parser.parse_args()

        username = data['username']
        password = data['password']

        user = UserModel.find_by_username_password(username, password)
        if user is not None:
            return user.json()
        return {'error': 'No user found'}, 404

    def post(self):
        """Create a new user from the posted registration fields.

        Responds with the created user's JSON and HTTP 201 on success, or a
        500 payload when persisting the user fails.

        Fix: the debug ``print`` calls that logged the raw registration
        payload (including the plain-text password) to stdout were removed.
        """
        parser = reqparse.RequestParser()
        parser.add_argument('name', required=True, type=str, help='This field cannot be blank')
        parser.add_argument('weight', required=True, type=float, help='This field cannot be blank')
        parser.add_argument('height', required=True, type=float, help='This field cannot be blank')
        parser.add_argument('gender', required=True, type=str, help='This field cannot be blank')
        parser.add_argument('age', required=True, type=int, help='This field cannot be blank')
        parser.add_argument('username', required=True, type=str, help='This field cannot be blank')
        parser.add_argument('password', required=True, type=str, help='This field cannot be blank')
        data = parser.parse_args()

        user = UserModel(
            name=data['name'],
            weight=data['weight'],
            height=data['height'],
            gender=data['gender'],
            age=data['age'],
            username=data['username'],
            password=data['password']
        )
        try:
            user.save_to_db()
        except Exception as e:
            # Keep a trace of the failure (e.g. duplicate username, DB outage).
            print(e)
            return {'success': False, 'message': 'Server Error'}, 500
        return user.json(), 201
| true |
6af6e27b2c70acb1a64458aa705fd515f5374043 | Python | mdberkey/chess-loser | /agent.py | UTF-8 | 2,146 | 3.140625 | 3 | [
"MIT"
] | permissive | # Lichess chess bot designed to lose above all.
import threading
import berserk as bsk
""" Agent of chess game in Lichess"""
class Agent:
    """Lichess bot account wrapper built on the berserk client."""
    def __init__(self):
        # Read the API token from disk and open an authenticated session,
        # then promote the account to a bot account.
        with open('./lichess.token') as tf:
            self.token = tf.read()
        self.session = bsk.TokenSession(self.token)
        self.client = bsk.Client(session=self.session)
        self.client.account.upgrade_to_bot()
    def accept_challenge(self):
        """
        Accepts any lichess challenge and starts the game
        :return: None
        """
        # Blocks forever on the incoming-event stream; each accepted
        # challenge later arrives as a 'gameStart' event, which spawns a
        # Game thread for that game id.
        for event in self.client.bots.stream_incoming_events():
            if event['type'] == 'challenge':
                self.client.bots.accept_challenge(event['challenge']['id'])
            elif event['type'] == 'gameStart':
                game = Game(self.client, event['game']['id'])
                game.start()
""" Lichess Game """
class Game(threading.Thread):
    """One running lichess game, consumed on its own thread."""
    def __init__(self, client, game_id, **kwargs):
        # Open the per-game state stream; the first item is the full
        # initial game state, kept in self.current_state.
        super().__init__(**kwargs)
        self.game_id = game_id
        self.client = client
        self.stream = client.bots.stream_game_state(game_id)
        self.current_state = next(self.stream)
    def run(self):
        """
        Main game loop for bot to read game state and make moves
        :return: None
        """
        self.client.bots.post_message(self.game_id, 'Prepare to win.')
        for event in self.stream:
            if event['type'] == 'gameState':
                self.handle_state_change(event)
            elif event['type'] == 'chatLine':
                self.handle_chat_line(event)
    def handle_state_change(self, game_state):
        """
        Evaluates the game state and makes a move
        :param game_state: state of the game
        :return: None
        """
        # Currently only logs the state; move selection is not implemented.
        print(game_state)
        #self.client.bots.make_move(self.game_id, 'e2e4')
    def handle_chat_line(self, chat_line):
        """
        Evaluates the lichess chat and responds depending on chat
        :param chat_line: lichess chat line
        :return: None
        """
        pass
# Start the bot and block forever accepting challenges.
if __name__ == "__main__":
    bot = Agent()
    bot.accept_challenge()
| true |
ee55e430504c7a7f2a39ad4799be44a6c1aa0af6 | Python | cunghaw/Elements-Of-Programming | /17.1 Compute an optimum assignment of tasks/main.py | UTF-8 | 522 | 3.390625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Compute an optimum assignment of tasks
@author: Ronny
"""
def computeOptimumTasks( tasks ):
    """Pair the longest task with the shortest, second longest with second
    shortest, and so on, so the heaviest pair is as light as possible.

    Args:
        tasks: sequence of task durations (even length assumed, as in the
            exercise; with an odd length the middle task is dropped).

    Returns:
        List of (shorter, longer) tuples covering the sorted input.

    Fixed for Python 3: ``len(tasks) / 2`` produced a float there, which
    made the slice indices below raise TypeError; ``//`` keeps them ints
    and is equivalent on Python 2.
    """
    result = []
    len_half_tasks = len( tasks ) // 2
    tasks = sorted( tasks )
    # Walk the upper half backwards against the lower half forwards.
    for max_task, min_task in zip( reversed( tasks[ len_half_tasks: ] ), tasks[ :len_half_tasks ] ):
        result.append( ( min_task, max_task ) )
    return result
if __name__ == '__main__':
    # Smoke test from the book exercise; print() call form runs on both
    # Python 2 and Python 3 (the old print statement was Python 2 only).
    assert( computeOptimumTasks( [ 5, 2, 1, 6, 4, 4 ] ) == [ ( 1, 6 ), ( 2, 5 ), ( 4, 4 ) ] )
    print("All unit tests are passed")
d28ba77e6bfce422f0162281346f46c0f7da0087 | Python | qbzysa/test | /classify/search_data_by_keyword.py | UTF-8 | 1,248 | 2.96875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# __author__:'Administrator'
# @Time : 2018/12/3 19:21
# -*- coding: utf-8 -*-
# __author__:'Administrator'
# @Time : 2018/11/22 17:38
import io
import os
def classify_file(file, keyword):
    """Collect the lines of *file* that contain *keyword*.

    Args:
        file: path of a UTF-8 text file, one record per line.
        keyword: substring searched for in each (stripped) line.

    Returns:
        List of matching lines with surrounding whitespace stripped.

    Fixes: the original leaked an open file object (``open(...).readlines``)
    and used the Python-2-only ``str.decode``; ``io.open`` with an explicit
    encoding decodes on both Python 2 and 3, and the context manager closes
    the handle.
    """
    with io.open(file, encoding='utf-8') as f:
        stripped = [line.strip() for line in f]
    # Original behavior: the keyword is matched against the stripped line.
    return [line for line in stripped if keyword in line]
def write_data(tag, value):
    """Write each item of *value* on its own line to ``txt/<tag>.txt``.

    Args:
        tag: base name of the output file (the ``txt`` directory must
            already exist, as ensured by the script entry point).
        value: iterable of text strings to persist.

    Fix: encoding is now done by the stream (``io.open`` with an explicit
    UTF-8 encoding) instead of the Python-2-only ``str.encode`` dance, so
    the function produces identical file bytes on Python 2 and 3.
    """
    file_name = str(tag) + '.txt'
    with io.open('txt/%s' % file_name, 'w', encoding='utf-8') as f:
        for one in value:
            f.write(one)
            f.write(u'\n')
if __name__ == "__main__":
    if not os.path.exists('txt'):
        os.mkdir('txt')
    # #### Read the segmented keyword txt file ####
    # NOTE(review): str.decode makes this entry point Python 2 only.
    keywords = [line.strip().decode('utf-8') for line in open('test.txt').readlines()]
    # #### Generate one report per keyword (text before the first comma) ####
    for key in keywords:
        data = classify_file('yd.txt', key.split(',')[0])
        write_data(key.split(',')[0], data)
| true |
729e64dfe3df28804381fe23b1396538d1a85474 | Python | E-Sakhno/lab3 | /individual3.py | UTF-8 | 338 | 3.5 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# 17. Составьте программу, которая печатает таблицу сложения натуральных чисел в десятичной
# системе счисления.
for i in range(1, 10):
for j in range(1, 10):
print(i, '+', j, '=', i+j)
| true |
3254d48a3d0633f317d5b3357e446033425ce3cb | Python | wallnerryan/floodlight | /apps/qos/qospath.py~ | UTF-8 | 7,517 | 2.734375 | 3 | [
"Apache-2.0"
] | permissive | #! /usr/bin/python
"""
QoSPath.py ---------------------------------------------------------------------------------------------------
Developed By: Ryan Wallner (ryan.wallner1@marist.edu)
Add QoS to a specific path in the network. Utilized circuit pusher developed by KC Wang
[Note]
*circuitpusher.py is needed in the same directory for this application to run
succesfully!
USAGE:
qospath.py <add> --qos-path <name> <source-ip> <dest-ip> <policy-object> <controller-ip> <port>
qospath.py <delete> --qos-path <name> <controller-ip> <port>
*note: This adds the Quality of Service to each switch along the path between hosts
*note Policy object can exclude the "sw" ,"enqueue-port" parameters and
"ip-src", "ip-dst" and "ingress-port" match parameters.
They will be modified based on the route anyway.
[author] - rjwallner
-----------------------------------------------------------------------------------------------------------------------
"""
import sys
import os
import time
import simplejson #used to process policies and encode/decode requests
import subprocess #spawning subprocesses
##Get switches in a circuit using circuitpusher (may need to modify to get all switches in path)
##Then use the add policy to a EACH switch in in circuit using QoSPusher to add a policy along a path.
def main():
    """Dispatch on the command line: help, add (9 args) or delete (6 args).

    NOTE(review): sys.argv[1] ("add"/"delete") and sys.argv[2]
    ("--qos-path") are never inspected; only the argument count selects
    the action.
    """
    #checks
    if (len(sys.argv) == 2):
        if sys.argv[1] == "--help" or sys.argv[1] == "help" or sys.argv[1] == "--h" :
            usage_help()
            exit()
    if (len(sys.argv)) == 9:
        p_name = sys.argv[3]
        src = sys.argv[4]
        dst = sys.argv[5]
        pol = sys.argv[6]
        c_ip = sys.argv[7]
        prt = sys.argv[8]
        add(p_name,src,dst,pol,c_ip,prt)
        exit()
    if (len(sys.argv)) == 6:
        p_name = sys.argv[3]
        c_ip = sys.argv[4]
        prt = sys.argv[5]
        delete(p_name,c_ip,prt)
        exit()
    else:
        usage()
        exit()
def add(name, ip_src, ip_dst, p_obj, c_ip, port):
    """Create a circuit between two hosts and apply the QoS policy on every
    switch along it.

    Relies on circuitpusher.py and qosmanager.py being present in the
    current working directory; the switches of the new circuit are read
    back from circuits.json.

    NOTE(review): shell commands are built by string interpolation with
    shell=True, so untrusted arguments could inject shell commands; also
    the circuits.json file handle (c_data) is never closed.
    """
    print "Trying to create a circuit from host %s to host %s..." % (ip_src, ip_dst)
    c_pusher = "circuitpusher.py"
    qos_pusher = "qosmanager.py"
    pwd = os.getcwd()
    print pwd
    try:
        if (os.path.exists("%s/%s" % (pwd,c_pusher))) and (os.path.exists("%s/%s" % (pwd,qos_pusher))):
            print "Necessary tools confirmed.. %s , %s" % (c_pusher,qos_pusher)
        else:
            print "%s/%s does not exist" %(pwd,c_pusher)
            print "%s/%s does not exist" %(pwd,qos_pusher)
    except ValueError as e:
        print "Problem finding tools...%s , %s" % (c_pusher,qos_pusher)
        print e
        exit(1)
    #first create the circuit and wait to json to pupulate
    print "create circuit!!!"
    try:
        cmd = "--controller=%s:%s --type ip --src %s --dst %s --add --name %s" % (c_ip,port,ip_src,ip_dst,name)
        print './circuitpusher.py %s' % cmd
        c_proc = subprocess.Popen('./circuitpusher.py %s' % cmd, shell=True)
        print "Process %s started to create circuit" % c_proc.pid
        #wait for the circuit to be created
        c_proc.wait()
    except Exception as e:
        print "could not create circuit, Error: %s" % str(e)
    try:
        subprocess.Popen("cat circuits.json",shell=True).wait()
    except Exception as e:
        print "Error opening file, Error: %s" % str(e)
        #cannot continue without file
        exit()
    print "Opening circuits.json in %s" % pwd
    try:
        circs = "circuits.json"
        c_data = open(circs)
    except Exception as e:
        print "Error opening file: %s" % str(e)
    #load data into json format
    print "Creating a QoSPath from host %s to host %s..." % (ip_src, ip_dst)
    # presumably gives circuitpusher time to finish writing circuits.json — TODO confirm
    time.sleep(5)
    for line in c_data:
        data = simplejson.loads(line)
        if data['name'] != name:
            continue
        else:
            sw_id = data['Dpid']
            in_prt = data['inPort']
            out_prt = data['outPort']
            print"QoS applied to this switch for circuit %s" % data['name']
            print "%s: in:%s out:%s" % (sw_id,in_prt,out_prt)
            p = simplejson.loads(p_obj)
            #add necessary match values to policy for path
            p['sw'] = sw_id
            p['name'] = name+"."+sw_id
            #p['ingress-port'] = str(in_prt)
            p['ip-src'] = ip_src
            p['ip-dst'] = ip_dst
            keys = p.keys()
            l = len(keys)
            queue = False
            service = False
            # Exactly one of 'queue' / 'service' must be present in the policy.
            for i in range(l):
                if keys[i] == 'queue':
                    queue = True
                elif keys[i] == 'service':
                    service = True
            if queue and service:
                polErr()
            elif queue and not service:
                p['enqueue-port'] = str(out_prt)
                pol = str(p)
                print "Adding Queueing Rule"
                sjson = simplejson.JSONEncoder(sort_keys=False,indent=3).encode(p)
                print sjson
                cmd = "./qosmanager.py add policy '%s' %s %s" % (sjson,c_ip,port)
                p = subprocess.Popen(cmd, shell=True).wait()
            elif service and not queue:
                print "Adding Type of Service"
                sjson = simplejson.JSONEncoder(sort_keys=False,indent=3).encode(p)
                print sjson
                cmd = "./qosmanager.py add policy '%s' %s %s" % (sjson,c_ip,port)
                p = subprocess.Popen(cmd, shell=True).wait()
            else:
                polErr()
def polErr():
    """Explain that a policy must define exactly one of 'service'/'queue'."""
    print """Your policy is not defined right, check to
    make sure you have a service OR a queue defined"""
def delete(name,c_ip,port):
    """Tear down the named circuit and delete every QoS policy whose name
    contains the path name.

    Policies are discovered by scraping the JSON array out of the text that
    "qosmanager.py list policies" prints.
    """
    print "Trying to delete QoSPath %s" % name
    # circuitpusher --controller {IP:REST_PORT} --delete --name {CIRCUIT_NAME}
    try:
        print "Deleting circuit"
        cmd = "./circuitpusher.py --controller %s:%s --delete --name %s" % (c_ip,port,name)
        subprocess.Popen(cmd,shell=True).wait()
    except Exception as e:
        print "Error deleting circuit, Error: %s" % str(e)
        exit()
    qos_s = os.popen("./qosmanager.py list policies %s %s" %(c_ip,port)).read()
    # Slice from the second '[' through the last ']' to isolate the JSON array.
    qos_s = qos_s[qos_s.find("[",qos_s.find("[")+1):qos_s.rfind("]")+1]
    #print qos_s
    data = simplejson.loads(qos_s)
    sjson = simplejson.JSONEncoder(sort_keys=False,indent=3).encode(data)
    jsond = simplejson.JSONDecoder().decode(sjson)
    #find policies that start with "<pathname>."
    l = len(jsond)
    for i in range(l):
        n = jsond[i]['name']
        if name in n:
            pol_id = jsond[i]['policyid']
            try:
                cmd = "./qosmanager.py delete policy '{\"policy-id\":\"%s\"}' %s %s " % (pol_id,c_ip,port)
                print cmd
                subprocess.Popen(cmd,shell=True).wait()
            except Exception as e:
                print "Could not delete policy in path: %s" % str(e)
print "Could not delete policy in path: %s" % str(e)
def usage():
    """Print the brief usage banner shown on malformed invocations."""
    print '''type "qospath.py --help" for more details
    #qospath.py <add> --qos-path <name> <source-ip> <dest-ip> <policy-object> <controller-ip> <port>
    #qospath.py <delete> --qos-path <name> <controller-ip> <port>
    *Policy object can exclude the "sw" ,"enqueue-port" parameters and
    "ip-src", "ip-dst" and "ingress-port" match parameters.
    They will be modified based on the route anyway.'''
def usage_help():
    """Print the long help text shown for --help/help/--h."""
    print '''
    ###################################
    QoSPath.py
    Author: Ryan Wallner (Ryan.Wallner1@marist.edu)
    QoSPath is a simple service that utilizes KC Wang's
    CircuitPusher to push Quality of Service along a
    specific path in the network.
    To add a QoS Path with a Policy
    *note other match fields can be added to the policy object
    qospath.py add --qos-path Path-Name 10.0.0.1 10.0.0.2 '{"queue":"2"}' 127.0.0.1 8080
    qospath.py add --qos-path Path-Name 10.0.0.1 10.0.0.2 '{"service":"Best Effort"}' 127.0.0.1 8080
    To delete a QoS Path
    qospath.py delete --qos-path "Path-Name" 127.0.0.1 8080
    ###################################
    '''
# Call main() only when run as a script.
if __name__ == "__main__" :
    main()
| true |
f32d23a9bd98eceb9400eddd4d114982aa8e29ef | Python | gitgeorgez/Python-Exercises-Oct-2016 | /4.py | UTF-8 | 639 | 3.03125 | 3 | [] | no_license | """
P15040 GEORGE ZERVOLEAS
1/10/2016
THEMA 4
PROGRAMMA TO OPOIO ALLAZEI pairnei apo ton xristi Onoma tainias kai epistrefei
a. tin vathmologia
b. ta braveia
"""
import json
import urllib,urllib2
url = "http://omdbapi.com/?t=" #only submitting the title parameter
movieTitle = raw_input('Dwse tainia: ')  # Greek prompt: "Enter a movie" (Python 2 input)
url_data = url + urllib.quote(movieTitle)  # URL-encode the user-supplied title
web_url = urllib2.urlopen(url_data)
if web_url.getcode() == 200:
    data = web_url.read()
    movies = json.loads(data)
else:
    print('Received Error')
# NOTE(review): on a non-200 response "movies" is never assigned, so the
# two prints below raise NameError instead of exiting cleanly.
print('Bathmologia Tainias: ' + movies['imdbRating'])
print ('Braveia Tainias: ' + movies['Awards'])
| true |
6dfa8e60fb1db436559efe0a811a23a4cf91b7a4 | Python | twosigma/Cook | /executor/cook/io_helper.py | UTF-8 | 2,095 | 3.9375 | 4 | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | #!/usr/bin/env python3
"""This module ensures atomic writes to stdout."""
import logging
import sys
from threading import Lock
import os
__stdout_lock__ = Lock()
def print_to_buffer(lock, buffer, data, flush=False, newline=True):
    """Serialize a write of *data* to *buffer* while holding *lock*.

    Parameters
    ----------
    lock: threading.Lock
        Lock held for the whole write so concurrent callers cannot interleave.
    buffer: byte buffer
        Destination stream; must accept bytes.
    data: string or bytes
        Payload; strings are UTF-8 encoded before writing.
    flush: boolean
        When True, flush the buffer after writing.
    newline: boolean
        When True, append the platform line separator.

    Returns
    -------
    Nothing.
    """
    payload = data.encode() if isinstance(data, str) else data
    with lock:
        buffer.write(payload)
        if newline:
            buffer.write(os.linesep.encode())
        if flush:
            buffer.flush()
def print_out(data, flush=False, newline=True):
    """Wrapper function that prints to stdout in a thread-safe manner using the __stdout_lock__ lock.

    Parameters
    ----------
    data: string or bytes
        The data to output
    flush: boolean
        Flag determining whether to flush sys.stdout's underlying buffer
    newline: boolean
        Flag determining whether to output a newline at the end

    Returns
    -------
    Nothing.
    """
    print_to_buffer(__stdout_lock__, sys.stdout.buffer, data, flush=flush, newline=newline)
def print_and_log(string_data, newline=True):
    """Wrapper function that prints and flushes to stdout in a locally thread-safe manner ensuring newline at the start.
    The function also outputs the same message via logging.info().

    Parameters
    ----------
    string_data: string
        The string to output (a platform line separator is prepended to the
        stdout copy; the logged copy is the message unchanged)
    newline: boolean
        Flag determining whether to output a newline at the end

    Returns
    -------
    Nothing.
    """
    print_out('{}{}'.format(os.linesep, string_data), flush=True, newline=newline)
    logging.info(string_data)
| true |
6886ad8ccd3750b4690fc3e7a7162bb4c0b32edf | Python | sobriquette/interview-practice-problems | /Code Challenges/hackbright.py | UTF-8 | 4,546 | 4.75 | 5 | [] | no_license | """
CODING CHALLENGE:
Write a function that, when given a string as input, can output the indices of the farthest apart matching characters. Here are some example scenarios.
Input: 'yabcdey'
Output: [0, 6]
Explanation: Since the only matching characters are 'y', and 'y', we return the two places where 'y' appears in the string: at index 0 and index 6.
Input: 'yabcyzdefgz'
Output: [5, 10]
Explanation: There are 2 possible matching characters here-- 'y' and 'z'. The distance between the matching 'y' characters is 3.
The distance between the matching 'z' characters is 4. So, 'z' wins! We output 5 (the index of the first 'z') and 10 (the index of the second 'z').
Input: 'abc'
Output: [None, None]
Explanation: There are no matching characters here. So, a sane output would be [None, None].
NOT SURE WHERE TO START?
Start by writing a function that, when given a string as input, can return True or False depending on whether there are two matching characters in the string.
Then, tackle the aspect of getting the indices of those two matching characters.
Lastly, finish off by keeping track of the maximum distance between two matching characters.
"""
class Solution1():
    """Locate the farthest-apart pair of equal characters in a string.

    Works in two passes: first tally repeat counts per character, then scan
    the string tracking the first and latest index of every repeated
    character, keeping whichever pair spans the widest distance.
    """

    def find_furthest_matching_characters(self, string):
        """Return [i, j] for the widest-spanning matching pair, or
        [None, None] when the string is empty or nothing repeats."""
        if not string:
            return [None, None]

        match_counts = self.get_matching_characters(string)
        if sum(match_counts.values()) < 1:
            # Every character is unique — no pair exists.
            return [None, None]

        return self.get_distances_of_matching_characters(string, match_counts)

    def get_matching_characters(self, string):
        """Map each character to its repeat count (0 = appears once)."""
        counts = {}
        for ch in string:
            counts[ch] = counts[ch] + 1 if ch in counts else 0
        return counts

    def get_distances_of_matching_characters(self, string, characters_with_matches):
        """Return the indices of the widest-spanning repeated character."""
        best = [-1, -1]
        first_last = {}  # char -> [first index seen, latest repeat index]

        for idx, ch in enumerate(string):
            # Skip characters that never repeat.
            if characters_with_matches[ch] <= 0:
                continue
            if ch not in first_last:
                # First sighting: remember where this character starts.
                first_last[ch] = [idx, None]
                continue
            # Repeat sighting: extend this character's span and compare.
            first_last[ch][1] = idx
            span = first_last[ch][1] - first_last[ch][0]
            if span > best[1] - best[0]:
                best = [first_last[ch][0], first_last[ch][1]]

        return best
class Solution2():
    """Locate the farthest-apart pair of equal characters in a string.

    Alternative strategy: record every index at which each character
    appears, then compare first-vs-last occurrence spans per character.
    """

    def find_furthest_matching_characters(self, string):
        """Return [i, j] for the widest-spanning matching pair, or
        [None, None] when the string is empty or nothing repeats."""
        if not string:
            return [None, None]

        positions = self.get_indices_for_char_appearances(string)
        best = [-1, -1]

        for indices in positions.values():
            # A character needs at least two occurrences to form a pair.
            if len(indices) < 2:
                continue
            # Farthest pair for this character: its first and last index.
            span = indices[-1] - indices[0]
            if span > best[1] - best[0]:
                best = [indices[0], indices[-1]]

        # best unchanged from the sentinel means no character repeated.
        if sum(best) > 0:
            return best
        return [None, None]

    def get_indices_for_char_appearances(self, string):
        """Map each character to the list of indices where it appears."""
        positions = defaultdict(list)
        for idx, ch in enumerate(string):
            positions[ch].append(idx)
        return positions
# NOTE(review): this import sits below the classes that use defaultdict; it
# still works because the name is only resolved when Solution2 runs, but by
# convention it belongs at the top of the file.
from collections import defaultdict
# Interactive driver: read strings until the user types "q", printing the
# answer from both implementations so they can be compared side by side.
if __name__ == "__main__":
    while True:
        string = input("Enter a string: ")
        # Sentinel "q" ends the session.
        if string == "q":
            break
        print(Solution1().find_furthest_matching_characters(string))
        print(Solution2().find_furthest_matching_characters(string))
| true |
488e782f7e4b0b26f10af48ed52f3c2d26b280f5 | Python | IgorMiyamoto/IA-ep02-csp | /src/satisfacao_restricoes.py | UTF-8 | 7,459 | 3.015625 | 3 | [] | no_license | from UI import bcolors
class Restricao():
    """Base constraint: stores the variables it covers and accepts every
    assignment.  Concrete constraints subclass it and override
    esta_satisfeita with a real check."""
    def __init__(self, variaveis):
        # Variables this constraint applies to.
        self.variaveis = variaveis
    def esta_satisfeita(self, atribuicao):
        # The base class is always satisfied.
        return True
class SatisfacaoRestricoes():
    """CSP core: the variables, their domains, and a per-variable list of
    constraints, plus a plain depth-first backtracking solver."""
    def __init__(self, variaveis, dominios):
        # Variables to be constrained and the domain of each one.
        self.variaveis = variaveis
        self.dominios = dominios
        # Every variable starts with an empty constraint list and must come
        # with a domain.
        self.restricoes = {}
        for variavel in self.variaveis:
            self.restricoes[variavel] = []
            if variavel not in self.dominios:
                raise LookupError("Cada variávei precisa de um domínio")
    def adicionar_restricao(self, restricao):
        """Attach a constraint to every variable it mentions."""
        for variavel in restricao.variaveis:
            if variavel not in self.variaveis:
                raise LookupError("Variável não definida previamente")
            self.restricoes[variavel].append(restricao)
    def esta_consistente(self, variavel, atribuicao):
        """True when every constraint on `variavel` accepts the assignment."""
        return all(r.esta_satisfeita(atribuicao) for r in self.restricoes[variavel])
    def busca_backtracking(self, atribuicao = {}):
        """Depth-first backtracking search; returns a complete assignment
        dict, or None when no consistent completion exists."""
        # Success: every variable already has a value.
        if len(atribuicao) == len(self.variaveis):
            return atribuicao
        # Work on the first variable that still lacks a value.
        pendentes = [v for v in self.variaveis if v not in atribuicao]
        alvo = pendentes[0]
        for valor in self.dominios[alvo]:
            tentativa = atribuicao.copy()
            tentativa[alvo] = valor
            # Consistent so far: report progress and recurse.
            if self.esta_consistente(alvo, tentativa):
                print(tentativa)
                resultado = self.busca_backtracking(tentativa)
                if resultado is not None:
                    return resultado
        print(f"{bcolors.WARNING}back{bcolors.ENDC}")
        return None
class SatisfacaoRestricoesFowardChecking(SatisfacaoRestricoes):
    """Backtracking search extended with forward checking, plus MRV and MCV
    variable-ordering heuristics.  (The class name keeps the original
    "Foward" spelling because callers refer to it.)"""
    def forward_checking(self, variaveis_nao_atribuida, atribuicao_local, dominio_local):
        """Prune each listed variable's domain down to the values still
        consistent with atribuicao_local.  Returns the pruned domain dict,
        or None when some variable ends up with an empty domain (dead end).
        NOTE(review): the caller still includes the variable it just
        assigned in variaveis_nao_atribuida, so its domain is re-filtered
        too — harmless, but wasted work."""
        for var_nao_att in variaveis_nao_atribuida:
            dominio_novo = dominio_local[var_nao_att].copy()
            for opcao in dominio_local[var_nao_att]:
                # Tentatively extend the assignment with this candidate.
                atribuicao_local_2 = atribuicao_local.copy()
                atribuicao_local_2[var_nao_att] = opcao
                if not self.esta_consistente(var_nao_att, atribuicao_local_2):
                    dominio_novo.remove(opcao)
            dominio_local[var_nao_att] = dominio_novo
            # An emptied domain means no value can work under this branch.
            if len(dominio_local[var_nao_att]) == 0:
                return None
        return dominio_local
    def busca_backtracking_foward_checking(self, atribuicao = {}, dominios = {}):
        """Backtracking with forward checking; returns a complete assignment
        dict or None."""
        # Success: every variable has been assigned.
        if len(atribuicao) == len(self.variaveis):
            return atribuicao
        # An empty domain dict marks the first call: use the full domains.
        if dominios == {} :
            dominios = self.dominios
        # Collect the variables that still lack a value.
        variaveis_nao_atribuida = [v for v in self.variaveis if v not in atribuicao]
        primeira_variavel = variaveis_nao_atribuida[0]
        for valor in dominios[primeira_variavel]:
            atribuicao_local = atribuicao.copy()
            dominio_local = dominios.copy()
            variaveis_nao_atribuida_local = variaveis_nao_atribuida.copy()
            atribuicao_local[primeira_variavel] = valor
            # Consistent so far: prune domains and recurse.
            if self.esta_consistente(primeira_variavel, atribuicao_local):
                print(atribuicao_local)
                novo_dominio = self.forward_checking(variaveis_nao_atribuida_local, atribuicao_local, dominio_local)
                resultado = None
                if novo_dominio != None:
                    resultado = self.busca_backtracking_foward_checking(atribuicao_local, novo_dominio)
                # Stop backtracking as soon as a full solution is found.
                if resultado is not None:
                    return resultado
        print(f"{bcolors.WARNING}back{bcolors.ENDC}")
        return None
    # Minimum remaining values
    def busca_backtracking_foward_checking_MRV(self, atribuicao = {}, dominios = {}):
        """Same search as above, but assigns the unassigned variable with the
        fewest remaining domain values first (MRV heuristic)."""
        # Success: every variable has been assigned.
        if len(atribuicao) == len(self.variaveis):
            return atribuicao
        # An empty domain dict marks the first call: use the full domains.
        if dominios == {} :
            dominios = self.dominios
        # Collect the variables that still lack a value.
        variaveis_nao_atribuida = [v for v in self.variaveis if v not in atribuicao]
        # MRV: order the unassigned variables by ascending domain size
        # (sorted() is stable, so ties keep their original order).
        aux = [(v,len(dominios[v])) for v in variaveis_nao_atribuida] #[(var, dom)]
        aux = sorted(aux,key=lambda aux : aux[1])
        variaveis_nao_atribuida = keyToList(aux)
        # end MRV
        primeira_variavel = variaveis_nao_atribuida[0]
        for valor in dominios[primeira_variavel]:
            atribuicao_local = atribuicao.copy()
            dominio_local = dominios.copy()
            variaveis_nao_atribuida_local = variaveis_nao_atribuida.copy()
            atribuicao_local[primeira_variavel] = valor
            # Consistent so far: prune domains and recurse.
            if self.esta_consistente(primeira_variavel, atribuicao_local):
                print(atribuicao_local)
                novo_dominio = self.forward_checking(variaveis_nao_atribuida_local, atribuicao_local, dominio_local)
                resultado = None
                if novo_dominio != None:
                    resultado = self.busca_backtracking_foward_checking_MRV(atribuicao_local, novo_dominio)
                # Stop backtracking as soon as a full solution is found.
                if resultado is not None:
                    return resultado
        print(f"{bcolors.WARNING}back{bcolors.ENDC}")
        return None
    # MostContraining Values
    def busca_backtracking_foward_checking_MCV(self, atribuicao = {}, dominios = {}):
        """Same search, but assigns the unassigned variable involved in the
        most constraints first (most-constraining-variable heuristic).
        NOTE(review): this counts all constraints registered on a variable,
        not only those touching other unassigned variables — confirm that
        matches the intended heuristic."""
        # Success: every variable has been assigned.
        if len(atribuicao) == len(self.variaveis):
            return atribuicao
        # An empty domain dict marks the first call: use the full domains.
        if dominios == {} :
            dominios = self.dominios
        # Collect the variables that still lack a value.
        variaveis_nao_atribuida = [v for v in self.variaveis if v not in atribuicao]
        # MCV: order the unassigned variables by descending constraint count.
        aux = [(v,len(self.restricoes[v])) for v in variaveis_nao_atribuida] #[(var, res)]
        aux = sorted(aux,key=lambda aux : aux[1],reverse=True)
        print(aux)
        variaveis_nao_atribuida = keyToList(aux)
        # end MCV
        primeira_variavel = variaveis_nao_atribuida[0]
        for valor in dominios[primeira_variavel]:
            atribuicao_local = atribuicao.copy()
            dominio_local = dominios.copy()
            variaveis_nao_atribuida_local = variaveis_nao_atribuida.copy()
            atribuicao_local[primeira_variavel] = valor
            # Consistent so far: prune domains and recurse.
            if self.esta_consistente(primeira_variavel, atribuicao_local):
                print(atribuicao_local)
                novo_dominio = self.forward_checking(variaveis_nao_atribuida_local, atribuicao_local, dominio_local)
                resultado = None
                if novo_dominio != None:
                    resultado = self.busca_backtracking_foward_checking_MCV(atribuicao_local, novo_dominio)
                # Stop backtracking as soon as a full solution is found.
                if resultado is not None:
                    return resultado
        print(f"{bcolors.WARNING}back{bcolors.ENDC}")
        return None
def keyToList(values):
return [v[0] for v in values] | true |
20b24f09ead64a17493b21acd5da21a928f12b2d | Python | edemaine/pegen | /tests/test_grammar_visitor.py | UTF-8 | 1,996 | 2.75 | 3 | [
"MIT"
] | permissive | from typing import Any
from pegen.grammar import GrammarVisitor
from pegen.grammar_parser import GeneratedParser as GrammarParser
from tests.utils import parse_string
class Visitor(GrammarVisitor):
    """GrammarVisitor subclass that simply counts every node it visits."""
    def __init__(self) -> None:
        # Number of nodes seen so far.
        self.n_nodes = 0
    def visit(self, node: Any, *args: Any, **kwargs: Any) -> None:
        # Count this node, then let the base class recurse into its children.
        self.n_nodes += 1
        super().visit(node, *args, **kwargs)
def test_parse_trivial_grammar() -> None:
    """A one-rule grammar yields exactly 6 visited nodes."""
    grammar = """
    start: 'a'
    """
    parsed_rules = parse_string(grammar, GrammarParser)
    node_counter = Visitor()
    node_counter.visit(parsed_rules)
    # Grammar/Rule/Rhs/Alt/NamedItem/StringLeaf -> 6 nodes.
    assert node_counter.n_nodes == 6
def test_parse_or_grammar() -> None:
    """Two rules, the second with two alternatives, yield 14 visited nodes."""
    grammar = """
    start: rule
    rule: 'a' | 'b'
    """
    parsed_rules = parse_string(grammar, GrammarParser)
    node_counter = Visitor()
    node_counter.visit(parsed_rules)
    # Grammar/Rule/Rhs/Alt/NamedItem/NameLeaf contributes 6 nodes,
    # the second Rule/Rhs contributes 2 more, and each of the two
    # Alt/NamedItem/StringLeaf chains contributes 3 (6 + 2 + 3 + 3 = 14).
    assert node_counter.n_nodes == 14
def test_parse_repeat1_grammar() -> None:
    """A one-or-more repetition adds a Repeat1 node: 7 nodes in total."""
    grammar = """
    start: 'a'+
    """
    parsed_rules = parse_string(grammar, GrammarParser)
    node_counter = Visitor()
    node_counter.visit(parsed_rules)
    # Grammar/Rule/Rhs/Alt/NamedItem/Repeat1/StringLeaf -> 7 nodes.
    assert node_counter.n_nodes == 7
def test_parse_repeat0_grammar() -> None:
    """A zero-or-more repetition adds a Repeat0 node: 7 nodes in total."""
    grammar = """
    start: 'a'*
    """
    parsed_rules = parse_string(grammar, GrammarParser)
    node_counter = Visitor()
    node_counter.visit(parsed_rules)
    # Grammar/Rule/Rhs/Alt/NamedItem/Repeat0/StringLeaf -> 7 nodes.
    assert node_counter.n_nodes == 7
def test_parse_optional_grammar() -> None:
    """An optional group wraps a whole Rhs subtree: 12 nodes in total."""
    grammar = """
    start: 'a' ['b']
    """
    parsed_rules = parse_string(grammar, GrammarParser)
    node_counter = Visitor()
    node_counter.visit(parsed_rules)
    # Grammar/Rule/Rhs/Alt/NamedItem/StringLeaf contributes 6 nodes and the
    # NamedItem/Opt/Rhs/Alt/NamedItem/StringLeaf chain contributes 6 more.
    assert node_counter.n_nodes == 12
| true |
43dafb47fdafc264cf0fcb4908ff92afc770b2e6 | Python | isyoung/PE | /PE_P60.py | UTF-8 | 1,186 | 3.421875 | 3 | [] | no_license |
UPPER_BOUND = 10 ** 4
NB_TO_CHOOSE = 5
def is_prime(n):
factor = 2
while factor * factor <= n:
if n % factor == 0:
return False
factor += 1
return True
def is_valid_pair(n, m):
return is_prime(int(str(n) + str(m))) and is_prime(int(str(m) + str(n)))
prime_list = []
prime = [True] * UPPER_BOUND
for i in range(2, UPPER_BOUND):
if prime[i]:
prime_list.append(i)
for j in range(i + i, UPPER_BOUND, i):
prime[j] = False
nb_primes = len(prime_list)
def check_comb(nb_chosen, indices):
if nb_chosen == NB_TO_CHOOSE:
for i in indices:
print(prime_list[i], end = " ")
print()
start = -1
if nb_chosen > 0:
start = indices[nb_chosen - 1]
for i in range(start + 1, nb_primes):
valid = True
for j in range(nb_chosen):
if not is_valid_pair(prime_list[i], prime_list[indices[j]]):
valid = False
break
if valid:
indices[nb_chosen] = i
check_comb(nb_chosen + 1, indices)
indices = [-1] * NB_TO_CHOOSE
check_comb(0, indices)
| true |
4c7fab682453f8604a96932b4dc9406ac90bb3aa | Python | Shreeasish/pledgerize-reboot | /hoisting/investigations/libc/symbols/compare.py | UTF-8 | 310 | 2.6875 | 3 | [] | no_license | with open("found_functions") as found_functionsf:
ffunctions = found_functionsf.read().splitlines()
# De-duplicate the function names found by the scan.
ffunctions= set(ffunctions)
# Load the exported symbol list and de-duplicate it as well.
with open("Symbols.list") as symbolsf:
    Symbols = symbolsf.read().splitlines()
    Symbols = set(Symbols)
# NOTE(review): the two bare len() calls below compute values and discard
# them — apparently leftovers from an interactive (REPL) session; they have
# no effect when this file is run as a script.
len(Symbols)
len(ffunctions)
# Symbols that were exported but never seen among the scanned functions.
rem = Symbols.difference(ffunctions)
| true |
80d0c718919d40592a09f14420257f15aa0885aa | Python | heitorchang/learn-code | /checkio/forum/all_the_same.py | UTF-8 | 320 | 3.34375 | 3 | [
"MIT"
] | permissive | """
In this mission you should check if all elements in the given list are equal.
Input: List.
Output: Bool.
The idea for this mission was found on Python Tricks series by Dan Bader
"""
def all_the_same(elements):
    """Return True when every element of the list is equal (an empty list
    counts as True).

    The fast path builds a set, which requires hashable elements; for
    unhashable elements (e.g. a list of lists, which the original raised
    TypeError on) we fall back to comparing everything against the first
    element.
    """
    try:
        return len(set(elements)) < 2
    except TypeError:
        # Unhashable elements: linear comparison against the first one.
        return all(element == elements[0] for element in elements[1:])
| true |
55fbc6f3140a9150bbe305af8522b7b80604681a | Python | MichaelKipp/MovieModeling | /DataChunking.py | UTF-8 | 2,337 | 2.78125 | 3 | [] | no_license | import timeit, sys, io
# NOTE(review): the .decode(...) calls further below only exist on byte
# strings, so this script targets Python 2 — confirm before running on 3.x.
start_time = timeit.default_timer()
# Pre-size one slot per possible line id (ids observed reach ~670k).
lines = [[] for x in range (670000)]
movies = {}
conjs = {}
# Create conjunction lookup (word -> expansion, e.g. contractions).
with open('conjunctions.csv') as conjunctions:
    for line in conjunctions:
        line = line.split(",")
        conjs[str(line[0])] = line[1].strip()
# Create movie lookup: id ("m0", "m1", ...) -> file-system-safe title.
with open('Data/movie_titles_metadata.txt') as movieList:
    for line in movieList:
        curLine = line.split('+++$+++')
        # Sanitise the title so it can be used as a file name.
        curLine[1] = curLine[1].strip()
        curLine[1] = curLine[1].replace(':', '')
        curLine[1] = curLine[1].replace('"', '')
        curLine[1] = curLine[1].replace('?', '')
        curLine[1] = curLine[1].replace('-', '')
        curLine[1] = curLine[1].replace(' ', '_')
        movies[str(curLine[0].strip())] = curLine[1]
# SPLITS MOVIE LINES INTO INDIVIDUAL FILES
# Creates empty files for each movie
# (movie 114 is skipped throughout — presumably a known-bad entry; confirm).
for filename in range(617):
    if filename != 114:
        with open('LinesByMovie/' + movies['m' + str(filename)] + '.txt', 'w') as movie:
            movie.write('')
def handle_apostraphes(str):
    """Expand contracted words using the module-level conjs lookup.

    The parameter keeps its original name `str` (which shadows the builtin)
    so any keyword-style caller stays valid; internally we work on a local
    instead of reassigning the parameter.
    """
    words = str.split(" ")
    # Replace each word by its expansion when one exists, else keep it.
    return " ".join(conjs.get(word, word) for word in words)
# Takes lines from data and adds to list
with open('Data/movie_lines.txt') as data:
    for line in data:
        singleLine = line.split("+++$+++")
        for i in range(len(singleLine)):
            # Python-2 byte-string round trip that drops non-UTF-8 bytes.
            singleLine[i] = singleLine[i].decode('utf-8', errors='ignore').encode('utf-8').strip()
            singleLine[i] = singleLine[i].lower()
            if "'" in singleLine[i]:
                singleLine[i] = handle_apostraphes(singleLine[i])
            # Strip the <u>...</u> emphasis markup used in the corpus.
            if "u>" in singleLine[i]:
                singleLine[i] = singleLine[i].replace("<u>", "")
                singleLine[i] = singleLine[i].replace("</u>", "")
        # Field 0 is the line id ("L12345"); index the record by its number.
        lines[int(singleLine[0][1:])] = singleLine
# Drop the unused (empty) slots, keeping only real records.
# NOTE(review): under Python 3 filter() returns a one-shot iterator and the
# loop below would exhaust it on the first movie; under Python 2 (which the
# .decode calls above imply) it returns a reusable list.
lines = filter(None, lines)
currentMovie = 0
while currentMovie < 617:
    if currentMovie != 114:
        with open('LinesByMovie/' + movies['m' + str(currentMovie)] + '.txt', 'a') as movie:
            for line in lines:
                if int(line[2][1:]) == currentMovie:
                    movie.write(line[4] + '\n')
    # Bug fix: the increment used to sit inside the `if`/`with` block, so the
    # loop could never advance past movie 114 and hung forever; it must run
    # on every iteration.
    currentMovie += 1
print ("Time taken: " + str(timeit.default_timer() - start_time))
| true |
1d30a1cbb8b604e1284eb8df2f0fbbb58d653e46 | Python | Adisudirta/Project-Pelatihan-Data-Analytic | /main.py | UTF-8 | 476 | 3.46875 | 3 | [] | no_license | # import file vData.py
# Visualisation helper module (vData.py) supplying visualisasi().
import vData
# Navigation menu for the program.
print("Tugas Akhir Pelatihan")
print("======================\n")
print("Menu:")
print("1. Visualisasi data GDP per kapita")
print("2. Visualisasi data HDI (Human Development Index)")
# Robustness fix: a non-numeric entry used to crash with ValueError; map it
# to an out-of-range option so the `else` branch below reports the error.
try:
    answer = int(input('Option: '))
except ValueError:
    answer = -1
if answer == 1:
print('\n')
vData.visualisasi('GDPCAP')
elif answer == 2:
print('\n')
vData.visualisasi('HDI')
else:
print('Pilih opsi yang benar!') | true |
4ab165eb4dcfe6a4a37fe2b92930bdc234fbe3a9 | Python | francislinker/simple_chat_room | /qqClient.py | UTF-8 | 1,897 | 3 | 3 | [] | no_license | import socket
import os
import sys
def main():
    """Chat-room client entry point: connect over UDP, log in, then fork
    into a sender (child) process and a receiver (parent) process."""
    # Read the server IP address and port from the command line.
    if len(sys.argv)<3:
        print('参数错误!')
        return
    address = (sys.argv[1],int(sys.argv[2]))
    # Create the UDP socket.
    client = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
    # Ask for a user name and retry the login request until accepted.
    while True:
        name = input('请输入姓名:')
        message = 'login ' + name  # "login" tag marks the request type
        client.sendto(message.encode(),address)
        data,addr = client.recvfrom(1024)
        if data.decode() == 'OK':
            print('您已经进入聊天室...')
            break
        else:  # entry refused
            # Show the server's reason for refusing entry.
            print(data.decode())
    # Fork: the child sends the user's messages, the parent receives
    # broadcasts from the server.
    pid = os.fork()
    if pid<0:
        sys.exit('创建进程失败!')
    elif pid == 0:
        sendmsg(client,name,address)
    else:
        recvmsg(client)
def sendmsg(client,name,address):
    """Child-process loop: read user input and send it to the server,
    which broadcasts it to every connected client."""
    while True:
        content=input('请发言(输入quit 退出):')
        if content == 'quit':
            message = 'quit ' + name
            client.sendto(message.encode(),address)
            sys.exit('已退出聊天室')  # child process exits here
        # Wrap the text in the "speak <name> <content>" protocol message.
        message = 'speak %s %s' % (name,content)
        client.sendto(message.encode(),address)
def recvmsg(client):
    """Parent-process loop: print every message broadcast by the server."""
    while True:
        try:
            message,addr = client.recvfrom(1024)
        except KeyboardInterrupt:
            sys.exit()
        if message.decode() == 'exit':  # server tells the parent to quit
            os._exit(0)
        # The print overwrites the input prompt, so re-draw it afterwards.
        print(message.decode()+'\n请发言(quit退出):',end='')
if __name__ == "__main__":
main()
| true |
dd61a6aaa46ffac7476f0e4db14dcd7e2c2919b4 | Python | sankalpsagar/mai | /searchanime.py | UTF-8 | 2,908 | 3 | 3 | [] | no_license | from jikanpy import Jikan
import urllib
import subprocess
import textwrap
# Wrap long output at 50 columns, indenting continuations with two tabs.
wrapper=textwrap.TextWrapper(initial_indent='', subsequent_indent='\t'*2, width=50)
# ANSI colour escape codes: reset, red, green.
CEND = '\33[0m'
CRED = '\33[31m'
CGREEN = '\33[32m'
def query_helper(s):
    """Dispatch one user command of the form "mai <verb> [args]"."""
    s = s.lower()
    # Every command must start with the "mai" prefix.
    if s[0:3] != 'mai':
        print(CGREEN + "[Mai] Couldn't find a Mai command anywhere. Use mai help to see how commands to Mai works." + CEND)
        return
    verb = s[4:8]
    if verb == 'exit':
        print(CGREEN + '[Mai] Sayonara!' + CEND)
        raise SystemExit
    if s[4:10] == 'search':
        # Everything after "mai search " is the query string.
        search_anime(s[11:])
    if verb == 'help':
        print(CGREEN + "[Mai] Commands to mai are prefaced by using mai." + CEND)
        print(CGREEN + "mai [help] [exit] [search]" + CEND)
        print(CGREEN + "Use Mai followed by any of the commands." + CEND)
    return
def search_anime(s):
    """Search MyAnimeList (via jikanpy) for `s`, list the hits, then show
    details and cover art for the entry the user picks."""
    # Bug fix: `import urllib` alone does not guarantee that the `request`
    # submodule is loaded; import it explicitly before using it below.
    import urllib.request
    jikan = Jikan()
    results = jikan.search('anime', s)
    result = {}
    print(CGREEN + "[Mai] These are the top results I found." + CEND)
    for idx, resultitems in enumerate(results['results'], start=1):
        print("\t" + CRED + str(idx) + ". " + wrapper.fill(resultitems['title']) +CEND)
        # remember each hit's MAL id so the user can pick it below
        result[idx] = resultitems['mal_id']
    print(CGREEN + "[Mai] Type the index of the anime you want information on or type 0 to exit: " + CEND, end = ' ')
    idx = int(input())
    if idx == 0:
        return
    results = jikan.anime(result[idx])
    print(CGREEN + "[Mai] This is the information I found on the requested anime (press q to exit imageviewer)" + CEND)
    # Download the cover image into a local cache file; the `with` block
    # guarantees the file is closed even if the download fails mid-write.
    with open('cachepic.jpg', 'wb') as f:
        f.write(urllib.request.urlopen(results['image_url']).read())
    # External viewer dependency (feh) — see the original author's note
    # about portability; behaviour kept as-is.
    subprocess.call(["feh", "-x", "cachepic.jpg"])
    title = results['title']
    episodes = results['episodes']
    status = results['status']
    title_syns = results['title_synonyms']  # list of alternative titles
    date = results['aired']['string']
    syn = results['synopsis']
    score = results['score']
    printnicely(title, title_syns, score, status, syn, episodes, date)
def printnicely(t, ts, s, st, syn, ep, d):
    """Pretty-print title, synonyms, score, status, synopsis, episode count
    and air date for one anime."""
    print(CRED + "\tTitle:" + CEND + t)
    print("\tSynonyms: ", end='')
    # Bug fix: the old loop appended the last synonym twice whenever there
    # was more than one entry ("a, b" came out as "a, b, b"); a plain join
    # produces the intended comma-separated list for any length.
    tsstring = ", ".join(ts)
    print(wrapper.fill(tsstring))
    print("\tScore: " + str(s))
    print("\tStatus: " + st)
    print(CGREEN + "\tSynopsis: " + CEND + wrapper.fill(syn))
    print("\tEpisodes: " + str(ep))
    print("\tDate Aired: "+ d)
    return
# Simple REPL used when running this module directly: read one line at a
# time and hand it to the command dispatcher.
if __name__ == '__main__':
    print("Directly executing searchanime function.")
    while(1):
        print("$", end = ' ')
        query = input()
        query_helper(query)
| true |
c44fd718d29e1c0032f2a66bb9e9f02317f914fe | Python | jarvisteach/appJar | /examples/issues/issue144.py | UTF-8 | 547 | 2.6875 | 3 | [
"Apache-2.0"
] | permissive | import sys
sys.path.append("../../")
from appJar import gui
def press(btn):
    """Button callback: open the sub-window matching the pressed button."""
    # Button label -> registered sub-window name.
    targets = {"Grouped": "Grouped", "Not-grouped": "Not Grouped"}
    window_name = targets.get(btn)
    if window_name is not None:
        app.showSubWindow(window_name)
app=gui("Main Window")
app.addLabel("l1", "Main Window")
app.addButtons(["Grouped", "Not-grouped"], press)
app.startSubWindow("Grouped", transient=True)
app.addLabel("g1", "Grouped")
app.stopSubWindow()
app.startSubWindow("Not Grouped", grouped=False, transient=True)
app.addLabel("g2", "Not Grouped")
app.stopSubWindow()
app.go()
| true |
e5fc5357011e47dde99bd13aaa70a609ead17dba | Python | Tigercoll/FTP_socket | /socket/socket_client.py | UTF-8 | 3,048 | 2.96875 | 3 | [] | no_license | #!/usr/bin/env python
#_*_coding:utf-8_*_
__author__ = "Tiger"
import socket
import configparser
import json
import os
class FtpClient(object):
    """Interactive FTP-style client: reads the server address from
    conf.ini, logs in, then dispatches get/put commands typed by the
    user.  Requests travel as JSON-encoded dicts (self.data)."""
    def __init__(self):
        # Load the server address from the conf.ini configuration file.
        conf=configparser.ConfigParser()
        conf.read('conf.ini',encoding='utf-8')
        self.ip=conf.get('ipconfig','ip')
        self.port=conf.getint('ipconfig','port')
        self.client=socket.socket()
        # Protocol message template, re-used and mutated for every request.
        self.data={
            'user':'',
            'password':'',
            'cmd':'',
            'filename':'',
            'size':'',
            'status':True,
        }
    def connect(self):
        # Connect to the server.
        self.client.connect((self.ip,self.port))
    def login(self):
        # Authenticate: send user/password, return the server's numeric
        # reply (0 means success per the original __main__ block).
        self.data['user'] = input ('user:').strip ()
        self.data['password'] = input ('password:').strip ()
        self.client.send(json.dumps(self.data).encode('utf-8'))
        result=self.client.recv(1024)
        return int(result)
    def interactive(self):
        # Main command loop: "<cmd> <filename>" per line.
        while True:
            user_input=input('>>>:').strip()
            if len(user_input)==0:continue
            self.data['cmd'],self.data['filename']=user_input.split()
            # Dynamic dispatch: the command word must match a method name.
            if hasattr(self,self.data['cmd']):
                func=getattr(self,self.data['cmd'])
                func()
            else:
                self.help()
    def help(self):
        # Usage hint.
        print('useage:get | put | help args' )
    def put(self):
        # Upload the named local file to the server.
        if os.path.isfile(self.data['filename']):
            self.data['size']=os.stat(self.data['filename']).st_size
            self.client.send(json.dumps(self.data).encode('utf-8'))
            self.client.recv(1024)
            with open(self.data['filename'],'rb') as f:
                for line in f:
                    self.client.send (line)
            # NOTE(review): this `else` looks mis-indented in this copy —
            # it matches nothing at this level (a for/else in the original
            # source would be valid); confirm against upstream.
            else:
                print('file is success...')
                print(self.data)
        else:
            print(self.data['filename'],' is not exist')
    def get(self):
        # Download the named file from the server.
        self.client.send (json.dumps (self.data).encode ('utf-8'))
        ret=json.loads(self.client.recv(1024).decode())
        if ret['status']:
            # Never overwrite an existing file; add a ".new" suffix instead.
            if os.path.isfile(ret['filename']):
                f=open(ret['filename']+'.new','wb')
            else:
                f=open(ret['filename'],'wb')
            size=ret['size']
            file_size=0
            self.client.send(b'200 ok')
            # Keep receiving until the announced byte count has arrived.
            while file_size<size:
                get_data=self.client.recv(1024)
                f.write(get_data)
                file_size+=len(get_data)
            else:
                print(ret['filename'],'is ok')
        else:
            # Server reports the file does not exist.
            print('文件不存在')
        # NOTE(review): f is unbound when the status branch above did not
        # run, so this close raises NameError for a missing file — should
        # move inside the if-branch (or use `with`); confirm and fix.
        f.close()
if __name__=='__main__':
ftp=FtpClient()
ftp.connect()
if ftp.login()==0:
ftp.interactive()
else:
print('登录失败') | true |
a56f6b60faaa4fe675c885818fc91c45b7694d5a | Python | keyofdeath/Tp-conceprion-objet | /model/banque.py | UTF-8 | 4,835 | 2.671875 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging.handlers
import os
# Module-level logger writing both to a nightly-rotated file (60 backups)
# and to the console, using a shared timestamped format.
PYTHON_LOGGER = logging.getLogger(__name__)
if not os.path.exists("log"):
    os.mkdir("log")
HDLR = logging.handlers.TimedRotatingFileHandler("log/Banque.log",
                                                 when="midnight", backupCount=60)
STREAM_HDLR = logging.StreamHandler()
FORMATTER = logging.Formatter("%(asctime)s %(filename)s [%(levelname)s] %(message)s")
HDLR.setFormatter(FORMATTER)
STREAM_HDLR.setFormatter(FORMATTER)
PYTHON_LOGGER.addHandler(HDLR)
PYTHON_LOGGER.addHandler(STREAM_HDLR)
PYTHON_LOGGER.setLevel(logging.DEBUG)
# Absolute path to the folder location of this python file
FOLDER_ABSOLUTE_PATH = os.path.normpath(os.path.dirname(os.path.abspath(__file__)))
class Banque:
    def __init__(self, *client):
        """
        Constructor.
        :param client: (list of Client) the bank's clients
        """
        self.list_client = client
    def virement(self, rib, montant, numero_carte):
        """
        Perform a transfer between clients of this same bank.
        :param rib: (string) account number to credit
        :param montant: (int) amount to transfer
        :param numero_carte: (string) card number to debit
        :return: (bool) True if the transfer was made, False otherwise
        """
        if self.client_de_banque_rib(rib):
            compte_destination = self.rechercher_compte_rib(rib)
            # The card owner's first account is the one debited.
            compte_expeditaire = self.rechercher_compte_carte(numero_carte)[0]
            if compte_expeditaire.solde_suffisant(montant):
                compte_expeditaire.retrait(montant)
                compte_destination.crediter(montant)
                return True
        return False
    def consultation(self, numero_carte):
        """
        Return the account numbers tied to the given card.
        :param numero_carte: (string) card number
        :return: (list of string) account numbers, or None when there are none
        """
        compte_list = self.rechercher_compte_carte(numero_carte)
        return [compte.obtenir_rib() for compte in compte_list] if compte_list is not None else None
    def consulter_compte(self, numero_compte):
        """
        Look up an account by its number.
        :param numero_compte: (string) account number
        :return: (Compte) the matching account, or None when not found
        """
        for client in self.list_client:
            compte = client.obtenir_compte_rib(numero_compte)
            if compte is not None:
                return compte
        return None
    def rechercher_compte_carte(self, numero_carte):
        """
        Return the accounts of the client who owns the given card.
        :param numero_carte: (string) card number
        :return: (list of Compte) the accounts, or None when no owner is found
        """
        for client in self.list_client:
            if client.proprietaire_de_carte(numero_carte):
                return client.obtenir_liste_compte()
        return None
    def rechercher_carte(self, numero_carte):
        """
        Look up a card object by its number.
        :param numero_carte: (string) card number
        :return: (Carte) the matching card, or None when not found
        """
        for client in self.list_client:
            if client.proprietaire_de_carte(numero_carte):
                return client.obtenir_carte()
        return None
    def client_de_banque_numero_carte(self, numero_carte):
        """
        Tell whether the owner of the given card is a client of this bank.
        :param numero_carte: (string) card number
        :return: (boolean) True when the client belongs to this bank, else False
        """
        for client in self.list_client:
            if client.proprietaire_de_carte(numero_carte):
                return True
        return False
    def client_de_banque_rib(self, rib):
        """
        Tell whether the given RIB belongs to a client of this bank.
        :param rib: (string) the client's RIB / account number
        :return: (boolean) True when the client belongs to this bank, else False
        """
        for client in self.list_client:
            if client.obtenir_compte_rib(rib) is not None:
                return True
        return False
def rechercher_compte_rib(self, rib):
"""
Renvoie les comptes du RIB
:param rib: (string) RIB / numéro de compte du client
:return: (compte) compte associé à ce RIB
"""
for client in self.list_client:
compte = client.obtenir_compte_rib(rib)
if compte is not None:
return compte
return None
| true |
2847f587757b5238446e676c447442aedf7e8aca | Python | MattFrankowski/algorithms | /graph/dijkstra.py | UTF-8 | 2,026 | 3.46875 | 3 | [] | no_license | from graph import Graph, Node
from math import inf
class Dijkstra:
    """Dijkstra shortest-path runner over the project's Graph of weighted
    nodes; prints the found path on a 6x6 character grid."""
    def __init__(self):
        self.graph = Graph()    # graph loaded later from a data file
        self.coveredGraph = []  # ids of nodes already finalised (visited set)
        self.graph_start = 0    # index of the start node (cost == 0)
    def loadGraph(self, path):
        # Delegate file parsing to the Graph class.
        self.graph.loadNodes(path)
    def findStartingNode(self):
        # The first node whose cost is 0 is taken as the start.
        for i in range(len(self.graph.nodes)):
            if(self.graph.nodes[i].cost == 0):
                self.graph_start = i
                break
    def minDistance(self, index):
        # Pick the unvisited node with the smallest tentative distance.
        # NOTE(review): `min` shadows the builtin here, and minIndex is
        # unbound (NameError) if no unvisited node has distance < inf —
        # confirm the input grids always leave a reachable candidate.
        min = inf
        for node in self.graph.nodes:
            if node.distance < min and node.id not in self.coveredGraph:
                min = node.distance
                minIndex = node.id
        return minIndex
    def dijkstra(self):
        # Standard relaxation loop; stops once a second cost-0 node (the
        # goal) is finalised.
        u = self.graph_start
        self.graph.nodes[u].distance = 0
        self.coveredGraph.append(u)
        for i in self.graph.nodes:
            # Relax every edge leaving the current node.
            for v in self.graph.nodes[u].neighbors:
                alt = self.graph.nodes[u].distance + self.graph.nodes[v].cost
                if alt < self.graph.nodes[v].distance:
                    self.graph.nodes[v].distance = alt
                    self.graph.nodes[v].last_node = u
            u = self.minDistance(u)
            self.coveredGraph.append(u)
            if self.graph.nodes[u].cost == 0:
                self.print_path(u)
                break
    def print_path(self, finish):
        # Walk predecessor links back to the start, collecting path node ids.
        current = finish
        path = set()
        while current != self.graph_start:
            path.add(current)
            current = self.graph.nodes[current].last_node
        path.add(current)
        # Render the 36 nodes as a 6x6 grid; path cells show their cost.
        for i in range(36):
            if i in path:
                print(self.graph.nodes[i].cost, end="")
            else:
                print(' ', end='')
            if i % 6 == 5:
                print()
# Run the solver over the three bundled sample grids.
if __name__ == '__main__':
    for i in range(3):
        print(f"Data sample {i + 1}")
        dijkstra = Dijkstra()
        dijkstra.loadGraph(f"data_example{i+1}.txt")
        dijkstra.findStartingNode()
        dijkstra.dijkstra()
| true |
14a7aac2d33ccf01df2116f1a390b3e0bef186b9 | Python | blacktruth513/KCCIST | /[5] 빅데이터 처리시스템 개발/pythonProject Ver10.12/SHOPPING MALL 2020-10-11/MemberJoin.py | UTF-8 | 3,561 | 2.984375 | 3 | [] | no_license | import pymysql
from tkinter import *
from tkinter import messagebox
import tkinter as tk
def memberManagement(conn,cur):
    """Open the member sign-up window (id / password / name / phone) and
    insert the entered row into the `member` table on button click.
    NOTE(review): the conn/cur parameters are shadowed by the fresh
    connection created inside insertMemberData — confirm they are needed."""
    ##====================================================================================
    ## Function declarations
    ## Row-insert handler, bound to the sign-up button below
    def insertMemberData() :
        # global conn, cur
        print("insertMemberData(입력 버튼 클릭)")
        # conn, cur = None, None
        data1, data2, data3, data4 = "", "", "", ""
        data5 = ""
        sql = ""
        # Open a fresh connection/cursor for this single insert.
        conn = pymysql.connect(host=HOST, user=USER, password=PASSWORD,db=DB, charset="utf8")
        cur = conn.cursor()
        data1 = edt1.get();
        data2 = edt2.get();
        data3 = edt3.get();
        data4 = edt4.get();
        # data5 = edt5.get();
        '''
        m_email VARCHAR(30) PRIMARY KEY, #회원 이메일
        m_pw VARCHAR(50) NOT NULL, #회원 비밀번호
        m_name VARCHAR(25) NOT NULL, #이름
        s_tel INT NULL, #연락처
        order_num INT NULL #주문번호
        '''
        # Duplicate-id check: scan every existing e-mail address.
        sql = "SELECT m_email FROM member"
        cur.execute(sql)
        email_DB = ''
        while (True) :
            row = cur.fetchone()
            if row == None :
                break;
            email_DB = row[0];
            if email_DB == data1 :
                messagebox.showerror('Error', '중복된 아이디 입니다.')
                break;
        # SECURITY(review): SQL built by string concatenation — vulnerable
        # to SQL injection; should use cur.execute(sql, params) placeholders.
        sql = "INSERT INTO member (m_email,m_pw,m_name,s_tel)"
        sql += "VALUES('"+data1+"','"+data2+"','"+data3 +"',"+data4+")"
        # The INSERT is attempted even after a duplicate was found; the
        # primary-key violation then lands in `except`, which stays silent
        # because the duplicate error box was already shown above.
        try :
            # sql = "INSERT INTO student VALUES("+data1+",'"+data2+"','"+data3+"','"+data4 +"')"
            print(sql)
            cur.execute(sql)
        except :
            if (email_DB != data1):
                messagebox.showerror('오류', '데이터 입력 오류 발생')
        else :
            messagebox.showinfo('성공' , '데이터 입력 성공')
            conn.commit()
        conn.close()
        window.destroy()
    ##====================================================================================
    ## Connection settings
    HOST = "127.0.0.1"
    USER = "root"
    PASSWORD = "1234"
    DB = "shopping_mall"
    ##====================================================================================
    ## Main code: build the entry window
    window = tk.Tk()
    window.title(' 입력 : 아이디, 비밀번호, 이름, 전화번호')
    window_with = 650
    window_height = 50
    # Centre the window horizontally on the monitor.
    monitor_width = window.winfo_screenwidth()
    monitor_height = window.winfo_screenheight()
    x = (monitor_width / 2) - (window_with / 2)
    y = (monitor_height / 2) - 100
    window.geometry('%dx%d+%d+%d' % (window_with, window_height, x, y))
    edtFrame = Frame(window);
    edtFrame.pack();
    listFrame = Frame(window)
    listFrame.pack(side = BOTTOM, fill=BOTH, expand=1)
    # Four entry boxes: e-mail, password, name, phone.
    edt1 = Entry(edtFrame, width=10); edt1.pack(side=LEFT, padx=10, pady=10)
    edt2 = Entry(edtFrame, width=10); edt2.pack(side=LEFT, padx=10, pady=10)
    edt3 = Entry(edtFrame, width=10); edt3.pack(side=LEFT, padx=10, pady=10)
    edt4 = Entry(edtFrame, width=10); edt4.pack(side=LEFT, padx=10, pady=10)
    # edt5 = Entry(edtFrame, width=10); edt5.pack(side=LEFT, padx=10, pady=10)
    ## Sign-up button
    btnInsert = Button(edtFrame, text="회원가입",command=insertMemberData)
    btnInsert.pack(side=LEFT,padx=10,pady=10)
    window.mainloop()
| true |
c52cb5a68a09805ae9b507ea317ee7af05e0f53a | Python | AusCommsteam/Algorithm-and-Data-Structures-and-Coding-Challenges | /Challenges/handOfStraights.py | UTF-8 | 1,467 | 4 | 4 | [] | no_license | """
Hand of Straights
Alice has a hand of cards, given as an array of integers.
Now she wants to rearrange the cards into groups so that each group is size W, and consists of W consecutive cards.
Return true if and only if she can.
Example 1:
Input: hand = [1,2,3,6,2,3,4,7,8], W = 3
Output: true
Explanation: Alice's hand can be rearranged as [1,2,3],[2,3,4],[6,7,8].
Example 2:
Input: hand = [1,2,3,4,5], W = 4
Output: false
Explanation: Alice's hand can't be rearranged into groups of 4.
Note:
1 <= hand.length <= 10000
0 <= hand[i] <= 10^9
1 <= W <= hand.length
"""
"""
Priority Queue
Time: O(n * W * log n): each heap push/pop costs O(log n), and each of the n cards may be popped and re-pushed up to W times before its group completes
Space: O(n)
"""
class Solution:
    """Greedy check using a min-heap: repeatedly peel off the lowest card
    and try to grow a run of W consecutive values from it."""
    def isNStraightHand(self, hand: List[int], W: int) -> bool:
        heap = []
        for card in hand:
            heapq.heappush(heap, card)
        while heap:
            run = []        # consecutive cards collected for this group
            leftovers = []  # popped cards that did not extend the run
            while heap and len(run) < W:
                card = heapq.heappop(heap)
                if not run or card == run[-1] + 1:
                    run.append(card)
                else:
                    leftovers.append(card)
            # Could not assemble a full group of W consecutive cards.
            if len(run) < W:
                return False
            # Put the skipped duplicates back for the next group.
            for card in leftovers:
                heapq.heappush(heap, card)
        return True
| true |
98a02b27cd7c97b509a710493f586b95130b8930 | Python | yogesh-kamble/Budget_Planner_Django | /Budget_Monitor/Transcation/views.py | UTF-8 | 1,870 | 2.53125 | 3 | [] | no_license | from models import Amount,Expense,Category
from django.http import HttpResponse
from django.shortcuts import render_to_response
# Create your views here.
def enter_transcation(request):
    '''
    Render the add-transaction form, populated with every known expense name.
    '''
    names = [exp.name for exp in Expense.objects.all()]
    return render_to_response('add_transcation.html', {"expense_list": names})
def display_Transaction(request):
    '''
    Render every stored Amount row as a bare HTML page showing the value,
    the date and the name of the related expense.
    '''
    response=HttpResponse()
    response.write("<html>")
    response.write("<H1>Transcation Details</H1>")
    response.write("<body>")
    #amount_obj = Amount()
    record_list=Amount.objects.all()
    for record in record_list:
        response.write("<p> Your Amount is %d </p>"%record.amount_value)
        # NOTE(review): reads 'transaction_date' while save_transcation writes
        # 'transcation_date' -- verify the Amount model's actual field name.
        response.write("<p> Date of your Transcation %s </p>"%record.transaction_date)
        # Look up the expense this record references; [0] assumes it exists
        # (IndexError for a dangling expense_id -- confirm that cannot happen).
        expense_obj_list = Expense.objects.filter(id=record.expense_id)
        expense_name = expense_obj_list[0].name
        response.write("<p> Expense Name : %s"%expense_name)
    response.write("</body></html>")
    return response
def save_transcation(request):
    '''
    Persist a transaction submitted from the add-transaction form.

    On POST: reads amount/date/description/expense from the form, resolves
    the selected expense name to its id and saves a new Amount row, then
    re-renders the form with an acknowledgement.  On any other method the
    form is re-rendered without one (the original fell through and returned
    None, which is not a valid Django response).
    '''
    if request.method == "POST":
        amount=request.POST["amount"]
        date=request.POST["date_pick"]
        description=request.POST['desc']
        expense=request.POST['expense_value']
        # Resolve the selected expense name to its primary key.
        # NOTE(review): [0] raises IndexError if the name is unknown --
        # confirm the form can only submit names that exist.
        expense_obj=Expense.objects.filter(name=expense)
        expense_id=expense_obj[0].id
        # NOTE(review): keyword 'transcation_date' here vs 'transaction_date'
        # read in display_Transaction -- verify the Amount model's field name.
        amount_obj = Amount(amount_value=amount, transcation_date=date, description=description, expense_id=expense_id)
        amount_obj.save()
        return render_to_response("add_transcation.html", {"transcation_save_ack":"Saved your Transcation"})
    # Non-POST request: show the form again without an acknowledgement.
    return render_to_response("add_transcation.html", {})
| true |
49aa5996fa84c5d3655dbae0192003ca717ab34c | Python | russot/ADS | /refer_entry.py | UTF-8 | 2,814 | 2.671875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
#!python
import glob
import string
class Refer_Entry(object):
    """A single calibration reference point: X/Y values with tolerances,
    a Y offset, a Y range, a data length and a validity flag."""

    def __init__(self, Xvalue=0, Xprecision=0, Yvalue=0, Yprecision=0, Yoffset=0, Ymin=0, Ymax=0, valid_status=None):
        """Store the point; every numeric field is coerced via ToFloat
        (falsy inputs such as None or '' become 0.0)."""
        self.valid_status = valid_status
        self.Xvalue = self.ToFloat(Xvalue)
        self.Xprecision = self.ToFloat(Xprecision)
        self.Yvalue = self.ToFloat(Yvalue)
        self.Yprecision = self.ToFloat(Yprecision)
        self.Yoffset = self.ToFloat(Yoffset)
        self.Ymin = self.ToFloat(Ymin)
        self.Ymax = self.ToFloat(Ymax)
        self.length = 0

    def ToFloat(self, value):
        """Coerce *value* to float, mapping falsy values to 0.0."""
        return float(value) if value else 0.0

    def Values(self):
        """Return the seven numeric fields as (X, Xp, Y, Yp, Yo, Ymin, Ymax)."""
        return (self.Xvalue, self.Xprecision, self.Yvalue,
                self.Yprecision, self.Yoffset, self.Ymin, self.Ymax)

    def ShowSensor(self):
        """Compact sensor-style summary: values, precisions and offset."""
        return "X:%.3f,Xp:%.3f,Y:%.3f,Yp:%.3f,Yo:%.3f," % (
            self.Xvalue, self.Xprecision, self.Yvalue, self.Yprecision, self.Yoffset)

    def ShowThermo(self):
        """Compact thermo-style summary: X, the Y range and Y."""
        return "X:%.3f,Ymin:%.3f,Y:%.3f,Ymax:%.3f," % (
            self.Xvalue, self.Ymin, self.Yvalue, self.Ymax)

    def GetLength(self):
        return self.length

    def SetLength(self, length):
        self.length = length

    def GetValid(self):
        return self.valid_status

    def SetValid(self, status):
        """status is True/False."""
        self.valid_status = status

    def GetXvalue(self):
        return self.Xvalue

    def SetXvalue(self, value):
        self.Xvalue = float(value)

    def GetXprecision(self):
        return self.Xprecision

    def SetXprecision(self, value):
        self.Xprecision = float(value)

    def GetYvalue(self):
        return self.Yvalue

    def SetYvalue(self, value):
        self.Yvalue = float(value)

    def GetYmin(self):
        return self.Ymin

    def SetYmin(self, value):
        self.Ymin = float(value)

    def GetYmax(self):
        return self.Ymax

    def SetYmax(self, value):
        self.Ymax = float(value)

    def GetYprecision(self):
        return self.Yprecision

    def SetYprecision(self, value):
        self.Yprecision = float(value)

    def GetYoffset(self):
        return self.Yoffset

    def SetYoffset(self, value):
        self.Yoffset = float(value)

    def Validate(self, Xvalue=None, Yvalue=0):
        """Compare a measurement against this reference.

        Returns (x_error, y_rel_error, x_ok, y_ok).  x_error is -1 when no
        X value is supplied (and x_ok is then True).
        NOTE(review): raises ZeroDivisionError when the stored Yvalue is 0
        (e.g. a default-constructed entry); confirm callers always set a
        non-zero reference before validating.
        """
        if Xvalue is None:
            x_error = -1
            x_ok = True
        else:
            x_error = abs(Xvalue - self.Xvalue)
            x_ok = x_error <= self.Xprecision
        y_error = abs((Yvalue - self.Yvalue) / self.Yvalue)
        y_ok = y_error <= self.Yprecision
        return (x_error, y_error, x_ok, y_ok)

    def SetXY_Valid(self, xvalue, yvalue, xprecision, yprecision, valid_status):
        """Overwrite the point, its tolerances and the validity flag in one call.

        NOTE(review): unlike the individual setters, no float coercion happens
        here (matches the original behaviour).
        """
        self.Xvalue = xvalue
        self.Yvalue = yvalue
        self.Xprecision = xprecision
        self.Yprecision = yprecision
        self.valid_status = valid_status
| true |
af48c9513e20fe6c618d730d861383e8e8f1e898 | Python | ColdMatter/PhotonBEC | /Scripts/calibrate_grating/calibrate_energy_position.py | UTF-8 | 6,855 | 2.90625 | 3 | [
"MIT"
] | permissive | #coded by JM in 10/2014
import sys
sys.path.append("D:\\Control\\PythonPackages\\")
import scipy.misc
from scipy.optimize import leastsq
import numpy as np
import pbec_analysis
#TODO this queue is made for use of threads, it has mutex stuff inside
# which will make it slow, replace it with a faster alternative
import Queue
#choose border threshold to get most of the parabola but not
# at the expense of speed
floodfill_boundary_threshold = 60
# per-channel weights used when collapsing colour frames to greyscale
colour_weights = (1, 1, 0, 0) #red, green
# smoothing-window settings (not referenced elsewhere in this module)
smooth_window_len = 10
smooth_window_name = 'flat'
#now all the parabolas are sideways, x = ay^2 + by + c
# so prepare to be slightly confused at all the x/y swapping from
# the parabolas you studied in maths class
#given a y value, find the x of the parabola
def parabola_y_to_x(im, y, mask_im):
    """Return the column of the brightest in-mask pixel on row *y*.

    Pixels whose mask value is non-zero are treated as hot pixels: they are
    zeroed in *im* (in place) and the search repeats until the brightest
    remaining pixel lies inside the flood-filled region (mask value 0).
    """
    while True:
        col = np.argmax(im[y])
        if mask_im[y, col] == 0:
            return col
        # hot pixel outside the filled region: darken it and keep looking
        im[y, col] = 0
def obtain_parabola_from_image(im, mask_im, parabola_row_offset, parabola_row_size):
    """Trace the parabola: for each row in the filled band, record the column
    of its brightest in-mask pixel.  Returns a float array of length
    *parabola_row_size*."""
    rows = range(parabola_row_offset, parabola_row_offset + parabola_row_size)
    return np.array([parabola_y_to_x(im, row, mask_im) for row in rows], dtype=float)
def construct_parabola_from_parameters(coefficients, parabola_row_size):
    """Evaluate the sideways parabola x = a*y**2 + b*y + c for
    y = 0 .. parabola_row_size-1 and return the values as a float array.

    The original used Python-2-only tuple-parameter syntax in the signature
    and filled the array with a scalar loop; explicit unpacking works on
    both Python 2 and 3, and the vectorised evaluation removes the loop.
    """
    a, b, c = coefficients
    y = np.arange(parabola_row_size, dtype=float)
    return a * y * y + b * y + c
def parabola_residuals(pars, xdata, parabola_row_size):
    """Squared residuals between the measured x positions and the model
    parabola -- the objective handed to scipy.optimize.leastsq."""
    model = construct_parabola_from_parameters(pars, parabola_row_size)
    return (xdata - model) ** 2
def colour_mask_image(im_raw, colour_weights):
    '''
    Collapse a multi-channel image to greyscale as a weighted sum of its
    colour channels.
    '''
    channels = im_raw.shape[-1]
    weighted = (colour_weights[c] * im_raw[:, :, c] for c in range(channels))
    return sum(weighted, 0)
def find_max_pixel(im):
    """Return (row, col) of the brightest pixel in *im*.

    Replaces the hand-rolled double loop: np.argmax over the flattened
    array returns the first maximum in row-major (C) order, which is the
    same element the original strict-greater-than scan selected.
    """
    row, col = np.unravel_index(np.argmax(im), im.shape)
    return int(row), int(col)
#floodfill algorithm with special requirements
# for instance also finds the max and min rows that were filled in
#flood fills pixels with zero until it reaches a boundary
#returns the boundaries in y that the flood fill reached
def floodfill(im, startP, borderThreshold):
minFilledRow = im.shape[0]
maxFilledRow = 0
pxqueue = Queue.Queue()
pxqueue.put(startP)
while not pxqueue.empty():
px = pxqueue.get()
if px[0] > maxFilledRow:
maxFilledRow = px[0]
if px[0] < minFilledRow:
minFilledRow = px[0]
if im[px[0], px[1]] > borderThreshold:
im[px[0], px[1]] = 0
pxqueue.put((px[0] + 1, px[1]))
pxqueue.put((px[0] - 1, px[1]))
pxqueue.put((px[0], px[1] + 1))
pxqueue.put((px[0], px[1] - 1))
return minFilledRow+1, maxFilledRow
def fit_parabola(im_raw, plotfit=False):
    """Fit a sideways parabola x = a*y**2 + b*y + c to the bright curve in
    *im_raw* and return (x of the vertex, (a, b, c)).

    NOTE(review): this is a Python 2 module (print statements below), and the
    plotting names (figure, plot, imshow, scatter) assume a pylab-style star
    import that is not visible in this file -- plotfit=True will NameError
    without it; confirm the intended environment.
    """
    #im_raw = scipy.misc.imread("pbec_20141029_161004_20um_slit.png")
    #im = im_raw[:,:,im_channel]
    im = colour_mask_image(im_raw, colour_weights)
    maxRow, maxCol = find_max_pixel(im)
    masked_im = im.copy()
    # flood-fill outward from the brightest pixel; the filled band bounds the parabola rows
    parabola_row_range = floodfill(masked_im, (maxRow, maxCol), borderThreshold=floodfill_boundary_threshold)
    parabola_row_offset = parabola_row_range[0]
    parabola_row_size = parabola_row_range[1] - parabola_row_range[0]
    xdata = obtain_parabola_from_image(im, masked_im, parabola_row_offset, parabola_row_size)
    #near the stationary point of the parabola there are lots of bright pixels
    # this can mess up the calculation for parabola_row_range from floodfill()
    # so to find the real range of the parabola, find the very large spike in
    # the derivative of xdata
    dxdata = np.zeros(parabola_row_size - 1)
    large_delta = []
    for i in range(parabola_row_size - 1):
        dxdata[i] = xdata[i] - xdata[i + 1]
        #threshold for too-large change is chosen to be 100
        if abs(xdata[i] - xdata[i + 1]) > 100:
            large_delta.append(i)
    if len(large_delta) > 0:
        # split the jump rows around the brightest row and keep only the band between them
        large_delta = np.array(large_delta)
        top_deltas = large_delta[large_delta < maxRow - parabola_row_offset]
        bottom_deltas = large_delta[large_delta > maxRow - parabola_row_offset]
        print 'td = ' + str(top_deltas) + ' bd = ' + str(bottom_deltas)
        parabola_row_offset = top_deltas[-1] + 1 + parabola_row_range[0]
        parabola_row_size = bottom_deltas[0] - top_deltas[-1] - 1
        xdata = xdata[top_deltas[-1] + 1:bottom_deltas[0]]
    else:
        print 'skipping overexpose fix'
    if plotfit:
        figure(1), clf()
        plot(xdata)
        figure(2), clf()
        plot(xdata, np.array(range(parabola_row_size)) + parabola_row_offset, "y")
        imshow(im)
        scatter([maxCol], [maxRow], c='w', marker='x')
    # least-squares fit of the traced column positions to the quadratic model
    parameters_guess = (-0.01, 6.0, 1600.0) #(a, b, c)
    ls_solution = leastsq(parabola_residuals, parameters_guess, args = (xdata, parabola_row_size))
    (a, b, c) = ls_solution[0]
    #print a, b, c
    if plotfit:
        calc_data = construct_parabola_from_parameters(ls_solution[0], parabola_row_size)
        plot(calc_data, np.array(range(parabola_row_size)) + parabola_row_offset, "w")
    ym = -b/2/a + parabola_row_offset #y value of minimum
    xm = parabola_y_to_x(im, int(ym), masked_im) #x value of minimum, the desired value for calibrating the wavelength scale
    return xm, ls_solution[0]
arg1 = "20141029_161004", "_20um_slit.png"
arg2 = "20141030_165700", ".png"
arg3 = "20141110_123639", ".png"
im_raw = scipy.misc.imread(pbec_analysis.timestamp_to_filename(*arg2))
print fit_parabola(im_raw, True)
#print (maxRow, maxCol, im[maxRow, maxCol])
#print "parabola row range = " + str(parabola_row_range)
#ImageDraw.floodfill(im, (maxRow, maxCol), 0, border)
#cant use ImageDraw.floodfill() because it assumes the border is an individual color
# while we need to use a threshold system, i.e. filling all colors above a certain value
#the idea if using histogram() to discover the correct threshold for floodfill is a bad
# idea because it will still depend on some arbitrary human decision in the proportional cutoff
#fact is the images you take will depend on many things: experimental setup, camera settings and more
# so a human will have to choose the threshold at some point
'''
import matplotlib.pyplot as plt
#n, bins, patches = plt.hist(im.flatten(), im[maxRow, maxCol], fc='k', ec='k', log=True)
np.histogram(im.flatten(),
print n
print bins
'''
'''
from pbec_experiment import *
import time
cam = getCameraByLabel('grasshopper')
try:
for i in range(1):
im_raw = cam.get_image()
if im_raw == None:
print('error')
time.sleep(0.1)
continue
ts = pbec_analysis.make_timestamp()
imsave(pbec_analysis.timestamp_to_filename(ts, file_end=".png"), im_raw)
xm, parabola = fit_parabola(im_raw, True)
print xm
#figure(str(i) + " xm=" + str(xm))
#imshow(colour_mask_image(im_raw, colour_weights))
#calc_data = construct_parabola_from_parameters(parabola)
#plot(calc_data, np.array(range(parabola_row_range[1] - parabola_row_range[0])) + parabola_row_range[0], "w")
time.sleep(1.5)
finally:
cam.close()
''' | true |
e00a16cf25f272c3cf01eeb415d1cab845b77345 | Python | mrmleonard/pyxel_examples | /pyxel/presentation_examples/animated_circle.py | UTF-8 | 412 | 3.625 | 4 | [
"MIT"
] | permissive |
# import Pyxel module
import pyxel
# animation state: the circle's current horizontal position
circle_x = 0

# open a 160x120 pixel window
pyxel.init(160, 120)

# main loop: advance the circle and redraw every frame
while True:
    # slide right; once fully past the right edge, re-enter from the left
    circle_x += 2
    if circle_x >= pyxel.width + 20:
        circle_x = -20
    pyxel.cls(0)
    pyxel.circ(x=circle_x, y=60, r=20, col=11)
    # present the finished frame
    pyxel.flip()
| true |
08d2276e80ec16adad294cdb28e0572fdef4a35f | Python | bigtone1284/hackerRank | /is_fibo.py | UTF-8 | 1,706 | 4.625 | 5 | [] | no_license | """========================================================================================================================
You are given an integer, N. Find out if the number is an element of fibonacci series.
The first few elements of fibonacci series are 0,1,1,2,3,5,8,13.... A fibonacci series is one where every element is a sum of the previous two elements in the series. The first two elements are 0 and 1.
Formally:
fib0 = 0
fib1 = 1
fibn = fibn-1 + fibn-2 ∀ n > 1
Input Format
The first line contains T, number of test cases.
T lines follows. Each line contains an integer N.
Output Format
Display "IsFibo" (without quotes) if N is a fibonacci number and "IsNotFibo" (without quotes) if it is not a fibonacci number. The output for each test case should be displayed on a new line.
1 <= T <= 105
1 <= N <= 1010
Sample Input
3
5
7
8
Sample Output
IsFibo
IsNotFibo
IsFibo
Explanation
5 is a Fibonacci number given by fib5 = 3 + 2
7 is not a Fibonacci number
8 is a Fibonacci number given by fib6 = 5 + 3
TimeLimit Time limit for this challenge is given here
========================================================================================================================"""
def is_fib(num):
    """Return "IsFibo" if *num* is a Fibonacci number, else "IsNotFibo"."""
    if num in (0, 1):
        return "IsFibo"
    prev, curr = 0, 1
    fib = 0
    # generate Fibonacci numbers until we reach or pass num
    while fib < num:
        fib = prev + curr
        prev, curr = curr, prev + curr
        if fib == num:
            return "IsFibo"
    return "IsNotFibo"
def main():
    """Read T, then classify each of the next T integers, one per line.

    Python 2 module: raw_input() is used for reading; print(...) with a
    single argument behaves identically under both Python 2 and 3.
    """
    cases = int(raw_input())
    for _ in range(cases):
        print(is_fib(int(raw_input())))

if __name__ == "__main__":
    main()
| true |
ff6171c84b16314790de31d81f210685968ec20f | Python | tztex/self_taught2 | /self_taught1/Data_Structures.py | UTF-8 | 2,127 | 4.4375 | 4 | [] | no_license | # list tuples dictionaries
# stacks and queues
# putting item on stack is pushing
# add and remove from stack, only add remove last item
# removing from stack is popping
# called a LIFO data structure, last in first out
# a queue is a data structure and is FIFO
# ex line of people first person gets ticket
class Stack:
    """LIFO stack backed by a Python list; the top of the stack is the end
    of the list, so push and pop are O(1)."""

    def __init__(self):
        self.items = []  # storage; index -1 is the top of the stack

    def is_empty(self):
        """Return True if the stack holds no items."""
        return not self.items  # idiomatic truthiness instead of == []

    def push(self, item):
        """Place item on top of the stack."""
        self.items.append(item)

    def pop(self):
        """Remove and return the top item (IndexError if empty)."""
        return self.items.pop()

    def peek(self):
        """Return the top item without removing it (IndexError if empty).

        Uses a negative index instead of computing len(items) - 1 by hand.
        """
        return self.items[-1]

    def size(self):
        """Return the number of items on the stack."""
        return len(self.items)
# --- demo: basic stack operations ---
stack = Stack()
print(stack.is_empty())
stack.push(1)
print(stack.is_empty())
for i in range(1, 11):
    stack.push(i)
print(stack.items)
print(stack.size())
stack.pop()
print(stack.items)
stack.push(5)
for i in range(1,10):
    stack.push(i)
print(stack.items)
print(stack.peek())
# print hello backwards
stack3 = Stack()
for c in "Thomas":
    stack3.push(c)
reversed_string = ""
# popping everything yields the characters in reverse push order
for i in range(len(stack3.items)):
    reversed_string += stack3.pop()
print(reversed_string)
# same reversal trick on another word
yester = Stack()
for c in "Yesterday":
    yester.push(c)
print(yester.items[0])
reversed_string = ""
for i in range(len(yester.items)):
    reversed_string += yester.pop()
print(reversed_string)
print()
class Queue:
    """FIFO queue backed by a list: enqueue inserts at index 0, dequeue pops
    from the end, so the oldest item always leaves first.  (insert(0, ...)
    is O(n); kept because the demo below prints the raw list.)"""

    def __init__(self):
        self.items = []  # front of the queue is the END of the list

    def is_empty(self):
        """Return True when no items are waiting."""
        return len(self.items) == 0

    def enqueue(self, item):
        """Add item at the back of the queue."""
        self.items.insert(0, item)

    def dequeue(self):
        """Remove and return the front item (IndexError if empty)."""
        return self.items.pop()

    def size(self):
        """Return how many items are queued."""
        return len(self.items)
# --- demo: basic queue operations ---
aq = Queue()
print(aq.is_empty())
for i in range(11):
    aq.enqueue(i)
print(aq.items)
print(aq.size())
aq.dequeue()
print(aq.size())
# note: prints the loop indices 0..size-1, not the queued values
for i in range(aq.size()):
    print(i)
print()
| true |
a6fb8c47bd65ab402a98021f2e9b9110a50f82f0 | Python | rocalabern/pygame_love_runner | /levels/tutorial_levels/tutorial_01.py | UTF-8 | 1,208 | 2.859375 | 3 | [] | no_license | import pygame
from pygame import *
from game_screen.game_screen import GameScreen
from lib import *
from levels import *
def show_image(screen, game_screen: GameScreen, width, height):
    """Success animation: blit the thumbs-up picture scaled to the play-area
    height, print "MUY BIEN", refresh the display and pause five seconds."""
    image = pygame.image.load("images/thumbs-up/julia_y_mar_muy_bien.png")
    img_w, img_h = image.get_rect().size
    diff = 0.0  # vertical margin fraction (currently none)
    scale = (1.0 - diff) * (height / img_h)
    scaled = pygame.transform.scale(image, (int(round(scale * img_w)), int(round(scale * img_h))))
    dest = (game_screen.x_offset + int(round(0.20 * width)),
            game_screen.y_offset + int(round(diff * height)))
    screen.blit(scaled, dest)
    pg_print_message(screen, game_screen, "MUY BIEN", int(round(0.3 * 1366 / 4)), int(round(3*768 / 4)), size=128)
    pygame.display.update()
    pygame.time.wait(5000)
def tutorial_01(game_screen: GameScreen):
    """Build the first tutorial level: reach the yellow tile using the arrow
    keys; on success the thumbs-up animation plays."""
    level = Level("levels/tutorial_levels/tutorial_01.txt", game_screen)
    captions = (
        create_caption("Jugadora... tienes una cita en la casilla amarilla", 176, 120),
        create_caption("PISTA: Usa las flechas para moverte", 176, 600, color_fg=(150, 150, 150)),
    )
    for caption in captions:
        level.add_caption(caption)
    level.success_animation = show_image
    return level
| true |
d8a04b44bdb55b59bc3de17d9c09721b81fae476 | Python | Rwik2000/CarRacingv0-PPO-pytorch | /agentFile.py | UTF-8 | 4,032 | 2.6875 | 3 | [] | no_license | from neuralnet import Net
import torch
import numpy as np
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Beta
from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler
class Agent():
    """
    PPO agent for training: accumulates (state, action, log-prob, reward,
    next-state) transitions into a fixed-size buffer and, once full, runs a
    clipped-surrogate PPO update over random mini-batches.
    """
    # max_grad_norm = 0.5
    def __init__(self, episode, args, device):
        """Build the actor-critic net; if episode != 0, resume weights from
        that episode's checkpoint file."""
        # structured record for one environment step; states are stacks of
        # args.img_stack frames of 96x96 pixels, actions have 3 components
        transition = np.dtype([('s', np.float64, (args.img_stack, 96, 96)), ('a', np.float64, (3,)), ('a_logp', np.float64),
                               ('r', np.float64), ('s_', np.float64, (args.img_stack, 96, 96))])
        self.args = args
        self.clip_param = args.clip_param
        self.ppo_epoch = args.ppo_epoch
        self.buffer_capacity = args.buffer_capacity
        self.batch_size = args.batch_size
        self.training_step = 0
        self.net = Net(args).double().to(device)
        self.device = device
        if episode != 0:
            print("LOADING FROM EPISODE", episode)
            self.net.load_state_dict(torch.load(self.args.saveLocation + 'episode-' + str(episode) + '.pkl'))
        self.buffer = np.empty(self.buffer_capacity, dtype=transition)
        self.counter = 0  # next free slot in the transition buffer
        self.lastSavedEpisode = 0
        self.optimizer = optim.Adam(self.net.parameters(), lr=1e-3)
    def select_action(self, state):
        """Sample a 3-component action from the Beta policy for *state*.

        Returns (action as a numpy array, summed log-probability as float).
        """
        state = torch.from_numpy(state).double().to(self.device).unsqueeze(0)
        with torch.no_grad():
            alpha, beta = self.net(state)[0]
        dist = Beta(alpha, beta)
        action = dist.sample()
        a_logp = dist.log_prob(action).sum(dim=1)
        action = action.squeeze().cpu().numpy()
        a_logp = a_logp.item()
        return action, a_logp
    def save_param(self, episode ):
        """Checkpoint the network weights under the given episode number."""
        self.lastSavedEpisode = episode
        print('-----------------------------------------')
        print("SAVING AT EPISODE", episode)
        print('-----------------------------------------')
        torch.save(self.net.state_dict(), self.args.saveLocation + 'episode-' + str(episode) + '.pkl')
    def update(self, transition, episodeIndex):
        """Store one transition; when the buffer fills, run ppo_epoch passes
        of clipped-surrogate PPO over random mini-batches.

        NOTE(review): save_param is called at the end of EVERY invocation,
        not only after an optimisation pass -- that checkpoints to disk once
        per environment step; confirm this is intended.
        """
        self.buffer[self.counter] = transition
        self.counter += 1
        # print('COUNTER = ', self.counter)
        if self.counter == self.buffer_capacity:
            print("UPDATING WEIGHTS at reward = ", transition[3])
            self.counter = 0
            self.training_step += 1
            s = torch.tensor(self.buffer['s'], dtype=torch.double).to(self.device)
            a = torch.tensor(self.buffer['a'], dtype=torch.double).to(self.device)
            r = torch.tensor(self.buffer['r'], dtype=torch.double).to(self.device).view(-1, 1)
            s_ = torch.tensor(self.buffer['s_'], dtype=torch.double).to(self.device)
            old_a_logp = torch.tensor(self.buffer['a_logp'], dtype=torch.double).to(self.device).view(-1, 1)
            with torch.no_grad():
                # one-step TD target and advantage from the critic head
                target_v = r + self.args.gamma * self.net(s_)[1]
                advantage = target_v - self.net(s)[1]
            for _ in range(self.ppo_epoch):
                for index in BatchSampler(SubsetRandomSampler(range(self.buffer_capacity)), self.batch_size, False):
                    alpha, beta = self.net(s[index])[0]
                    dist = Beta(alpha, beta)
                    a_logp = dist.log_prob(a[index]).sum(dim=1, keepdim=True)
                    # probability ratio pi_new / pi_old for the clipped objective
                    ratio = torch.exp(a_logp - old_a_logp[index])
                    surr1 = ratio * advantage[index]
                    surr2 = torch.clamp(ratio, 1.0 - self.clip_param, 1.0 + self.clip_param) * advantage[index]
                    actorLoss = -torch.min(surr1, surr2).mean()
                    criticLoss = F.smooth_l1_loss(self.net(s[index])[1], target_v[index])
                    # critic term weighted 2x relative to the actor term
                    loss = actorLoss + 2. * criticLoss
                    self.optimizer.zero_grad()
                    loss.backward()
                    # nn.utils.clip_grad_norm_(self.net.parameters(), self.max_grad_norm)
                    self.optimizer.step()
        self.save_param(episodeIndex)
20224f8244b753a52a7e3ba69f3027dffd911cb0 | Python | jslee6091/SW_Algorithm | /sw expert academy/Intermediate/String/2_회문1/회문1.py | UTF-8 | 515 | 3.296875 | 3 | [] | no_license | import sys
sys.stdin = open("회문1_inputs.txt", 'r')
def palindrome(array, length=None):
    """Count palindromic windows of the given length in *array*.

    *length* defaults to the module-global N (the problem's window size for
    the current test case), so existing call sites are unchanged.  The
    window count is derived from len(array) instead of the hard-coded board
    width 8, generalising the helper while behaving identically for the
    8-cell rows and columns used by the driver below.
    """
    if length is None:
        length = N  # falls back to the per-test-case global window size
    count = 0
    for start in range(len(array) - length + 1):
        window = array[start:start + length]
        if window == list(reversed(window)):
            count += 1
    return count
# 10 test cases, numbered from 1
for test_case in range(1, 11):
    # window length for this test case
    N = int(input())
    # 8 rows of single characters (the 8x8 board)
    num_array = [list(map(str, list(input()))) for _ in range(8)]
    answer = 0
    # count palindromic windows row by row ...
    for i in num_array:
        answer += palindrome(i)
    # ... and column by column (zip(*) transposes the board)
    for j in zip(*num_array):
        answer += palindrome(list(j))
    print(f'#{test_case} {answer}')
| true |
21edef833c056cc6ff5b3d607d5bdce4d76ccc76 | Python | jbuseck697/SenseHat-Minecraft | /minecraftmap.py | UTF-8 | 649 | 2.703125 | 3 | [] | no_license | from sense_hat import SenseHat
from mcpi.minecraft import Minecraft
from time import sleep
sense = SenseHat()
mc = Minecraft.create()

# Minecraft block IDs
grass = 2
diamond = 57
gold = 41
iron = 42

# RGB colours for the Sense HAT LED matrix
cyan = (0, 255, 255)
yellow = (255, 255, 0)
white = (255, 255, 255)
black = (0, 0, 0)

# block ID -> LED colour
colors = {
    grass: black,
    diamond: cyan,
    gold: yellow,
    iron: white,
    }

# Poll the block beneath the player and mirror its colour on the LED matrix.
while True:
    x, y, z = mc.player.getTilePos()
    block = mc.getBlock(x, y-1, z)  # block directly under the player's feet
    color = colors.get(block)  # single dict lookup instead of membership test + index
    if color is not None:
        sense.clear(color)
    else:
        print("Don't know block ID %s" % block)  # fixed "Dont't" typo
    sleep(0.1)
| true |
3a18d22e5c16d4dc110c341700f2b118bcda4d43 | Python | trambelus/plounge-db | /Local_scripts/plmatrix.py | UTF-8 | 2,479 | 2.5625 | 3 | [] | no_license | #!/usr/bin/env python
import sqlite3
import matplotlib as mp
import matplotlib.pyplot as plt
import sys
QUERY = '''SELECT
100*CAST(a.N AS FLOAT)/T sun,
100*CAST(b.N AS FLOAT)/T mon,
100*CAST(c.N AS FLOAT)/T tue,
100*CAST(d.N AS FLOAT)/T wed,
100*CAST(e.N AS FLOAT)/T thu,
100*CAST(f.N AS FLOAT)/T fri,
100*CAST(g.N AS FLOAT)/T sat
FROM (
(SELECT STRFTIME('%H',udate) hour, COUNT(*) N FROM comments GROUP BY hour) z
LEFT JOIN
(SELECT STRFTIME('%H',udate) hour, COUNT(*) N FROM comments WHERE author = '{0}'
AND udate > DATE('2014-12-01') AND STRFTIME('%w',udate) = '0' GROUP BY hour) a
ON (z.hour = a.hour) LEFT JOIN
(SELECT STRFTIME('%H',udate) hour, COUNT(*) N FROM comments WHERE author = '{0}'
AND udate > DATE('2014-12-01') AND STRFTIME('%w',udate) = '1' GROUP BY hour) b
ON (z.hour = b.hour) LEFT JOIN
(SELECT STRFTIME('%H',udate) hour, COUNT(*) N FROM comments WHERE author = '{0}'
AND udate > DATE('2014-12-01') AND STRFTIME('%w',udate) = '2' GROUP BY hour) c
ON (z.hour = c.hour) LEFT JOIN
(SELECT STRFTIME('%H',udate) hour, COUNT(*) N FROM comments WHERE author = '{0}'
AND udate > DATE('2014-12-01') AND STRFTIME('%w',udate) = '3' GROUP BY hour) d
ON (z.hour = d.hour) LEFT JOIN
(SELECT STRFTIME('%H',udate) hour, COUNT(*) N FROM comments WHERE author = '{0}'
AND udate > DATE('2014-12-01') AND STRFTIME('%w',udate) = '4' GROUP BY hour) e
ON (z.hour = e.hour) LEFT JOIN
(SELECT STRFTIME('%H',udate) hour, COUNT(*) N FROM comments WHERE author = '{0}'
AND udate > DATE('2014-12-01') AND STRFTIME('%w',udate) = '5' GROUP BY hour) f
ON (z.hour = f.hour) LEFT JOIN
(SELECT STRFTIME('%H',udate) hour, COUNT(*) N FROM comments WHERE author = '{0}'
AND udate > DATE('2014-12-01') AND STRFTIME('%w',udate) = '6' GROUP BY hour) g
ON (z.hour = g.hour) LEFT JOIN
(SELECT COUNT(*) T FROM comments WHERE author = '{0}'
AND udate > DATE('2014-12-01'))
)'''
DBPATH = 'plounge.db3'
def matrix(user, dbpath=DBPATH, savepath=None):
    """Fetch the hour-by-weekday activity matrix for *user* and display it.

    NOTE(review): *savepath* is computed but never used -- the figure is only
    shown, never saved.  Presumably plt.savefig(savepath) was intended;
    confirm before wiring it up.
    """
    if savepath == None:
        savepath = 'mat\\%s.png' % user
    db = sqlite3.connect(dbpath)
    data = db.execute(QUERY.format(user)).fetchall()
    # replace NULL cells (hours with no comments) with 0, then transpose the grid
    data = list(map(list,zip(*[[i if i != None else 0 for i in j] for j in data])))
    print(data)
    db.close()
    plt.imshow(data)
    plt.show()
def main():
    """Command-line entry point: expects exactly one argument, the username."""
    if len(sys.argv) == 2:
        matrix(sys.argv[1])
    else:
        print("Usage: plmatrix.py [user]")

if __name__ == '__main__':
    main()
472838e715745d175234e6e363d2baa48da45729 | Python | zaid-kamil/python_script_1130_2020 | /visualizer.py | UTF-8 | 418 | 3 | 3 | [] | no_license | from reader import read_file,count_vowels
import matplotlib.pyplot as plt
def vowel_visualizer(file):
    """Bar-chart the vowel counts of *file* and save the figure to
    images/vowel_counter.png."""
    counts = count_vowels(file)
    labels = list(counts.keys())
    heights = list(counts.values())
    palette = ['#89f8ff','#ff8833']
    plt.bar(labels, heights, color=palette)
    plt.savefig('images/vowel_counter.png', bbox_inches='tight')
# Demo: chart the vowel distribution of one bundled text file.
file = 'data/Richardson_Clarissa.txt'
vowel_visualizer(file)
| true |
410008c2c6ce67889a7a9ba3e011af4c8344f607 | Python | thomasjeu/project_ik15 | /helpers.py | UTF-8 | 5,150 | 2.71875 | 3 | [] | no_license | import requests
import urllib.parse
import os
from cs50 import SQL
from werkzeug.security import check_password_hash, generate_password_hash
from flask import redirect, render_template, request, session
from flask_session import Session
from functools import wraps
# Configure CS50 Library to use SQLite database
db = SQL("sqlite:///admin.db")  # shared module-level handle used by every helper below
def apology(message, code=400):
    """Render an apology page for *message* with the given HTTP status code."""
    def escape(s):
        """Escape special characters for the memegen URL scheme
        (https://github.com/jacebrowning/memegen#special-characters)."""
        replacements = (("-", "--"), (" ", "-"), ("_", "__"), ("?", "~q"),
                        ("%", "~p"), ("#", "~h"), ("/", "~s"), ("\"", "''"))
        for old, new in replacements:
            s = s.replace(old, new)
        return s
    return render_template("apology.html", top=code, bottom=escape(message)), code
def change_description(description, user_id):
    """Store a new profile description for the given user; always returns True."""
    db.execute("UPDATE users SET description=:description WHERE id=:user_id",
               user_id=user_id, description=description)
    return True
def change_password(password, confirmation, user_id):
    """Hash *password* and persist it for the user; always returns True.

    NOTE(review): *confirmation* is unused here -- presumably validated by
    the caller; confirm.
    """
    hashed = generate_password_hash(password, method='pbkdf2:sha256', salt_length=8)
    db.execute("UPDATE users SET hash=:hash WHERE id=:user_id", user_id=user_id, hash=hashed)
    return True
def change_username(username, user_id):
    """Persist a new username for the given user; always returns True."""
    db.execute("UPDATE users SET username=:username WHERE id=:user_id",
               user_id=user_id, username=username)
    return True
def login_required(f):
    """Decorator that redirects anonymous visitors to /login.

    See http://flask.pocoo.org/docs/1.0/patterns/viewdecorators/
    """
    @wraps(f)
    def wrapped(*args, **kwargs):
        if session.get("user_id") is None:
            return redirect("/login")
        return f(*args, **kwargs)
    return wrapped
def fill_post_dict(posts):
    """Map each post id to a (path, like count, title) tuple."""
    post_dict = {}
    for post in posts:
        likes = len(db.execute("SELECT post_id FROM likes WHERE post_id=:post_id", post_id=post["id"]))
        titles = db.execute("SELECT title FROM uploads WHERE id=:id", id=post["id"])
        post_dict[post["id"]] = (post["path"], likes, titles[0]["title"])
    return post_dict
def is_following(followers, user_id):
    """Return False if *user_id* already appears in *followers*, else True.

    *followers* is a list of rows with a "user_id" key, as returned by the
    follow-table queries.  The manual loop is replaced by the equivalent
    all(...) expression.
    """
    return all(follower["user_id"] != user_id for follower in followers)
def is_user(user, user_id):
    """Return False when *user* is viewing their own page, else True.

    The if/else returning literal booleans collapses to the comparison itself.
    """
    return user != user_id
def liked_post(user_id, post_id):
    """ False if user already liked this post """
    # NOTE(review): the user_id parameter is ignored -- the query uses
    # session.get("user_id") instead.  Presumably callers pass the session
    # user anyway; confirm before relying on the parameter.
    liking = db.execute("SELECT post_id FROM likes WHERE user_id=:user_id AND post_id=:post_id",
                        user_id=session.get("user_id"), post_id=post_id)
    if liking:
        return False
    return True
def favo_post(user_id, post_id):
    """ False if user already favorited this post """
    # NOTE(review): like liked_post above, the user_id parameter is ignored
    # in favour of session.get("user_id"); confirm that is intended.
    favos = db.execute("SELECT post_id FROM favorites WHERE user_id=:user_id AND post_id=:post_id",
                       user_id=session.get("user_id"), post_id=post_id)
    if favos:
        return False
    return True
def user_information(user_id):
    """ Returns user information for profile page:
    (description, username, posts, picture, followers, following).

    NOTE(review): the [0] indexing raises IndexError for an unknown user_id
    -- confirm callers only pass existing users.  Near-duplicate of
    user_information_users below, which additionally filters posts by status.
    """
    # Get user info
    description = db.execute("SELECT description FROM users WHERE id=:user_id", user_id=user_id)[0]["description"]
    username = db.execute("SELECT username FROM users WHERE id=:user_id", user_id=user_id)[0]["username"]
    posts = db.execute("SELECT path, id FROM uploads WHERE user_id=:user_id", user_id=user_id)
    picture = db.execute("SELECT image FROM users WHERE id=:user_id", user_id=user_id)
    followers = db.execute("SELECT user_id FROM follow WHERE follow_id=:follow_id", follow_id=user_id)
    following = db.execute("SELECT follow_id FROM follow WHERE user_id=:user_id", user_id=user_id)
    # Return user info
    return description, username, posts, picture, followers, following
def user_information_users(user_id):
    """ Returns user information for profile page, restricted to posts with
    status=1 (otherwise identical to user_information above). """
    # Get user info
    description = db.execute("SELECT description FROM users WHERE id=:user_id", user_id=user_id)[0]["description"]
    username = db.execute("SELECT username FROM users WHERE id=:user_id", user_id=user_id)[0]["username"]
    posts = db.execute("SELECT path, id FROM uploads WHERE user_id=:user_id AND status=1", user_id=user_id)
    picture = db.execute("SELECT image FROM users WHERE id=:user_id", user_id=user_id)
    followers = db.execute("SELECT user_id FROM follow WHERE follow_id=:follow_id", follow_id=user_id)
    following = db.execute("SELECT follow_id FROM follow WHERE user_id=:user_id", user_id=user_id)
    # Return user info
    return description, username, posts, picture, followers, following
779fe44780b66a0b2daa0e1f5e0c6d1bc6115ae6 | Python | thelmuth/cs110-spring-2020 | /Class39/hey_thats_my_fish.py | UTF-8 | 11,768 | 3.828125 | 4 | [] | no_license | """
hey_thats_my_fish.py
Graphical implementation of the board game Hey, That's My Fish!
Written in class
April 2019
"""
from cs110graphics import *
import random, math
# window size in pixels
WIN_WIDTH = 800
WIN_HEIGHT = 900
# hexagon geometry derived from the window width (8 hexes across)
HEX_WIDTH = WIN_WIDTH // 8
HALF_HEX_WIDTH = HEX_WIDTH // 2
HEX_HEIGHT = int(2 * HALF_HEX_WIDTH / math.sqrt(3)) * 2
HEX_SIDE_LENGTH = HEX_HEIGHT // 2
# board grid: tiles occupy alternating (row + col) parity cells
BOARD_ROWS = 8
BOARD_COLS = 15
class HeyThatsMyFish:
"""Implements Hey That's My Fish!"""
    def __init__(self, win):
        """Set up the board, players, turn text and score displays in *win*."""
        win.set_background("darkblue")
        self._win = win
        # This tells us if we're still playing the initial penguins
        self._still_in_setup = True
        # This keeps track of which tile is selected to move a penguin from
        self._current_tile = None
        # Create board as a hexagonal grid of tiles
        self._board = []
        for r in range(BOARD_ROWS):
            row = []
            # Even rows have 7 tiles, odd rows have 8 tiles
            for c in range(BOARD_COLS):
                # Skip even columns in even rows, and odd columns in odd rows
                if (r + c) % 2 == 0:
                    tile = None
                else:
                    tile = Tile(win, self, r, c)
                row.append(tile)
            self._board.append(row)
        # Player colors
        self._players = [Player("red"), Player("green")]
                         #Player("yellow"), Player("purple")]
        self._player = 0
        # Number of penguins each player has played so far.
        self._penguin_number = 1
        # Add text at bottom describing who's turn it is, etc.
        color = (self._players[self._player]).get_color()
        self._text = Text(self._win, "{} player's turn to add penguin {}.".format(color, self._penguin_number),
                          32, (WIN_WIDTH // 2, 750))
        self._text.set_color(color)
        self._win.add(self._text)
        # Make score per player
        self._scores = []
        x_coord = 300  # score displays are laid out left to right, 100px apart
        for player in self._players:
            score_text = Text(self._win, "0", 32, (x_coord, 800))
            x_coord += 100
            score_text.set_color(player.get_color())
            self._scores.append(score_text)
            self._win.add(score_text)
def handle_tile_click(self, tile):
"""Handles when a tile gets clicked on, which calls this method."""
if self._still_in_setup:
self.play_initial_penguin(tile)
else:
self.handle_penguin_move(tile)
    def play_initial_penguin(self, tile):
        """Handles the first placements of penguins during game setup."""
        # Check if this tile already has a penguin, and if so, skip
        if tile.get_penguin() != None:
            return
        # Check that tile has one fish
        if tile.get_fish() != 1:
            return
        # Add correctly-colored penguin to the tile
        color = (self._players[self._player]).get_color()
        tile.add_penguin(color)
        # Update the player
        self._player = (self._player + 1) % len(self._players)
        # Update the penguin number
        if self._player == 0:
            self._penguin_number += 1
        next_color = (self._players[self._player]).get_color()
        # Update text:
        self._text.set_text("{} player's turn to add penguin {}.".format(next_color, self._penguin_number))
        self._text.set_color(next_color)
        # penguin_number only advances after every player has placed one, so
        # reaching 3 means each player has placed two penguins
        if self._penguin_number >= 3:
            # Not in setup anymore
            self._still_in_setup = False
            # NOTE(review): the second .format argument is unused by this
            # format string -- harmless, but presumably a leftover.
            self._text.set_text("{} player's move.".format(next_color, self._penguin_number))
def handle_penguin_move(self, tile):
"""Take care of moving penguins during the game itself."""
# Check if a tile is selected yet
if self._current_tile == None:
# Check if tile has a penguin. If not, ignore this click
# Also check that the penguin color is the same as the current player
if tile.get_penguin() != (self._players[self._player]).get_color():
return
self._current_tile = tile
tile.toggle_selected()
else:
# Check if the tile is selected already. If so, unselect it
if tile.is_selected():
tile.toggle_selected()
self._current_tile = None
else:
# We need to move the penguin to this tile, as long as it isn't
# already occupied
if tile.get_penguin() != None:
return
# Check that move is legal, in that it is on a hexagonal line
# and there are no gaps or penguins between start and destination
if not self.legal_move(self._current_tile, tile):
return
self.make_move(tile)
# update the player
self._player = (self._player + 1) % len(self._players)
next_color = (self._players[self._player]).get_color()
self._text.set_color(next_color)
self._text.set_text("{} players move.".format(next_color))
# Display current score
    def legal_move(self, start_tile, destination_tile):
        """Checks if moving from start_tile to destination tile is a legal move.
        Returns True if move is legal, and False otherwise.

        A move is legal when the destination lies on one of the hex lines
        through the start tile (same row, or one of the four diagonals)
        and every tile strictly between them is present (not removed) and
        holds no penguin.
        """
        start_row = start_tile.get_row()
        start_col = start_tile.get_col()
        dest_row = destination_tile.get_row()
        dest_col = destination_tile.get_col()
        # Check if start and destination are in the same row
        if start_row == dest_row:
            current_col = start_col
            # Find the change in direction we need to check.
            # Neighbors within a row are 2 columns apart because tiles only
            # exist where (row + col) is odd (see the board construction).
            if start_col < dest_col:
                delta_col = 2
            else:
                delta_col = -2
            # Keep changing the current_col by delta until either we hit the
            # destination or a tile that is missing or has a penguin.
            # NOTE(review): assumes start_col and dest_col share parity
            # (true for tiles in one row on this board); otherwise this
            # scan would step past dest_col -- confirm.
            while current_col != dest_col:
                current_col += delta_col
                if current_col == dest_col:
                    return True
                # Check if the current_col tile is removed or a penguin
                current_tile = self._board[start_row][current_col]
                if current_tile.get_penguin() != None or current_tile.is_removed():
                    return False
        else:
            # Check the diagonals: each (row, col) delta walks one of the
            # four hex directions that leave the row.
            diagonal_deltas = [(1, 1), (1, -1), (-1, 1), (-1, -1)]
            for (delta_row, delta_col) in diagonal_deltas:
                current_row = start_row + delta_row
                current_col = start_col + delta_col
                # Keep changing column and row by deltas until we either hit
                # the destination or a tile that is sunk or a tile with
                # a penguin or fall off the map
                while current_row >= 0 and current_row < BOARD_ROWS and \
                   current_col >= 0 and current_col < BOARD_COLS:
                    # Check if at destination
                    if current_row == dest_row and current_col == dest_col:
                        return True
                    # Check if currently considered tile is removed or a penguin
                    current_tile = self._board[current_row][current_col]
                    if current_tile.get_penguin() != None or current_tile.is_removed():
                        break
                    # update the row and column
                    current_row += delta_row
                    current_col += delta_col
        # We've checked all 4 directions, and none are legal moves.
        return False
def make_move(self, tile):
"""This moves the penguin on self._current_tile to tile."""
# Update the current player's score
player = self._players[self._player]
fish = self._current_tile.get_fish()
player.add_to_score(fish)
score_to_update = self._scores[self._player]
score_to_update.set_text(str(player.get_score()))
# Get the color of the first selected tile
color = self._current_tile.get_penguin()
# Put penguin of this color on the new tile
tile.add_penguin(color)
# Remove the old tile from the window, and make it not current
self._current_tile.remove()
self._current_tile = None
class Player:
    """One participant in HTMF: a display color plus a running fish score."""

    def __init__(self, color):
        self._color = color  # penguin/display color for this player
        self._score = 0      # fish collected so far

    def get_color(self):
        """Return this player's color string."""
        return self._color

    def get_score(self):
        """Return the number of fish collected so far."""
        return self._score

    def add_to_score(self, fish):
        """Credit *fish* additional fish to this player's total."""
        self._score = self._score + fish
class Tile(EventHandler):
    """A tile in HTMF: a clickable hexagon holding 1-3 fish and maybe a penguin."""

    def __init__(self, win, game, row, col):
        EventHandler.__init__(self)
        self._win = win
        self._game = game
        self._row = row
        self._col = col
        self._penguin = None    # color of the penguin sitting here, or None
        self._selected = False  # True while this tile is the chosen source
        self._removed = False   # True once this tile has been sunk
        self._fish = random.randint(1, 3)  # each tile holds 1-3 fish
        self.center_from_row_col()
        # The tile body is a circle standing in for a hexagon.
        body = Circle(win, 47, self._center)
        body.set_border_color("lightblue")
        body.set_border_width(5)
        body.set_depth(50)
        body.add_handler(self)
        self._hex = body
        # Fish-count label drawn on top of the tile body.
        label = Text(win, str(self._fish), 20, self._center)
        label.set_depth(30)
        label.add_handler(self)
        self._text = label
        self._win.add(self._hex)
        self._win.add(self._text)

    def get_row(self):
        """Return this tile's board row."""
        return self._row

    def get_col(self):
        """Return this tile's board column."""
        return self._col

    def get_penguin(self):
        """Return the color of the penguin here, or None if empty."""
        return self._penguin

    def get_fish(self):
        """Return the number of fish on this tile."""
        return self._fish

    def is_selected(self):
        """Return True while this tile is the selected move source."""
        return self._selected

    def is_removed(self):
        """Return True once this tile has been removed from play."""
        return self._removed

    def center_from_row_col(self):
        """Compute this tile's pixel center from its row and column."""
        x = HALF_HEX_WIDTH + HALF_HEX_WIDTH * self._col
        y = HEX_SIDE_LENGTH + (3 * HEX_SIDE_LENGTH * self._row // 2)
        self._center = (x, y)

    def add_penguin(self, color):
        """Place a penguin of the given color on this tile."""
        self._penguin = color
        cx, cy = self._center
        # Draw the penguin as a small filled circle below the tile center.
        shape = Circle(self._win, 12, (cx, cy + 28))
        shape.set_fill_color(color)
        shape.set_depth(10)
        shape.add_handler(self)
        self._penguin_shape = shape
        self._win.add(shape)

    def handle_mouse_press(self, event):
        """Forward a click on this tile to the game controller."""
        self._game.handle_tile_click(self)

    def toggle_selected(self):
        """Flip the selection state and recolor the border to match."""
        self._selected = not self._selected
        self._hex.set_border_color(self._penguin if self._selected else "lightblue")

    def remove(self):
        """Remove the visual components of the tile from the window."""
        for part in (self._hex, self._text, self._penguin_shape):
            self._win.remove(part)
        self._removed = True
def main(win):
    """Entry point: size the window, then build and start the game."""
    # Configure the window dimensions before constructing the game on it.
    win.set_height(WIN_HEIGHT)
    win.set_width(WIN_WIDTH)
    HeyThatsMyFish(win)
if __name__ == '__main__':
    """ When using cs110graphics, replace the usual line with this one: """
    # Start the cs110graphics event loop with `main` as the entry callback.
    StartGraphicsSystem(main)
| true |
70c9933ac606f1be26493ededbdf6e848a4f6635 | Python | HangJie720/Tensorflow_basic | /examples/tutorials/mnist/deeplayer_cnn_mnistprediction.py | UTF-8 | 4,856 | 2.859375 | 3 | [] | no_license | import argparse
import sys
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
FLAGS = None
# Parameters (training hyperparameters used by main())
learning_rate = 0.001  # step size for the Adam optimizer
training_iters = 200000  # stop after this many training examples are seen
batch_size = 128  # examples per gradient update
display_step = 10  # print loss/accuracy every N steps
def deepnn(x):
    """Build the LeNet-style convnet graph for classifying MNIST digits.

    Args:
        x: float32 tensor of shape (N, 784) -- flattened 28x28 grayscale
           images.

    Returns:
        A pair (y_conv, keep_prob): y_conv is a (N, 10) tensor of
        *unscaled logits* (one score per digit class) and keep_prob is
        the scalar placeholder that controls dropout.
    """
    # Reshape to use within a convolutional neural net.
    # Last dimension is for "features" - there is only one here, since images
    # are grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.
    x_image = tf.reshape(x, [-1, 28, 28, 1])

    # First convolutional layer - maps one grayscale image to 32 feature maps.
    W_conv1 = weight_variable([5, 5, 1, 32])
    b_conv1 = bias_variable([32])
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)

    # Pooling layer - downsamples by 2X.
    h_pool1 = max_pool_2x2(h_conv1)

    # Second convolutional layer -- maps 32 feature maps to 64.
    W_conv2 = weight_variable([5, 5, 32, 64])
    b_conv2 = bias_variable([64])
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)

    # Second pooling layer.
    h_pool2 = max_pool_2x2(h_conv2)

    # Fully connected layer 1 -- after 2 rounds of downsampling, our 28x28
    # image is down to 7x7x64 feature maps -- maps this to 1024 features.
    W_fc1 = weight_variable([7 * 7 * 64, 1024])
    b_fc1 = bias_variable([1024])
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

    # Dropout - controls the complexity of the model, prevents co-adaptation
    # of features.
    keep_prob = tf.placeholder(tf.float32)
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    # Map the 1024 features to 10 classes, one per digit.
    W_fc2 = weight_variable([1024, 10])
    b_fc2 = bias_variable([10])
    # BUG FIX: return raw logits instead of softmax probabilities. The
    # caller's loss uses tf.nn.softmax_cross_entropy_with_logits, which
    # applies softmax internally -- feeding it softmax output applied
    # softmax twice and produced a wrong (badly scaled) loss. Accuracy via
    # tf.argmax is unaffected, since softmax is monotonic.
    y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
    return y_conv, keep_prob
def weight_variable(shape):
    """weight_variable generates a weight variable of a given shape."""
    # Small truncated-Gaussian noise breaks symmetry at initialization.
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
    """bias_variable generates a bias variable of a given shape."""
    # A slightly positive constant bias keeps ReLU units active early on.
    return tf.Variable(tf.constant(0.1, shape=shape))
def conv2d(x, W):
    """conv2d returns a 2d convolution layer with full stride"""
    # Stride 1 everywhere; SAME padding preserves the spatial size.
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
    """max_pool_2x2 downsamples a feature map by 2X."""
    # 2x2 window with stride 2 halves each spatial dimension.
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def main(_):
    """Train the MNIST convnet and report accuracy on a test slice.

    Reads MNIST from FLAGS.data_dir, trains with Adam in mini-batches of
    `batch_size` until `training_iters` examples have been processed,
    printing minibatch loss and accuracy every `display_step` steps.
    (Removed a stale commented-out copy of the training loop.)
    """
    # import data
    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)

    # Create the model: x holds flattened images, y_ the one-hot labels.
    x = tf.placeholder(tf.float32, [None, 784])
    y_ = tf.placeholder(tf.float32, [None, 10])

    # Build the graph for the deep net
    y_conv, keep_prob = deepnn(x)

    # Define loss and optimizer.
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
    train_step = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cross_entropy)

    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Initializing the variable
    init = tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(init)
        step = 1
        while step * batch_size < training_iters:
            batch = mnist.train.next_batch(batch_size)
            # Train with dropout (keep half the fully-connected units)...
            train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
            if step % display_step == 0:
                # ...but evaluate with dropout disabled (keep_prob=1.0).
                train_accuracy = accuracy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
                loss = cross_entropy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
                print("Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
                      "{:.6f}".format(loss) + ", Training Accuracy= " + \
                      "{:.5f}".format(train_accuracy))
            step += 1
        print("Optimization finished!")
        # Evaluate on a 256-image slice of the test set to limit memory use.
        print("Test accuracy:", accuracy.eval(feed_dict={x: mnist.test.images[:256], y_: mnist.test.labels[:256], keep_prob: 1.0}))
if __name__ == '__main__':
    # Parse only the flags we define; pass everything else through to
    # tf.app.run so TensorFlow's own flags keep working.
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', type=str, default='/tmp/', help=('Directory for storing input data'))
    FLAGS, unparsed = parser.parse_known_args()
    # FIX: removed stray " | true" text (extraction artifact) that was fused
    # onto the end of this line -- it would raise NameError at runtime.
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)