blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
8b5a14f144b619b9fb39d9cf6784723eeddf81ff | Python | dongyingdepingguo/PTA | /pta_practice/PTA1039.py | UTF-8 | 1,498 | 4.28125 | 4 | [] | no_license | # !/usr/bin/env python
# _*_ coding: utf-8 _*_
"""
1039 到底买不买 (20 分)
小红想买些珠子做一串自己喜欢的珠串。卖珠子的摊主有很多串五颜六色的珠串,但是不肯把任何一串拆散了卖。于是小红要你帮忙判断一下,
某串珠子里是否包含了全部自己想要的珠子?如果是,那么告诉她有多少多余的珠子;如果不是,那么告诉她缺了多少珠子。
为方便起见,我们用[0-9]、[a-z]、[A-Z]范围内的字符来表示颜色。例如在图1中,第3串是小红想做的珠串;那么第1串可以买,
因为包含了全部她想要的珠子,还多了8颗不需要的珠子;第2串不能买,因为没有黑色珠子,并且少了一颗红色的珠子。
输入格式:
每个输入包含 1 个测试用例。每个测试用例分别在 2 行中先后给出摊主的珠串和小红想做的珠串,两串都不超过 1000 个珠子。
输出格式:
如果可以买,则在一行中输出 Yes 以及有多少多余的珠子;如果不可以买,则在一行中输出 No 以及缺了多少珠子。其间以 1 个空格分隔。
输入样例 1:
ppRYYGrrYBR2258
YrR8RrY
输出样例 1:
Yes 8
输入样例 2:
ppRYYGrrYB225
YrR8RrY
输出样例 2:
No 2
"""
# Read the vendor's bead string and the string of beads we want.
a = input()
b = input()

# Try to match every wanted bead against the vendor string, consuming one
# occurrence per match; count the wanted beads that cannot be matched.
missing = 0
for bead in b:
    if bead in a:
        a = a.replace(bead, "", 1)
    else:
        missing += 1

if missing != 0:
    print('No %s' % missing)
else:
    # every wanted bead was matched; what remains in `a` is surplus
    print('Yes %s' % len(a))
367dfc1c962e76d9e30d9f31b3c79435323ba0b2 | Python | MartinDowell/Test1 | /Python Files/Monty Test 3.py | UTF-8 | 226 | 3.40625 | 3 | [] | no_license | #Monty Test 3
# Demo script: show a dict's key view, then collect every value into `foo`.
shop = {
    'apples' : [2, 11],
    'pears': 7,
    'bananas' : 5,
    'raspberries' : 8,
    'strawberries' : 9}

gash = shop.keys()
print (gash)

# Values in insertion order; list() replaces the manual append loop.
foo = list(shop.values())
print (foo)
| true |
14168c07119d6147b9d7ebd8c1a3470370ac9fea | Python | TJmask/Space-Health-Predicting | /Codes/generate_data_500ms_nor.py | UTF-8 | 11,604 | 2.8125 | 3 | [
"MIT"
] | permissive | import os
import glob
import pandas as pd
import numpy as np
import scipy as sp
from scipy.interpolate import interp1d
from datetime import timedelta
from sklearn import preprocessing
from sklearn.preprocessing import MinMaxScaler
# import matplotlib.pyplot as plt
import warnings
from pandas.core.common import SettingWithCopyWarning
warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)
####### define my own sliding window function
def sliding_window(data, buck_size, step):
    '''
    author: Jie Tang

    Slice a sorted pandas Series of timestamps into overlapping windows.

    inputs:
    data: pandas Series of time points (seconds), assumed ascending
    buck_size: the length of each window, e.g. 0.5 for 500ms
    step: extra offset added per iteration when advancing the window start

    returns: a list of Series, one per window; an empty list when the data
    is too short to cover even one window.

    Fix vs. original: removed the dead `temp_len` computation, which ran an
    unused `between()` scan on every call.
    '''
    list_all = []
    tmp = []
    # Fewer than two samples can never form a window.
    if len(data) <= 1:
        return tmp
    # The whole recorded span must exceed one window length.
    if data.iloc[-1] - data.iloc[0] <= buck_size:
        return tmp
    for i in range(len(data)):
        # Window start drifts forward by i*step relative to the i-th sample.
        left = data.iloc[i] + i * step
        right = left + buck_size
        series1 = data[data.between(left, right)]
        # Skip windows that captured fewer than 2 samples.
        if len(series1) < 2:
            continue
        # Stop once the remaining span cannot hold a full window.
        if data.iloc[-1] - left < buck_size:
            break
        list_all.append(series1)
    return list_all
def generate_features(time_scale_train,
                      time_ahead,
                      sampling_rate,
                      time_gap,
                      time_step):
    '''
    author: Jie Tang
    inputs:
    time_scale_train: the length of time used for training. e.g 500ms, 700ms
    time_ahead: time in advance to predict
    sampling_rate: sampling rate in each time_scale_train, our default value is 50
    time_gap: this is used to exclude those consecutive crash events happened less than time_gap. Since when two crash events happend so closely,
    we could not generate enough data to train
    time_step: the step we will move when we do sliding window
    '''
    np.set_printoptions(suppress=True)
    # suffix used in the output pickle file names, e.g. "300" for 0.3s ahead
    for_names = str(int(time_ahead*1000))
    data_all = pd.read_csv('data/data_all.csv')
    # data_all = data_all.iloc[:70000, :]
    ##### extract all needed columns ######
    neededCols = ['seconds','trialPhase','currentPosRoll','currentVelRoll','calculated_vel',\
                  'joystickX', 'peopleName', 'trialName', 'peopleTrialKey', 'datetimeNew']
    data_needed = data_all[neededCols]
    data_needed['datetimeNew'] = pd.to_datetime(data_needed['datetimeNew'])
    # drop trialPhase == 1 rows (kept rows are treated as human behavior)
    data_needed = data_needed[data_needed.trialPhase!=1]
    data_needed.set_index('datetimeNew', inplace =True)
    #### normalize data
    # min_max_scaler = MinMaxScaler()
    # column_names_to_normalize = ['currentPosRoll', 'currentVelRoll', 'calculated_vel', 'joystickX']
    # x = data_needed[column_names_to_normalize].values
    # x_scaled = min_max_scaler.fit_transform(x)
    # df_temp = pd.DataFrame(x_scaled, columns=column_names_to_normalize, index = data_needed.index)
    # data_needed[column_names_to_normalize] = df_temp
    # fixed-scale normalization; NOTE(review): assumes roll position is bounded
    # by +/-60 and roll velocity by +/-180 -- confirm against the recording setup
    data_needed['currentPosRoll'] =data_needed['currentPosRoll']/60
    data_needed['currentVelRoll'] = data_needed['currentVelRoll']/180
    ##### define crash event by trialPhase
    crashed_on_trialPhase = data_needed[data_needed.trialPhase==4]
    ##### excluding the trialPhase=1 and the left are human being behavior
    crashed_on_trialPhase = crashed_on_trialPhase[crashed_on_trialPhase.trialPhase!=1]
    ###### use the data based on trialPhase and get all the unique peopleTrialKey has a crash
    crash_event = crashed_on_trialPhase
    peopleTrialHasCrash = crash_event.peopleTrialKey.unique()
    ####### give a threshold of a time interval between two consecutive crashes within one trial #######
    crash_excludeShortT = pd.DataFrame()
    for x in range(len(peopleTrialHasCrash)):
        trial = peopleTrialHasCrash[x]
        df = crash_event[crash_event.peopleTrialKey == trial]
        # time since the previous crash in the same trial (0 for the first one)
        df['seconds_shift'] = df['seconds'].shift(1)
        df.fillna(0, inplace=True)
        df['time_gap'] = df['seconds'] - df['seconds_shift']
        # now we try to set as 2 seconds as a short-time crash
        df = df[df.time_gap > time_gap]
        crash_excludeShortT = pd.concat([crash_excludeShortT, df])
    # ---- positive (crash) examples: one window ending time_ahead before each crash ----
    crash_feature_label_300ms_500ms_test = pd.DataFrame()
    peopleTrialHasCrash_ex = crash_excludeShortT.peopleTrialKey.unique()
    for num in range(len(peopleTrialHasCrash)):
        j = peopleTrialHasCrash[num]
        print(num)
        for i in (crash_excludeShortT.loc[
                ((crash_excludeShortT['peopleTrialKey'] == j)),'seconds']):
            # slice of length time_scale_train ending time_ahead before the crash at second i
            temp_df = pd.concat([data_needed[(data_needed.seconds <= i - time_ahead) &\
                (data_needed.seconds >= i - time_scale_train - time_ahead) \
                &(data_needed['peopleTrialKey'] == j)]])
            ##### resample & interpolate
            temp_df = temp_df[['seconds', 'currentVelRoll','currentPosRoll','calculated_vel','joystickX','peopleTrialKey']]
            if len(temp_df) < 2:
                continue
            x = temp_df.seconds
            y_calculated_vel = temp_df.calculated_vel
            y_org_vel = temp_df.currentVelRoll
            y_currentPosRoll = temp_df.currentPosRoll
            y_joystickX = temp_df.joystickX
            # resample each channel to exactly sampling_rate evenly spaced points
            new_x = np.linspace(x.min(), x.max(), sampling_rate)
            new_y_calculated_vel = sp.interpolate.interp1d(x, y_calculated_vel, kind='linear')(new_x)
            # new_y_calculated_vel = preprocessing.normalize(new_y_calculated_vel, axis=0)
            new_y_original_vel = sp.interpolate.interp1d(x, y_org_vel, kind='linear')(new_x)
            # new_y_original_vel = preprocessing.normalize(new_y_original_vel, axis=0)
            new_y_currentPosRoll = sp.interpolate.interp1d(x, y_currentPosRoll, kind='linear')(new_x)
            # new_y_currentPosRoll = preprocessing.normalize(new_y_currentPosRoll, axis=0)
            new_y_joystickX = sp.interpolate.interp1d(x, y_joystickX, kind='linear')(new_x)
            # new_y_joystickX = preprocessing.normalize(new_y_joystickX, axis=0)
            # (sampling_rate, 3) feature matrices; label 1 = crash
            arr1 = np.dstack([new_y_calculated_vel, new_y_currentPosRoll, new_y_joystickX]).reshape(sampling_rate,3)
            arr2 = np.dstack([new_y_original_vel, new_y_currentPosRoll, new_y_joystickX]).reshape(sampling_rate,3)
            arr3 = 1
            arr4 = temp_df['peopleTrialKey'].iloc[0]
            arr5 = temp_df['seconds'].iloc[0]
            arr6 = temp_df['seconds'].iloc[-1]
            crash_feature_label_300ms_500ms_test = pd.concat(\
                [crash_feature_label_300ms_500ms_test, pd.DataFrame([[arr1, arr2, arr3, arr4, arr5, arr6]],\
                columns=["features_cal_vel","features_org_vel",'label', 'peopleTrialKey', 'start_seconds', 'end_seconds']) ])
    ## save it as pickle for training
    crash_feature_label_300ms_500ms_test.to_pickle('data/data_500ms/'+ 'crash_feature_label_'+ for_names+ 'ms' + '_500ms_test')
    ####### crash event data info ######
    peopleTrialHasCrash_ex = crash_excludeShortT.peopleTrialKey.unique()
    # ---- negative (non-crash) examples, sliding windows between consecutive crashes ----
    noncrash_feature_label_300ms_500ms_test = pd.DataFrame()
    for num in range(len(peopleTrialHasCrash_ex)):
        j = peopleTrialHasCrash_ex[num]
        print(num)
        df = crash_excludeShortT[crash_excludeShortT.peopleTrialKey == j]
        df['seconds_shift'] = df['seconds'].shift(1)
        df.fillna(0, inplace=True)
        df['time_gap'] = df['seconds'] - df['seconds_shift']
        df_trial = data_needed[(data_needed['peopleTrialKey'] == j)]
        for i in (crash_excludeShortT.loc[
                (crash_excludeShortT['peopleTrialKey'] == j),'seconds']):
            # non-crash region: from the previous crash up to the start of the
            # pre-crash training window of the current crash at second i
            left = df.seconds_shift[df.seconds==i].iloc[0]
            right = i - time_scale_train - time_ahead
            noncrash_time_range = [left, right]
            temp_serie = df_trial.loc[(df_trial.seconds>=left) & (df_trial.seconds<=right) \
                ,'seconds']
            ## run sliding window on noncrash event
            list_all = sliding_window(temp_serie, time_scale_train, time_step)
            if len(list_all) <1:
                break
            for x in range(len(list_all)):
                temp_df = df_trial[(df_trial.seconds >= list_all[x].iloc[0])\
                    & (df_trial.seconds <= list_all[x].iloc[-1])]
                ##### resample & interpolate
                temp_df = temp_df[['seconds', 'currentVelRoll', 'currentPosRoll','calculated_vel','joystickX','peopleTrialKey']]
                # NOTE(review): `x` is rebound here, shadowing the loop index above;
                # safe only because the index is consumed before this line -- consider renaming
                x = temp_df.seconds
                y_calculated_vel = temp_df.calculated_vel
                y_org_vel = temp_df.currentVelRoll
                y_currentPosRoll = temp_df.currentPosRoll
                y_joystickX = temp_df.joystickX
                new_x = np.linspace(x.min(), x.max(), sampling_rate)
                new_y_calculated_vel = sp.interpolate.interp1d(x, y_calculated_vel, kind='linear')(new_x)
                # new_y_calculated_vel = preprocessing.normalize(new_y_calculated_vel, axis=0)
                new_y_original_vel = sp.interpolate.interp1d(x, y_org_vel, kind='linear')(new_x)
                # new_y_original_vel = preprocessing.normalize(new_y_original_vel, axis=0)
                new_y_currentPosRoll = sp.interpolate.interp1d(x, y_currentPosRoll, kind='linear')(new_x)
                # new_y_currentPosRoll = preprocessing.normalize(new_y_currentPosRoll, axis=0)
                new_y_joystickX = sp.interpolate.interp1d(x, y_joystickX, kind='linear')(new_x)
                # new_y_joystickX = preprocessing.normalize(new_y_joystickX, axis=0)
                # (sampling_rate, 3) feature matrices; label 0 = non-crash
                arr11 = np.dstack([new_y_calculated_vel, new_y_currentPosRoll, new_y_joystickX]).reshape(sampling_rate,3)
                arr22 = np.dstack([new_y_original_vel, new_y_currentPosRoll, new_y_joystickX]).reshape(sampling_rate,3)
                arr33 = 0
                arr44 = temp_df['peopleTrialKey'].iloc[0]
                arr55 = temp_df['seconds'].iloc[0]
                arr66 = temp_df['seconds'].iloc[-1]
                noncrash_feature_label_300ms_500ms_test = pd.concat(\
                    [noncrash_feature_label_300ms_500ms_test, pd.DataFrame([[arr11, arr22, arr33, arr44, arr55, arr66]],\
                    columns=["features_cal_vel","features_org_vel",'label', 'peopleTrialKey', 'start_seconds', 'end_seconds']) ])
    ## save it as pickle for training
    noncrash_feature_label_300ms_500ms_test.to_pickle('data/data_500ms/'+ 'noncrash_feature_label_'+ for_names+ 'ms' + '_500ms_test')
if __name__ == "__main__":
    # Lead times (seconds) to generate datasets for; the full sweep is
    # kept commented out below.
    # time_early = [0.3, 0.5, 0.7, 0.9, 1.1, 1.3]
    time_early = [0.3]
    for lead in time_early:
        generate_features(time_scale_train = 0.5,
                          time_ahead = lead,
                          sampling_rate = 50,
                          time_gap=2.5,
                          time_step = 0.3)
| true |
7dc6ba8e6e9d5089072dde43a52816fd90f52175 | Python | Shamya/multiLingRep | /libraries/embeddings.py | UTF-8 | 5,078 | 2.921875 | 3 | [] | no_license | import os
import numpy as np
import nltk, re, pprint
import django
import codecs
from gensim import models
from django.utils.encoding import smart_str
from bs4 import BeautifulSoup
from nltk import tokenize
from random import shuffle
from libraries.acs import acs
import time
import pickle
from gensim.parsing import PorterStemmer
global_stemmer = PorterStemmer()
class StemmingHelper(object):
    """
    Helper for the stemming process - from word to stemmed form,
    and vice versa.

    The 'original' form of a stemmed word is the surface form in which
    it has been seen the most number of times in the text.
    """

    # Reverse lookup: stem -> {original word -> times seen}.
    word_lookup = {}

    @classmethod
    def stem(cls, word):
        """
        Stems a word and updates the reverse lookup.
        """
        stemmed = global_stemmer.stem(word)
        # Record this surface form under its stem.
        counts = cls.word_lookup.setdefault(stemmed, {})
        counts[word] = counts.get(word, 0) + 1
        return stemmed

    @classmethod
    def original_form(cls, word):
        """
        Returns the most frequent original form of a word given its
        stemmed version; falls back to the input when the stem is unknown.
        """
        if word in cls.word_lookup:
            counts = cls.word_lookup[word]
            return max(counts.keys(), key=lambda w: counts[w])
        return word
# print os.path.join(os.getcwd(),"/wikiDS/English/AA/AA")
ENG_DIR = "/Users/danielsampetethiyagu/github/OracleMultiLing/wikiDS/English/AA/AA"
ESP_DIR = "/Users/danielsampetethiyagu/github/OracleMultiLing/wikiDS/Spanish/AA/AA"
# File list in each directory
ENG_filenames=os.listdir(ENG_DIR)
ESP_filenames=os.listdir(ESP_DIR)
def preprocess(content):
    """
    Preprocesses raw wiki-dump content:
    (1) Parses through html/xml content and removes it
    (2) Sets words to lowercase
    (3) Sentence tokenization
    (4) Word tokenization

    Parameters: Content read from the file
    Returns : List of sentences, each a list of word tokens
    """
    soup = BeautifulSoup(content, 'html.parser')
    text = soup.get_text().lower()
    raw_sentences = tokenize.sent_tokenize(text.lower())
    # Whitespace-split each sentence into tokens.
    return [sentence.split() for sentence in raw_sentences]
def ACS(multilingual_data):
    """Replace every token with its acs() form, in place, and return the list."""
    for line_number, line in enumerate(multilingual_data):
        multilingual_data[line_number] = [acs(word) for word in line]
    return multilingual_data
def gather_from_dir(Directory):
    """Walk Directory, preprocess every file found, and return all sentences."""
    Sentences = []
    for path, _, file_names in os.walk(Directory):
        for file_name in file_names:
            full_path = path + "/" + file_name
            print(full_path)
            # Files are UTF-8 encoded wiki-extractor output.
            with codecs.open(full_path, 'rb', encoding='utf-8') as doc:
                Sentences += preprocess(doc.read())
    return Sentences
def gather_data():
    """Collect the English and Spanish wiki sentences into one shuffled corpus."""
    # Processed sentences per language (Spanish is gathered first, matching
    # the original run order of the directory walks).
    ESP_sentences = gather_from_dir(ESP_DIR)
    ENG_sentences = gather_from_dir(ENG_DIR)

    # Assimilated corpus of multilingual texts, randomly interleaved.
    multilingual_data = ENG_sentences + ESP_sentences
    shuffle(multilingual_data)
    # multilingual_data = ACS(multilingual_data)
    return multilingual_data
def get_model(multilingual_data):
    """Train a Word2Vec model on the mixed corpus, persist it, and evaluate it.

    Parameters:
    - multilingual_data: list of tokenized sentences (lists of words).

    Word2Vec parameters used below:
    - (sg=0), CBOW is used. Otherwise (sg=1), skip-gram is employed
    - size is the dimensionality of the feature vectors.
    - window is the maximum distance between the current and predicted word within a sentence.
    - min_count => ignore all words with total frequency lower than this.

    Returns the trained gensim Word2Vec model.
    """
    # Persist a model to disk
    fname = "wikiDS/word2Vec.mdl"
    vocabfname = "wikiDS/vocab.pkl"
    model = models.Word2Vec(multilingual_data, size=128, window=5, min_count=5, workers=4)
    model.save(fname)
    vocab = list(model.vocab.keys())
    # Fix: use a context manager so the vocab file is flushed and closed;
    # the original left the handle open for the life of the process.
    # NOTE(review): pickling into a codecs text-mode handle only works with
    # text-compatible protocols; consider opening the file in binary mode.
    with codecs.open(vocabfname, "w", "utf-8") as vocabfile:
        pickle.dump( vocab, vocabfile )
    vocab_len = len(vocab)
    print("Vocab length is ",vocab_len);
    test_model(model)
    return model
def test_model(model):
    # Evaluate the model on gensim's standard analogy test set
    # (questions-words.txt, expected in the current working directory).
    accudict= model.accuracy(os.path.join(os.getcwd(),'questions-words.txt'))
    for i in range(len(accudict)):
        # Only report sections that contained at least one evaluated question.
        if(len(accudict[i]['incorrect'])+len(accudict[i]['correct']) >0):
            print "For category ", accudict[i]['section'], "Accuracy is ", 100*float(len(accudict[i]['correct']))/(len(accudict[i]['incorrect'])+len(accudict[i]['correct']))
def process_and_retrieve_model():
    """Build the shuffled multilingual corpus and train a Word2Vec model on it."""
    return get_model(gather_data())
| true |
34afd3ca93f4a3062cab4da99d67d20665603a85 | Python | ChangtongZhou/School_AI_Projects | /nario.py | UTF-8 | 7,838 | 3.71875 | 4 | [] | no_license | import sys
import argparse
import math
import heapq
class Graph:
    """Grid graph over the walkable board cells, with horizontal wrap-around
    at the left and right edges (10 columns, indices 0..9)."""

    def __init__(self):
        self.states = []

    # Turn the board matrix into an array of coordinates without obstacles
    def makeCoordinates(self):
        nario = Nario()
        board = nario.board
        rows, cols = len(board), len(board[0])
        for x in range(rows):
            for y in range(cols):
                cell = board[x][y]
                # keep a cell only when it holds something that is not a wall
                if cell and cell != "=":
                    self.states.append((x, y))
        return self.states

    # The edges are the four directions: down, right, up, left.
    def getNeighbors(self, state):
        """Return the walkable cells reachable from state in one move."""
        dirs = [(1, 0), (0, 1), (-1, 0), (0, -1)]
        if state != (0, 0):  # the goal cell never wraps
            if state[1] == 0:
                # left edge: stepping left wraps around to column 9
                dirs = [(1, 0), (0, 1), (0, 9), (0, -1)]
            if state[1] == 9:
                # right edge: stepping right wraps around to column 0
                dirs = [(0, -9), (0, 1), (-1, 0), (0, -1)]
        x, y = state
        neighbors = []
        for dx, dy in dirs:
            candidate = (x + dx, y + dy)
            if candidate in self.states:
                neighbors.append(candidate)
        return neighbors
class Nario:
    "A problem agent using A* Search Algorithm"
    def __init__(self):
        # Search bookkeeping: parent links, g-costs, final move sequence.
        self.board = []
        self.came_from = {}
        self.cost_so_far = {}
        self.path_seq = []
        # pass filenames on the command line
        inFile = sys.argv[1]
        # read data from the text file and initialize board
        # self.board is a list
        with open(inFile, 'r') as f:
            data = f.readlines()
        f.closed  # no-op: the with-statement already closed the file
        # Get rid of newline character from the data list
        for i in range(len(data)):
            self.board.append(data[i].strip('\n'))
    def manhattan(self, current_state, goal_state):
        # print "You are using manhattan heuristic"
        # print "Your state state is: ", current_state
        x, y = current_state
        x1, y1 = goal_state
        # Wrap handling: on the right edge of the top row, treat the cell as
        # column 0 for the distance estimate.
        if y == 9: # if ccurrent_state is in the right edge
            if x-1<0 and self.board[x][y]!= "=":
                y = 0
                # NOTE(review): this result is immediately overwritten by the
                # unconditional assignment below -- likely meant to return early
                # or sit in an else branch; confirm intended behavior.
                distance = 1 + abs(x1 - x) + abs(y1 - y)
        distance = abs(x1 - x) + abs(y1 - y)
        # print "The manhattan distance is: ", distance, "\n"
        return distance
    def euclidean(self, current_state, goal_state):
        # print "You are using euclidean heuristic \n"
        x, y = current_state
        x1, y1 = goal_state
        if y == 9: # if ccurrent_state is in the right edge
            if x-1<0 and self.board[x][y]!= "=":
                y = 0
                # NOTE(review): dead assignment, overwritten below (see manhattan).
                distance = 1 + math.sqrt((x1 - x) ** 2 + (y1 - y) ** 2)
        distance = math.sqrt((x1 - x) ** 2 + (y1 - y) ** 2)
        # print "The euclidean distance is: ", distance, "\n"
        return distance
    def my_own(self, current_state, goal_state):
        # print "You are using my_own heuristic \n"
        # Chebyshev-style distance: max of the axis deltas.
        x, y = current_state
        x1, y1 = goal_state
        if y == 9: # if ccurrent_state is in the right edge
            if x-1<0 and self.board[x][y]!= "=":
                y = 0
                # NOTE(review): dead assignment, overwritten below (see manhattan).
                distance = 1 + 1 * max(abs(x1 - x),abs(y1 - y))
        distance = 1 * max(abs(x1 - x),abs(y1 - y))
        # print "my_own distance is: ", distance, "\n"
        return distance
    def aStarSearch(self, graph, heuristic, start, goal):
        """ use the A* algorithm to determine a sequence of moves to take Nario from the bottom row of the board to the # """
        frontier = PriorityQueue()
        frontier.push(start, 0)
        path = []
        self.came_from[start] = None
        self.cost_so_far[start] = 0
        while not frontier.empty():
            current = frontier.pop()
            # print "your current state is: ", current
            path.append(current)
            if current == goal:
                # return self.reconstruct_path(goal)
                path = self.reconstruct_path(goal)
                break
            for next_state in graph.getNeighbors(current):
                # every move has uniform cost 1
                new_cost = self.cost_so_far[current] + 1
                # Check if next_state is explored or not, or the new cost is lesser
                if next_state not in self.cost_so_far or new_cost < self.cost_so_far[next_state]:
                    self.cost_so_far[next_state] = new_cost
                    # heuristic is selected by name (command-line argument)
                    if heuristic == "manhattan":
                        priority = new_cost + self.manhattan(next_state, goal)
                    if heuristic == "euclidean":
                        priority = new_cost + self.euclidean(next_state, goal)
                    if heuristic == "my_own":
                        priority = new_cost + self.my_own(next_state, goal)
                    frontier.push(next_state, priority)
                    self.came_from[next_state] = current
        self.path_seq = path
    def reconstruct_path(self, current_state):
        """ Reconstruct the path recursively by traversing back through the came_from list """
        # Recursion bottoms out when came_from has no entry (KeyError below),
        # which happens one step past the start state.
        try:
            p = self.reconstruct_path(self.came_from[current_state])
            path = []
            path.extend(p)
            path.append(current_state)
            path_len = len(path)
            if path_len > 2:
                new_mv = path[path_len-1]
                prev_mv = path[path_len - 2]
                # print "The new move is: ", new_mv
                # NOTE(review): these wrap messages fire whenever the previous
                # cell sits in column 9 or 0, even when the step did not wrap.
                if prev_mv[1] == 9:
                    print "Nario wraps around by going right off board"
                if prev_mv[1] == 0:
                    print "Nario wraps around by going left off board"
                self.edit_mv(prev_mv, new_mv)
            return path
        except KeyError, e:
            return [current_state]
    # Draw the game board
    def draw(self, board):
        for row in board:
            print row
        print "\n"
    # Draw the game board after new moves
    def edit_mv(self, prev_mv, new_mv):
        # Mark the new position with '@' and clear the previous one to '.'.
        x1, y1 = prev_mv
        x2, y2 = new_mv
        board = self.board
        # new_mv utilities:
        row2 = board[x2]
        s2 = list(row2)
        s2[y2] = '@'
        board[x2]= "".join(s2)
        # prev_mv utilities:
        row1 = board[x1]
        s1 = list(row1)
        s1[y1] = '.'
        board[x1] = "".join(s1)
        self.draw(board)
    # Draw the whole moving process
    def draw_process(self):
        board = self.board
        # path_seq[2:] skips the sentinel and start entries of the path.
        for state in self.path_seq[2:]:
            x, y = state
            row = board[x]
            s = list(row)
            s[y] = '@'
            board[x]= "".join(s)
            self.draw(board)
# PriorityQueue utilities
class PriorityQueue:
    """Min-priority queue built on heapq; items with lower priority pop first."""

    def __init__(self):
        self.elements = []

    def empty(self):
        """True when no items remain."""
        return not self.elements

    def push(self, item, priority):
        """Insert item with the given priority."""
        heapq.heappush(self.elements, (priority, item))

    def pop(self):
        """Remove and return the item with the lowest priority."""
        priority, item = heapq.heappop(self.elements)
        return item
# Tester:
# Build the agent (reads the board file named in sys.argv[1]), show the
# board, then run A* from start cell (8, 5) to goal (0, 0) using the
# heuristic named in sys.argv[2] ("manhattan", "euclidean", or "my_own").
nario = Nario()
board = nario.board
# print "You are using ", sys.argv[2], "heuristic:"
# print "--------------------- The original game board --------------------- \n"
nario.draw(board)
graph = Graph()
graph.makeCoordinates()
# print "--------------------- Conducting A* search ---------------------"
nario.aStarSearch(graph, sys.argv[2], (8, 5), (0, 0))
print "--------------------- The total process ---------------------"
nario.draw_process()
# print "--------------------- The total path ---------------------"
# print nario.path_seq
| true |
class Aluno:
    """Simple student record holding a name and an e-mail address."""

    def __init__(self, nome, email="default@gmail.com"):
        # Name-mangled attributes keep the fields nominally private.
        self.__nome = nome
        self.__email = email

    def getNome(self):
        """Return the student's name."""
        return self.__nome

    def getEmail(self):
        """Return the student's e-mail address."""
        return self.__email

    def setNome(self, nome):
        """Replace the student's name."""
        self.__nome = nome

    def setEmail(self, email):
        """Replace the student's e-mail address."""
        self.__email = email

    def __str__(self):
        return "({}, {})".format(self.__nome, self.__email)
| true |
f918f1673d8a0781afbd3a4d46d8af558e9ce88c | Python | ISPritchin/Olympiad | /900/Оформлено/Сеть компании Bmail.py | UTF-8 | 150 | 2.875 | 3 | [] | no_license | n = int(input())
a = [0, 0] + list(map(int, input().split()))
b = []
i = n
while a[i] != 0:
b.append(i)
i = a[i]
b.append(1)
print(*b[::-1])
| true |
be410fa2ef91e246c21161db1c0b0ca5714d9ba9 | Python | talepre/AIProg | /module3/board.py | UTF-8 | 3,801 | 3.0625 | 3 | [] | no_license | from itertools import combinations, izip_longest
from probleminstance import Probleminstance
class Board():
    '''
    Class responsible for parsing the board from a text file, and making the domain and constraint dictonaries.
    '''
    def __init__(self, file):
        '''
        Initializes with a file, and reads its data.
        '''
        file = open(file, 'r')
        self.file_data = file.readlines()
        file.close()
    def parse_text_file(self):
        '''
        Parses the text file.

        Line 0 holds "<columns> <rows>"; the next number_of_rows lines hold
        the row clues (bottom-up, hence the reverse below) and the remaining
        lines hold the column clues.
        '''
        dimensions = self.file_data[0].replace("\n", "").split(" ")
        self.number_of_rows = int(dimensions[1])
        self.number_of_columns = int(dimensions[0])
        self.rows_info = list()
        self.columns_info = list()
        file_index = 1
        # Read one clue line per row.
        while True:
            row = self.file_data[file_index].replace("\n", "").split(" ")
            self.rows_info.append([int(x) for x in row])
            file_index += 1
            if file_index > self.number_of_rows:
                break
        # The remaining lines are the column clues.
        while file_index < len(self.file_data):
            column = self.file_data[file_index].replace("\n", "").split(" ")
            self.columns_info.append([int(x) for x in column])
            file_index += 1
        # Clues are listed bottom-up in the file; flip to top-down order.
        self.rows_info.reverse()
        # Variables are keyed (0, i) for columns and (1, i) for rows.
        column_dict = self.make_domain_dict(self.number_of_rows, self.number_of_columns, self.columns_info, 0)
        row_dict = self.make_domain_dict(self.number_of_columns, self.number_of_rows, self.rows_info, 1)
        self.domain_dict = dict(row_dict.items() + column_dict.items())
        self.constraint_dict = {}
        self.make_constraint_dict(row_dict, column_dict)
    def make_domain_dict(self, size, elements, info, num):
        '''
        Creates a dictionary of all the possible domains for all rows and columns.

        size: length of each line (cells), elements: how many lines,
        info: clue list per line, num: 0 for columns / 1 for rows.
        Each domain value is a 0/1 list of length `size`.
        '''
        temp_domain_dict = {}
        for node in range(elements):
            length, block_length, number_of_blocks = self.get_free_spaces(size, info[node])
            # cells not consumed by blocks or the mandatory 1-gap between them
            free_spaces = length - block_length - (number_of_blocks -1)
            blocks = info[node]
            block_representation = list()
            #Get representation of block
            # every block after the first is prefixed with the mandatory 0 gap
            block_num = 0
            for block in blocks:
                block_num += 1
                if block_num > 1:
                    block_repr = [0]
                    nums = [1]*block
                    block_repr.extend(nums)
                else:
                    block_repr = [1]*block
                block_representation.append(block_repr)
            # Enumerate every way to distribute the leftover free spaces into
            # the number_of_blocks + 1 gaps (before, between, after blocks).
            for space_placement in self.space_placement(free_spaces, number_of_blocks + 1):
                spaces_representation = list()
                for spaces in space_placement:
                    space_repr = [0]*spaces
                    spaces_representation.append(space_repr)
                # Interleave gap runs and block runs into one 0/1 line.
                zipped_list = izip_longest(spaces_representation, block_representation)
                domain = list()
                for zipitem in zipped_list:
                    for lists in zipitem:
                        if lists != None:
                            domain.extend(lists)
                if (num, node) in temp_domain_dict:
                    temp_domain_dict[(num, node)].append(domain)
                else:
                    temp_domain_dict[(num, node)] = [domain]
        return temp_domain_dict
    def make_constraint_dict(self, row_dict, column_dict):
        '''
        Makes the dictionary consisting of the constraints.

        Every row variable constrains every column variable and vice versa.
        '''
        for row_node in row_dict:
            for column_node in column_dict:
                if row_node in self.constraint_dict:
                    self.constraint_dict[row_node].append(column_node)
                else:
                    self.constraint_dict[row_node] = [column_node]
                if column_node in self.constraint_dict:
                    self.constraint_dict[column_node].append(row_node)
                else:
                    self.constraint_dict[column_node] = [row_node]
    def get_free_spaces(self, length, block_array):
        '''
        Help method, returns the free spaces of a block.

        Returns (line length, total filled cells, number of blocks).
        '''
        block_length = 0
        number_of_blocks = 0
        for block in block_array:
            number_of_blocks += 1
            block_length += block
        return length, block_length, number_of_blocks
    def space_placement(self, spaces, length):
        '''
        Yields a viable space placement given a row or columns, must be iterated over.

        Classic stars-and-bars enumeration: each yielded tuple has `length`
        non-negative entries summing to `spaces`.
        '''
        for c in combinations(range(spaces + length - 1), length - 1):
            yield tuple(b - a - 1 for a, b in zip((-1,) + c, c + (spaces + length - 1,)))
| true |
6954841512d34408609e3c4589b967ba684195d6 | Python | mlgmya/Python | /百度百科/a.py | UTF-8 | 357 | 2.578125 | 3 | [] | no_license | import requests
from bs4 import BeautifulSoup
# Spoof a browser User-Agent so the site serves the normal page.
headers = {"User-Agent": "Mozilla/5.0(Windows;U;Windows NT 6.0 x64;en-US;rv:1.9pre)Gecko/2008072421 Minefield/3.0.2pre"}

# Fetch the Baidu Baike front page and parse it.
response = requests.get("https://baike.baidu.com/", headers=headers)
soup = BeautifulSoup(response.text)

# Print the href of every anchor tag that actually carries one.
for anchor in soup.findAll("a"):
    if 'href' in anchor.attrs:
        print(anchor.attrs['href'])
print(link.attrs['href']) | true |
e05ab33ac53bee536ea169a7f3617560db6f22ae | Python | rabinwill/Python | /Python_Examples/Queues&Stacks.py | UTF-8 | 1,917 | 4.78125 | 5 | [] | no_license | '''
A palindrome is a word, phrase, number, or other sequence of characters which reads the same backwards and forwards. Can you determine if a given string, s, is a palindrome?
To solve this challenge, we must first take each character in s, enqueue it in a queue, and also push that same character onto a stack. Once that's done, we must dequeue the first character from the queue and pop the top character off the stack, then compare the two characters to see if they are the same; as long as the characters match, we continue dequeueing, popping, and comparing each character until our containers are empty (a non-match means s isn't a palindrome).
Sample Input
racecar
Sample Output
The word, racecar, is a palindrome. '''
import sys
class Solution:
    """Palindrome helper backed by one list used as a stack and one as a queue."""

    def __init__(self):
        # Both containers are plain lists; only the access pattern differs.
        self.stack = []
        self.queue = []

    def pushCharacter(self, char):
        """Push char onto the top of the stack."""
        self.stack.append(char)

    def popCharacter(self):
        """Remove and return the character on top of the stack (LIFO)."""
        return self.stack.pop()

    def enqueueCharacter(self, char):
        """Append char to the back of the queue."""
        self.queue.append(char)

    def dequeueCharacter(self):
        """Remove and return the character at the front of the queue (FIFO)."""
        return self.queue.pop(0)
# read the string s
s = input()

# Create the Solution class object
obj = Solution()

# Mirror every character of s into both the stack and the queue.
for ch in s:
    obj.pushCharacter(ch)
    obj.enqueueCharacter(ch)

# Compare reversed order (stack pops) against original order (queue
# dequeues); any mismatch in the first half means s is not a palindrome.
isPalindrome = True
for _ in range(len(s) // 2):
    if obj.popCharacter() != obj.dequeueCharacter():
        isPalindrome = False
        break

# finally print whether string s is palindrome or not.
if isPalindrome:
    print("The word, " + s + ", is a palindrome.")
else:
    print("The word, " + s + ", is not a palindrome.")
print("The word, "+s+", is not a palindrome.") | true |
7378988b55182fd5f1129391c3d6d49a4f87c959 | Python | casahome2000/wz_jira | /config.py | UTF-8 | 15,646 | 2.625 | 3 | [] | no_license | __author__ = 'casahome2000'
import dateHandler
def calcReleaseDate(prevRelease, currentDuration):
    # NOTE(review): both parameters are currently unused -- the release date
    # is always 10 business days (skipping dateHandler.Holidays) from today;
    # confirm whether prevRelease/currentDuration were meant to drive this.
    date = dateHandler.date_by_adding_business_days(dateHandler.datetime.datetime(dateHandler.dateTODAY.year,
                                                                                  dateHandler.dateTODAY.month,
                                                                                  dateHandler.dateTODAY.day),
                                                    10,
                                                    dateHandler.Holidays)
    # Format as "YYYY-M-D" (no zero padding on month/day).
    releaseDate = "%s-%s-%s" % (date.year, date.month, date.day)
    return releaseDate
class ProjectIssues(object):
    """Static seed data used to bootstrap a pre-development JIRA project.

    Exposes three class-level tables (interface unchanged):

    * ``EPICS["phases"]``      -- one epic per project phase.
    * ``VERSIONS["versions"]`` -- one fix version per phase.
    * ``ISSUES["issues"]``     -- individual tickets, tied to a phase by
      ``epic`` (1-based index) and ``fixversion``.

    The VERSIONS and ISSUES tables were previously ~350 lines of repeated
    dict literals; they are now generated from compact tables below while
    producing exactly the same data.

    NOTE(review): the epic entries spell the key ``durration`` (sic); it is
    preserved because downstream consumers may read that exact key.
    """

    # Canonical phase / fix-version names; index i holds the name for
    # epic number i + 1.
    PHASE_NAMES = [
        "Phase 1: Kick Off",
        "Phase 2: Competitive Analysis",
        "Phase 3: UX Flow",
        "Phase 4: Wireframes",
        "Phase 5: Preliminary Designs",
        "Phase 6: Annotations and Developer Review",
        "Phase 7: Full Designs",
        "Phase 8: Project Wrap",
    ]

    # Long-form epic descriptions are kept verbatim.
    EPICS = {
        "phases": [
            dict(name="Phase 1: Kick Off",
                 description='The kick-off meeting will allow the UX and PM get a foothold on what the client is trying to ' \
                             'accomplish. The PM, UX, and Sales will meet with the client, and review the LEAN CANVAS provided by ' \
                             'sales to the assigned pre-development team before the meeting. This should facilitate the completion ' \
                             'of the Kick-Off Meeting Form, which should be the output from the conclusion of the kick-off meeting. ' \
                             'At this point, enough information should be provided by the client for the team to proceed to Phase 2',
                 summary="Kick Off",
                 fixversion="Phase 1: Kick Off",
                 durration=2),
            dict(name="Phase 2: Competitive Analysis",
                 description="The Competitive Analysis phase will allow the PM and UX define the strategy for the client's " \
                             "application. It will include comprehensive research on what apps are already filling the " \
                             "competitive space, identify what needs and gaps to fill, identify the user personas, and " \
                             "ultimately present to the client the direction the pre-development team feels will provide " \
                             "the best MVP. The PM will initially support the UX by providing research documentation, which " \
                             "will be compiled into a presentation for the client to easily absorb our strategy. It will " \
                             "also produce the groomed Feature List for sign-off with the client. These will ultimately dictate " \
                             "the needed materials to proceed to [Phase III - UX Flow]. Lastly, the client will need to fill " \
                             "out the [Creative Brief] at this point, and be given 2-3 days to complete.",
                 summary="Competitive Analysis",
                 fixversion="Phase 2: Competitive Analysis",
                 durration=5),
            dict(name="Phase 3: UX Flow",
                 description="The UX should now have enough information to begin creating the UX Flow document. Within that " \
                             "period, the PM will proceed to groom the schedule and tickets to ensure the project is " \
                             "staying on track. At the final stage of this phase, the client will review the UX Flow and " \
                             "updated schedule. This phase will require [Milestone Sign-Off] for the UX Flow before proceeding " \
                             "to the next stage: [Phase IV - Wireframes]. Sign off can be approved by e-mail with the client," \
                             " or they can come in for a meeting if they choose to. Please account for meeting hours if the " \
                             "client comes in for review.",
                 summary="UX Flow",
                 fixversion="Phase 3: UX Flow",
                 durration=7),
            dict(name="Phase 4: Wireframes",
                 description="The UX designer now has full project scope in hand, which has been reviewed and signed-off on " \
                             "by the client. This phase defines the project from low-mid fidelity, with the output being " \
                             "fully clickable, partially annotated wireframes to be passed on to the designers for " \
                             "[Phase V - Preliminary Designs]. Additionally, the PM will parallel path, and begin " \
                             "[Phase VI - Annotations and Developer Review]. This will provide comprehensive material and " \
                             "project scope to both verticals to begin the first real preparations for development. " \
                             "Client must approve of this milestone with the [Milestone Sign-Off].",
                 summary="Wireframes",
                 fixversion="Phase 4: Wireframes",
                 durration=15),
            dict(name="Phase 5: Preliminary Designs",
                 description="The preliminary designs will allow the client to choose from 3 alternative designs, " \
                             "from 2 wireframe screens. The client will review, and provide feedback leading to " \
                             "final approval of the overall design direction, which will lead into " \
                             "[Phase VII - Full Design].",
                 summary="Preliminary Designs",
                 fixversion="Phase 5: Preliminary Designs",
                 durration=10),
            dict(name="Phase 6: Annotations and Developer Review",
                 description="This phase will run parallel to [Phase VII - Full Design]. It will serve as the " \
                             "kick-off point to begin project awareness with the developer team. The annotated " \
                             "wireframes should be enough to provide an overview of the application and its " \
                             "functionality to the development teams, and get their assessment of what solutions we " \
                             "will need to consider to get this project into development. Final objective is to make " \
                             "the development team aware of the project, have a good reference document for them to " \
                             "assess project needs, and begin quote production at an early stage for full preparation " \
                             "at [Phase VIII - Project Wrap].",
                 summary="Annotations and Developer Review",
                 fixversion="Phase 6: Annotations and Developer Review",
                 durration=7),
            dict(name="Phase 7: Full Design",
                 description="Full design. Designers flesh out the remaining application based on the approved " \
                             "direction set by [Phase V - Preliminary Designs]. The final output will be the full " \
                             "designed application based on the wireframes, with a full, clickable prototype. " \
                             "This is the final stage of pre-development, but leads to [Phase VIII - Project Wrap], " \
                             "which produces the development quote and pitch for the client to move on to development.",
                 summary="Full Design",
                 fixversion="Phase 7: Full Designs",
                 durration=20),
            dict(name="Phase 8: Project Wrap",
                 description="This is the final stage. Asset delivery, and the final quote to the client for " \
                             "development. Any additional follow up and project grooming that needs to take.",
                 summary="Project Wrap",
                 fixversion="Phase 8: Project Wrap",
                 durration=2)
        ]
    }

    # Duration in business days of each phase, in PHASE_NAMES order.
    _PHASE_DAYS = [2, 5, 7, 15, 10, 7, 20, 2]

    # Only the first phase gets a start date (today); the scheduler fills
    # in the remaining start/release dates later.
    _today = "%s-%s-%s" % (dateHandler.dateTODAY.year,
                           dateHandler.dateTODAY.month,
                           dateHandler.dateTODAY.day)
    # Plain for-loops (not comprehensions) are used on purpose: names
    # defined in a class body are not visible inside comprehension scopes.
    _versions = []
    for _i, (_name, _days) in enumerate(zip(PHASE_NAMES, _PHASE_DAYS)):
        _versions.append(dict(name=_name,
                              # e.g. "Phase 3: UX Flow" -> "UX Flow"
                              description=_name.split(": ", 1)[1],
                              startdate=_today if _i == 0 else None,
                              releasedate=None,
                              duration=_days))
    VERSIONS = {"versions": _versions}

    # (summary, description or None when identical to summary,
    #  duration in hours, epic number)
    _ISSUE_ROWS = [
        ("Meet with Client", None, 2, 1),
        ("Create Project in RedMine", None, 1, 1),
        ("Schedule First Draft", None, 1, 1),
        ("Create Feature List", None, 4, 2),
        ("Create User Personas", None, 4, 2),
        ("Competitive Review", None, 10, 2),
        ("Competitive Review Presentation", None, 8, 2),
        ("Client Review", "Competitive Review Presentation", 4, 2),
        ("Creative Brief", "Competitive Review Presentation", 1, 2),
        ("UX Flow Creation", None, 25, 3),
        ("PM Time Tracking", None, 8, 3),
        ("Schedule Second Draft", None, 2, 3),
        ("Client Review Meeting", None, 2, 3),
        ("Milestone Sign-Off", None, 1, 3),
        ("PM Time Tracking", None, 20, 4),
        ("Wireframes 1", None, 20, 4),
        ("Client Review 1", None, 4, 4),
        ("Wireframes 2", "Client Review 1", 20, 4),
        ("Client Review 2", "Client Review 1", 4, 4),
        ("Final Wireframes", None, 20, 4),
        ("Final Client Review", None, 4, 4),
        ("Milestone Sign-Off", None, 1, 4),
        ("PM Time Tracking", None, 5, 5),
        ("Design Hand-Off", None, 3, 5),
        ("Preliminary Design 1", None, 30, 5),
        ("Preliminary Icons 1", None, 10, 5),
        ("Client Review 1", None, 2, 5),
        ("Preliminary Design 2", None, 12, 5),
        ("Finalize Icon", None, 4, 5),
        ("Final Client Review", None, 2, 5),
        ("Milestone Sign-off", None, 1, 5),  # (sic) lower-case "off" in the original data
        ("UX Time Tracking", None, 5, 6),
        ("Review with UX", None, 1, 6),
        ("Annotated Wireframes", None, 20, 6),
        ("RU PM Project Review", None, 8, 6),
        ("PM Time Tracking", None, 20, 7),
        ("UX Time Tracking", None, 5, 7),
        ("Round 1 Design", None, 80, 7),
        ("Client Review 1", None, 4, 7),
        ("Round 2 Design", None, 30, 7),
        ("Client Review 2", None, 4, 7),
        ("Round 3 Design", None, 20, 7),
        ("Final Review", None, 2, 7),
        ("Milestone Sign-Off", None, 1, 7),
        ("Update Project Page", None, 2, 8),
        ("Development Quote", None, 7, 8),
        ("UX Assets", None, 2, 8),
        ("UI Assets", None, 2, 8),
    ]
    _issues = []
    for _summary, _desc, _hours, _epic in _ISSUE_ROWS:
        _issues.append(dict(summary=_summary,
                            description=_summary if _desc is None else _desc,
                            duration=_hours,
                            unit="hours",
                            epic=_epic,
                            fixversion=PHASE_NAMES[_epic - 1]))
    ISSUES = {"issues": _issues}

    # Drop loop temporaries so they do not linger as class attributes.
    del _versions, _issues, _i, _name, _days, _summary, _desc, _hours, _epic, _today
| true |
a51738a0c0abf17a87ec1f086e1c26e37f861ba0 | Python | dinh/Config-Management | /Vim/bin/vimw-diary-template.py | UTF-8 | 2,054 | 2.78125 | 3 | [] | no_license | #!/usr/bin/env python
# @(#) gen-vimw-diary-template.py
# Last-edited: Mon 2021.01.04.0745 -- Danny Quah (me@DannyQuah.com)
# ----------------------------------------------------------------
# Revision History:
# % Sat 2020.09.26.0659 -- Danny Quah (me@DannyQuah.com)
# First draft: Template for Vimwiki diary
# Original from http://frostyx.cz/posts/vimwiki-diary-template
# In frostyx.cz the vimscript call has '%' at the end for the
# buffer name. But I'm not going to use that (below) so I also
# removed the '%' in my .vimrc call to this script.
# ----------------------------------------------------------------
import sys
import datetime
# ----------------------------------------------------------------
# The diary skeleton; the {placeholders} are filled in from today's date.
myTemplate = """# {theLocalDate}
## Log
({theDayDateTime})
## Daily.Checklist
- [ ] Lisoril, Gluocasamine, Vitamin C
- [ ] Temperature.AM
- [ ] Temperature.PM
## ToDo
- [ ]
## Notes
({theDayDateTime}) Run WCP. KV abs, core // top // 56.7
{theSsDate} 08:02 06.12 '00:37:45 '00:06:10
({theDayDateTime}) Run CW. KV abs, core // top // 57.3
{theSsDate} 07:42 6.03 '00:39:41 '00:06:34
({theDayDateTime}) KV abs, core // top // 57.3
{theSsDate} 06:21 '00:32:30
"""
# ----------------------------------------------------------------
# Date, my way.
# The upstream snippet (frostyx.cz) derived the date from an argv filename
# in YYYY-MM-DD.something format, but that branch misfired when vimwiki
# invoked the script, so the current time is used directly instead.
myNow = datetime.datetime.now()

# strftime pattern for every placeholder used by myTemplate.
_FIELD_FORMATS = {
    "theLocalDate": "%a %d %b %Y",
    "theDateTime": "%Y.%m.%d.%H%M",
    "theDayDateTime": "%a %Y.%m.%d.%H%M",
    "theSsDate": "%d/%m/%Y",
}
myDiaryDict = {field: myNow.strftime(fmt) for field, fmt in _FIELD_FORMATS.items()}

# Splat the dictionary into format() so each placeholder is resolved by name.
print(myTemplate.format(**myDiaryDict))
# eof gen-vimw-diary-template.py
| true |
ae038e4753deb705a1959ae02ff1c39c92b9919c | Python | capJavert/renamer | /renamer.py | UTF-8 | 1,189 | 2.890625 | 3 | [] | no_license | import click
# encoding=utf8
import os
# !/usr/bin/python
# -*- coding: utf8 -*-
__author__ = '@capJavert'
@click.command()
@click.option('--path', '-P', default=".", help='Path to directory where files/directories you want '
                                                'renamed are located. Defaults to current directory '
                                                'if not set. ')
@click.option('--change', '-c', default="", help='Part of the file/directory name you want to replace.')
@click.option('--to', '-t', default="", help='String you want to replace --change string to.')
def main(path, change, to):
    """Rename every entry in *path* whose name contains *change*,
    replacing that substring with *to*."""
    if change == "" or to == "":
        print("You are missing --change or --to params. Use --help for more info.")
        return
    print("Files renamed: ")
    renamed_files_counter = 0
    # os.listdir returns a snapshot list, so renaming while iterating is safe.
    for filename in os.listdir(path):
        if change in filename:  # clearer than filename.find(...) != -1
            renamed_files_counter += 1
            new_name = filename.replace(change, to)
            print(new_name)
            # os.path.join is portable and handles trailing separators,
            # unlike the previous manual path + "/" + name concatenation.
            os.rename(os.path.join(path, filename), os.path.join(path, new_name))
    if renamed_files_counter == 0:
        print("No files where matched.")


if __name__ == '__main__':
    # Guard the CLI entry point so importing this module no longer runs a
    # rename pass as a side effect.
    main()
| true |
dbc56e0be47cda8b7bf851b3e49e7b8ca8b4d058 | Python | stroobandt/lua-filters | /minted/run_minted_tests.py | UTF-8 | 16,777 | 2.84375 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/usr/bin/env python
"""
Unit tests for the pandoc minted.lua filter.
"""
# Lint this file with: flake8 --max-line-length=80
import os
import string
import subprocess
import sys
import textwrap
code_block = textwrap.dedent('''
## A Code Block
```{.cpp}
auto mult = []<typename T, typename U>(T const & x, U const & y) {
return x * y;
};
```
''')
"""
The base CodeBlock code. {.cpp} is used as a replacement marker in most tests!
"""
inline_delims = '|!@#^&*-=+' + string.digits + string.ascii_letters
inline_code = textwrap.dedent('''
## Inline Code
`#include <type_traits>`{.cpp}
C and C++ use `{` and `}` to delimit scopes.
Some other special characters:
These check bypass: `~!@#$%^&*()-=_+[]\\{}|;\':",./<>?`
These check regular inline: ''' + ' '.join(
'`{' + inline_delims[:i] + '`' for i in range(len(inline_delims))
))
"""
The base Code code. {.cpp} is used as a replacement marker in most tests!
"""
def run_pandoc(pandoc_args, stdin):
    """Run pandoc with the specified arguments, returning the output.

    ``pandoc_args`` (list of str)
        Command-line arguments appended after the ``pandoc`` executable.

    ``stdin`` (str)
        Markdown source piped to pandoc on standard input.

    Returns the decoded (utf-8) standard output of pandoc.  If pandoc
    exits non-zero, its stderr is echoed and the whole test run aborts
    via ``sys.exit(1)``.
    """
    # The input / output should be small enough for these tests that buffer
    # overflows should not happen.
    pandoc_proc = subprocess.Popen(
        ["pandoc"] + pandoc_args,
        stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE
    )
    # Python 3.x and later require communicating with bytes.
    if sys.version_info[0] >= 3:
        stdin = bytes(stdin, "utf-8")
    stdout, stderr = pandoc_proc.communicate(input=stdin)
    if pandoc_proc.returncode != 0:
        sys.stderr.write("Non-zero exit code of {ret} from pandoc!\n".format(
            ret=pandoc_proc.returncode
        ))
        sys.stderr.write("pandoc stderr: {stderr}".format(
            stderr=stderr.decode("utf-8")
        ))
        sys.exit(1)
    return stdout.decode("utf-8")
def fail_test(test_name, messages, ansi_color_code="31"):
    """
    Write a colorized failure banner and explanatory messages to stderr,
    then ``sys.exit(1)``.

    ``test_name`` (str)
        The name of the failing test (to make finding it in code easier).

    ``messages`` (list of str -- or -- str)
        One string, or a list of strings, explaining the failure; each is
        written to ``stderr`` on its own ``-->`` line.

    ``ansi_color_code`` (str)
        ANSI color code used for the banner.  Default ``"31"`` (red).
    """
    banner = "\033[0;{ansi_color_code}mTest {test_name} FAILED\033[0m\n".format(
        ansi_color_code=ansi_color_code, test_name=test_name
    )
    sys.stderr.write(banner)
    # Normalize the single-string case so one loop serves both branches.
    if not isinstance(messages, list):
        messages = [messages]
    for message in messages:
        sys.stderr.write("--> {m}\n".format(m=message))
    sys.exit(1)
def ensure_fragile(test_name, pandoc_output):
    r"""
    Ensure that every \begin{frame} has (at least one) fragile.

    ``test_name`` (str)
        The name of the test (forwards to ``fail_test``).

    ``pandoc_output`` (str)
        The pandoc output for the test case.
    """
    for line in pandoc_output.splitlines():
        if r"\begin{frame}" in line:
            if "fragile" not in line:
                # Plain concatenation on purpose: the previous
                # str.format() call raised KeyError('frame') because the
                # literal "{frame}" in the message was parsed as a
                # replacement field, so real failures crashed instead of
                # being reported.
                fail_test(
                    test_name,
                    r"\begin{frame} without 'fragile': " + line
                )
def ensure_present(test_name, string, pandoc_output):
    """
    Assert that ``string`` occurs verbatim in ``pandoc_output``.

    ``test_name`` (str)
        The name of the test (forwards to ``fail_test``).

    ``string`` (str)
        The substring that must be present.

    ``pandoc_output`` (str)
        The pandoc output for the test case.
    """
    # Guard clause: nothing to do when the string is found.
    if string in pandoc_output:
        return
    message = "The requested string '{string}' was not found in:\n{pout}".format(
        string=string, pout=pandoc_output
    )
    fail_test(test_name, message)
def ensure_not_present(test_name, string, pandoc_output):
    """
    Assert that ``string`` does **not** occur in ``pandoc_output``.

    ``test_name`` (str)
        The name of the test (forwards to ``fail_test``).

    ``string`` (str)
        The substring that must be absent.

    ``pandoc_output`` (str)
        The pandoc output for the test case.
    """
    # Guard clause: nothing to do when the string is absent.
    if string not in pandoc_output:
        return
    message = "The forbidden string '{string}' was found in:\n{pout}".format(
        string=string, pout=pandoc_output
    )
    fail_test(test_name, message)
def run_tex_tests(pandoc_args, fmt):
"""
Run same tests for latex writers.
``pandoc_args`` (list of str)
The base list of arguments to forward to pandoc. Some tests may remove
the ``--no-highlight`` flag to validate whether or not pandoc
highlighting macros appear as expected (or not at all).
``fmt`` (str)
The format is assumed to be either 'latex' or 'beamer'.
"""
def verify(test_name, args, md, *strings):
"""Run pandoc, ensure fragile, and string in output."""
output = run_pandoc(args + ["-t", fmt], md)
if fmt == "beamer":
ensure_fragile(test_name, output)
else: # latex writer
ensure_not_present(test_name, "fragile", output)
for s in strings:
ensure_present(test_name, s, output)
# Make sure the pandoc highlighting is not being used
if "--no-highlight" in args:
ensure_not_present(test_name, r"\VERB", output)
# if `nil` is present, that likely means a problem parsing the metadata
ensure_not_present(test_name, "nil", output)
############################################################################
# CodeBlock tests. #
############################################################################
begin_minted = r"\begin{{minted}}[{attrs}]{{{lang}}}"
verify(
"[code-block] default",
pandoc_args,
code_block,
begin_minted.format(attrs="autogobble", lang="cpp")
)
verify(
"[code-block] no_default_autogobble",
pandoc_args,
textwrap.dedent('''
---
minted:
no_default_autogobble: true
---
{code_block}
''').format(code_block=code_block),
begin_minted.format(attrs="", lang="cpp")
)
verify(
"[code-block] default block language is 'text'",
pandoc_args,
code_block.replace("{.cpp}", ""),
begin_minted.format(attrs="autogobble", lang="text")
)
verify(
"[code-block] user provided default_block_language",
pandoc_args,
textwrap.dedent('''
---
minted:
default_block_language: "haskell"
---
{code_block}
''').format(code_block=code_block.replace("{.cpp}", "")),
begin_minted.format(attrs="autogobble", lang="haskell")
)
verify(
"[code-block] user provided block_attributes",
pandoc_args,
textwrap.dedent('''
---
minted:
block_attributes:
- "showspaces"
- "space=."
---
{code_block}
''').format(code_block=code_block),
begin_minted.format(
attrs=",".join(["showspaces", "space=.", "autogobble"]),
lang="cpp"
)
)
verify(
"[code-block] user provided block_attributes and no_default_autogobble",
pandoc_args,
textwrap.dedent('''
---
minted:
no_default_autogobble: true
block_attributes:
- "style=monokai"
- "bgcolor=monokai_bg"
---
{code_block}
''').format(code_block=code_block),
begin_minted.format(
attrs=",".join(["style=monokai", "bgcolor=monokai_bg"]), lang="cpp"
)
)
verify(
"[code-block] attributes on code block",
pandoc_args,
code_block.replace(
"{.cpp}", "{.cpp .showspaces bgcolor=tango_bg style=tango}"
),
begin_minted.format(
attrs=",".join([
"showspaces", "bgcolor=tango_bg", "style=tango", "autogobble"
]),
lang="cpp"
)
)
verify(
"[code-block] attributes on code block + user block_attributes",
pandoc_args,
textwrap.dedent('''
---
minted:
block_attributes:
- "showspaces"
- "space=."
---
{code_block}
''').format(
code_block=code_block.replace(
"{.cpp}", "{.cpp bgcolor=tango_bg style=tango}"
)
),
begin_minted.format(
attrs=",".join([
"bgcolor=tango_bg",
"style=tango",
"showspaces",
"space=.",
"autogobble"
]),
lang="cpp"
)
)
verify(
"[code-block] traditional fenced code block",
pandoc_args,
code_block.replace("{.cpp}", "cpp"),
begin_minted.format(attrs="autogobble", lang="cpp")
)
verify(
"[code-block] non-minted attributes not forwarded",
pandoc_args,
code_block.replace("{.cpp}", "{.cpp .showspaces .hello}"),
begin_minted.format(
attrs=",".join(["showspaces", "autogobble"]), lang="cpp"
)
)
############################################################################
# Inline Code tests. #
############################################################################
mintinline = r"\mintinline[{attrs}]{{{lang}}}"
verify(
"[inline-code] default",
pandoc_args,
inline_code,
mintinline.format(attrs="", lang="cpp"),
"|{|",
"|}|",
*[
delim + '{' + inline_delims[:i] + delim
for i, delim in enumerate(inline_delims)
]
)
verify(
"[inline-code] default language is text",
pandoc_args,
inline_code,
mintinline.format(attrs="", lang="text"),
"|{|",
"|}|"
)
# begin: global no_mintinline shared testing with / without --no-highlight
inline_no_mintinline_globally_md = textwrap.dedent('''
---
minted:
no_mintinline: true
---
{inline_code}
''').format(inline_code=inline_code)
inline_no_mintinline_globally_strings = [
r"\texttt{\{}",
r"\texttt{\}}",
(r"\texttt{" +
r"\textasciitilde{}!@\#\$\%\^{}\&*()-=\_+{[}{]}\textbackslash{}\{\}" +
r"""\textbar{};\textquotesingle{}:",./\textless{}\textgreater{}?}""")
]
verify(
"[inline-code] no_mintinline off globally",
pandoc_args,
inline_no_mintinline_globally_md,
r"\texttt{\#include\ \textless{}type\_traits\textgreater{}}",
*inline_no_mintinline_globally_strings
)
verify(
"[inline-code] no_mintinline off globally, remove --no-highlight",
[arg for arg in pandoc_args if arg != "--no-highlight"],
inline_no_mintinline_globally_md,
r"\VERB|\PreprocessorTok{\#include }\ImportTok{\textless{}type\_traits\textgreater{}}|",
*inline_no_mintinline_globally_strings
)
# end: global no_mintinline shared testing with / without --no-highlight
# begin: no_minted shared testing with / without --no-highlight
inline_no_minted_md = inline_code.replace("{.cpp}", "{.cpp .no_minted}")
inline_no_minted_strings = ["|{|", "|}|"]
verify(
"[inline-code] .no_minted on single inline Code",
pandoc_args,
inline_no_minted_md,
r"texttt{\#include\ \textless{}type\_traits\textgreater{}}",
*inline_no_minted_strings
)
verify(
"[inline-code] .no_minted on single inline Code, remove --no-highlight",
[arg for arg in pandoc_args if arg != "--no-highlight"],
inline_no_minted_md,
r"\VERB|\PreprocessorTok{\#include }\ImportTok{\textless{}type\_traits\textgreater{}}|",
*inline_no_minted_strings
)
# end: no_minted shared testing with / without --no-highlight
verify(
"[inline-code] user provided default_inline_language",
pandoc_args,
textwrap.dedent('''
---
minted:
default_inline_language: "haskell"
---
{inline_code}
''').format(inline_code=inline_code),
mintinline.format(attrs="", lang="haskell")
)
verify(
"[inline-code] user provided inline_attributes",
pandoc_args,
textwrap.dedent('''
---
minted:
inline_attributes:
- "showspaces"
- "space=."
---
{inline_code}
''').format(inline_code=inline_code),
mintinline.format(
attrs=",".join(["showspaces", "space=."]), lang="cpp"
),
mintinline.format(
attrs=",".join(["showspaces", "space=."]), lang="text"
)
)
verify(
"[inline-code] attributes on inline code",
pandoc_args,
inline_code.replace(
"{.cpp}", "{.cpp .showspaces bgcolor=tango_bg style=tango}"
),
mintinline.format(
attrs=",".join(["showspaces", "bgcolor=tango_bg", "style=tango"]),
lang="cpp"
)
)
verify(
"[inline-code] attributes on inline code + user inline_attributes",
pandoc_args,
textwrap.dedent('''
---
minted:
inline_attributes:
- "showspaces"
- "space=."
---
{inline_code}
''').format(
inline_code=inline_code.replace(
"{.cpp}", "{.cpp bgcolor=tango_bg style=tango}"
)
),
mintinline.format(
attrs=",".join([
"bgcolor=tango_bg",
"style=tango",
"showspaces",
"space=."
]),
lang="cpp"
)
)
verify(
"[inline-code] non-minted attributes not forwarded",
pandoc_args,
inline_code.replace("{.cpp}", "{.cpp .showspaces .hello}"),
mintinline.format(attrs="showspaces", lang="cpp")
)
def run_html_tests(args):
    """
    Run tests with an html5 writer to make sure minted commands are not used.

    Also make sure minted specific attributes are indeed stripped.

    ``args`` (list of str)
        The base list of arguments to forward to pandoc.
    """
    def verify(test_name, md, attrs=None):
        """Verify that minted commands -- and every string in ``attrs`` --
        are absent from the html5 rendering of ``md``."""
        output = run_pandoc(args + ["-t", "html5"], md)
        ensure_not_present(test_name, "mint", output)
        ensure_not_present(test_name, "fragile", output)
        # ``attrs`` previously defaulted to a mutable []; default to None
        # to avoid the shared-mutable-default pitfall.
        for a in (attrs or []):
            ensure_not_present(test_name, a, output)
        # if `nil` is present, that likely means a problem parsing the metadata
        ensure_not_present(test_name, "nil", output)

    verify(r"[html] no \begin{minted}", code_block)
    verify(r"[html] no \mintinline", inline_code)
    verify(
        r"[html] no \begin{minted} or \mintinline",
        "{code_block}\n\n{inline_code}".format(
            code_block=code_block, inline_code=inline_code
        )
    )
    verify(
        "[html] code block minted specific attributes stripped",
        code_block.replace(
            "{.cpp}",
            "{.cpp .showspaces space=. bgcolor=minted_bg style=minted}"
        ),
        ["showspaces", "space", "bgcolor", "style"]
    )
    verify(
        "[html] inline code minted specific attributes stripped",
        inline_code.replace(
            "{.cpp}",
            "{.cpp .showspaces space=. bgcolor=minted_bg style=minted}"
        ),
        ["showspaces", "space", "bgcolor", "style"]
    )
if __name__ == "__main__":
# Initial path setup for input tests and lua filter
this_file_dir = os.path.abspath(os.path.dirname(__file__))
minted_lua = os.path.join(this_file_dir, "minted.lua")
if not os.path.isfile(minted_lua):
sys.stderr.write("Cannot find '{minted_lua}'...".format(
minted_lua=minted_lua
))
sys.exit(1)
args = ["--fail-if-warnings", "--no-highlight", "--lua-filter", minted_lua]
run_tex_tests(args, "beamer")
run_tex_tests(args, "latex")
run_html_tests(args)
| true |
8dcdbfeeb99800eda85dfd3ca80b80a3c78c07a0 | Python | skyscience/Python | /爬虫学习/7/csv文件处理.py | UTF-8 | 1,058 | 3.390625 | 3 | [] | no_license | # csv:其实就是一种数据保存的格式,每个字段之间,以逗号分割,
# 有头部
import csv
list = [
{'name':'fxm','class':201,'age':20,'hight':175},
{'name':'zy','class':201,'age':22,'hight':180},
{'name':'zzh','class':201,'age':24,'hight':180},
{'name':'ltt','class':201,'age':21,'hight':160},
{'name':'xnsk','class':201,'age':22,'hight':180},
{'name':'ssc','class':201,'age':22,'hight':180},
{'name':'asjkx','class':201,'age':22,'hight':180},
{'name':'sncsl','class':201,'age':22,'hight':180},
]
#将数据写入csv文件
csvfile = open('204.csv','w')
#构建头部的参数
fieldnames = ['name','class','age','hight']
writehandler = csv.DictWriter(csvfile,fieldnames=fieldnames)
#写入一个csv文件的头部
writehandler.writeheader()
# for dict in list:
# 但行写入
# writehandler.writerow(dict)
#多行写入
writehandler.writerows(list)
#关闭文件
csvfile.close()
#csv文件的读取
csvfile = open('204.csv','r')
reader = csv.reader(csvfile)
for line in reader:
print(line)
| true |
019e882f8ae78eb9c6073beeaf3bd634abec761d | Python | christus02/citrix-adc-metrics-exporter-serverless | /utils/metrics-template-creator/metrics-template-creator.py | UTF-8 | 2,126 | 2.578125 | 3 | [
"Apache-2.0"
] | permissive | import json
import copy
IN_JSON = "metrics.json"
OUT_JSON = "out.json"

# Maps a substring of a metric description to a CloudWatch unit.
# Order matters: the first matching key wins, so more specific keys
# (e.g. "_mbits_rate") must come before their prefixes ("_rate").
# NOTE(review): "_mbits" -> "Megabytes" looks like it may have been meant to
# be "Megabits" — kept as-is to preserve existing output.
UNIT_CONVERSION = [
    {"key": "_mbits_rate", "value": "Megabits/Second"},
    {"key": "_mbits", "value": "Megabytes"},
    {"key": "_mb", "value": "Megabytes"},
    {"key": "_rate", "value": "Count/Second"},
    {"key": "percent", "value": "Percent"}
]

# Skeleton of one CloudWatch MetricDatum; deep-copied per metric.
COUNTER_TEMPLATE = {
    'MetricName': '',
    'Unit': 'Count',
    'Value': '',
    'Timestamp': '',
    'Dimensions': [
        {'Name': 'Description', 'Value': ''},
        {'Name': 'CitrixADC-AutoScale-Group', 'Value': ''},
        {'Name': 'CitrixADC-InstanceID', 'Value': ''}
    ]
}

def _build_metric(name, description):
    """Return a metric dict for (name, description) with its unit inferred
    from the description text ('Count' when nothing matches)."""
    # deepcopy is required: COUNTER_TEMPLATE contains nested dicts/lists.
    metric = copy.deepcopy(COUNTER_TEMPLATE)
    metric['MetricName'] = name
    metric['Dimensions'][0]['Value'] = description
    for unit in UNIT_CONVERSION:
        if unit['key'] in description:
            metric['Unit'] = unit['value']
            break
    return metric

def convert(metrics):
    """Convert the input metrics mapping {feature: {'counters': [...],
    'gauges': [...]}} into the output template.

    Both counters and gauges are appended to the 'counters' list of each
    feature (this mirrors the original behavior).  Each entry is a sequence
    whose first two items are (metric name, description).
    """
    out_ds = dict()
    for feature in metrics:
        out_ds[feature] = {'counters': []}
        for kind in ('counters', 'gauges'):
            for cntr in metrics[feature].get(kind, []):
                out_ds[feature]['counters'].append(_build_metric(cntr[0], cntr[1]))
    return out_ds

def main():
    """Read IN_JSON, build the metrics template and write it to OUT_JSON."""
    with open(IN_JSON) as f:
        metrics = json.load(f)
    out_ds = convert(metrics)
    with open(OUT_JSON, 'w') as f:
        json.dump(out_ds, f, indent=4)

if __name__ == '__main__':
    main()
| true |
f3306127c76c865b1b63b70ab483a3af2cdb2f2e | Python | timnaire/case | /models/subpractice.py | UTF-8 | 1,408 | 2.6875 | 3 | [] | no_license | import logging
from google.appengine.ext import ndb
from models.lawyer import Lawyer
class Subpractice(ndb.Model):
    """App Engine NDB model linking a sub-practice name to a Practice.

    NOTE(review): `Practice` is referenced below but never imported in this
    module (only `Lawyer` is) — this line raises NameError at import time
    unless `Practice` is injected elsewhere; confirm.
    """
    practice = ndb.KeyProperty(kind=Practice)      # parent practice key
    subpractice = ndb.StringProperty()             # sub-practice display name
    created = ndb.DateTimeProperty(auto_now_add=True)
    updated = ndb.DateTimeProperty(auto_now=True)
    @classmethod
    def save(cls,*args,**kwargs):
        """Create or update a Subpractice from keyword arguments.

        kwargs: 'id' (existing entity id, optional), 'practice' (Practice
        entity id), 'subpractice' (name).  Returns the stored entity.
        """
        subpractice_id = str(kwargs.get('id'))
        # str(None) is "None", which is not a digit, so a missing id falls
        # through to creating a new entity.
        if subpractice_id and subpractice_id.isdigit():
            subpractice = cls.get_by_id(int(subpractice_id))
        else:
            subpractice = cls()
        practice_id = str(kwargs.get('practice'))
        if practice_id.isdigit():
            practice_key = ndb.Key('Practice',int(practice_id))
            subpractice.practice = practice_key
        if kwargs.get('subpractice'):
            subpractice.subpractice = kwargs.get('subpractice')
        subpractice.put()
        return subpractice
    def to_dict(self):
        """Serialize to a dict.

        NOTE(review): this method reads `self.lawyer` and `p.law_practice`,
        but this model defines neither attribute — it looks copied from a
        different model (Practice?) and would raise AttributeError if
        called; verify against the original models package.
        """
        data = {}
        data['lawyer'] = None
        if self.lawyer:
            lawyer = self.lawyer.get()
            data['lawyer'] = lawyer.to_dict()
            practices = self.query().fetch()
            lawyer_pract = []
            for p in practices:
                if p.lawyer == lawyer.key:
                    lawyer_pract.append(p.law_practice)
            data['law_practice'] = lawyer_pract
        return data
c9d447ca0d0bf7e2ed43ec94f0fd48f965c8ea6d | Python | stratosthirios/mmgroup | /src/mmgroup/generate_c/testcode.py | UTF-8 | 669 | 2.890625 | 3 | [
"MIT"
] | permissive | from generate_functions import UserDirective
from make_c_tables import TableGenerator
class TestGen:
def add_to(x, expr):
return "{x} = {x} + {expr};\n".format(x = x, expr = str(expr))
functions = {
"AddTo": UserDirective(add_to, "si"),
}
tables = {
"y" : [1,2,3],
"TABLE_SIZE": 32,
"mult": 17,
"pair_list": list(zip(range(3), [7,4,9])),
}
def generate():
T = TestGen
tg = TableGenerator(T.tables, T.functions, verbose = 1)
tg.generate("testcode.ske", "test.c", "test.h")
print("Table in test code:")
print( tg.names )
if __name__ == "__main__":
generate()
| true |
aad201b8b947dcbb5e7145b6904d29b359f09eeb | Python | wondper/2dgp-homework | /git main/2dgp_homework/2dgp week2-1/20200910_grid1.py | UTF-8 | 410 | 3.515625 | 4 | [] | no_license | import turtle as t
# Draw a (count x count) square grid of `size`-pixel cells with turtle.
t.speed(0)  # fastest drawing, no animation delay
size = 30          # cell edge length in pixels
count = 19         # number of cells per side
linelength = count * size
t.penup()
t.goto(-100, -200)  # bottom-left corner of the grid
t.pendown()
def line(start, end):
    """Draw a straight segment from `start` to `end` (both (x, y) tuples)."""
    t.penup()
    t.goto(*start)
    t.pendown()
    t.goto(*end)
x1, y1 = t.pos()
x2, y2 = x1 + linelength, y1 + linelength
# count+1 lines in each direction: one horizontal and one vertical per step.
for i in range(count + 1):
    n = i * size
    line((x1, y1 + n), (x2, y1 + n))
    line((x1 + n, y1), (x1 + n, y2))
t.exitonclick()  # keep the window open until clicked
46477810b0fbf99848db811ea394f111be35b373 | Python | ymei/TMSPlane | /Hardware/PCB/KiCadScript/KiAuto/util.py | UTF-8 | 374 | 2.578125 | 3 | [
"BSD-3-Clause"
] | permissive | ## \file
# Utility routines.
#
from __future__ import print_function
import pcbnew
## 1nm is the smallest dimension in KiCAD
_SCALE = 1000000.0

## convert x in [mm] to native scale
def mm(x):
    """Convert a length in millimetres to KiCAD's native nanometre units
    (truncated to an integer).  Equivalent to pcbnew.FromMM(x)."""
    scaled = _SCALE * x
    return int(scaled)

## convert x in [mil] to native scale
def mil(x):
    """Convert a length in mils (thousandths of an inch) to native units
    (truncated to an integer).  Equivalent to pcbnew.FromMils(x)."""
    scaled = _SCALE * 25.4 * x / 1000.0
    return int(scaled)
| true |
b4b07a3045ad215e8f433b0aaa6d1c1990e89bdc | Python | compMathUPRH/wolffia | /lib/chemicalGraph/io/PSF.py | UTF-8 | 9,764 | 2.53125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Copyright 2011, 2012: José O. Sotero Esteva,
Computational Science Group, Department of Mathematics,
University of Puerto Rico at Humacao
<jse@math.uprh.edu>.
(On last names: Most hispanic people, Puerto Ricans included, use two surnames;
one from the father and one from the mother. We have separated first names from
surnames with two spaces.)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License version 3 as published by
the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program (gpl.txt). If not, see <http://www.gnu.org/licenses/>.
Acknowledgements: The main funding source for this project has been provided
by the UPR-Penn Partnership for Research and Education in Materials program,
USA National Science Foundation grant number DMR-0934195.
"""
import sys
class PSF(object):
    """Reader/writer for PSF (protein structure file) molecular topologies.

    Python 2 code (uses `print` statements and file.next()).  Parsing relies
    on the fixed 8-character columns of the X-PLOR PSF format.
    """
    def __init__(self, filename ):
        """Parse `filename`, filling self.atoms/bonds/angles/dihedrals and
        the per-atom element/charge/mass/type lists."""
        # open source file
        f = open(filename)
        atomcount = 1          # running 1-based atom index across sections
        self.atoms = []
        self._elements = []
        self._charges = []
        self._masses = []
        self.bonds = []
        self.angles = []
        self.dihedrals = []
        self.types = []
        self.elements = []
        self.charges = []
        self.masses = []
        self.types = []
        for line in f:
            if line.find('!NATOM') > -1:
                # Offset applied to atom indices of later sections so that
                # numbering is continuous when several !NATOM blocks occur.
                atomBase = atomcount - 1
                natoms = int(line[:8])
                for i in range(0, natoms):
                    line = f.next()
                    # Renumber the atom: replace the first 8 columns with
                    # our own running counter, keep the rest of the line.
                    self.atoms.append("%8d" % (atomcount) + line[8:])
                    #print "%8d" % (atomcount) + line[8:]
                    atomcount = atomcount+1
            if line.find('!NBOND') > -1:
                # Bonds come 4 pairs per line; integer division (Python 2)
                # gives the number of full lines.
                nbonds = int(line[:8])
                for i in range(0, nbonds/4):
                    line = f.next()
                    self.bonds.append([int(line[:8])+atomBase, int(line[9:16])+atomBase])
                    self.bonds.append([int(line[17:24])+atomBase, int(line[25:32])+atomBase])
                    self.bonds.append([int(line[33:40])+atomBase, int(line[41:48])+atomBase])
                    self.bonds.append([int(line[49:56])+atomBase, int(line[57:64])+atomBase])
                # Remainder line holds the last nbonds % 4 pairs.
                # NOTE(review): this extra f.next() is executed even when
                # nbonds % 4 == 0 — presumably it then consumes the blank
                # separator line; confirm against real PSF files.
                line = f.next()
                for i in range(0, nbonds % 4):
                    self.bonds.append([int(line[i*16+1: i*16+8])+atomBase, int(line[i*16+9: i*16+16])+atomBase])
            if line.find('!NTHETA') > -1:
                # Angles come 3 triplets per line.
                nangles = int(line[:8])
                for i in range(0, nangles/3):
                    line = f.next()
                    self.angles.append([int(line[:8])+atomBase, int(line[9:16])+atomBase,int(line[17:24])+atomBase])
                    self.angles.append([int(line[25:32])+atomBase, int(line[33:40])+atomBase, int(line[41:48])+atomBase])
                    self.angles.append([int(line[49:56])+atomBase, int(line[57:64])+atomBase, int(line[65:72])+atomBase])
                line = f.next()
                for i in range(0, nangles % 3):
                    self.angles.append([int(line[i*24+1: i*24+8])+atomBase, int(line[i*24+9: i*24+16])+atomBase, int(line[i*24+17: i*24+24])+atomBase])
            if line.find('!NPHI') > -1:
                # Dihedrals come 2 quadruplets per line.
                ndihedrals = int(line[:8])
                for i in range(0, ndihedrals/2):
                    line = f.next()
                    self.dihedrals.append([int(line[:8])+atomBase, int(line[9:16])+atomBase,int(line[17:24])+atomBase,int(line[25:32])+atomBase])
                    self.dihedrals.append([int(line[33:40])+atomBase, int(line[41:48])+atomBase,int(line[49:56])+atomBase, int(line[57:64])+atomBase])
                line = f.next()
                for i in range(0, ndihedrals % 2):
                    self.dihedrals.append([int(line[i*32+1: i*32+8])+atomBase, int(line[i*32+9: i*32+16])+atomBase, int(line[i*32+17: i*32+24])+atomBase, int(line[i*32+25: i*32+32])+atomBase])
        f.close()
        # Extract fixed-column fields from the renumbered atom lines.
        # print [atom[23:26] for atom in self.atoms]
        self._elements = [atom[24:29].strip() for atom in self.atoms]
        self._charges = [float(atom[35:48]) for atom in self.atoms]
        self._masses = [float(atom[50:58]) for atom in self.atoms]
        self._types = [atom[29:34].strip() for atom in self.atoms]
    #-------------------------------------------------------------
    def inferAngles(self, molecule):
        """Scan `molecule`'s bond graph for bonded triples (n1, atom, n2)
        whose type string appears in the force field's angle table, and add
        the new ones to self.angles.

        NOTE(review): relies on a global `ff` that is only defined in the
        __main__ block of this module — calling this from library code
        raises NameError.  The hard-coded limits (atom <= 28, n2 < 28,
        atom < 10) look like leftover debugging filters; confirm intent.
        """
        #from chemicalGraph import Mixture
        #print "molecula: '", molecule, "'"
        angles = list()
        for atom in molecule:
            if atom <= 28:
                neigh = molecule.neighbors(atom)
                #print "Neighbours: ", atom, ", ", neigh
                for n1 in neigh:
                    for n2 in neigh:
                        if n1 < n2 and n2 < 28 and atom < 10:
                            #print (n1, atom, n2)
                            # Angle key, and the same key reversed, since
                            # "A B C" and "C B A" name the same angle.
                            awesome = self._types[n1] + " " + self._types[atom] + " " + self._types[n2]
                            #print awesome, (n1, atom, n2), "awesome"
                            radical = self._types[n2] + " " + self._types[atom] + " " + self._types[n1]
                            #print radical, (n2, atom, n1), "radical"
                            #print ff._ANGLES
                            if awesome in ff._ANGLES:
                                print "Append: '" + awesome + "' <<< awesome", ([n1,atom,n2])
                                angles.append([n1,atom,n2])
                                #print ff._ANGLES[awesome]
                            elif radical in ff._ANGLES:
                                print "Append: '" + radical + "' <<< radical", ([n1,atom,n2])
                                angles.append([n1,atom,n2])
                                #print ff._ANGLES[radical]
                            else:
                                print "NoAppd:", ([n1,atom,n2]), awesome, ";", radical
        #print "This.Type: ", self._types[atom]
        #print "Angles:", angles
        #print "Tipos:", self._types
        for angle in angles:
            if not self.hasAngle(angle):
                self.angles.append(angle)
    #-------------------------------------------------------------
    @staticmethod
    def write(mixture, psfFile=None):
        """
        Writes a PSF topology file.
        @type psfFile: string
        @param psfFile: PSF filename. If None it will write to sys.stdout.
        """
        if psfFile==None:
            fd = sys.stdout
            #print "writePSF imprimiendo stdout"
        else:
            fd = open(psfFile, 'w')
            #print "writePSF(",psfFile,")"
        # write ATOM section
        #print "PSF\n\n 1 !NTITLE\n REMARKS \n\n%8i !NATOM\n" % mixture.order()
        fd.write("PSF\n\n 1 !NTITLE\n REMARKS \n\n%8i !NATOM\n" % mixture.order())
        count = 1
        renumbering = dict() # atoms labels may not be [1..n]
        for molecule in mixture:
            renumbering[molecule] = dict()
            mol = mixture.getMolecule(molecule)
            #print "PSF write " , molecule, mixture.trad[molecule]
            for atom in mol:
                atr = mol.getAtomAttributes(atom)
                #charge = mol.getForceField().charge(atr.getInfo().getType())
                charge = atr.getInfo().getCharge()
                #print "PSF.write", atr.getInfo().getType(),charge
                psfline = atr.getInfo().PSFline(count, mixture.trad[molecule])
                # Splice the signed charge into its fixed column range.
                psfline = "%s%+8.6f%s\n" % (psfline[:38], charge, psfline[47:])
                fd.write(psfline)
                #fd.write(atr.PSFline(count)+"\n")
                renumbering[molecule][atom] = count
                count += 1
        # write BOND section (4 bond pairs per output line)
        fd.write("\n%8i !NBOND\n" % mixture.bonds())
        #print "\n%8i !NBOND\n" % mixture.bonds()
        count = 0
        bondCount = 0
        for molecule in mixture:
            mol = mixture.getMolecule(molecule)
            bondsT = mol.bonds()
            nbonds = len(bondsT)
            #bonds = list()
            for bond in bondsT:
                #bonds.append([bond[0]+count, bond[1]+count])
                fd.write("%8d%8d" % (renumbering[molecule][bond[0]], renumbering[molecule][bond[1]]))
                bondCount += 1
                if bondCount % 4 == 0:
                    fd.write("\n")
            #for i in range(0, nbonds - nbonds % 4, 4):
                #fd.write("%8d%8d" % (bonds[i][0], bonds[i][1]))
            count += mol.order()
        # write angles (3 triplets per output line)
        fd.write("\n\n%8d !NTHETA: angles\n" % mixture.angleCount())
        #print "\n\n%8d !NTHETA: angles\n" % mixture.angleCount()
        count = 0
        angleCount = 0
        for molecule in mixture:
            mol = mixture.getMolecule(molecule)
            anglesT = mol.angles()
            nangles = len(anglesT)
            #angles = list()
            for angle in anglesT:
                #angles.append([angle[0]+count, angle[1]+count])
                fd.write("%8d%8d%8d" % \
                    (renumbering[molecule][angle[0]], \
                    renumbering[molecule][angle[1]], \
                    renumbering[molecule][angle[2]]))
                angleCount += 1
                if angleCount % 3 == 0:
                    fd.write("\n")
            #for i in range(0, nangles - nangles % 4, 4):
                #fd.write("%8d%8d" % (angles[i][0], angles[i][1]))
            count += mol.order()
        # write dihedrals (always empty in this implementation)
        fd.write("\n\n%8d !NPHI: dihedrals\n" % (0))
        #print "\n\n%8d !NPHI: dihedrals\n" % (0)
        # write other stuff (empty sections required by the format)
        fd.write("\n\n%8d !NIMPHI: impropers\n" % (0))
        fd.write("\n\n%8d !NDON: donors\n" % (0))
        fd.write("\n\n%8d !NACC: acceptors\n" % (0))
        fd.write("\n\n%8d !NNB\n" % (0))
        fd.write("\n\n%8d !NGRP\n" % (0))
        # NOTE(review): closes sys.stdout when psfFile is None.
        fd.close()
    #-------------------------------------------------------------
    # Simple accessors over the parsed data; `i` is a 0-based atom index.
    def getAngles(self):
        return self.angles
    def getDihedrals(self):
        return self.dihedrals
    def getElement(self, i):
        return self._elements[i]
    def getCharge(self, i):
        return self._charges[i]
    def getMass(self, i):
        return self._masses[i]
    def getType(self, i):
        return self._types[i]
    def len(self):
        # Number of atoms parsed (method named `len`, not __len__).
        return len(self.atoms)
    def hasAngle(self, angle):
        # NOTE(review): list.reverse() reverses in place and returns None,
        # so the second operand tests `None in self.angles` and mutates the
        # caller's list — the reversed form is never actually matched.
        return angle in self.angles or angle.reverse() in self.angles
if __name__ == '__main__':
    # Smoke test: parse a sample PolyCYT PSF file and infer its angles.
    import os
    sys.path.append(os.path.dirname(os.path.realpath(__file__))+'/../../../conf')
    from Wolffia_conf import *
    from chemicalGraph.molecule.ForceField import ForceField
    from chemicalGraph.molecule.polymer.PolyCYT import PolyCYT
    #ff = ForceField("pruebaCYT", "../../../data/forceFields/PolyCYT.prm")
    m = PSF("../../../data/coordinates/Polymers/CYT/start_CYT.psf")
    # Atom 5 is expected to be a carbon (mass 12.011).
    assert(m.getMass(5) == 12.011)
    mol = PolyCYT(1)
    # `ff` is the module-level global that inferAngles() reads.
    ff = mol.getForceField()
    print mol.__dict__.keys()
    m.inferAngles(mol)
    print "FF Angles:\n", ff._ANGLES, "\n======================================"
    assert(m.hasAngle([1, 2, 3]))
    assert(True)
| true |
093c83ffcd736f6aa5cca146bb141cea265734f5 | Python | H0bbyist/hero-rpg | /characters/zombie.py | UTF-8 | 459 | 3.59375 | 4 | [
"MIT"
] | permissive | from characters.base import Character
class Zombie(Character):
    """A weak enemy that never registers as dead."""
    def __init__(self):
        self.name = "Zombie"
        self.health = 5
        self.power = 1
        self.prize = 0   # reward for defeating it (none)
    def alive(self):
        # Always True — presumably the zombie gimmick is that it keeps
        # fighting even at health <= 0; confirm against the game rules.
        return True
    def receive_damage(self, points):
        """Subtract `points` from health and report the hit."""
        self.health -= points
        print("{} received {} damage.".format(self.name, points))
        if self.health <= 0:
            print("{} is already dead.".format(self.name))
a7b7cead6ea4368d09423119b7e2c12d11571ac2 | Python | Shashank-Bhari/Handwritten-Words-Recognition | /wknn.py | UTF-8 | 992 | 2.515625 | 3 | [] | no_license | import numpy
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
import skimage.io
from skimage.feature import hog
import os
# Train weighted-kNN classifiers on HOG features of handwritten-word images
# (210 class folders under words1/train/) and log accuracy per k to pre.txt.
path='words1/train/'
hog_list=[]   # one HOG feature vector per image
labels=[]     # class label (folder number) per image
# NOTE(review): mode '+w' is unconventional; Python treats it like 'w+'
# (truncate and open for update).
f=open('pre.txt','+w')
for i in range(1,211):
    path1=path+str(i)
    for j in os.listdir(path1):
        img=skimage.io.imread(fname=path1+'/'+j,as_gray=True)
        fd=hog(img,orientations=9,pixels_per_cell=(14,14),cells_per_block=(3,3),transform_sqrt=True,block_norm="L1")
        print(path1+'/'+j)
        hog_list.append(fd)
        labels.append(i)
x_train,x_test,y_train,y_test=train_test_split(hog_list,labels,test_size=0.2,random_state=42)
# Sweep odd k from 51 to 151; distance weighting gives closer neighbours
# more influence.  NOTE(review): accuracy_score is computed twice per k.
for k in range(51,152,2):
    wkn=KNeighborsClassifier(n_neighbors=k,weights='distance')
    wkn.fit(x_train,y_train)
    pre=wkn.predict(x_test)
    f.write(str(k)+'-'+str(accuracy_score(y_test,pre))+"\n")
    print(str(k)+'-'+str(accuracy_score(y_test,pre))+"\n")
f.close()
| true |
def checkio(data: str) -> bool:
    """Return True if *data* is a valid password.

    A valid password is at least 10 characters long and contains at least
    one digit, one lowercase letter and one uppercase letter.
    """
    if len(data) < 10:
        return False
    # any() short-circuits, replacing the original's three hand-rolled
    # boolean flags and `== True` comparisons.
    return (any(c.isdigit() for c in data)
            and any(c.islower() for c in data)
            and any(c.isupper() for c in data))
print(checkio('A1213pokl'))
"""
def checkio(data):
#replace this for solution
if not isinstance(data,str):
return False
if len(data) < 10 :
return False
import re
rgx1=re.compile(r'[0-9a-zA-Z]+')
rgx2=re.compile(r'.*[0-9].*')
rgx3=re.compile(r'.*[a-z].*')
rgx4=re.compile(r'.*[A-Z].*')
if not rgx1.match(data):
return False
if not rgx2.match(data):
return False
if not rgx3.match(data):
return False
if not rgx4.match(data):
return False
return True
def checkio(d: str) -> bool:
return len(d) >= 10 and d not in [d.lower(), d.upper()] and any(c.isdigit() for c in d)
def checkio(data: str) -> bool:
import re
if len(data) >= 10:
if re.search(r'[A-Z]+',data):
if re.search(r'[a-z]+', data):
if re.search(r'[0-9]+', data):
return True
return False
"""
| true |
055ac2a58ac87e9751a68db464990a33410fa9b1 | Python | ndz-v/grades | /src/presentation_service.py | UTF-8 | 5,106 | 3.21875 | 3 | [] | no_license | import click
import os
class PresentationService:
    """Renders a grade table, summary figures and an exam histogram to the
    terminal with click, sized to the current terminal width."""
    window_width = 0
    small_ratio = 0
    big_ratio = 0

    def __init__(self):
        # Column widths are fractions of the terminal width: 8% for narrow
        # columns, 30% for the subject-name column.
        self.window_width = os.get_terminal_size().columns
        self.small_ratio = int(self.window_width*0.08)
        self.big_ratio = int(self.window_width*0.3)

    def print_subject_table(self, subject_list: list):
        """Print the header followed by one row per subject record."""
        self.print_table_header()
        for subject_row in subject_list:
            self.print_subject(subject_row)
        click.echo()

    def print_table_header(self):
        """Print the bold German column headers framed by rule lines."""
        click.secho(f'{"":-^{self.window_width}}', fg='white', bold=True)
        click.secho(f'{"Modul"[:self.small_ratio]:<{self.small_ratio}} | ', bold=True, nl=False)
        click.secho(f'{"Fach"[:self.big_ratio]:<{self.big_ratio}} | ', bold=True, nl=False)
        click.secho(f'{"Note"[:self.small_ratio]:<{self.small_ratio}} | ', bold=True, nl=False)
        click.secho(f'{"LP"[:self.small_ratio]:<{self.small_ratio}} | ', bold=True, nl=False)
        click.secho(f'{"Versuch"[:self.small_ratio]:<{self.small_ratio}} | ', bold=True, nl=False)
        click.secho(f'{"Datum"[:self.small_ratio]:<{self.small_ratio}} | ', bold=True, nl=False)
        click.secho(f'{"Semester"[:self.small_ratio]:<{self.small_ratio}}', bold=True)
        click.secho(f'{"":-^{self.window_width}}', fg='white', bold=True)

    def print_subject(self, subject_row: list):
        """Print a single table row; grade-related cells are colored by the
        numeric grade.  `subject_row` is a positional record (indexes match
        the scraper's column layout — TODO confirm against the data source)."""
        module = subject_row[0]
        subject = subject_row[1]
        semester = subject_row[2]
        grade = subject_row[3]
        points = subject_row[5]
        attempt = subject_row[8]
        date = subject_row[9]
        # German decimal comma -> float for the color scale.
        converted_grade = float(grade.replace(',', '.'))
        color = self.select_color(converted_grade)
        click.secho(f'{module[:self.small_ratio]:<{self.small_ratio}}', fg='white', nl=False)
        click.secho(' | ', nl=False)
        click.secho(f'{subject[:self.big_ratio]:<{self.big_ratio}}', fg=color, nl=False)
        click.secho(' | ', nl=False)
        click.secho(f'{grade[:self.small_ratio]:<{self.small_ratio}}', fg=color, nl=False)
        click.secho(' | ', nl=False)
        click.secho(f'{points[:self.small_ratio]:<{self.small_ratio}}', fg=color, nl=False)
        click.secho(' | ', nl=False)
        click.secho(f'{attempt[:self.small_ratio]:<{self.small_ratio}}', fg='white', nl=False)
        click.secho(' | ', nl=False)
        click.secho(f'{date[:self.small_ratio]:<{self.small_ratio}}', fg='white', nl=False)
        click.secho(' | ', nl=False)
        click.secho(f'{semester[:self.big_ratio]:<{self.small_ratio}}')

    def select_color(self, grade):
        """Map a numeric grade (German 1.0=best .. 5.0=fail) to a click color.
        Values below 1 also fall through to 'red'."""
        if 1 <= grade <= 1.5:
            return 'bright_green'
        elif 1.5 < grade <= 2.5:
            return 'green'
        elif 2.5 < grade <= 3.5:
            return 'white'
        elif 3.5 < grade <= 4.0:
            return 'bright_yellow'
        else:
            return 'red'

    def print_average_grade(self, grade: float):
        """Print the average grade (decimal comma) in its grade color."""
        grade_text = str(grade).replace('.', ',')
        click.echo()
        click.secho(f'{"Durchschnittsnote":<10} {grade_text}', fg=self.select_color(grade))

    def print_gained_points(self, points: float):
        """Print the total credit points earned."""
        click.secho(f'{"Erreichte LP":<10} {int(points)}')
        click.echo()

    def print_exam_statistics(self, exam_data: list):
        """Print the grade histogram ("Notenspiegel") for one exam.
        `exam_data` is a positional record with counts at odd indexes —
        TODO confirm the layout against the data source."""
        subject_name = exam_data[14]
        click.echo()
        click.echo(f'{"NOTENSPIEGEL"} {subject_name}')
        click.secho(f'{"":-^65}', fg='white', bold=True)
        click.echo(f'{"Notenbereich":<31} | Anzahl')
        click.secho(f'{"":-^65}', fg='white', bold=True)
        very_well_amount = exam_data[1]
        good_amount = exam_data[3]
        satisfying_amount = exam_data[5]
        sufficient_amount = exam_data[7]
        deficient_amount = exam_data[9]
        attendees_amount = exam_data[11]
        average_grade = exam_data[13]
        # (Removed: an unused `table_width` local computed here.)
        click.secho(f'{"Sehr gut":<15} {"(1,0 - 1,5)":<15}', fg='bright_green', nl=False)
        click.secho(' | ', nl=False)
        click.secho(f'{very_well_amount}', fg='bright_green')
        click.secho(f'{"Gut":<15} {"(1,6 - 2,5)":<15}', fg='green', nl=False)
        click.secho(' | ', nl=False)
        click.secho(f'{good_amount}', fg='green')
        click.secho(f'{"Befriedigend":<15} {"(2,6 - 3,5)":<15}', fg='white', nl=False)
        click.secho(' | ', nl=False)
        click.secho(f'{satisfying_amount}', fg='white')
        click.secho(f'{"Ausreichend":<15} {"(3,6 - 4,0)":<15}', fg='bright_yellow', nl=False)
        click.secho(' | ', nl=False)
        click.secho(f'{sufficient_amount}', fg='bright_yellow')
        click.secho(f'{"Mangelhaft":<15} {"(4,1 - 5,0)":<15}', fg='bright_red', nl=False)
        click.secho(' | ', nl=False)
        click.secho(f'{deficient_amount}', fg='red')
        click.secho(f'{"":-^65}', fg='white', bold=True)
        click.secho(f'{"Teilnehmer":<31} | {attendees_amount}', fg='white')
        click.secho(f'{"Durchschnittsnote":<31} | {average_grade}', fg='white')
        click.secho(f'{"":-^65}', fg='white', bold=True)
        click.echo()
b3fa563891e5f6d207ea60a8481f6c36d3c571ad | Python | ProfessorSean/Kasutamaiza | /upcfcardsearch/c132.py | UTF-8 | 1,041 | 2.53125 | 3 | [
"MIT"
] | permissive | import discord
from discord.ext import commands
from discord.utils import get
class c132(commands.Cog, name="c132"):
    """Discord cog that serves the 'Atamotona' custom card as an embed."""
    def __init__(self, bot: commands.Bot):
        self.bot = bot
    @commands.command(name='Atamotona', aliases=['c132'])
    async def example_embed(self, ctx):
        """Reply to !Atamotona / !c132 with the card's embed."""
        embed = discord.Embed(title='Atamotona',
                              color=0xFDE68A)
        embed.set_thumbnail(url='https://www.duelingbook.com/images/custom-pics/2300000/2334895.jpg')
        embed.add_field(name='Status (Archetype)', value='Casual:3/Tournament:3', inline=True)
        embed.add_field(name='Type (Attribute)', value='Machine/Normal (EARTH)', inline=False)
        embed.add_field(name='Level (ATK/DEF)', value='7 (2050/2650)', inline=False)
        embed.add_field(name='Lore Text', value='MECHA-MECHA! Machine...loading...updating......GO! ATAMOTONA FULL THROTTLE!!!', inline=False)
        embed.set_footer(text='Set Code: ANCF')
        await ctx.send(embed=embed)
def setup(bot: commands.Bot):
    """Extension entry point: register the c132 cog on the bot."""
    cog = c132(bot)
    bot.add_cog(cog)
ca1622fa5a777c58a19a5e99f74d22e1cf51885f | Python | yeyintminthuhtut/hack_audio_captcha | /split_loud_voice.py | UTF-8 | 3,037 | 2.84375 | 3 | [] | no_license | '''
Quick and dirty way to generate separate wav files depending on the loud voice detected in audio captcha challenge.
Lots of room for improvement.
Author : Debasish Mandal
http://www.debasish.in/
'''
import wave
import sys
import struct
import os
import time
import httplib
from random import randint
# Python 2 script: read a mono 16-bit WAV captcha (path in argv[1]), find
# the loud segments, and write each one out twice (with silence padding)
# as cut_<n>.wav.
ip = wave.open(sys.argv[1], 'r')
info = ip.getparams()
frame_list = []
# Decode every frame to a signed 16-bit amplitude ('<h' = little-endian short).
for i in range(ip.getnframes()):
    iframe = ip.readframes(1)
    amplitude = struct.unpack('<h', iframe)[0]
    frame_list.append(amplitude)
ip.close()
# Noise gate: zero out anything quieter than +/-25.
for i in range(0,len(frame_list)):
    if abs(frame_list[i]) < 25:
        frame_list[i] = 0
################################ Find Out most noisy portions of the audio file ###########################
thresh = 30   # runs of zeros longer than this split the audio into chunks
output = []
non_zero_temp = []
length = len(frame_list)
i = 0
while i < length:
    # Collect the run of consecutive zero samples starting at i.
    zeros = []
    while i < length and frame_list[i] == 0:
        i += 1
        zeros.append(0)
    if len(zeros) != 0 and len(zeros) < thresh:
        # Short gap: keep it inside the current chunk.
        non_zero_temp += zeros
    elif len(zeros) > thresh:
        # Long gap: close the current chunk, if any.
        if len(non_zero_temp) > 0 and i < length:
            output.append(non_zero_temp)
            non_zero_temp = []
    else:
        # Non-zero sample: extend the current chunk.
        non_zero_temp.append(frame_list[i])
        i += 1
if len(non_zero_temp) > 0:
    output.append(non_zero_temp)
# Keep only chunks long enough to be a spoken digit (> 3000 samples).
chunks = []
for j in range(0,len(output)):
    if len(output[j]) > 3000:
        chunks.append(output[j])
#########################################################################################################
# randint(-0, +0) is always 0 — a placeholder for injecting light noise.
for l in chunks:
    for m in range(0,len(l)):
        if l[m] == 0:
            l[m] = randint(-0,+0)
inc_percent = 1 #10 percent
# Amplify every sample by inc_percent (integer division in Python 2).
for l in chunks:
    for m in range(0,len(l)):
        if l[m] <= 0:
            # negative value
            l[m] = 0 - abs(l[m]) + abs(l[m])*inc_percent/100
        else:
            #positive vaule
            l[m] = abs(l[m]) + abs(l[m])*inc_percent/100
########################################################
#Change it to > 1 if any amplification is required
NEW_RATE = 1
print '[+] Possibly ',len(chunks),'number of loud voice detected...'
for i in range(0, len(chunks)):
    new_frame_rate = info[0]*NEW_RATE   # NOTE(review): computed but unused
    print '[+] Creating No. ',str(i),'file..'
    split = wave.open('cut_'+str(i)+'.wav', 'w')
    # Same params as the input, but nframes reset to 0.
    split.setparams((info[0],info[1],info[2],0,info[4],info[5]))
    # split.setparams((info[0],info[1],new_frame_rate,0,info[4],info[5]))
    #Add some silence at start selecting +15 to -15
    for k in range(0,10000):
        single_frame = struct.pack('<h', randint(-25,+25))
        split.writeframes(single_frame)
    # Add the voice for the first time
    for frames in chunks[i]:
        single_frame = struct.pack('<h', frames)
        split.writeframes(single_frame)
    #Add some silence in between
    for k in range(0,10000):
        single_frame = struct.pack('<h', randint(-25,+25))
        split.writeframes(single_frame)
    # Add the voice second time
    for frames in chunks[i]:
        single_frame = struct.pack('<h', frames)
        split.writeframes(single_frame)
    #Add silence at end
    for k in range(0,10000):
        single_frame = struct.pack('<h', randint(-25,+25))
        split.writeframes(single_frame)
    split.close()
    time.sleep(1)
print '[+] Done!'
| true |
da57628ad144b0b7254709d4cb960d7e3eff45fe | Python | Corovino/credito | /comh.py | UTF-8 | 209 | 2.75 | 3 | [
"MIT"
] | permissive | import os
# Python 2 helper: prompt for a commit type and message, then stage and
# commit with a JSON-formatted commit message.
tipo = raw_input("Ingrese el tipo: ")
msj = raw_input("Ingrese el mensaje: ")
os.system('git add .')
# SECURITY: user input is interpolated straight into a shell command —
# quotes/backticks in `tipo` or `msj` allow shell injection. Consider
# subprocess with an argument list instead of os.system.
os.system("""git commit -am '{"responsable": "Hebert Romero","tipo": "%s", "msj": "%s"}' """ %(tipo,msj))
47488e32da43503ca9d14c0439cf0d2942b201b3 | Python | salgado/python_stuffs | /jsonassg.py | UTF-8 | 459 | 3.296875 | 3 | [] | no_license | # get json form url
import json
import urllib
url = raw_input('Enter url: ')
if len(url) < 1 : exit
print 'Retrieving', url
uh = urllib.urlopen(url)
data = uh.read()
print 'Retrieved',len(data),'characters'
try: info = json.loads(str(data))
except: info = None
#print json.dumps(info, indent=4)
sum = 0
num = 0
for item in info["comments"]:
count = item["count"]
num = num + 1
sum = sum + int(count)
print "Count: ", num
print "Sum: ", sum
| true |
c8b4870a29ae311b310bdc7bfb240cfb4cea36d4 | Python | thiaagodev/Curso_Python_Mundo_1 | /PythonExercicios/ex019.py | UTF-8 | 370 | 3.828125 | 4 | [] | no_license | import random
# Read the four students' names in a loop instead of four copy-pasted
# prompts; the generated prompts are identical to the originals.
lista = [input('Digite o nome do aluno número {}: '.format(n)) for n in range(1, 5)]
# Pick one student at random.
aluno_escolhido = random.choice(lista)
print('O aluno escolhido foi o {}'.format(aluno_escolhido))
| true |
07a092c99f580b10e8a24d488ba29670b5a324f8 | Python | sakanack/AtCorder | /AtCoderBeginnerContest148/D.py | UTF-8 | 256 | 2.953125 | 3 | [] | no_license | N = int(input())
# AtCoder ABC148 D.  N (the element count) is read on the previous line.
a = list(map(int, input().split()))
breakCount = 0    # elements that do not continue the 1,2,3,... sequence
orderCount = 1    # next value we want to keep
for i in range(N):
    if(a[i] == orderCount):
        # Keep this element; look for the next value in order.
        orderCount += 1
        continue
    breakCount += 1
# If every element had to be removed, no valid sequence exists.
if(breakCount == N):
    print(-1)
else:
    print(breakCount)
class LinkedList:
    """Defines a Singly Linked List with a sentinel head node.

    attributes: head (sentinel ListNode; real elements start at head.next)
    """
    def __init__(self):
        """Create a new empty list with a sentinel node."""
        self.head = ListNode()
    def insert(self, x, p):
        """Insert element x in the position after node p."""
        p.next = ListNode(x, p.next)
    def delete(self, p):
        """Delete the node following node p in the linked list."""
        p.next = p.next.next
    def printlist(self):
        """Print all the elements of the list, one per line."""
        i = self.head.next
        while i:
            print(str(i.value))
            i = i.next
    def insertAtIndex(self, x, i):
        """Insert value x at list position i (the first element is 0)."""
        j = self.head
        p = 0
        while i > p:
            j = j.next
            p = p + 1
        j.next = ListNode(x, j.next)
    def search(self, x):
        """Search for value x in the list.  Return a reference to the first
        node with value x; return None if no such node is found.

        Bug fix: the original started at the sentinel and stopped one node
        early, so a value stored in the last node was never found, and it
        never returned the node its docstring promised.
        """
        i = self.head.next
        while i is not None:
            if i.value == x:
                print("Value found at position" + str(i))
                return i
            i = i.next
        return None
    def len(self):
        """Return the length (the number of elements) in the Linked List."""
        count = 0
        node = self.head
        while node.next is not None:
            count += 1
            node = node.next
        return count
    def isEmpty(self):
        """Print and return True if the list has no elements, else False.

        (The original only printed; the return value is added so the method
        matches its documented contract — printing is kept for callers that
        rely on the output.)
        """
        empty = self.head.next is None
        if empty:
            print("True")
        else:
            print("False")
        return empty
class ListNode:
    """Represents a node of a Singly Linked List.
    attributes: value, next.
    """
    def __init__(self, val=None, nxt=None):
        self.value = val
        self.next = nxt
def main():
    """Exercise LinkedList: inserts, deletes, length and emptiness checks,
    printing the list after each step."""
    L = LinkedList()
    L.insert(10, L.head)
    print('List is: ')
    L.printlist()
    L.insert(12, L.head.next)
    print('List is: ')
    L.printlist()
    L.insert(2, L.head)
    print('List is: ')
    L.printlist()
    print('Size of L is ', L.len())
    L.delete(L.head)
    print('List is: ')
    L.printlist()
    L.delete(L.head.next)
    print('List is: ')
    L.printlist()
    print('List is empty?')
    L.isEmpty()
    print('Size of L is ', L.len())
    L.delete(L.head)
    print('List is empty?')
    L.isEmpty()
    print('Size of L is ', L.len())
    # insertAtIndex builds the list 1, 2, 3, 4 out of order.
    L.insertAtIndex(2, 0)
    L.insertAtIndex(1, 0)
    L.insertAtIndex(4, 2)
    L.insertAtIndex(3, 2)
    print('List is: ')
    L.printlist()

if __name__ == '__main__':
    main()
| true |
843a11c9ffd8879802ee75ff8c4d8a462b7a49b8 | Python | Dude036/BlackjackPy | /cards.py | UTF-8 | 2,184 | 3.765625 | 4 | [] | no_license | #!/usr/bin/python
import random
class Card(object):
    """A playing card with a rank (1-13, Ace=1) and a suit ('S','H','C','D').

    Cards support + (sum of ranks, including sum() via __radd__) and rich
    comparisons: higher rank wins; on equal rank the suit order is
    Spades > Hearts > Clubs > Diamonds.
    """
    rank = 1
    suit = 'S'

    def __init__(self, newRank, newSuit):
        self.rank = newRank
        self.suit = newSuit

    def __add__(self, other):
        # Card + Card -> int (sum of ranks).
        return self.rank + other.rank

    def __radd__(self, other):
        # int + Card -> int, so sum(list_of_cards) works.
        return self.rank + other

    def __gt__(self, other):
        if self.rank == other.rank:
            # Lower index in this order means the stronger suit.
            order = ['S', 'H', 'C', 'D']
            return order.index(self.suit) < order.index(other.suit)
        else:
            return self.rank > other.rank

    def __lt__(self, other):
        # Bug fix: was `not self.__gt__(other)`, which is also True when the
        # cards are equal.  Strict less-than is "the other card is greater".
        return other.__gt__(self)

    def __eq__(self, other):
        return self.rank == other.rank and self.suit == other.suit

    def __hash__(self):
        # Keep Card hashable: defining __eq__ alone sets __hash__ to None
        # in Python 3.
        return hash((self.rank, self.suit))

    def __ge__(self, other):
        return self.__gt__(other) or self.__eq__(other)

    def __le__(self, other):
        return self.__lt__(other) or self.__eq__(other)

    def __repr__(self):
        return str(self.rank) + self.suit

    def __str__(self):
        s = ''
        if self.suit == 'S':
            s = ' of Spades'
        elif self.suit == 'H':
            s = ' of Hearts'
        elif self.suit == 'C':
            s = ' of Clubs'
        else:
            s = ' of Diamonds'
        r = 0
        if self.rank == 1:
            r = 'Ace'
        elif self.rank == 11:
            r = 'Jack'
        elif self.rank == 12:
            r = 'Queen'
        elif self.rank == 13:
            r = 'King'
        else:
            r = self.rank
        return str(r) + s


class Deck(object):
    """A shuffled shoe of `factor` interleaved 52-card decks that refills
    itself when it runs out."""
    factor = 1

    def __init__(self, totalDecks = 1):
        self.factor = totalDecks
        # Bug fix: `decks` was a class attribute, so every Deck instance
        # shared (and kept appending to) one list of cards.  Make it
        # per-instance.
        self.decks = []
        self.fill()
        self.shuffle()

    def fill(self):
        """Append `factor` full 52-card decks."""
        for _ in range(self.factor):
            for r in range(1, 14):
                for s in ['S', 'H', 'C', 'D']:
                    self.decks.append(Card(r, s))

    def shuffle(self):
        random.shuffle(self.decks)

    def draw(self):
        """Pop one card; refill and reshuffle first if the shoe is empty."""
        if len(self.decks) == 0:
            self.fill()
            self.shuffle()
        return self.decks.pop()
if __name__ == '__main__':
    # Ad-hoc checks: addition, sum() over drawn cards, and sorting.
    c1 = Card(5, 'S')
    c2 = Card(8, 'D')
    print(c1+c2)
    d = Deck()
    c = []
    for x in range(10):
        c.append(d.draw())
    print(sum(c))
    # Same 52 cards generated in two different suit orders; after sorting,
    # element-wise equality should hold even though identities differ.
    test1 = [Card(i, j) for i in range(1, 14) for j in ['D', 'C', 'H', 'S']]
    test2 = [Card(i, j) for i in range(1, 14) for j in ['S', 'D', 'C', 'H']]
    random.shuffle(test2)
    test2.sort()
    print(test2)
    print(test1)
    print(id(test1) == id(test2))
    print(test1 == test2)
| true |
54332236a0b39758dfa4cd5023edad7c80b36a1e | Python | GuodongQi/LeetCode | /codes_1-50/11_Container_With_Most_Water.py | UTF-8 | 545 | 3.078125 | 3 | [] | no_license | class Solution:
def maxArea(self, height):
"""
:type height: List[int]
:rtype: int
"""
m = len(height)
start = 0
end = m - 1
cont_most = 0
while start < end:
if height[start] < height[end]:
h = height[start]
start += 1
else:
h = height[end]
end -= 1
cont = h * (end - start +1)
if cont_most < cont:
cont_most = cont
return cont_most
| true |
ad6d0ec4d92a2b34e080d5b4fb164f5521db1e4f | Python | thibs911/SergeSlack | /serge.py | UTF-8 | 4,154 | 2.65625 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from messaging import new_message
import database_connector
from random import randint
import requests
from params import params
import json
from quizz import Quizz
# randint is inclusive
def treat_message(message, ws):
if 'type' in message:
if message['type'] == 'message':
answer(message, ws)
def answer(message, ws):
if '<@U04JBV6SR>' in message['text']:
# Wake up thibs
if '/CallThibs' in message['text']:
ws.send(new_message("@Thibs: Suce des bites", message['channel']))
# Add a citation
elif 'add_citation' in message['text']:
add_citation(ws, message)
# Tell a Citation
elif 'citation' in message['text']:
tell_citation(ws, message['channel'])
elif 'casse toi' in message['text']:
kthxby(message, ws)
elif 'start_quizz' in message['text']:
start_quizz(ws)
if ('salut' in message['text']
or 'hello' in message['text']
or 'yo' in message['text']
or 'Salut' in message['text']
or 'Hello' in message['text']
or 'Bonjour' in message['text']
or 'bonjour' in message['text']):
respond_hello(message, ws)
def say_hello(ws):
	# Announce the bot's startup in the hard-coded default channel.
	ws.send(new_message("Serge is there BITCHES", "C02LLV4HS"))
def respond_hello(message, ws):
	# Greet the sender by first name, resolved through the Slack users.info API.
	profile = get_user_info(message['user'])['user']['profile']
	ws.send(new_message("Salut " + profile['first_name'] + ' !', message['channel']))
def kthxby(message, ws):
	"""Say goodbye in the channel and close the websocket connection."""
	ws.send(new_message("Kthxby Enculé", message['channel']))
	# Bug fix: the original evaluated `ws.close` without calling it, so the
	# socket was never actually closed.
	ws.close()
def tell_citation(ws, chan):
	"""Post a randomly chosen stored citation to the given channel."""
	cursor, cnx = database_connector.open_connection('root', '', '127.0.0.1', 'Serge')
	citations = list(database_connector.select_all_citation(cursor))
	# randint is inclusive on both ends, so 0..len-1 covers every row.
	# The previous lower bound of 1 could never pick the first citation
	# and raised ValueError when only one citation existed.
	citation = citations[randint(0, len(citations) - 1)]
	ws.send(new_message(citation['Citation'] + ' - ' + citation['Author'], chan))
	database_connector.close_connection(cursor, cnx)
def add_citation(ws, message):
if secure_insert(message):
command, citation, author = message['text'].split(",")
print "command: {}, citation: {}, author: {}".format(command, citation, author)
cursor, cnx = database_connector.open_connection('root', '', '127.0.0.1', 'Serge')
database_connector.insert_citation(cursor, citation, author)
ws.send(new_message("Nouvelle Citation Insérée! (CMB)", message['channel']))
database_connector.close_connection(cursor, cnx)
else:
ws.send(new_message("Bien essayé !", message['channel']))
def secure_insert(message):
	"""Return True when the message text contains none of the banned words.

	Bug fix: the original chained `not in` checks with `or`, which is true
	unless the text contains *every* banned word at once, so the filter
	effectively never blocked anything. The intended logic is "reject if
	any banned word appears".
	"""
	banned = ('Marseille', 'marseille', 'OM', 'sardine')
	for word in banned:
		if word in message['text']:
			return False
	return True
def get_user_info(user):
	# Resolve a Slack user id to its profile via the users.info endpoint.
	form = {'token': params.TOKEN, 'user': user}
	reply = requests.post('https://slack.com/api/users.info', data=form)
	return json.loads(reply.text)
def ask_question(ws, question):
	"""Post a quizz question with its four numbered options; return the correct option number.

	NOTE(review): the `question` parameter is currently unused -- the
	question always comes from Quizz.all_question(); presumably it should
	select question number `question`. Confirm against the Quizz API.
	"""
	index = 1
	questions = Quizz.all_question()
	ws.send(new_message(questions['q_text'], "C02LLV4HS"))
	while index < 5:
		ws.send(new_message('{} - {}'.format(index, questions['q_options_{}'.format(index)]), "C02LLV4HS"))
		index = index + 1
	return questions['q_correct_option']
def start_quizz(ws):
	"""Run a four-question quizz in the hard-coded channel, blocking on each answer."""
	ws.send(new_message('Attention ! Le Quizz va démarrer !', "C02LLV4HS"))
	question = 0
	while question < 4:
		ws.send(new_message('Question {}: '.format(question+1), "C02LLV4HS"))
		answer = ask_question(ws, question)
		# blocks until someone posts the correct option number
		wait_verify(ws, answer)
		question = question + 1
def wait_verify(ws, answer):
	"""Block on the websocket until a chat message containing the right answer arrives.

	NOTE(review): the check is a plain substring test, so e.g. answer 1
	also matches a reply of "10"; acceptable while options are the single
	digits 1-4, but worth confirming.
	"""
	condition = True
	while condition:
		result = ws.recv()
		message = json.loads(result)
		if 'type' in message:
			if message['type'] == 'message' and '{}'.format(answer) in message['text']:
				condition = False
				ws.send(new_message('GG!, Next', "C02LLV4HS"))
				return True
| true |
f880efb60a2ea9d04b036d918db20d418c77636a | Python | tanyastropheus/holbertonschool-higher_level_programming | /0x0B-python-input_output/12-student.py | UTF-8 | 542 | 3.5 | 4 | [] | no_license | #!/usr/bin/python3
"""contains class Student"""
class Student:
    """A simple student record with JSON-ready serialization."""

    def __init__(self, first_name, last_name, age):
        """Store the student's name and age."""
        self.first_name = first_name
        self.last_name = last_name
        self.age = age

    def to_json(self, attrs=None):
        """Return a dictionary representation of this instance.

        When ``attrs`` is given, only the attributes whose names it
        contains are included; otherwise the full attribute dict is
        returned.
        """
        if attrs is None:
            return self.__dict__
        return {k: v for k, v in self.__dict__.items() if k in attrs}
| true |
3a1c3bb27dfa6954339ed404451e451ea83ada12 | Python | mk270/covid19-uk-growth | /scrape_cases | UTF-8 | 1,333 | 2.59375 | 3 | [] | no_license | #!/usr/bin/env python3
import argparse
import datetime
import database
import sys
import logging
import os
import api_client
iso_date_fmt = "%Y-%m-%d"
def today_iso(days_ago):
    """Return the date `days_ago` days before now, formatted as ISO YYYY-MM-DD."""
    target = datetime.datetime.now() - datetime.timedelta(days_ago)
    return target.strftime(iso_date_fmt)
def run():
    """Fetch case/test counts for today (or an offset day) and persist them.

    Exit codes: 0 when the data is already stored, 1 when the API has no
    data for the requested date; otherwise falls through after saving.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--debug', action='store_true', required=False,
                        help='set logging level to INFO')
    parser.add_argument('--days-ago', action='store', type=int,
                        default=0,
                        help='set date offset')
    args = parser.parse_args()
    # 20 == logging.INFO; also enabled via the DEBUG environment variable.
    if args.debug or ("DEBUG" in os.environ):
        logging.getLogger().setLevel(20)
    if database.already_done_today(today_iso(args.days_ago)):
        logging.warning("Data already obtained for the day requested")
        sys.exit(0)
    try:
        day, count, tested = api_client.lookup_cases_and_tested(args.days_ago)
    except api_client.DateNotFound:
        logging.error("Data not (yet) available for date requested")
        sys.exit(1)
    print(day, count, tested)
    # date_diff = (datetime.datetime.now() - day).days
    # day_formatted = datetime.datetime.strftime(day, iso_date_fmt)
    database.save_update(day, count, tested)
if __name__ == '__main__':
run()
| true |
d2d9bb596d325bbdd1ff7b5b1fad0f6cbe1ca926 | Python | Kohdz/Algorithms | /LeetCode/medium/72_kokoEatingBananas.py | UTF-8 | 716 | 3.625 | 4 | [] | no_license | # https://leetcode.com/problems/koko-eating-bananas/
# https://www.youtube.com/watch?v=Sp2sRLcLND0
# https://leetcode.com/discuss/general-discussion/786126/python-powerful-ultimate-binary-search-template-solved-many-problems
import math
def minEatingSpeed(piles, H):
    """Binary-search the smallest eating speed that finishes all piles within H hours."""
    def hours_needed(speed):
        # Each pile takes ceil(pile / speed) hours regardless of leftovers.
        return sum(math.ceil(pile / speed) for pile in piles)

    lo, hi = 1, max(piles)
    while lo < hi:
        mid = (lo + hi) // 2
        if hours_needed(mid) <= H:
            hi = mid          # mid is feasible; a slower speed might be too
        else:
            lo = mid + 1      # too slow; must eat faster
    return lo
H = 8
piles = [3,6,7,11]
# Output: 4
print(minEatingSpeed(piles, H)) | true |
53384d0f8d64944004e4d8951cc0083e82a53c25 | Python | ksopyla/numbers_recognition | /data_helpers.py | UTF-8 | 2,861 | 3 | 3 | [] | no_license | import numpy as np
import os
from scipy.misc import imread
def load_dataset(digits_dir,img_size, digits_count, max_files=float('inf')):
    '''
    loads digit dataset, folder structure:
    Digit_2/
        Font_name1/
            00_timestamp.png
            ...
            99_timestamp.png
        Font_name2/
            00_timestamp.png
            ...
            99_timestamp.png
        Font_name3/
            00_timestamp.png
            ...
            99_timestamp.png
    Params
    ============
    digits_dir - root dir, containning the font folders with digits
    img_size - tuple, size of each image (width,height)
    digits_count - how many digits is on the image
    max_files - currently unused; TODO confirm whether it should cap
                the number of files walked below
    Return
    ============
    X - numpy array with data, Nx(img_size[0]*img_size[1]), images are flatten
    Y - numpy array with one-hot labels, Nx(digits_count*10)
    img_files - list of the image paths in row order
    '''
    # recursively collect every file under digits_dir
    img_files = []
    for root, dirs, files in os.walk(digits_dir):
        for file in files:
            img_files.append(os.path.join(root, file))
    N= len(img_files)
    imgN= img_size[0]*img_size[1]
    X = np.zeros([N,imgN])
    # how many digits?
    Y = np.zeros([N,digits_count*10])
    for i,file in enumerate(img_files):
        # NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2;
        # newer environments need imageio.imread instead.
        img = imread(file)
        #take number from file name, e.g. "42_timestamp.png" -> "42"
        number_str = os.path.basename(file).split('_')[0]
        X[i,:] = img.flatten()
        Y[i,:] = encode2vector(number_str)
    return (X,Y,img_files)
def encode2vector(number_str):
    '''
    One-hot encode a digit string: each digit occupies its own 10-slot
    segment, so the result has len(number_str) * 10 entries with exactly
    one 1 per segment.
    Params
    =========
    number_str - string containing a number with N digits
    '''
    vec = np.zeros(len(number_str) * 10)
    for offset, ch in enumerate(number_str):
        vec[offset * 10 + int(ch)] = 1
    return vec
def decode2digits(vector, digits=2):
    '''
    Decode a concatenated one-hot vector (10 slots per digit) back into a
    digit string: reshape to (digits, 10) rows and read the index of the
    hot column in each row, top to bottom.
    '''
    hot_columns = vector.reshape(digits, 10).nonzero()[1]
    return "".join(str(d) for d in hot_columns)
def decode2digits_pos(positions):
    # Concatenate per-digit positions into the decoded number string.
    return "".join(map(str, positions))
def random_batch(X, Y, batch_size=128):
    """Sample `batch_size` distinct rows from X and Y, returning them plus the indices."""
    n_rows = X.shape[0]
    if n_rows != Y.shape[0]:
        raise ValueError('X and Y has different number of examples')
    if batch_size > n_rows:
        raise ValueError('Batch cant be larger then X has rows')
    idx = np.random.choice(n_rows, batch_size, replace=False)
    return X[idx, :], Y[idx, :], idx
| true |
72dc187c4453acbd981e58210d8b1a594c606425 | Python | JDiegoS/Proyecto1-Software-Renderer | /lib.py | UTF-8 | 3,434 | 3.21875 | 3 | [] | no_license | import struct
from collections import namedtuple
#Extraido de (no es la respuesta aceptada) https://stackoverflow.com/questions/28253102/python-3-multiply-a-vector-by-a-matrix-without-numpy
def matrixMul(a, b):
    # Plain triple-loop matrix product: result[i][j] = sum_k a[i][k] * b[k][j].
    # Adapted from https://stackoverflow.com/questions/28253102 (non-accepted answer).
    rows = len(a)
    cols = len(b[0])
    inner = len(a[0])
    result = []
    for i in range(rows):
        row = [0] * cols
        for j in range(cols):
            acc = 0
            for k in range(inner):
                acc += a[i][k] * b[k][j]
            row[j] = acc
        result.append(row)
    return result
#Funciones dadas por dennis
V2 = namedtuple('Vertex2', ['x', 'y'])
V3 = namedtuple('Vertex3', ['x', 'y', 'z'])
def sum(v0, v1):
    """
    Input: 2 size 3 vectors
    Output: Size 3 vector with the per element sum
    NOTE(review): this intentionally shadows the builtin `sum` for the
    rest of the module.
    """
    return V3(v0.x + v1.x, v0.y + v1.y, v0.z + v1.z)
def mul(v0, k):
    """
    Input: a size 3 vector and a scalar k
    Output: Size 3 vector with each component multiplied by k
    """
    return V3(v0.x * k, v0.y * k, v0.z *k)
def char(c):
    # Pack a single ASCII character as 1 byte.
    return struct.pack('=c', c.encode('ascii'))
def word(c):
    # Pack a 2-byte signed integer ('=': standard size, native byte order).
    return struct.pack('=h', c)
def dword(c):
    # Pack a 4-byte signed integer ('=': standard size, native byte order).
    return struct.pack('=l', c)
def glCreateWindow(width, height):
    # NOTE(review): `Render` is neither defined nor imported in this
    # module -- presumably provided elsewhere in the project; confirm
    # before calling this function.
    win = Render(width, height)
    return win
def cross(v0, v1):
    # Cross product of two 3-D vectors.
    return V3(
        v0.y * v1.z - v0.z * v1.y,
        v0.z * v1.x - v0.x * v1.z,
        v0.x * v1.y - v0.y * v1.x,
    )
def color(r, g, b):
    # Pack an RGB triple as the 3-byte BGR sequence BMP pixel data expects.
    return bytes([b, g, r])
def bbox(*vertices):
    # Axis-aligned bounding box of the given points:
    # returns (min corner, max corner) as two V2 points.
    xs = [ vertex.x for vertex in vertices ]
    ys = [ vertex.y for vertex in vertices ]
    xs.sort()
    ys.sort()
    return V2(xs[0], ys[0]), V2(xs[-1], ys[-1])
def barycentric(A, B, C, P):
    # Barycentric coordinates of P with respect to triangle ABC,
    # computed from one cross product of the edge/offset vectors.
    cx, cy, cz = cross(
        V3(B.x - A.x, C.x - A.x, A.x - P.x),
        V3(B.y - A.y, C.y - A.y, A.y - P.y)
    )
    if abs(cz) < 1:
        # Degenerate (near zero-area) triangle: return an impossible
        # coordinate triple so callers can discard the point.
        return -1, -1, -1
    u = cx/cz
    v = cy/cz
    w = 1 - (u + v)
    return w, v, u
def norm(v0):
    # Normalize v0 to unit length (the zero vector is returned unchanged).
    v0length = length(v0)
    if not v0length:
        return V3(0, 0, 0)
    return V3(v0.x/v0length, v0.y/v0length, v0.z/v0length)
def dot(v0, v1):
    # Dot product of two 3-D vectors.
    return v0.x * v1.x + v0.y * v1.y + v0.z * v1.z
def sub(v0, v1):
    # Component-wise subtraction of two 3-D vectors (v0 - v1).
    return V3(v0.x - v1.x, v0.y - v1.y, v0.z - v1.z)
def length(v0):
    # Euclidean length of a 3-D vector.
    return (v0.x**2 + v0.y**2 + v0.z**2)**0.5
def allbarycentric(A, B, C, bbox_min, bbox_max):
    """Barycentric coordinates of every integer grid point in the bounding box.

    Returns an array of shape (n_points, 3) with one (w, v, u) row per
    grid point of [bbox_min.x, bbox_max.x) x [bbox_min.y, bbox_max.y).
    """
    # Bug fix: `numpy` was referenced here without ever being imported in
    # this module (the rest of the file is stdlib-only), so every call
    # raised NameError. A local import keeps the module's top level clean.
    import numpy
    barytransform = numpy.linalg.inv([[A.x, B.x, C.x], [A.y, B.y, C.y], [1, 1, 1]])
    grid = numpy.mgrid[bbox_min.x:bbox_max.x, bbox_min.y:bbox_max.y].reshape(2, -1)
    # Append a row of ones so each column is the homogeneous point (x, y, 1).
    grid = numpy.vstack((grid, numpy.ones((1, grid.shape[1]))))
    barycoords = numpy.dot(barytransform, grid)
    # barycoords = barycoords[:,numpy.all(barycoords>=0, axis=0)]
    barycoords = numpy.transpose(barycoords)
    return barycoords
def writebmp(filename, width, height, pixels):
    # Write `pixels` (height rows x width columns of 3-byte BGR values) as
    # an uncompressed 24-bit BMP file.
    # NOTE(review): rows are not padded to a 4-byte boundary, so widths
    # that are not multiples of 4 produce a technically malformed BMP --
    # confirm the expected image sizes.
    f = open(filename, 'bw')
    # File header (14 bytes)
    f.write(char('B'))
    f.write(char('M'))
    f.write(dword(14 + 40 + width * height * 3))
    f.write(dword(0))
    f.write(dword(14 + 40))
    # Image header (40 bytes)
    f.write(dword(40))
    f.write(dword(width))
    f.write(dword(height))
    f.write(word(1))
    f.write(word(24))
    f.write(dword(0))
    f.write(dword(width * height * 3))
    f.write(dword(0))
    f.write(dword(0))
    f.write(dword(0))
    f.write(dword(0))
    # Pixel data (width x height x 3 pixels)
    for x in range(height):
        for y in range(width):
            f.write(pixels[x][y])
    f.close()
a69771756dd8b34fb327881783b49e161b78992d | Python | mgramli1/pyelixys | /pyelixys/utils/flashlpc.py | UTF-8 | 13,465 | 2.515625 | 3 | [
"BSD-3-Clause"
] | permissive | # UCLA EE202A - LPC1768 Programmer for Python
# November 27, 2010
#issues baudrate too slow --> missed messages
import sys
import time
from time import sleep
import serial
import getopt
import binascii
import struct
def hostPrint(msg):
    # Log `msg` to stdout prefixed with a local timestamp (Python 2 print).
    now = time.localtime(time.time())
    print time.strftime("%Y-%m-%d %H:%M:%S ", now) + msg
def hostFault(msg):
    # Log a fatal error and abort the program with exit status 2.
    hostPrint(msg)
    sys.exit(2)
### CLASS START ###
class flashlpc:
RESET_STRING = "{~NESL~}"
def __init__(self, port, crystalfreq,reset=False):
#--- Initialize serial device
self.sd = serial.Serial(port, baudrate=115200, timeout=1)
#--- Eliminate any lingering data in input stream
self.sd.flushInput()
#--- Reset Chip ---#
if reset:
self.lpcReset()
#--- Start sync sequence
self.lpcSyncScript(crystalfreq)
#--- Display device info
self.lpcShowDevID()
self.lpcShowBootloaderVer()
self.lpcShowSerialNumber()
#--- Unlock device for programming
self.sd.write("U 23130\n") #see page625 lpc1768 specs for details
ret = self.sd.readline()
if not ret:
hostFault("ERROR: Unable to unlock device for programming. Timed out: %s" % ret)
elif int(ret) != 0:
hostFault("ERROR: Unable to unlock device for programming: %d" % int(ret))
#--- CONSTANTS ---
#LPC17xx series has uneven sector sizes. Below 64kb = sector size of 4, > 64kb is size of 32
self.lpc1768_sectors = (4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
32,32,32,32,32,32,32,32,32,32,32,32,32,32)
self.lpc1768_prog_addr_base = 0x10002000 #arbitrary, just choose something in Local RAM for temporary storage
self.lpc1768_mem_block_size = 4096
#-----------------
    def lpcReset(self):
        # Send the bridge's reset string to hard-reset the LPC target,
        # then give it 250 ms to come back and drop any stale input.
        hostPrint("Attempting RESET...")
        self.sd.write(self.RESET_STRING)
        time.sleep(.250)
        self.sd.flushInput()
    def lpcAutoReset(self):
        # "G 0 T" is presumably the ISP Go command (jump to address 0 in
        # Thumb mode), restarting the freshly flashed program -- confirm
        # against the LPC17xx ISP command reference.
        hostPrint("Auto RESET...")
        self.sd.write("G 0 T\r\n")
        self.sd.flushInput()
def lpcSyncScript(self, crystalfreq):
#--- Write "?" to start sync
self.sd.write("?")
ret = self.sd.readline()
if ret != "Synchronized\r\n":
hostFault("ERROR: Unable to sync. Sync message not detected")
elif not ret:
hostFault("ERROR: Unable to sync. Timed out at \'?\'")
#--- Reply sync message
self.sd.write("Synchronized\n") ### DEBUG: SHOULD BE "Synchronized\r\n"?
ret = self.sd.readline()
if ret != "Synchronized\n":
hostFault("ERROR: Unable to sync. Sync message not detected")
#--- Check for "OK\r\n"
ret = self.sd.readline()
if ret != "OK\r\n":
hostFault("ERROR: Unable to sync. OK message not detected")
#--- Set crystal frequency
self.sd.write("%d\r\n" % crystalfreq)
ret = self.sd.readline()
if ret != ("%d\rOK\r\n" % crystalfreq):
hostFault("ERROR: Unable to sync. Frequency OK message not detected")
#--- Echo off
self.sd.write("A 0\n")
ret = self.sd.readline()
ret = self.sd.readline()
return
def lpcShowDevID(self):
self.sd.write("J\n")
ret = self.sd.readline()
if ret != "0\r\n":
hostFault("ERROR: Unable to read device ID")
ret = self.sd.readline()
hostPrint("Device ID = " + hex(int(ret)))
return
def lpcShowBootloaderVer(self):
self.sd.write("K\n")
ret = self.sd.readline()
if ret != "0\r\n":
hostFault("ERROR: Unable to read bootloader version")
ret = str(int(self.sd.readline())) + "." + str(int(self.sd.readline()))
hostPrint("Bootloader Ver = " + ret[::-1])
return
def lpcShowSerialNumber(self):
self.sd.write("N\n")
ret = self.sd.readline()
if ret != "0\r\n":
hostFault("ERROR: Unable to read serial number")
ret = str(int(self.sd.readline())) + " " + str(int(self.sd.readline())) + " " + str(int(self.sd.readline())) + " " + str(int(self.sd.readline()))
hostPrint("Serial Number = " + ret)
return
def lpcWriteDataToRam(self, addr_base, data):
data_size = len(data)
uuline_size = 45
uublock_size = 900 #45 byte length x 20 lines
cur_addr = addr_base
for i in range(0, data_size, uublock_size):
cur_block_size = uublock_size
if (data_size - i) <= uublock_size:
cur_block_size = data_size - i
#--- Process individual blocks
data_block = data[i : i + cur_block_size]
data_block_size = len(data_block)
#--- Set LPC1768 to receive block
self.sd.write("W %d %d\n" % (cur_addr, data_block_size))
ret = self.sd.readline()
if not ret:
hostFault("ERROR: Unable to write data to RAM. Timed out: %s" % ret)
elif int(ret) != 0:
hostFault("ERROR: Unable to write data to RAM: %d" % int(ret))
#--- Write uuencoded lines (20 lines of 45 bytes) for each block
for j in range(0, data_block_size, uuline_size):
cur_line_size = uuline_size
if (data_block_size - j) <= uuline_size:
cur_line_size = data_block_size - j
#uu.encode(img_file)
uudata = binascii.b2a_uu(data_block[j:j+cur_line_size])
self.sd.write(uudata)
#--- Calculate and send string checksum
datasum = 0
for ch in data_block:
datasum = datasum + ord(ch)
self.sd.write("%d\n" % datasum)
ret = self.sd.readline()
#print "\'" + str(datasum) + "\'"
if not ret:
hostFault("Error in checksum for RAM write. Timed out: %s" % ret)
if ret != "OK\r\n":
hostFault("Error in checksum for RAM write: %s" % ret)
#--- Move to next block
cur_addr = cur_addr + cur_block_size
return
def lpcInsertImgChecksum(self, img):
### SEE: http://code.google.com/p/micropendousx/source/browse/trunk/MicropendousX/Vector_Checksum_Calculator.c
checksum_vec = 7 #checksum_vector for lpc1768 is 7, according to OpenOCD flash source code
vectable = struct.unpack("8i", img[0:32]) #little endian UTF-8 encoding
#--- Sum up values over first 8 interrupt vectors
checksum = 0
for i in range(0, len(vectable)):
if i != checksum_vec:
checksum = checksum + vectable[i]
#--- 2's complement operation for obtaining "0 - checksum"
checksum = (2**32) - checksum % (2**32)
#--- Repack image and add checksum vectors to beginning 32 bytes
newimg = ''
for i in range(0,8):
if i == checksum_vec:
newimg += struct.pack("<I", checksum)
else:
newimg += struct.pack("<I", vectable[i])
#--- Append original image after checksum vectors
newimg += img[32:]
return newimg
def lpcGetSector(self, addr):
#--- Translates a base address to corresponding sector in flash mem
sectors = self.lpc1768_sectors
n_sectors = len(sectors)
addr_base = 0
for sect in range(0, n_sectors):
if addr_base <= addr and addr < (addr_base + 1024 * sectors[sect]):
return sect
addr_base = addr_base + 1024 * sectors[sect]
#--- If corresponding sector not found, return -1
return -1
def lpcEraseProgramSectors(self):
sector_s = 0
sector_e = len(self.lpc1768_sectors) - 1
hostPrint("Erasing sectors %d to %d (max sectors %d)" % (sector_s, sector_e, len(self.lpc1768_sectors)))
#--- Prepare sectors for modification
self.sd.write("P %d %d\n" % (sector_s, sector_e))
ret = self.sd.readline()
if not ret:
hostFault("ERROR: Unable to prepare sectors for programming. Timed out: %s" % ret)
elif int(ret) != 0:
hostFault("ERROR: Unable to prepare sectors for programming: %d" % int(ret))
#--- Erase sectors
self.sd.write("E %d %d\n" % (sector_s, sector_e))
ret = self.sd.readline()
if not ret:
hostFault("ERROR: Unable to erase sectors. Timed out: %s" % ret)
elif int(ret) != 0:
hostFault("ERROR: Unable to erase sectors: %d" % int(ret))
return
def lpcProgramImageScript(self, img):
hostPrint("Programming image...")
#--- Defaults
mem_base_addr = self.lpc1768_prog_addr_base
mem_block_size = self.lpc1768_mem_block_size
#--- Insert checksum into image
### call: insert img checksum ()
img = self.lpcInsertImgChecksum(img)
### OUTPUT "inserted checksum ____" into image
hostPrint("Inserted checksum for bootloader validation into image.")
#--- Pad image with '0xFF' characters to a valid multiple of blocksize
img_size = len(img)
if (img_size % mem_block_size) != 0:
pad_size = mem_block_size - (img_size % mem_block_size)
pad_data = ''
for i in range(0, pad_size):
pad_data = pad_data + '\xff'
img = img + pad_data
img_size = img_size + pad_size
### OUTPUT "padding image with ___ bytes"
hostPrint("Image size is irregular. Need to pad image by %d bytes" % pad_size)
else:
hostPrint("Image size is a valid multiple of block size. No padding needed")
#--- Erase old program
### call: erase program sectors ()
self.lpcEraseProgramSectors()
#--- Write each block of entire image
for i in range(0, img_size, mem_block_size):
#- Calculate size of current block to write
cur_block_size = mem_block_size
if (img_size - i) <= mem_block_size:
cur_block_size = img_size - i
#- Copy current block to memory
flash_addr_s = i
flash_addr_e = flash_addr_s + cur_block_size
hostPrint("Copying %d bytes to address 0x%x (current image block = %d ~ %d)" % (cur_block_size, mem_base_addr, flash_addr_s, flash_addr_e))
self.lpcWriteDataToRam(mem_base_addr, img[flash_addr_s:flash_addr_e])
#- Prepare flash sectors
sector_s = self.lpcGetSector(flash_addr_s)
sector_e = self.lpcGetSector(flash_addr_e)
#print "P %d %d\n" % (sector_s, sector_e) # DEBUG ###
self.sd.write("P %d %d\n" % (sector_s, sector_e))
ret = self.sd.readline()
if not ret:
hostFault("ERROR: Unable to prepare sectors for programming. Timed out: %s" % ret)
elif int(ret) != 0:
hostFault("ERROR: Unable to prepare sectors for programming: %d" % int(ret))
#- Copy data from lpc1768 RAM to flash
#print "C %d %d %d\n" % (flash_addr_s, mem_base_addr, cur_block_size) # DEBUG ###
self.sd.write("C %d %d %d\n" % (flash_addr_s, mem_base_addr, cur_block_size))
ret = self.sd.readline()
if not ret:
hostFault("ERROR: Unable to program data from RAM to FLASH. Timed out: %s" % ret)
elif int(ret) != 0:
hostFault("ERROR: Unable to program data from RAM to FLASH: %d" % int(ret))
hostPrint("Programming complete. Binary image successfully written and validated in flash memory")
### CLASS END ###
#def unescape(msg):
# msg = msg.replace('\r','\\r')
# msg = msg.replace('\n','\\n')
# return msg
#def lpcAssert(retVal, expectedVal):
# if (retVal != expectedVal):
# hostPrint("[ERROR] Expected \'" + unescape(expectedVal) + "\'; got \'" + unescape(retVal) + "\'")
# return -1
# else:
# hostPrint("[OK] Got \'" + unescape(expectedVal) + "\'")
# return 0
def usage():
    # Print the command-line help text to stderr (Python 2 print syntax).
    print >>sys.stderr, "Usage: flashlpc.py [--erase] <DEVICE> filename.bin\nExample:\n\tflashlpc.py --erase COM3\tOnly erases the program on device\n\tflashlpc.py COM3 main.bin\tPrograms main.bin onto device\n"
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "", ["help", "erase", "reset","auto-reset"])
except getopt.error, msg:
usage()
sys.exit(2)
erase = 0
reset = False
autoReset = False
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit(0)
elif o in ("-e", "--erase"):
erase = 1
elif o in ("-r", "--reset"):
reset = True
elif o in ("-a", "--auto-reset"):
autoReset = True
else:
hostFault("Invalid parameter(s)")
if len(args) < 2 and erase != 1:
usage()
sys.exit(2)
dev = args[0]
crystalfreq = 12000
cmd = flashlpc(dev, crystalfreq, reset)
if erase == 1:
cmd.lpcEraseProgramSectors()
else:
file = args[1]
img = open(file, "rb").read()
cmd.lpcProgramImageScript(img)
hostPrint("Exiting. Reset the LPC1768 board")
if autoReset:
cmd.lpcAutoReset()
if __name__ == "__main__":
main()
| true |
a67c6e21d7af9fafa17ffb4694a3124a51cf09e5 | Python | asdasdqwdqwfsdf/FiniteElement | /Part2_code/test_Nanna.py | UTF-8 | 1,955 | 2.578125 | 3 | [] | no_license | from Part2_code.getplate import getPlate
from matplotlib import pyplot as plt
import numpy as np
from Part2_code.linearElasticity2D import homogeneousDirichlet
from Part1.plot import plot
def PlotMesh(N):
    """Plotting mesh covering the unit disc with N nodes.

    Triangles are drawn in black, boundary edges in red.
    """
    p, tri, edge = getPlate(N)
    #print(p,tri,edge)
    #print(len(p))
    fig, ax = plt.subplots(figsize=(3, 3))
    # Shift edge indices to 0-based for numpy indexing.
    # NOTE(review): `tri` is not shifted -- presumably already 0-based;
    # confirm against getPlate's contract.
    edge = edge -1
    for el in tri:
        ax.plot(p[el, 0], p[el, 1], "ro-", color="black")
        # close the triangle by joining the last vertex back to the first
        ax.plot(p[el[[2, 0]], 0], p[el[[2, 0]], 1], "ro-", color="black")
    for el in edge:
        ax.plot(p[el, 0], p[el, 1], "ro-", color="red")
       # ax.plot(p[el[[2, 0]], 0], p[el[[2, 0]], 1], "ro-", color="red")
    ax.set_xlabel("x")
    ax.set_ylabel("y")
    ax.set_title(str(N) + " nodes")
    plt.show()
    return ax
E = 1
nu = 0.25
def f(x, y, pos):
    """Body-force component for the manufactured solution u = (x^2-1)(y^2-1).

    pos selects the component: 0 -> x-component, anything else -> y-component.
    """
    scale = E / (1 - nu**2)
    if pos == 0:
        return scale * (-2*y**2 - x**2 + nu*x**2 - 2*nu*x*y - 2*x*y + 3 - nu)
    return scale * (-2*x**2 - y**2 + nu*x**2 - 2*nu*x*y - 2*x*y + 3 - nu)
def u(x, y):
    """Exact displacement field of the manufactured solution; vanishes on the unit-square boundary."""
    return (x*x - 1) * (y*y - 1)
def error():
    """Estimate convergence of the FEM solution against a fine-grid reference.

    Solves on an n=64 reference grid, then compares coarser solutions
    (N = 2^i + 1, i = 1..4) at the fine-grid nodes they share.
    Returns (per-level relative error arrays, l2 norms of the
    differences, mesh sizes h).
    """
    n=2**6
    rel_error=[]
    conv=[]
    h=[]
    # reference solution; u interleaves x/y displacements per node
    u,p, tri = homogeneousDirichlet(n+1, 4, f, 0.25, 1)
    ux,uy= u[::2],u[1::2]
    a = np.linspace(0, n, n + 1)
    for i in range(1,5):
        N=2**i+1
        t = int(n/(2**(i)))
        # indices of the fine-grid nodes coinciding with the coarse grid
        k = np.array([(n+1) * a[::t] + j for j in a[::t]]).flatten()
        k=k.astype(int)
        k=np.sort(k)
        ux_k,uy_k = ux[k],uy[k]
        u, p,tri = homogeneousDirichlet(N, 4, f, 0.25, 1)
        u_num= np.hstack((u[::2] ,u[1::2]))
        u_k=np.hstack((ux_k,uy_k))
        error= abs(u_k - u_num)/np.linalg.norm(ux,ord=np.inf)
        rel_error.append(error)
        conv.append(np.linalg.norm(u_num - u_k))
        h.append(1/(2**i))
    return rel_error, conv,h
rel_error,conv,h=error()
plt.figure()
plt.loglog(h,conv)
plt.show()
order = np.polyfit(np.log(h), np.log(conv), 1)[0]
print("order", order)
| true |
4dc61fc6240ba5078b8bd5a08381d0883c3ca8e3 | Python | honzaMaly/my-alternatives | /model/transactions.py | UTF-8 | 583 | 2.921875 | 3 | [] | no_license | """Transaction Entities"""
import datetime
from model import ATransaction, AProfile, APlace
class Transaction(ATransaction):
    """A concrete transaction linking a user profile to a place at a creation date.

    Equality and hashing are based on (place, user, created) and require
    the exact same class, so instances are safe in sets/dicts as long as
    those attributes are not mutated afterwards.
    """
    def __init__(self, created: datetime.date, user: AProfile, place: APlace):
        super().__init__(created)
        self.user: AProfile = user
        self.place: APlace = place
    def __hash__(self):
        # Must stay consistent with __eq__ below.
        return hash((self.place, self.user, self.created))
    def __eq__(self, other):
        return self.__class__ == other.__class__ and self.place == other.place and self.user == other.user \
               and self.created == other.created
| true |
23d22a7324b148a628c862354e81bf8f88d54306 | Python | ken2190/EPFLx-RoboX-Neurorobotics-utils | /epflx_robox_nrp_utils/SOM/SOM_additional.py | UTF-8 | 5,522 | 2.5625 | 3 | [] | no_license | # class SOM_additional (SOM_solution)
import numpy as np
import pandas as pd
import pylab as pl
import matplotlib.pyplot as plt
import matplotlib as mpl
import time
from matplotlib import collections as mc
from matplotlib import patches
from IPython import display
from IPython import get_ipython
class SOM_additional():
# Self-Organizing Map mapping the environment depending on the positions visited by the robot
    def __init__(self, csv_output_lattice):
        # csv_output_lattice: path of the CSV file the trained lattice is
        # written to by save_lattice / save_lattice2.
        import warnings; warnings.filterwarnings('ignore')
        self.csv_output_lattice = csv_output_lattice
#########################
### Visualization ###
#########################
def visualization_main(self,lattice,Nn,eta,sigma,trial):
# Self Organazing Map - Process visualization
get_ipython().run_line_magic('matplotlib', 'inline')
# SOM-lattice: centers and edges
edges = [] # list of links between centers
centers = [] # list of centers
for i in range(Nn):
for j in range(Nn):
if i > 0:
edges.append([(lattice[i-1,j,1], lattice[i-1,j,0]),
(lattice[i,j,1], lattice[i,j,0])])
if j > 0:
edges.append([(lattice[i,j-1,1], lattice[i,j-1,0]),
(lattice[i,j,1], lattice[i,j,0])])
centers.append((lattice[i,j,1],lattice[i,j,0]))
Cx,Cy = zip(*centers)
lc2 = mc.LineCollection(edges, colors='red', linewidths=.8)
# ENVIRONMENT
# walls
borders = [[(4.8,4.8), (4.8,-4.8)], [(4.8,-4.8), (-4.8,-4.8)],
[(-4.8,-4.8), (-4.8,4.8)], [(-4.8,4.8), (4.8,4.8)]]
lc1 = mc.LineCollection(borders, colors='black', linewidths=1)
# obstracles
rect1 = patches.Rectangle((-1.0,-3.0), 2., 1., color='black')
rect2 = patches.Rectangle(( 1.0,-3.0), 1., 3., color='black')
rect3 = patches.Rectangle((-2.0, 0.0), 1., 2., color='black')
rect4 = patches.Rectangle((-2.0, 2.0), 3., 1., color='black')
# PLOT (displaying)
# define figure
fig = plt.figure(0,figsize=(8, 6))
# Information board
fig.suptitle('Trial: {} [sigma: {}, eta: {}]; Map: {}x{}'.format(int(trial), round(sigma,3), round(eta,3), Nn, Nn)) # (links: {}) , len(edges)
ax = fig.add_subplot(111)
# displaying of obstacles
ax.add_patch(rect1)
ax.add_patch(rect2)
ax.add_patch(rect3)
ax.add_patch(rect4)
# displaying of walls
ax.add_collection(lc1)
# displaying of exploration data
plt.plot(self.pos[:,1], self.pos[:,0],'b.',alpha=0.1)
# displaying of SOM-lattice
ax.add_collection(lc2) # edges
plt.plot(Cx, Cy,'ro',markersize=8.0) # centers
# SHOW (figure)
plt.gca().invert_yaxis()
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
plt.gca().set_aspect('equal', adjustable='box')
display.clear_output(wait=True)
display.display(plt.gcf())
################################
### Upload and Save data ###
################################
    def load_data(self,csv_file):
        # Extract recorded robot states and coerce the first two columns
        # into numeric (x, y) positions.
        states = pd.read_csv(csv_file, delimiter=',',header=0).values
        positions = np.array([pd.to_numeric(states[:,0], errors='coerce'), pd.to_numeric(states[:,1], errors='coerce')]).T
        # Subsample to roughly 1000 points. NOTE(review): the slice step
        # relies on Python 2 integer division; under Python 3 it becomes a
        # float and slice() raises TypeError.
        self.pos = positions[slice(0,positions.shape[0],(positions.shape[0]/1000)),:]
        return self.pos
    def save_lattice2(self,lattice):
        # Alternative writer: stacked (long) format via pandas.
        # NOTE(review): pd.Panel was removed in pandas 0.25, so this
        # method only works on old pandas versions.
        stacked = pd.Panel(lattice.swapaxes(1,2)).to_frame().stack().reset_index()
        stacked.columns = ['x', 'y', 'z', 'value']
        # save to file
        stacked.to_csv(self.csv_output_lattice, index=False)
    def save_lattice(self,lattice,Nn):
        # Flatten the (Nn, Nn, 2) lattice into rows of
        # (lattice index i, lattice index j, x coordinate, y coordinate).
        output = np.zeros((Nn*Nn,4))
        for i in range(Nn):
            for j in range(Nn):
                output[i*Nn+j][:] = [i,j,lattice[i,j,0],lattice[i,j,1]]
        # save to file
        np.savetxt(self.csv_output_lattice, output, delimiter=",", header = "Lattice index X,# Lattice index Y,# Coordinate X,# Coordinate Y")
    def som_preparation(self,trials,Nn,visualization):
        """Set up a training run: notebook progress bar, start time, and map size.

        Returns (start time T, progress-bar widget f, possibly adjusted Nn).
        """
        from IPython import display
        from ipywidgets import IntProgress
        mode = ['simulation','visualization','grading']
        # ERROR: there are only two available modes: 'simualtion' or 'visualization'
        visualization = mode.index(visualization)
        video = visualization
        # grading mode always uses a fixed 8x8 lattice
        if(video==2): Nn = 8
        T = time.time()
        f = IntProgress(min=0, max=trials) # instantiate the bar
        display.display(f) # display the bar
        return T,f,Nn
    def visualization(self, simdata, lattice, visualization, f):
        """Advance the progress bar, persist the lattice, and redraw when requested.

        simdata packs (Nn, N_trials, trial, eta, sigma) for the current step.
        """
        mode = ['simulation','visualization','grading']
        Nn = simdata[0];
        N_trials = simdata[1];
        trial = simdata[2];
        eta = simdata[3];
        sigma = simdata[4];
        # ERROR: there isn't any available mode with this name
        # Program cannot define an index for further processing
        visualization = mode.index(visualization)
        # 1) 'simulation' - only simulation of SOM training
        # 2) 'vizualization' - visualize and update a current SOM state
        video = visualization
        f.value += 1
        self.save_lattice(lattice,Nn)
        # redraw every step in visualization mode, or once at the final trial
        if(video or trial==N_trials):
            if(video<2): self.visualization_main(lattice,Nn,eta,sigma,trial)
    def display_results(self, T, visualization):
        """Clear the notebook output and report elapsed simulation time (Python 2 print)."""
        mode = ['simulation','visualization','grading']
        # ERROR: there are only two available modes: 'simualtion' or 'visualization'
        visualization = mode.index(visualization)
        video = visualization
        display.clear_output(wait=True)
        if(video<2): print 'Done. Simulation time is ', time.time()-T, '(s).'
| true |
13e046c392e5a925fafbcc17ade366cf937406cd | Python | sanderadam/my-first-blog | /rooster/rommel.py | UTF-8 | 471 | 2.609375 | 3 | [] | no_license | import datetime
from rooster.models import StdDienst
#stdDienst_list = list(StdDienst.objects.all().values('date'))
stdDienst_list = StdDienst.objects.all().values('beschrijving','date','chauffeur','begintijd','eindtijd')
year = 2017
month = 1
dates_list = []
for i in range(1,31):
for stdDienst in stdDienst_list:
i_date = datetime.date(year,month,i)
if(i_date.weekday()==stdDienst['date'].weekday()):
dates_list.append(stdDienst)
| true |
73e9d03a63d79c72e83518a4672e5ff12665c03f | Python | caioverissimo/IA002-python-scripts | /lista1/01-lista1.py | UTF-8 | 1,089 | 4.5625 | 5 | [] | no_license | ### IAL002 - Primeira Lista
### 1. Escreva um programa que leia um número inteiro do teclado e diga se esse
### número é positivo ou negativo
### Program for verify if an integer entry value is negative or positive ###
print('\n');
print("### Program for verify if an integer entry value is negative or positive ###")
print("### -------------------------------------------------------------------- ###")
print('\n');
# Ask for enter a value until it be an integer
while True :
try :
entryValue = int(input("Please, enter an integer number: "))
if ( isinstance(entryValue, int)) :
print("\n")
break
except ValueError :
print("This value is not an integer!")
print("\n")
# Verify and write if the number is 'positive' or 'negative'
if ( entryValue >= 0 ) :
print("###RESULT###")
print("The entry number", entryValue, "is positive.")
else :
print("The entry number", entryValue, "is negative.")
print("\n")
print("##### :-) Thanks for your patience! See ya!!! ;-) ###")
# print("Magic happens here!") | true |
126071853a1a0246830688868f0bd8dede9f9f0c | Python | cshweeti77/Backup | /fibo.py | UTF-8 | 219 | 4.09375 | 4 | [] | no_license | #!usr/bin/python3
#version 3
#program to demonstrate usage of function to print upto n fibonacci number
def fibo(n):
a, b = 1, 1
while(a < n):
print(a)
a, b =b, a+b
a = int(input("Enter a number::"))
fibo(a)
| true |
6b407844eb4681b89035cf519a15d1f8eaa454fc | Python | tseiiti/curso_em_video | /mundo_2/ex037.py | UTF-8 | 613 | 4.28125 | 4 | [
"MIT"
] | permissive | from os import system, name
system('cls' if name == 'nt' else 'clear')
dsc = ('''DESAFIO 037:
Escreva um programa que leia um número inteiro qualquer e peça
para o usuário escolher qual será a base de conversão:
- 1 para binário
- 2 para octal
- 3 para hexadecimal
''')
n = int(input('Digite um número: '))
print('1: binário')
print('2: octal')
print('3: hexadecimal')
c = int(input('Qual base para conversão: '))
if c == 1:
print('em binário: {}'.format(bin(n).upper()))
elif c == 2:
print('em octal: {}'.format(oct(n).upper()))
elif c == 3:
print('em hexadecimal: {}'.format(hex(n).upper()))
| true |
4cc2a926425ab3a3bc45df8df3494aef301b9873 | Python | ycchhueannu/LeetCode | /python/0459_Repeated_Substring_Pattern.py | UTF-8 | 383 | 2.75 | 3 | [] | no_license | class Solution(object):
def repeatedSubstringPattern(self, s):
"""
:type s: str
:rtype: bool
"""
for n in range(1, len(s)//2 + 1):
if len(s) % n == 0:
sub_str = s[0:n]
if all(s[n*i:n*(i+1)] == sub_str for i in range(0, len(s)//n)):
return True
return False | true |
d1e8a53283251f562d0ea3f84a2f002c05c641c9 | Python | oshlern/MiscellaneousWork | /pico/vigen.py | UTF-8 | 551 | 2.859375 | 3 | [] | no_license | import string
def d(c, k):
m = ''
for i in range(len(c)):
ki = string.ascii_lowercase.index(k[i % len(k)])
ci = string.ascii_lowercase.index(c[i])
m += string.ascii_lowercase[ci-ki % 26]
# m += chr((ord(c[i])-65)+(ord(k[i%len(k)])-65) % 26 + 65)
return m
m = "vgefmsaapaxpomqemdoubtqdxoaxypeo"
def caesar(m, k):
c = ''
for i in range(len(m)):
mi = string.ascii_lowercase.index(m[i])
c += string.ascii_lowercase[(mi+k) % 26]
return c
for i in range(26):
print(caesar(m, i)) | true |
8a4d6843d48e18cc0026fc5a2d92c5abf8039a12 | Python | daniel-reich/ubiquitous-fiesta | /XKEDTh2NMtTLSyCc2_16.py | UTF-8 | 308 | 3.109375 | 3 | [] | no_license |
def valid_credit_card(number):
if len(str(number)) == 16:
y = [int(char) for char in str(number)[::-1]]
for i in range(16):
if i%2:
y[i] = y[i]*2
if y[i] > 9:
y[i] = y[i] - 9
if not sum(y)%10:
return True
else:
return False
else:
return True
| true |
2fe479d49c2d21de2fa4fee90f78a1595d50fdb4 | Python | lowbite/ComputerArchitecture | /Lections/1.py | UTF-8 | 447 | 3.453125 | 3 | [] | no_license | a = 5
def f(x,y = a):
return x*y
print (f(2))
a = 3
print (f(2))
#--------------------------------------------
def f1(a, l=[]):
l.append(a)
print(l)
return l
f1(1)
f1(1)
l = f(1) #Bude zsylatys na 1 komirky pamyati
#--------------------------------------------
def f3(a = None):
if a is None:
print("A is None")
else:
print("A NOT None")
f3()
f3(1)
f3(None)
#--------------------------------------------
| true |
d93574dca7aa8d113168c65b16566a8322ba33da | Python | wurmfood/highalt | /Testing/ThreadTest.py | UTF-8 | 1,286 | 3.453125 | 3 | [] | no_license | #!/usr/bin/env python3
import threading
from time import sleep
class TestThread (threading.Thread):
def __init__(self, h_array, line):
threading.Thread.__init__(self)
self.headers = h_array
self.line = None
def run(self):
for i in range(1, 15):
self.headers.append(i)
self.line = "Blah"
sleep(5)
class TestThreadSupervisor (threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.__stop = False
self.__h_array = []
self.__text = None
self.__cur_thread = None
def stop(self):
self.__stop = True
def print_headers(self):
for i in self.__h_array:
print(i)
def print_line(self):
print(self.__cur_thread.line)
def run(self):
while not self.__stop:
print("Create thread")
self.__cur_thread = TestThread(self.__h_array, self.__text)
print("Start new thread")
self.__cur_thread.start()
print("Join")
self.__cur_thread.join()
print("Post join")
if __name__ == "__main__":
sup = TestThreadSupervisor()
sup.start()
sleep(12)
sup.print_line()
sleep(4)
sup.stop()
sup.print_headers()
| true |
d9c6c3ada541986691271729c9b7754b9b9e586c | Python | cforth/codefarm | /idea-365/D033_JSON2PY/GifHandle.py | UTF-8 | 2,696 | 3.09375 | 3 | [
"MIT"
] | permissive | from PIL import Image, ImageTk
# GIF动图处理类
class GifHandle(object):
def __init__(self, master_widget, img_path):
# 保存显示图片的控件引用
self.master_widget = master_widget
# 保存图片路径
self.img_path = img_path
# 保存gif格式图片当前显示的帧的数据
self._frame = None
# 保存gif格式图片每一帧
self._gif_frames = []
# 保存gif格式图片帧的数量
self._frame_count = 0
# 保存gif格式图片每一帧的延时
self.delay = 50
# 保存gif格式图片当前显示的帧的位置
self._ind = 0
# 设置gif图片默认运行状态为关闭
self._gif_running = False
# 初始化gif动图
self._init_gif()
# 初始化GIF动图,将GIF动图每一帧保存起来准备显示
def _init_gif(self):
im = Image.open(self.img_path)
seq = []
try:
while True:
seq.append(im.copy())
im.seek(len(seq)) # skip to next frame
except EOFError:
pass # we're done
try:
self.delay = im.info['duration']
# 将默认延时设置为50ms
if self.delay < 50:
self.delay = 50
except KeyError:
self.delay = 50
first = seq[0].convert('RGBA')
self._gif_frames = [ImageTk.PhotoImage(first)]
temp = seq[0]
for image in seq[1:]:
temp.paste(image)
frame = temp.convert('RGBA')
self._gif_frames.append(ImageTk.PhotoImage(frame))
self._frame_count += 1
# 更新GIF动图的下一帧
def _update_gif(self):
self._frame = self._gif_frames[self._ind]
self._ind += 1
if self._ind >= self._frame_count:
self._ind = 0
# 将gif当前帧显示在widget容器中
self.master_widget.configure(image=self._frame)
# 设置定时器,更新widget容器显示的gif帧
self.master_widget.gif_timer = self.master_widget.after(self.delay, self._update_gif)
# 启动GIF动图
def start_gif(self):
# 设置gif图片运行标志
self._gif_running = True
# 在widget容器中设置定时器
self.master_widget.gif_timer = self.master_widget.after(0, self._update_gif)
# 停止当前的GIF动图
def stop_gif(self):
if self._gif_running:
# 停止定时器
self.master_widget.after_cancel(self.master_widget.gif_timer)
self._gif_running = False
| true |
5e5d308b303667bf20831bbc61051cf0fc202436 | Python | jinook929/algoexpert | /_createBinaryTree.py | UTF-8 | 5,073 | 3.609375 | 4 | [] | no_license | # assumed id == value in nodes
def createBinaryTree(nodes, root):
tmp = []
for i in range(len(nodes)):
tmp.append(BinaryTree(nodes[i]["value"]))
def findNode(value):
return list(filter(lambda node: node.value == int(value), tmp))[0]
for i in reversed(range(len(tmp))):
if nodes[i]["left"]:
tmp[i].left = findNode(nodes[i]["left"])
if nodes[i]["right"]:
tmp[i].right = findNode(nodes[i]["right"])
return tmp
class BinaryTree:
def __init__(self, value):
self.value = value
self.left = None
self.right = None
def insert_left(self, node):
self.left = node
def insert_right(self, node):
self.right = node
def printTree(self, node, depth = 0):
print("+---" * depth + str(node.value))
if node.left:
self.printTree(self, node.left, depth + 1)
if node.right:
self.printTree(self, node.right, depth + 1)
return
# def createBinaryTree(nodes, root):
# tmp = []
# for i in range(len(nodes)):
# tmp.append(BinaryTree(str(i + 1), nodes[i]["value"]))
# def findNode(id):
# return list(filter(lambda node: node.id == id, tmp))[0]
# for i in reversed(range(len(tmp))):
# if nodes[i]["left"]:
# tmp[i].left = findNode(nodes[i]["left"])
# if nodes[i]["right"]:
# tmp[i].right = findNode(nodes[i]["right"])
# return tmp
# class BinaryTree:
# def __init__(self, node_id, value):
# self.id = node_id
# self.value = value
# self.left = None
# self.right = None
# def insert_left(self, node):
# self.left = node
# def insert_right(self, node):
# self.right = node
nodes = [
{"id": "1", "left": "2", "right": "3", "value": 1},
{"id": "2", "left": "4", "right": "5", "value": 2},
{"id": "3", "left": "6", "right": "7", "value": 3},
{"id": "4", "left": "8", "right": "9", "value": 4},
{"id": "5", "left": None, "right": None, "value": 5},
{"id": "6", "left": None, "right": None, "value": 6},
{"id": "7", "left": None, "right": None, "value": 7},
{"id": "8", "left": None, "right": None, "value": 8},
{"id": "9", "left": None, "right": None, "value": 9}
]
root = "1"
treeRoot = createBinaryTree(nodes, root)[0]
# print(treeRoot.value)
# print(treeRoot.left.value)
# print(treeRoot.left.left.value)
# print(treeRoot.left.left.left.value)
# print(treeRoot.left.left.right.value)
nodes = [
{"id": "1", "left": "2", "right": None, "value": 1},
{"id": "2", "left": "3", "right": None, "value": 2},
{"id": "3", "left": "4", "right": None, "value": 3},
{"id": "4", "left": "5", "right": None, "value": 4},
{"id": "5", "left": "6", "right": None, "value": 5},
{"id": "6", "left": "7", "right": None, "value": 6},
{"id": "7", "left": "8", "right": None, "value": 7},
{"id": "8", "left": "9", "right": None, "value": 8},
{"id": "9", "left": None, "right": None, "value": 9}
]
root = "1"
treeRoot = createBinaryTree(nodes, root)[0]
# print(treeRoot.value)
# print(treeRoot.left.value)
# print(treeRoot.left.left.value)
# print(treeRoot.left.left.left.value)
# print(treeRoot.left.left.left.left.value)
# print(treeRoot.left.left.left.left.left.value)
# print(treeRoot.left.left.left.left.left.left.value)
# print(treeRoot.left.left.left.left.left.left.left.value)
# print(treeRoot.left.left.left.left.left.left.left.left.value)
nodes = [
{"id": "1", "left": "2", "right": "3", "value": 1},
{"id": "2", "left": "4", "right": "5", "value": 2},
{"id": "3", "left": "6", "right": "7", "value": 3},
{"id": "4", "left": "8", "right": "9", "value": 4},
{"id": "5", "left": None, "right": None, "value": 5},
{"id": "6", "left": "10", "right": None, "value": 6},
{"id": "7", "left": None, "right": None, "value": 7},
{"id": "8", "left": None, "right": None, "value": 8},
{"id": "9", "left": None, "right": None, "value": 9},
{"id": "10", "left": None, "right": "11", "value": 10},
{"id": "11", "left": "12", "right": "13", "value": 11},
{"id": "12", "left": "14", "right": None, "value": 12},
{"id": "13", "left": "15", "right": "16", "value": 13},
{"id": "14", "left": None, "right": None, "value": 14},
{"id": "15", "left": None, "right": None, "value": 15},
{"id": "16", "left": None, "right": None, "value": 16}
]
root = "1"
treeRoot = createBinaryTree(nodes, root)[0]
# print(treeRoot.value)
# print(treeRoot.right.value)
# print(treeRoot.right.left.value)
# print(treeRoot.right.left.left.value)
# print(treeRoot.right.left.left.right.value)
# print(treeRoot.right.left.left.right.left.value)
# print(treeRoot.right.left.left.right.left.left.value)
# print(treeRoot.right.left.left.right.right.value)
# print(treeRoot.right.left.left.right.right.right.value) | true |
7208c8ae3e527967991fba30274941084daaaa63 | Python | justincohler/related_publications | /db.py | UTF-8 | 437 | 3.015625 | 3 | [] | no_license | import json
from sqlalchemy import create_engine
def db_connect():
"""Return a SQLAlchemy DB engine.
Returns:
sqlalchemy.Engine -- a DB engine
"""
with open('credentials.json') as file:
credentials = json.load(file)
DB_USER = credentials["DB_USER"]
DB_PASS = credentials["DB_PASS"]
DB_CONN = f"postgresql://{DB_USER}:{DB_PASS}@localhost:5433/postgres"
return create_engine(DB_CONN)
| true |
7d145c1107a11bc336fa6591166785ebdabf9fd3 | Python | yusufrahmatp/opengl | /task-2/script.py | UTF-8 | 383 | 2.703125 | 3 | [] | no_license | filename = 'kuda.txt'
filename_w = 'kuda_modified.txt'
fopen = open(filename, 'r')
fwrite = open(filename_w, 'w')
i = 0
for line in fopen :
if line != '\n':
x,y = line.split(',')
x = (int(x) - 400) / 800
y = ((int(y) - 400) / 800) * -1
fwrite.write('{},{},{}\n'.format(x, y, 0))
else :
fwrite.write('\n')
fopen.close()
fwrite.close() | true |
f4872b538c401e82346508230123c4384a46375a | Python | kayodeomotoye/Code_Snippets | /color.py | UTF-8 | 3,113 | 3.140625 | 3 | [] | no_license | import os
import sys
from string import hexdigits, digits
from typing import Tuple
import urllib.request
# PREWORK (don't modify): import colors, save to temp file and import
tmp = os.getenv("TMP", "/tmp")
color_values_module = os.path.join(tmp, 'color_values.py')
urllib.request.urlretrieve(
'https://bites-data.s3.us-east-2.amazonaws.com/color_values.py',
color_values_module
)
sys.path.append(tmp)
# should be importable now
from color_values import COLOR_NAMES # noqa E402
class Color:
"""Color class.
Takes the string of a color name and returns its RGB value.
"""
def __init__(self, color):
self.color = color
self.rgb = COLOR_NAMES.get(color.upper())
@staticmethod
def hex2rgb(hex):
"""Class method that converts a hex value into an rgb one"""
if not all(c in hexdigits for c in hex.lstrip('#')): raise ValueError
hex = hex.lstrip('#')
rgb = tuple(int(hex[i:i+2], 16) for i in (0, 2, 4))
return rgb
@staticmethod
def rgb2hex(rgb):
"""Class method that converts an rgb value into a hex one"""
if isinstance(rgb[0], str):
raise ValueError
if not all(0 <= val <= 255 for val in rgb):
raise ValueError(f'rgb {rgb} not in range(256)')
return '#' + ''.join([f'{val:02x}' for val in rgb])
def __repr__(self):
"""Returns the repl of the object"""
return (f'{type(self).__name__}'
f"('{self.color}')")
def __str__(self):
"""Returns the string value of the color object"""
if self.rgb != None:
return f'{self.rgb}'
else:
return 'Unknown'
#pybites
class Color:
"""Color class.
Takes the string of a color name and returns its RGB value.
"""
def __init__(self, color):
self.color = color
self.rgb = COLOR_NAMES.get(self.color.upper(), None)
@staticmethod
def hex2rgb(hex_value):
"""Converts a hex value into an rgb one"""
error_message = f"{hex_value} is not a valid hex value!"
for char in hex_value[1:]:
if char not in hexdigits:
raise ValueError(error_message)
if not len(hex_value) == 7 or not hex_value.startswith("#"):
raise ValueError(error_message)
return tuple(int(hex_value[i:i + 2], 16) for i in (1, 3, 5))
@staticmethod
def rgb2hex(rgb_value):
"""Converts an rgb value into a hex one"""
error_message = f"{rgb_value} is not a valid RGB value!"
if not isinstance(rgb_value, tuple):
raise ValueError(error_message)
valid = [1 for n in rgb_value if not (0 <= n <= 255)]
if sum(valid) > 0:
raise ValueError(error_message)
return f"#{rgb_value[0]:02x}{rgb_value[1]:02x}{rgb_value[2]:02x}"
def __repr__(self):
"""Returns the repl of the object"""
return f"Color('{self.color}')"
def __str__(self):
"""Returns the string value of the color object"""
return f"{self.rgb}" if self.rgb else "Unknown" | true |
4588f4569847b10b11e90ae3e540836bbff96752 | Python | 1946472806/crawler | /day04/06selenium第三方登录知乎.py | UTF-8 | 818 | 2.671875 | 3 | [] | no_license | import time
from selenium import webdriver
#创建谷歌浏览器驱动,使用指定的驱动程序
driver = webdriver.Chrome(executable_path="/home/zhengyj/Downloads/chromedriver_linux64/chromedriver")
driver.get('https://www.zhihu.com')
#先点击登录
driver.find_element_by_xpath('//*[@class="SignContainer-switch"]/span').click()
time.sleep(1)
#第三方登录
#点击社交账号登录
driver.find_element_by_xpath('//*[@id="root"]/div/main/div/div/div/div[2]/div[1]/form/div[5]/span[5]/button').click()
#然后点击QQ登录
driver.find_element_by_xpath('//*[@id="root"]/div/main/div/div/div/div[2]/div[1]/form/div[5]/span[5]/span/button[3]').click()
time.sleep(20) #等待自己去登录qq账号或扫码
#最后刷新知乎页面
driver.refresh()
#登录后获取cookie
print(driver.get_cookies())
| true |
d1abc4d867724b81b346f0e20c65061de8dcfd2f | Python | asperaa/back_to_grind | /Trees/post_order_traversal_recursive.py | UTF-8 | 638 | 3.484375 | 3 | [] | no_license | """We are the captains of our ships, and we stay 'till the end. We see our stories through.
"""
"""145. Binary Tree Postorder Traversal [Recursive]
"""
class TreeNode:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def post_order_traversal_helper(self, root, res):
if root:
self.post_order_traversal_helper(root.left, res)
self.post_order_traversal_helper(root.right, res)
res.append(root.val)
def postorderTraversal(self, root):
res = []
self.post_order_traversal_helper(root, res)
return res | true |
cc4fa06516940c6390f30ce53bd3fa4184cd1fa4 | Python | Nathaanlennon/multi-fuction_program_with_python | /MainV2.py | UTF-8 | 1,827 | 3.890625 | 4 | [] | no_license | from Class import *
from math import *
def ai():
print("Hello user, I'm Chara, your personal IA. What's your name?")
name = input()
print(f"Hi {name}, I'm happy to meet you ^^")
print("I have a lot of abilities, try to say someone and if I know how do that, i'll try to help you")
print("What's your request?")
request = input()
if request.casefold() == "addition".casefold() \
or request.casefold() == "subtraction".casefold() \
or request.casefold() == "multiplication".casefold() \
or request.casefold() == "division":
print(operation(request.lower()))
if request.casefold() == "complex operation":
def operation(operation_type):
article = "a"
if operation_type == "addition":
article = "an"
result = 0
print(
f"This function has for job to make {article} {operation_type}, "
f"you write the first number, then the second number. "
"\nSay \"stop\" to stop the program.")
i = 1 # i = the number for the while, "number {i}" in the sentence
while True:
print(f"What is your number {i}? (stop for stop)")
number = input()
if number.casefold() == "stop".casefold():
break
try:
if i == 1:
result = int(number)
else:
if operation_type == "addition":
result += int(number)
elif operation_type == "subtraction":
result -= int(number)
elif operation_type == "multiplication":
result *= int(number)
else:
result /= int(number)
except ValueError:
print("It's not a number, please retry")
i += 1
return f"The result is {result}"
ai()
| true |
1939f58c35f311ca619fc329b6a1d73f45ba3118 | Python | shockim3710/Programmers-Algorithm | /Level 1/42748_K번째수.py | UTF-8 | 385 | 3.015625 | 3 | [] | no_license | def solution(array, commands):
answer = []
for i in range(len(commands)):
cut = []
for j in range(commands[i][0] - 1, commands[i][1]):
cut.append(array[j]) # commands 범위까지 cut에 추가
cut.sort()
# cut에서 원하는 위치의 숫자를 answer에 추가
answer.append(cut[commands[i][2] - 1])
return answer | true |
5c4161204ef51274e189546d0c4d0849e0c42cdd | Python | mbg17/superlist | /day39/进程池2.py | UTF-8 | 510 | 2.84375 | 3 | [] | no_license | import os
import time
from multiprocessing import Pool
def func(n):
print('start func %s'% n,os.getpid())
time.sleep(1)
print('start func %s'% n,os.getpid())
if __name__ == '__main__':
pool = Pool(5)
# 同步提交进程
for i in range(10):
pool.apply(func,args = (1,))
# 异步提交进程
for i in range(10):
pool.apply_async(func,args = (1,))
pool.close()# 关闭进程池接受任务
pool.join()# 监测进程池任务执行结束 | true |
4904b41ebf3e9596667ad9ced0fdda06386ae87c | Python | irfan-gh/MO-PaDGAN-Optimization | /airfoil/surrogate/train_surrogate.py | UTF-8 | 3,191 | 2.609375 | 3 | [
"MIT"
] | permissive |
"""
Trains a surrogate model
Author(s): Wei Chen (wchen459@gmail.com)
"""
import argparse
import numpy as np
import tensorflow as tf
from surrogate_model import Model, preprocess, postprocess
import sys
sys.path.append('../..')
from utils import ElapsedTimer
if __name__ == "__main__":
# Arguments
parser = argparse.ArgumentParser(description='Train')
parser.add_argument('mode', type=str, default='train', help='train or evaluate')
parser.add_argument('--save_interval', type=int, default=1000, help='save interval')
args = parser.parse_args()
assert args.mode in ['train', 'evaluate']
train_steps = 10000
batch_size = 256
lr = 0.0001
# Read dataset
xs_train_fname = '../data/xs_train.npy'
ys_train_fname = '../data/ys_train.npy'
xs_test_fname = '../data/xs_test.npy'
ys_test_fname = '../data/ys_test.npy'
X_train = np.load(xs_train_fname)
Y_train = np.load(ys_train_fname)
X_test = np.load(xs_test_fname)
Y_test = np.load(ys_test_fname)
# Use CL and CL/CD as objectives
Y_train[:,1] = Y_train[:,0]/Y_train[:,1]
Y_test[:,1] = Y_test[:,0]/Y_test[:,1]
# Scale y
Y = np.concatenate([Y_train, Y_test])
min_y = np.min(Y, axis=0, keepdims=True)
max_y = np.max(Y, axis=0, keepdims=True)
Y_train = (Y_train-min_y)/(max_y-min_y)
Y_test = (Y_test-min_y)/(max_y-min_y)
directory = './trained_surrogate'
with tf.Session() as sess:
model = Model(sess, X_train.shape[1])
if args.mode == 'train':
# Train
timer = ElapsedTimer()
model.train(X_train, Y_train, X_test, Y_test, batch_size=batch_size, train_steps=train_steps, lr=lr,
save_interval=args.save_interval, directory=directory)
elapsed_time = timer.elapsed_time()
runtime_mesg = 'Wall clock time for training: %s' % elapsed_time
print(runtime_mesg)
else:
model.restore(directory=directory)
n = 5
ind = np.random.choice(X_test.shape[0], size=n, replace=False)
scores = model.predict(X_test[ind])
print(scores)
print(Y_test[ind])
# g = tf.gradients(model.y_pred, model.x, unconnected_gradients='zero')
# airfoils = preprocess(np.array(X_test[:5], ndmin=3))
# gradients = sess.run(g, feed_dict={model.x: airfoils, model.training: False})[0]
# print(gradients)
# print(gradients.shape)
# model.restore_frozen_graph(directory=directory)
#
# # Test model
# n = 5
## scores = model.predict(X_test[:n])
# with tf.Session(graph=model.frozen_graph) as sess:
# airfoils = preprocess(np.array(X_test[:n], ndmin=3))
# scores = sess.run(model.y_pred, feed_dict={model.x: airfoils, model.training: False})
# print(postprocess(scores))
# print(Y_test[:n])
# g = tf.gradients(model.y_pred, model.x, unconnected_gradients='zero')
# with tf.Session(graph=model.frozen_graph) as sess:
# gradients = sess.run(g, feed_dict={model.x: airfoils, model.training: False})[0]
# print(gradients)
# print(gradients.shape)
| true |
de5d29b4451dc734115e8a3ff08493aa69d0e74d | Python | ChangdeDu/generativeSSL | /data/half_moon.py | UTF-8 | 2,114 | 2.9375 | 3 | [] | no_license | from sklearn.datasets import make_moons
import numpy as np
import pdb
def pad_targets(xy):
"""
Pad the targets to be 1hot.
:param xy: A tuple containing the x and y matrices.
:return: The 1hot coded dataset.
"""
x, y = xy
classes = np.max(y) + 1
tmp_data_y = np.zeros((x.shape[0], classes))
for i, dp in zip(range(len(y)), y):
r = np.zeros(classes)
r[dp] = 1
tmp_data_y[i] = r
y = tmp_data_y
return x, y
def _download(centered):
train_x, train_t = make_moons(n_samples=10000, shuffle=True, noise=0.2, random_state=1234)
test_x, test_t = make_moons(n_samples=10000, shuffle=True, noise=0.2, random_state=1234)
valid_x, valid_t = make_moons(n_samples=10000, shuffle=True, noise=0.2, random_state=1234)
if centered:
train_x += np.abs(train_x.min())
test_x += np.abs(test_x.min())
valid_x += np.abs(valid_x.min())
train_set = (train_x.astype('float32'), train_t.astype('int32'))
test_set = (test_x.astype('float32'), test_t.astype('int32'))
valid_set = (valid_x.astype('float32'), valid_t.astype('int32'))
return train_set, test_set, valid_set
def load_semi_supervised(centered):
"""
Load the half moon dataset with 6 fixed labeled data points.
"""
train_set, test_set, valid_set = _download(centered)
# Add 6 static labels.
train_x_l = np.zeros((6, 2))
train_t_l = np.array([0, 0, 0, 1, 1, 1])
# Top halfmoon
train_x_l[0] = [.7, 1.7] # left
train_x_l[1] = [1.6, 2.6] # middle
train_x_l[2] = [2.7, 1.7] # right
# Bottom halfmoon
train_x_l[3] = [1.6, 2.0] # left
train_x_l[4] = [2.7, 1.1] # middle
train_x_l[5] = [3.5, 2.0] # right
if not centered:
train_x_l -= np.abs(train_set[0].min())
train_set_labeled = (train_x_l, train_t_l)
train_set_labeled = pad_targets(train_set_labeled)
train_set = pad_targets(train_set)
test_set = pad_targets(test_set)
if valid_set is not None:
valid_set = pad_targets(valid_set)
return train_set, train_set_labeled, test_set, valid_set
| true |
64493331e3aee65465481cbbfcf89ec3bdc1c73f | Python | SimGob/PageRank | /MassDistribution.py | UTF-8 | 1,648 | 3.078125 | 3 | [] | no_license | from mrjob.job import MRJob
from mrjob.protocol import JSONProtocol
ALPHA = .15 # Googles number
class MassDistribution(MRJob):
'''
MRJob Class for distributing lost mass across non-dangling nodes.
Also add dampening with alpha=.15, run once node rankings have converged
'''
INPUT_PROTOCOL = JSONProtocol
def configure_args(self):
''' Set command line args for MassDistribution
--total-nodes: int
count of all the nodes in the graph
--lost-mass: float
Sum of the rank of all dangling nodes
'''
super(MassDistribution, self).configure_args()
self.add_passthru_arg(
'--total-nodes', type=int,
help="Specify the total number of nodes"
)
self.add_passthru_arg(
'--lost-mass', type=float,
help="Specify the mass of lost rank"
)
def mapper(self, node_id, node):
''' Map function for Mass Distribution
This method applies the lost mass and dampening to non-dangling
nodes and filters out dangling nodes.
PARAMETERS
----------
node_id: int
ID of the webpage
node: list<list<int>, float>
A list where the first item is an adjaceny list (list of outgoing links) and
the second item is the current rank of the node.
YIELDS
------
int, int:
Node ID and its final ranking
'''
# Remove dangling nodes
if node_id == 'lost' or len(node[0]) == 0:
return
node_rank = node[1]
new_rank = ALPHA/self.options.total_nodes + (1 - ALPHA) * (self.options.lost_mass/self.options.total_nodes + node_rank)
yield node_id, new_rank
if __name__ == "__main__":
MassDistribution.run() | true |
00eb290305db1568041223e05a1ddf1b49465126 | Python | mexiCoders/pyvcf | /src/vcf/hackparser.py | UTF-8 | 7,045 | 2.75 | 3 | [] | no_license | import re
import itertools
# token patterns
p_comma = r","
p_colon = r":"
p_semicolon = r";"
p_equals = r"="
p_slash = r"/"
p_bar = r"\|"
p_dot = r"\."
p_int = r"[-+]?(?:0|[1-9][0-9]*)"
p_float = r"[-+]?[0-9]*\.?[0-9]+(?:[eE][-+]?[0-9]+)?"
p_str = r".*"
p_end = r'$'
p_identifier = r"[a-zA-Z][a-zA-Z0-9]*"
p_nucleotides = r"[ACGTN]+"
p_info_str = r"[^,;\n]+"
p_phase = "%(slash)s|%(bar)s" % { 'slash':p_slash, 'bar':p_bar }
def anchor(restr):
return "^" + restr + "$"
def group(restr):
return '(' + restr + ')'
# compiled re's
re_comma = re.compile(anchor(p_comma))
re_colon = re.compile(anchor(p_colon))
re_semicolon = re.compile(anchor(p_semicolon))
re_equals = re.compile(anchor(p_equals))
re_slash = re.compile(anchor(p_slash))
re_bar = re.compile(anchor(p_bar))
re_dot = re.compile(anchor(p_dot))
re_int = re.compile(anchor(p_int))
re_float = re.compile(anchor(p_float))
re_str = re.compile(anchor(p_str))
re_end = re.compile(anchor(p_end))
re_identifier = re.compile(anchor(p_identifier))
re_nucleotides = re.compile(anchor(p_nucleotides))
re_info_str = re.compile(anchor(p_info_str))
re_phase = re.compile(anchor(p_phase))
# python doesn't pretty print compiled regexes...
re_to_p = {
re_comma : p_comma,
re_colon : p_colon,
re_semicolon : p_semicolon,
re_equals : p_equals,
re_slash : p_slash,
re_bar : p_bar,
re_dot : p_dot,
re_int : p_int,
re_float : p_float,
re_str : p_str,
re_end : p_end,
re_identifier : p_identifier,
re_nucleotides : p_nucleotides,
re_info_str : p_info_str,
}
def parse(name, string):
parser = name_to_parser[name]
return parser(string)
def attr_restr(value_restr):
return "(?P<attr>%(attr_restr)s)(?:=(?P<value>%(value_restr)s))?" % { 'attr_str':r"[a-zA-Z]+", 'value_restr':value_restr }
typeable_as = {
None: frozenset([int, float, bool, str]),
int: frozenset([int, float, str]),
float: frozenset([float, str]),
str: frozenset([str]),
bool: frozenset([bool]),
}
def parse_info_attr(attr):
attr_value = attr.split(p_equals)
attr = attr_value[0]
if len(attr_value) == 2:
return (attr, parse_value(attr_value[1]))
else:
return (attr, True)
def parse_scalar_value(value):
result = re_int.match(value)
if result is not None:
return int(value)
result = re_float.match(value)
if result is not None:
return float(value)
# return a str
return value
def base_type(values, default=None):
if len(values) == 0:
return default
types = frozenset([type(v) for v in values])
i = iter(types)
common_types = set(typeable_as[i.next()])
for t in i:
common_types.intersection_update(typeable_as[t])
if len(common_types) == 1:
return iter(common_types).next()
common_types.intersection_update(types)
if len(common_types) == 1:
return iter(common_types).next()
return default
def parse_value(value):
values = value.split(p_comma)
if len(values) != 1:
parsed_values = [parse_scalar_value(v) for v in values]
btype = base_type(parsed_values, default=str)
return [btype(v) for v in parsed_values]
return parse_scalar_value(value)
def parse_info_by_type(info):
attrs_by_type = {
str: [],
int: [],
float: [],
bool: [],
}
for attr_str in info.split(p_semicolon):
attr_pair = parse_info_attr(attr_str)
attrs_by_type[type(attr_pair[1])].append(attr_pair)
return attrs_by_type
def parse_info(info):
return dict([parse_info_attr(attr_str) for attr_str in info.split(p_semicolon)])
def ordered_alleles(ref, alts):
if type(alts) == str:
alts = alts.split(p_comma)
genotypes = []
alleles = []
last_alleles = [ref]
last_alleles.extend(alts)
for x in last_alleles:
alleles.append(x)
for y in alleles:
genotypes.append((y, x))
return genotypes
def parse_dbsnp_id(string):
return parse_none_else_string(string)
def parse_null_genotype(string):
allele1, phase, allele2 = parse_zip_split(group(p_phase), [parse_none, parse_phase, parse_none], string)
return ((allele1, allele2), phase)
def parse_phase(string):
# | => True, / => False
return match(re_phase, string, on_success=lambda r, s: True if s == "|" else False)
def parse_int(string):
return match(re_int, string, on_success=lambda r, s: int(s))
def parse_int_list(string):
return parse_each_split(p_comma, parse_int, string)
parse_AD = parse_int_list
parse_DP = parse_int
parse_GQ = parse_int
parse_PL = parse_int_list
def parse_GT(string):
allele1, phase, allele2 = parse_zip_split(group(p_phase), [parse_int, parse_phase, parse_int], string)
return ((allele1, allele2), phase)
return tuple(parse_each_split(p_slash, parse_int, string))
def parse_nonnull_genotype(string):
# 0/1:11,15:26:99:364,0,353
# GT:AD:DP:GQ:PL
GT, AD, DP, GQ, PL = re.split(p_colon, string)
return { 'GT':parse_GT(GT), 'AD':parse_AD(AD), 'DP':parse_DP(DP), 'GQ':parse_GQ(GQ), 'PL':parse_PL(PL) }
def parse_genotype(string):
return parse_either(parse_null_genotype, parse_nonnull_genotype, string)
def parse_genotype_format(string):
pass
def parse_ref(string):
return match(re_nucleotides, string)
def parse_alts(string):
return parse_each_split(p_comma, parse_allele, string)
def parse_allele(string):
return match(re_nucleotides, string, on_fail=lambda re, s: parse_none(s))
def parse_dbsnp_id(string):
return parse_none_else_string(string)
def parse_none(string):
return match(re_dot, string, on_success=lambda m, s: None)
def parse_none_else_string(string):
return match(re_dot, string, on_success=lambda m, s: None, on_fail=lambda r, s: string)
def parse_each(seq, parser):
return [parser(s) for s in seq]
def parse_zip(seq, parsers):
return [p(s) for s, p in itertools.izip(seq, parsers)]
def parse_zip_split(regexp, parsers, string):
return parse_zip(re.split(regexp, string), parsers)
def parse_each_split(regexp, parser, string):
return parse_each(re.split(regexp, string), parser)
def parse_either(parser1, parser2, string):
try:
return parser1(string)
except ParserException:
return parser2(string)
def _on_fail(regex, string):
raise ParserException("Failed to parse %(string)s using %(regex)s" % { 'string':string, 'regex':re_to_p.get(regex, regex) })
def _on_success(match, string):
return string
def match(regex, string, on_success=_on_success, on_fail=_on_fail):
result = regex.match(string)
if result is not None:
return on_success(result, string)
return on_fail(regex, string)
class ParserException(Exception):
pass
name_to_parser = {
'info' : parse_info,
'ref' : parse_ref,
'dbsnp_id' : parse_dbsnp_id,
'alts' : parse_alts,
'genotype' : parse_genotype,
# not used
# 'genotype_format' : parse_genotype_format,
# 'allele' : parse_allele,
}
| true |
a70176edf7771ca00c388c8023e2d8d87986a8fb | Python | chenred/CB_Data_Visualization | /code/code_tags/4.0/dash/crawl_data.py | UTF-8 | 3,725 | 2.703125 | 3 | [] | no_license | import requests
import pandas as pd
import numpy as np
from lxml import etree
import os
import setting
import time
class Spiders():
    """Scraper for richvest.com convertible-bond tables and fund quotes.

    Failed steps return sentinel strings instead of raising, to stay
    backward compatible with existing callers.
    """

    def __init__(self):
        # Default URL: the full convertible-bond quote table on richvest.com.
        self.url = r"http://www.richvest.com/index.php?m=cb&a=cb_all"
        self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}

    def set_url(self,url):
        """Point this spider at the given URL."""
        self.url = url

    def get_html(self):
        """Fetch self.url and return the decoded body, or an error sentinel."""
        try:
            res = requests.get(self.url,headers=self.headers)
            res.raise_for_status()
            res.encoding = res.apparent_encoding
            return res.text
        except Exception:  # narrowed from a bare except (kept broad on purpose)
            return "GET HTML A (出现错误)"

    def parse_html(self):
        """Parse the quote table of the fetched page into a DataFrame."""
        demo = self.get_html()
        try:
            # BUG FIX: the original compared against "GET HTML(出现错误)",
            # a string get_html() never returns, so download failures were
            # fed straight into the HTML parser.  Compare the real sentinel.
            if "GET HTML A (出现错误)" != demo:
                html = etree.HTML(demo)
                table = html.xpath('//table[@id="cb_hq"]')
                demo_ = etree.tostring(table[0], encoding='utf-8').decode()
                data_pool = pd.read_html(demo_, encoding='utf-8', header=0)[0]
                return data_pool
            else :
                return "GET HTML B (失败)"
        except Exception:
            return "PARSE HTMl (出现错误)"

    def storage_data(self,data,path = setting.DATA_PATH):
        """Persist *data* as <path><YYYY-MM-DD>.csv; return True on success."""
        # FIX: pd.datetime was removed in pandas 2.x; format today's date
        # explicitly (same "YYYY-MM-DD" prefix as str(pd.datetime.now())[:10]).
        name = time.strftime("%Y-%m-%d")
        try:
            data.to_csv(path+name+".csv",index = False)
            return True
        except Exception:
            return False

    def crawl_storage(self):
        """Refresh today's snapshot unless one was already stored today."""
        path = setting.DATA_PATH
        file_name = os.listdir(path)[-1][:-4]
        print(os.listdir(path))
        file_stamp = pd.to_datetime(file_name)
        stamp = pd.Timestamp.now()  # FIX: pd.datetime removed in pandas 2.x
        print(file_stamp.month,file_stamp.day,stamp.month,stamp.day)
        if file_stamp.month == stamp.month and file_stamp.day == stamp.day:
            pass
        else:
            data = self.parse_html()
            self.storage_data(data)

    def get_gsz(self):
        """Fetch the estimated NAV (gsz) of fund 161716."""
        url = "http://fundgz.1234567.com.cn/js/161716.js?rt=" + str(int(round(time.time() * 1000)))
        self.set_url(url)
        result = self.get_html()
        # SECURITY: eval() executes content from a remote response.  The
        # payload is JSON-like; json.loads on the stripped body would be safer.
        gsz = float(eval(result[8:-2])["gsz"])
        return gsz

    def get_zhaoshang(self):
        """Fetch the fund's current market price from the bond-fund table."""
        self.set_url("http://www.richvest.com/index.php?m=stock_pub&c=arbitrage&a=listBondFund")
        fund_data = self.parse_html()
        zhaoshang = fund_data["现价"][fund_data["代码"]=="sz161716"].values[0]
        return zhaoshang

    def get_zs_data(self):
        """Return [name, code, price, estimated NAV, premium %] for 161716."""
        zhaoshang = self.get_zhaoshang()
        gsz = self.get_gsz()
        fund_data_ = ["招商双债","sz161716",zhaoshang,gsz,round((float(zhaoshang)- float(gsz))/float(gsz) * 100,3)]
        return fund_data_
# 初始化类
# spider = Spiders()
# spider.crawl_storage()
""" 数据爬取类实例化 """
# spider = Spiders()
# spider.set_url("http://www.richvest.com/index.php?m=stock_pub&c=arbitrage&a=listBondFund")
# fund_data = spider.parse_html()
# zhaoshang = fund_data["现价"][fund_data["代码"]=="sz161716"].values[0]
# gsz = spider.get_gsz()
# print(zhaoshang,gsz,(zhaoshang-gsz)/gsz*100)
# fund_data_ = pd.DataFrame([["招商双债"],["sz161716"],[zhaoshang],[gsz],[round((zhaoshang-gsz)/gsz*100,3)]],columns=["名称","代码","现价","净估值","溢价率(%)",])
# print(fund_data_) | true |
e4d84ccf0c5899373e989ec40261597fd301e453 | Python | maxieds/GATechGTDMMBSoftwareBackup | /RNADB-construction/rna2ndary/Exceptions.py | UTF-8 | 881 | 2.8125 | 3 | [] | no_license | # Exceptions.py : Miscellaneous exceptions used throughout this code.
# Author: Maxie D. Schmidt (maxieds@gmail.com)
# Created: 2019.06.18
class DirectoryError(Exception):
    """Raised for problems locating or accessing a directory."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return "{!r}".format(self.value)
##
class SequenceError(Exception):
    """Raised when a sequence fails validation."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return "{!r}".format(self.value)
##
class AmbiguousSequenceException(Exception):
    """Raised when a sequence contains ambiguous characters."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return "{!r}".format(self.value)
##
class NoOutput(Exception):
    """Raised when a processing step produced no output."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return "{!r}".format(self.value)
##
class FileHashingError(Exception):
    """Raised when hashing a file fails."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return "{!r}".format(self.value)
##
| true |
f177eea5a7fb6192597fc85a5c05dce95fefc947 | Python | vadim-lev/test_task_1 | /main.py | UTF-8 | 2,160 | 2.515625 | 3 | [] | no_license | import os
from datetime import datetime
import requests
import sqlite3
# --- Query constants -------------------------------------------------------
URL_list = 'https://www.investing.com/search/service/searchTopBar'
headers = {'X-Requested-With': 'XMLHttpRequest',
           'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                         'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/'
                         '84.0.4147.89 Safari/537.36'}
data = {'search_text': 'Manufacturing Purchasing Managers',
        'limit': '270'}
# Start from a clean database file located next to this script.
try:
    path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                        'DB-table.db')
    os.remove(path)
except FileNotFoundError:
    print('File does not exist')
# Fetch the list of matching economic events from investing.com.
timeseries = requests.post(URL_list, data=data, headers=headers).json()
link_timeseries = [[x['dataID'], 'https://www.investing.com' + x['link'],
                    x['name']] for x in timeseries['ec_event']]
conn = sqlite3.connect('DB-table.db')
c = conn.cursor()
c.execute('''CREATE TABLE timeseries(id text, name text)''')
c.execute('''CREATE TABLE timeseries_value(id text, date text ,
value text)''')
conn.commit()
c.executemany('INSERT INTO timeseries VALUES (?,?)',
              [[x[0], x[2]] for x in link_timeseries])
conn.commit()
print('The Economic Events entry is complete\n')
# Download each event's chart data and store (id, date, value) rows.
# NOTE(review): the loop variable `id` shadows the builtin.
for id in [i[0] for i in link_timeseries]:
    URL_timeseries = 'https://sbcharts.investing.com/events_charts/us/' +\
        id + '.json'
    timeseries_value = requests.get(URL_timeseries,
                                    headers=headers).json()['attr']
    data_result = []
    for x in timeseries_value:
        # Timestamps arrive in milliseconds.  `data` rebinds the POST payload
        # name above — safe only because the payload is no longer used here.
        data = datetime.fromtimestamp(int(x['timestamp']) //
                                      1000).strftime('%d-%m-%Y %H:%M:%S')
        data_result.append([id, data, x['actual']])
    print('{:<15}{:>6}{:>25}'.format('timeseries', id,
                                     'recording is complete'))
    c.executemany('INSERT INTO timeseries_value VALUES (?,?,?)', data_result)
c.execute('''SELECT COUNT(date) FROM timeseries_value;''')
totalRows = c.fetchone()
print("\nTotal rows are in the timeseries_value table: ", totalRows[0])
conn.commit()
# NOTE(review): the connection is never closed; conn.close() would be tidy.
| true |
76eabfae3f8d6aa4066d24c95209bd29870d5de4 | Python | Aasthaengg/IBMdataset | /Python_codes/p02414/s810363420.py | UTF-8 | 576 | 2.59375 | 3 | [] | no_license | n,m,l = map(int,input().split())
# Read an n x m matrix a and an m x l matrix b from stdin (one row per
# line), then form their product c; the loop below this block prints c.
a = [list(map(int, input().split())) for _ in range(n)]
b = [list(map(int, input().split())) for _ in range(m)]
c = [[sum(a[i][k] * b[k][j] for k in range(m)) for j in range(l)]
     for i in range(n)]
for i in range(n):
print(' '.join(str(x) for x in c[i])) | true |
f5df9f4dae05a31f115212666118b175b7c60be3 | Python | ao-song/stock | /hs300.py | UTF-8 | 2,019 | 2.828125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import tushare as ts
import pandas as pd
from pathlib import Path
import time
''' About HS300 '''
#------------------------------------------------------------------------------
__author__ = 'Ao Song'
__email__ = 'ao.song@outlook.com'
# First trading day covered by a full download.
START_DATE = '2005-01-04'
# Local CSV cache path written/read by the hs300 class below.
FILE_LOCATION = './data/hs300'
# Tushare code of the CSI 300 index.
HS300_INDEX = '000300'
# NOTE(review): COLUMN_NAMES is defined but never used in this module.
COLUMN_NAMES = ['date','open','high','close','low','volume','amount']
class hs300:
    """Disk-cached HS300 index quotes (CSV at FILE_LOCATION, newest row first)."""

    def __init__(self):
        cache = Path(FILE_LOCATION)
        if not cache.is_file():
            print("HS300 file created!\n")
            self.download_hs300()
            return
        cached = pd.read_csv(FILE_LOCATION)
        newest = cached.loc[0, 'date']  # first row holds the latest date
        if time.strftime("%Y-%m-%d") == newest:
            print("HS300 file already exists!\n")
            self.__hs300 = cached
        else:
            print("Updating HS300 file!\n")
            fresh = self.__fetch(newest)
            # Drop the cached row for `newest` (re-fetched above) and prepend
            # the fresh rows.
            self.__hs300 = pd.concat([fresh, cached[1:]])
            self.__hs300.to_csv(
                FILE_LOCATION, encoding='utf-8', index=False)

    def get_data(self):
        """Return the cached quotes as a DataFrame."""
        return self.__hs300

    def download_hs300(self, startDate=START_DATE):
        """Download quotes from *startDate* onward and overwrite the cache."""
        self.__hs300 = self.__fetch(startDate)
        self.__hs300.to_csv(
            FILE_LOCATION, encoding='utf-8', index=False)

    @staticmethod
    def __fetch(since):
        # Shared fetch + normalisation: plain 'YYYY-MM-DD' date strings.
        frame = ts.get_h_data(
            HS300_INDEX,
            index=True,
            start=since).reset_index()
        frame['date'] = frame['date'].apply(
            lambda x: pd.to_datetime(x).date().isoformat())
        return frame
# Manual smoke test: build/refresh the cache, then fetch the frame.
if __name__ == '__main__':
    h = hs300().get_data()
#print(h.head()) | true |
191968384b2cf3568199fd2c22b9030b522d2be9 | Python | bonmorz/portScanPublic2 | /portScan.py | UTF-8 | 3,219 | 2.75 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import socket
import re
from scapy.all import *
import subprocess
import re
import xlrd
final_result={}
def read_usage():
    """Load tcpUsage.xlsx (sheet 'Sheet1') into a dict: port -> usage text.

    Column 0 holds the port number, column 1 the usage description.
    """
    wb = xlrd.open_workbook('tcpUsage.xlsx')
    sh = wb.sheet_by_name('Sheet1')
    usage_dict = {}
    for i in range(sh.nrows):
        # Hoisted: read the row once instead of calling row_values() twice.
        row = sh.row_values(i)
        usage_dict[row[0]] = row[1]
    # FIX: dropped the unused `usage_result` list and dead commented prints
    # from the original.
    return usage_dict
def get_port_status(server_ip, server_port, port_dict):
    """Probe one TCP port; print it and record it in final_result when open.

    port_dict maps port number -> usage description (from read_usage()).
    Closed/filtered ports stay silent, as before.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.settimeout(3)
    try:
        s.connect((server_ip, server_port))
    except Exception:
        return
    finally:
        s.close()
    print('{} port {} is opened'.format(server_ip, server_port))
    usage = port_dict.get(int(server_port))
    # BUG FIX: the original concatenated "用途是" + None for ports missing
    # from the usage table; the resulting TypeError was swallowed by the
    # broad except, silently dropping the open port from final_result.
    if usage is not None:
        print("用途是" + usage)
    final_result[int(server_port)] = usage
def get_target_system(ip):
    """Guess the remote OS from the TTL of ping replies and store the guess
    in the module-global `system_check` (ttl<=64 -> Linux/Unix, 65..128 ->
    Windows, else 'Unix').  Uses the Unix `ping -c` flag, so POSIX-only."""
    print("测试ip开始")
    p = subprocess.Popen(["ping", "-c", "5", ip], stdout=subprocess.PIPE)
    # print(p)
    res = p.communicate()[0]
    if p.returncode > 0:
        print('server error')
    else:
        pattern = re.compile('ttl=\d*')
        # print(pattern.search(str(res)).group())
        # NOTE(review): if no 'ttl=' appears in the output, search() returns
        # None and the next line raises AttributeError — confirm intended.
        ttl = pattern.search(str(res)).group()
        ttl = int(ttl[4:])
        global system_check
        if ttl <= 64:
            print("Linux or Unix!")
            system_check = "Linux or Unix"
        elif ttl <= 128 and ttl > 64:
            print("Windows!")
            system_check="Windows"
        else:
            print("Unix!")
            system_check="Unix"
def main():
    """Interactive entry point: validate the target IP, fingerprint the OS,
    then scan every port from 1 up to (but excluding) the entered ceiling."""
    usage_dict = read_usage()
    ip = input('暂不支持域名\r\n请输入IP地址(默认为127.0.0.1):')
    pattern_ip = re.compile(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}')
    host_ip = pattern_ip.match(ip)
    if ip != '':
        if host_ip:
            ip = host_ip.group()
        else:
            print('格式输入错误')
            exit(-1)
    else:
        ip = '127.0.0.1'
    get_target_system(ip)
    port = input('请输入端口,将扫描到该端口为止:')
    # BUG FIX: the original called int(port) *before* testing `port == ''`,
    # so the advertised full-scan default (empty input) always raised
    # ValueError and the '' branches were unreachable dead code.
    port_end = 65536 if port == '' else int(port)
    for p in range(1, port_end):
        get_port_status(ip, p, usage_dict)
    print(final_result)
    print(system_check)
# Run the interactive scanner when executed directly.
if __name__ == '__main__':
    main()
| true |
5d24d549572bffb02c07a05a47b5906007c729fe | Python | greyaurora/greykit | /npc_gen/tables/character/data/desc.py | UTF-8 | 1,332 | 2.609375 | 3 | [] | no_license | # <character> is [descriptor], [descriptor], [descriptor]
# d100 lookup table for "<character> is [descriptor], ...".  Entries are
# indexed by roll, so the order is intentional — do not sort.
descriptors = [
    'dangerous', 'manipulative', 'stoic', 'attractive', 'aloof',
    'generous', 'armed', 'brave', 'sociable', 'connected',
    'jealous', 'active', 'hostile', 'successful', 'experienced',
    'ambitious', 'conceited', 'stern', 'wary', 'insightful',
    'quirky', 'disfigured', 'skilled', 'timid', 'wild',
    'cunning', 'kind', 'oblivious', 'cautious', 'weary',
    'anxious', 'athletic', 'cruel', 'honest', 'dying',
    'artistic', 'confused', 'relaxed', 'confident', 'friendly',
    'influential', 'adventurous', 'vengeful', 'armored', 'determined',
    'sick', 'selfish', 'fervent', 'agreeable', 'stubborn',
    'greedy', 'obsessed', 'a protagonist', 'cheery', 'stealthy',
    'passive', 'intolerant', 'weak', 'affectionate', 'stingy',
    'wise', 'smug', 'insensitive', 'young', 'clever',
    'bitter', 'oppressed', 'ugly', 'remorseful', 'cooperative',
    'doomed', 'charming', 'apathetic', 'bold', 'critical',
    'loyal', 'angry', 'resourceful', 'religious', 'suspicious',
    'wounded', 'old', 'hardhearted', 'powerful', 'violent',
    'talented', 'driven', 'hot-tempered', 'deceitful', 'quiet',
    'incompetent', 'aggressive', 'infamous', 'cowardly', 'proud',
    'reclusive', 'careless', 'dependent', 'disabled', 'strong',
] | true |
1e09a23e34ca2d74f840aa098e8a6b6b34be6f81 | Python | lkqllx/Lim | /Project_2-Forum/download_prices.py | UTF-8 | 1,504 | 3.046875 | 3 | [] | no_license | """
Download historical daily prices for every ticker in the target lists.
"""
import pandas_datareader as web
import pandas as pd
import concurrent.futures
import datetime as dt
from progress.bar import Bar
import threading
import os
lock = threading.RLock()
def prices(targets: list, start='2010-01-01', end='2019-10-15'):
    """Download daily Yahoo prices for each ticker in *targets* to
    data/prices/<code>.csv, skipping files that already exist.

    Downloads run on a 100-worker thread pool; the progress bar is advanced
    under the module lock because Bar is shared across threads.
    """
    start = dt.datetime.strptime(start, '%Y-%m-%d')
    end = dt.datetime.strptime(end, '%Y-%m-%d')
    with Bar('Downloading', max=len(targets)) as bar:
        def download(target):
            try:
                if not os.path.exists(f'data/prices/{target}.csv'):
                    df = web.get_data_yahoo(target, start, end)
                    ticker = target.split('.')[0]
                    df.to_csv(f'data/prices/{ticker}.csv')
                    # FIX: the original acquire()/release() pair leaked the
                    # lock if bar.next() raised; `with` always releases.
                    with lock:
                        bar.next()
            except Exception as e:
                print(f'Error - {target} - {e} - func prices')
        with concurrent.futures.ThreadPoolExecutor(max_workers=100) as pool:
            pool.map(download, targets)
def run_download_prices():
    """Build the Shenzhen + Shanghai ticker list from the target CSVs and
    download prices for all of them."""
    sh_codes = pd.read_csv('data/target_list/SH.csv')
    sh_tickers = [str(code) + '.SS' for code in sh_codes.iloc[:, 0]]
    sz_codes = pd.read_csv('data/target_list/SZ.csv')
    # Shenzhen codes are zero-padded to six digits.
    sz_tickers = [str(code).zfill(6) + '.SZ' for code in sz_codes.iloc[:, 0]]
    prices(sz_tickers + sh_tickers)
# Download everything when executed as a script.
if __name__ == '__main__':
    run_download_prices()
| true |
e7a1b4813817252921ef502773355eb8f0855356 | Python | jgran/march_madness_2016 | /make_testing_dataset.py | UTF-8 | 5,347 | 3.0625 | 3 | [] | no_license | import pandas as pd
import numpy as np
import itertools as iter
def seed_str_to_num(seed):
    """Numeric part of a tourney seed string, e.g. 'W16a' -> 16."""
    digits = ''.join(ch for ch in seed if ch.isdigit())
    return int(float(digits))
# Python 2 script: build the 2016 feature matrix of all possible matchups.
print '\n'
print 'making test dataset'
avg_df = pd.read_csv('season_avg.csv')
seeds_df = pd.read_csv('input/TourneySeeds.csv')
# Keep only the 2016 rows and the columns used below.
avg_df = avg_df.loc[(avg_df.Season == 2016), ['Season','Team','score','fgm','fga','fgm3','fga3','ftm','fta','or','dr','ast','to','stl','blk','pf']]
seeds_df = seeds_df.loc[(seeds_df.Season == 2016), ['Season','Team','Seed']]
test_df = pd.DataFrame([[0,1]], columns=['LowTeam','HighTeam'])  # sentinel row, dropped after the loop
year_df = seeds_df.loc[(seeds_df.Season == 2016), ['Team']]
#get all possible combinations of teams since we don't know apriori which teams will play each other
df =[year_df.transpose()[list(pair)] for pair in list(iter.combinations(year_df.transpose().columns, 2))]
for j in df:
    j['Season'] = 2016
    j.columns = ['LowTeam', 'HighTeam','Season']
    test_df = test_df.append(j)
test_df = test_df[1:]
test_df = test_df[['Season', 'LowTeam', 'HighTeam']]
# Output accumulator with one all-zero sentinel row (also dropped at the end).
out_df = pd.DataFrame({
    'Season':[0],
    'LowTeam':[0],
    'HighTeam':[0],
    'diff_seed':[0],
    'diff_score':[0],
    'diff_fgm':[0],
    'diff_fga':[0],
    'diff_fgm3':[0],
    'diff_fga3':[0],
    'diff_ftm':[0],
    'diff_fta':[0],
    'diff_or':[0],
    'diff_dr':[0],
    'diff_ast':[0],
    'diff_to':[0],
    'diff_stl':[0],
    'diff_blk':[0],
    'diff_pf':[0]
    })
#for each possible game in the tournament, add row to dataframe with differences in season avg stats
# One row per potential matchup: low-minus-high differences of the seed and
# of every season-average stat.  Refactored: the original repeated the same
# three-line lookup fourteen times, once per stat column.
stat_columns = ['score', 'fgm', 'fga', 'fgm3', 'fga3', 'ftm', 'fta',
                'or', 'dr', 'ast', 'to', 'stl', 'blk', 'pf']
for index, row in test_df.iterrows():
    # Normalise so low_team always holds the smaller team id.
    if row['LowTeam'] < row['HighTeam']:
        low_team = row['LowTeam']
        high_team = row['HighTeam']
    else:
        low_team = row['HighTeam']
        high_team = row['LowTeam']
    rs = row['Season']
    # Seed difference (seeds are strings such as 'W01').
    s1 = seed_str_to_num(seeds_df[(seeds_df.Season == rs) & (seeds_df.Team == low_team)].iloc[0].Seed)
    s2 = seed_str_to_num(seeds_df[(seeds_df.Season == rs) & (seeds_df.Team == high_team)].iloc[0].Seed)
    # Single lookup of each team's season-average row.
    low_stats = avg_df[(avg_df.Season == rs) & (avg_df.Team == low_team)].iloc[0]
    high_stats = avg_df[(avg_df.Season == rs) & (avg_df.Team == high_team)].iloc[0]
    frame = {
        'Season': row['Season'],
        'LowTeam': [low_team],
        'HighTeam': [high_team],
        'diff_seed': [s1 - s2],
    }
    for stat in stat_columns:
        frame['diff_' + stat] = [low_stats[stat] - high_stats[stat]]
    temp_df = pd.DataFrame(frame)
    out_df = out_df.append(temp_df)
out_df = out_df[1:]  # drop the all-zero sentinel first row
#print out_df.head(5)
out_df.to_csv('test.csv')
| true |
57e9f99178459ae120a53110fc4d8f0858b8e752 | Python | Kamirus/artificial-intelligence | /p1/z5.py | UTF-8 | 3,649 | 2.78125 | 3 | [] | no_license | import random
from z4 import opt_dist
def init_matrix(rows, cols):
    """Random 0/1 matrix: one row per row-constraint, one column per
    column-constraint."""
    return [[random.randint(0, 1) for _ in cols] for _ in rows]
def get_column(i, matrix):
    """Column *i* of *matrix*, as a list."""
    return [row[i] for row in matrix]

def get_columns(matrix):
    """All columns of *matrix*, each as a list."""
    return [list(col) for col in zip(*matrix)]
def neg_kth(seq, index):
    """Copy of *seq* with element *index* flipped (0 <-> 1); *seq* itself is
    left untouched."""
    flipped = list(seq)
    flipped[index] = int(not flipped[index])
    return flipped
class LogPicsSimple:
    """Stochastic local-search solver for nonogram-style puzzles.

    The working state `self.matrix` holds the n rows followed by the m
    columns; `self.reqs` holds the corresponding constraints.  Kept
    byte-for-token identical: the random call order determines the search
    trajectory, so only comments were added.
    """

    def __init__(self, row_c, col_c, cost_func):
        # rows + columns
        self.row_c = row_c
        self.col_c = col_c
        self.cost_func = cost_func
        self._reinitialize()

    def solve(self, max_tries=100, print_all=False):
        """Local search; restarts (recursively, with a larger budget) when
        max_tries is exhausted.  Returns self once every constraint holds."""
        for _ in range(max_tries):
            print_all and self.print_matrix()
            try:
                wrongs = self._get_wrongs() # indexes of wrong rows and columns
                i = random.choice(wrongs)
            except IndexError:
                return self # done
            else:
                # With 1% probability perturb a *satisfied* line instead, to
                # escape local minima.
                if random.random() < 0.01 and len(wrongs) < len(self.matrix):
                    # break ok ones
                    oks = [k for k, _ in enumerate(self.matrix)
                           if k not in wrongs]
                    i = random.choice(oks)
                    # _, j = random.choice(list(self._iter_cost_j(i)))
                    _, j = max(self._iter_cost_j(i))
                else:
                    _, j = min(self._iter_cost_j(i))
                self._neg(i, j)
        self._reinitialize()
        return self.solve(max_tries=len(self.matrix) ** 2 + max_tries, print_all=print_all)

    def print_matrix(self):
        """Print the row part of the matrix as '#'/'.' art."""
        print(*(''.join('#' if x else '.' for x in row)
                for row in self.matrix[:self.n]), sep='\n', end='\n\n')

    def _get_wrongs(self):
        # Indices whose line currently violates its constraint (cost != 0).
        return [i for i, s in enumerate(self.matrix) if self.cost_func(s, self.reqs[i])]

    def _iter_cost_j(self, i):
        """Yield (cost, j) for flipping each cell of line i, where j is the
        crossing line's index."""
        if i < self.n: # i = row index
            j0, n = self.n, self.m # then j0 = first column index
        else: # i = column index
            j0, n = 0, self.n
        for off in range(n):
            j = j0 + off
            neg_seq_i = neg_kth(self.matrix[i], off)
            neg_seq_j = neg_kth(self.matrix[j], i % n)
            # NOTE(review): `sum` shadows the builtin inside this loop.
            sum = self.cost_func(neg_seq_i, self.reqs[i]) \
                + self.cost_func(neg_seq_j, self.reqs[j])
            yield sum, j

    def _neg(self, i, j):
        # Flip the cell shared by row i and column j in both copies.
        if not (0 <= i < self.n): # i != row index
            i, j = j, i
        assert 0 <= i < self.n and self.n <= j < len(
            self.matrix), "one should be row index, 2nd col index"
        self.matrix[i] = neg_kth(self.matrix[i], j - self.n)
        self.matrix[j] = neg_kth(self.matrix[j], i)

    def _reinitialize(self):
        # Fresh random board; columns are appended after the rows.
        self.matrix = init_matrix(self.row_c, self.col_c)
        self.n = len(self.matrix) # number of rows
        self.matrix.extend(get_columns(self.matrix))
        self.m = len(self.matrix) - self.n # number of columns
        self.reqs = self.row_c + self.col_c
# Demo puzzles; all but one are commented out to keep runs fast.
if __name__ == '__main__':
    for r, c in [
        # ([7, 7, 7, 7, 7, 7, 7], [7, 7, 7, 7, 7, 7, 7]),
        # ([2, 2, 7, 7, 2, 2, 2], [2, 2, 7, 7, 2, 2, 2]),
        ([2, 2, 7, 7, 2, 2, 2], [4, 4, 2, 2, 2, 5, 5]),
        # ([7, 6, 5, 4, 3, 2, 1], [1, 2, 3, 4, 5, 6, 7]),
        # ([7, 5, 3, 1, 1, 1, 1], [1, 2, 3, 7, 3, 2, 1]),
        # ([1, 1, 1, 1], [2, 2]),
        # ([2, 3], [1, 1, 1, 1, 1]),
        # ([3, 2, 3, 4, 2], [1, 2, 3, 1, 1, 2, 2, 1, 1]),
        # ([3, 3, 3, 4, 2], [2, 2, 3, 1, 1, 2, 2, 1, 1]),
    ]:
        LogPicsSimple(r, c, cost_func=opt_dist).solve(print_all=1).print_matrix()
| true |
ce8729a31b42ed5f64b35fe42f6f0127ea109155 | Python | saraiyakush/library-case-study | /Book.py | UTF-8 | 582 | 3.34375 | 3 | [] | no_license | import csv
class Book:
    """A library book record persisted as one row of ./bookdata.csv."""

    def __init__(self, bookid, name, author, year, genre, count):
        self.bookid = bookid
        self.name = name
        self.author = author
        self.year = year
        self.genre = genre
        self.count = count

    def addBook(self):
        """Append this book as one CSV row (fields are not quoted/escaped)."""
        # FIX: `with` closes the handle even if write() raises.
        with open('./bookdata.csv', 'a') as fh:
            fh.write(str(self.bookid) + "," + self.name + "," + self.author + "," + str(self.year) + "," + self.genre + "," + str(self.count) + "\n")

    @staticmethod
    def printBook():
        """Print every stored row.  FIX: declared @staticmethod — the
        original had no `self`, so instance.printBook() raised TypeError;
        Book.printBook() keeps working as before."""
        with open('./bookdata.csv', 'r') as fh:
            print(fh.readlines())
| true |
8862e1599c7a7c201b52e8e0438d60407a164fa3 | Python | pk4btc/seleniumScripts | /tests/test_nopCommercecom.py | UTF-8 | 204 | 2.65625 | 3 | [] | no_license | import pytest
class TestClass():
    """Sanity-check tests (pytest collects methods named test_*)."""

    def test_func(self):
        # BUG FIX: the original *returned* x+1; pytest ignores return values,
        # so the method verified nothing.  Assert instead.
        x = 2
        assert x + 1 == 3

    def test_two(self):
        x = "ania"
        assert 'i' in x

    def test_answer(self):
        assert 2 == 2
6fb573e3236d0f93316e929ba86beceb6e74325f | Python | apdofficial/AI_hackathon_2019 | /project/download.py | UTF-8 | 724 | 2.96875 | 3 | [] | no_license | import bs4
import requests
from typing import AnyStr
class Download:
    """
    Use:
    download = Download()
    download.download("https://physionet.org/physiobank/database/bidmc/bidmc_csv/")
    to download all the data into the <data> folder
    """
    @staticmethod
    def download(url: AnyStr):
        """Fetch the index page at *url* and save every linked .csv/.txt
        file into ./data/ (the directory must already exist)."""
        page = requests.get(url)
        data = bs4.BeautifulSoup(page.text, "html.parser")
        for link in data.find_all("a"):
            # Renamed from `file`, which shadowed a well-known name.
            name = link["href"]
            # Idiom: endswith accepts a tuple — one call instead of an or-chain.
            # NOTE(review): assumes hrefs are bare filenames; a href with a
            # path component would break the open() below — confirm.
            if name.endswith((".csv", ".txt")):
                with open("./data/" + name, "wb") as f:
                    print("writing: " + name)
                    r = requests.get(url + name)
                    f.write(r.content)
| true |
a171fd9e0d0355e45bb7461ae626efd26b31b09a | Python | eric8607242/DrBC_pytorch | /utils/model.py | UTF-8 | 3,043 | 2.71875 | 3 | [] | no_license | import numpy as np
import torch
import torch.nn as nn
from torch_geometric.nn import MessagePassing
from torch_geometric.utils import add_self_loops, degree
class GCNConv(MessagePassing):
    """Parameter-free GCN propagation: symmetric-normalised sum aggregation
    over neighbours, with self-loops added.

    NOTE(review): in_channels/out_channels are accepted but unused — this
    layer has no learnable weights; confirm that is intended.
    """

    def __init__(self, in_channels, out_channels):
        super(GCNConv, self).__init__(aggr="add")

    def forward(self, x, edge_index):
        edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0))
        row, col = edge_index
        # Per-edge coefficient deg(i)^-1/2 * deg(j)^-1/2.
        deg = degree(row, x.size(0), dtype=x.dtype)
        deg_inv_sqrt = deg.pow(-0.5)
        norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]
        return self.propagate(edge_index=edge_index, size=(x.size(0), x.size(0)), x=x, norm=norm)

    def message(self, x_j, norm):
        # Scale each neighbour's features by its edge coefficient.
        return norm.view(-1, 1)*x_j

    def update(self, aggr_out):
        # Identity update: the aggregated message is the new node state.
        return aggr_out
class Encoder_Block(nn.Module):
    """One GCN propagation step whose result is combined with the previous
    embedding through a GRU cell."""

    def __init__(self, dim=4):
        super(Encoder_Block, self).__init__()
        self.gcn = GCNConv(dim, dim)
        self.gru = nn.GRU(input_size=dim, hidden_size=dim)

    def forward(self, x, edge_index):
        h_n = self.gcn(x, edge_index)
        # Add a leading length-1 axis: nn.GRU expects (seq, batch, feature),
        # so the nodes play the role of the batch dimension here.
        h_n = h_n.view(1, *h_n.shape)
        x = x.view(1, *x.shape)
        # gru(input, h_0) -> (output, h_n); take output and drop the seq axis.
        h_v = self.gru(h_n, x)
        return h_v[0][0]
class Encoder(nn.Module):
    """Stack of Encoder_Blocks; node embedding = element-wise max over the
    per-layer outputs."""

    def __init__(self, input_dim=3, layer_nums=5, embedding_dim=4):
        super(Encoder, self).__init__()
        # Project raw node features to the embedding width first.
        self.first = nn.Linear(input_dim, embedding_dim)
        self.relu = nn.LeakyReLU()
        self.layers = nn.ModuleList()
        for l in range(layer_nums):
            self.layers.append(Encoder_Block(embedding_dim))

    def forward(self, x, edge_index):
        outs = []
        x = self.first(x)
        x = self.relu(x)
        for l in self.layers:
            x = l(x, edge_index)
            outs.append(x)
        # Element-wise max over the stacked per-layer embeddings.
        outs = torch.stack(outs)
        max_out = outs.max(dim=0).values
        return max_out
class Decoder(nn.Module):
    """Two-layer MLP mapping a node embedding to one scalar score."""

    def __init__(self, input_dim=128, hidden_dim=54):
        super(Decoder, self).__init__()
        self.layers = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.LeakyReLU(),
            nn.Linear(hidden_dim, 1),
        )

    def forward(self, x):
        # The output is a raw score; the sigmoid stays intentionally disabled.
        #x = torch.sigmoid(x)
        return self.layers(x)
class DrBC(nn.Module):
    """Encoder-decoder network producing one raw score per node."""

    def __init__(self, input_dim=3, embedding_dim=128, hidden_dim=54, layer_nums=5):
        super(DrBC, self).__init__()
        self.e = Encoder(input_dim, layer_nums, embedding_dim=embedding_dim)
        self.d = Decoder(input_dim=embedding_dim, hidden_dim=hidden_dim)

    def forward(self, x, edge_index):
        x = self.e(x, edge_index)
        x = self.d(x)
        return x
if __name__ == "__main__":
    # Smoke test on a 4-node path graph (`y` is built but unused).
    model = DrBC()
    x = torch.tensor([[2, 1, 1],[2, 1, 1],[2, 1, 1],[2, 1, 1]],dtype=torch.float)
    y = torch.tensor([[0,2,1,0,3],[3,1,0,1,2]],dtype=torch.long)
    edge_index = torch.tensor([[0,1,2],
                               [1,2,3]],dtype=torch.long)
    print(model(x, edge_index))
| true |
ff252cdc45adc99ba9f16d606e94649f0920bf74 | Python | kangju1/Catalog | /categories.py | UTF-8 | 1,267 | 2.703125 | 3 | [] | no_license | from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import User, Base, Category
engine = create_engine('sqlite:///catalog.db')
# Bind the engine to the metadata of the Base class so that the
# declaratives can be accessed through a DBSession instance
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
# A DBSession() instance establishes all conversations with the database
# and represents a "staging zone" for all the objects loaded into the
# database session object. Any change made against the objects in the
# session won't be persisted into the database until you call
# session.commit(). If you're not happy about the changes, you can
# revert all of them back to the last commit by calling
# session.rollback()
session = DBSession()
# Seed the six default categories.  Each insert is committed individually,
# exactly like the original one-by-one add/commit sequence, so a failure
# partway still leaves the earlier categories persisted.
for name in ("Animal", "Fruit", "Vegetable", "Human", "Tree", "Flower"):
    category = Category(name=name)
    session.add(category)
    session.commit()
| true |
bceb1870f55e78603e4395c449500f02d16d2a39 | Python | NoahPeeters/Alchemy | /client/client.py | UTF-8 | 6,596 | 2.578125 | 3 | [] | no_license | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os, sys
import os.path
import time
import readline
import socket
import getpass
def clear():
    """Clear the terminal (POSIX `clear` command)."""
    # FIX: removed the stray `pass` statement that preceded the real body.
    os.system("clear")
languages = ["en", "de"]
l = []
start = 35
def load_language_file(name):
    """Load UI strings from file *name* into the global list `l`, one
    stripped line per entry."""
    global l
    l = []
    # FIX: the original iterated a bare open() and leaked the file handle.
    with open(name) as fh:
        for line in fh:
            l.append(line.strip())
load_language_file("en")
clear()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("127.0.0.1", 4295))
username = ""
password = ""
menu = 0
def showHelp():
    # Show the help text (string 10) and wait for Enter.
    clear()
    print l[10]
    raw_input()

def showMessage(m):
    # Flash a message briefly.
    clear()
    print m
    time.sleep(0.5)

def yesno(question):
    # Localized yes/no prompt; True only on an exact "yes" (string 12) answer.
    clear()
    print question + " ("+l[12]+","+l[13]+")"
    print
    antwort = raw_input()
    if antwort == l[12]:
        return True
    else:
        return False

def selectOn(title,list):
    # Menu loop: print the non-empty entries with their indices and keep
    # asking until the user types one of the offered numbers.
    # NOTE(review): the parameter `list` shadows the builtin.
    work = True
    while work:
        work = False
        clear()
        print title
        print
        available = []
        for i in range(len(list)):
            if list[i] != "":
                print str(i) + ": " + list[i]
                available.append(i)
        print
        selected = raw_input("").strip()
        if selected.isdigit():
            intselected = int(selected)
            if intselected in available:
                return intselected
            else:
                work = True
        else:
            work = True
        clear()

def closeSocket(m):
    # Print an optional message, drop the connection and quit the program.
    if m != "":
        print m
    s.close()
    clear()
    sys.exit(0)
def login():
    # Ask for login (0) or registration (1) until the server accepts the
    # credentials; option 2 exits.  Spaces are not allowed by the wire
    # protocol, so they are encoded as "________".
    global username
    global password
    username = ""
    password = ""
    work = True
    while work:
        intlogin = selectOn(l[0], [l[1], l[2], l[23]])
        clear()
        if intlogin == 2:
            exitGame()
        else:
            print l[1+intlogin]
            print
            i1 = raw_input(l[3] + ": ").replace(" ","________")
            i2 = getpass.getpass(l[4] + ": ").replace(" ","________")
            clear()
            if intlogin == 0:
                s.send("l "+i1+" "+i2)
                antwort = s.recv(1024)
                if antwort == "Hi":
                    username = i1
                    password = i2
                    showMessage(l[5])
                    work = False
                elif antwort == "user":
                    showMessage(l[6])
                else:
                    closeSocket(l[8])
            elif intlogin == 1:
                s.send("r "+i1+" "+i2)
                antwort = s.recv(1024)
                if antwort == "ok":
                    username = i1
                    password = i2
                    showMessage(l[9])
                    work = False
                elif antwort == "used":
                    showMessage(l[7])
                else:
                    closeSocket(l[8])
    time.sleep(1)
    clear()

def send(command):
    # Send an authenticated command; a "close" reply ends the game.
    clear()
    print "Loading Data..."
    s.send("l "+username+" "+password+" "+command)
    antwort = s.recv(1024)
    if antwort == "close":
        closeSocket(l[8])
    else:
        return antwort

def exitGame():
    # Confirm before quitting.
    if yesno(l[14]):
        sys.exit(0)

def getName(n):
    # Element names begin at offset `start` in the language table; fall back
    # to the generic label (string 33) for entries the table lacks.
    if start+n >= len(l):
        return l[33]
    else:
        return l[start+n]
def getPossession(title):
    # Human-readable possession list under `title`.  Elements 0-3 are base
    # elements with infinite supply (string 11); the server reply is a
    # comma-separated count per crafted element, offset by 4.
    antwort = send("showPossession")
    possession = title + "\n\n"
    possession = possession + "0: " + getName(0) + ": " + l[11] + "\n"
    possession = possession + "1: " + getName(1) + ": " + l[11] + "\n"
    possession = possession + "2: " + getName(2) + ": " + l[11] + "\n"
    possession = possession + "3: " + getName(3) + ": " + l[11] + "\n"
    parts = antwort.split(",")
    for i in range(len(parts)):
        o = int(parts[i])
        if o != 0:
            possession = possession + str(i+4) + ": " + getName(i+4) + ": " + str(o) + "\n"
    return possession

def getPossessionAsArray():
    # Same data as getPossession but as a list for selectOn(); unowned
    # elements become "" so the menu hides them.
    antwort = send("showPossession")
    possession = []
    possession.append(getName(0) + ": " + l[11])
    possession.append(getName(1) + ": " + l[11])
    possession.append(getName(2) + ": " + l[11])
    possession.append(getName(3) + ": " + l[11])
    parts = antwort.split(",")
    for i in range(len(parts)):
        o = int(parts[i])
        if o != 0:
            possession.append(getName(i+4) + ": " + str(o))
        else:
            possession.append("")
    return possession

def mix():
    # Pick two owned elements (last menu entry = cancel), verify ownership
    # with the server, then request the mix and show the result.  Cancelling
    # returns to the main menu (menu = 0).
    sel = getPossessionAsArray()
    sel.append(l[26])
    first = selectOn(l[28],sel)
    if first < len(sel)-1:
        if send("check "+str(first)) == "false":
            showMessage(l[16])
        else:
            sel = getPossessionAsArray()
            sel.append(l[30])
            second = selectOn(l[29],sel)
            if second < len(sel)-1:
                if (first != second and send("check "+str(second)) == "false") or (first == second and send("doublecheck "+str(second)) == "false"):
                    showMessage(l[16])
                else:
                    antwort = send("mix "+str(first)+" "+str(second))
                    if antwort != "false":
                        showMessage(getName(first)+" + "+getName(second)+" -> "+getName(int(antwort)))
                    else:
                        showMessage(l[17])
            else:
                global menu
                menu = 0
    else:
        global menu
        menu = 0

def changeLanguage():
    # Pick one of the available language files and reload the string table.
    new_language = selectOn(l[34],languages)
    load_language_file(languages[new_language])

def openMenu(n):
    # Menu dispatcher: 0 = main menu (extra admin entry for user "root"),
    # 1 = mixing, 2 = settings.  Remembers n in the global `menu`.
    clear()
    global menu
    menu = n
    if n == 0:
        antwort = 0
        if username != "root":
            antwort = selectOn(l[18],[l[19],l[20],l[22],l[23]])
        else:
            antwort = selectOn(l[18],[l[19],l[20],l[22],l[23],l[31]])
        if antwort == 0:
            menu = 1
            mix()
            #openMenu(1)
        elif antwort == 1:
            openMenu(2)
        elif antwort == 2:
            if yesno(l[27]):
                login()
        elif antwort == 3:
            exitGame()
        elif antwort == 4:
            if yesno(l[32]):
                send("close")
                closeSocket("")
    elif n == 1:
        #antwort = selectOn(getPossession(l[19]), [l[24],l[26]])
        #if antwort == 0:
        #    mix()
        #elif antwort == 1:
        #    openMenu(0)
        mix()
    elif n == 2:
        antwort = selectOn(l[20], [l[25],l[26]])
        if antwort == 0:
            changeLanguage()
        # elif antwort == 1:
        openMenu(0)

# Main loop: force a login first, then keep showing the remembered menu.
# The finally-clause closes the socket on any exit path.
try:
    while True:
        if username == "":
            login()
        openMenu(menu)
finally:
    closeSocket("")
| true |
0ed28d157fff4f0b10e57a73ab29d39011a9973c | Python | anihamde/rl-monkey-game | /run.py | UTF-8 | 2,833 | 3.015625 | 3 | [] | no_license | import numpy as np
class ReinforcementLearning(object):
    """Linear Q-learning: Q(s, a) = W[a] . s, one weight row per action."""

    def __init__(self, num_actions, state_dimensions, learning_rate, discount_factor):
        self.num_actions = num_actions
        self.state_dimensions = state_dimensions
        self.learning_rate = learning_rate
        self.discount_factor = discount_factor
        # One weight vector per action.
        self.W = np.zeros((self.num_actions, self.state_dimensions))

    def best_move(self, s):
        """Return the greedy action for state vector *s*."""
        return np.argmax([self.__eval_Q(s, a, self.W[a]) for a in range(self.num_actions)])

    def update(self, s, a, r, s_next):
        """One gradient step on action a's weights for (s, a, r, s_next)."""
        # FIX: dropped the leftover debug prints and the no-op `self.W[a]`
        # expression statement from the original.
        gradient_a = self.__eval_Q_loss_gradient(s, a, r, s_next, self.W[a])
        step = gradient_a * self.learning_rate
        self.W[a] = self.W[a] - step

    def __eval_Q_loss_gradient(self, s, a, r, s_next, w_a):
        # Gradient of the squared TD loss w.r.t. w_a (up to a constant):
        # (Q(s,a) - target) * s.
        q = self.__eval_Q(s, a, w_a)
        q_next = max(self.__eval_Q(s_next, a_next, self.W[a_next]) for a_next in range(self.num_actions))
        target = r + self.discount_factor * q_next
        # BUG FIX: the original returned (q_next - target) * s, which reduces
        # to -r * s and ignores the current estimate `q` (computed but never
        # used there).  The TD error must be (q - target).
        return (q - target) * s

    def __eval_Q(self, s, a, w_a):
        """Q-value of action *a* in state *s* under weight vector *w_a*."""
        return np.dot(w_a, s)
class ReinforcementLearningParam(object):
    """Tabular Q-learning agent.

    Raw state vectors are discovered on the fly and mapped to rows of a
    fixed-size state table; Q-values live in a (num_states x num_actions)
    matrix.
    """
    def __init__(self, num_actions, state_dimensions, num_states, learning_rate, discount_factor):
        self.num_actions = num_actions
        self.state_dimensions = state_dimensions
        self.num_states = num_states
        self.learning_rate = learning_rate
        self.discount_factor = discount_factor
        # Q-table plus the table of raw state vectors seen so far.
        self.W = np.zeros((self.num_states, self.num_actions))
        self.states = np.zeros((self.num_states, self.state_dimensions))
        self.last_state = -1
    def best_move(self, s_raw):
        """Greedy action for the (possibly previously unseen) state *s_raw*."""
        state = self.__get_state(s_raw)
        q_values = [self.__eval_Q(state, action, self.W) for action in range(0, self.num_actions)]
        return np.argmax(q_values)
    def update(self, s_raw, a, r, s_next_raw):
        """One TD(0) update of Q(s, a) from reward *r* and the successor state."""
        state = self.__get_state(s_raw)
        next_state = self.__get_state(s_next_raw)
        td_error = self.__eval_Q_loss_gradient(state, a, r, next_state, self.W)
        self.W[state, a] = self.W[state, a] - td_error * self.learning_rate
    def __eval_Q_loss_gradient(self, s, a, r, s_next, w_a):
        """TD error: Q(s, a) - (r + gamma * max_a' Q(s', a'))."""
        best_next = max(self.__eval_Q(s_next, nxt, self.W) for nxt in range(0, self.num_actions))
        target = r + self.discount_factor * best_next
        return self.__eval_Q(s, a, w_a) - target
    def __eval_Q(self, s, a, w_a):
        """Q-value lookup in the given table."""
        return w_a[s, a]
    def __get_state(self, s):
        """Row index of state vector *s*, registering it when unseen."""
        matches = np.where(np.all(self.states == s, axis=1))[0]
        if len(matches) > 0:
            return matches[0]
        self.last_state += 1
        self.states[self.last_state, :] = s
        return self.last_state
efe2ebffa6aa4a63fd2d9fd6230a840ea94450c4 | Python | TIian610/DataAnalysis | /chapter03/pltcolor.py | UTF-8 | 837 | 2.984375 | 3 | [] | no_license | #coding:utf-8
"""
filename:pltcolor.py
display the color of plt
"""
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
import numpy as np
curves = [np.random.random(20) for i in range(10)]
values = range(10)
fig = plt.figure()
ax = fig.add_subplot(111)
jet = cm = plt.get_cmap('jet')
cnorm = colors.Normalize(vmin=0, vmax=values[-1])
scalar_map = cmx.ScalarMappable(norm=cnorm, cmap=jet)
lines = []
for idx in range(len(curves)):
line = curves[idx]
color_val = scalar_map.to_rgba(values[idx])
color_text = ('color: (%4.2f,%4.2f,%4.2f)'%(color_val[0],color_val[1],color_val[2]))
ret_line, = ax.plot(line, color=color_val, label=color_text)
lines.append(ret_line)
handles,labels = ax.get_legend_handles_labels()
ax.legend(handles, labels, loc='upper right')
plt.show()
| true |
380260a84bb8ae944a879a29c0591a8913e63c4e | Python | mianasbat/scikit-surgeryfred | /sksurgeryfred/widgets/interactive_registration.py | UTF-8 | 2,892 | 2.71875 | 3 | [
"BSD-3-Clause"
] | permissive | """
The main widget for the interactive registration part of scikit-surgeryFRED
"""
import matplotlib.pyplot as plt
import skimage.io
import numpy as np
from sksurgeryfred.algorithms.fred import make_target_point, \
PlotRegistrations, \
PointBasedRegistration, AddFiducialMarker
from sksurgeryfred.algorithms.fit_contour import find_outer_contour
from sksurgeryfred.algorithms.errors import expected_absolute_value
from sksurgeryfred.logging.fred_logger import Logger
class InteractiveRegistration:
    """
    An interactive window for doing live registration.
    """
    def __init__(self, image_file_name):
        """
        Creates a visualisation of the projected and
        detected screen points, which you can click on
        to measure distances.

        :param image_file_name: image loaded by intialise_registration
        """
        self.fig, self.subplot = plt.subplots(1, 2, figsize=(18, 10))
        self.fig.canvas.set_window_title('SciKit-SurgeryF.R.E.D.')
        self.plotter = PlotRegistrations(self.subplot[1], self.subplot[0])
        log_config = {"logger" : {
            "log file name" : "fred_results.log",
            "overwrite existing" : False
            }}
        self.logger = Logger(log_config)
        # Sentinels: created on the first intialise_registration() call below,
        # then reused on subsequent re-initialisations.
        self.mouse_int = None
        self.pbr = None
        self.image_file_name = image_file_name
        self.intialise_registration()
        # Pressing 'r' restarts the registration (see keypress_event).
        self.cid = self.fig.canvas.mpl_connect('key_press_event',
                                               self.keypress_event)
        plt.show()
    def keypress_event(self, event):
        """
        Handle a key press event: 'r' restarts the registration.
        """
        if event.key == 'r':
            self.intialise_registration()
    def intialise_registration(self):
        """
        Sets up a new registration: load the image, find its outer contour,
        pick a target point and reset the fiducial markers.
        """
        img = skimage.io.imread(self.image_file_name)
        outline, _initial_guess = find_outer_contour(img)
        target_point = make_target_point(outline)
        self.plotter.initialise_new_reg(img, target_point, outline)
        # NOTE(review): 'fle' presumably stands for fiducial localisation
        # error; the fixed side gets a random error s.d., the moving side
        # zeros -- confirm against sksurgeryfred docs.
        fle_sd = np.random.uniform(low=0.5, high=5.0)
        moving_fle = np.zeros((1, 3), dtype=np.float64)
        fixed_fle = np.array([fle_sd, fle_sd, fle_sd], dtype=np.float64)
        fixed_fle_eavs = expected_absolute_value(fixed_fle)
        moving_fle_eavs = expected_absolute_value(moving_fle)
        if self.pbr is None:
            self.pbr = PointBasedRegistration(target_point, fixed_fle_eavs,
                                              moving_fle_eavs)
        else:
            self.pbr.reinit(target_point, fixed_fle_eavs, moving_fle_eavs)
        if self.mouse_int is None:
            self.mouse_int = AddFiducialMarker(self.fig, self.plotter,
                                               self.pbr, self.logger,
                                               fixed_fle, moving_fle)
        self.mouse_int.reset_fiducials(fixed_fle_eavs)
        self.fig.canvas.draw()
| true |
8b5b3220101c2fca62094cad6fc82ff5ba0f4780 | Python | Galieve/TFG-Informatica | /src/endogamy/generate_csv.py | UTF-8 | 1,884 | 2.640625 | 3 | [] | no_license | import os
import sys
import pandas as pd
def generate_database():
file_id = 16
file_path = get_path_file('data_in_csv.csv')
dfvalid = pd.read_csv(file_path, sep=';')
for file_id in range(0, 20):
# try:
# df = pd.read_csv('prod8/data_in_csv' + str(file_id) + '.csv', sep=';')
# file_path = get_path_file('prod8/data_in_csv' + str(file_id) + '.csv')
# df.to_csv(file_path, index=False, encoding='utf-8', sep=";", header=(file_id==10000))
# continue
# except FileNotFoundError:
# continue
data = {"id": [], "circuit": []}
#file = open("../../files_galvani/prod8/files/gates"+str(file_id)+".out")
file = open("../random_circuits/palm_diamond"+str(file_id)+".out")
lines = file.read().splitlines()
i = 0
n = len(lines)
print(n)
while i < len(lines) - 1:
id = lines[i]
i += 1
circlines = []
if i % (1e3) == 0:
print(i, n, f'{i/n*100:.2f}')
while lines[i] != "":
circlines.append(lines[i])
i += 1
i += 1
data['id'].append(id)
data['circuit'].append(circlines)
#{'name':'Geo', 'physics':87, 'chemistry':92, 'algebra':97}
file.close()
pddata = pd.DataFrame(data)
pddata = pddata.loc[pddata['id'].isin(dfvalid['id'])].copy()
file_path = get_path_file('willow_diamond/not_data_in_csv'+str(file_id)+'.csv')
#file_path = get_path_file('data_in_csv.csv')
pddata.to_csv(file_path, index=False, encoding='utf-8', sep=";")
def get_path_file(filename):
    """Return the absolute path of *filename* located next to this module."""
    base_dir = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(base_dir, filename)
if __name__ == "__main__":
generate_database()
| true |
8b0f1fd1a19aaac06c99cb957f9da0d7023f235a | Python | seanlahman/patent_scraper | /lahmanlib.py | UTF-8 | 3,295 | 3.09375 | 3 | [] | no_license | #lahmanlib
# Collection of modules written by Sean Lahman - seanlahman@gmail.com
# Created 2013-10-27
def HelloWorld():
    """Smoke-test helper: print a greeting to stdout.

    Uses the parenthesised form so the module works under both Python 2
    (where the rest of this file's print statements live) and Python 3.
    """
    print('hello world')
def PatentScrape(p_id):
# ----------------------------------------------------------------------------
# Given the number of a US patent, scrape information about it from
# the USPTO website and return it as a data dictionary
# ----------------------------------------------------------------------------
#import libraries
from bs4 import BeautifulSoup # For processing HTML
import urllib2
#import sys
#set the URL based on the patent ID passed
url="http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO2&Sect2=HITOFF&u=%2Fnetahtml%2FPTO%2Fsearch-adv.htm&r=1&p=1&f=G&l=50&d=PTXT&S1="+p_id+".PN.&OS=pn/"+p_id+"&RS=PN/"+p_id
try:
#get webpage
downloaded_page = urllib2.urlopen(url)
soup = BeautifulSoup(downloaded_page)
#initialize variables
outdate=''
outinv=''
outass='-' #note, some patents have no assignee, so this state is likely not an error
#page through TD tags to find targeted text
mylist=soup.find_all('td')
count=1
#note: Loop uses length-3 since we are looking for markers to identify subsequent records, we don't want to iterate too far.
# in practice, we could stop after 25-30 or lines, but hardcoding that way could cause problems
# if USPTO changes their format in the future
while (count < (len(mylist)-3)):
count = count + 1
#look for markers
if 'United States Patent' in str(mylist[count]):
#patdate
patdate=(mylist[count+3])
outdate=patdate.find('b').string
outdate=str(outdate.strip())
if 'Inventors' in str(mylist[count]):
patinv=str(mylist[count+1])
outinv=patinv[patinv.index('90%')+5:-5]
outinv=outinv.strip()
outinv=outinv.replace('<b>','')
outinv=outinv.replace('</b>',' ')
outinv=outinv.replace(';',', ')
if 'Assignee' in str(mylist[count]):
patass=str(mylist[count+1])
outass=patass[patass.index('90%')+5:-15]
outass=outass.strip()
outass = "".join(outass.split('\n')) #strip newlines
outass=outass.replace('<b>','')
outass=outass.replace('</b>',' ')
outass=outass.replace('<br>','')
outass=outass.replace('<br>','')
#individual inventors have no assignee, so set value to "-"
if outass=='':
outass='-'
#all done, build dictionary and return what we found
patent_info = {'date' : outdate, 'inventor': outinv, 'assignee': outass}
return patent_info
except:
#The most rudimentary error handling possible
print "SL-Unexpected error:", sys.exc_info()[0]
patent_info = {'date' : "ERR", 'inventor': "ERR", 'assignee': "ERR"}
return patent_info
| true |
876924098e4c50f6c8d64deb9f83b7a58b216c36 | Python | eastonYi/Unsupervised-ASR | /utils/tools_model.py | UTF-8 | 2,748 | 2.6875 | 3 | [] | no_license | import tensorflow as tf
def get_LSTM_UWb(weight):
    '''
    Split a Keras LSTM layer's get_weights() output into per-gate pieces.
    weight must be (input kernel, recurrent kernel, bias).
    Returns three dicts keyed by gate name ("i", "f", "c", "o"):
    W: weights for input, U: weights for hidden states, b: bias (as column
    vectors of shape (hunit, 1)).
    '''
    input_kernel, recurrent_kernel, bias = weight
    gate_names = ["i", "f", "c", "o"]
    # The number of hidden units equals the recurrent kernel's row count.
    hunit = recurrent_kernel.shape[0]
    U, W, b = {}, {}, {}
    for gate_index, offset in enumerate(range(0, len(bias), hunit)):
        gate = gate_names[gate_index]
        W[gate] = input_kernel[:, offset:offset + hunit]
        U[gate] = recurrent_kernel[:, offset:offset + hunit]
        b[gate] = bias[offset:offset + hunit].reshape(hunit, 1)
    return W, U, b
def get_GRU_UWb(weight):
    '''
    Split a Keras GRU layer's get_weights() output into per-gate pieces.
    weight must be (input kernel, recurrent kernel, bias).
    Returns three dicts keyed by gate name ("r", "u", "o"):
    W: weights for input, U: weights for hidden states, b: bias (as column
    vectors of shape (hunit, 1)).
    '''
    input_kernel, recurrent_kernel, bias = weight
    gate_names = ["r", "u", "o"]
    # The number of hidden units equals the recurrent kernel's row count.
    hunit = recurrent_kernel.shape[0]
    U, W, b = {}, {}, {}
    for gate_index, offset in enumerate(range(0, len(bias), hunit)):
        gate = gate_names[gate_index]
        W[gate] = input_kernel[:, offset:offset + hunit]
        U[gate] = recurrent_kernel[:, offset:offset + hunit]
        b[gate] = bias[offset:offset + hunit].reshape(hunit, 1)
    return W, U, b
def get_LSTMweights(model1):
    """Return per-gate (W, U, b) dicts for the first LSTM layer of *model1*.

    :raises ValueError: when the model contains no LSTM layer (previously the
        code fell through to ``return W, U, b`` with the names undefined and
        crashed with NameError).
    """
    for layer in model1.layers:
        if "LSTM" in str(layer):
            return get_LSTM_UWb(layer.get_weights())
    raise ValueError("model has no LSTM layer")
def get_GRUweights(model):
    """Return per-gate (W, U, b) dicts for the first GRU layer of *model*.

    :raises ValueError: when the model contains no GRU layer (previously the
        code fell through to ``return W, U, b`` with the names undefined and
        crashed with NameError).
    """
    for layer in model.layers:
        if "GRU" in str(layer):
            return get_GRU_UWb(layer.get_weights())
    raise ValueError("model has no GRU layer")
def get_GRU_activation(layer, cell_inputs, hiddens):
    """
    Compute the GRU z and r gate activations, summed over the unit axis.
    gru/kernel: h_prev x h
    gru/recurrent_kernel: h x (h*3)
    gru/bias: 2 x (h*3)
    cell_inputs: b x h_prev
    hiddens: b x h
    NOTE(review): from the names, z/r are the update/reset gates in Keras'
    z, r, candidate weight ordering -- confirm against the layer version used.
    """
    assert "GRU" in str(layer)
    activation_fn = layer.recurrent_activation
    kernel, recurrent_kernel, bias = layer.get_weights()
    # Input contribution: x @ kernel + input bias, split into three gate thirds.
    matrix_x = tf.matmul(cell_inputs, kernel)
    matrix_x = tf.add(matrix_x, bias[0])
    x_z, x_r, _ = tf.split(matrix_x, 3, axis=-1)
    # Recurrent contribution: h @ recurrent_kernel + recurrent bias.
    matrix_inner = tf.matmul(hiddens, recurrent_kernel)
    matrix_inner = tf.add(matrix_inner, bias[1])
    recurrent_z, recurrent_r, _ = tf.split(matrix_inner, 3, axis=-1)
    # Gate activations, reduced (summed) over axis 1 (the unit axis).
    z = tf.reduce_sum(activation_fn(x_z + recurrent_z), 1)
    r = tf.reduce_sum(activation_fn(x_r + recurrent_r), 1)
    return z, r
def vectorize_with_labels(W, U, b):
    """Flatten per-gate LSTM weight dicts into one value list plus labels.

    Values are ordered group by group (all W gates, then U, then b), gates in
    i, f, c, o order; labels is a parallel list of strings like "W_i".
    """
    gate_order = ["i", "f", "c", "o"]
    groups = [("W", W), ("U", U), ("b", b)]
    values, labels = [], []
    for prefix, params in groups:
        for gate in gate_order:
            flat = list(params[gate].flatten())
            values.extend(flat)
            labels.extend([prefix + "_" + gate] * len(flat))
    return (values, labels)
| true |
480c8745e82cb699c11dd2b56e505cdfcc4dd528 | Python | wohnjoods/Sentiment-Model-Flask-App | /tempCodeRunnerFile.py | UTF-8 | 376 | 3.1875 | 3 | [] | no_license | inputText = inputText.split()
# Fragment (VS Code tempCodeRunnerFile): encode one tokenised text as a list
# of vocabulary indices, extending the vocabulary with unseen words.
# NOTE(review): 'text', 'vocabulary', 'inverse_vocabulary' and 'sequences'
# are defined elsewhere (this file starts mid-script) -- confirm before reuse.
text_sequence = []
for word in text:
    if word not in vocabulary:
        # New word: its index is the current vocabulary size; record it in
        # both the word->index map and the index->word list.
        vocabulary[word] = len(inverse_vocabulary)
        text_sequence.append(len(inverse_vocabulary))
        inverse_vocabulary.append(word)
    else:
        text_sequence.append(vocabulary[word])
sequences.append(text_sequence) | true |
4cc32f1893c4cf9fec0ea9c32bc3f4cb955f4e1b | Python | opbeat/opbeatcli | /opbeatcli/client.py | UTF-8 | 4,139 | 2.53125 | 3 | [
"BSD-2-Clause"
] | permissive | """
Opbeat log API client.
"""
import json
import logging
from opbeatcli import __version__
from opbeatcli.log import logger
from opbeatcli import settings
from opbeatcli.exceptions import ClientConnectionError, ClientHTTPError
from opbeatcli.compat import (Request, urlopen, URLError,
HTTPError, BaseHTTPRequestHandler)
HTTP_RESPONSE_CODES = BaseHTTPRequestHandler.responses
class OpbeatClient(object):
    """
    The Opbeat client, which handles communication with the
    Opbeat servers.
    """
    def __init__(self, secret_token, organization_id, app_id,
                 server=settings.SERVER, timeout=settings.TIMEOUT,
                 dry_run=False):
        """
        :param secret_token: bearer token sent in the Authorization header
        :param organization_id: substituted into URI templates by post()
        :param app_id: substituted into URI templates by post()
        :param server: API server base URL
        :param timeout: request timeout passed to urlopen
        :param dry_run: when True, post() logs the request but does not send it
        """
        self.server = server
        self.secret_token = secret_token
        self.organization_id = organization_id
        self.app_id = app_id
        self.timeout = timeout
        self.dry_run = dry_run
        self.logger = logger.getChild('client')
        self.logger.info('Opbeat client configuration:')
        for k in ['server', 'organization_id', 'app_id']:
            self.logger.info('  %16s: %r' % (k, str(getattr(self, k))))
    def log_request(self, uri, headers, payload):
        """Log the outgoing POST: request line, headers and JSON payload."""
        self.logger.debug('> Server: %s', self.server)
        self.logger.debug('> HTTP/1.1 POST %s', uri)
        for header, value in headers.items():
            self.logger.debug('> %s: %s', header, value)
        self.logger.debug('> %s', payload)
    def log_response(self, response, level=logging.DEBUG):
        """
        Log the response status line and (when non-empty) its body.
        :type response: HTTPResponse
        """
        self.logger.log(level, '< HTTP %d %s',
                        response.code,
                        HTTP_RESPONSE_CODES[response.code][0])
        body = response.read()
        if body:
            self.logger.log(level, '< %s', body)
    def post(self, uri, data):
        """
        HTTP POST ``data`` as JSON to collection identified by ``uri``.
        :param uri:
            The collection URI. It can be in the form of a URI template
            with the variables {organization_id} and {app_id}, e.g.:
                /api/{organization_id}/apps/{app_id}/deployments/
        :param data: the data to be send
        :type data: dict
        :raises ClientHTTPError: on an HTTP error response
        :raises ClientConnectionError: when the server cannot be reached
        """
        uri = uri.format(
            organization_id=self.organization_id,
            app_id=self.app_id
        )
        url = self.server + uri
        headers = {
            'User-Agent': 'opbeatcli/%s' % __version__,
            'Authorization': 'Bearer %s' % self.secret_token,
            'Content-Type': 'application/json',
        }
        payload = json.dumps(data, indent=2, sort_keys=True)
        request = Request(
            url=url,
            headers=headers,
            data=payload.encode('utf8')
        )
        self.log_request(uri, headers, payload)
        if self.dry_run:
            self.logger.info('Not sending because --dry-run.')
            return
        try:
            response = urlopen(
                request,
                timeout=self.timeout,
            )
        # voidspace.org.uk/python/articles/urllib2.shtml#handling-exceptions
        except HTTPError as e:
            # Server answered with an error status: log it and re-raise
            # as the client's own exception type.
            self.logger.error('< The server could not fulfill the request')
            self.logger.debug('HTTP error', exc_info=True)
            self.log_response(e, level=logging.ERROR)
            raise ClientHTTPError(e.code)
        except URLError as e: # Connection error.
            try:
                code, reason = e.reason.args
            except ValueError:
                code, reason = None, str(e)
            if reason == 'Operation now in progress':
                error_msg = 'request timed out (--timeout=%.2f)' % self.timeout
            else:
                error_msg = reason
            self.logger.error('Unable to reach the API server: %s', error_msg)
            self.logger.debug('URL error (connection error)', exc_info=0)
            raise ClientConnectionError(error_msg)
        except Exception:
            raise # Unexpected error, not handled here.
        else:
            self.log_response(response, level=logging.DEBUG)
| true |
0684f56d14e2c5ea76453a6735277552bcc19b10 | Python | abramik/cs102 | /homework03/life-console.py | UTF-8 | 1,157 | 3.03125 | 3 | [] | no_license | import curses
from life import GameOfLife
from ui import UI
import os
class Console(UI):
    """Curses-based console front end for the Game of Life."""

    def __init__(self, life: GameOfLife) -> None:
        super().__init__(life)

    def draw_borders(self, screen) -> None:
        """Draw a rectangular frame around the playing field."""
        screen.border('|', '|', '-', '-', '+', '+', '+', '+')

    def draw_grid(self, screen) -> None:
        """Render the current generation: '*' for live cells, ' ' for dead.

        (Removed the unused ``dims = screen.getmaxyx()`` local.)
        NOTE(review): curses.addstr takes (y, x); here j (the column index)
        is passed as y and i (the row index) as x -- confirm orientation.
        """
        for i in range(0, self.life.rows):
            for j in range(0, self.life.cols):
                screen.addstr(j+1, i+1, '*'
                              if self.life.curr_generation[i][j] else
                              ' ')

    def run(self) -> None:
        """Main loop: initialise curses, then step and redraw the game until
        it stops changing or exceeds the generation cap.

        Fixes: curses state is now restored via try/finally even when an
        exception occurs mid-loop; the always-True ``running`` flag removed.
        """
        screen = curses.initscr()
        try:
            self.draw_borders(screen)
            curses.curs_set(0)
            screen.keypad(True)
            self.life.create_grid(True)
            while self.life.is_changing and \
                    not self.life.is_max_generations_exceed:
                self.life.step()
                self.draw_grid(screen)
                screen.refresh()
        finally:
            curses.endwin()
# Script entry point: clear the terminal and run a 30x20 game capped at 50
# generations.
if __name__ == '__main__':
    os.system('clear')
    gui = Console(GameOfLife((30,20), True, 50))
    gui.run()
| true |
5edc3a64a44dcb9f3c5f73c4ca8a9b3716f1ec3f | Python | nyborr/OpenKattisProblems | /A Towering Problem.py | UTF-8 | 593 | 3.0625 | 3 | [] | no_license | import itertools
# Kattis-style problem: read one line of integers; the last two values are
# taken as the two target tower heights and indices 0..5 as the block sizes.
# NOTE(review): inferred from the index ranges below -- confirm against the
# actual problem statement.
arr = input()
L = [int(x) for x in arr.split()]
height1 = L[-2]
height2 = L[-1]
s = []
x =[]
# Try every i<j<k combination of the six blocks; collect any triple whose
# sum matches each target height.
for i in range (0,4):
    for j in range (i+1,5):
        for k in range (j+1,6):
            if ((L[i] + L[j] + L[k]) == height1):
                s.append(L[i])
                s.append(L[j])
                s.append(L[k])
            if ((L[i] + L[j] + L[k]) == height2):
                x.append(L[i])
                x.append(L[j])
                x.append(L[k])
# Print both towers largest-block-first.
# NOTE(review): if several triples match a height, s/x hold more than three
# values and the output mixes blocks from different triples -- confirm the
# input guarantees a unique split.
s.sort()
x.sort()
x.reverse()
s.reverse()
print(s[0],s[1],s[2], x[0],x[1],x[2])
| true |
9f0ebbb14086d8a2806c40499bc0e6f881d34ffa | Python | atjason/Python | /practice/py_ex2.py | UTF-8 | 760 | 4.5 | 4 | [] | no_license | # -*- coding: utf8 -*-
"""
斐波那契数列(Fibonacci sequence),又称黄金分割数列。
如:1、1、2、3、5、8
"""
def fibonacci(num):
    """Return the first *num* Fibonacci numbers (1, 1, 2, 3, 5, ...) as a list."""
    if num <= 2:
        return [1, 1][:num]
    seq = [1, 1]
    prev, curr = 1, 1
    for _ in range(num - 2):
        prev, curr = curr, prev + curr
        seq.append(curr)
    return seq
# Demo output (Python 2 print statements): lists of the first n numbers.
print fibonacci(0)
print fibonacci(1)
print fibonacci(2)
print fibonacci(3)
print fibonacci(4)
print fibonacci(5)
print fibonacci(6)
print fibonacci(7)
def fibonacci2(num):
    """Recursive variant: the num-th Fibonacci number (fib(1) == fib(2) == 1).

    Returns an empty list for num < 1, matching the original behaviour.
    """
    if num < 1:
        return []
    if num <= 2:
        return 1
    return fibonacci2(num - 1) + fibonacci2(num - 2)
# Demo output (Python 2 print statements): the n-th Fibonacci number
# (note fibonacci2(0) prints [] -- the base case returns a list).
print fibonacci2(0)
print fibonacci2(1)
print fibonacci2(2)
print fibonacci2(3)
print fibonacci2(4)
print fibonacci2(5)
print fibonacci2(6)
print fibonacci2(7)
| true |
4bd05f7da031a2d0848afcc301e42f73b48037ed | Python | n8henrie/exercism-exercises | /python/matrix/matrix.py | UTF-8 | 469 | 3.421875 | 3 | [
"MIT"
] | permissive | import typing as t
class Matrix:
    """Parse a whitespace/newline separated matrix string and expose
    1-indexed row and column access."""

    def __init__(self, matrix_string):
        parsed = []
        for text_row in matrix_string.splitlines():
            parsed.append([int(token) for token in text_row.split()])
        self._as_rows: t.List[t.List[int]] = parsed

    def row(self, index):
        """Return the 1-indexed row as a list of ints."""
        return self._as_rows[index - 1]

    def column(self, index):
        """Return the 1-indexed column as a list of ints."""
        return [r[index - 1] for r in self._as_rows]
| true |
59b103c6858fc097fcb259f0e2ec507b1934a018 | Python | jokersunited/UrlAnalyze | /RESTAPI/urlclass.py | UTF-8 | 20,834 | 2.5625 | 3 | [] | no_license | import ssl, OpenSSL
import re
import whois, tldextract
import socket
import json
import pandas as pd
from selenium import webdriver
from selenium.common.exceptions import WebDriverException
from webdriver_manager.chrome import ChromeDriverManager
from webdriver_manager.utils import ChromeType
from bs4 import BeautifulSoup
from ocspchecker import ocspchecker
from urllib.parse import urlparse
import requests
def get_status(logs):
    """
    Extract page responses from selenium performance logs.
    :param logs: log entries from selenium (dicts with a JSON 'message')
    :return: List of [status, url, type] for each Network.responseReceived
    """
    responses = []
    for entry in logs:
        raw = entry['message']
        if not raw:
            continue
        parsed = json.loads(raw)
        inner = parsed['message']
        if inner.get('method') != "Network.responseReceived":
            continue
        params = inner['params']
        resp = params['response']
        responses.append([resp['status'], resp['url'], params['type']])
    return responses
def get_redirections(logs, final_url):
    """
    Collect the chain of document URLs requested before *final_url*.
    :param logs: selenium performance log entries
    :param final_url: URL at which collection stops
    :return: ordered list of unique intermediate document URLs
    """
    chain = []
    for entry in logs:
        if not entry['message']:
            continue
        parsed = json.loads(entry['message'])
        if parsed['message'].get('method') != "Network.requestWillBeSent":
            continue
        doc_url = parsed['message']['params']['documentURL']
        if doc_url == final_url:
            break
        if doc_url not in chain:
            chain.append(doc_url)
    return chain
############## Initialise Selenium ##############
# Initialise parameters for the Selenium headless Chrome browser.
# NOTE(review): this whole block runs at import time and may download a
# chromedriver binary -- confirm that is acceptable for the REST service.
user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36'
options = webdriver.ChromeOptions()
options.add_argument(f'user-agent={user_agent}')
options.add_argument('--headless')
options.add_argument('ignore-certificate-errors')
# Enable performance logging to trace requests and response information.
capabilities = options.to_capabilities()
capabilities['goog:loggingPrefs'] = {'performance': 'ALL'}
# Start one shared selenium service so we don't create an instance per URL.
service = webdriver.chrome.service.Service(ChromeDriverManager(chrome_type=ChromeType.GOOGLE).install())
service.start()
# Read the top domains from the specified CSV file (used by Url.get_topdomain).
top_domains = list(pd.read_csv('../RESTAPI/data/top250domains.csv')['domain'])
# Declare the features to be used in the RF Classifier.
feature_list = ['length', 'subcount', 'proto', 'pathdir', 'pathlen', 'querylen', 'queryparam', 'isip', 'pathspecial',
                'domainspecial', 'hyphencount', 'domlen', 'digi2letter', 'atchar']
class Url:
    """Lexical feature extraction for a single URL.

    Fixes: the ``tag`` label is now stored (it was silently dropped) and an
    unused ``query`` local was removed from get_specialchar.
    """
    def __init__(self, url, tag=None):
        """
        URL class to store information of URLs
        :param url: String representation of URL
        :param tag: Label if it is phishing or not, None if unknown
        """
        self.url_str = url
        self.tag = tag  # FIX: previously accepted but never stored
        self.urlparse = urlparse(url)
        self.domaininfo = tldextract.extract(self.url_str)
    def generate_raw_json(self):
        """Return the single feature row of generate_df() as a plain dict."""
        return json.loads(self.generate_df().iloc[0].to_json())
    # ======================= Lexical Features ========================
    def is_ip(self):
        """
        Checks if URL is an IP address
        :return: True if URL is an IP address and False if not
        """
        return True if self.urlparse.netloc.replace('.', '').isnumeric() else False
    def get_len(self):
        """
        Get length of URL
        :return: Integer value of the length of the URL
        """
        return len(self.url_str)
    def get_proto(self):
        """
        Get the protocol used by the URL, if it is HTTPS or not
        :return: True if HTTPS is used, False if not
        """
        return True if self.urlparse.scheme == 'https' else False
    def get_domain(self):
        """
        Get the full domain of the URL
        :return: String representation of the full domain
        """
        return str(self.urlparse.netloc)
    def get_domain_hyphen(self):
        """
        Get the ratio of hyphens to subdomain count
        :return: Float value of the ratio
        """
        return self.urlparse.netloc.count("-") / self.get_subdomaincount()
    def get_domainlen(self):
        """
        Get the total length of the domain
        :return: Integer value of the total domain length
        """
        return len(self.urlparse.netloc)
    def get_subdomaincount(self):
        """
        Get the number of subdomains in the URL
        (never 0: splitting an empty subdomain string still yields one part)
        :return: Integer value of the number of subdomains
        """
        return len(self.domaininfo.subdomain.split("."))
    def get_topdomain(self):
        """
        Check if registered domain is in the top list of domains
        :return: True if the registered domain is in the top list, else False
        """
        return True if self.domaininfo.registered_domain in top_domains else False
    def get_pathlen(self):
        """
        Get the length of the URL path
        :return: Integer value of the path length
        """
        return len(self.urlparse.path)
    def get_pathdirs(self):
        """
        Get the number of subdirectories in the URL path
        :return: Integer value of the subdirectory count
        """
        if self.urlparse.path == '/':
            return 0
        else:
            return len(self.urlparse.path.split('/')) - 1
    def get_querylen(self):
        """
        Get the length of the URL query
        :return: Integer value of the query length
        """
        return len(self.urlparse.query)
    def get_queryparams(self):
        """
        Get number of query parameters
        :return: Integer value of the count of parameters
        """
        if self.urlparse.query == '':
            return 0
        else:
            return len(self.urlparse.query.split('&'))
    def get_specialchar(self, type='domain'):
        """
        Get the percentage of special characters for specified type
        (denominator uses len + 1 to avoid division by zero)
        :param type: 'domain' or 'path' (anything else returns None)
        :return: Float percentage value of special characters in specified type
        """
        domain = self.urlparse.netloc.replace('.', '')
        path = self.urlparse.path.replace('/', '')
        # FIX: removed an unused 'query' local that was computed from the
        # path (a leftover) and never read.
        special_char_re = r'[^a-zA-Z0-9\.]'
        if type == 'domain':
            return (len(''.join(re.findall(special_char_re, domain)))) / (len(domain) + 1)
        elif type == 'path':
            return (len(''.join(re.findall(special_char_re, path)))) / (len(path) + 1)
    def get_at_char(self):
        """
        Check if @ character exists in URL
        :return: True if character exists False if not
        """
        return True if '@' in self.url_str else False
    def digit_to_letter(self):
        """
        Check the ratio of digits to letters in the entire URL
        (denominator uses letter count + 1 to avoid division by zero)
        :return: Float value of the ratio of the 2 values
        """
        letter_re = r'[a-zA-Z]'
        number_re = r'[0-9]'
        return (len(''.join(re.findall(number_re, self.url_str)))) / (
                len(''.join(re.findall(letter_re, self.url_str))) + 1)
    def generate_df(self):
        """
        Generate a pandas DataFrame object with the features necessary for
        random-forest classification
        :return: Single row Pandas DataFrame object with features extracted
        """
        rf_df = pd.DataFrame(columns=feature_list)
        rf_df['length'] = [self.get_len()]
        rf_df['subcount'] = [self.get_subdomaincount()]
        rf_df['proto'] = [self.get_proto()]
        rf_df['pathdir'] = [self.get_pathdirs()]
        rf_df['pathlen'] = [self.get_pathlen()]
        rf_df['querylen'] = [self.get_querylen()]
        rf_df['queryparam'] = [self.get_queryparams()]
        rf_df['isip'] = [self.is_ip()]
        rf_df['pathspecial'] = [self.get_specialchar('path')]
        rf_df['domainspecial'] = [self.get_specialchar('domain')]
        rf_df['digi2letter'] = [self.digit_to_letter()]
        rf_df['hyphencount'] = [self.get_domain_hyphen()]
        rf_df['domlen'] = [self.get_domainlen()]
        rf_df['atchar'] = [self.get_at_char()]
        return rf_df
class LiveUrl(Url):
def __init__(self, url, tag=None):
global service, capabilities
super().__init__(url, tag)
print("\n[*] Getting info for " + self.url_str)
self.dns = self.get_dns()
# self.req = self.get_live()
self.link_dict = None
self.uniq_dom = None
self.link_count = 0
self.spoof = {}
self.access = False
if self.dns is True:
try:
self.driver = self.init_driver(capabilities)
self.access = True
self.final_url = self.driver.current_url
self.title = self.driver.title
self.log = self.driver.get_log('performance')
self.requests = self.get_totalrequests()
self.resp_code = self.get_respcode()
self.redirects = get_redirections(self.log, self.final_url)
self.screenshot = self.get_64snapshot()
self.whois = whois.whois(self.url_str)
self.ocsp = self.get_certocsp()
if self.urlparse.scheme == 'https':
self.cert = self.get_cert()
else:
self.cert = None
self.get_links_uniqdom()
# self.print_cmdreport()
except WebDriverException as we:
print(we)
return
self.driver.quit()
    def init_driver(self, capabilities):
        """Attach a remote Chrome session to the shared selenium service,
        size the window 800x600 and navigate to this URL.

        :param capabilities: desired capabilities (module-level, with
            performance logging enabled)
        :return: the started selenium webdriver
        """
        driver = webdriver.Remote(service.service_url, desired_capabilities=capabilities)
        driver.set_window_size(800, 600)
        driver.get(self.url_str)
        return driver
    def print_cmdreport(self):
        """Print a human-readable report of everything gathered for the URL:
        page info, WHOIS dates/emails, certificate + OCSP status, logged
        requests, hyperlink category percentages and spoof-domain scores
        above 0.4.

        NOTE(review): uses self.driver after __init__ has quit it -- confirm
        before re-enabling the call commented out in __init__.
        """
        print("\n===== Page Info =====")
        print("Destination URL: " + str(self.final_url))
        print("Destination Title: " + str(self.driver.title))
        print("\n===== Domain Info =====")
        print("Registrar: " + str(self.whois.registrar))
        # WHOIS may give a single date or a list of dates.
        if type(self.whois.creation_date) is list:
            print("Creation Date: " + str(self.whois.creation_date[0]))
        else:
            print("Creation Date: " + str(self.whois.creation_date))
        if type(self.whois.expiration_date) is list:
            print("Expiry Date: " + str(self.whois.expiration_date[0]))
        else:
            print("Expiry Date: " + str(self.whois.expiration_date))
        print("Abuse Emails: ")
        if type(self.whois.emails) is list:
            for x in self.whois.emails: print("- " + str(x))
        else:
            print("- " + str(self.whois.emails))
        print("\n===== Cert Info =====")
        if self.cert is not None:
            print("Cert Issuer: " + str(self.cert.get_issuer().CN) + " " + str(self.cert.get_issuer().O))
            print("Cert Expired?: " + str(self.cert.has_expired()))
            ocsp_request = ocspchecker.get_ocsp_status(self.final_url)
            ocsp_status = [i for i in ocsp_request if "OCSP Status:" in i][0]
            print("Cert Validity: " + str(ocsp_status.split(":")[1][1:]))
        else:
            print("No SSL Cert Found!")
        print("\n===== Initiated Requests =====")
        for index, item in enumerate(get_status(self.driver.get_log('performance'))):
            print("Request " + str(index + 1) + ": " + str(item[0]) + ', ' + item[2] + ', ' + item[1])
        if self.link_count != 0:
            print("\n===== Hyperlink Info =====")
            print("Total links: " + str(self.link_count))
            print("\nloc %:" + str(len(self.link_dict['loc']) / self.link_count * 100))
            print("ext %:" + str(len(self.link_dict['ext']) / self.link_count * 100))
            print("static %:" + str(len(self.link_dict['static']) / self.link_count * 100))
            if len(self.uniq_dom.keys()) > 0:
                print("\nUnique external domains: ")
                for key in self.uniq_dom.keys():
                    print("- " + key)
            if len(self.link_dict['loc']) > 0:
                print("\nUnique local links %: " + str(self.get_uniqlocal() * 100))
            else:
                print("\nNo Local Links!")
            print("\n===== Potential Spoof Domain Scores =====")
            for key, value in self.spoof.items():
                if value > 0.4: print(key + ": " + str(value))
        else:
            print("\nNo hyperlinks on page!")
# ======================= Live Features ========================
def get_live(self):
try:
requests.get(self.url)
except Exception as e:
return False
def get_dns(self):
try:
addr_info = socket.getaddrinfo(self.urlparse.netloc, None)
# print(addr_info)
return True
except socket.gaierror:
return False
def get_linkperc(self, link):
# if self.link_count != 0:
# print("\n===== Hyperlink Info =====")
# print("Total links: " + str(self.link_count))
# print("\nloc %:" + str(len(self.link_dict['loc']) / self.link_count * 100))
# print("ext %:" + str(len(self.link_dict['ext']) / self.link_count * 100))
# print("static %:" + str(len(self.link_dict['static']) / self.link_count * 100))
#
# if len(self.uniq_dom.keys()) > 0:
# print("\nUnique external domains: ")
# for key in self.uniq_dom.keys():
# print("- " + key)
# if len(self.link_dict['loc']) > 0:
# print("\nUnique local links %: " + str(self.get_uniqlocal() * 100))
# else:
# print("\nNo Local Links!")
#
# print("\n===== Potential Spoof Domain Scores =====")
# print(self.spoof.items())
# for key, value in self.spoof.items():
# if value > 0.4: print(key + ": " + str(value))
#
# else:
# print("\nNo hyperlinks on page!")
if self.link_count > 0:
return str(int(len(self.link_dict[link]) / self.link_count * 100))+"%"
else:
return None
def truncate_url(self, url):
if len(url) > 100:
return url[:100] + "..."
else:
return url
def get_totalrequests(self):
reqlist = []
for index, item in enumerate(get_status(self.log)):
# print("Request " + str(index + 1) + ": " + str(item[0]) + ', ' + item[2] + ', ' + item[1])
reqlist.append([str(index + 1), str(item[0]), item[2], item[1]])
return reqlist
def first_email(self):
if type(self.whois.emails) is list:
return self.whois.emails[0]
else:
return self.whois.emails
def get_spoofed(self):
return_str = ""
for key, value in self.spoof.items():
if value > 0.4: return_str += key
if return_str == "":
return "Unknown"
else:
return return_str
    def get_certocsp(self):
        """Query the certificate's OCSP status for the final URL.

        :return: the OCSP status text, the OCSP request error text when the
            lookup failed, or the literal "ERROR" when neither line is found
        """
        ocsp_request = ocspchecker.get_ocsp_status(self.final_url)
        ocsp_status = [i for i in ocsp_request if "OCSP Status:" in i]
        ocsp_error = [i for i in ocsp_request if "OCSP Request Error:" in i]
        if len(ocsp_status) != 0:
            # e.g. "OCSP Status: GOOD" -> "GOOD"
            return str(ocsp_status[0].split(":")[1][1:])
        elif len(ocsp_error) != 0:
            return str(ocsp_error[0].split(":")[2][1:])
        else:
            return "ERROR"
def get_certissuer(self):
    """Return the issuing CA's common name and organisation as one string."""
    issuer = self.cert.get_issuer()
    return "{} {}".format(str(issuer.CN), str(issuer.O))
def get_expiry(self):
    """Report whether the TLS certificate has expired, as "Yes"/"No"."""
    if self.cert.has_expired():
        return "Yes"
    return "No"
def get_respcode(self):
    """Return the HTTP status code of the first logged request, or -1.

    -1 is the sentinel for "no request captured / log unreadable".

    Fix: the bare ``except:`` also swallowed SystemExit and
    KeyboardInterrupt; narrowed to ``except Exception``.
    """
    try:
        return get_status(self.log)[0][0]
    except Exception:
        return -1
def get_64snapshot(self):
    """Capture the rendered page as a base64 data URI.

    Side effect: also writes a PNG copy under ./images/<host>.png
    (the directory must already exist — presumably created elsewhere;
    TODO confirm).
    """
    encoded = self.driver.get_screenshot_as_base64()
    png_path = './images/' + str(self.urlparse.netloc) + '.png'
    self.driver.save_screenshot(png_path)
    return "data:image/png;base64," + encoded
def get_dates(self, key='expiration'):
    """Return a WHOIS date ('expiration' or 'creation') as DD/MM/YYYY.

    python-whois may hand back either a single datetime or a list of
    datetimes; the first list entry is used.

    Returns:
        The formatted date string, or None for an unknown key, a
        missing/None date, or a value that has no strftime.

    Fixes: collapsed two copy-pasted branches into a key→attribute
    lookup, and narrowed the try block from the whole body to just the
    strftime call so unrelated AttributeErrors are no longer masked.
    """
    attr_name = {'expiration': 'expiration_date', 'creation': 'creation_date'}.get(key)
    if attr_name is None:
        # Old code fell through to t = None and relied on the
        # AttributeError handler; make the unknown-key path explicit.
        return None
    date = getattr(self.whois, attr_name, None)
    if isinstance(date, list):
        date = date[0]
    try:
        return date.strftime('%d/%m/%Y')
    except AttributeError:
        # date is None (or not date-like) — mirror the old behaviour.
        return None
def get_cert(self):
    """Fetch the site's leaf TLS certificate over a fresh connection.

    Connects to <host>:443 with SNI set to the host and returns the
    presented certificate as an OpenSSL.crypto.X509 object.

    Fix: the wrapped TLS socket (and the TCP connection inside it) was
    never closed, leaking a file descriptor per call; it is now closed
    via a context manager.
    """
    conn = ssl.create_connection((self.urlparse.netloc, 443))
    # PROTOCOL_SSLv23 is the legacy alias for "negotiate best version";
    # kept to preserve the original (non-verifying) behaviour.
    context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    with context.wrap_socket(conn, server_hostname=self.urlparse.netloc) as sock:
        der_cert = sock.getpeercert(True)
    pem_cert = ssl.DER_cert_to_PEM_cert(der_cert)
    return OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, pem_cert)
# def clean_text(self):
# res_source = self.driver.page_source
# html_re = r'(<style.*>[^<]*<\/style>|<script[\s\S]*?><\/script>|<script.*>[\s\S]*?<\/script>|<[^>]*>)'
# body_text = re.sub(html_re, '', res_source)
# text_list = body_text.replace("\n", " ").replace("\t", " ").split(" ")
# clean_text = [x for x in text_list if x]
# return clean_text
# def get_lang(self):
# res_source = self.driver.page_source
# html_re = r'(<style.*>[^<]*<\/style>|<script[\s\S]*?><\/script>|<script.*>[\s\S]*?<\/script>|<[^>]*>)'
# body_text = re.sub(html_re, '', res_source)
# t = detect_langs(body_text)
# return t
def get_links_uniqdom(self):
    """Classify every <a>/<area> href on the rendered page and score external domains.

    Side effects (no return value):
      * increments self.link_count once per anchor found;
      * stores the per-category href lists in self.link_dict
        ('loc' local, 'ext' external, 'static' empty/#/?/javascript, 'mail');
      * stores per-external-domain counts in self.uniq_dom;
      * writes a spoof score per external domain into self.spoof.
    """
    soup = BeautifulSoup(self.driver.page_source, features='lxml')
    links = soup.find_all(['a', 'area'])
    link_dict = {'loc': [], 'ext': [], 'static': [], 'mail': []}
    uniq_dom = {}
    for link in links:
        link = link.get('href')
        self.link_count += 1
        # Branch order matters: the first test also catches link is None,
        # so the later string operations are safe.
        if link is None or len(link) == 0 or link[0] == "#" or link[0] == "?" or "javascript:" in link:
            # javascript: URIs keep their payload (scheme stripped, spaces removed).
            if link is not None and "javascript:" in link:
                link = "".join(link.split(":")[1:]).replace(" ", "")
            link_dict['static'].append(link)
        elif "mailto:" in link:
            link_dict['mail'].append(link)
        # Local: path-relative, same registered domain, or scheme-less hrefs.
        elif link[0] == "/" or tldextract.extract(
                link).registered_domain == tldextract.extract(self.final_url).registered_domain or "://" not in link:
            link_dict['loc'].append(link)
        else:
            # External link: tally its registered domain.
            base_dom = tldextract.extract(link).registered_domain
            if base_dom not in uniq_dom:
                uniq_dom.update({base_dom: 1})
            else:
                uniq_dom[base_dom] += 1
            link_dict['ext'].append(link)
    # Formula for calculation counts of each unique (domain / (num of loc + ext link) * ((num of ext / link count) + (num of static / link count))
    # Division by len(link_dict['ext']) is safe here: uniq_dom only gains
    # entries when an 'ext' link was appended in the loop above.
    for key, value in uniq_dom.items():
        # self.spoof.update({str(key): (value / (len(link_dict['loc']) + len(link_dict['ext']))) * (
        #     len(link_dict['ext']) / self.link_count + len(link_dict['static']) / self.link_count)})
        self.spoof.update({str(key): (value / len(link_dict['ext'])) * (
            len(link_dict['ext']) / self.link_count + len(link_dict['static']) / self.link_count)})
        # print(str(key) + ": " + str((value/(len(link_dict['loc']) + len(link_dict['ext'])))*(len(link_dict['ext'])/link_count + len(link_dict['static'])/link_count)))
    self.link_dict = link_dict
    self.uniq_dom = uniq_dom
def get_uniqlocal(self):
    """Fraction of distinct local+static links among all local/static hrefs.

    Values near 1.0 mean the page's internal links are mostly unique,
    values near 0 mean heavy repetition (the -1 discount is kept from
    the original heuristic). Returns 0 when there are no local or
    static links at all.
    """
    local = self.link_dict['loc']
    static = self.link_dict['static']
    unique_static = len(set(static))
    if local:
        unique_local = len(set(local))
        return (unique_local + unique_static - 1) / (len(local) + len(static))
    try:
        return (unique_static - 1) / len(static)
    except ZeroDivisionError:
        return 0
d55ddf43db7970d6f19112662d28f636ea6889a7 | Python | shad7/seedbox | /seedbox/db/sqlalchemy/migrate_repo/versions/004_alter_total_time_column_on_MediaFile.py | UTF-8 | 600 | 2.859375 | 3 | [
"MIT"
] | permissive | """Handles converting MediaFile.total_time from Integer to Float"""
import sqlalchemy as sa
def upgrade(migrate_engine):
    """Widen media_files.total_time from Integer to Float."""
    metadata = sa.MetaData(bind=migrate_engine)
    media_files = sa.Table('media_files', metadata, autoload=True)
    media_files.c.total_time.alter(type=sa.Float)
def downgrade(migrate_engine):
    """Revert media_files.total_time from Float back to Integer."""
    metadata = sa.MetaData(bind=migrate_engine)
    media_files = sa.Table('media_files', metadata, autoload=True)
    media_files.c.total_time.alter(type=sa.Integer)
| true |
efb3ca120b99d0907592c208b816a078c94d670b | Python | kriegaex/projects | /Python/projectEuler/CombinatoricSelections.py | UTF-8 | 916 | 3.609375 | 4 | [] | no_license | import time
def is_even(num):
    """Return True when num is divisible by 2, False otherwise."""
    return num % 2 == 0
def factorial(num):
    """Return num! for a non-negative integer.

    Fix: the recursive version hit Python's default recursion limit
    (~1000) for large inputs; the iterative loop has no such ceiling
    and avoids one stack frame per multiplication.
    """
    result = 1
    for i in range(2, num + 1):
        result *= i
    return result
def formula(num1, num2):
    """Return the binomial coefficient C(num1, num2) exactly.

    Rewritten with the multiplicative formula in pure integer
    arithmetic. The old factorial-ratio used true division, which
    converts huge factorials (100! ~ 9e157) to floats and loses
    precision; it also recomputed three full factorials per call.
    Each partial product of i consecutive integers is divisible by i!,
    so the floor division below is exact at every step.

    Args:
        num1: n, the number of items (n >= 0).
        num2: r, the selection size (0 <= r <= n).
    """
    result = 1
    for i in range(1, num2 + 1):
        result = result * (num1 - num2 + i) // i
    return result
def main():
    """Project Euler 53: count C(n, r) values over one million for n = 2..100."""
    total = 0
    for n in range(2, 101):
        half = n // 2
        # C(n, r) increases with r up to the middle, so scan to the first
        # value over a million and count the rest by the C(n,r)=C(n,n-r)
        # symmetry. (int(0.5*n) and int(0.5*(n+1)) in the original both
        # equal n//2 for the parities they were used with.)
        for r in range(1, half + 1):
            if formula(n, r) > 1000000:
                over = (half + 1 - r) * 2
                if is_even(n):
                    # The central coefficient C(n, n/2) was double-counted.
                    over -= 1
                total += over
                break
    print(total)
# Benchmark: time one complete run of the search and print the elapsed seconds.
start_time = time.time()
main()
end_time = time.time()
print(end_time - start_time)
9f5edc10aca42dfa995916c210e2a92f24ba9c74 | Python | samedayshipping/python3-amazon-mws | /mws/parsers/errors.py | UTF-8 | 2,404 | 2.53125 | 3 | [
"Unlicense"
] | permissive | import re
from .base import first_element, BaseResponseMixin, BaseElementWrapper
from lxml import etree
class ErrorResponse(ValueError, BaseElementWrapper, BaseResponseMixin):
    """Wrapper for an MWS <ErrorResponse> document that is also raisable.

    Subclasses ValueError so a parsed error can be raised directly; the
    exception text is the content of the <Error><Message> element.
    """

    def __init__(self, element, mws_access_key=None, mws_secret_key=None, mws_account_id=None, mws_auth_token=None):
        BaseElementWrapper.__init__(self, element)
        # Seed ValueError with the parsed <Message> so str(exc) is useful.
        ValueError.__init__(self, self.message)

    @property
    @first_element
    def type(self):
        """Error type string from <Error><Type>."""
        return self.element.xpath('//ErrorResponse/Error/Type/text()')

    @property
    @first_element
    def code(self):
        """Error code string from <Error><Code>."""
        return self.element.xpath('//ErrorResponse/Error/Code/text()')

    @property
    @first_element
    def message(self):
        """Human-readable message from <Error><Message>."""
        return self.element.xpath('//ErrorResponse/Error/Message/text()')

    @property
    @first_element
    def request_id(self):
        """Request identifier from <RequestID>."""
        return self.element.xpath('//ErrorResponse/RequestID/text()')

    @classmethod
    def load(cls, xml_string, mws_access_key=None, mws_secret_key=None, mws_account_id=None, mws_auth_token=None):
        """
        Create an instance of this class using an xml string.

        Overridden so the default xmlns declaration can be stripped first,
        which lets the un-namespaced xpath expressions above match.

        Fix: the pattern is now a raw string, so the whitespace escape
        reaches the regex engine unmangled and modern Python no longer
        emits an invalid-escape-sequence SyntaxWarning for the literal.

        :param xml_string:
        :return:
        """
        ptn = r'\s+xmlns=".*?"'
        xml_string = re.sub(ptn, '', xml_string)
        tree = etree.fromstring(xml_string)
        return cls(tree)
class ProductError(ValueError, BaseElementWrapper):
    """Raisable wrapper around a single <Error> element returned by the
    Products API; the exception text is the element's <Message>.
    """

    # Namespace prefixes used by the xpath expressions below.
    namespaces = {
        'a': 'http://mws.amazonservices.com/schema/Products/2011-10-01',
        'b': 'http://mws.amazonservices.com/schema/Products/2011-10-01/default.xsd'
    }

    def __init__(self, element, identifier, mws_access_key=None, mws_secret_key=None, mws_account_id=None, mws_auth_token=None):
        BaseElementWrapper.__init__(self, element)
        ValueError.__init__(self, self.message)
        # The ASIN/SKU (or similar) this error refers to.
        self.identifier = identifier

    @property
    @first_element
    def type(self):
        """Error type string from <Type>."""
        return self.element.xpath('./a:Type/text()', namespaces=self.namespaces)

    @property
    @first_element
    def code(self):
        """Error code string from <Code>."""
        return self.element.xpath('./a:Code/text()', namespaces=self.namespaces)

    @property
    @first_element
    def message(self):
        """Human-readable message from <Message>."""
        return self.element.xpath('./a:Message/text()', namespaces=self.namespaces)
d4eb70a662cf6d186f3fd4bfeb7a3b7fd6ebc686 | Python | hojjat-faryabi/python_binary_genetic_algorithm | /crossover.py | UTF-8 | 802 | 2.546875 | 3 | [] | no_license | from init import init
import numpy as np
import random
def crossover(initValues: "init", matingPool: list):
    """Single-point crossover over a mating pool of binary chromosomes.

    Parents are paired via a random permutation; with probability
    ``initValues.pc`` each pair exchanges the tail ``[cutPoint:]`` of
    their chromosomes, otherwise the offspring are plain copies.

    Fixes two defects in the previous version:
      * the swap sliced ``[cutPoint:-1]``, so the last gene was never
        exchanged (off-by-one);
      * ``temp = off2`` aliased a numpy view instead of copying, so the
        "swap" wrote off1's tail into off2 and then copied the same
        values straight back — off1 was never altered, and matingPool
        was mutated in place.

    Args:
        initValues: holder exposing ``n`` (population size, even),
            ``l`` (chromosome length) and ``pc`` (crossover probability).
        matingPool: 2-D array-like of shape (n, l); left unmodified.

    Returns:
        np.ndarray of shape (n, l) with the offspring population.
    """
    order = np.random.permutation(initValues.n)
    newPopulation = np.zeros((initValues.n, initValues.l), dtype=int)
    for j in range(0, initValues.n, 2):
        parent1 = matingPool[order[j], :]
        parent2 = matingPool[order[j + 1], :]
        off1 = np.array(parent1, copy=True)
        off2 = np.array(parent2, copy=True)
        if random.random() < initValues.pc:
            # Cut point in [1, l-1] so both offspring inherit from both parents.
            cutPoint = random.randint(1, initValues.l - 1)
            off1[cutPoint:] = parent2[cutPoint:]
            off2[cutPoint:] = parent1[cutPoint:]
        newPopulation[j, :] = off1
        newPopulation[j + 1, :] = off2
    return newPopulation