blob_id
stringlengths
40
40
language
stringclasses
1 value
repo_name
stringlengths
5
133
path
stringlengths
2
333
src_encoding
stringclasses
30 values
length_bytes
int64
18
5.47M
score
float64
2.52
5.81
int_score
int64
3
5
detected_licenses
listlengths
0
67
license_type
stringclasses
2 values
text
stringlengths
12
5.47M
download_success
bool
1 class
d240a20e48f68351ae620b3ee2d82468edfbd450
Python
erickmiller/AutomatousSourceCode
/AutonomousSourceCode/data/raw/sort/5824a259-97ee-4c08-bf18-8f715a0bfd52__alternateSort.py
UTF-8
647
3.25
3
[]
no_license
''' Created on Oct 15, 2014 @author: Ben Athiwaratkun (pa338) ''' #from __future__ import division #import numpy as np def sortedToAlternateSort(A): n = len(A) B = [None]*n # assume that A is sorted half = len(A)/2 for i in range(half): B[2*i] = A[i] B[2*i+1] = A[n-1-i] if n % 2 == 1: B[n-1] = A[half] # take care of odd length case return B def main(): A = [1,5,2,5,6,8,2,3,7,2,0] B = sorted(A) print B print sortedToAlternateSort(B) print sortedToAlternateSort([1,2,3,4,5,6]) print sortedToAlternateSort([1,2,3,4,5,6,7]) if __name__ == "__main__": main()
true
8861ad18e8fd95b706abcd6155889d7a6f834494
Python
zhlstone/AIR
/interaction_prediction/loss.py
UTF-8
6,180
2.671875
3
[ "Apache-2.0" ]
permissive
# This is the loss for interaction prediction import tensorflow as tf import numpy as np class JointPredMultiModalLoss(tf.keras.losses.Loss): def __init__(self, name = 'jpmm_loss'): super().__init__(name = name) def call(self, gt, avails, pred, confidences, reduce_mean=True): """ Call Arguments: targets: (B, 2, 80, 2) pred: (B, 64, 2, 80, 2) confidences: (B, 64) target_availabilities: (B, 2, 80) Returns: loss: (1,) if reduce_mean (B, 1) otherwise """ assert len(pred.shape) == 5, f"expected 3D (MxTxC) array for pred, got {pred.shape}" gt = tf.reshape(gt, [-1, 1, 2, 80, 2]) avails = tf.reshape(avails, [-1, 1, 2, 80, 1]) error = tf.reduce_sum(((gt - pred)*avails)**2, axis = [2, 3,4]) # Outputs:(B, 64) error = tf.math.log(confidences + 1e-16) - 0.5 * error error = -tf.math.reduce_logsumexp(error, axis = 1, keepdims = True) # (B, 1) error /= 80. if reduce_mean: return tf.reduce_mean(error) else: return error class JointPredMultiPathLoss(tf.keras.losses.Loss): def __init__(self, veh_centroids, ped_centroids, cyc_centroids, veh_weight = 1., ped_weight = 1., cyc_weight=1., cls_weight = 1., marginal_loss_weight=0., name = 'jpmp_loss'): super().__init__(name = name) self.veh_centroids = tf.convert_to_tensor(veh_centroids) self.ped_centroids = tf.convert_to_tensor(ped_centroids) self.cyc_centroids = tf.convert_to_tensor(cyc_centroids) self.K = len(self.veh_centroids) self.veh_weight = veh_weight self.ped_weight = ped_weight self.cyc_weight = cyc_weight self.cls_weight = cls_weight self.marginal_loss_weight = marginal_loss_weight def call(self, gt, avails, pred, confidences, x, y, yaw, object_type, reduce_mean = True): """ Call Arguments: gt: (B, 2, 80, 2) pred: (B, K*K, 2, 80, 2) confidences: (B, K*K) avails: (B, 2, 80) x: (B, 2, 1) y: (B, 2, 1) yaw: (B, 2, 1) object_type: (B, 2) Returns: loss: (1,) if reduce_mean (B, 1) otherwise """ assert len(pred.shape) == 5, f"expected 3D (MxTxC) array for pred, got {pred.shape}" x = tf.reshape(x, [-1, 1]) y = tf.reshape(y, [-1, 1]) yaw 
= tf.reshape(yaw, [-1, 1]) gt = tf.reshape(gt, [-1, 80, 2]) c = tf.math.cos(yaw) # (B*2, 1) s = tf.math.sin(yaw) # (B*2, 1) x_hat = gt[:, :, 0] - x # (B*2, 80) y_hat = gt[:, :, 1] - y # (B*2, 80) gt_ego_x = c * x_hat + s * y_hat # (B*2, 80) gt_ego_y = -s * x_hat + c * y_hat # (B*2, 80) gt = tf.reshape(gt, [-1, 1, 2, 80, 2]) avails = tf.reshape(avails, [-1, 1, 2, 80, 1]) error = tf.reduce_sum(((gt - pred)*avails)**2, axis = [2, 3, 4]) # Outputs:(B, 64) error = -self.cls_weight*tf.math.log(confidences + 1e-16) + 0.5 * error # (B, K*K) conf_error = -self.cls_weight*tf.math.log(confidences + 1e-16) # (B, K*K) gt_ego = tf.stack([gt_ego_x, gt_ego_y], axis = -1) # (B*2, 80, 2) # self.centroids has shape (K, 160) gt_ego = tf.reshape(gt_ego, [-1, 1, 80, 2]) # (B*2, 1, 80, 2) avails = tf.reshape(avails, [-1, 1, 80, 1]) # (B*2, 1, 80, 1) veh_centroids = tf.reshape(self.veh_centroids, [1, -1, 80, 2]) # (1, K, 80, 2) distance = ((gt_ego - veh_centroids)**2)*avails # (B*2, K, 80, 2) distance = tf.reduce_sum(distance, [2,3]) # (B*2, K) veh_assignments = tf.argmin(distance, axis = 1) # (B*2,) veh_assignments = tf.cast(veh_assignments, tf.int32) ped_centroids = tf.reshape(self.ped_centroids, [1, -1, 80, 2]) # (1, K, 80, 2) distance = ((gt_ego - ped_centroids)**2)*avails # (B*2, K, 80, 2) distance = tf.reduce_sum(distance, [2,3]) # (B*2, K) ped_assignments = tf.argmin(distance, axis = 1) # (B*2,) ped_assignments = tf.cast(ped_assignments, tf.int32) cyc_centroids = tf.reshape(self.cyc_centroids, [1, -1, 80, 2]) # (1, K, 80, 2) distance = ((gt_ego - cyc_centroids)**2)*avails # (B*2, K, 80, 2) distance = tf.reduce_sum(distance, [2,3]) # (B*2, K) cyc_assignments = tf.argmin(distance, axis = 1) # (B*2,) cyc_assignments = tf.cast(cyc_assignments, tf.int32) object_type_mask = tf.math.reduce_max(object_type, axis=1) object_type_mask = self.veh_weight*tf.cast(object_type_mask==1, tf.float32) +\ self.ped_weight*tf.cast(object_type_mask==2, tf.float32) +\ 
self.cyc_weight*tf.cast(object_type_mask==3, tf.float32) object_type_mask = tf.reshape(object_type_mask, [-1, 1]) object_type = tf.reshape(object_type, [-1,]) is_veh = tf.cast(object_type == 1, tf.int32) # (2*B,) is_ped = tf.cast(object_type == 2, tf.int32) is_cyc = 1 - is_veh - is_ped assignments = is_veh*veh_assignments + is_ped*ped_assignments + is_cyc*cyc_assignments # (2*B,) assignments = tf.reshape(assignments, [-1, 2]) assignments_0 = assignments[:, 0] # (B,) assignments_1 = assignments[:, 1] # (B,) marginal_0 = tf.reduce_sum(tf.reshape(confidences, [-1, self.K, self.K]), axis=2) # (B, K) marginal_0 = -tf.math.log(marginal_0 + 1e-16) # (B,) marginal_0 = tf.one_hot(assignments_0, depth=self.K)*marginal_0 marginal_1 = tf.reduce_sum(tf.reshape(confidences, [-1, self.K, self.K]), axis=1) marginal_1 = -tf.math.log(marginal_1 + 1e-16) # (B,) marginal_1 = tf.one_hot(assignments_1, depth=self.K)*marginal_1 marginal_error = tf.reduce_sum(marginal_0, axis=1) + tf.reduce_sum(marginal_1, axis=1) assignments = self.K*assignments_0 + assignments_1 # (B,) assignments_mask = tf.one_hot(assignments, self.K**2) # (B, 64) error = assignments_mask * error # (B, K**2) error = error * object_type_mask error = tf.reduce_sum(error, axis = 1) + marginal_error * self.marginal_loss_weight error /= 80. conf_error = assignments_mask * conf_error conf_error = conf_error * object_type_mask conf_error = tf.reduce_sum(conf_error, axis = 1) + marginal_error * self.marginal_loss_weight conf_error /= 80. if reduce_mean: return tf.reduce_mean(error), tf.reduce_mean(conf_error) else: return error, conf_error
true
2a20adf71a1212466bbb9c7f6ad0153acf5475d2
Python
srazihaider/PythonExcercises
/Excercise7.py
UTF-8
107
3.265625
3
[]
no_license
myfilename = input("Enter a file name with its extension") mylist = myfilename.split(".") print(mylist[1])
true
0794d75bab74119b479a7c05d760c869f0d9118e
Python
dozhdikov99/SnakeGame
/SnakeGame.py
UTF-8
4,344
2.703125
3
[]
no_license
#!/usr/bin/env python3.4 # By Igoru99 (C) 2016 year import time import win32api import pythoncom import pyHook import threading import os import random from colorama import Fore, Back, Style, init data = ['0' for i in range(200)] # Game box 10*20 scores = 0 # Scores python = [110,130,150] # List of coordinats the snake is_game = True # Checking of game end key = 1 # Pressed key def OnKeyboardEvent(event): global key LEFT = win32api.GetKeyState(0x25) RIGHT = win32api.GetKeyState(0x27) UP = win32api.GetKeyState(0x26) DOWN = win32api.GetKeyState(0x28) if LEFT<0: key = 4 elif RIGHT<0: key = 3 elif UP<0: key = 2 elif DOWN<0: key = 1 def EventListener(): """ Function for listening pressed keys """ hm = pyHook.HookManager() hm.KeyAll = OnKeyboardEvent hm.HookKeyboard() pythoncom.PumpMessages() def ini(): """ Function of initialization the game """ global data init() for i in range(0,20): data[i] = Back.WHITE + '-' + Back.BLACK for i in range(0, 200, 20): data[i] = Back.WHITE + '|' + Back.BLACK for i in range(19, 200, 20): data[i] = Back.WHITE + '|' + Back.BLACK for i in range(181,199): data[i] = Back.WHITE + '-' + Back.BLACK generate_eat() def move(forward): """ Function of move the snake """ global python last_pos = int() data[python[0]] = '0' last_pos = python.pop(0) if forward == 1: # down python.append(python[len(python)-1]+20) elif forward == 2: # up python.append(python[len(python)-1]-20) elif forward == 3: # right python.append(python[len(python)-1]+1) elif forward == 4: # left python.append(python[len(python)-1]-1) if data[python[len(python)-1]] == Back.WHITE + '-' + Back.BLACK or data[python[len(python)-1]] == Back.WHITE + '|' + Back.BLACK or data[python[len(python)-1]] == Back.GREEN + '*' + Back.BLACK: last_index = len(python)-1 python.remove(python[last_index]) new_python = [] new_python.append(last_pos) new_python.extend(python) python = new_python.copy() game_end() show() return False elif data[python[len(python)-1]] == Back.YELLOW + '$' + Back.BLACK: global 
scores scores += 1 add_body(last_pos) generate_eat() return True else: return True def add_body(last_pos): """ Increase the lenght of the snake """ global python new_python = [] new_python.append(last_pos) new_python.extend(python) python = new_python.copy() def generate_eat(): """ Function of generation food """ is_generate = False while is_generate is False: i = random.randint(0,199) if data[i] == '0': data[i] = Back.YELLOW + '$' + Back.BLACK is_generate = True def game_end(): global is_game is_game = False for i in range(63, 76): data[i] = Back.RED + '-' + Back.BLACK data[63] = Back.RED + '+' + Back.BLACK data[75] = Back.RED + '+' + Back.BLACK for i in range(103, 116): data[i] = Back.RED + '-' + Back.BLACK data[103] = Back.RED + '+' + Back.BLACK data[115] = Back.RED + '+' + Back.BLACK t = 0 l = '| Game end! |' for i in range(83,96): data[i] = Back.RED + l[t] + Back.BLACK t += 1 return def show(): """ Print game box in console """ for i in python: if data[i] == '0' or data[i] == Back.YELLOW + '$' + Back.BLACK or data[i] == Back.GREEN + '*' + Back.BLACK: data[i] = Back.GREEN + '*' + Back.BLACK string = Back.WHITE + '|' + 18*'-' + '|\n' + '|' + 4*' ' + Back.BLUE + 'Scores: ' + str(scores) + Back.WHITE + (6-len(str(scores)))*' ' + '|\n' + Back.BLACK for i in range(10): for j in range(20*i, 20*i+20): string += data[j] else: string += '\n' print(string) ini() show() time.sleep(1) t = threading.Thread(target=EventListener) # Creating new threading for EventListener t.daemon = True t.start() while True: # Body of the game os.system('cls') if move(key) is False: break show() time.sleep(1) input('Press Enter to exit.')
true
831d9f25dbfb77a08b1bc62df528191b19474942
Python
ffigura/Euler-deconvolution-plateau
/code/synthetic_test/estimates_statistics.py
UTF-8
2,093
2.875
3
[ "BSD-3-Clause", "LicenseRef-scancode-unknown-license-reference" ]
permissive
""" Estimates statistics A Python program to compute the standard deviation of depth and base-level estimates and the mean of the northing, easting and depth estimates on plateau plots. The outputs are placed at the folder 'results'. The nomenclature is 'plateau_pltX.txt', where X stands for the area corresponding to the SI plotted. This code is released from the paper: Correct structural index in Euler deconvolution via base-level estimates The program is under the conditions terms in the file README.txt authors: Felipe F. Melo, 2019 email: felipe146@hotmail.com """ import numpy as np def plateau(est_plateau,xi,yi,area_plt,SI_vet,name): results=[] for i in range (len(est_plateau)): estimates=np.stack((xi,yi,est_plateau[i][:,2],est_plateau[i][:,3]), axis=-1) masked =np.ma.array(estimates,mask=np.repeat(estimates[:,0]<= area_plt[0],estimates.shape[1])) masked = np.ma.array(masked, mask=np.repeat(masked[:,0]>=area_plt[1], estimates.shape[1])) masked = np.ma.array(masked, mask=np.repeat(masked[:,1]<=area_plt[2], estimates.shape[1])) masked = np.ma.array(masked, mask=np.repeat(masked[:,1]>=area_plt[3], estimates.shape[1])) stdz=(np.std(masked[:, 2]/1000.)) stdb=(np.std(masked[:, 3])) meanx=(np.mean(masked[:, 0]/1000.)) meany=(np.mean(masked[:, 1]/1000.)) meanz=(np.mean(masked[:, 2]/1000.)) meanb=(np.mean(masked[:, 3])) results.append([SI_vet[i],stdz,stdb,meanx,meany,meanz,meanb]) output=np.array([(results[i]) for i in range (0,len(SI_vet))]) np.savetxt('results/'+str(name)+'.txt',output,fmt='%.3f',\ header="SI, std z, std b, mean x, mean y, mean z, mean b", comments='') return
true
cd10cd1cafc934b6c2a7535a158acebaa84c1d12
Python
GhadeerElmkaiel/Simple-Evolution-Simulation
/Run_Simulation.py
UTF-8
18,342
2.65625
3
[]
no_license
import pygame import random import math # ----------------------------------------------------------------- pygame.init() winWidth = 1100 winHeight = 800 playWinWidth = 800 playWinHeight = 800 FPS = 100 mutationRate = 0.01 hugeMutationRate = 0.05 gen = 1 maxFitness = 0 minAnglesChange = 10**7 minSteps = 1000 minStepsLimit = 50 oneReachedGoal = False showAll = True withWalls = True clock = pygame.time.Clock() gameDis = pygame.display.set_mode((winWidth, winHeight)) startPos = [playWinWidth/2, playWinHeight-20] maxSpeed = 9 black = (0, 0, 0) white = (255, 255, 255) red = (255, 0, 0) green = (0, 255, 0) blue = (0, 0, 0) darkGray = (30, 30, 30) lightGray = (200, 200, 200) bestUnitImgSize = 150 bestUnitImgWidthStart = playWinWidth + (winWidth - playWinWidth - bestUnitImgSize)/2 bestUnitImgHeightStart = 520 unitWidth = 10 unitHeight = 15 unit = pygame.image.load("Unit.png") bestUnit = pygame.image.load("BestUnit.png") bestUnitBig = pygame.image.load("BestBig.png") bestUnitsInGens = [] # ----------------------------------------------------------------- def dis(pos1, pos2): return (math.sqrt((pos1[0]-pos2[0])**2 + (pos1[1]-pos2[1])**2)) # ----------------------------------------------------------------- def get_msg_objects(msg, color, font): textSurface = font.render(msg, True, color) return textSurface, textSurface.get_rect() # ----------------------------------------------------------------- def msg_to_screen(msg, pos, color=(0, 0, 0), size = 25): font = pygame.font.SysFont(None, size) textSurface, textRect = get_msg_objects(msg, color, font) textRect.center = pos[0], pos[1] gameDis.blit(textSurface, textRect) # ----------------------------------------------------------------- def msg_to_screen_corner(msg, pos, color=(0, 0, 0), size = 25): font = pygame.font.SysFont(None, size) textSurface = font.render(msg, True, color) gameDis.blit(textSurface, pos) # ----------------------------------------------------------------- def update_control_ban(): pygame.draw.rect(gameDis, 
(30, 30, 30), [playWinWidth,0,winWidth - playWinWidth, winHeight]) msg_to_screen_corner("Max Fitness =", [playWinWidth + 10, 10],lightGray, 30) if not oneReachedGoal: msg_to_screen_corner(str(int(maxFitness)), [playWinWidth + 10, 35], lightGray, 30) else: msg_to_screen_corner("Max Value", [playWinWidth + 10, 35], lightGray, 30) msg_to_screen_corner("Gen : ", [playWinWidth + 10, 80], lightGray, 30) msg_to_screen_corner(str(gen), [playWinWidth + 10, 105], lightGray, 30) msg_to_screen_corner("Min Num of Steps : ", [playWinWidth + 10, 150], lightGray, 30) if minSteps > 400 : msg_to_screen_corner("INF", [playWinWidth + 10, 175], lightGray, 30) else: msg_to_screen_corner(str(minSteps), [playWinWidth + 10, 175], lightGray, 30) msg_to_screen_corner("Min Unit Angles Change : ", [playWinWidth + 10, 220], lightGray, 30) if minAnglesChange >= 10**7: msg_to_screen_corner("INF", [playWinWidth + 10, 245], lightGray, 30) else: msg_to_screen_corner(str(int(minAnglesChange)), [playWinWidth + 10, 245], lightGray, 30) msg_to_screen_corner("The Best Unit :", [playWinWidth + 10, bestUnitImgHeightStart - 30], lightGray, 30) # pygame.draw.rect(gameDis, white, [playWinWidth + (winWidth - playWinWidth - bestUnitImgSize)/2, bestUnitImgHeightStart, winWidth - (winWidth - playWinWidth - bestUnitImgSize)/2, bestUnitImgHeightStart + bestUnitImgSize]) pygame.draw.rect(gameDis, white, [bestUnitImgWidthStart, bestUnitImgHeightStart, bestUnitImgSize, bestUnitImgSize]) # ------------------------------------------------------------------ # ------------------------------------------------------------------ # ------------------------------------------------------------------ class Wall: def __init__(self, pos, width, height): self.pos = pos self.height = height self.width = width global gameDis def show(self): pygame.draw.rect(gameDis, (170, 170, 170), [self.pos[0], self.pos[1], self.width, self.height]) # ------------------------------------------------------------------ # 
------------------------------------------------------------------ # ------------------------------------------------------------------ walls=[] wall_1 = Wall([300, 500], 500, 20) wall_2 = Wall([0, 300], 500, 10) walls.append(wall_1) walls.append(wall_2) # ------------------------------------------------------------------ # ------------------------------------------------------------------ # ------------------------------------------------------------------ """ The Brain for each unit which contain the directions and angles matrices and the number of steps were used to get to the goal """ class Brain: def __init__(self, size): self.size = size self.directions = [] self.angles = [] self.randomize() self.steps = 0 self.mutationRate = mutationRate """ Randomize the initial values for the directions matrix and calc the angles matrix """ def randomize(self): for i in range(self.size): r = 2*math.pi*random.random() vector = [math.cos(r), math.sin(r)] self.directions.append(vector) self.angles.append(r*180/math.pi) """ Clone the directions and angles matrices to give to new unit """ def clone(self): clone = Brain(self.size) for i in range(self.size): clone.directions[i] = self.directions[i] clone.angles[i] = self.angles[i] return clone """ Make small changes to some of the directions and angle values """ def mutate(self, hugeMutate = False): for i in range(self.size): r = random.random() if r < self.mutationRate: r = 2 * math.pi * random.random() self.angles[i] = r*180/math.pi vector = [0.5*math.cos(r), 0.5*math.sin(r)] self.directions[i]=vector if hugeMutate: for i in range(self.size): r = random.random() if r < self.mutationRate*10: r = 2 * math.pi * random.random() self.angles[i] = r * 180 / math.pi vector = [0.5 * math.cos(r), 0.5 * math.sin(r)] self.directions[i] = vector # ------------------------------------------------------------------ # ------------------------------------------------------------------ # 
------------------------------------------------------------------ """ The calss that define each unit and have the parameters position and speed and acceleration and the brain """ class Dot: def __init__(self, color=(0, 0, 0)): global maxSpeed global playWinHeight global playWinWidth global goal global gameDis global startPos self.pos = [playWinWidth/2, playWinHeight-20] self.speed = [0, 0] self.acc = [0, 0] self.radius = 4 self.maxSpeed = maxSpeed self.brain = Brain(400) self.isDead = False self.reachedGoal = False self.fitness = 0 self.color = color self.isBest = False self.angle = 90 self.lastAngle = 90 self.angleChanges = 0 # ------------------------------------------------------ def show(self): if self.isBest: imgDir = pygame.transform.rotate(bestUnit, self.angle -90) imgBigDir = pygame.transform.rotate(bestUnitBig, self.angle -90) rect = imgBigDir.get_rect() gameDis.blit(imgDir, (self.pos[0] - int(unitWidth/2), self.pos[1] - int(unitHeight/2))) gameDis.blit(imgBigDir, (bestUnitImgWidthStart-int((rect[2]-bestUnitImgSize)/2), bestUnitImgHeightStart-int((rect[3]-bestUnitImgSize)/2))) else: imgDir = pygame.transform.rotate(unit, self.angle -90) gameDis.blit(imgDir, (self.pos[0] - int(unitWidth/2), self.pos[1] - int(unitHeight/2))) # pygame.draw.circle(gameDis, self.color, [int(self.pos[0]), int(self.pos[1])], self.radius, 0) # ------------------------------------------------------ def move(self): if (len(self.brain.directions) > self.brain.steps): self.acc = self.brain.directions[self.brain.steps] self.angle = self.brain.angles[self.brain.steps] self.angleChanges+= min(abs(self.angle - self.lastAngle), abs(min(self.angle, self.lastAngle) - max(self.angle, self.lastAngle) + 360)) self.lastAngle = self.angle self.brain.steps += 1 for i in range(2): self.pos[i] += self.speed[i] self.speed[i] += self.acc[i] if self.speed[0] > self.maxSpeed: self.speed[0] = self.maxSpeed elif self.speed[0] < -1*self.maxSpeed: self.speed[0] = -1*self.maxSpeed if self.speed[1] > 
self.maxSpeed: self.speed[1] = self.maxSpeed elif self.speed[1] < -1*self.maxSpeed: self.speed[1] = -1*self.maxSpeed # ------------------------------------------------------ def check_if_out(self): if self.pos[0]<self.radius*1.2 or (self.pos[0]>playWinWidth-self.radius*1.2) or (self.pos[1]<self.radius*1.2) or (self.pos[1] > playWinHeight - self.radius*1.2): self.isDead = True return True return False # ------------------------------------------------------ def check_if_hit_wall(self, wall): if (self.pos[0] > wall.pos[0] - self.radius) and (self.pos[0] < wall.pos[0] + wall.width + self.radius) and (self.pos[1] > wall.pos[1] - self.radius) and (self.pos[1] < wall.pos[1] + wall.height + self.radius): self.isDead = True return True return False # ------------------------------------------------------ def check_if_reach_goal(self): global oneReachedGoal if dis(self.pos, goal.pos) < self.radius: self.reachedGoal = True oneReachedGoal = True # self.isDead = True return True return False # ------------------------------------------------------ def update(self): if not (self.isDead or self.reachedGoal): self.check_if_reach_goal() self.check_if_out() if withWalls: for wall in walls: self.check_if_hit_wall(wall) if not (self.isDead or self.reachedGoal): self.move() # ------------------------------------------------------ def calc_fitness(self): # if self.reachedGoal: # self.fitness = 10000 + (1/((0.001*(self.brain.steps-1.5*minStepsLimit))**6))*((10000/self.angleChanges)**3) # else: # self.fitness = (100 / ((0.01*(dis(self.pos, goal.pos)))**4 + 0.05)) + 100/self.angleChanges if not oneReachedGoal: self.fitness = (1000 / ((0.01 * (dis(self.pos, goal.pos))) ** 4 + 0.05)) + 100 / self.angleChanges else: if not self.reachedGoal: self.fitness = (1000 / ((0.1 * (dis(self.pos, goal.pos))) ** 4)) /(3*max(0.5, self.brain.steps - minSteps)) / (max(0.9, self.angleChanges - minAnglesChange)) else: self.fitness = 10**5 / (3 * max(1, self.brain.steps - minSteps)) / (max(1, 
self.angleChanges - minAnglesChange)) # ------------------------------------------------------ def get_baby(self): baby=Dot() baby.brain = self.brain.clone() return baby # ------------------------------------------------------ def duplicate(self, dupl): self.brain.size = dupl.brain.size for i in range(self.brain.size): self.brain.directions[i] = dupl.brain.directions[i] # ------------------------------------------------------------------ # ------------------------------------------------------------------ # ------------------------------------------------------------------ class Population: def __init__(self, num): self.num = num self.dotsList = [] self.fitnessSum = 0 self.gen = 1 self.minSteps = 1000 self.minAnglesChange = 10**7 self.plusStepsAllowed = 30 for i in range(self.num): d = Dot() self.dotsList.append(d) # ------------------------------------------------------ def show(self): if showAll: for i in range(self.num): self.dotsList[i].show() self.dotsList[0].show() # ------------------------------------------------------ def update(self): for i in range(self.num): if self.dotsList[i].brain.steps > self.minSteps + self.plusStepsAllowed: self.dotsList[i].isDead = True else: self.dotsList[i].update() # ------------------------------------------------------ def calc_min_steps(self): global minSteps for i in range(self.num): if self.dotsList[i].reachedGoal and self.dotsList[i].brain.steps < self.minSteps: self.minSteps = self.dotsList[i].brain.steps minSteps = self.minSteps # ------------------------------------------------------ def calc_min_angle_change(self): global minAnglesChange firstCalc = True for i in range(self.num): if self.dotsList[i].reachedGoal and self.dotsList[i].brain.steps == self.minSteps and firstCalc: firstCalc = False self.minAnglesChange = self.dotsList[i].angleChanges elif self.dotsList[i].reachedGoal and self.dotsList[i].brain.steps == self.minSteps and self.dotsList[i].angleChanges < self.minAnglesChange: self.minAnglesChange = 
self.dotsList[i].angleChanges minAnglesChange = self.minAnglesChange # ------------------------------------------------------ def calc_fitness(self): for i in range(self.num): self.dotsList[i].calc_fitness() # ------------------------------------------------------ def allAreDead(self): for i in range(self.num): if (not self.dotsList[i].isDead) and (not self.dotsList[i].reachedGoal): return False return True # ------------------------------------------------------ def calc_fitness_sum(self): self.fitnessSum = 0 for i in range(self.num): self.fitnessSum += self.dotsList[i].fitness # ------------------------------------------------------ def get_parents(self): r = random.random()*self.fitnessSum fitTempSum = 0 i = 0 while(fitTempSum < r): fitTempSum += self.dotsList[i].fitness i += 1 return self.dotsList[i-1] # ------------------------------------------------------ def get_best_dot(self): global maxFitness global minSteps global bestAnglesChange global bestUnitsInGens maxFit = 0 bestDotIndex = 0 for i in range(self.num): if self.dotsList[i].fitness > maxFit: maxFit = self.dotsList[i].fitness bestDotIndex = i self.dotsList[i].isBest = False if self.dotsList[bestDotIndex].reachedGoal: self.minSteps = self.dotsList[bestDotIndex].brain.steps maxFitness = maxFit bestAnglesChange = int(self.dotsList[bestDotIndex].angleChanges) minSteps = self.dotsList[bestDotIndex].brain.steps if(self.gen%10 == 0 or gen == 1): bestUnitsInGens.append(self.dotsList[bestDotIndex].brain.angles) return bestDotIndex # ------------------------------------------------------ def natural_selection(self): global gen newPop = Population(self.num) newPop.dotsList[0] = self.dotsList[self.get_best_dot()].get_baby() newPop.dotsList[0].isBest = True self.calc_fitness_sum() for i in range(self.num): if i > 0: parents = self.get_parents() newPop.dotsList[i].brain = parents.brain.clone() self.dotsList = newPop.dotsList self.gen += 1 gen = self.gen # ------------------------------------------------------ def 
mutate_baby(self): for i in range(self.num): if i > 0: self.dotsList[i].brain.mutate() for i in range(self.num): if i > 0: r = random.random() if r < hugeMutationRate: self.dotsList[i].brain.mutate(True) # ------------------------------------------------------ def get_new_gen(self): self.calc_min_steps() self.calc_min_angle_change() self.calc_fitness() self.calc_fitness_sum() self.natural_selection() self.mutate_baby() # ------------------------------------------------------------------ # ------------------------------------------------------------------ # ------------------------------------------------------------------ class Goal: def __init__(self, pos): self.pos = pos self.radius = 5 global gameDis def show(self): pygame.draw.circle(gameDis, (255, 0, 0), self.pos, self.radius) pop = Population(1000) goal = Goal([int(playWinWidth/2), 20]) while True: for event in pygame.event.get(): if event.type == pygame.QUIT: pygame.quit() quit() if event.type == pygame.KEYDOWN: if event.key == pygame.K_ESCAPE: pygame.quit() quit() if event.key == pygame.K_b: showAll = not showAll if event.key == pygame.K_w: withWalls = not withWalls if event.key == pygame.K_p: pass gameDis.fill((255, 255, 255)) goal.show() if withWalls: for wall in walls: wall.show() if(pop.allAreDead()): pop.get_new_gen() update_control_ban() pop.show() pop.update() pygame.display.update() clock.tick(FPS)
true
369b64e1d9558b595120012dbe625bf61b2b687b
Python
MatrixWeber/python_cleanup_directory
/ask_question_to_delete.py
UTF-8
1,905
2.78125
3
[]
no_license
import subprocess, os, platform from write_msg_to_desktop import notify from verbose import checkIfVerbose import shutil def askQuestionAndPerform(destDirection, fileName, sayYesToString = '/][??05436'): destFile = destDirection + fileName if sayYesToString in fileName: deleteOptions = 'y' else: if checkIfVerbose(): notify('Cleanup Script', 'Sure you wanna delete that file: ' + destFile + '?\nChoose option: y to delete, n to hold, o to open with a default program or r to rename file\n') deleteOptions = input("Sure you wanna delete that file: " + destFile + "?\nChoose option: y to delete, n to hold, o to open with a default program,\n r to rename file or a to delete all in a directory\n") if "yes" in deleteOptions or "y" in deleteOptions: if os.path.isdir(destFile): shutil.rmtree(destFile) else: os.remove(destFile) if checkIfVerbose(): notify('Cleanup Script', destFile + ': was moved to trash\n') elif "o" in deleteOptions or "open" in deleteOptions: if platform.system() == 'Darwin': # macOS subprocess.call(('open', destFile)) elif platform.system() == 'Windows': # Windows os.startfile(destFile) else: # linux variants subprocess.call(('xdg-open', destFile)) askQuestionAndPerform(destDirection, fileName) elif "r" in deleteOptions or "rename" in deleteOptions: if checkIfVerbose(): notify('Cleanup Script', 'Enter new file name!\n') newFileName = input("Enter new file name!\n") os.rename(destFile, destDirection + newFileName) # elif "a" in deleteOptions: # if checkIfVerbose(): # notify('Cleanup Script', 'Remove all files from dir!\n') # os.rename(destFile, destDirection + newFileName)
true
37b4b4d0f6e66e4c8123f2f12b881e3d7813c4e6
Python
jochembruins/DataProcessing
/Homework/scraper/tvscraper.py
UTF-8
4,250
3.359375
3
[]
no_license
#!/usr/bin/env python # Name: Jochem Bruins # Student number: 10578811 """ This script scrapes IMDB and outputs a CSV file with highest rated tv series. """ import re import csv from requests import get from requests.exceptions import RequestException from contextlib import closing from bs4 import BeautifulSoup TARGET_URL = "http://www.imdb.com/search/title?num_votes=5000,&sort=user_rating,desc&start=1&title_type=tv_series" BACKUP_HTML = 'tvseries.html' OUTPUT_CSV = 'tvseries.csv' def extract_tvseries(dom): """ Extract a list of highest rated TV series from DOM (of IMDB page). Each TV series entry should contain the following fields: - TV Title - Rating - Genres (comma separated if more than one) - Actors/actresses (comma separated if more than one) - Runtime (only a number!) """ # ADD YOUR CODE HERE TO EXTRACT THE ABOVE INFORMATION ABOUT THE # HIGHEST RATED TV-SERIES # NOTE: FOR THIS EXERCISE YOU ARE ALLOWED (BUT NOT REQUIRED) TO IGNORE # UNICODE CHARACTERS AND SIMPLY LEAVE THEM OUT OF THE OUTPUT. 
# extract all div's containing the 50 series series = dom.find_all("div", class_="lister-item-content") # loop for all series in list for serie in series: # extract title try: serie.title = serie.h3.a.string except: serie.title = 'no title' # extract serie rating as float try: serie.rating = float(serie.find(itemprop="ratingValue")["content"]) except: serie.rating = 'no rating' # extract genres try: serie.genres = serie.find(class_="genre").string.strip() except: serie.genres = 'no genres' # extract list of actors try: actors_list = serie.find_all("a", href=re.compile("name")) # variable to store actors serie.actors = '' # take string containing actor name and add them to string for actor in actors_list: if serie.actors == '': serie.actors += actor.string else: serie.actors += ', ' + actor.string except: serie.actors = 'no actors' # extract runtime as integer try: runtime = serie.find(class_="runtime").string.strip() serie.runtime = int(re.sub('[^0-9]', '', runtime)) except: serie.runtime = 'no runtime' return series def save_csv(outfile, tvseries): """ Output a CSV file containing highest rated TV-series. """ writer = csv.writer(outfile) writer.writerow(['Title', 'Rating', 'Genre', 'Actors', 'Runtime']) # loop to write all extracted information in csv file for serie in tvseries: writer.writerow([serie.title, serie.rating, serie.genres, serie.actors, serie.runtime]) def simple_get(url): """ Attempts to get the content at `url` by making an HTTP GET request. 
If the content-type of response is some kind of HTML/XML, return the text content, otherwise return None """ try: with closing(get(url, stream=True)) as resp: if is_good_response(resp): return resp.content else: return None except RequestException as e: print('The following error occurred during HTTP GET request to {0} : {1}'.format(url, str(e))) return None def is_good_response(resp): """ Returns true if the response seems to be HTML, false otherwise """ content_type = resp.headers['Content-Type'].lower() return (resp.status_code == 200 and content_type is not None and content_type.find('html') > -1) if __name__ == "__main__": # get HTML content at target URL html = simple_get(TARGET_URL) # save a copy to disk in the current directory, this serves as an backup # of the original HTML, will be used in grading. with open(BACKUP_HTML, 'wb') as f: f.write(html) # parse the HTML file into a DOM representation dom = BeautifulSoup(html, 'html.parser') # extract the tv series (using the function you implemented) tvseries = extract_tvseries(dom) # write the CSV file to disk (including a header) with open(OUTPUT_CSV, 'w', newline='') as output_file: save_csv(output_file, tvseries)
true
eebb7f782ba360f65d065eef86db2450f71874da
Python
thomaszhouan/CodeIT2018
/codeitsuisse/routes/twodinosaurs.py
UTF-8
2,717
2.671875
3
[]
no_license
import logging import numpy as np from flask import request, jsonify from codeitsuisse import app logger = logging.getLogger(__name__) def solve_dp(A): mod = 100000123 maxn = 400010 dp = np.zeros(maxn, dtype=np.int32) dp[0] = 1 cur = 0 for a in A: cur += a dp_new = np.copy(dp) dp_new[a:cur+1] += dp[:cur-a+1] dp_new %= mod tmp = dp_new dp_new = dp dp = tmp # print(dp[:20]) # for i in range(cur,-1,-1): # j = i - a # if j < 0: # break # dp[i] = (dp[i]+dp[j]) % mod return dp def solve_dino(n, A, B, q): a = solve_dp(A) b = solve_dp(B) return a, b @app.route('/two-dinosaurs', methods=['POST']) def dinosaur(): data = request.get_json() # logging.info("data sent for evaluation {}".format(data)) N = data["number_of_types_of_food"] listA = data["calories_for_each_type_for_raphael"] listB = data["calories_for_each_type_for_leonardo"] Q = data["maximum_difference_for_calories"] mod = 100000123 # if N > 150: # return jsonify({"EMPTY": "EMPTY"}) p1, p2 = solve_dino(N, listA, listB, Q) l2 = len(p2) l1 = len(p1) # if False: # old # p1 = np.poly1d([1]) # for a in listA: # pt = np.zeros(a+1) # pt[0] = 1 # pt[a] = 1 # p1 = np.fmod(np.polymul(p1, pt), mod) # this is an np array, not np.poly1d # # print(p1) # p2 = np.poly1d([1]) # for b in listB: # ps = np.zeros(b+1) # ps[0] = 1 # ps[b] = 1 # p2 = np.fmod(np.polymul(p2, ps), mod) # # print(p2) # # sliding windo # l1 = p1.size # l2 = p2.size # if l1 < l2: # p3 = np.zeros(l2) # p3[-l1:] = p1 # p1 = p2 # p2 = p3 # l1 = p1.size # elif l1 > l2: # p3 = np.zeros(l1) # p3[-l2:] = p2 # p2 = p3 # l2 = p2.size # print(p1) # print(p2) # preprocessing, so that first term of l1 is not zero answer = 0 sum = 0 ub = min(l2 - 1, Q) lb = 0 for j in range(lb, ub + 1): sum = (sum + p2[j]) % mod answer = sum * p1[0] % mod # print(sum) for index in range(1,l1): if index - Q > 0: # update left sum = (mod + sum - p2[index - Q - 1]) % mod if index + Q < l1: # update right sum = (sum + p2[index + Q]) % mod # print(sum) temp = p1[index] * sum % mod answer = 
(temp+answer)%mod return jsonify({"result":int(answer)})
true
52668a2fdbf22a75b0b005b9ee7c6f81ad0b659b
Python
bonangrs/Bonang-Respati-S_I0320018_Wildan_Tugas4
/I0320018_soal2_tugas4.py
UTF-8
227
3.765625
4
[]
no_license
import math bil1 = int(input("Masukkan bilangan pertama: ")) bil2 = int(input("Masukkan bilangan kedua: ")) hasil = math.floor(bil2 / bil1) print("Angka", bil2, "dapat dibagi menjadi angka", bil1, "sebanyak", hasil, "kali.")
true
273a23793f38173a41b155b9ceef808d686dbaae
Python
tillahoffmann/nonpoisson-dynamics
/distributions.py
UTF-8
4,712
3.6875
4
[ "MIT" ]
permissive
""" This file builds on the C extension _distributions.c and contains python classes that are used to evaluate PDFs and CDFs of distributions and draw samples from a range of distributions. The following distributions are currently implemented - Exponential (http://en.wikipedia.org/wiki/Exponential_distribution) - Lognormal (http://en.wikipedia.org/wiki/Lognormal_distribution) - Gamma (http://en.wikipedia.org/wiki/Gamma_distribution) - Pareto (http://en.wikipedia.org/wiki/Pareto_distribution) - Rayleigh (http://en.wikipedia.org/wiki/Rayleigh_distribution) - Uniform (http://en.wikipedia.org/wiki/Uniform_distribution_(continuous)) Other distributions can be added easily by implementing the following template class mydistribution: def __init__(self, param1, param2): #Initialize the parameters characterizing the distribution self.param1 = param1 self.param2 = param2 def rvs(self): #Sample the distribution return draw_a_sample def pdf(self, x): #Evaluate the PDF at x return evalue_the_pdf def cdf(self, x): #Evaluate the CDF at x return evaluate_the_cdf """ from random import Random import numpy as np import scipy.special as special import _distributions random = Random() class exponential: def __init__(self, mean): self.mean = mean def rvs(self): return random.expovariate(1 / self.mean) def pdf(self, x): return np.exp(-x / self.mean) / self.mean def cdf(self, x): return 1 - np.exp(-x / self.mean) class lognormal: """ Lognormal distribution with mean 'exp(mu + sigma ** 2 / 2)' and variance 'mean ** 2 * (exp(sigma ** 2) - 1)'. """ def __init__(self, mu, sigma): self.mu = mu self.sigma = sigma def pdf(self, x): return _distributions.lognormal_pdf(self.mu, self.sigma, x) def cdf(self, x): return _distributions.lognormal_cdf(self.mu, self.sigma, x) def rvs(self): return random.lognormvariate(self.mu, self.sigma) @staticmethod def from_moments(mean, std): """ This function creates a lognormal distribution with given mean and standard deviation. 
""" mean = float(mean) #Cast to floating point to avoid integer division sigma2 = np.log(1 + (std / mean) ** 2) mu = np.log(mean) - .5 * sigma2 return lognormal(mu, np.sqrt(sigma2)) class gamma: def __init__(self, alpha, beta): self.alpha = alpha self.beta = beta def rvs(self): return random.gammavariate(self.alpha, self.beta) def pdf(self, x): return _distributions.gamma_pdf(self.alpha, self.beta, x) def cdf(self, x): return special.gammainc(self.alpha, x / self.beta) def __str__(self): return str.format("gamma: alpha = {0}; beta = {1}", self.alpha, self.beta) @staticmethod def from_moments(mean, std): """ This function creates a gamma distribution with given mean and standard deviation. """ return gamma((mean / std) ** 2, std ** 2 / mean) class pareto: def __init__(self, k, alpha): self.k = k self.alpha = alpha def rvs(self): c = random.random() return self.k * (1 - c) ** (-1. / self.alpha) def pdf(self, x): return 0 if x < self.k else self.alpha * self.k ** self.alpha * \ x ** -(self.alpha + 1) def cdf(self, x): return 0 if x < self.k else 1 - (self.k / x) ** self.alpha @staticmethod def from_moments(mean, std): """ This function creates a Pareto distribution with given mean and standard deviation. """ var = std ** 2 return pareto(mean + var / mean - std * np.sqrt(1 + var / mean ** 2), 1 + np.sqrt(1 + mean ** 2 / var)) class rayleigh: def __init__(self, sigma): self.sigma = sigma def pdf(self, x): return x / self.sigma ** 2 * np.exp(-.5 * (x / self.sigma) ** 2) def cdf(self, x): return 1 - np.exp(-.5 * (x / self.sigma) ** 2) def rvs(self): return self.sigma * np.sqrt(-2 * np.log(random.random())) class uniform: def __init__(self, lower, upper): self.lower = lower self.upper = upper def rvs(self): return self.lower + (self.upper - self.lower) * random.random() def pdf(self, x): return 1. 
/ (self.upper - self.lower) if self.lower <= x <= self.upper else 0 def cdf(self, x): if x > self.upper: return 1 elif x < self.lower: return 0 else: return (x - self.lower) / (self.upper - self.lower)
true
71cad161a07621815bccc275f0c109407da176f7
Python
bluekitchen/btstack-packet-log
/btstack-packet-log.py
UTF-8
3,554
2.578125
3
[ "BSD-2-Clause" ]
permissive
# # convert log output to PacketLogger format and open in PacketLogger(mac) or Wireshark # import sublime import sublime_plugin import re import sys import time import os import tempfile import subprocess default_date="2001-01-01" default_hours = 12 packet_counter = 0 last_time = default_date + " " + str(default_hours) + ":00:00.000" def chop(line, prefix): if line.startswith(prefix): return line[len(prefix):] return None def str2hex(value): if value: return int(value, 16) return None def arrayForNet32(value): return bytearray([value >> 24, (value >> 16) & 0xff, (value >> 8) & 0xff, value & 0xff]) def generateTimestamp(t): global last_time global packet_counter # use last_time if time missing for this entry if not t: t = last_time if t: last_time = t # check for date parts = t.split(' ') have_date = True if len(parts) == 1: # only time, prepend fixed date have_date = False t = "2000-01-01 " + t; # handle ms try: (t1, t2) = t.split('.') if t1 and t2: t_obj = time.strptime(t1, "%Y-%m-%d %H:%M:%S") tv_sec = int(time.mktime(t_obj)) if not have_date: # start at 12:00 tv_sec += 12*60*60 tv_usec = int(t2) * 1000 return (tv_sec, tv_usec) except ValueError: # print 'Cannot parse time', t pass packet_counter += 1 return (packet_counter, 0) def dumpPacket(fout, timestamp, type, data): length = 9 + len(data) (tv_sec, tv_usec) = generateTimestamp(timestamp) fout.write(arrayForNet32(length)) fout.write(arrayForNet32(tv_sec)) fout.write(arrayForNet32(tv_usec)) fout.write(bytearray([type])) fout.write(data) def handleHexPacket(fout, timestamp, type, text): try: data = bytearray(list(map(str2hex, text.strip().split()))) dumpPacket(fout, timestamp, type, data) except TypeError: print('Cannot parse hexdump', text.strip()) def processText(text, fout): packet_counter = 0 line_conter = 0 for line in text.splitlines(): try: line_conter += 1 timestamp = None # strip newlines line = line.strip("\n\r") # skip empty lines if len(line) == 0: continue parts = re.match('\[(.*)\] (.*)', line) if 
parts and len(parts.groups()) == 2: (timestamp, line) = parts.groups() rest = chop(line,'CMD => ') if rest: handleHexPacket(fout, timestamp, 0, rest) continue rest = chop(line,'EVT <= ') if rest: handleHexPacket(fout, timestamp, 1, rest) continue rest = chop(line,'ACL => ') if rest: handleHexPacket(fout, timestamp, 2, rest) continue rest = chop(line,'ACL <= ') if rest: handleHexPacket(fout, timestamp, 3, rest) continue rest = chop(line,'SCO => ') if rest: handleHexPacket(fout, timestamp, 8, rest) continue rest = chop(line,'SCO <= ') if rest: handleHexPacket(fout, timestamp, 9, rest) continue rest = chop(line,'LOG -- ') if rest: line = rest dumpPacket(fout, timestamp, 0xfc, line.encode('ascii')) except: print("Error in line %u: '%s'" % (line_conter, line)) class BtstackPacketLogCommand(sublime_plugin.TextCommand): def run(self, edit): # get window content content = self.view.substr(sublime.Region(0, self.view.size())) # create temp file outfile = tempfile.NamedTemporaryFile(mode='w+b', suffix='.pklg') # create pklg from window content processText(content, outfile) # try to open pklg in PacketLogger (mac) or Wireshark (other) if sublime.platform() == "osx": subprocess.call(["open", outfile.name]) else: subprocess.call(["wireshark", outfile.name])
true
83caff062fb1fea24bb4f821ac398c3b9fe107d9
Python
HydraPhantasm/JokerBot
/cogs/caption.py
UTF-8
1,511
2.71875
3
[ "MIT" ]
permissive
from discord.ext import commands import discord from captionbot import CaptionBot import functools class Caption(commands.Cog): def __init__(self, client): self.client = client @commands.cooldown(1, 2, commands.BucketType.user) @commands.command(pass_context=True) async def caption(self, ctx, *, text: str = ""): url = "" if text: url = text else: if len(ctx.message.attachments) > 0: url = ctx.message.attachments[0].url if not url: await ctx.channel.send(ctx.author.mention + " The correct syntax is: `j!caption <image-url>`.") else: def captionimage(url): c = CaptionBot() return c.url_caption(url) thing = functools.partial(captionimage, url) ptext = await self.client.loop.run_in_executor(None, thing) if ptext == "I really can't describe the picture 😳": await ctx.channel.send(ctx.author.mention+" No image was found in input.") else: embed = discord.Embed(title=ptext, color=discord.Color.dark_purple()) embed.set_author(name=str(ctx.author), icon_url=ctx.author.avatar_url) embed.set_image(url=url) embed.set_footer(text="Image captioned with captionbot.ai!") await ctx.channel.send(embed=embed) def setup(client): client.add_cog(Caption(client))
true
65dbdae557497a53663d4b0ec50dfb2740157e86
Python
sitting-cat/intelligentSystemTraining
/logisticRegression_template.py
UTF-8
4,475
3.0625
3
[]
no_license
# -*- coding: utf-8 -*- import numpy as np import data import matplotlib.pylab as plt import classifier_template as classifier import pdb #------------------- # クラスの定義始まり class logisticRegression(classifier.basic): #------------------------------------ # 1) 学習データおよびモデルパラメータの初期化 # x: 学習入力データ(入力ベクトルの次元数×データ数のnumpy.array) # t: one-hot学習カテゴリデータ(カテゴリ数×データ数のnumpy.array) # batchSize: 学習データのバッチサイズ(スカラー、0の場合は学習データサイズにセット) def __init__(self, x, t, batchSize=0): # デフォルト初期化 self.init(x,t,batchSize) # モデルパラメータをランダムに初期化 xDim = x.shape[0] tDim = t.shape[0] self.W = np.random.normal(0.0, pow(xDim, -0.5), (xDim, tDim)) self.b = np.random.normal(0.0, pow(tDim, -0.5), (tDim, 1)) #------------------------------------ #------------------------------------ # 2) ソフトマックスの計算 # x: カテゴリ数×データ数のnumpy.array def softmax(self,x): # x-max(x):expのinfを回避するため e = np.exp(x-np.max(x)) return e/np.sum(e,axis=0) #------------------------------------ #------------------------------------ # 3) 最急降下法によるパラメータの更新 # alpha: 学習率(スカラー) # printEval: 評価値の表示指示(真偽値) def update(self, alpha=0.1,printEval=True): # 次のバッチ x, t = self.nextBatch(self.batchSize) # データ数 dNum = x.shape[1] # Wの更新 predict_minus_t = self.predict(x) - t self.W -= alpha * 0 # 【wの勾配の計算】 # bの更新 self.b -= alpha * 0 # 【bの勾配の計算】 # 交差エントロピーとAccuracyを標準出力 if printEval: # 交差エントロピーの記録 self.losses = np.append(self.losses, self.loss(self.x[:,self.validInd],self.t[:,self.validInd])) # 正解率エントロピーの記録 self.accuracies = np.append(self.accuracies, self.accuracy(self.x[:,self.validInd],self.t[:,self.validInd])) print("loss:{0:02.3f}, accuracy:{1:02.3f}".format(self.losses[-1],self.accuracies[-1])) #------------------------------------ #------------------------------------ # 4) 交差エントロピーの計算 # x: 入力データ(入力ベクトルの次元数×データ数のnumpy.array) # t: one-hot学習カテゴリデータ(カテゴリ数×データ数のnumpy.array) def loss(self, x,t): crossEntropy = 0 #【交差エントロピーの計算】 return crossEntropy #------------------------------------ #------------------------------------ # 5) 事後確率の計算 # x: 
入力データ(入力ベクトルの次元数×データ数のnumpy.array) def predict(self, x): return self.softmax(np.matmul(self.W.T,x) + self.b) #------------------------------------ # クラスの定義終わり #------------------- #------------------- # メインの始まり if __name__ == "__main__": # 1)人工データの生成(簡単な場合) myData = data.artificial(300,150,mean1=[1,2],mean2=[-2,-1],mean3=[2,-2],cov=[[1,-0.8],[-0.8,1]]) # 1) 人工データの生成(難しい場合) #myData = data.artificial(300,150,mean1=[1,2],mean2=[-2,-1],mean3=[4,-2],mean3multi=[-2,4],cov=[[1,0],[0,1]]) # 2)ロジスティック回帰(2階層のニューラルネットワーク)モデルの作成 classifier = logisticRegression(myData.xTrain, myData.tTrain) # 3)学習前の事後確率と学習データの描画 myData.plotClassifier(classifier,"train",prefix="posterior_before") # 4)モデルの学習 Nite = 1000 # 更新回数 learningRate = 0.1 # 学習率 decayRate = 0.99 # 減衰率 for ite in np.arange(Nite): print("Training ite:{} ".format(ite+1),end='') classifier.update(alpha=learningRate) # 5)更新幅の減衰 learningRate *= decayRate # 6)評価 loss = classifier.loss(myData.xTest,myData.tTest) accuracy = classifier.accuracy(myData.xTest,myData.tTest) print("Test loss:{}, accuracy:{}".format(loss,accuracy)) # 7)学習した事後確率と学習データの描画 myData.plotClassifier(classifier,"train",prefix="posterior_after") #メインの終わり #-------------------
true
2cb059ec453fbbff892b6066d43933cd5efa30e9
Python
sankalpreddy1998/Classical-Algorithms
/segment tree/max-range.py
UTF-8
1,169
3.25
3
[]
no_license
# def construct(st,arr,l): # for i in range(l): # st[l+i] = arr[i] # for i in range(l-1,0,-1): # st[i] = max(st[2*i],st[2*i+1]) # def max_query(st,i,j,l): # i += l # j += l # m = -999999 # while i<j: # if i%2!=0: # m = max(m,st[i]) # i += 1 # if j%2!=0: # j -= 1 # m = max(m,st[j]) # i //= 2 # j //= 2 # return m # arr = [2, 6, 10, 4, 7, 28, 9, 11, 6, 33] # l = len(arr) # st = [0 for i in range(2*l)] # construct(st,arr,l) # print(max_query(st,6,9,l)) def construct(st,arr,l): for i in range(l): st[l+i] = arr[i] for i in range(l): st[i] = max(st[2*i],st[2*i+1]) def max_query(st,i,j,l): i += l j += l mx = -999999 while i<j: if i%2!=0: mx = max(mx,st[i]) i += 1 if j%2!=0: j -= 1 mx = max(mx,st[j]) i //= 2 j //= 2 return mx arr = [2, 6, 10, 4, 7, 28, 9, 11, 6, 33] l = len(arr) st = [0 for i in range(2*l)] construct(st,arr,l) print(max_query(st,6,9,l))
true
41a3d82c99a34b5183fdf943305bc70a7a229b83
Python
zion-ai/PythonLearn
/with mosh everybody python/nestedLoops.py
UTF-8
830
4.03125
4
[ "Apache-2.0" ]
permissive
for x in range(4): for y in range(3): print(f'({x,y})') # bu şekilde iç içe geçmiş döngüler oluşturduk print('---------------------------------') """ CHALLENGE """ """ numbers = [5,2,5,2,2] çıktı: xxxxx xx xxxxx xx xx """ # bizden kolayca x le çarpılmasını istenmiyor yoksa bu çıktıya bu yöntemle de ulaşabiliri # burada iç içe geçmiş döngülerden faydalanmamız lazım numbers = [5,2,5,2,2] for m in numbers: print('x'*m) # bu kolay yöntemi print('------------------------') # iç içe geçmiş döngüleri kullanarak yapmak for m in numbers: for x in 'x': print(x*m) print('---------------------') # Öğretmenin yaptığı çözüm: numbers = [5,2,5,2,2] for xCount in numbers: output = '' for count in range(xCount): output += 'x' print(output)
true
ed4c2f7350a30736e021ddf54d608cf04d43abbb
Python
redyelruc/BoringStuff
/StringStripper.py
UTF-8
514
3.515625
4
[]
no_license
def mysplit(strng): strng = strng.lstrip() mylist = [] word = '' for character in range(len(strng)): if strng[character].isalpha(): word = word + strng[character] else: mylist.append(word) word = '' mylist.append(word) return mylist # put your code here # print(mysplit("To be or not to be, that is the question")) print(mysplit("To be or not to be,that is the question")) print(mysplit(" ")) print(mysplit(" abc ")) print(mysplit(""))
true
f6b72bee5b698a6b008e1849b0e57ec64b17ba99
Python
the-inevitable/programming-python-book
/gui/tk-tour/config_label.py
UTF-8
274
2.703125
3
[]
no_license
from tkinter import * root = Tk() label_font = ('times', 20, 'bold') widget = Label(root, text='Trying out config') widget.config(bg='black', fg='yellow') widget.config(font=label_font) widget.config(height=3, width=20) widget.pack(expand=YES, fill=BOTH) root.mainloop()
true
f0671b4d9919eed67c2c5194d98114bb0afa312e
Python
42lan/bootcamp_python
/day00/ex01/exec.py
UTF-8
356
3.34375
3
[]
no_license
import sys def string_from_args(): x = 1 string = '' while x < len(sys.argv): string += sys.argv[x] x += 1 if x < len(sys.argv): string += ' ' return string def rev_alpha(): return (string_from_args().swapcase())[::-1] def main(): print(rev_alpha()) if __name__ == '__main__': main()
true
1b141b735c7b8071eaefc2c5e875fb19b5e80993
Python
BenettTregenna/COMP3000Proj-SNET
/pythonInterface.py
UTF-8
1,003
2.625
3
[]
no_license
import sys # creation of network config python script confFile = open("networkConf.py", "w") #header confFile.write("from mininet.topo import Topo\n") confFile.write("class MyTopo( Topo ):\n") confFile.write(" def __init__( self ):\n") #custom topology creation confFile.write(" Topo.__init__( self )\n")# initialise #------------ Add hosts -------------------- confFile.write(" HostOne = self.addHost( 'h1' )\n") confFile.write(" HostTwo = self.addHost( 'h2' )\n") #------------ Add_Switches ---------------- confFile.write(" SwitchOne = self.addSwitch( 's3' )\n") #------------ Add_routers ----------------- #------------ Add-Links ------------------- confFile.write(" self.addLink( HostOne, SwitchOne )\n") confFile.write(" self.addLink( HostTwo, SwitchOne )\n") #----------- RUN TOPO --------------------- confFile.write("topos = { 'mytopo': ( lambda: MyTopo() ) }\n") confFile.close() #responce print("pythonIF: Conf file created ...")
true
f04c50e4a89d0f19031f9035b6cf09292c7605cd
Python
Louvani/holberton-system_engineering-devops
/0x15-api/0-gather_data_from_an_API.py
UTF-8
742
3.421875
3
[]
no_license
#!/usr/bin/python3 """ 0. Gather data from an API """ import requests from sys import argv if __name__ == '__main__': employee_ID = argv[1] url = 'https://jsonplaceholder.typicode.com/todos' values = {'userId': employee_ID} tasks = requests.get(url, params=values).json() url2 = 'https://jsonplaceholder.typicode.com/users' values = {'id': employee_ID} user = requests.get(url2, params=values).json() done_tasks = [] for item in tasks: if item['completed'] is True: done_tasks.append(item['title']) print('Employee {} is done with tasks({}/{}): '.format( user[0]['name'], len(done_tasks), len(tasks))) for title in done_tasks: print('\t {}'.format(title))
true
d3835111ab0c852c6a0cc90bf43d4cbadf681086
Python
http-www-testyantra-com/Afifa_Sony
/oops_concepts/vehicle.py
UTF-8
1,626
2.796875
3
[]
no_license
class Vehicles(): vehicle_brand="suzuki" INIT=20 def __init__(self,color,chno,price,vhno,mileage): self.color=color self.chno=chno self.price=price self.vhno=vhno self.mileage=mileage def display(self): print(self.color,self.chno,self.price,self.vhno,self.mileage) def discount(self,damt=0): if damt==0: damt=self.get_damount() if damt==0: print("discont amount should be greater than 0") if damt>self.price: self.failure() print("more thaan original price") return self.price=self.discount_ded(self.price,damt) self.success() def modify(self,price="",chno=0,color="",vhno=0,mileage=0): if price!="": self.name=price elif chno!=0: self.chno=chno elif vhno!=0: self.phno=vhno elif color!="": self.email=color elif mileage!=0: self.mileage=mileage self.success() @classmethod def change_brand_name(self, cls, new): if new == "": cls.Bank_name = new self.success() @staticmethod def discount_ded(a, b): return a - b @staticmethod def get_damount(): damount = int(input("enter the amount: ")) return damount @staticmethod def success(): print() @staticmethod def failure(): print("transaction failed") car=Vehicles("black",1234,50000,"KA 25 70003",63) car.display() car.discount() Vehicles.display(car)
true
8133754a82f25118f0dcb17c568fdfd76b25bef8
Python
herzenuni/sem3-assignment1-281117-vonkuptschino
/sumsq/powsum.py
UTF-8
388
3.8125
4
[]
no_license
def digitsSum(num): num = str(num) sum = 0 for i in num: sum += int(i) ** 2 return sum def usinp(): n = int(input("input the number of digits: ")) res = [] for i in range((10 ** (n - 1)), (10 ** n)): if digitsSum(i) % 17 == 0: res.append(i) print("{}-digit nums which sum of powered digits is devisible by 17:\n {}". format(n, res)) if __name__ == "__main__": usinp()
true
0262901427143b582a3c4d71de5ba64a17673230
Python
GerardoTravesedo/fast-rcnn-object-detection
/object_detection/dataset/roi_tools.py
UTF-8
15,100
3.09375
3
[]
no_license
import itertools
import math
import random

import numpy as np

# Number of object classes (20 PASCAL VOC classes + background).
NUMBER_CLASSES = 21


def find_rois_complete(image_pixels, gt_boxes, min_rois_foreground, max_rois_background):
    """Generate a minimum number of foreground rois and a maximum number of
    background ones.

    1) Generates a few foreground rois by jittering the ground truth boxes
    2) Randomly generates background rois checking that the IoU with every
       gt box is < 0.5
    3) If the minimum number of foreground rois hasn't been reached yet,
       runs selective search at decreasing scales

    :param image_pixels: pixels from the image
    :param gt_boxes: ground truth boxes from the image
    :param min_rois_foreground: minimum number of foreground rois to find
    :param max_rois_background: maximum number of background rois to find
    :return: tuple (foreground rois, background rois) as numpy arrays, or
        None when no foreground roi could be generated (callers that unpack
        the result must handle that case).
    """
    # Initial scale for selective search; reduced to find more foreground rois.
    init_scale = 500

    # Foreground rois jittered from the ground truth boxes.
    rois_foreground = \
        find_foreground_rois_from_ground_truth_boxes(gt_boxes, image_pixels.shape)

    # Random background rois (IoU < 0.5 with every gt box).
    rois_background = \
        find_random_background_rois(gt_boxes, image_pixels.shape, 200, 10, 10, 200, 200)

    # Run selective search until we have the required number of foreground rois.
    while len(rois_foreground) < min_rois_foreground and init_scale > 100:
        print("Only {} foreground rois were generated, but {} are required. "
              "Running selective search".format(len(rois_foreground), min_rois_foreground))

        rois = find_rois_selective_search(image_pixels, scale=init_scale)

        # Classify each candidate roi as foreground or background.
        for roi in rois:
            roi_info = find_roi_labels(roi, gt_boxes)
            key = str(roi[0]) + str(roi[1]) + str(roi[2]) + str(roi[3])
            # class[0] == 1 means background (index 0 is the background class).
            if len(rois_background) < max_rois_background and roi_info["class"][0] == 1:
                rois_background[key] = roi_info
            elif roi_info["class"][0] == 0:
                rois_foreground[key] = roi_info

        # Reduce the scale so the next pass favours smaller components.
        init_scale = init_scale - 100

    if len(rois_foreground) == 0:
        # NOTE(review): this branch falls through and returns None implicitly.
        print("It couldn't find any foreground rois")
    else:
        return np.array(list(rois_foreground.values())), np.array(list(rois_background.values()))


def find_rois_selective_search(image_pixels, scale=200, sigma=0.9, min_size=10):
    """Use the selectivesearch library to find candidate rois.

    Higher scale means higher preference for larger components (k / |C|,
    where |C| is the number of pixels in the component and k is the scale).
    Smaller components are only allowed when there is a sufficiently large
    difference between neighboring components.

    :param image_pixels: pixels from the image
    :return: numpy array of deduplicated [x, y, w, h] rois
    """
    # BUG FIX: imported lazily so the module can be used (IoU, labels, ...)
    # without the heavy optional selectivesearch dependency installed.
    import selectivesearch

    img_lbl, regions = \
        selectivesearch.selective_search(image_pixels, scale=scale, sigma=sigma,
                                         min_size=min_size)

    unique_rois = {}

    # Deduplicating rois by a key built from their coordinates.
    for region in regions:
        # rect format: [x, y, w, h]
        rect = region["rect"]
        key = str(rect[0]) + str(rect[1]) + str(rect[2]) + str(rect[3])
        if key not in unique_rois:
            unique_rois[key] = rect

    return np.array(list(unique_rois.values()))


def find_foreground_rois_from_ground_truth_boxes(gt_boxes, image_shape):
    """Find foreground rois by jittering the ground truth boxes.

    1) Finds possible new values for x_min, y_min, width, height by adding
       and subtracting small fractions of the original box's dimensions
    2) Builds every combination of the new corner values
    3) Keeps only those with IoU > 0.7 with the original gt box

    Example of result:
    {'1234': {'class': one-hot, 'bbox': [1, 2, 3, 4], 'reg_target': [...]}}

    :param gt_boxes: ground truth boxes from the image
    :param image_shape: shape of the image that contains the boxes
    :return: dict keyed by the concatenated bbox fields, mapping to the roi
        information ({'class', 'bbox', 'reg_target'})
    """
    image_height_pixels = image_shape[0]
    image_width_pixels = image_shape[1]
    foreground_rois = {}

    def find_possible_coordinate_values(coordinate_value, axis_length, max_possible_value):
        # Displace the coordinate by 1/6 and 1/12 of the axis length, keeping
        # only the values that stay inside the image.
        possible_values = set()
        max_axis_displacement = axis_length // 6
        min_axis_displacement = max_axis_displacement // 2

        if not coordinate_value + max_axis_displacement > max_possible_value:
            possible_values.add(coordinate_value + max_axis_displacement)
        if not coordinate_value + min_axis_displacement > max_possible_value:
            possible_values.add(coordinate_value + min_axis_displacement)
        if coordinate_value - max_axis_displacement > 0:
            possible_values.add(coordinate_value - max_axis_displacement)
        if coordinate_value - min_axis_displacement > 0:
            possible_values.add(coordinate_value - min_axis_displacement)

        return possible_values

    for gt_box in gt_boxes:
        gt_class = gt_box["class"]
        gt_box = gt_box["bbox"]

        possible_min_x = \
            find_possible_coordinate_values(gt_box[0], gt_box[2], image_width_pixels - 1)
        possible_max_x = \
            find_possible_coordinate_values(
                gt_box[0] + gt_box[2] - 1, gt_box[2], image_width_pixels - 1)
        possible_min_y = \
            find_possible_coordinate_values(gt_box[1], gt_box[3], image_height_pixels - 1)
        possible_max_y = \
            find_possible_coordinate_values(
                gt_box[1] + gt_box[3] - 1, gt_box[3], image_height_pixels - 1)

        all_combinations = list(
            itertools.product(*[possible_min_x, possible_max_x, possible_min_y, possible_max_y]))

        for combination in all_combinations:
            # combination is (min_x, max_x, min_y, max_y) -> [x, y, w, h]
            bbox = [combination[0], combination[2],
                    combination[1] - combination[0] + 1,
                    combination[3] - combination[2] + 1]
            iou = calculate_iou(gt_box, bbox)

            if iou > 0.7:
                # Hash key built from the coordinates to prevent duplicates.
                key = str(bbox[0]) + str(bbox[1]) + str(bbox[2]) + str(bbox[3])
                foreground_rois[key] = \
                    {"bbox": np.array(bbox),
                     "class": class_string_to_index(gt_class),
                     "reg_target": np.array(find_regression_targets(gt_box, bbox))}

    return foreground_rois


def find_random_background_rois(
        gt_boxes, image_shape, number_background_rois, min_width, min_height,
        max_width, max_height):
    """Randomly generate the requested number of background rois.

    Every generated roi is checked to have IoU < 0.5 with all gt boxes; the
    class vector is always background and the regression targets are zeros.

    :param gt_boxes: list of ground truth boxes with format [x, y, w, h]
    :param image_shape: shape of the image we are generating rois for
    :param number_background_rois: number of rois to generate
    :param min_width: minimum width for the rois we generate
    :param min_height: minimum height for the rois we generate
    :param max_width: maximum width for the rois we generate
    :param max_height: maximum height for the rois we generate
    :return: dict keyed by the concatenated bbox fields, mapping to the roi
        information ({'class', 'bbox', 'reg_target'})
    """
    background_rois = {}

    while len(background_rois) < number_background_rois:
        random_height = random.randint(min_height, max_height)
        random_width = random.randint(min_width, max_width)
        # Bound the origin so the roi stays inside the image.
        random_x = random.randint(0, image_shape[1] - random_width - 1)
        random_y = random.randint(0, image_shape[0] - random_height - 1)
        random_roi = [random_x, random_y, random_width, random_height]

        ious = [calculate_iou(gt_box["bbox"], random_roi) for gt_box in gt_boxes]

        if max(ious) < 0.5:
            # Hash key built from the coordinates to prevent duplicates.
            key = str(random_roi[0]) + str(random_roi[1]) + \
                str(random_roi[2]) + str(random_roi[3])
            background_rois[key] = {"bbox": np.array(random_roi),
                                    "class": class_string_to_index("background"),
                                    "reg_target": np.zeros(4)}

    return background_rois


def calculate_iou(gt_bbox, roi_bbox):
    """Calculate intersection over union between two [x, y, w, h] boxes.

    :param gt_bbox: ground truth bbox
    :param roi_bbox: region of interest bbox
    :return: IoU as a float in [0.0, 1.0]
    """
    # Corners of the intersection box: top-left ...
    intersect_top_left_x = max(gt_bbox[0], roi_bbox[0])
    intersect_top_left_y = max(gt_bbox[1], roi_bbox[1])
    # ... and bottom-right.
    intersect_bottom_right_x = \
        min(gt_bbox[0] + gt_bbox[2] - 1, roi_bbox[0] + roi_bbox[2] - 1)
    intersect_bottom_right_y = \
        min(gt_bbox[1] + gt_bbox[3] - 1, roi_bbox[1] + roi_bbox[3] - 1)

    # +1 because two boxes overlapping on one line of pixels (one edge)
    # should not count as zero intersection.
    area_intersection = max(0, intersect_bottom_right_x - intersect_top_left_x + 1) * \
        max(0, intersect_bottom_right_y - intersect_top_left_y + 1)

    area_gt_bbox = gt_bbox[2] * gt_bbox[3]
    area_roi_bbox = roi_bbox[2] * roi_bbox[3]
    union_area = area_gt_bbox + area_roi_bbox - area_intersection

    return area_intersection / float(union_area)


def find_roi_labels(roi_bbox, gt_objects):
    """Generate labels for a given roi: a class one-hot vector and bbox
    regression targets.

    The class comes from the gt box with the highest IoU (>= 0.5). A roi
    matching no gt box is labeled background with zero targets.

    :param roi_bbox: region of interest bbox
    :param gt_objects: all the objects in the image (contains class and bbox)
    """
    max_iou = 0.5
    roi_class = None
    roi_bbox_target = np.zeros(1)

    # Find the gt object with the highest IoU with the roi.
    for gt_object in gt_objects:
        iou = calculate_iou(gt_object["bbox"], roi_bbox)
        if iou >= max_iou:
            max_iou = iou
            roi_class = gt_object["class"]
            roi_bbox_target = gt_object["bbox"]

    # If roi_bbox_target only has zeros, any() returns False.
    if roi_class and roi_bbox_target.any():
        # [tx, ty, tw, th]
        regression_targets = find_regression_targets(roi_bbox_target, roi_bbox)
        return {"bbox": np.array(roi_bbox),
                "class": class_string_to_index(roi_class),
                "reg_target": np.array(regression_targets)}

    # No gt overlap >= 0.5: background roi without regression targets.
    return {"bbox": np.array(roi_bbox),
            "class": class_string_to_index("background"),
            "reg_target": np.zeros(4)}


def find_regression_targets(gt_box, roi_bbox):
    """Regression targets for a roi given its matching gt box.

    tx = (Gx - Px) / Pw    ty = (Gy - Py) / Ph
    tw = log(Gw / Pw)      th = log(Gh / Ph)

    :param gt_box: ground truth box used to find the reg targets
    :param roi_bbox: roi box we need to find reg targets for
    :return: regression targets [tx, ty, tw, th]
    """
    tx = (gt_box[0] - roi_bbox[0]) / float(roi_bbox[2])
    ty = (gt_box[1] - roi_bbox[1]) / float(roi_bbox[3])
    tw = math.log(gt_box[2] / float(roi_bbox[2]))
    th = math.log(gt_box[3] / float(roi_bbox[3]))
    return [tx, ty, tw, th]


def class_string_to_index(class_string):
    """Convert a class name into a one-hot vector of length NUMBER_CLASSES.

    :param class_string: string representing the class name
    :raises Exception: if the class name is unknown
    """
    switcher = {
        "background": 0, "person": 1, "bird": 2, "cat": 3, "cow": 4,
        "dog": 5, "horse": 6, "sheep": 7, "aeroplane": 8, "bicycle": 9,
        "boat": 10, "bus": 11, "car": 12, "motorbike": 13, "train": 14,
        "bottle": 15, "chair": 16, "diningtable": 17, "pottedplant": 18,
        "sofa": 19, "tvmonitor": 20
    }

    class_index = switcher.get(class_string, -1)
    if class_index == -1:
        raise Exception("Invalid class " + class_string)

    classes = np.zeros(NUMBER_CLASSES)
    classes[class_index] = 1
    return classes


def class_index_to_string(class_index):
    """Convert a class index into its corresponding string value.

    :param class_index: index representing the class
    :raises Exception: if the index does not map to a known class
    """
    switcher = {
        0: "background", 1: "person", 2: "bird", 3: "cat", 4: "cow",
        5: "dog", 6: "horse", 7: "sheep", 8: "aeroplane", 9: "bicycle",
        10: "boat", 11: "bus", 12: "car", 13: "motorbike", 14: "train",
        15: "bottle", 16: "chair", 17: "diningtable", 18: "pottedplant",
        19: "sofa", 20: "tvmonitor"
    }

    class_string = switcher.get(class_index, -1)
    # BUG FIX: the original tested `class_index == -1` (the input, not the
    # lookup result) and concatenated an int into the message, so an invalid
    # index silently returned -1 instead of raising.
    if class_string == -1:
        raise Exception("Invalid class " + str(class_index))

    return class_string
true
b3568937641bf82393068a923557ec3eb10d201c
Python
openforcefield/openff-bespokefit
/openff/bespokefit/fragmentation/base.py
UTF-8
3,007
3.0625
3
[ "MIT" ]
permissive
""" Register new fragmentation methods with bespokefit """ from typing import Dict, List, Type, Union from openff.fragmenter.fragment import Fragmenter, PfizerFragmenter, WBOFragmenter from openff.bespokefit.exceptions import FragmenterError _fragmentation_engines: Dict[str, Type[Fragmenter]] = {} def register_fragmentation_engine( engine: Type[Fragmenter], replace: bool = False ) -> None: """ Register a new valid fragment engine with bespokefit. Parameters: engine: The fragment engine class that should be registered. replace: If the fragment engine should replace another registered with the same name. Raises: FragmenterError If the fragment engine is already registered or if the FragmentEngine object is not compatible. """ if not issubclass(engine, Fragmenter): raise FragmenterError( f"The {engine} fragmentation engine could not be registered " f"it must be a subclass of `openff.fragmenter.fragment.Fragmenter`." ) scheme = engine.__name__.lower() if scheme in _fragmentation_engines and not replace: raise FragmenterError( f"An fragmentation engine is already registered under the name " f"{engine.__name__}, to replace this please use " f"the `replace=True` flag." ) _fragmentation_engines[scheme] = engine def deregister_fragmentation_engine(engine: Union[Type[Fragmenter], str]) -> None: """Remove a fragmentation engine from the list of valid options. Parameters: engine: The class or name of the engine that should be removed. """ scheme = engine.lower() if isinstance(engine, str) else engine.__name__.lower() existing_type = _fragmentation_engines.pop(scheme, None) if existing_type is None: raise FragmenterError( f"The fragmentation engine {engine} was not registered with " f"bespokefit." ) def get_fragmentation_engine(engine: str) -> "FragmentationEngine": """Get the fragmentation engine class from the list of registered engines by name. 
Parameters: engine: The name of the fragment engine that should be fetched Returns: The requested fragmentation engine matching the given fragment engine name. """ fragmenter = _fragmentation_engines.get(engine.lower(), None) if fragmenter is None: raise FragmenterError( f"The fragment engine {engine} was not registered with " f"bespokefit." ) return fragmenter def list_fragmentation_engines() -> List[str]: """ Get the list of registered fragmentation engines with bespokefit. Returns: A list of the fragmentation engine classes registered. """ return list(_fragmentation_engines.keys()) register_fragmentation_engine(WBOFragmenter) register_fragmentation_engine(PfizerFragmenter) FragmentationEngine = Union[tuple(_fragmentation_engines.values())]
true
8bb377b5eeaa2c8b845e25e1d9ba3f7761bb6823
Python
AdamZhouSE/pythonHomework
/Code/CodeRecords/2332/60712/305530.py
UTF-8
65
3.046875
3
[]
no_license
# Read one integer; echo it back, except that 5 maps to 8.
value = int(input())
print(8 if value == 5 else value)
true
be9c4968a582a96742642990a09be3a229213927
Python
Ben4issi/7150626-Apprenez-la-programmation-orientee-objet-avec-Python
/exercices/p3c1_solution/contact/owlcontact.py
UTF-8
662
3.375
3
[]
no_license
"""Définit le contact par chouette.""" from contact.abstract import ContactSystem from contact.helpers import verify_adress class OwlContactSystem(ContactSystem): """Envoi un message en utilisant une chouette ! 🧙‍♂️""" def __init__(self, address): """Initialise l'adresse.""" verify_adress(address) self.address = address self.owl = "Hedwige" super().__init__() def send(self, message): """Envoi le message.""" print(f'Envoi du message "{message}" par chouette {self.owl}') def __str__(self): """Représentation.""" return f"L'addresse est '{self.address}'"
true
e9fc2e579251b65dce97135558add968314af128
Python
chtenb/fate
/fate/commandtools.py
UTF-8
3,969
3.515625
4
[ "MIT" ]
permissive
""" This module contains several base classes and decorators for creating commands. """ from logging import debug from collections import deque from inspect import isclass from .mode import Mode class Undoable: """ For some commands, we want to be able to undo them. Let us define the class Undoable for that. Note that, all commands can be made trivially undoable, by storing the document before and after applying the command. This is however not desirable for reasons of space. Therefore we leave the specific implementation of the undo method to the concrete subclasses. """ def __call__(self, doc): """Add command to the undotree and execute it.""" doc.undotree.add(self) self.do(doc) def undo(self, doc): """Undo command.""" raise NotImplementedError("An abstract method is not callable.") def do(self, doc): """ Execute command without it being added to the undotree again, e.g. for performing a redo. """ raise NotImplementedError("An abstract method is not callable.") # PROBLEM: # Suppose we want to create a compound selection which involves a the mode # of the document to change to extend mode at some point. # Then extend mode must be executed at creation time, # in order to create the intended selection. # However, this violates the principle that the document must not be # touched while only creating a command. # The solution is that compositions don't return an command and thus # cannot be inspected # If this functionality is required nonetheless, # the composition must be defined in a command body # PROBLEM: # how do you know whether to wait or to proceed after executing a mode # solution: always wait, if you need a mode to change behaviour of further commands # you should do it differently. Modes are meant to change the way that userinput is # processed. If you need to switch between behaviours of certain commands (like head/tail # selection) you should toggle a bool somewhere. 
def compose(*subcommands, docs=''): """ In order to be able to conveniently chain commands, we provide a function that composes a sequence of commands into a single command. The undoable subcommands will be undoable as a whole. """ # We need to define a new class for each composition # This is because compounds have state, so each execution is in fact a creation of a # new object. class Compound: def __init__(self, doc, callback=None): self.subcommands = subcommands self.callback = callback self.todo = deque(self.subcommands[:]) doc.undotree.start_sequence() self.proceed(doc) def proceed(self, doc): """ This function gets called when a submode finishes, as it is passed as a callback function to submodes. """ while self.todo: command = self.todo.popleft() while 1: debug('Subcommand: {}'.format(command)) # Pass ourselves as callback when executing a mode if isinstance(command, Mode): command(doc, callback=self.proceed) return # Also pass ourselves as callback when executing a Compound, # since this compound may contain a mode if isclass(command) and issubclass(command, Compound): command(doc, callback=self.proceed) return result = command(doc) if not callable(result): break command = result # Now we are completely finished doc.undotree.end_sequence() if self.callback: self.callback(doc) Compound.__docs__ = docs return Compound
true
0184bca86826ade957b46d31e2622bd4bec42fd5
Python
jnjnslab/firebase-sample
/python/add_example_data.py
UTF-8
2,688
2.96875
3
[]
no_license
"""Populate the example `cities` collection in Cloud Firestore.

Run as a script (requires a ``firebase.json`` service-account file in the
working directory).  The :class:`City` helper can be imported without
firebase_admin installed.
"""


# [START custom_class_def]
# [START firestore_data_custom_type_definition]
class City(object):
    """Plain data holder mirroring a document of the `cities` collection."""

    def __init__(self, name, state, country, capital=False, population=0,
                 regions=None):
        # BUG FIX: the original used the shared mutable default `regions=[]`;
        # a None sentinel gives every instance its own list.
        self.name = name
        self.state = state
        self.country = country
        self.capital = capital
        self.population = population
        self.regions = [] if regions is None else regions

    @staticmethod
    def from_dict(source):
        """Build a City from a Firestore document dict (optional keys allowed)."""
        city = City(source[u'name'], source[u'state'], source[u'country'])

        if u'capital' in source:
            city.capital = source[u'capital']
        if u'population' in source:
            city.population = source[u'population']
        if u'regions' in source:
            city.regions = source[u'regions']

        return city

    def to_dict(self):
        """Serialise to a dict, omitting falsy optional fields."""
        dest = {
            u'name': self.name,
            u'state': self.state,
            u'country': self.country
        }

        if self.capital:
            dest[u'capital'] = self.capital
        if self.population:
            dest[u'population'] = self.population
        if self.regions:
            dest[u'regions'] = self.regions

        return dest

    def __repr__(self):
        # Normalised from the original backslash-continued f-string, which
        # leaked source indentation into the repr output.
        return (
            f'City(name={self.name}, country={self.country}, '
            f'population={self.population}, capital={self.capital}, '
            f'regions={self.regions})'
        )
# [END firestore_data_custom_type_definition]
# [END custom_class_def]


def main():
    """Connect to Firestore and upsert the example city documents."""
    # Imported lazily so `City` is usable without the firebase_admin package.
    import firebase_admin
    from firebase_admin import credentials
    from firebase_admin import firestore

    # Use a service account
    cred = credentials.Certificate('firebase.json')
    firebase_admin.initialize_app(cred)
    db = firestore.client()

    # [START add_example_data]
    # [START firestore_data_get_dataset]
    cities_ref = db.collection(u'cities')
    cities_ref.document(u'BJ').set(
        City(u'Beijing', None, u'China', True, 21500000, [u'hebei']).to_dict())
    cities_ref.document(u'SF').set(
        City(u'San Francisco', u'CA', u'USA', False, 860000,
             [u'west_coast', u'norcal']).to_dict())
    cities_ref.document(u'LA').set(
        City(u'Los Angeles', u'CA', u'USA', False, 3900000,
             [u'west_coast', u'socal']).to_dict())
    cities_ref.document(u'DC').set(
        City(u'Washington D.C.', None, u'USA', True, 680000,
             [u'east_coast']).to_dict())
    cities_ref.document(u'TOK').set(
        City(u'Tokyo', None, u'Japan', True, 9000000,
             [u'kanto', u'honshu']).to_dict())
    # [END firestore_data_get_dataset]
    # [END add_example_data]

    print('END')


if __name__ == '__main__':
    # Guarded so importing this module performs no Firestore writes.
    main()
true
bd68d31462866fbeee6f9fb8341dfff8708e10ef
Python
RoryGlenn/CSE_20
/pa3/cipher.py
UTF-8
5,351
4.0625
4
[]
no_license
# assignment: programming assignment 3
# author: Rory Glenn
# date: 7/17/20
# file: cipher.py is a program that encrypts letters by shifting the entire
#       alphabet to the right by 3 digits
# input: give the program an input file to read such as input_file.txt
# output: writes to a new file the encrypted text and prints it to the console
# (a commented-out early draft of encrypt/decrypt/check_file was removed)


def encode(plaintext):
    """Encode text letter by letter using a Caesar cipher (shift +3).

    :param plaintext: iterable of uppercase characters
    :return: list of encoded symbols; characters outside A-Z pass through
    """
    shift = 3
    ciphertext = []
    alphabet = make_alphabet()
    length = len(alphabet)
    for char in plaintext:
        found = False
        for i in range(length):
            if char == alphabet[i]:
                # Wrap around the end of the alphabet with modulo.
                ciphertext.append(alphabet[(i + shift) % length])
                found = True
                break
        if not found:
            ciphertext.append(char)
    return ciphertext


def decode(text):
    """Decode text letter by letter using a Caesar cipher (shift -3).

    :param text: iterable of uppercase characters
    :return: list of decoded symbols; characters outside A-Z pass through
    """
    shift = -3
    plaintext = []
    alphabet = make_alphabet()
    length = len(alphabet)
    for char in text:
        found = False
        for i in range(length):
            if char == alphabet[i]:
                # Python's % keeps the index in range for negative shifts.
                plaintext.append(alphabet[(i + shift) % length])
                found = True
                break
        if not found:
            plaintext.append(char)
    return plaintext


def readfile(file_in):
    """Read all text from an open file, uppercased with newlines stripped.

    Closes the file before returning.
    """
    list_read_in = list()
    for line in file_in:
        line = line.rstrip("\n")
        for char in line:
            list_read_in.append(char.upper())
    file_in.close()
    return to_string(list_read_in)


def writefile(_list, file):
    """Write the joined symbols of *_list* to *file* and close it."""
    file.write(to_string(_list))
    file.close()


def to_string(text):
    """Join a list of characters into a single string."""
    return ''.join(text)


def make_alphabet():
    """Return a tuple of the 26 uppercase English letters."""
    alphabet = ()
    for i in range(26):
        char = i + 65  # 65 is ord('A')
        alphabet += (chr(char),)
    return alphabet


def _process(transform, in_label, out_label):
    """Read a file, apply *transform*, echo and write the result.

    BUG FIX: the original silently ignored IOError and then used the
    unopened name, crashing later; a missing input file is now reported
    and the operation skipped.
    """
    read_name = input("Please enter a file for reading: ")
    write_name = input("Please enter a file for writing: ")

    try:
        file_read = open(read_name, mode="r")
    except IOError:
        print("Error: could not find " + read_name)
        return

    # readfile() closes the input file itself.
    text = readfile(file_read)
    print(in_label)
    print(text)

    result = transform(text)
    # writefile() closes the output file itself.
    writefile(result, open(write_name, "w+"))
    print(out_label)
    print(to_string(result))


def main():
    """Interactive driver: encode or decode files until the user quits."""
    done = False
    while not done:
        print("Would you like to encode or decode the message?")
        choice = input("Type E to encode, D to decode, or Q to quit: ")

        if choice == 'E' or choice == 'e':
            _process(encode, "\nPlaintext:", "Ciphertext:")
        elif choice == 'D' or choice == 'd':
            _process(decode, "\nCiphertext:", "\nPlaintext:")
        elif choice == 'Q' or choice == 'q':
            print("\nGoodbye!")
            done = True


if __name__ == "__main__":
    # Guarded so importing this module no longer triggers interactive I/O.
    main()
true
66d770f667ed8a44128d8577090390d9edd7c5ab
Python
DanielLongo/eegML
/discriminators/convD_eeg.py
UTF-8
1,960
2.609375
3
[]
no_license
import torch.nn as nn
import torch.nn.functional as F
import torch
import numpy as np


class ConvDiscriminator(nn.Module):
    """Convolutional discriminator scoring (channels, H, W) inputs in [0, 1]."""

    def __init__(self, img_shape):
        # img_shape is (channels, height, width); width assumed equal below.
        self.img_size = img_shape[1]
        self.channels = img_shape[0]
        super(ConvDiscriminator, self).__init__()

        def discriminator_block(in_filters, out_filters, bn=True):
            """Conv -> LeakyReLU -> Dropout stack, with optional BatchNorm."""
            layers = [
                nn.Conv2d(in_filters, out_filters, [3, 5], [2, 1], padding=0),
                nn.LeakyReLU(0.2, inplace=True),
                nn.Dropout2d(0.25),
            ]
            if bn:
                layers.append(nn.BatchNorm2d(out_filters, 0.8))
            return layers

        # Seven conv stages narrowing the channel count back down to 4.
        self.model = nn.Sequential(
            *discriminator_block(self.channels, 16, bn=False),
            *discriminator_block(16, 32),
            *discriminator_block(32, 64),
            *discriminator_block(64, 32),
            *discriminator_block(32, 16),
            *discriminator_block(16, 8),
            *discriminator_block(8, 4),
        )

        # Fully-connected head over the flattened conv features.
        self.fc1 = nn.Sequential(
            nn.Linear(4 * 6 * 16, 16),
            nn.LeakyReLU(.2, inplace=True))
        self.fc2 = nn.Sequential(
            nn.Linear(16, 1),
            nn.Sigmoid())

    def forward(self, img, matching=False):
        """Score *img*; with matching=True also return the fc1 features."""
        # Promote 3-D (batch, H, W) input to a single-channel 4-D tensor.
        if len(img.shape) == 3:
            img = img.view(img.shape[0], 1, img.shape[1], img.shape[2])
        features = self.model(img)
        features = features.view(features.shape[0], -1)
        hidden = self.fc1(features)
        final = self.fc2(hidden)
        if matching:
            return hidden, final
        return final


if __name__ == "__main__":
    x = torch.ones(64, 1, 32, 32)
    d = ConvDiscriminator((1, 32, 32))
    z = d(x)
    print("Z shape", z.shape)
true
23eaea447b58c4482e5c776c2cdf71a87b6e0595
Python
karanj1994/personal-training
/python_tips.py
UTF-8
1,530
2.671875
3
[]
no_license
import os
import json
import argparse


def avg(d, key):
    """Average of element *key* of every value tuple in mapping *d*.

    ``d`` maps region fid -> (logicalsize, numberofrows); ``key`` selects
    which tuple component (0 or 1) to average.
    """
    to_compute = [rec[key] for rec in d.values()]
    return sum(to_compute) / float(len(to_compute))


def main():
    """Split oversized MapR-DB regions of the hcp_region table.

    BUG FIX: ported from Python 2 (`print val` statements, `iteritems`,
    `itervalues`) which cannot run on Python 3, and wrapped in main() so
    importing the module no longer shells out to maprcli.
    """
    # Shell out to maprcli and parse its JSON region listing.
    my_file = os.popen(
        "maprcli table region list -path /datalake/other/polarisprovider/"
        "polarisdatamovement/blue/perf/data/hcp_region -json")
    with my_file as json_data:
        d = json.load(json_data)

    # fid -> (logicalsize, numberofrows) for every region (dict comprehension).
    hbase_regions = {i["fid"]: (i["logicalsize"], i["numberofrows"])
                     for i in d["data"]}

    avg_logical_size, avg_num_rows = avg(hbase_regions, 0), avg(hbase_regions, 1)

    for key, val in hbase_regions.items():
        print(val)

    # Split regions that are both large in absolute terms and above average.
    for k, v in hbase_regions.items():
        if ((v[0] > 1000000000 and v[0] > avg_logical_size)
                or (v[1] > 100000 and v[1] > avg_num_rows)):
            print(k, " will be split")
            os.system(
                "maprcli table region split -path /datalake/other/"
                "polarisprovider/polarisdatamovement/blue/perf/data/"
                "hcp_region -fid " + k)


if __name__ == "__main__":
    main()
true
3f28b4f33a45e5a47a19a84f3012bd69f5ffa8a6
Python
lhy0807/A2CS
/Ch23/binary_tree.py
UTF-8
1,609
3.640625
4
[]
no_license
nullPtr = -1  # sentinel marking "no node" in the index-based pointers


class Node(object):
    """A tree node stored in a flat array; pointers are array indices."""

    def __init__(self):
        self.data = None          # payload value
        self.leftPtr = nullPtr    # left child index (doubles as free-list link)
        self.rightPtr = nullPtr   # right child index


class BinaryTree(object):
    """Array-backed binary search tree with an explicit free list."""

    def __init__(self, space):
        """Pre-allocate *space* nodes and chain them into a free list."""
        self.space = space
        # Robustness: with zero capacity there is no free node to hand out.
        self.freePtr = 0 if space > 0 else nullPtr
        self.rootPtr = nullPtr
        self.record = []
        for i in range(self.space):
            # Unused nodes are linked through leftPtr to form the free list.
            newNode = Node()
            newNode.leftPtr = i + 1
            self.record += [newNode]
        if self.record:
            # Terminate the free list at the last node.
            self.record[-1].leftPtr = nullPtr

    def insert(self, value):
        """Insert *value*, keeping BST order (smaller keys go left).

        Silently does nothing when no free node is available, matching the
        original behaviour.
        """
        if (self.freePtr != nullPtr):
            # Pop a node off the free list.
            newNodePtr = self.freePtr
            self.freePtr = self.record[self.freePtr].leftPtr
            self.record[newNodePtr].data = value
            self.record[newNodePtr].leftPtr = nullPtr
            self.record[newNodePtr].rightPtr = nullPtr
            if (self.rootPtr == nullPtr):
                self.rootPtr = newNodePtr
            else:
                # Walk down to the leaf position for the new value.
                thisPtr = self.rootPtr
                while (thisPtr != nullPtr):
                    prePtr = thisPtr
                    if (self.record[thisPtr].data > value):
                        isLeft = True
                        thisPtr = self.record[thisPtr].leftPtr
                    else:
                        isLeft = False  # go right
                        thisPtr = self.record[thisPtr].rightPtr
                if (isLeft):
                    self.record[prePtr].leftPtr = newNodePtr
                else:
                    # BUG FIX: the original statement was just
                    # `self.record[prePtr]` (a no-op), so right children
                    # were never linked into the tree.
                    self.record[prePtr].rightPtr = newNodePtr

    def find(self, value):
        """Return the node index holding *value*, or nullPtr if absent."""
        thisPtr = self.rootPtr
        while (thisPtr != nullPtr) \
                and (self.record[thisPtr].data != value):
            if (self.record[thisPtr].data > value):
                thisPtr = self.record[thisPtr].leftPtr
            else:
                thisPtr = self.record[thisPtr].rightPtr
        return thisPtr
true
41e1dcea661ca16589c347ae01446de67ca16084
Python
nikicat/yasd
/yasd/recvmmsg.py
UTF-8
2,057
2.71875
3
[]
no_license
import cffi def recv_mmsg(stream, sock, vlen=1000, bufsize=9000): ffi = cffi.FFI() ffi.cdef(''' typedef unsigned int socklen_t; struct msghdr { void *msg_name;/* Address to send to/receive from. */ socklen_t msg_namelen;/* Length of address data. */ struct iovec *msg_iov;/* Vector of data to send/receive into. */ size_t msg_iovlen;/* Number of elements in the vector. */ void *msg_control;/* Ancillary data (eg BSD filedesc passing). */ size_t msg_controllen;/* Ancillary data buffer length. !! The type should be socklen_t but the definition of the kernel is incompatible with this. */ int msg_flags;/* Flags on received message. */ }; struct mmsghdr { struct msghdr msg_hdr; /* Message header */ unsigned int msg_len; /* Number of received bytes for header */ }; struct iovec { void *iov_base; size_t iov_len; }; int recvmmsg(int sockfd, struct mmsghdr *msgvec, unsigned int vlen,unsigned int flags, struct timespec *timeout); ''') lib = ffi.verify(''' #include <sys/socket.h> ''', libraries=[]) MSG_WAITFORONE = 0x10000 msgs = ffi.new('struct mmsghdr[{}]'.format(vlen)) iovecs = ffi.new('struct iovec[{}]'.format(vlen)) bufs = ffi.new('char[{}][{}]'.format(vlen, bufsize)) for i, iovec in enumerate(iovecs): iovec.iov_base = bufs[i] iovec.iov_len = bufsize msgs[i].msg_hdr.msg_iov = ffi.addressof(iovec) msgs[i].msg_hdr.msg_iovlen = 1 timeout = ffi.cast('struct timespec *', 0) fd = sock.fileno() for _ in stream: retval = lib.recvmmsg(fd, msgs, vlen, MSG_WAITFORONE, timeout) if retval == -1: raise Exception('recvmmsg failed') for i in range(retval): yield ffi.string(bufs[i], msgs[i].msg_len)
true
580ca5f6c97d4d207cda76bd1a60565f16a4fcbb
Python
wxWidgets/Phoenix
/unittests/test_lib_agw_flatmenu.py
UTF-8
3,510
2.546875
3
[]
no_license
import unittest from unittests import wtc import wx import wx.lib.agw.flatmenu as FM #--------------------------------------------------------------------------- class lib_agw_flatmenu_Tests(wtc.WidgetTestCase): def setUp(self): ''' Monkey patch some methods which don't behave well without a MainLoop. We could restore them in tearDown, but there's no need because self.frame will be destroyed in tearDown. ''' super(lib_agw_flatmenu_Tests, self).setUp() self.realPushEventHandlerMethod = self.frame.PushEventHandler def MockPushEventHandler(handler): pass self.frame.PushEventHandler = MockPushEventHandler self.realPopEventHandlerMethod = self.frame.PopEventHandler def MockPopEventHandler(deleteHandler=False): pass self.frame.PopEventHandler = MockPopEventHandler def test_lib_agw_flatmenuCtor(self): self._popUpMenu = FM.FlatMenu() # First we create the sub-menu item subMenu = FM.FlatMenu() subSubMenu = FM.FlatMenu() # Create the menu items menuItem = FM.FlatMenuItem(self._popUpMenu, 20001, "First Menu Item", "", wx.ITEM_CHECK) self._popUpMenu.AppendItem(menuItem) menuItem = FM.FlatMenuItem(self._popUpMenu, 20002, "Sec&ond Menu Item", "", wx.ITEM_CHECK) self._popUpMenu.AppendItem(menuItem) menuItem = FM.FlatMenuItem(self._popUpMenu, wx.ID_ANY, "Checkable-Disabled Item", "", wx.ITEM_CHECK) menuItem.Enable(False) self._popUpMenu.AppendItem(menuItem) menuItem = FM.FlatMenuItem(self._popUpMenu, 20003, "Third Menu Item", "", wx.ITEM_CHECK) self._popUpMenu.AppendItem(menuItem) self._popUpMenu.AppendSeparator() def test_lib_agw_flatmenuOpen(self): def CreateLongPopupMenu(self): popMenu = FM.FlatMenu() sub = FM.FlatMenu() #----------------------------------------------- # Flat Menu test #----------------------------------------------- for ii in range(30): if ii == 0: menuItem = FM.FlatMenuItem(popMenu, wx.ID_ANY, "Menu Item #%ld"%(ii+1), "", wx.ITEM_NORMAL, sub) popMenu.AppendItem(menuItem) for k in range(5): menuItem = FM.FlatMenuItem(sub, wx.ID_ANY, "Sub Menu Item #%ld"%(k+1)) 
sub.AppendItem(menuItem) else: menuItem = FM.FlatMenuItem(popMenu, wx.ID_ANY, "Menu Item #%ld"%(ii+1)) popMenu.AppendItem(menuItem) return popMenu popMenu = CreateLongPopupMenu(self) fPt = self.frame.GetPosition() popMenu.Popup(wx.Point(fPt.x, fPt.y), self.frame) popMenu.Dismiss(True, True) # Clear the capture since the test won't do a normal shudown of the flatmenu cap = wx.Window.GetCapture() if cap: cap.ReleaseMouse() def test_lib_agw_flatmenuConstantsExist(self): FM.FM_OPT_IS_LCD FM.FM_OPT_MINIBAR FM.FM_OPT_SHOW_CUSTOMIZE FM.FM_OPT_SHOW_TOOLBAR def test_lib_agw_flatmenuEvents(self): FM.EVT_FLAT_MENU_DISMISSED FM.EVT_FLAT_MENU_ITEM_MOUSE_OUT FM.EVT_FLAT_MENU_ITEM_MOUSE_OVER FM.EVT_FLAT_MENU_SELECTED #--------------------------------------------------------------------------- if __name__ == '__main__': unittest.main()
true
a60ccef01881134369b60a3f176b7377f7f72d4e
Python
FitzWang/Marvel
/agg_pairwise.py
UTF-8
13,801
2.546875
3
[]
no_license
# -*- coding: utf-8 -*- """ Created on Sun Mar 21 16:52:32 2021 @author: guang """ import pandas as pd import os import matplotlib # matplotlib.use('Agg') import matplotlib.pyplot as plt import numpy as np from pathlib import Path import time import copy def ExtendOutput(pathbase,csvList): for outFile in csvList: filename = outFile.split('_') idx1 = int(filename[0].split('output')[-1]) idx2 = int(filename[1]) if idx1 != idx2: dataFrame = pd.read_csv(pathbase/outFile) dataFrame['RV'] = dataFrame['RV'].mul(-1) dataFrame['RVmean'] = dataFrame['RVmean'].mul(-1) name = outFile.split('.')[0].split('_') index1 = name[0].split('output')[1] index2 = name[1] relativeRV = str(-int(name[2])) newFileName = 'output' + index2 + '_' + index1 + '_' + relativeRV + '.csv' dataFrame.to_csv(pathbase/newFileName,index = False) def ReadAll(N,pathbase,csvList): resultAll = [] for i in range(N): outputTemp = [] for j in range(N): index1 = str(int(i/10)) + str(i%10) index2 = str(int(j/10)) + str(j%10) filename = [item for item in csvList if item.startswith('output'+index1+'_'+index2)] assert len(filename) <= 1, 'May there are duplicates files with same index' assert len(filename) >= 1, 'No output named start with this index:{},{}'.format(index1,index2) dataFrame = pd.read_csv(pathbase/filename[0]) outputTemp.append(dataFrame) resultAll.append(outputTemp) return resultAll def RVSuborder(resultList,suborderLen): RVList = [] for idx1 in range(len(resultList)): start = time.perf_counter() dfsubList = resultList[idx1] RVI = np.empty(suborderLen,dtype=float) # for all suborders VarI = np.empty(suborderLen,dtype=float) for suborderIdx in range(suborderLen): RVtmp = np.empty(N,dtype=float) vartmp = np.empty(N,dtype=float) for idx2 in range(len(dfsubList)): subdf = dfsubList[idx2].iloc[[suborderIdx]] RVtmp[idx2] = subdf['RV'].item() vartmp[idx2] = subdf['var'].item() weighti = (1/vartmp)/np.sum(1/vartmp) RVi = np.sum(RVtmp*weighti) # under assumption of independent estimator vari = 
1/np.sum(1/vartmp) RVI[suborderIdx] = RVi VarI[suborderIdx] = vari dfRVI = pd.DataFrame({"RVI":RVI,"VarI":VarI}) RVList.append(dfRVI) print(time.perf_counter()-start) return RVList def RVAggAll(RVList): RVout = np.empty(len(RVList),dtype=float) Varout = np.empty(len(RVList),dtype=float) for i in range(len(RVList)): df = RVList[i] weight = (1/df['VarI'])/(1/df['VarI']).sum() RV = (df["RVI"]*weight).sum() var = 1/(1/df['VarI']).sum() RVout[i] = RV Varout[i] = var return RVout,Varout ## First compute all suborders, then pairwise def RVAggAll2(resultList): RV = np.empty(len(resultList),dtype=float) Var = np.empty(len(resultList),dtype=float) for i in range(len(resultList)): subList = resultList[i] RVtmp = np.empty(len(subList),dtype=float) Vartmp = np.empty(len(subList),dtype=float) for j in range(len(subList)): dataFrame = subList[j] weight = 1/dataFrame['var'] weightSum = sum(weight) weightedRV = sum(dataFrame['RV']*weight/weightSum) weightedVar = 1/weightSum RVtmp[j] = weightedRV Vartmp[j] = weightedVar weight = 1/Vartmp weightSum = sum(weight) weightedRV = sum(weight*RVtmp/weightSum) weightedVar = 1/weightSum RV[i] = weightedRV Var[i] = weightedVar return RV,Var def SaveDFbySuborder(pathbase,resultList,suborderLen): pathTarget = pathbase / 'bysuborder' if not os.path.exists(pathTarget): os.makedirs(pathTarget) fileLength = len(os.listdir(pathTarget)) if fileLength == suborderLen: print('Generated suborder files exist!') else: length = len(resultList) for i in range(suborderLen): outIdx1 = np.empty(length*length,dtype=int) outIdx2 = np.empty(length*length,dtype=int) RV = np.empty(length*length,dtype=float) Var = np.empty(length*length,dtype=float) for idx1 in range(length): for idx2 in range(length): df = resultList[idx1][idx2] index = idx1*length + idx2 outIdx1[index] = idx1 outIdx2[index] = idx2 RV[index] = df['RV'].iloc[i] Var[index] = df['var'].iloc[i] dfOut = pd.DataFrame({'idx1':outIdx1,'idx2':outIdx2,'RV':RV,'var':Var}) outFileName = 'suborder_'+str(i)+'.csv' 
dfOut.to_csv(pathTarget/outFileName,index=False) return pathTarget def RVSuborder2(pathTarget,N): csvList = os.listdir(pathTarget) csvList = [filename for filename in csvList if filename.endswith('.csv') and filename.startswith('suborder')] csvList.sort() RVList = [] for file in csvList: df = pd.read_csv(pathTarget/file) RVoneOrder = np.empty(N,dtype=float) VaroneOrder = np.empty(N,dtype=float) for idx1 in range(N): dfsub = df[df['idx1']==idx1] dfsub = dfsub[dfsub['idx2']<N] weight = (1/dfsub['var'])/(1/dfsub['var']).sum() weightedRV = (weight * dfsub['RV']).sum() weightedVar = 1/(1/dfsub['var']).sum() RVoneOrder[idx1] = weightedRV VaroneOrder[idx1] = weightedVar dfoneOrder = pd.DataFrame({'RVl':RVoneOrder,'varl':VaroneOrder}) RVList.append(dfoneOrder) return RVList def RVAggAll3(RVList,N): length = len(RVList) RVout = np.empty(N,dtype=float) Varout = np.empty(N,dtype=float) for idx1 in range(N): RVtmp = np.empty(length,dtype=float) Vartmp = np.empty(length,dtype=float) for i in range(length): RVtmp[i] = RVList[i]['RVl'].iloc[idx1] Vartmp[i] = RVList[i]['varl'].iloc[idx1] weight = 1/Vartmp weightsum = np.sum(weight) weightedRV = np.sum(RVtmp*weight/weightsum) weightedVar = 1/weightsum RVout[idx1] = weightedRV Varout[idx1] = weightedVar return RVout, Varout def LeaveOneOutCorr(RVList,N,removeIdx): remainingList = copy.deepcopy(RVList) RVselected = remainingList.pop(removeIdx) RVRemaing,_ = RVAggAll3(remainingList,N) corr = np.corrcoef(RVRemaing, RVselected['RVl']) return corr[0,1] if __name__ == '__main__': #############parameters######## fileExtend = False # specify if need to extend output file (e.g. only optimize pairwise 1-0, not 0-1) saveImg = False ErrorByN = False targetFolder = 'HPC_2021_05_09_SNR200_QA' cutOffOrder = 43 N = 100 ContaminationDet = False bottomK = 20 ############################# pathbase = Path(os.getcwd()) / '..' 
/'results'/ targetFolder csvList = os.listdir(pathbase) csvList = [ filename for filename in csvList if filename.endswith('.csv')] # extend the output file if fileExtend == True: if len(csvList)>=N*N: print('Extension of files have been already done!!') else: ExtendOutput(pathbase,csvList) if not os.path.exists(pathbase / 'graph'): os.makedirs(pathbase / 'graph') csvList = os.listdir(pathbase) csvList = [ filename for filename in csvList if filename.endswith('.csv')] csvList.sort() lenSuborder = len(pd.read_csv(pathbase/csvList[0])) if ('bysuborder' in os.listdir(pathbase)): if len(os.listdir(pathbase/'bysuborder')) == lenSuborder: print('Suborder CSV files exist, no need to save') pathDFByOrder = pathbase / 'bysuborder' else: # Read all files into one 2d array print('Reading all the raw output CSV files......') start = time.perf_counter() resultAll = ReadAll(N,pathbase,csvList) print('time:{}s'.format(time.perf_counter()-start)) # Saving New CSV files by suborders start = time.perf_counter() print('Saving New CSV files by suborders......') pathDFByOrder = SaveDFbySuborder(pathbase,resultAll,lenSuborder) print('time:{}s'.format(time.perf_counter()-start)) # Deal with suborders rv start = time.perf_counter() print('Computing RV estimation for all suborders......') RVListbySuborder = RVSuborder2(pathDFByOrder,N) # Final output RVout,Varout = RVAggAll3(RVListbySuborder,N) print('time:{}s'.format(time.perf_counter()-start)) # compute correlation between using leave-one-out method if ContaminationDet: start = time.perf_counter() print('Computing correlation by leave-one-out method......') corr = map(lambda i:LeaveOneOutCorr(RVListbySuborder,N,i),range(lenSuborder)) corr = list(corr) print('time:{}s'.format(time.perf_counter()-start)) RemaingCorrIdx = np.array(corr).argsort()[bottomK:] RVListRmSmCorr = [RVListbySuborder[i] for i in RemaingCorrIdx] RVRmLowCorr,VarRmLowCorr = RVAggAll3(RVListRmSmCorr,N) # # Deal with suborders rv # RVList = RVSuborder(resultAll,lengthOut) 
# # Final output # RVout,Varout = RVAggAll(RVList) # # RVout,Varout = RVAggAll2(resultAll) RVTrue = pd.read_csv(pathbase/'RVTrue.txt')['RVTrue'].to_numpy() ### compute errors using subset of N RV estimation ### e.g, with 100 spectrum, only use 5,10,20.... spectra to compute average errors. if ErrorByN == True: Nsubs = np.linspace(2,100,40,dtype=int) MAEs = [] meanSigmas=[] for Nsub in Nsubs: start = time.perf_counter() print('Computing RV estimation for all suborders......') RVListbySuborder_Sub = RVSuborder2(pathDFByOrder,Nsub) # Final output RVoutSub,VaroutSub = RVAggAll3(RVListbySuborder_Sub,Nsub) print('time:{}s'.format(time.perf_counter()-start)) RVTrueSub = RVTrue[0:Nsub] meansubTrueRVSub = RVTrueSub#-np.mean(RVTrueSub) MAE = np.around(abs(RVoutSub-meansubTrueRVSub).mean(),decimals=2) MAEs.append(MAE) meanSigmas.append(np.sqrt(VaroutSub).mean()) pd.DataFrame({'sigma':meanSigmas,'MAE':MAEs}).to_csv(pathbase / 'graph' /'MAE_meansub.csv',index=False) fig4, ax4 = plt.subplots(figsize=(11,7)) ax4.plot(Nsubs,MAEs,'o',label = "MAE") ax4.set_xscale('log') ax4.set_yscale('log') ax4.set_xlabel("Number of Spectra Involved") ax4.set_ylabel("MAE") ax4.grid(True, color='gainsboro', linestyle='-', linewidth=0.5) ax4.set_title('MAE versus N') fig4.legend() # plot the results fig, ax = plt.subplots(figsize=(18,9)) X = np.arange(N,dtype=int) meansubTrueRV = RVTrue#-np.mean(RVTrue) ax.plot(X,meansubTrueRV,'o',label = "True Radial Velocity (mean-substracted)") turerms1 = np.around(np.sqrt(((RVout-meansubTrueRV)**2).mean()),decimals=2) ax.plot(X,RVout,'o-',label = "Total Suborders, RMSE:"+str(turerms1)) ax.fill_between(X, (RVout + 1.96*np.sqrt(Varout)), (RVout - 1.96*np.sqrt(Varout)), alpha=0.3) ax.set_xlabel("Index of epochs") ax.set_ylabel("Radial Velocity/ m/s (mean-substracted)") ax.grid(True, color='gainsboro', linestyle='-', linewidth=0.5) fig.legend() fig1, ax1 = plt.subplots(figsize=(11,7)) error = 1.96*np.sqrt(Varout) ax1.plot(X,meansubTrueRV,'-',label = "True Radial 
Velocity (mean-substracted)") turerms1 = np.around(np.sqrt(((RVout-meansubTrueRV)**2).mean()),decimals=2) ax1.errorbar(X, RVout, yerr=error,color='black',capsize=3, fmt='o',label = "Estimated Radial Velocity, RMSE:"+str(turerms1)) ax1.set_xlabel("Index of epochs") ax1.set_ylabel("Radial Velocity/ m/s (mean-substracted)") ax1.grid(True, color='gainsboro', linestyle='-', linewidth=0.5) ax1.set_title('RV estimation using all suborders') fig1.legend() if ContaminationDet: fig2, ax2 = plt.subplots(figsize=(11,7)) error = 1.96*np.sqrt(VarRmLowCorr) ax2.plot(X,meansubTrueRV,'-',label = "True Radial Velocity (mean-substracted)") turerms1 = np.around(np.sqrt(((RVRmLowCorr-meansubTrueRV)**2).mean()),decimals=2) ax2.errorbar(X, RVRmLowCorr, yerr=error,color='black',capsize=3, fmt='o',label = "Estimated Radial Velocity, RMSE:"+str(turerms1)) ax2.set_xlabel("Index of epochs") ax2.set_ylabel("Radial Velocity/ m/s (mean-substracted)") ax2.grid(True, color='gainsboro', linestyle='-', linewidth=0.5) ax2.set_title('RV estimation after removing suborders with low correlation') fig2.legend() # plot the histogram of variance using all suborders fig3, ax3 = plt.subplots(figsize=(11,7)) ax3.hist(Varout) ax3.set_title('Histogram of Variance for Each RV Estimation') if saveImg == True: fig.savefig(pathbase / 'graph' / "RVcomparison.png") fig1.savefig(pathbase / 'graph' / "RVcomparison1.png") fig3.savefig(pathbase /'graph'/'hist.png') if ContaminationDet: fig2.savefig(pathbase / 'graph' / "RVcomparison2.png") if ErrorByN: fig4.savefig(pathbase / 'graph' / "MAE_N.png")
true
2a408d2a9a172a8e1657f227b0a6cf1e9983ac6e
Python
cephcyn/MessengerGrapher
/parser.py
UTF-8
2,440
2.59375
3
[]
no_license
import pickle as pkl from collections import namedtuple from datetime import datetime, timedelta import json from urllib.request import urlopen import os.path from bs4 import BeautifulSoup from userinfo import ME, API_KEY soup = BeautifulSoup(open("messages.htm", encoding='utf8').read(), 'html.parser') Message = namedtuple("Message", ['person', 'sent_by_me', 'timestamp', 'sex']) # types: str, bool, datetime, str messages = [] if os.path.isfile("name_to_sex.pkl"): name_to_sex = pkl.load(open("name_to_sex.pkl", 'rb')) else: name_to_sex = {} def get_sex(name): if len(name.split(" ")) != 1: name = name.split(" ")[0] myKey = API_KEY url = "https://gender-api.com/get?key=" + myKey + "&name=" + name response = urlopen(url) decoded = response.read().decode('utf-8') data = json.loads(decoded) return data["gender"] for thread in soup.findAll('div', class_="thread"): #if isinstance(thread.contents[0], basestring): #people = list(map(str.strip, thread.contents[0].split(','))) #else: try: people = list(map(str.strip, thread.contents[0].split(','))) except TypeError: continue if 2 != len(people): # skip group chats for now continue person1, person2 = people person = person1 if person2 in ME else person2 # who im talking to for item in thread.contents[1:]: if item.name == "div" and item["class"][0] == "message": datestring = item.contents[0].contents[1].contents[0] try: timestamp = datetime.strptime(datestring, '%A, %B %d, %Y at %I:%M%p') except ValueError: from dateutil.parser import parse timestamp = ' '.join(datestring.split()[-1]) # remove timezone timestamp = parse(datestring) person_sending = item.contents[0].contents[0].contents[0] sent_by_me = person_sending in ME if person in name_to_sex.keys(): sex = name_to_sex[person] else: try: sex = get_sex(person) name_to_sex[person] = sex except Exception: sex = "unknown" messages.append(Message(person, sent_by_me, timestamp, sex)) pkl.dump(messages, open("messages.pkl", "wb")) pkl.dump(name_to_sex, open("name_to_sex.pkl", 'wb'))
true
ed23a63ebfd7784306f90a73b4e4053572d185f6
Python
LVO3/python-2021
/fmake.py
UTF-8
119
3.203125
3
[]
no_license
#write f = open("새파일.txt", 'w') for i in range(1, 11): data = '%dth line\n' %i f.write(data) f.close()
true
61338e611c822320a747c7d26fbfa77a385a7b79
Python
jdwdm3/CodingChallenge
/Exercise3/Algorithm.py
UTF-8
3,423
4.3125
4
[]
no_license
################################################################inp########## ## ## ## Jeremy Warden -- Gateway Blend Coding Challenge ## ## ## ########################################################################## ####### Import Section ####### import sys import itertools ############################## def worker(word): #Retrieve our list perm = itertools.permutations(word) return_list = [] #Combine our tuple's into a single word and add it to a list (easier to manage) for word in perm: return_list.append(''.join(word)) #sort our list based on following criteria return_list.sort() return return_list def readFile(file): #Open the file in read only mode try: file_object = open(file,"r") except FileNotFoundError: print("File " + file + "Does not exist, or at very least does not exist in the directory you are running this script from") #Loop through the file: #For each line, make sure there is only one word #for each word, make sure it contains no special characters #Add each word to a list #list data structure word_list = [] #Count the lines in the file for User purposes line_number = 0 for line in file_object.readlines(): #Make sure there is just one word if(len(line.split()) > 1): print("Line number: " + str(line_number) + " is invalid, must contain only one word: " + line) line_number = line_number + 1 #Line has only one word, check to make sure it doesn't contain any special characters else: #Error flag for invalid character error_flag = False for letter in line: if(letter.isdigit() == False and letter.isalpha() == False and letter != "\n"): #Error Message print("Line number: " + str(line_number) + " contains a special character: " + line) #Increment Line number we are on line_number = line_number + 1 #Set error flag to true error_flag = True #Error flag is false, we have a single word with no special characters if(error_flag == False): #increment line number line_number = line_number + 1 #remove new line character and add the word to our list 
word_list.append(line.rstrip()) print(word_list) print("Total lines read in our file: " + str(line_number)) return word_list def main(): #Ensure the Program is run with the correct command line use if(len(sys.argv) != 2): print("Invalid use of program.\nCorrect use -- Algorithm <input_file>") return #Read in each line from our input file into a list word_list = readFile(sys.argv[1]) print("\nFINAL PRODUCT\n") #Pass each word to our worker function, worker returns a complete list representing all the permutations in alphabetical order for word in word_list: print(",".join(worker(word))) ######### main()
true
dbe873de758254a420338b2563e8d5fbddc87266
Python
parksjsj9368/TIL
/ALGORITHM/PROGRAMMERS/ALGORITHM_TEST/22. 압축.py
UTF-8
664
3.328125
3
[]
no_license
# 아이디어 구상 # 앞에가 있고, 뒤에가 없어 # => 앞+뒤 append # => 앞 값 출력 # # 앞에가 있고, 뒤에도 있어 # => 뒤 값을 +1 # 결국엔, 뒤에가 없을 때까지 # => 앞+뒤 append # => 앞 값 출력 def solution(msg): dict = {} # 기본 A~Z 리스트 for i in range(65, 91): dict[chr(i)] = i - 64 answer = [] i = 0 while (i < len(msg)): j = 1 while (i+j+1 <= len(msg)) and (msg[i:i+j+1] in dict): j += 1 answer.append(dict[msg[i:i+j]]) dict[msg[i:i+j+1]] = max(dict.values()) + 1 i += j return answer
true
73a392b60ca9d20f6adf8df651dc5c9121a13ea6
Python
Caceros/Load-Forecast-using-SVR
/my_functions/scrape_weather_data.py
UTF-8
4,830
3.140625
3
[]
no_license
""" Download historical weather data using wunderground API. https://www.wunderground.com/weather/api/d/docs This script is based on @jtelszasz work. https://github.com/jtelszasz/my_energy_data_viz Basic Usage: python scrape_weather_data.py startMonth startDay startYear endMonth endDay endYear python scrape_weather_data.py 3 1 2017 3 31 2017 `key`: API key. `zmw`: a city's zmw code, see https://www.wunderground.com/weather/api/d/docs?d=autocomplete-api&MR=1 `save_path`: download data file to this path. `empty_obs_dict`: this dict structure will be reused a few times for storing data. https://www.wunderground.com/weather/api/d/docs?d=resources/phrase-glossary { 'tempm': temp in C 'wspdm': windspeed kph 'precipm': precipitation in mm 'conds': weather condition phrases } """ import sys import pandas as pd import numpy as np import datetime import urllib.request import json import copy import time key = '0f63b2474e401b5d' # zmw = '00000.1.59493' # Shenzhen, China 深圳 zmw = '00000.1.59082' # Shaoguan, China 韶关 # this dict structure will be reused a few times empty_obs_dict = {'tempm': [], 'hum': [], 'wspdm': [], 'precipm': [], 'conds': []} save_path = 'D:\Study\mierda\data\load_forecast\weather_data\\' def download_one_day_data(weather_date): ''' Download one day's weather data. Return a parsed json. ''' YYYYMMDD = weather_date.strftime('%Y%m%d') query = 'http://api.wunderground.com/api/%s/history_%s/q/zmw:%s.json' % ( key, YYYYMMDD, zmw) f = urllib.request.urlopen(query) json_string = f.read() parsed_json = json.loads(json_string) f.close() prettydate = parsed_json['history']['date']['pretty'] print(prettydate) return parsed_json def parse_weather_data(parsed_json, input_dict): ''' Returns: a timestamp list a dict using the structure of input_dict. Timestamp is corresponding to each observation. Each key contains the hourly observations for this day (a list). 
''' timestamp_list = [] obs_dict = copy.deepcopy(input_dict) obs_num = len(parsed_json['history']['observations']) for i in range(obs_num): year = int(parsed_json['history']['observations'][i]['date']['year']) month = int(parsed_json['history']['observations'][i]['date']['mon']) day = int(parsed_json['history']['observations'][i]['date']['mday']) hour = int(parsed_json['history']['observations'][i]['date']['hour']) minute = int(parsed_json['history']['observations'][i]['date']['min']) timestamp_list.append(datetime.datetime( year, month, day, hour, minute)) for obs in obs_dict: # obs are the features like temp, windspeed try: value = float(parsed_json['history']['observations'][i][obs]) except: # weather conds are strings, can't be converted to float value = parsed_json['history']['observations'][i][obs] if value == -9999: value = np.nan obs_dict[obs].append(value) return timestamp_list, obs_dict def main(): if len(sys.argv) != 7: print('Not enough date args') sys.exit(1) startMonth = int(sys.argv[1]) startDay = int(sys.argv[2]) startYear = int(sys.argv[3]) endMonth = int(sys.argv[4]) endDay = int(sys.argv[5]) endYear = int(sys.argv[6]) startDate = datetime.datetime( year=startYear, month=startMonth, day=startDay) endDate = datetime.datetime(year=endYear, month=endMonth, day=endDay) if startDate > endDate: raise Exception('Invalid date arguments.') # store all days' obs data full_timestamp_list = [] full_obs_dict = copy.deepcopy(empty_obs_dict) currentDate = startDate count = 0 # the API has a limit of 10 calls per minute while currentDate <= endDate: parsed_json = download_one_day_data(currentDate) daily_timestamp, daily_obs_dict = parse_weather_data( parsed_json, empty_obs_dict) # merge each day's data # don't use append -> [a, [b]] full_timestamp_list.extend(daily_timestamp) {full_obs_dict[obs].extend(daily_obs_dict[obs]) for obs in full_obs_dict} currentDate += datetime.timedelta(days=1) count += 1 if count == 10: count = 0 print('Pause for 60 seconds...') 
time.sleep(60) df = pd.DataFrame(full_obs_dict, index=full_timestamp_list) start_string = startDate.strftime("%Y%m%d") end_string = endDate.strftime("%Y%m%d") file_path = save_path + 'weather_data_' + start_string + '-' + end_string df.to_csv('%s.csv' % file_path) if __name__ == '__main__': main()
true
02ed06d8c1e05c241d2b2978bc4642aad49c1236
Python
yuju30/NTUML18
/hw2/HW2_logistic.py
UTF-8
2,955
2.734375
3
[]
no_license
import sys import numpy as np import pandas as pd def read_data(trainX_name,trainY_name,testX_name): trainX = pd.read_csv(trainX_name,header=0).as_matrix() #print(trainX.shape) trainX = trainX.astype(float) trainY = pd.read_csv(trainY_name,header=0).as_matrix() #print(trainY.shape) trainY = trainY.astype(int) testX = pd.read_csv(testX_name,header=0).as_matrix() #print(testX.shape) testX = testX.astype(float) return trainX,trainY,testX def normalize(X_all, X_test): mu = (sum(X_all) / X_all.shape[0]) sigma = np.std(X_all, axis=0) mu_a = np.tile(mu, (X_all.shape[0], 1)) sigma_a = np.tile(sigma, (X_all.shape[0], 1)) mu_t = np.tile(mu, (X_test.shape[0], 1)) sigma_t = np.tile(sigma, (X_test.shape[0], 1)) X_all = (X_all - mu_a) / sigma_a X_test = (X_test - mu_t) / sigma_t return X_all, X_test def random(Xtrain,Ytrain): r_list = np.array(range(0,len(Xtrain))) np.random.shuffle(r_list) Xtrain = Xtrain[r_list] Ytrain = Ytrain[r_list] return Xtrain,Ytrain def split(Xtrain,Ytrain,persent): t_size = int(floor(len(Xtrain)*persent)) return Xtrain[:t_size],Ytrain[:t_size],Xtrain[t_size:],Ytrain[t_size:] def sigmoid(z): res = 1 / (1.0 + np.exp(-z)) return res def train(Xtrain,Ytrain): lr = 0.05 (data_len,fea_num)=Xtrain.shape w = np.array([0.1]*fea_num) b = 0.1 sgra = np.array([0.0]*fea_num) sgra_b = 0.0 g=3000 while (g>=0): z = Xtrain.dot(w) + b prob = sigmoid(z) err = Ytrain.flatten() - prob dLw = - np.dot(Xtrain.T,err)/data_len dLb = - (err.sum())/data_len if(g%100==0): loss = -np.mean(Ytrain.flatten().dot(np.log(prob))+(1 - Ytrain).flatten().dot(np.log(1-prob))) print("iteration num : ",3000-g," loss = ",loss) dea_pro = np.around(prob) acc_out = np.mean(1-np.abs(Ytrain.flatten()-dea_pro)) print("iteration num : ",3000-g," accuracy = ",acc_out) #adagrad sgra = sgra + dLw**2 sgra_b = sgra_b + dLb**2 w = w - (lr*dLw)/np.sqrt(sgra) b = b - (lr*dLb)/np.sqrt(sgra_b) g=g-1 return w ,b def select(data): work = np.array([0, 1, 3, 4, 5]) data = np.concatenate((data, data[:, work] ** 
1,data[:, work] ** 2,data[:, work] ** 2.5,data[:, work] ** 3), axis = 1) return data def test(test_data,w,b,ans_filename): ans_file = open(ans_filename, "w") ans_file.write("id,label\n") z = test_data.dot(w)+b prob = sigmoid(z) res_prob = np.around(prob) for i in range(len(res_prob)): ans_str = str(i+1) + ',' + str(int(res_prob[i])) + '\n' ans_file.write(ans_str) trainX_name = sys.argv[1] trainY_name = sys.argv[2] testX_name = sys.argv[3] ans_name = sys.argv[4] (trainX_ret,trainY_ret,testX_ret) = read_data(trainX_name,trainY_name,testX_name) Xtrain = select(trainX_ret) Xtest = select(testX_ret) (Xtrain_ret,Xtest_ret) = normalize(Xtrain,Xtest) #(w_res, b_res) = train(Xtrain_ret, trainY_ret) #np.save('model.npy',(w_res, b_res)) (w,b) = np.load('model.npy') test(Xtest_ret,w,b,ans_name)
true
8f71ac65f01197ca0e08c3df87b0174abe4ebf62
Python
SAV2018/Python-BaseLevel
/BL-L4/task-4-6.py
UTF-8
1,343
4.15625
4
[]
no_license
''' Реализовать два небольших скрипта: а) бесконечный итератор, генерирующий целые числа, начиная с указанного, б) бесконечный итератор, повторяющий элементы некоторого списка, определенного заранее. Подсказка: использовать функцию count() и cycle() модуля itertools. ''' from itertools import count, cycle # а) ilist = ['one', 'two', 'three', 555] print(f'\n(a) ------------------------------------------ \n\n{ilist}') for i, item in enumerate(cycle(ilist)): if i > 13: break print(item, end=' | ') print('... That\'s enough.') # б) print('\n(b) ------------------------------------------') action = '' while action == '': try: start_num = int(input('\nEnter start number: ')) final_num = int(input('Enter final number: ')) if start_num > final_num: print('Incorrect value') else: for i in count(start_num): if i > final_num: break print(i) print('That\'s enough.') except ValueError: print('Incorrect value') action = input('\nPress Enter to continue or any key to exit...')
true
8fabd84bc2d756b00e6288c74cafb6ec841caac2
Python
webclinic017/loris-server-tmp
/ebest/minute.py
UTF-8
657
2.921875
3
[]
no_license
import os import pickle import pandas as pd """ ebest로 받은 데이터를 종목별로 구분하여 저장하는 역할 """ files = os.listdir('./data') codes = list(set([f.split('_')[0] for f in files])) cnt = 0 for code in codes: f = [f for f in files if code == f.split('_')[0]] full_df = pd.DataFrame() for pkl in f: pkl_f = open(f'./data/{pkl}', 'rb') data = pickle.load(pkl_f) if data != []: df = pd.DataFrame(data) full_df = pd.concat([full_df, df], axis=0) if len(full_df) > 0: full_df.to_csv(f'./minute/{code}.csv') print(f'{cnt} / {len(codes)} {code}') cnt += 1
true
ea8a2a573b56ba3fe18fbc81e3db50f4b651619d
Python
Xinxinatg/chess
/encoder.py
UTF-8
5,003
3.046875
3
[ "MIT" ]
permissive
from chess_types import * import copy import collections import numpy as np BOARD = ['帅', '将', '士', '仕', '相', '象', '马', '馬', '车', '車', '兵', '卒', '炮', '包'] DEFAULT = { '帅': [-1], '将': [-1], '士': [-1, -1], '仕': [-1, -1], '相': [-1, -1], '象': [-1, -1], '马': [-1, -1], '馬': [-1, -1], '车': [-1, -1], '車': [-1, -1], '兵': [-1, -1, -1, -1, -1], '卒': [-1, -1, -1, -1, -1], '炮': [-1, -1], '包': [-1, -1] } PIECE_VALUES = { '帅': 1, '将': -1, '士': 2, '仕': -2, '相': 3, '象': -3, '马': 4, '馬': -4, '车': 5, '車': -5, '兵': 6, '卒': -6, '炮': 7, '包': -7, } PIECE_VALUES_REV = { 1: '帅', -1: '将', 2: '士', -2: '仕', 3: '相', -3: '象', 4: '马', -4: '馬', 5: '车', -5: '車', 6: '兵', -6: '卒', 7: '炮', -7: '包', } MOVE = [ ('帅', 4), ('士', 4), ('士', 4), ('相', 4), ('相', 4), ('马', 8), ('马', 8), ('车', 17), ('车', 17), ('炮', 17), ('炮', 17), ('兵', 3), ('兵', 3), ('兵', 3), ('兵', 3), ('兵', 3), ] TOTAL_MOVES = sum([m[1] for m in MOVE]) class SimpleEncoder: def encode(self, board: Board): result = np.zeros((1, board.height, board.width)) for piece in board.pieces: result[0][piece.pos.row][piece.pos.col] = PIECE_VALUES[str(piece)] return result def decode(self, array) -> Board: board = Board() for row in range(len(array[0])): for col in range(len(array[0][0])): if array[0][row][col] in PIECE_VALUES_REV: ch = PIECE_VALUES_REV[array[0][row][col]] board.pieces.append(Piece.from_name(Point(row, col), ch)) return board def encode_move(self, state: GameState, move: Move): result = np.zeros(TOTAL_MOVES) index = self.move_to_index(state, move) result[index] = 1 return result def move_to_index(self, state: GameState, move: Move) -> int: index = 0 candidates = sorted( filter(lambda piece: piece.color == move.piece.color, state.board.pieces)) skip = 0 for c in candidates: if c.name() == move.piece.name(): if c.pos != move.piece.pos: skip += 1 else: break for name, step in MOVE: if name != move.piece.name(): index += step else: index += skip * step break index += move.piece.possible_positions().index(move.target) return index def 
decode_move(self, state: GameState, index: int) -> Move: start = 0 passed = collections.defaultdict(int) move_fig = None fig_num = -1 move_dir = -1 for fig, step in MOVE: if index < start + step: move_fig = fig fig_num = passed[fig] move_dir = index - start break else: start += step passed[fig] += 1 assert fig_num != -1 and move_dir != -1 return self.gen_move(state, move_fig, fig_num, move_dir) def gen_move(self, state: GameState, move_fig: str, fig_num: int, move_dir: int) -> Move: candidates = sorted( filter(lambda piece: piece.color == state.player, state.board.pieces)) encounter = 0 for piece in candidates: if piece.name() == move_fig: if encounter == fig_num: target = piece.possible_positions()[move_dir] return piece.calc_move(state.board, target) encounter += 1 return None def move_mask(self, state: GameState) -> List[int]: result = [0] * TOTAL_MOVES candidates = sorted( filter(lambda piece: piece.color == state.player, state.board.pieces)) d = collections.defaultdict(list) for piece in candidates: d[piece.name()].append(piece) d[piece.name()].sort() start = 0 for name, step in MOVE: if name in d: inc = 0 for piece in d[name]: for pos in piece.possible_positions(): next_move = piece.calc_move(state.board, pos) if pos.row >= 0 and pos.row < state.board.height and pos.col >= 0 \ and pos.col < state.board.width and next_move is not None: result[start + inc] = 1 inc += 1 del d[name] start += step return result def point_to_index(self, point): return point.row * 9 + point.col def shape(self): return (34, 1)
true
9b1b24787943cf9bd1c4934094097d0e60e616b9
Python
felipefiali/studying-in-python
/algorithms/find_numbers_that_sum.py
UTF-8
1,938
4.25
4
[]
no_license
def check_for_sum_in_two_unique_numbers(array, target):
    """Given an array of unique integers and a target T, check whether T
    is the sum of two *different* numbers from the array.

    Returns True/False.  O(n) time using a set for membership tests.
    """
    values = set(array)
    for number in values:
        diff = target - number
        # The two addends must be distinct values, so a number may not
        # pair with itself here.
        if diff != number and diff in values:
            return True
    return False


def check_for_sum_in_two_numbers(array, target):
    """Given an array of integers (duplicates allowed), check whether the
    target is the sum of two elements of the array.

    A value may pair with itself only if it occurs at least twice.
    Returns True/False.
    """
    counts = {}
    for number in array:
        counts[number] = counts.get(number, 0) + 1

    for number in counts:
        diff = target - number
        if diff in counts:
            # Pairing a value with itself requires two occurrences.
            if diff != number or counts[number] >= 2:
                return True
    return False


def check_for_sum_in_three_numbers(array, target):
    """Given an array of integers, check whether the target is the sum of
    three values drawn from the array.

    As in the original contract, values may be reused: the combination may
    be three different numbers, twice one number plus another, or three
    times the same number.  Returns True/False.

    Bug fixes versus the original:
      * the guard `if first_number < target` skipped every first addend
        >= target, wrongly rejecting e.g. array [-1, -2, 0] with
        target -3;
      * the early `continue` when the partial sum exceeded the target
        wrongly rejected combinations whose third addend is negative,
        e.g. array [5, 5, -8] with target 2.
    """
    values = set(array)
    # O(n^2) over unique values, matching the original's complexity; the
    # set lookup finds the third addend in O(1).
    for a in values:
        for b in values:
            if target - (a + b) in values:
                return True
    return False
true
09e1b9f20caaab175e3411e44047609badccd66d
Python
LalithK90/LearningPython
/privious_learning_code/OS_Handling/os.read() Method.py
UTF-8
681
4.375
4
[]
no_license
# Description # # The method read() reads at most n bytes from file desciptor fd, return a string containing the bytes read. If the end of file referred to by fd has been reached, an empty string is returned. # Syntax # # Following is the syntax for read() method − # # os.read(fd,n) # # Parameters # # fd − This is the file descriptor of the file. # # n − These are n bytes from file descriptor fd. # # Return Value # # This method returns a string containing the bytes read. # Example import os, sys # Open a file fd = os.open("f1.txt", os.O_RDWR) # Reading text ret = os.read(fd, 12) print(ret) # Close opened file os.close(fd) print("Closed the file successfully!!")
true
217e6a825add1f4baf5d12c95a37150dfa8d631a
Python
david888844/C-digos-Programaci-n
/4.Calcular la velocidad.py
UTF-8
319
3.46875
3
[]
no_license
# -*- coding: utf-8 -*- """ Created on Thu Feb 18 09:37:00 2021 @author: David Alzate """ # Calcular la velocidad # Valores a determinar d y t tf=int(input('¿cuanto tiempo?: ')) d=float(input('¿distancia recorrida?: ')) v=d/tf vkmh=(v/1000)*60*60 print('Velocidad del vehiculo(Km/h)') print(vkmh)
true
b3252eb52bad2d873346f1764e5bdad8c0b51df1
Python
hibiup/HelloSciKitLearn
/tests/test_normalization.py
UTF-8
2,995
3.21875
3
[]
no_license
from unittest import TestCase class NormalizationTest(TestCase): def test_normalization(self): ''' Normalization 又称为 scale,它的作用是让数据更集中在设定的范围内。在 ML 中数据的取值区域如果太宽会导致内涵太丰富以至于无法解释 因此需要将其转化为无量纲的纯数值以便于比较。其中最典型的是归一化处理,即将数据映射到[0,1]区间。 原理解释: https://zhuanlan.zhihu.com/p/33173246 http://www.360doc.com/content/18/0122/22/7378868_724269731.shtml Python 示例: https://blog.csdn.net/pipisorry/article/details/52247679 ''' from sklearn import preprocessing import numpy as np a = np.array([[ 10, 2.7, 3.6], [-100, 5, -2], [ 120, 20, 40]], dtype=np.float64) # 以上三组数据每组对应位置的值,比如 A1:B1:C1 分别为 10, -100, 120,分布在 超过200的区间,scale后的结果会大大地缩小这个跨度。 nor_a = preprocessing.scale(a) print(nor_a) def test_SVC_classfication(self): ''' SVM:支持向量机。用于数据分类, 支持多元分类。适用于监督学习的分类和线形回归. SVC:C-Support Vector Classification. 什么是 SVM: http://www.cnblogs.com/LeftNotEasy/archive/2011/05/02/basic-of-svm.html SciKitLearn 支持的 SVM 算法: http://sklearn.apachecn.org/cn/0.19.0/modules/svm.html https://xacecask2.gitbooks.io/scikit-learn-user-guide-chinese-version/content/sec1.4.html https://blog.csdn.net/gamer_gyt/article/details/51265347 ''' from sklearn.svm import SVC # Tools from sklearn import preprocessing import numpy as np from sklearn.cross_validation import train_test_split # For mockup classification data from sklearn.datasets.samples_generator import make_classification # For virtualizaiton import matplotlib.pyplot as plot # Mock up data X, y = make_classification(n_samples=300, n_features=2, # 两个 feature n_redundant=0, n_informative=2, random_state=22, # Random seed,固定数值导致每次输出相同(纯函数特性) n_clusters_per_class=1, scale=100) X = preprocessing.minmax_scale(X, feature_range=(-1, 1)) # plot.scatter(X[:,0], X[:,1], c=y) # 以两个 Feature 分别作为坐标轴 # plot.show() X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3) clf = SVC() clf.fit(X_train, y_train) print( clf.score(X_test, y_test) ) # 0.93. 如果不做 Normalization,score 的结果只有 0.45 左右
true
e65e8ed2cd3852e43102190bf40fe3d405406cad
Python
genkinaoko/Mac
/w.py
UTF-8
356
3.453125
3
[]
no_license
import re

# Template sentence; "__名前__"-style placeholders get filled in by the user.
text = "私の名前は__名前__です。"


def mad(mls):
    """Mad-libs style fill-in: find every "__...__" placeholder in *mls*,
    prompt the user for a replacement, and print the completed string.

    Each placeholder occurrence is replaced once, in order of appearance,
    so identical placeholders can receive different answers.
    """
    # Bug fix: re.findall always returns a list (possibly empty), never
    # None, so the original `if hint is not None:` guard was always true.
    # Iterating the (possibly empty) list directly has the intended effect.
    hints = re.findall(r"__.*?__", mls)
    for word in hints:
        q = "{}を入力".format(word)
        new = input(q)
        # Replace only the first occurrence of this placeholder.
        mls = mls.replace(word, new, 1)
    print(mls)


mad(text)
true
81f00d1b3d32995c9d6882ea160914b2e40eebc5
Python
gentle-yu/PythonProject
/Script/configuration/case_readyaml.py
UTF-8
492
2.515625
3
[]
no_license
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@File   : case_readyaml.py
@Author : gentle.yu
@Date   : 2020/8/16 16:00

Read the YAML file that sits next to this script and print both its raw
text and the parsed Python object.  (Comments translated from Chinese.)
"""
import os

import yaml

# Directory containing this script.
curpath = os.path.dirname(os.path.realpath(__file__))
# Path of the YAML file.  Bug fix: this assignment was commented out in
# the original, so the open() below raised NameError at import time.
yamlpath = os.path.join(curpath, "case_readyaml.yaml")

# Read the file; the context manager guarantees the handle is closed
# (the original never closed it).
with open(yamlpath, 'r', encoding='utf-8') as f:
    cfg = f.read()
print(type(cfg))
print(cfg)

# safe_load parses plain YAML without executing arbitrary Python tags; a
# bare yaml.load() without a Loader is deprecated and unsafe on
# untrusted input.
d = yaml.safe_load(cfg)
print(d)
print(type(d))
true
b74e1ba6d47d2a6590949add68df6bbe785a0c72
Python
melnaquib/CarND-Traffic-Sign-Classifier-Project
/scratch.py
UTF-8
826
2.75
3
[]
no_license
### Load the images and plot them here. ### Feel free to use as many code cells as needed. import os import numpy as np import cv2 examples_dir = 'examples' example_files = [os.path.join(examples_dir, filename) for filename in list(os.walk(examples_dir))[0][2]] n_imgs = len(example_files) example_images = np.zeros((n_imgs, 32, 32, 1)) for i in range(n_imgs): img = cv2.imread(example_files[i], cv2.IMREAD_GRAYSCALE) shape = img.shape scale = (shape[0] / 32, shape[1] / 32) scale = max(scale) shape = (int(shape[1] / scale), int(shape[0] / scale)) img = cv2.resize(img, shape) img = np.expand_dims(img, 3) example_images[i][:shape[1]][:shape[0]][:] = img # example_images[i][:17][:18] = img print(type(example_images[0][0])) print(example_images[0].shape) print(example_images.shape)
true
8022bd48eb7830df868c09162f1f7959605a4d75
Python
FredericoIsaac/mail_project
/word_machine.py
UTF-8
3,117
2.890625
3
[]
no_license
import corresponding_date from mailmerge import MailMerge import mammoth from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText from email.mime.image import MIMEImage import docx2txt MONTH_DICT = { 1: 'Janeiro', 2: 'Fevereiro', 3: 'Março', 4: 'Abril', 5: 'Maio', 6: 'Junho', 7: 'Julho', 8: 'Agosto', 9: 'Setembro', 10: 'Outubro', 11: 'Novembro', 12: 'Dezembro', } class WordMachine: def __init__(self, template_path: str, output_path: str, **kwargs): # Date month_year = corresponding_date.month_in_reference() self.month = month_year[0] self.year = month_year[1] self.extended_month = MONTH_DICT[self.month] # Paths self.template_path = template_path self.output_path = output_path # Data to fill fields self.company = kwargs.get("empresa", "") self.nif = str(kwargs.get("nif", "")) self.id_company = kwargs.get("id_empresa", "") def populate_word(self): """ Populate the word Document and save to a new file """ document = MailMerge(self.template_path) # Get the name of the fields in Word # fields = document.get_merge_fields() # for field in document.get_merge_fields(): # print(field) document.merge( empresa=self.company, ano_referente=str(self.year), mes_referente=str(self.extended_month), nif=str(self.nif), ) document.write(self.output_path) def word_to_html(self): """ :return: An HTML string with image of the signature Frederico Gago """ with open(self.output_path, "rb") as docx_file: result = mammoth.convert_to_html(docx_file) html = result.value # The generated HTML # messages = result.messages # Any messages, such as warnings during conversion # Add Signature: html += f"<br><br><br><br><img src='cid:logo'>" return html def message_to_mail(self): # Encapsulate the plain and HTML versions of the message body in an # 'alternative' part, so message agents can decide which they want to display. 
message = MIMEMultipart("alternative") # Get data from Word text = docx2txt.process(self.output_path) # Transform Word to HTML html = self.word_to_html() plain_text = MIMEText(text, "plain") message.attach(plain_text) html_text = MIMEText(html, "html") message.attach(html_text) # We reference the image in the IMG SRC attribute by the ID we give it <img src='cid:logo'> with open(r"C:\Users\Frederico\Desktop\Frederico Gago\Confere\Programas\mail_project\images\signature.png", "rb") as img: signature = MIMEImage(img.read()) signature.add_header("Content-ID", "<logo>") message.attach(signature) return message def subject_mail(self): return f"{self.id_company} - Saft {str(self.month).zfill(2)}-{self.year}"
true
8c9d94285a90d1bf672f415898ac2d38d6421ea5
Python
ssulav/interview-questions
/leetcode/3_longest-substring-without-repeating-characters.py
UTF-8
1,952
3.640625
4
[]
no_license
""" https://leetcode.com/problems/longest-substring-without-repeating-characters Given a string s, find the length of the longest substring without repeating characters. Example 1: Input: s = "abcabcbb" Output: 3 Explanation: The answer is "abc", with the length of 3. """ import time class Solution: def lengthOfLongestSubstring(self, s: str) -> int: word = '' words = [] mx = 0 for i in range(len(s)): found = True if s[i] in word else False if not found: word += s[i] else: words.append(word) # mx = max(mx, len(word)) c = i - s[:i:][::-1].find(s[i]) # print(s[:i:], s[c:i+1]) word = s[c:i+1] # word = s[:i:][::-1][:s[:i:][::-1].rfind(s[i])-1:][::-1] # print(f'word : "{word}" tmp : "{found}"') else: words.append(word) # mx = max(mx, len(word)) # print(words) length = len(max(words, key = len)) if words else 0 # print(length) return length # return mx class Solution2(object): def lengthOfLongestSubstring(self, s): """ :type s: str :rtype: int """ dic = {} res = 0 j = 0 for i in range(len(s)): if s[i] not in dic: dic[s[i]] = i else: j = max(j, dic[s[i]]+1) dic[s[i]] = i res = max(res, i+1-j) # print(i,j,res,dic) return res t = time.time() for _ in range(1000): s = time.time() #.25 #0.19 a = Solution() #.3 # a.lengthOfLongestSubstring("dvdf") # a.lengthOfLongestSubstring(" ") # a.lengthOfLongestSubstring("") # a.lengthOfLongestSubstring("pwwkew") 
a.lengthOfLongestSubstring("abcadefHJghjdsdjfhbjasdhyuadhasdakdghjafjdaabcadefHJghjdsdjfhbjasdhyuadhasdakdghjafjdaabcadefHJghjdsdjfhbjasdhyuadhasdakdghjafjdaabcadefHJghjdsdjfhbjasdhyuadhasdakdghjafjdaabcadefHJghjdsdjfhbjasdhyuadhasdakdghjafjdaabcadefHJghjdsdjfhbjasdhyuadhasdakdghjafjdaabcadefHJghjdsdjfhbjasdhyuadhasdakdghjafjdaabcadefHJghjdsdjfhbjasdhyuadhasdakdghjafjdaabcadefHJghjdsdjfhbjasdhyuadhasdakdghjafjdaabcadefHJghjdsdjfhbjasdhyuadhasdakdghjafjdasdasvdhasdkjhasdhnbabcadefHJghjdsdjfhbjasdhyuadhasdakdghjafjdaabcadefHJghjdsdjfhbjasdhyuadhasdakdghjafjdaabcadefHJghjdsdjfhbjasdhyuadhasdakdghjafjdaabcadefHJghjdsdjfhbjasdhyuadhasdakdghjafjdaabcadefHJghjdsdjfhbjasdhyuadhasdakdghjafjdaabcadefHJghjdsdjfhbjasdhyuadhasdakdghjafjdaabcadefHJghjdsdjfhbjasdhyuadhasdakdghjafjdaabcadefHJghjdsdjfhbjasdhyuadhasdakdghjafjdaabcadefHJghjdsdjfhbjasdhyuadhasdakdghjafjdaabcadefHJghjdsdjfhbjasdhyuadhasdakdghjafjdasdasvdhasdkjhasdhnb") # (pw, wke, kew) print(time.time() - t)
true
b159fe6e628f6a0d7f65911490dcdf6a4b455a3b
Python
tsxtypr/demo
/demo/views.py
UTF-8
2,875
2.765625
3
[]
no_license
from django.http import HttpResponse def index(request): return HttpResponse("Hello world") def about(request): return HttpResponse("这是一个about页面") def demo(request,year,mon,day): ls = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30] res = 0 if int(year) % 4 == 0 and int(year) % 100 != 0: for i in range(int(mon) - 1): res += ls[i] res += int(day) else: if int(mon) == 1: res = int(day) elif int(mon) == 2: res = int(ls[0]) + int(day) else: for i in range(int(mon) - 1): res += ls[i] res += int(day) - 1 return HttpResponse('这是{}年{}月{}日,第{}天'.format(year,mon,day,res)) from django.template import Template,Context def gethtml(request): html=""" <html> <head></head> <body> <h1>{{num}}标签</h1> <a href='https://baike.so.com/doc/4557198-4767897.html' target='_blank'> <img src='http://wx4.sinaimg.cn/large/866ceb95ly1fkt6egkj84j20sg0g0acu.jpg' title='李钟硕' alt='图片消失了'> </a> </body> </html> """ # 创建一个模板 template_obj=Template(html) # 渲染模板 dic={'num':'h'} content_obj=Context(dic) # 数据渲染 res=template_obj.render(content_obj) # 返回结果 return HttpResponse(res) # return HttpResponse(html) # 最常用版本 from django.shortcuts import render def indextmp(request): name='柯基' # 有三个参数,第一个参数要加上request return render(request,'indextmp.html',{'name':name}) from django.shortcuts import render_to_response def abc(request): # 这个可以不用写request,只有两个参数,第二个是字典 return render_to_response('abc.html') # from django.template.loader import get_template # def abc(request): # # 先获得一个模板,将网页传进来 # template=get_template('abc.html') # dic={'name':'二哈'} # res=template.render(dic) # # 返回的httpresponse # return HttpResponse(res) # def test(request): # name='Amy' # age=19 # hobby=['sing','dance','read'] # score={'math':90,'english':92,'chinese':94} # return render(request,'test.html',{'name':name,'age':age,'hobby':hobby,'score':score}) class A(object): def hello(self): return 'hello' def test(request): name='Amy' age=19 hobby=['sing','dance','read'] score={'math':90,'english':92,'chinese':94} a=A() return 
render(request,'test.html',locals()) def statictest(request): params=[ {'name':'图片1','url':'1.jpg'}, {'name': '图片2', 'url':'2.jpg'}, {'name': '图片3', 'url':'3.jpg'}, {'name': '图片4', 'url':'4.jpg'}, ] return render(request,'statictest.html',locals())
true
7ac045a96cdab44c63072b02562b33e90377381f
Python
dkatz23238/LegalDataScience1
/learning.py
UTF-8
462
2.65625
3
[]
no_license
"""Train a logistic-regression baseline on dataset.csv and print its
held-out accuracy."""
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder  # NOTE(review): imported but unused
from sklearn.linear_model import LogisticRegression
import pandas as pd

# Load the dataset; the "index" column becomes the row index.
df = pd.read_csv("dataset.csv").set_index("index")
# Features are every column except the target column "label".
train_ = df[[i for i in df.columns if i != "label"]]
labels_ = df["label"]
# Default split (75% train / 25% test), unseeded — results vary per run.
train_X, test_X, train_y, test_y = train_test_split(train_, labels_)
model = LogisticRegression()
model.fit(train_X, train_y)
# score() returns the mean accuracy on the held-out split.
print(model.score(test_X, test_y))
true
4d1f332ca051d76c1680ff245a279167a752c2a4
Python
sss6391/iot_python2019
/03_Bigdata/02_Standardization_Analysis/3. DB/2db_insert_rows.py
UTF-8
1,128
2.984375
3
[]
no_license
# Create a persistent (non-volatile) SQLite database and seed the initial
# table from an external CSV file.  (Comments translated from Korean; the
# user-facing print strings are runtime output and are kept verbatim.)
import csv
import sys
import sqlite3

input_file = sys.argv[1]  # e.g. supplier_data.csv

# For SQLite, one database maps 1:1 onto one file on disk.
con = sqlite3.connect('Suppliers.db')
c = con.cursor()
create_table = '''CREATE TABLE IF NOT EXISTS Suppliers (Supplier_Name VARCHAR(20), InVoice_Number VARCHAR(20), Part_Number VARCHAR(20), Cost FLOAT, Purchase_Date DATE); '''
c.execute(create_table)
con.commit()

print('원본데이터 근황')
# Open the CSV in a context manager so the handle is closed (the original
# opened it inline and leaked it); newline='' is the documented mode for
# the csv module.
with open(input_file, 'r', newline='') as csv_file:
    file_reader = csv.reader(csv_file, delimiter=',')
    header = next(file_reader, None)  # skip the header row
    for row in file_reader:
        # Keep only as many columns as the header declares.
        data = row[:len(header)]
        print(data)
        # Parameterized INSERT — never build SQL by string formatting.
        c.execute("INSERT INTO Suppliers VALUES (?, ?, ?, ?, ?);", data)
# One commit for the whole batch.
con.commit()

# Dump the table contents to confirm the load.
output = c.execute("SELECT * FROM Suppliers")
rows = output.fetchall()
print('테이블 레코드 현황')
for row in rows:
    out = [str(value) for value in row]
    print(out)

con.close()
true
e39448469a1a9729897b849b7352090768777aa3
Python
rmurali200/Assorted
/Sorting/MergeSort/SelectionSort.py
UTF-8
353
3.03125
3
[]
no_license
def selectionsort(A):
    """Sort the list *A* in place using selection sort and return it.

    O(n^2) comparisons: each pass swaps the smallest remaining element
    into position i.  Works for any mutually comparable elements (the
    demo below mixes ints and floats).
    """
    for i in range(len(A)):
        # Index of the smallest element in A[i:].
        min_idx = i
        for j in range(i + 1, len(A)):
            if A[j] < A[min_idx]:
                min_idx = j
        A[i], A[min_idx] = A[min_idx], A[i]
    return A


x = [4, 5, 2, 10, 1.2, 1.3, 1.8, 2.1, 2.4, 2.7, 65, 3.5, 3.9, 4.1, 4.2,
     4.3, 4.6, 4.9, 5.3, 5.7, 6.5, 7.2, 2.2, 1.8]
xSorted = selectionsort(x)
# Bug fix: `print xSorted` is Python 2 statement syntax and a
# SyntaxError under Python 3.
print(xSorted)
true
7c3b3e7785b7b732008416c379697d54a2f45115
Python
ahmetmenguc/Zoom_Poll_Analyzer
/Zoom-Poll-Analyzer-main/python_iteration1/Question.py
UTF-8
232
3.203125
3
[]
no_license
class Question:
    """A single poll question paired with its recorded answer."""

    def __init__(self, question, answer):
        # Tuple unpacking keeps the two attribute assignments together.
        self.question, self.answer = question, answer

    def get_question(self):
        """Return the question text."""
        return self.question

    def get_answer(self):
        """Return the recorded answer."""
        return self.answer
true
50928700a932a183388aed48dc149f9811fd5843
Python
muenchner/event-impact
/STEP_1_update_vstar_medians3.py
UTF-8
16,755
2.984375
3
[]
no_license
#!/usr/bin/python2.7 """Author: Mahalia Miller Date: August 10, 2011 Builds graph of D7 sensors """ import sqlite3 import string from math import cos, sin, radians, acos, pi import numpy as np def run_on_file(filename): """opens and reads file""" try: f = open(filename, "r") rows = f.read() f.close() except IOError: print "The file does not exist, exiting gracefully" rows=None yield rows def find_ids(rows, fwy): """Tokenizes string and returns list of sensors that are within specified corridor. Args: rows: a long string with some \n that indicate sensors minPM: postmile as int maxPM: postmile as int fwy: for example, 5 direction: for example, 'S' Returns: lines: list of ids in corridor with id, postmile, direction, coordinates (list), and highway as a list e.g. 715898, 117.28, 'S', ['33.880183', '-118.021787'], 5] """ MAX_PM=150 #TODO CHANGE THIS lines = [] for row in rows: strings=string.split(row,'\n')[1:] for stringI in strings: tokens=string.split(stringI,'\t') if len(tokens)>1: if int(tokens[1])==fwy and tokens[11]=='ML' and float(tokens[7])<MAX_PM: # if int(tokens[1])==fwy and tokens[11]=='ML': try: lines.append(int(tokens[0])) except ValueError: print 'bad tokens: ', tokens continue return lines def find_all_ids(rows): """Tokenizes string and returns list of sensors that are within specified corridor. Args: rows: a long string with some \n that indicate sensors minPM: postmile as int maxPM: postmile as int fwy: for example, 5 Returns: lines: list of ids in corridor with id, postmile, direction, coordinates (list), and highway as a list e.g. 
715898, 117.28, 'S', ['33.880183', '-118.021787'], 5] """ lines = [] road_list=['ML'] # road_list=['ML','HV', 'FR'] for row in rows: strings=string.split(row,'\n')[1:] for stringI in strings: tokens=string.split(stringI,'\t') if len(tokens)>1: if tokens[11] in road_list: try: lines.append([int(tokens[0]), float(tokens[7]), tokens[2], [float(tokens[8]), float(tokens[9])], int(tokens[1])]) except ValueError: print 'bad tokens: ', tokens continue # print lines return lines def parse_direction(t, dir): if t=='SB' or t=='S/B' or t=='S': dir='S' elif t=='EB' or t=='E/B' or t=='E': dir='E' elif t=='NB' or t=='N/B' or t=='N': dir='N' elif t=='WB' or t=='W/B' or t=='W': dir='W' elif t=='': pass else: pass return dir def handle_special_cases(substring): """identifies some manually-observed cases in wrong format and fixes them""" if substring==['', 'EB/NB', '2'] or substring==['EB/NB', '2', ''] or substring==['', 'EB/NB', '2', '#1'] or substring==['', 'EB/NB', '2', '#2']: dir='E' road=2 elif substring== ['', 'WB/SB', '2'] or substring==['WB/SB', '2', '']: dir='W' road=2 elif substring==['WB210', '']: dir='W' road=210 elif substring==['S', '605/W', '10', '']: dir='W' road=10 elif substring==['', 'NB5', 'TRK', 'RTE']: dir='N' road=5 elif substring==['', 'S605/E10']: dir='E' road=10 else: dir=None road=0 return dir, road def distance(lat1, lon1, lat2, lon2): """ finds the distance in miles between points""" earth_radius=3959.0 #miles if lat1==lat2 and lon1==lon2: dst=0 else: dst = acos( (sin(radians(lat1)) * sin(radians(lat2))) + (cos(radians(lat1)) * cos(radians(lat2)) * cos(radians(lon1) - radians(lon2))) ) * earth_radius return dst def nearest(coordinate, coordinate_list, limit=None): """finds nearest k points using great circle distance Args: coordinate: 2 element list with lat and lon only of reference point coordinate_list: candidate list of either coordinates or full line of a connector sensor with the 3rd item in the list the coordinates limit: number of points to return 
Returns: distances: tuple of distance and the corresponding element in coordinate_list of k closest points """ distances = [] coordinate_lat=coordinate[0] coordinate_lon=coordinate[1] for c in coordinate_list: if len(c)==5: distances.append( (distance(coordinate_lat, coordinate_lon, c[3][0], c[3][1]), c)) else: distances.append( (distance(coordinate_lat, coordinate_lon, c[0], c[1]), c)) distances.sort() if limit: return distances[:limit] return distances def toRadiant(self, distance): earth_radius=3959.0 #miles return distance / (earth_radius * 2 * pi) * 360 # #def get_readings(chosen_sensor, route, time_of_day, chosen_date, cur): # """returns the flow-weighted average speed across all lanes at one sensor # Args: # chosen_sensor: id # time_of_day: in minutes since midnight # chosen_date: in format %m/%d/%Y # Returns: # speed: speed as float at closest time to time_of_day at this specific sensor on this specific day # """ # if time_of_day%5==0: # time_of_day=int(time_of_day) # else: # time_of_day=int(5.0*int(time_of_day/5)) #round down # try: # sensor=int(chosen_sensor) # except TypeError: # return (None,None,None,None) # print 'sensor is likely nonetype' # print chosen_sensor # print time_of_day # print chosen_date # if route==5: # try: # cur.execute("select minutes, aveSpeed from sensorsi5manual where sensorID=? and date=? and minutes>(?-1) and minutes<(?+16)", (sensor, chosen_date, time_of_day, time_of_day)) # readings=check_time(cur.fetchone(), sensor, route, time_of_day, chosen_date, cur) # return readings[1:] # except TypeError: # print chosen_sensor, chosen_date, time_of_day # return None # elif route==605: # if time_of_day<500: # try: # cur.execute("select minutes, aveSpeed from sensors where sensorID=? and date=? 
and minutes>(?-1) and minutes<(?+16)", (sensor, chosen_date, time_of_day, time_of_day)) # readings=check_time(cur.fetchone(), sensor, route, time_of_day, chosen_date, cur) # return readings[1:] # except: # return None # else: # try: # cur.execute("select minutes, aveSpeed from sensorsi605manual where sensorID=? and date=? and minutes>(?-1) and minutes<(?+16)", (sensor, chosen_date, time_of_day, time_of_day)) # readings=check_time(cur.fetchone(), sensor, route, time_of_day, chosen_date, cur) # return readings[1:] # except: # cur.execute("select minutes, aveSpeed from sensors where sensorID=? and date=? and minutes>(?-1) and minutes<(?+16)", (sensor, chosen_date, time_of_day, time_of_day)) # readings=check_time(cur.fetchone(), sensor, route, time_of_day, chosen_date, cur) # return readings[1:] # else: # print chosen_sensor, chosen_date, time_of_day # return None ## print 'Sensor not found in manually chosen tables. Now resorting to massive table but it will be slow' ## try: ## cur.execute("select aveSpeed, aveFlow, length from sensors where sensorID=? and date=? 
and minutes=?", (sensor, chosen_date, time_of_day)) ## return cur.fetchone() ## except TypeError: ## print chosen_sensor, chosen_date, time_of_day ## return None #def check_time(readings, sensor, route, t_min, t_a_date, cur): # ''' checks that the result from readings is for the correct time''' # try: # if int(readings[0]) != int(t_min): #if the speedy method failed, then get recordings by the slow method that is more sure # readings=[t_min] # readings.append(get_speed(sensor, t_min, t_a_date)) # # return readings # else: # return readings # except TypeError: # readings=[t_min] # readings.append(get_speed(sensor, t_min, t_a_date)) # return readings # # #def get_speed(chosen_sensor, time_of_day, chosen_date, cur): # """returns the flow-weighted average speed across all lanes at one sensor # Args: # chosen_sensor: id # time_of_day: in minutes since midnight # chosen_date: in format %m/%d/%Y # Returns: # speed: speed as float at closest time to time_of_day at this specific sensor on this specific day # """ # if time_of_day%5==0: # time_of_day=int(time_of_day) # else: # time_of_day=int(5.0*int(time_of_day/5)) #round down # try: # sensor=int(chosen_sensor) # except TypeError: # return None # print 'sensor is likely nonetype' # print chosen_sensor # print time_of_day # print chosen_date # cur.execute("select aveSpeed from sensorsi5manual where sensorID=? and date=? and minutes=?", (sensor, chosen_date, time_of_day)) # try: # v= cur.fetchone()[0] # return v # except TypeError: # print 'except!' # try: # cur.execute("select aveSpeed from sensorsi605manual where sensorID=? and date=? 
and minutes=?", (sensor, chosen_date, time_of_day)) # return cur.fetchone()[0] # except TypeError: # print chosen_sensor, chosen_date, time_of_day # return None def main(): # db_file='d7Huge.db' # tables=['vStarValsMedian','sensorsi605manual','sensorsi605forvs'] ## tables=['devices'] # #load relevant tables into memory to improve performance # con = sqlite3.connect(':memory:') # cur = con.cursor() # cur.execute("attach database '" + db_file + "' as attached_db") # for table_to_dump in tables: # cur.execute("select sql from attached_db.sqlite_master " # "where type='table' and name='" + table_to_dump + "'") # sql_create_table = cur.fetchone()[0] # cur.execute(sql_create_table); # cur.execute("insert into " + table_to_dump + # " select * from attached_db." + table_to_dump) # con.commit() # cur.execute("detach database attached_db") # con.commit() # for row in cur.execute("select name from sqlite_master where type = 'table'"): # print row con=sqlite3.connect('d7Huge.db') cur = con.cursor() cur.execute('drop table vStarValsMedianWeek') cur.execute('''create table vStarValsMedianWeek (minutes real, sensorID real, vStar real)''') sensor_rows = run_on_file('d07_stations_2008_11_26.txt') subset_road_set=set([605]) sensors=set() for road in subset_road_set: sensors|=set(find_ids(sensor_rows, road)) print 'finding medians for sensors numbering ', len(sensors) for sensorr in sensors: print sensorr for time in range(0,1436,5): #before, did time in range(480,600,5) SENSOR_LIST_0=[] SENSOR_LIST_5=[] SENSOR_LIST_10=[] SENSOR_LIST_15=[] try: cur.execute("select minutes, aveSpeed from sensorsi605manual where sensorID=? 
and minutes>(?-1) and minutes<(?+16) and day>0 and day<6 and holidayBoolean==0", (sensorr, time, time)) #check this postmile condition rows=cur.fetchall() for index in range(0,len(rows),4): # print rows[index][0] SENSOR_LIST_0.append(rows[index][1]) for index in range(1,len(rows),4): # print rows[index][0] SENSOR_LIST_5.append(rows[index][1]) for index in range(2,len(rows),4): # print rows[index][0] SENSOR_LIST_10.append(rows[index][1]) for index in range(3,len(rows),4): # print rows[index][0] SENSOR_LIST_15.append(rows[index][1]) v_star_0=np.median(np.array(SENSOR_LIST_0)) v_star_5=np.median(np.array(SENSOR_LIST_5)) v_star_10=np.median(np.array(SENSOR_LIST_10)) v_star_15=np.median(np.array(SENSOR_LIST_15)) cur.execute("insert into vStarValsMedianWeek(minutes, sensorID, vStar) values (?, ?, ?)", [time, sensorr, v_star_0]) # print 'inserted ', v_star_0 cur.execute("insert into vStarValsMedianWeek(minutes, sensorID, vStar) values (?, ?, ?)", [time+5, sensorr, v_star_5]) # print 'inserted ', v_star_5 cur.execute("insert into vStarValsMedianWeek(minutes, sensorID, vStar) values (?, ?, ?)", [time+10, sensorr, v_star_10]) # print 'inserted ', v_star_10 cur.execute("insert into vStarValsMedianWeek(minutes, sensorID, vStar) values (?, ?, ?)", [time+15, sensorr, v_star_15]) print 'inserted ', v_star_15 except: print 'could not find this i605 value at minute: ', time pass# for sensorr in sensors: # con.commit() sensor_rows = run_on_file('d07_stations_2008_11_26.txt') subset_road_set=set([5]) sensors=set() for road in subset_road_set: sensors|=set(find_ids(sensor_rows, road)) print 'finding medians for sensors numbering ', len(sensors) for sensorr in sensors: print sensorr for time in range(0,1436,5): #before, did time in range(480,600,5) SENSOR_LIST_0=[] SENSOR_LIST_5=[] SENSOR_LIST_10=[] SENSOR_LIST_15=[] try: cur.execute("select minutes, aveSpeed from sensorsi5manual where sensorID=? 
and minutes>(?-1) and minutes<(?+16) and day>0 and day<6 and holidayBoolean==0", (sensorr, time, time)) #check this postmile condition rows=cur.fetchall() for index in range(0,len(rows),4): # print rows[index][0] SENSOR_LIST_0.append(rows[index][1]) for index in range(1,len(rows),4): # print rows[index][0] SENSOR_LIST_5.append(rows[index][1]) for index in range(2,len(rows),4): # print rows[index][0] SENSOR_LIST_10.append(rows[index][1]) for index in range(3,len(rows),4): # print rows[index][0] SENSOR_LIST_15.append(rows[index][1]) v_star_0=np.median(np.array(SENSOR_LIST_0)) v_star_5=np.median(np.array(SENSOR_LIST_5)) v_star_10=np.median(np.array(SENSOR_LIST_10)) v_star_15=np.median(np.array(SENSOR_LIST_15)) cur.execute("insert into vStarValsMedianWeek(minutes, sensorID, vStar) values (?, ?, ?)", [time, sensorr, v_star_0]) # print 'inserted ', v_star_0 cur.execute("insert into vStarValsMedianWeek(minutes, sensorID, vStar) values (?, ?, ?)", [time+5, sensorr, v_star_5]) # print 'inserted ', v_star_5 cur.execute("insert into vStarValsMedianWeek(minutes, sensorID, vStar) values (?, ?, ?)", [time+10, sensorr, v_star_10]) # print 'inserted ', v_star_10 cur.execute("insert into vStarValsMedianWeek(minutes, sensorID, vStar) values (?, ?, ?)", [time+15, sensorr, v_star_15]) print 'inserted ', v_star_15 except: print 'could not find this i5 value at minute: ', time pass con.commit() for row in cur.execute("select count(*) from vStarValsMedianWeek"): print row #before: 6365=67 sensors*95 values per sensor con.commit() if __name__ == '__main__': main()
true
3b35fbc2906eab9ef7c8a048051a06cc39438f83
Python
coldblade2000/MathIAData
/getAccelDiff.py
UTF-8
2,066
3.109375
3
[ "BSD-3-Clause" ]
permissive
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np


def getFirstgoodrow(tb, height, groundheight):
    """Return the first index of *tb* whose "ASL" altitude is within 500 m of
    height + groundheight; fall back to tb's first index if none matches.

    Bug fix: the original iterated the global ``df`` instead of the ``tb``
    parameter, silently ignoring the argument it was given.
    """
    for index, row in tb.iterrows():
        if height + groundheight - 500 < row["ASL"] < height + groundheight + 500:
            return index
    return tb.head(1).index[0]


def getFirstAccel():
    """Return the first index where acceleration exceeds 5 (currently unused helper)."""
    for index, row in df.iterrows():
        if row['Acceleration'] > 5:
            return index
    return df.head(1).index[0]


# --- Load the flight log and trim it to the selected drop altitude ----------
df = pd.read_csv("C:\\Users\\diego\\PycharmProjects\\KSPDataIA\\Data\\accel.csv")
df.set_index("Time", inplace=True)

# selectedHeight = input("What height will you fall from?")
selectedHeight = 10000
terrainHeight = df.iloc[-1]["ASL"]  # last sample's altitude ~ ground level
print(terrainHeight)

df = df[getFirstgoodrow(df, float(selectedHeight), terrainHeight):]
f = lambda x: x - df.head(1).index[0]  # re-zero the time axis at the cut point
df.index = df.index.map(f)
# df = df[df.TrueHeight != -1]
print(df.tail(1)["ASL"])

h = lambda x: x - df.ASL.iloc[-1]      # height above the final (ground) sample
df["TrueHeight"] = df.ASL.map(h)

plt.rcParams['axes.grid'] = True

# Convert G-force readings to acceleration in m/s^2.
df['Acceleration'] = df["GForce"]
f2 = lambda x: x * 9.81
df.Acceleration = df.Acceleration.map(f2)

# Restrict to the interesting window of the fall.
acceldf = df["Acceleration"]  # [df.index<110][df.index>4]
acceldf = acceldf[acceldf.index < 110]
acceldf = acceldf[acceldf.index > 18]

# --- Jerk: first derivative of acceleration ---------------------------------
grad = acceldf.diff() / acceldf.index.to_series().diff()
grad = grad.dropna()
plt.scatter(grad.index, grad, c='g', label="Raw data", s=8)
jerkpolyfit = np.polyfit(grad.index, grad, 2)
plt.plot(range(121), np.poly1d(jerkpolyfit)(np.unique(range(121))), c='r', zorder=10, label="Line of best fit")
plt.title("The derivative of Acceleration: Jerk")
plt.ylabel("Jerk (m * s^-3)")
plt.show()
print(jerkpolyfit)

# --- Jounce: second derivative ----------------------------------------------
secondder = grad.diff() / grad.index.to_series().diff()
plt.scatter(secondder.index, secondder, c='g', label="Raw data", s=8, zorder=10)
jouncepolyfit = np.polyfit(secondder.index, secondder, 2)
plt.plot(range(121), np.poly1d(jouncepolyfit)(range(121)), c='r', label="Line of best fit")
plt.title("The derivative of Jerk: Jounce")
plt.ylabel("Jounce (m * s^-4)")
plt.legend(loc='best')
plt.show()
print(secondder.mean())
true
aa791af8fc0fe359ea3937657ef2494425c42b10
Python
jdanray/leetcode
/ThroneInheritance.py
UTF-8
554
3.84375
4
[]
no_license
# https://leetcode.com/problems/throne-inheritance/
class ThroneInheritance(object):
    """Maintains a royal family tree and answers succession-order queries.

    The order of succession is a preorder (depth-first) traversal of the
    family tree rooted at the king, skipping dead members.
    """

    def __init__(self, kingName):
        self.dead = set()           # names of deceased members
        self.tree = {kingName: []}  # parent name -> ordered list of children
        self.root = kingName

    def birth(self, parentName, childName):
        """Record that parentName has a new (youngest) child childName."""
        self.tree[parentName].append(childName)
        self.tree[childName] = []

    def death(self, name):
        """Mark name as dead; the member stays in the tree for ordering."""
        self.dead.add(name)

    def getInheritanceOrder(self):
        """Return the current succession order, excluding dead members.

        Iterative preorder traversal with an explicit stack: O(n) time.
        The original recursion built the result with repeated list
        concatenation (O(n^2)) and could hit the recursion limit on a
        deep family tree.
        """
        order = []
        stack = [self.root]
        while stack:
            name = stack.pop()
            if name not in self.dead:
                order.append(name)
            # Push children in reverse so the eldest is processed first.
            stack.extend(reversed(self.tree[name]))
        return order
true
8313d5d7712291ccb5018a9b47af8b0b47cd8b4c
Python
aKeller25/Coursework
/GEOG476/Lab01.py
UTF-8
7,616
3.859375
4
[]
no_license
#!/usr/bin/env python
# coding: utf-8

# Class: GEOG476
# Semester: FALL2018
# Assignment: Lab01
# Author: Alex Keller
# Written: 10:17 AM 9/5/2018
# Last edited: 10:16 AM 9/10/2018
#
# Notes: http://pythontutor.com & http://www.asciitable.com/ are good resources


def encrypt(message, num=1):
    """Encrypt *message* by shifting the i-th character of every word up by
    (i + 1) * num code points (position is 1-indexed within the word).

    ``num`` defaults to 1, which reproduces the original Section-1 cipher;
    Section 2 passes a custom multiplier.  This also resolves the original
    TODO: words are joined with single spaces instead of appending a stray
    trailing space after the last word.
    """
    encrypted_words = []
    for word in message.split(" "):
        shifted = [chr(ord(ch) + (i + 1) * num) for i, ch in enumerate(word)]
        encrypted_words.append("".join(shifted))
    return " ".join(encrypted_words)


def decrypt(message, num=1):
    """Invert encrypt(): shift the i-th character of every word down by (i + 1) * num."""
    decrypted_words = []
    for word in message.split(" "):
        shifted = [chr(ord(ch) - (i + 1) * num) for i, ch in enumerate(word)]
        decrypted_words.append("".join(shifted))
    return " ".join(decrypted_words)


# Section 1: shift multiplier of 1 (the default)
message = "I look forward to working with you!"
print('Original: ' + message + '\n')
encr_message = encrypt(message)
print('Encrypted: ' + encr_message + '\n')
decr_message = decrypt(encr_message)
print('Decrypted: ' + decr_message + '\n' + '\n')

# Names, emails, and phone numbers
james = "James Whyte jhwumd@gmail.com 2406409005"
daniel = "Daniel Ataalla fakeEmail@gmail.com 3017420207"

print('James original: ' + james + '\n')
encr_james = encrypt(james)
print('James encoded: ' + encr_james + '\n')
print('James decrypted: ' + decrypt(encr_james) + '\n' + '\n')

print('Daniel original: ' + daniel + '\n')
encr_daniel = encrypt(daniel)
print('Daniel encoded: ' + encr_daniel + '\n')
print('Daniel decrypted: ' + decrypt(encr_daniel) + '\n')

# Section 2: the same cipher with custom shift multipliers
print('Original: ' + message + '\n')
encr_message = encrypt(message, 3)
print('Encrypted: ' + encr_message + '\n')

# Decode the pre-computed ciphertext from the assignment (multiplier 3).
encr_message = 'L ouxw iu{ƒp„y wu zu{wx€| zo}t |u~-'
decr_message = decrypt(encr_message, 3)
print('Decrypted: ' + decr_message + '\n' + '\n')

print('James original: ' + james + '\n')
encr_james = encrypt(james, 5)
print('James encoded: ' + encr_james + '\n')
print('James decrypted: ' + decrypt(encr_james, 5) + '\n' + '\n')

print('Daniel original: ' + daniel + '\n')
encr_daniel = encrypt(daniel, 5)
print('Daniel encoded: ' + encr_daniel + '\n')
print('Daniel decrypted: ' + decrypt(encr_daniel, 5) + '\n')
true
50b4a00bda531edb2944a89ee8fcc5beca0ffc7a
Python
Ben-Hardy/leetcode_solutions
/171_title_to_number.py
UTF-8
200
3.875
4
[ "MIT" ]
permissive
def title_to_number(title: str) -> int:
    """Convert a spreadsheet column title (e.g. "AB") to its 1-based number.

    The title is read as a base-26 numeral where 'A' == 1 and 'Z' == 26,
    so "A" -> 1, "Z" -> 26, "AA" -> 27, "AB" -> 28.
    """
    total = 0
    for exponent, letter in enumerate(reversed(title)):
        total += (ord(letter) - ord('A') + 1) * 26 ** exponent
    return total


print(title_to_number("AB"))
true
33aa739b945a5e0ad47d601a5223d145403fc441
Python
davidthaler/FluentPython
/ch17_futures/futures.py
UTF-8
532
2.59375
3
[]
no_license
# Example 17-3 from Fluent Python
from concurrent import futures

from seqential import save_flag, get_flag, show, main

MAX_WORKERS = 5


def download_one(cc):
    """Download and save a single country flag; return its country code."""
    image = get_flag(cc)
    show(cc)
    save_flag(image, cc.lower() + '.gif')
    return cc


def download_many(cc_list):
    """Download all flags concurrently in a thread pool; return how many were fetched."""
    workers = min(MAX_WORKERS, len(cc_list))
    with futures.ThreadPoolExecutor(workers) as executor:
        results = executor.map(download_one, sorted(cc_list))
    return len(list(results))


if __name__ == '__main__':
    main(download_many)
true
02120845cd6f46e8bc8dacfad56eac0e871c8d6c
Python
zopepy/leetcode
/mergeklist.py
UTF-8
1,184
3.40625
3
[]
no_license
# Definition for singly-linked list.
class ListNode:
    """Minimal singly linked list node."""

    def __init__(self, x):
        self.val = x
        self.next = None

    def __str__(self):
        return str(self.val)

    def __repr__(self):
        return str(self)


from heapq import heappush, heappop


class Solution:
    def mergeKLists(self, lists):
        """Merge k sorted linked lists into one sorted linked list.

        :type lists: List[ListNode]
        :rtype: ListNode

        A min-heap holds at most one node per input list.  The list index is
        included in each heap entry as a tie-breaker so equal values never
        force a comparison between ListNode objects.
        """
        heap = []
        for idx, node in enumerate(lists):
            if node is not None:
                heappush(heap, (node.val, idx, node))
                lists[idx] = node.next

        dummy = ListNode(0)
        tail = dummy
        while heap:
            _, idx, node = heappop(heap)
            # Detach the popped node and append it to the merged list.
            node.next = None
            tail.next = node
            tail = node
            pending = lists[idx]
            if pending is not None:
                heappush(heap, (pending.val, idx, pending))
                lists[idx] = pending.next
        return dummy.next


from extras import *

t1 = list_to_linkedlist([1, 1, 2])
t2 = list_to_linkedlist([1, 2, 2])
lst = [t1, t2]
s = Solution()
ls = s.mergeKLists(lst)
while ls:
    print(ls)
    ls = ls.next
true
ca883e2ba92cfbe18e27c4bba2386dfcd1c18e9f
Python
Vishesh-Sharma221/School-Projects
/read_uplow.py
UTF-8
544
4.4375
4
[]
no_license
# Ask the user for a file name, then count the upper- and lowercase
# letters it contains and report both totals.
print("\n\nEnter the name of the file : ")
file_name = str(input())

with open(file_name, "r") as file:
    file.seek(0)
    lines = file.read()

# str.isupper()/str.islower() are False for digits, spaces and punctuation,
# so only letters are counted.
count_upper = sum(1 for ch in lines if ch.isupper())
count_lower = sum(1 for ch in lines if ch.islower())

# Displaying Output
print("\n\nThe number of uppercase letters in your sentence is : " + str(count_upper))
print("The number of lowercase letters in your sentence is : " + str(count_lower))
true
6bd38b4a5e7eeffa34e935f52303e5ef0a853f7b
Python
luizgustavodarossi/Pyhton
/ex034.py
UTF-8
244
3.859375
4
[]
no_license
# Read an employee's salary and apply a raise:
# 10% for salaries above R$1250.00, 15% otherwise.
salario = float(input('Qual é o salário do funcionário? '))
aumento = salario * (0.10 if salario > 1250 else 0.15)
print('Quem ganhava R${:.2f} passa a ganhar R${:.2f} agora'.format(salario, salario + aumento))
true
c17145f202edcad5698fceaede0a35c82abedfe8
Python
Hermoine-Granger/Rectangle-Rise-and-Drop
/Rectangles.py
UTF-8
696
2.875
3
[]
no_license
# Skyline-style outline: read n rectangles as (x1, x2, h), build a height
# profile along the x axis, then print each x where the profile changes.
n = int(input())
rects = []
max_x = 0
min_x = 99999999999
for _ in range(n):
    rect = tuple(map(int, input().split()))
    rects.append(rect)
    max_x = max(max_x, rect[1])
    min_x = min(min_x, rect[0])

height = [0] * (max_x + 1)
print(len(height))

for each in rects:
    x1, x2, h = each[0], each[1], each[2]
    for i in range(x1, x2):
        height[i] = max(height[i], h)
    # Kept from the original; heights are never negative so this is a no-op.
    height[x2] = max(0, height[x2])

prev_height = -1
print(height)
for i in range(min_x, max_x + 1):
    if height[i] != prev_height:
        prev_height = height[i]
        print("(", i, " ", height[i], ")")
true
bc53e611d48be45bb82dc18afae921dc5ebc3ff1
Python
dskut/euler
/25-fib.py
UTF-8
157
2.828125
3
[]
no_license
#! /usr/bin/env python
# Project Euler 25: index of the first Fibonacci term with 1000 digits.


def first_fib_index(digits=1000):
    """Return the 1-based index of the first Fibonacci term with *digits* digits.

    Generalized from the original hard-coded 1000-digit loop.  Also removes
    the dead ``fib = []`` assignment and uses print() below so the script
    runs under both Python 2 and 3 (the original used a Py2 print statement).
    """
    limit = 10 ** (digits - 1)   # smallest number with *digits* digits
    prev, fib = 1, 1             # F(1), F(2)
    count = 2                    # index of the term currently in `fib`
    while fib < limit:
        prev, fib = fib, fib + prev
        count += 1
    return count


print(first_fib_index())
true
21b25ec87ec1cc611692f0cca9c90836eaa57b19
Python
NielshuldC/Simulation-AOCS-APEX-CubeSat
/CAN_bus/sender.py
UTF-8
506
2.859375
3
[]
no_license
#main.py -- put your code here! # Sending message board import pyb from pyb import CAN led = pyb.LED(3)# Using red LED from board to check if messages are sent while True: led.toggle() pyb.delay(500) can = CAN(1, CAN.NORMAL, extframe=True, prescaler=16, sjw=4, bs1=25, bs2=1) # Receiver and sender should have the same parameters for the CAN bus can.setfilter(0, CAN.LIST16, 0, (123, 124, 125, 126)) can.send('1', 123) # sending message '1' with ID 123 pyb.delay(500)
true
104c9d5847e05aacff74171e338c4e4510432d95
Python
TaraBlackburn/crowd-sound-affect
/src/rps_app.py
UTF-8
1,617
2.71875
3
[]
no_license
"""Streamlit app: classify crowd-sound spectrogram images as Approval / Disapproval / Neutral."""
import streamlit as st
import tensorflow as tf
from PIL import Image, ImageOps
from tensorflow.keras import models
import numpy as np
import os
import cv2
from tensorflow.keras.preprocessing import image
from tensorflow import keras
import sklearn

# loaded_model = models.load_model('/home/pteradox/Galvanize/capstones/crowd-sound-affect/src/model_checkpoint/my_h5_model_compact')

# Maps the model's output index to a human-readable label.
class_dict = {0:'Approval', 1:'Disapproval', 2:'Neutral'}

st.write("""
# Spectrogram Classification
"""
)
st.write("Predict whether a spectrogram converted from an audiofile is going to be approval, disapproval or neural")

file = st.file_uploader("Please upload an image file", type=["jpg", "png"])


def import_and_predict(image_data):
    """Run the saved classifier on a PIL image and return the raw prediction array.

    NOTE(review): the model is re-loaded from an absolute local path on every
    call; confirm the path exists on the deployment host and consider caching
    the load outside this function.
    """
    model = models.load_model('/home/pteradox/Galvanize/capstones/crowd-sound-affect/src/model_checkpoint/my_h5_model_compact')
    # Scale pixel values to [0, 1] -- presumably matching the training
    # preprocessing; TODO confirm.
    image = tf.keras.preprocessing.image.img_to_array(image_data)/255
    # Add a leading batch dimension: (1, H, W, C).
    image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
    predict = model.predict(image)
    return predict


if file is None:
    st.text("Please upload an image file")
else:
    # Show the uploaded image, then feed a 224x224 RGB copy to the model.
    image = Image.open(file)
    st.image(image)
    feed = Image.open(file).convert('RGB')
    feed = feed.resize((224,224))
    prediction = import_and_predict(feed)
    # argmax over the class scores selects the predicted label
    # (0 = Approval, 1 = Disapproval, 2 = Neutral; see class_dict above).
    if np.argmax(prediction) == 0:
        st.write("Approval")
    elif np.argmax(prediction) == 1:
        st.write("Disapproval")
    else:
        st.write("Neutral")
    # a, b, c = prediction[0]
    st.text(f"Probability {prediction})")
    st.write(prediction)
true
b1e239b37b120ead40ed5b8d2bd3214bced67737
Python
MijaToka/Random3_1415
/Codficadores/rot13.py
UTF-8
461
3.53125
4
[]
no_license
def rot13(char):
    """Return *char* rotated 13 places within its alphabet.

    Uppercase and lowercase letters rotate within their own case; any
    other character is returned unchanged.
    """
    if 'A' <= char <= 'Z':
        base = ord('A')
    elif 'a' <= char <= 'z':
        base = ord('a')
    else:
        return char
    return chr(base + (ord(char) - base + 13) % 26)
true
269d42d6824ec940c9a7dcf9b9558684e46b3368
Python
KupermanAlex/Zen_of_Python
/CardGame.py
UTF-8
1,399
3.296875
3
[]
no_license
import random
from random import shuffle

# 36-card deck: Ace plus 6 through King, in four suits.
RANKS =[ "Ace","6", "7","8", "9", "10", "Jack", "Queen", "King" ]
SUITS =[ "Clubs", "Diamonds", "Hearts", "Spades" ]


class Card:
    """A single playing card identified by its rank and suit."""

    def __init__( self, rank, suit ):
        self.rank = rank
        self.suit = suit

    def __str__( self ):
        return self.rank + " of " + self.suit


class Deck:
    """A full deck of Card objects, shuffled on creation (held in ``contents``)."""

    def __init__( self ):
        cards = []
        # Same generation order as the original comprehension
        # (rank-major, suit-minor), so the shuffle consumes the RNG identically.
        for rank in RANKS:
            for suit in SUITS:
                cards.append( Card( rank, suit ) )
        random.shuffle( cards )
        self.contents = cards
true
7d854f18c4f5933cf1a3ff5503a4e03b6d380b1b
Python
Tirklee/python3-demo
/T2_47 Python 数组翻转指定个数的元素.py
UTF-8
1,440
4
4
[]
no_license
# Example 1: rotate left by one position, d times.  O(n*d) time.
def leftRotate(arr, d, n):
    for _ in range(d):
        leftRotatebyOne(arr, n)


def leftRotatebyOne(arr, n):
    first = arr[0]
    for i in range(n - 1):
        arr[i] = arr[i + 1]
    arr[n - 1] = first


def printArray(arr, size):
    for i in range(size):
        print("%d" % arr[i], end=" ")


arr = [1, 2, 3, 4, 5, 6, 7]
leftRotate(arr, 2, 7)
printArray(arr, 7)
print()


# Example 2: juggling algorithm -- gcd(d, n) interleaved cycles.  O(n) time.
def leftRotate(arr, d, n):
    for start in range(gcd(d, n)):
        temp = arr[start]
        j = start
        while 1:
            k = j + d
            if k >= n:
                k = k - n
            if k == start:
                break
            arr[j] = arr[k]
            j = k
        arr[j] = temp


def printArray(arr, size):
    for i in range(size):
        print("%d" % arr[i], end=" ")


def gcd(a, b):
    # Iterative Euclid (same results as the original recursion).
    while b:
        a, b = b, a % b
    return a


arr = [1, 2, 3, 4, 5, 6, 7]
leftRotate(arr, 2, 7)
printArray(arr, 7)
print()


# Example 3: three reversals.  O(n) time, O(1) extra space.
def rverseArray(arr, start, end):
    while start < end:
        arr[start], arr[end] = arr[end], arr[start]
        start += 1
        end = end - 1


def leftRotate(arr, d):
    n = len(arr)
    rverseArray(arr, 0, d - 1)
    rverseArray(arr, d, n - 1)
    rverseArray(arr, 0, n - 1)


def printArray(arr):
    for i in range(0, len(arr)):
        print(arr[i], end=' ')


arr = [1, 2, 3, 4, 5, 6, 7]
leftRotate(arr, 2)
printArray(arr)
true
4b3e87f2a8575894d6ab7f56691aecbdc2f39dbe
Python
google/agi
/vulkan_generator/vulkan_parser/internal/funcptr_parser.py
UTF-8
4,453
2.640625
3
[ "Apache-2.0" ]
permissive
# Copyright (C) 2022 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

""" This module is responsible for parsing Vulkan function pointers"""

from typing import Dict
import xml.etree.ElementTree as ET

from vulkan_generator.vulkan_parser.internal import internal_types
from vulkan_generator.vulkan_parser.internal import parser_utils


def parse_arguments(function_ptr_elem: ET.Element) -> Dict[str, internal_types.VulkanFunctionArgument]:
    """Parses the arguments of a Vulkan Function Pointer

    Args:
        function_ptr_elem: the <type category="funcpointer"> XML element.

    Returns:
        Mapping from argument name to VulkanFunctionArgument, in declaration order.

    Raises:
        SyntaxError: if an argument's type or name is missing, or an
            unsupported construct is found (double const, double pointer,
            const pointer).
    """
    arguments: Dict[str, internal_types.VulkanFunctionArgument] = {}

    # In the XML const modifier of the type is part of the
    # previous argument of the function
    # <type>int32_t</type> messageCode,
    # const <type> char </type> * pLayerPrefix,
    is_next_type_const = False

    for elem in function_ptr_elem:
        # Skip the name tag
        if elem.tag != "type":
            continue

        if not elem.text:
            raise SyntaxError(f"Argument type could not found: {ET.tostring(elem, 'utf-8')!r}")

        if not elem.tail:
            raise SyntaxError(f"Argument name could not found: {ET.tostring(elem, 'utf-8')!r}")

        # elem.text holds the type; elem.tail holds the argument name plus
        # trailing punctuation (and possibly the NEXT argument's "const").
        argument_type = parser_utils.clean_type_string(elem.text)
        argument_name = parser_utils.clean_type_string(elem.tail)

        # Multiple const are not supported
        if argument_name.count("const") > 1:
            raise SyntaxError(f"Double const are not supported: {argument_type} {argument_name}")

        # Multiple pointers are not supported
        if argument_name.count("*") > 1:
            raise SyntaxError(f"Double pointers are not supported: {argument_type} {argument_name}")

        # This means previous argument has the const modifier for this type
        if is_next_type_const:
            argument_type = f"const {argument_type}"
            is_next_type_const = False

        if "const" in argument_name:
            # A "const" that is not at the very end would qualify the pointer
            # itself (a const pointer), which is not supported.
            if not argument_name.endswith("const"):
                raise SyntaxError(f"""This is probably a const pointer which is not supported:
                    {argument_type} {argument_name}""")
            # Carry the const over to the next argument's type (see note above).
            is_next_type_const = True
            argument_name = argument_name.replace("const", "")

        # Pointers of the type is actually in the argument name
        if "*" in argument_name:
            argument_type = argument_type + "*"
            argument_name = argument_name[1:]

        arguments[argument_name] = internal_types.VulkanFunctionArgument(
            argument_type=argument_type,
            argument_name=argument_name,
        )

    return arguments


def parse(func_ptr_elem: ET.Element) -> internal_types.VulkanFunctionPtr:
    """Returns a Vulkan function pointer from the XML element that defines it.

    A sample Vulkan function_pointer:
    < type category="funcpointer" > typedef void(VKAPI_PTR *
    < name > PFN_vkInternalAllocationNotification < /name > )(
    < type > void < /type > * pUserData,
    < type > size_t < /type > size,
    < type > VkInternalAllocationType < /type > allocationType,
    < type > VkSystemAllocationScope < /type > allocationScope);
    < /type >
    """
    function_name = parser_utils.get_text_from_tag_in_children(func_ptr_elem, "name")

    # Return type is in the type tag's text field with some extra information
    # e.g typedef void (VKAPI_PTR *
    return_type = func_ptr_elem.text
    if not return_type:
        raise SyntaxError(f"No return type found for the function pointer: {ET.tostring(func_ptr_elem, 'utf-8')!r}")

    # remove the function pointer boilers around type
    return_type = return_type.split("(")[0]
    return_type = return_type.replace("typedef", "")
    return_type = parser_utils.clean_type_string(return_type)

    arguments = parse_arguments(func_ptr_elem)

    return internal_types.VulkanFunctionPtr(
        typename=function_name,
        return_type=return_type,
        arguments=arguments)
true
3fc0e315d8f9d2bb7f73413d2e0746e5726dc0c7
Python
warun27/Association-Rules
/movies.py
UTF-8
2,255
3.15625
3
[]
no_license
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 13 04:41:54 2020

@author: shara

Association-rule mining on a one-hot encoded movie transactions dataset,
exploring several apriori support / max_len / metric-threshold settings.
"""

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from mlxtend.frequent_patterns import apriori, association_rules

movies = pd.read_csv("F:\Warun\\DS Assignments\\DS Assignments\\Association Rules\\my_movies.csv")
movies.head()
movies.values

# Keep only the one-hot encoded movie indicator columns.
movies1 = movies.iloc[:, [5, 6, 7, 8, 9, 10, 11, 12, 13, 14]]
movies1.head()  # bug fix: was `movies1.head` (attribute reference, never called)

frequent_movies = apriori(movies1, min_support=0.005, max_len=3, use_colnames=True)
# Bug fix: sort_values(..., inplace=True) returns None, so the original
# printed "None"; sort in place first, then print the sorted frame.
frequent_movies.sort_values("support", ascending=False, inplace=True)
print(frequent_movies)

rules = association_rules(frequent_movies, metric="lift", min_threshold=1)
rules_r = rules.sort_values('lift', ascending=False)

# Sensitivity analysis: vary support, max_len and metric thresholds.
frequent_movies1 = apriori(movies1, min_support=0.01, max_len=4, use_colnames=True)
rules1 = association_rules(frequent_movies1, metric="lift", min_threshold=1)

frequent_movies2 = apriori(movies1, min_support=0.1, max_len=3, use_colnames=True)
rules2 = association_rules(frequent_movies2, metric="lift", min_threshold=1)

frequent_movies3 = apriori(movies1, min_support=0.1, max_len=3, use_colnames=True)
rules3 = association_rules(frequent_movies3, metric="lift", min_threshold=1.5)

frequent_movies4 = apriori(movies1, min_support=0.5, max_len=3, use_colnames=True)
rules4 = association_rules(frequent_movies4, metric="lift", min_threshold=1)

frequent_movies5 = apriori(movies1, min_support=0.007, max_len=3, use_colnames=True)
rules5 = association_rules(frequent_movies5, metric="confidence", min_threshold=0.8)

# Visualize the rule metrics against each other.
plt.scatter(rules['support'], rules['confidence'], alpha=0.5)
plt.xlabel('support')
plt.ylabel('confidence')
plt.title('Support vs Confidence')
plt.show()

plt.scatter(rules["support"], rules["lift"], alpha=0.5)
plt.xlabel("support")
plt.ylabel("lift")
plt.title("Support vs Lift")
plt.show()

plt.scatter(rules["confidence"], rules["lift"], alpha=0.5)
plt.xlabel("confidence")
plt.ylabel("lift")
plt.title("confidence vs Lift")
plt.show()

# Linear fit of confidence as a function of lift, overlaid on the points.
fit = np.polyfit(rules['lift'], rules['confidence'], 1)
fit_fn = np.poly1d(fit)
plt.plot(rules['lift'], rules['confidence'], 'yo',
         rules['lift'], fit_fn(rules['lift']))
true
9d1e3438c03d52c9d729f9e188e19a4da8dab37d
Python
hidepin/dbviewer
/dbviewer.py
UTF-8
2,896
2.71875
3
[]
no_license
#!/usr/bin/env python
"""List Dropbox folder contents or report total file size via the Dropbox API."""
import argparse
import contextlib
import sys
import os
import time

import dropbox

# OAuth2 access token. TODO: login etc.
TOKEN = ''

parser = argparse.ArgumentParser(description='View Dropbox')
parser.add_argument('folder', type=str, nargs='?', default='download',
                    help='Folder name in your Dropbox')
parser.add_argument('--token', type=str, default=TOKEN,
                    help='Access token '
                    '(see https://www.dropbox.com/developers/apps)')
parser.add_argument('--size', '-s', action='store_true',
                    help='Display sum of file size.')
parser.add_argument('--quiet', '-q', action='store_true',
                    help='quiet mode.')


def main():
    """Parse arguments and dispatch to disk_usage() or list_folder()."""
    args = parser.parse_args()
    if not args.token:
        print('--token is mandatory')
        sys.exit(2)

    folder = args.folder
    if not args.quiet:
        print('Dropbox folder name:', folder)
    dbx = dropbox.Dropbox(args.token)
    if args.size:
        disk_usage(dbx, folder)
    else:
        list_folder(dbx, folder, '')


def disk_usage(dbx, folder):
    """Print the total size (in MiB) of all files under *folder*."""
    path = '/%s' % (folder)
    while '//' in path:
        path = path.replace('//', '/')
    path = path.rstrip('/')
    try:
        # Bug fix: the original called `if _stopwatch(...):`, which only
        # created a generator object (always truthy) -- the timing code
        # never actually ran.  Use it as a context manager instead.
        with _stopwatch('disk_usage'):
            res = dbx.files_list_folder(path, recursive=True)
    except dropbox.exceptions.ApiError as err:
        print('Folder listing failed for', path, '-- assumed empty:', err)
        return {}
    else:
        size = _disk_usage_recursive(dbx, res)
        print(folder, 'size = ', size / 1024 / 1024)


def list_folder(dbx, folder, subfolder):
    """Print name and size of each file directly under folder/subfolder."""
    path = '/%s/%s' % (folder, subfolder.replace(os.path.sep, '/'))
    while '//' in path:
        path = path.replace('//', '/')
    path = path.rstrip('/')
    try:
        with _stopwatch('list_folder'):
            res = dbx.files_list_folder(path)
    except dropbox.exceptions.ApiError as err:
        print('Folder listing failed for', path, '-- assumed empty:', err)
        return {}
    else:
        _list_folder_recursive(dbx, res)


@contextlib.contextmanager
def _stopwatch(message):
    """Context manager that reports the wall time of its body.

    Bug fixes vs. the original: the generator lacked the @contextmanager
    decorator, and the module never imported `time` although time.time()
    was used here.
    """
    t0 = time.time()
    try:
        yield
    finally:
        t1 = time.time()
        print('Total elapsed time for %s: %.3f' % (message, t1 - t0))


def _disk_usage_recursive(dbx, res):
    """Sum file sizes across all pages of a files_list_folder result."""
    size = 0
    for entry in res.entries:
        if type(entry) is dropbox.files.FileMetadata:
            size += entry.size
    if res.has_more:
        # Follow the pagination cursor for the remaining entries.
        res_continue = dbx.files_list_folder_continue(res.cursor)
        size += _disk_usage_recursive(dbx, res_continue)
    return size


def _list_folder_recursive(dbx, res):
    """Print "name , size" for each file across all result pages."""
    for entry in res.entries:
        if type(entry) is dropbox.files.FileMetadata:
            print(entry.name, ",", entry.size)
    if res.has_more:
        res_continue = dbx.files_list_folder_continue(res.cursor)
        _list_folder_recursive(dbx, res_continue)


if __name__ == '__main__':
    main()
true
d94e371733660bab8e31872b6d180a337e71b6db
Python
CMakerA/WiSync
/build/lib/WiSync/Ider.py
UTF-8
632
3.140625
3
[ "MIT" ]
permissive
class Ider:
    """Generates sequential widget IDs with a fixed prefix (e.g. "btn1", "btn2")."""

    def __init__(self, prefix: str):
        self.prefix = prefix
        self.elements = list()   # every element registered so far, in order

    def __len__(self) -> int:
        return len(self.elements)

    def add(self, element) -> str:
        """Register *element* and return its generated ID (prefix + 1-based index)."""
        self.elements.append(element)
        return self.prefix + str(len(self))


class Iders:
    """Registry of the shared Ider instances, one per widget kind."""

    btnIder = Ider("btn")
    lblIder = Ider("lbl")
    pnlIder = Ider("pnl")
    txtIder = Ider("txt")

    __iders = [btnIder, lblIder, pnlIder, txtIder]

    def ider_from_str(self, prefix: str) -> Ider:
        """Return the Ider whose prefix equals *prefix*, or None if unknown.

        Bug fix: the original compared strings with ``is`` (identity), which
        only works by accident for interned literals; use ``==`` (equality).
        """
        for ider in self.__iders:
            if ider.prefix == prefix:
                return ider


iders = Iders()
true
7b8c13c54feb9784f43a8560c1ce93a0df8c4f28
Python
guicavicci1997/ilana
/aula-23-08/histograma.py
UTF-8
714
3.1875
3
[]
no_license
import cv2
# Library for plotting images with axes
from matplotlib import pyplot as plt
# Library for numeric work
import numpy as np

imagem = cv2.imread("layne-staley2.jpg")
#cv2.imshow("Original", imagem)

# Convert the image to grayscale.
cinza = cv2.cvtColor(imagem, cv2.COLOR_BGR2GRAY)

# 2x2 figure: grayscale image + its histogram on top,
# equalized image + its histogram below.
fig, ((primeira, segunda), (terceira, quarta)) = plt.subplots(2, 2)
primeira.imshow(cinza, cmap='gray')
segunda.hist(cinza.flatten(), 256, range=(0, 255))

# Equalize the grayscale image's histogram.
nova = cv2.equalizeHist(cinza)
terceira.imshow(nova, cmap='gray', interpolation='bicubic')
# Bug fix: the fourth panel replotted the ORIGINAL image's histogram;
# it should show the histogram of the equalized image.
quarta.hist(nova.flatten(), 256, range=(0, 255))

plt.show()
true
b67c7ecdec520d6041f113ab8d3d8771a1b2e413
Python
marqeta/marqeta-python
/tests/cards_merchant/test_cards_merchant_create.py
UTF-8
3,310
2.546875
3
[ "MIT" ]
permissive
import unittest
import time

from tests.lib.client import get_client
from marqeta.errors import MarqetaError


class TestCardsMerchantCreate(unittest.TestCase):
    """Tests creating merchant cards"""

    def setUp(self):
        """Setup each test."""
        self.client = get_client()

    def get_merchant(self):
        """Creates a unique merchant."""
        # A timestamp-derived suffix keeps the merchant name unique per run.
        return self.client.merchants.create({
            "name": "qe_merchant_" + str(int(time.time() % 1000000000))
        })

    def get_card_product(self):
        """Creates a card product."""
        return self.client.card_products.create({
            "name": "Card Create Product",
            "start_date": "2019-02-01",
            "config": {
                "fulfillment": {
                    "payment_instrument": "VIRTUAL_PAN"
                },
                "special": {
                    "merchant_on_boarding": True
                }
            }
        })

    def verify_card_response(self, response, verify):
        """
        Verifies a card response matches the expected values.

        Parameters:
            response (CardResponse): The response object to verify.
            verify (Dictionary): The values that should be in the response.
        """
        # Every one of these attributes must be present and non-None.
        expected_attributes = [
            'created_time', 'last_modified_time', 'token', 'user_token',
            'card_product_token', 'last_four', 'pan', 'expiration',
            'expiration_time', 'barcode', 'pin_is_set', 'state',
            'state_reason', 'fulfillment_status', 'instrument_type',
            'expedite', 'metadata'
        ]
        for attribute in expected_attributes:
            with self.subTest(f'{attribute} is not defined'):
                self.assertIsNotNone(getattr(response, attribute))

        # Every requested value must round-trip unchanged.
        for attribute in list(verify.keys()):
            # funding_source_token is masked for program funding sources
            if attribute == 'funding_source_token':
                continue
            with self.subTest(f'{attribute} does not match the expected value'):
                self.assertEqual(getattr(response, attribute), verify[attribute])

    def test_merchant_card_create_fail(self):
        """Check error handing when a merchant card request is missing required data."""
        merchant = self.get_merchant()
        with self.assertRaises(MarqetaError):
            self.client.cards.create_for_merchant(merchant.token, {})

    def test_merchant_card_create_success(self):
        """Create a merchant card."""
        merchant = self.get_merchant()
        card_product = self.get_card_product()
        options = {"card_product_token": card_product.token}
        card = self.client.cards.create_for_merchant(merchant.token, options)
        self.verify_card_response(card, options)
true
bdb0b82630fde9b06f56cd1aa886987747fecc2d
Python
dr-dos-ok/Code_Jam_Webscraper
/solutions_python/Problem_96/1844.py
UTF-8
696
3.21875
3
[]
no_license
#!/usr/bin/env python T = int(raw_input()) for i in xrange(T): sum = 0 G = raw_input().split() N = int(G[0]) S = int(G[1]) p = int(G[2]) # print N, S, p for j in xrange(N): n = int(G[j+3]) x = n/3 y = n%3 # print "--",n, x, y if n == 0: if p == 0: sum += 1 else: pass elif x >= p: sum += 1 elif y>=1 and x+1 >= p: sum += 1 elif S>0 and y==0 and x+1 >= p: S -= 1 sum += 1 elif S>0 and y==2 and x+2 >= p: S -= 1 sum += 1 print "Case #%d: %s" % (i+1, sum)
true
27008cb3dff020593b05ea2d32a4ef7bb46f1197
Python
alpha-kwhn/Baekjun
/powerful104/6603.py
UTF-8
244
2.890625
3
[]
no_license
import itertools as ite while True: li = list(map(int, input().split())) if li[0]==0: break num=li[0] del li[0] lit = ite.combinations(li,6) for i in lit: print(" ".join(map(str,i))) print()
true
9b4e0268290528d3eea5a1d8bd3f41ab42d2c403
Python
pedireddy/guvi1
/poornabeg48.py
UTF-8
114
3.03125
3
[]
no_license
num=int(input()) l=[int(x) for x in input().split()] sum=0 for i in range(1,num+1): sum=sum+i print(sum//num)
true
58475ceb9eb99e26c7cfc4507d12b735a5e0206b
Python
tungminhphan/reactive_contracts
/components/robots.py
UTF-8
5,260
2.984375
3
[ "LicenseRef-scancode-warranty-disclaimer", "BSD-3-Clause" ]
permissive
#!/usr/local/bin/python # Robot Class # Tung M. Phan # California Institute of Technology # April 14, 2019 import imageio import os import numpy as np from PIL import Image import scipy.integrate as integrate dir_path = os.path.dirname(os.path.realpath(__file__)) all_robot_types = {'1','2','3'} class Robot: def __init__(self, init_state = [0,0,0,0], # (x, y, theta, gait) number_of_gaits = 6, gait_length = 4, gait_progress = 0, film_dim = (1, 6), prim_queue = None, # primitive queue robot_type = '3', name = None, age = 20): """ Robot class """ # init_state: initial state by default (x = 0, y = 0, theta = 0, gait = 0) self.state = np.array(init_state, dtype="float") self.number_of_gaits = film_dim[0] * film_dim[1] self.gait_length = gait_length self.gait_progress = gait_progress self.film_dim = film_dim self.name = name self.age = age self.robot_type = robot_type if prim_queue == None: self.prim_queue = Queue() else: prim_queue = prim_queue self.fig = dir_path + '/imglib/walker' + robot_type + '.png' def next(self, inputs, dt): """ The robot advances forward """ dee_theta, vee = inputs self.state[2] += dee_theta # update heading of robot self.state[0] += vee * np.cos(self.state[2]) * dt # update x coordinate of robot self.state[1] += vee * np.sin(self.state[2]) * dt # update y coordinate of robot distance_travelled = vee * dt # compute distance travelled during dt gait_change = (self.gait_progress + distance_travelled / self.gait_length) // 1 # compute number of gait change self.gait_progress = (self.gait_progress + distance_travelled / self.gait_length) % 1 self.state[3] = int((self.state[3] + gait_change) % self.number_of_gaits) def visualize(self): # convert gait number to i, j coordinates of subfigure current_gait = self.state[3] i = current_gait % self.film_dim[1] j = current_gait // self.film_dim[1] img = Image.open(self.fig) width, height = img.size sub_width = width/self.film_dim[1] sub_height = height/self.film_dim[0] lower = (i*sub_width, (j-1)*sub_height) 
upper = ((i+1)*sub_width, j*sub_height) area = (lower[0], lower[1], upper[0], upper[1]) cropped_img = img.crop(area) return cropped_img def extract_primitive(self): """ This function updates the primitive queue and picks the next primitive to be applied. When there is no more primitive in the queue, it will return False """ while self.prim_queue.len() > 0: if self.prim_queue.top()[1] < 1: # if the top primitive hasn't been exhausted prim_data, prim_progress = self.prim_queue.top() # extract it return prim_data, prim_progress else: self.prim_queue.pop() # pop it return False def prim_next(self, dt): if self.extract_primitive() == False: # if there is no primitive to use self.next((0, 0), dt) else: prim_data, prim_progress = self.extract_primitive() # extract primitive data and primitive progress from prim start, finish, vee = prim_data # extract data from primitive x = finish[0] - start[0] y = finish[1] - start[1] total_distance = np.linalg.norm(np.array([x, y])) if prim_progress == 0: # ensure that starting position is correct at start of primitive self.state[0] = start[0] self.state[1] = start[1] if start == finish: #waiting mode remaining_distance = 0 self.state[3] = 0 # reset gait if self.prim_queue.len() > 1: # if current not at last primitive last_prim_data, last_prim_progress = self.prim_queue.bottom() # extract last primitive last_start, last_finish, vee = last_prim_data dx_last = last_finish[0] - self.state[0] dy_last = last_finish[1] - self.state[1] heading = np.arctan2(dy_last,dx_last) if self.state[2] != heading: self.state[2] = heading else: # if in walking mode dx = finish[0] - self.state[0] dy = finish[1] - self.state[1] remaining_distance = np.linalg.norm(np.array([dx, dy])) heading = np.arctan2(dy,dx) if self.state[2] != heading: self.state[2] = heading if vee * dt > remaining_distance and remaining_distance != 0: self.next((0, remaining_distance/dt), dt) else: self.next((0, vee), dt) if total_distance != 0: prim_progress += dt / (total_distance / 
vee) self.prim_queue.replace_top((prim_data, prim_progress)) # update primitive queue
true
46ecd6085515330043d2d5a39b0e044ea9203b3f
Python
2mohammad/lucky_number
/app.py
UTF-8
1,253
2.859375
3
[]
no_license
from flask import Flask, render_template, jsonify, request import requests import random from werkzeug.utils import redirect app = Flask(__name__) @app.route("/") def homepage(): """Show homepage.""" return render_template("index.html") @app.route('/api/get-lucky-num', methods=["POST", "GET"]) def response(): print(len(request.form)) response = { "name" : request.form["name"], "email" : request.form["email"], "color" : request.form["color"], "year" : request.form["year"], "lucky number": random.randint(0, 100) } for item in response: if response[item] == "": response[item] = ["This field is required"] elif item == "year" or item == "lucky number": log = api_getter(response[item]) logd = log.json() for items in logd: if items == "text": response[item] = { "fact": logd[items], item: response[item] } print(response) return render_template("index.html", response=response) def api_getter(value): response = requests.get(f"http://numbersapi.com/{value}?json") return response
true
0539ca564f8bc2c03f3f60faabbcf035673c9a9b
Python
JeroenDM/benchmark_runner
/benchmark_runner/scripts/run.py
UTF-8
2,835
2.546875
3
[]
no_license
#!/usr/bin/env python """ This script runs a task. It takes as a command line argument the planner group it has to use. The task is read from the parameter server for now (parameter: '/planning_task_path'). """ import sys import json import datetime import rospy import rospkg import rosparam from nexon.robot import Robot from benchmark_runner.task_solver import solve_task def execute_plans(robot, plans): """ Execute a list of plans, this list is returned when solving a task. """ # make sure the robot is actually in the home position # before executing a plan robot.mg.set_joint_value_target( plans[0].joint_trajectory.points[0].positions) robot.mg.go(wait=True) print("Moved to home, start executing task.") # TODO quick fix, add first point to lin path plans[1].joint_trajectory.points.insert( 0, plans[0].joint_trajectory.points[-1]) for plan in plans: print("========================================") print("executing plan of lenght") print(len(plan.joint_trajectory.points)) print(plan.joint_trajectory.points[0]) print(plan.joint_trajectory.points[1]) print("\n...\n") print(plan.joint_trajectory.points[-1]) print("========================================") # print(plan) robot.mg.execute(plan, wait=True) rospy.sleep(1.0) if __name__ == "__main__": if len(sys.argv) < 2: print("One argument required:") print("rosrun benchmark_runner run.py <planning_group_name>") exit() else: planning_group_name = sys.argv[1] rospy.init_node("execute_simple_task") rospack = rospkg.RosPack() # Open a connection to database for logging # DB = LogDB() filepath = rosparam.get_param("/planning_task_path") config_file = "planning_groups.json" config_file_path = rospack.get_path("benchmark_runner") + "/config/" with open(config_file_path + config_file) as file: config = json.load(file) group_config = config["groups"][planning_group_name] print("Using planning group: {}".format(planning_group_name)) timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S") run_id = timestamp + "0" 
rospy.set_param("/benchmark_run_id", run_id) # load parameters to parameter server ptp_config = config["groups"][planning_group_name]["ptp_config"] print(ptp_config) rospy.set_param("/ptp_config", ptp_config) cart_config = config["groups"][planning_group_name]["cart_config"] print(cart_config) rospy.set_param("/cart_config", cart_config) plans = solve_task(filepath, group_config) # print("LOGGING ===========================") # # print(psi.logs) # # log_run_to_db(task, filepath) robot = Robot() execute_plans(robot, plans)
true
c3c550cf5e7e0cef99a8af92ab2656ee5090d0bf
Python
veezard/vid_scraper
/scrapers/simons.py
UTF-8
4,202
2.75
3
[ "MIT" ]
permissive
import requests import re from bs4 import BeautifulSoup from scrapers import Talk from datetime import date from dateutil.parser import parse as dateParser_ from scrapers import dateParse from scrapers import removeParentheses from scrapers import cleanSpeaker import pickle def scrape(start_date=date(1980, 1, 1), process=None): # process should be Talk -> None page_number = 1 hostname = "http://scgp.stonybrook.edu" while page_number >= 0: URL = 'http://scgp.stonybrook.edu/video_portal/index.php?page=' + str( page_number) page_number = page_number + 1 try: page = requests.get(URL) soup = BeautifulSoup(page.content, 'html.parser') new_talks_div = soup.find('div', class_="col-lg-7 col-md-9") talk_divs = new_talks_div.find_all('div', class_="col-xs-8") if len(talk_divs) == 0: page_number = -1 break for talk_div in talk_divs: link = hostname + talk_div.find('a', class_="btn btn-primary btn-xs")['href'] talk = Talk(link) dataField = talk_div.text # Tokens are used in the regular expression to extract talk # data all_tokens = "(\nTitle: |\nEvent: |\nName: |\nDate: |\nLocation: |\nview video)" tokens = { 'title': '\nTitle: ', 'workshop': '\nEvent: ', 'speaker': '\nName: ', 'date': '\nDate: '} try: date = dateParse( re.search( '%s(.*?)%s' % (tokens['date'], all_tokens), dataField, flags=re.DOTALL | re.MULTILINE).group(1).replace( '@', '')) if date < start_date: page_number = -1 break except BaseException: pass try: speaker = re.search( '%s(.*?)%s' % (tokens['speaker'], all_tokens), dataField, flags=re.DOTALL | re.MULTILINE).group(1).strip() talk.firstName, talk.lastName = cleanSpeaker(speaker) except BaseException: pass try: title = re.search( '%s(.*?)%s' % (tokens['title'], all_tokens), dataField, flags=re.DOTALL | re.MULTILINE).group(1).strip() if title != '': talk.title = title except BaseException: pass try: workshop = re.search( '%s(.*?)%s' % (tokens['workshop'], all_tokens), dataField, flags=re.DOTALL | re.MULTILINE).group(1).strip() if workshop != '': talk.workshop = 
"Simons- " + workshop except BaseException: pass if (abstract := urlToMaybeAbstract(link)): talk.abstract = abstract if process: process(talk) print(talk) except BaseException: pass return None def urlToMaybeAbstract(url): try: page = requests.get(url) talk = Talk(url) soup = BeautifulSoup(page.content, 'html.parser') dataField = soup.find_all('div', class_='col-md-12')[2].text abstract = re.search(r'\nAbstract: (.*)', dataField).group(1).strip() if abstract != '': return abstract else: return None except BaseException: return None
true
7a449f989d5b3b4c48f9d58f0158b34fc00df9ea
Python
chalei/espectro32-micropython
/dht22.py
UTF-8
266
2.734375
3
[]
no_license
from machine import Pin import time import dht sensor = dht.DHT22Pin(26)) delay = 2 while True: try: sensor.measure() print(sensor.temperature(), "C") print(sensor.humidity(), "persen") time.sleep(delay) except OSError: pass
true
feef2ff9f26bdbf42279fdef1ca8e06b7577f3bf
Python
FabrizioParker/Piggy
/student.py
UTF-8
10,270
3.21875
3
[ "MIT" ]
permissive
from teacher import PiggyParent import sys import time class Piggy(PiggyParent): ''' ************* SYSTEM SETUP ************* ''' def __init__(self, addr=8, detect=True): PiggyParent.__init__(self) # run the parent constructor ''' MAGIC NUMBERS <-- where we hard-code our settings ''' self.LEFT_DEFAULT = 90 self.RIGHT_DEFAULT = 90 self.MIDPOINT = 1500 self.corner_count = 0 self.SAFE_DIST = 250 self.starting_postion = 0 self.load_defaults()# what servo command (1000-2000) is straight forward for your bot? def load_defaults(self): """Implements the magic numbers defined in constructor""" self.set_motor_limits(self.MOTOR_LEFT, self.LEFT_DEFAULT) self.set_motor_limits(self.MOTOR_RIGHT, self.RIGHT_DEFAULT) self.set_servo(self.SERVO_1, self.MIDPOINT) def menu(self): """Displays menu dictionary, takes key-input and calls method""" ## This is a DICTIONARY, it's a list with custom index values. Python is cool. # Please feel free to change the menu and add options. print("\n *** MENU ***") menu = {"c": ("Calibrate", self.calibrate), "d": ("Dance", self.dance), "h": ("Hold position", self.hold_position), "n": ("Navigate", self.nav), "o": ("Obstacle count", self.obstacle_count), "q": ("Quit", self.quit), "v": ("Veer", self.slither) } # loop and print the menu... for key in sorted(menu.keys()): print(key + ":" + menu[key][0]) # store the user's answer ans = str.lower(input("Your selection: ")) # activate the item selected menu.get(ans, [None, self.quit])[1]() ''' **************** STUDENT PROJECTS **************** ''' def dance(self): # print("I don't know how to dance. 
\nPlease give my programmer a zero.") # higher ordered #check to see its safe if not self.safe_to_dance(): print("It is too dangerouse to dance") return else: print("time to dance") for x in range(3): self.twist() self.spin() self.chacha() self.shuffle() def scan(self): """Sweep the servo and populate the scan_data dictionary""" for angle in range(self.MIDPOINT-350, self.MIDPOINT+350, 50): self.servo(angle) self.scan_data[angle] = self.read_distance() def obstacle_count(self): """Does a 360 scan and returns the number of obstacles it sees""" found_something = False count = 0 starting_postion = self.get_heading() self.right(primary=60, counter=60) time.sleep(0.5) while self.get_heading() != starting_postion: if self.read_distance() < 250 and not found_something: found_something = True count += 1 print ("I found something") elif self.read_distance() > 250 and found_something: found_something = False print("I have a clear view") self.stop() print("I have found this many things: %d" % count) return count def quick_check(self): """looks around as it moves to check the distance""" for ang in range(self.MIDPOINT-150, self.MIDPOINT+151, 150): self.servo(ang) if self.read_distance() < self.SAFE_DIST: return False return True def nav(self): """can navigate a maze by itself""" print("-----------! NAVIGATION ACTIVATED !------------\n") print("-------- [ Press CTRL + C to stop me ] --------\n") print("-----------! NAVIGATION ACTIVATED !------------\n") #print("Wait a second. \nI can't navigate the maze at all. 
Please give my programmer a zero.") self.starting_postion = self.get_heading() while True: while self.quick_check(): self.fwd() time.sleep(0.01) self.corner_count = 0 #counts the corners so you can use it later to get out of them self.stop() self.corner_work() self.left_or_right() # how would you make it turn when it is going the wrong direction def corner_work(self): self.corner_count += 1 if self.corner_count > 3: self.corner_check() def left_or_right(self): """turn left or right depending on averaged scan""" #traversal left_total = 0 left_count = 0 right_total = 0 right_count = 0 self.scan() for ang, dist in self.scan_data.items(): if ang < self.MIDPOINT: right_total += dist right_count += 1 print("Angle: %d // dist: %d // right_count: %d" % (ang, dist, right_count)) else: left_total += dist left_count += 1 left_avg = left_total / left_count right_avg = right_total / right_count if left_avg > right_avg: self.turn_by_deg(-45) else: self.turn_by_deg(45) # if robot is facing the wrong way it will turn it around self.exit_bias() def corner_check(self): self.turn_by_deg(180) self.deg_fwd(360) # what happens when it turns 180 but then goes to starting position by turning another 180 landing in the same place? 
# hopefully will stop the robot from going circles if self.read_distance() > self.SAFE_DIST: return else: self.turn_to_deg(self.starting_postion) def exit_bias(self): current_position = self.get_heading() if abs(self.starting_postion - current_position) >180: self.turn_to_deg(self.starting_postion) if self.quick_check(): return else: self.left_or_right() def slither(self): """practice a smooth veer""" #writedown where we started starting_direction = self.get_heading() #start driving forward self.set_motor_limits(self.MOTOR_LEFT, self.LEFT_DEFAULT) self.set_motor_limits(self.MOTOR_RIGHT, self.RIGHT_DEFAULT) self.fwd() # throttl down the left motor for power in range(self.LEFT_DEFAULT, 60,-10): self.set_motor_power(self.MOTOR_LEFT, power) time.sleep(.5) #throttle up the left while lowring the right for power in range(60, self.LEFT_DEFAULT +1, 10): self.set_motor_power(self.MOTOR_LEFT, power) time.sleep(.5) # throttl down the right motor for power in range(self.RIGHT_DEFAULT, 60,-10): self.set_motor_power(self.MOTOR_RIGHT, power) time.sleep(.5) #throttle up the right while lowring the right for power in range(60, self.RIGHT_DEFAULT +1, 10): self.set_motor_power(self.MOTOR_RIGHT, power) time.sleep(.5) #straighten out while self.get_heading() != starting_direction: #if I need to veer right if self.get_heading() < starting_direction: self.set_motor_power(self.MOTOR_LEFT, 90) self.set_motor_power(self.MOTOR_RIGHT, 60) #if I need to veer left elif self.get_heading() > starting_direction: self.set_motor_power(self.MOTOR_LEFT, 60) self.set_motor_power(self.MOTOR_RIGHT, 90) time.sleep(.1) self.stop() def hold_position(self): start = self.get_heading() while True: time.sleep(.01) current = self.get_heading() if abs(start - current) > 20: self.turn_to_deg(start) def twist(self): """turns right then left""" self.right() time.sleep(1) self.stop() self.left() time.sleep(1) self.stop() def spin(self): """ one full circle right & one full circle left """ self.fwd() time.sleep(1) 
self.right() time.sleep(3) self.stop() self.back() time.sleep(1) self.left() time.sleep(3.5) self.stop() def chacha(self): """turns right the goes backward and turns its head left goes forward and its head turns right then turn left """ self.right() time.sleep(2) self.stop() self.back() time.sleep(1) self.servo(1000) time.sleep(1) self.stop() self.fwd() time.sleep(1) self.stop() self.servo(2000) time.sleep(1) self.stop() self.left() time.sleep(2) self.stop() def shuffle(self): """turns 20 degrees and then moves forward and back then turns -40 degrees and goes forward and back""" self.turn_by_deg(20) time.sleep(.25) self.fwd() time.sleep(1) self.stop() self.back() time.sleep(1) self.stop() self.turn_by_deg(-40) time.sleep(.25) self.fwd() time.sleep(1) self.back() time.sleep(1) self.stop() def safe_to_dance(self): for x in range(4): for ang in range(1000, 2001, 100): self.servo(ang) time.sleep(.1) if self.read_distance()<250: return False self.turn_by_deg(90) return True ########### ## MAIN APP if __name__ == "__main__": # only run this loop if this is the main file p = Piggy() if sys.version_info < (3, 0): sys.stdout.write("Sorry, requires Python 3.x\n") p.quit() try: while True: # app loop p.menu() except KeyboardInterrupt: # except the program gets interrupted by Ctrl+C on the keyboard. p.quit()
true
6802d69408cceaaabc72e7b2db1170b53912e5c1
Python
ephracis/hermes
/utilities/strings.py
UTF-8
983
3.453125
3
[ "MIT" ]
permissive
""" This file contains code for working with strings. """ import re def fixName(category): """ Turn the category name into human readable form. """ exceptions = ['a', 'an', 'of', 'the', 'is', 'and', 'with', 'by'] fixed = title_except(re.sub('_',' ',category), exceptions) if fixed == "App Wallpaper": fixed = "Live Wallpaper" elif fixed == "App Widgets": fixed = "Widgets" return fixed def fixRow(name): """ Escape special characters for LaTeX output. """ name = re.sub("%", "\\%", name) name = re.sub("&", "\\&", name) return name def title_except(s, exceptions): """ Titlelize a string with exceptions. """ word_list = re.split(' ', s.lower()) #re.split behaves as expected final = [word_list[0].capitalize()] for word in word_list[1:]: final.append(word in exceptions and word or word.capitalize()) return " ".join(final) def str2float(str): """ convert a string to a float """ try: return float(re.sub("[^0-9\.]", "", str)) except: return 0
true
a5f683df759304a3406cb32a6c7a4d16e9865230
Python
57066698/simpleRotate
/rotateExample/ex0.1-Calculate-RM-by-3P.py
UTF-8
568
2.671875
3
[]
no_license
# successed import numpy as np from rotateExample.scenes.rotateScene import RotateScene rotateScene = RotateScene() def cal(): rotation = rotateScene.axis1.transform.rotation p1 = [1, 0, 0] p2 = [0, 1, 0] p3 = [0, 0, 1] P = np.stack([p1, p2, p3], axis=1) p1_ = np.dot(rotation, [1, 0, 0]) p2_ = np.dot(rotation, [0, 1, 0]) p3_ = np.dot(rotation, [0, 0, 1]) P_ = np.stack([p1_, p2_, p3_], axis=1) R = np.dot(P_, np.linalg.inv(P)) rotateScene.axis2.transform.rotation = R rotateScene.scene.add(cal) rotateScene.start()
true
7087570f625e02b4e8ee59177ee2e42dce2fb941
Python
dundunmao/lint_leet
/mycode/lintcode/Binary Tree & Divide Conquer/95 Validate Binary Search Tree.py
UTF-8
3,171
3.890625
4
[]
no_license
# -*- encoding: utf-8 -*- # 给定一个二叉树,判断它是否是合法的二叉查找树(BST) # # 一棵BST定义为: # # 节点的左子树中的值要严格小于该节点的值。 # 节点的右子树中的值要严格大于该节点的值。 # 左右子树也必须是二叉查找树。 # 一个节点的树也是二叉查找树。 # 您在真实的面试中是否遇到过这个题? Yes # 样例 # 一个例子: # # 2 # / \ # 1 4 # / \ # 3 5 # 上述这棵二叉树序列化为 {2,1,4,#,#,3,5}. class TreeNode: def __init__(self, val): self.val = val self.left, self.right = None, None class Solution: """ @param root: The root of binary tree. @return: True if the binary tree is BST, or false """ def isValidBST(self, root): # write your code here validate, _, _ = self.helper(root) return validate def helper(self, root): if root is None: return True, float('-inf'), float('inf') validate_left, max_val_left, min_val_left = self.helper(root.left) validate_right, max_val_right, min_val_right = self.helper(root.right) if validate_left is False: return False, 0, 0 elif max_val_left >= root.val: return False, 0, 0 if validate_right is False: return False, 0, 0 elif min_val_right <= root.val: return False, 0, 0 return True, max(root.val, max_val_right), min(root.val, min_val_left) # 上法的简化 class Solution1: def isValidBST(self, root): validate, maxi, mini = self.helper(root) return validate def helper(self, root): if root is None: return True, float('-inf'), float('inf') left = self.helper(root.left) if not left[0] or left[1] >= root.val: return False, 0, 0 right = self.helper(root.right) if not right[0] or right[2] <= root.val: return False, 0, 0 return True, max(root.val, right[1]), min(root.val, left[2]) # leet class Solution_leet(object): def isValidBST(self, root): """ :type root: TreeNode :rtype: bool """ minVal = float('-inf') maxVal = float('inf') return self.helper(root, minVal, maxVal) def helper(self, root, minVal, maxVal): #minVal和maxVal是root的最小最大boundary if root is None: return True if root.val >= maxVal or root.val <= minVal: return False return self.helper(root.left, minVal, root.val) and self.helper(root.right, root.val, maxVal) if __name__ == '__main__': # TREE 1 # Construct the following tree # 5 # / \ # 3 
6 # / \ # 2 4 P = TreeNode(5) P.left = TreeNode(3) P.left.left = TreeNode(2) P.left.right = TreeNode(4) P.right = TreeNode(6) # Q = P.left.right # Q = Node(26) # Q.left = Node(10) # Q.left.left = Node(4) # Q.left.right = Node(6) # Q.right = Node(3) # # Q.right.right = Node(3) s = Solution1() print s.isValidBST(P)
true
2eab3b12267e49248803b14b55c9eecbe8418f26
Python
UmbertoFasci/sdm-python
/project/b_data_processing/scripts/prepare_files.py
UTF-8
2,865
2.796875
3
[ "MIT" ]
permissive
import os import datetime import pandas as pd def _only_chosen(bag_of_files, infile, file_end): f_list = [] for f in bag_of_files: if f.endswith(file_end): for rec in infile: if rec in f: f_list.append(f) return f_list def get_filelist(folder, infile, file_ending): filelist = os.listdir(folder) filelist = _only_chosen(filelist, infile, file_ending) return filelist def _get_tilename(tile, tilenames): if type(tilenames) == str: if tilenames in tile: return tilenames else: for tilename in tilenames: if tilename in tile: return tilename else: pass def _leap_or_regular(year): if ((year % 4) == 0 and (year % 100) != 0) or ((year % 400) == 0): return True else: return False def julian_date_to_month(name_str, lookup_table_leap, lookup_table_regular, tilenames): """Function for MODIS file name processing""" tilename = _get_tilename(name_str, tilenames) # Get year and Julian day from filename position = name_str.find('.') position = position + 2 position_end = position + 7 date = name_str[position:position_end] m_year = date[:4] m_year = int(m_year) julian_day = date[4:] julian_day = int(julian_day) # Check if year is leap or not is_leap = _leap_or_regular(m_year) # Find month of measurements if is_leap: lut_address = lookup_table_leap else: lut_address = lookup_table_regular lut_df = pd.read_csv(lut_address, index_col=0) cols = list(lut_df.columns) status = lut_df.isin([julian_day]).any().any() acquisition_time = '-1' nb_month = '-1' if status: for col in cols: if lut_df[col].isin([julian_day]).any(): str_month = col nb_month = lut_df.columns.get_loc(str_month) + 1 day = list(lut_df[col][lut_df[col] == julian_day].index)[0] # Set the date acquisition_time = datetime.date(year=m_year, month=nb_month, day=day) return [name_str, tilename, acquisition_time, m_year, nb_month] else: return [name_str, '-1', '-1', '-1', '-1', '-1'] def create_modis_dataframe(hdf_files, lookup_table_leap, lookup_table_regular, tilenames, sort_by_date=True): data = [] for f in hdf_files: 
data.append(julian_date_to_month(f, lookup_table_leap, lookup_table_regular, tilenames)) df = pd.DataFrame(data, columns=['filename', 'tile type', 'acquisition time', 'year', 'month']) if sort_by_date: df = df.sort_values(['acquisition time', 'filename']) df.drop_duplicates(inplace=True) return df
true
aad7ef7b5caa7c8c105228666f794a1fccdb45d4
Python
kidusasfaw/addiscoder_2016
/labs/server_files/lab7/fromListToMatrix/fromListToMatrix.py
UTF-8
725
3.234375
3
[]
no_license
def fromListToMatrix(): num_nodes = int(raw_input()) list_graph = [] for i in range(num_nodes): nodes = raw_input() int_nodes = [int(elem) for elem in nodes.split()] list_graph.append(int_nodes) print list_graph ans = [] for i in xrange(len(list_graph)): ans += [[0]*len(list_graph)] for i in xrange(len(list_graph)): for x in list_graph[i]: ans[i][x] = 1 return ans ### DO NOT EDIT ANY CODE BELOW THE LINE ### num_nodes = int(raw_input()) list_graph = [] for i in range(num_nodes): nodes = raw_input() int_nodes = [int(elem) for elem in nodes.split()] list_graph.append(int_nodes) print fromListToMatrix(list_graph)
true
61aa51604f9db9e6c0e3381a61b2f50b69b8ece5
Python
kervynj/Trending-Value-Python-web-data-via-CGI
/data_search.py
UTF-8
1,299
2.65625
3
[]
no_license
#!/usr/bin/python import cgi import csv import cgitb; cgitb.enable() print "Content-Type: text/html\n" #Get requested company from user form input form = cgi.FieldStorage() ticker = form.getvalue('ticker') if ".to" in ticker: #Open TSX daily data TSX_object = open('TSX_master.csv','rU') TSX_data = csv.reader(TSX_object) data = [] #Search File for requested company for row in TSX_data: data.append(row) for row in data: if row[0] == ticker: retrieved_values = row break else: #Open NYSE daily data NYSE_object = open('NYSE_master.csv','rU') NYSE_data = csv.reader(NYSE_object) data = [] #Search File for requested company for row in NYSE_data: data.append(row) for row in data: if row[0] == ticker.upper(): retrieved_values = row break try: retrieved_values except NameError: print """<p>Invalid Ticker Entry</p>""" else: print """ <p> <strong>Company:</strong> %s<br><strong>Overall Rank:</strong> %s<br><strong>6 Month Change:</strong> %s<br>Price/Earning: %s<br>Price/Sales: %s<br>Price/Book: %s<br>Dividend Yield: %s<br>Market Cap/EBITDA %s </p> """ %(retrieved_values[1],retrieved_values[2],retrieved_values[3],retrieved_values[4],retrieved_values[5],retrieved_values[6],retrieved_values[7],retrieved_values[8])
true
cd7bc9b61c7deb6ccfe5935067af49034be8b2d0
Python
bopopescu/pythonFist
/src/ziptest/readercsv.py
UTF-8
140
2.703125
3
[]
no_license
import csv with open('names.csv', newline='') as f: reader = csv.reader(f) for row in reader: print(row)
true
20c994ed0208d461a43e71af763b9eba009a08a9
Python
ZhangYet/vanguard
/myrtle/befor0225/remove_invalid_parentheses.py
UTF-8
867
3.484375
3
[]
no_license
# https://leetcode.com/problems/remove-invalid-parentheses/ from typing import List class Solution: def removeInvalidParentheses(self, s: str) -> List[str]: def _is_valid(s: str): stack = [] for c in s: if c == '(': stack.append(c) if c == ')': try: stack.pop() except IndexError: return False return not stack res = [] for i in range(len(s)): sub = s[:i] + s[(i+1):] if _is_valid(sub): res.append(sub) if _is_valid(s): res.append(s) if not res: return [""] return list(set(res)) def test_case(s: str): i = Solution() return i.removeInvalidParentheses(s)
true
4c502c21252d98482a5e2a6de164552497851ffa
Python
malhotra1432/rasa-1
/rasa/shared/utils/common.py
UTF-8
3,897
3.15625
3
[ "Apache-2.0", "MIT", "LicenseRef-scancode-generic-cla" ]
permissive
import importlib import logging from typing import Text, Dict, Optional, Any, List, Callable, Collection logger = logging.getLogger(__name__) def class_from_module_path( module_path: Text, lookup_path: Optional[Text] = None ) -> Any: """Given the module name and path of a class, tries to retrieve the class. The loaded class can be used to instantiate new objects. """ # load the module, will raise ImportError if module cannot be loaded if "." in module_path: module_name, _, class_name = module_path.rpartition(".") m = importlib.import_module(module_name) # get the class, will raise AttributeError if class cannot be found return getattr(m, class_name) else: module = globals().get(module_path, locals().get(module_path)) if module is not None: return module if lookup_path: # last resort: try to import the class from the lookup path m = importlib.import_module(lookup_path) return getattr(m, module_path) else: raise ImportError(f"Cannot retrieve class from path {module_path}.") def all_subclasses(cls: Any) -> List[Any]: """Returns all known (imported) subclasses of a class.""" return cls.__subclasses__() + [ g for s in cls.__subclasses__() for g in all_subclasses(s) ] def module_path_from_instance(inst: Any) -> Text: """Return the module path of an instance's class.""" return inst.__module__ + "." + inst.__class__.__name__ def sort_list_of_dicts_by_first_key(dicts: List[Dict]) -> List[Dict]: """Sorts a list of dictionaries by their first key.""" return sorted(dicts, key=lambda d: list(d.keys())[0]) def lazy_property(function: Callable) -> Any: """Allows to avoid recomputing a property over and over. The result gets stored in a local var. Computation of the property will happen once, on the first call of the property. 
All succeeding calls will use the value stored in the private property.""" attr_name = "_lazy_" + function.__name__ @property def _lazyprop(self): if not hasattr(self, attr_name): setattr(self, attr_name, function(self)) return getattr(self, attr_name) return _lazyprop def transform_collection_to_sentence(collection: Collection[Text]) -> Text: """Transforms e.g. a list like ['A', 'B', 'C'] into a sentence 'A, B and C'.""" x = list(collection) if len(x) >= 2: return ", ".join(map(str, x[:-1])) + " and " + x[-1] return "".join(collection) def minimal_kwargs( kwargs: Dict[Text, Any], func: Callable, excluded_keys: Optional[List] = None ) -> Dict[Text, Any]: """Returns only the kwargs which are required by a function. Keys, contained in the exception list, are not included. Args: kwargs: All available kwargs. func: The function which should be called. excluded_keys: Keys to exclude from the result. Returns: Subset of kwargs which are accepted by `func`. """ excluded_keys = excluded_keys or [] possible_arguments = arguments_of(func) return { k: v for k, v in kwargs.items() if k in possible_arguments and k not in excluded_keys } def mark_as_experimental_feature(feature_name: Text) -> None: """Warns users that they are using an experimental feature.""" logger.warning( f"The {feature_name} is currently experimental and might change or be " "removed in the future 🔬 Please share your feedback on it in the " "forum (https://forum.rasa.com) to help us make this feature " "ready for production." ) def arguments_of(func: Callable) -> List[Text]: """Return the parameters of the function `func` as a list of names.""" import inspect return list(inspect.signature(func).parameters.keys())
true
760b44eab9274bd3bce1d3d57c1780f45b3d5236
Python
Jokezor/Instagram
/insta_main.py
UTF-8
882
2.59375
3
[]
no_license
"""Entry point for the Instagram tooling.

Wires together the scraper, bot and statistics helpers, making sure the
backing storage exists before anything tries to use it.
"""
# Own code
import init
import Database_talk


def setup(path_db, path_acc):
    """Create the database and the per-account tables it needs.

    Args:
        path_db: Location of the database.
        path_acc: Location of the account definitions.
    """
    # Database first, then one set of tables for each account inside it.
    init.setup_database(path_db, path_acc)
    Database_talk.create_tables(path_db, path_acc)
    # Insert information about themes into the database


if __name__ == '__main__':
    # init knows where everything lives: [database, accounts, pages].
    paths = init.Get_path_info()
    path_db, path_acc, path_pages = paths[0], paths[1], paths[2]
    # Storage must exist before any scraping can persist results.
    setup(path_db, path_acc)
    # Now we can go along and start to scrape for the information we need
    # Database_talk.insert_data(path_db, path_acc)
true