blob_id
stringlengths
40
40
language
stringclasses
1 value
repo_name
stringlengths
5
133
path
stringlengths
2
333
src_encoding
stringclasses
30 values
length_bytes
int64
18
5.47M
score
float64
2.52
5.81
int_score
int64
3
5
detected_licenses
listlengths
0
67
license_type
stringclasses
2 values
text
stringlengths
12
5.47M
download_success
bool
1 class
0a28199e21e6e8edde83df8963dff7bb8ea72e9c
Python
testingforAI-vnuuet/AdvGeneration
/src/attacker/l_bfgs.py
UTF-8
3,484
2.6875
3
[]
no_license
from __future__ import absolute_import import tensorflow as tf from tensorflow import keras from attacker.constants import * from data_preprocessing.mnist import MnistPreprocessing from utility.filters.filter_advs import * from utility.statistics import * logger = MyLogger.getLog() class L_BFGS: def __init__(self, classifier, epsilon=0.5, target=7, num_iters=20, weight=0.1): """ :param classifier: target cnn model :param epsilon: learning rate :param target: target attack :param num_iters: number of iterations :param weight: balance between target and L2 """ self.epsilon = epsilon self.target = target self.weight = weight self.classifier = classifier self.target_label = keras.utils.to_categorical(target, MNIST_NUM_CLASSES, dtype='double') self.num_iters = num_iters @staticmethod def loss_func(x_star, origin, model, target): return tf.keras.losses.mean_squared_error(x_star, origin) + \ tf.keras.losses.categorical_crossentropy(model.predict(np.array([x_star]))[0], target) @staticmethod def create_adversarial_pattern_two_losses(gen_image, input_image, target_label, pretrained_model, weight): with tf.GradientTape() as tape: tape.watch(gen_image) prediction = pretrained_model(gen_image) final_loss = (1 - weight) * tf.keras.losses.mean_squared_error(gen_image, input_image) + weight * tf.keras.losses.categorical_crossentropy( prediction[0], target_label) gradient = tape.gradient(final_loss, gen_image) return gradient def create_adv_single_image(self, image): gen_img = np.array([image]) image = np.array([image]) for i in range(self.num_iters): grad = self.create_adversarial_pattern_two_losses(tf.convert_to_tensor(gen_img, dtype='double'), image, self.target_label, self.classifier, self.weight) gen_img -= self.epsilon * grad gen_img = np.clip(gen_img, 0, 1) return gen_img def create_adversaries(self, images): result = [self.create_adv_single_image(image) for image in images] return np.array(result) if __name__ == '__main__': START_SEED = 0 END_SEED = 1000 TARGET = 7 
ATTACKED_CNN_MODEL = CLASSIFIER_PATH + '/pretrained_mnist_cnn1.h5' classifier = keras.models.load_model(ATTACKED_CNN_MODEL) (trainX, trainY), (testX, testY) = keras.datasets.mnist.load_data() pre_mnist = MnistPreprocessing(trainX, trainY, testX, testY, START_SEED, END_SEED, TARGET) trainX, trainY, testX, testY = pre_mnist.preprocess_data() logger.debug('Creating adversarial examples: ') lbfgs = L_BFGS(classifier=classifier, target=TARGET) result_imgs = lbfgs.create_adversaries(trainX) result_origin_imgs, result_origin_confidients, result_gen_imgs, result_gen_confidents = filter_advs(classifier, trainX, result_imgs, TARGET) logger.debug('L-BFGS done')
true
865c240fe2cca68aec2c864e56f63c0d64959cb5
Python
0xqq/NLP-project
/bert/predict.py
UTF-8
1,695
2.71875
3
[]
no_license
#!/usr/bin/python # -*- coding: UTF-8 -*- #Author zhang import tensorflow as tf import time from tensorflow.keras.models import model_from_json from importlib import import_module from utils import build_dataset, get_time_dif, build_net_data import argparse parser = argparse.ArgumentParser(description='Chinese Text Classification') parser.add_argument('--model',default="BruceBertCNN", type=str, help='choose a model: BruceBert, BruceBertCNN,BruceBertRNN') args = parser.parse_args() if __name__ == '__main__': dataset = 'BruceNews' # 数据集 model_name = args.model # 'TextRCNN' # TextCNN x = import_module('models.' + model_name) #一个函数运行需要根据不同项目的配置,动态导入对应的配置文件运行。 config = x.Config(dataset) #进入到对应模型的__init__方法进行参数初始化 start_time = time.time() print("Loading data...") train_data, dev_data, test_data = build_dataset(config) time_dif = get_time_dif(start_time) print("Time usage:", time_dif) test_x, test_y = build_net_data(test_data, config) # train model = x.MyModel(config) optimizer = tf.keras.optimizers.Adam(learning_rate=config.learning_rate) model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy']) model.fit( x=test_x, y=test_y, epochs=0, ) model.load_weights(config.save_path) # = model.to_json() #model = model_from_json(json_string) # 评估模型 score = model.evaluate(test_x, test_y, verbose=2) print('Test score:', score[0]) print('Test accuracy:', score[1])
true
01249d9362f18c753a4c237aa30c29f0c76fa2e5
Python
TheKermitFrog/Homiefy
/main/models.py
UTF-8
3,768
2.703125
3
[]
no_license
from django.db import models from register.models import User, Partner import datetime from pytz import timezone # Create your models here. class Service(models.Model): """ This model contains information about the service being provided. PK: Business Partner, Service Type. A business partner can provide many kind of services (e.g., a salon can provide haircut, pati&mani etc.) So the combination of the business partner and the service type determines a single service. :Business_Partner: Connection to register app's User model. :Type: Type of service. :Max_Capacity: How many such kind of service can the business partner produce at a certain time slot. :price: Price of the service. """ businessPartner = models.ForeignKey( Partner, blank=False, null=False, on_delete=models.CASCADE ) type = models.CharField(max_length=50, blank=False, null=False) maxCapacity = models.IntegerField(blank=False, default=1) price = models.DecimalField(max_digits=36, decimal_places=2, blank=False, null=False) DURATION = [ (30, '30 Minutes'), (60, '1 Hour'), (90, '1.5 Hour'), (120, '2 Hour'), (180, '3 Hour') ] duration = models.IntegerField(blank=False, choices=DURATION, default=60) class Meta: unique_together = (("businessPartner", "type"), ) def __str__(self): return '{} by {}'.format(self.type, self.businessPartner) # class TimeSlot(models.Model): # """ # This model contains information about the time slot. # PK: Business Partner, Service Type # Each service of every business partner has one time slot table contains date and time. # This is connected to service model, it only appears when a service being created. # :Service_Type: Foreign key, connection to service model. # :date: Date of the time slot. # :startTime: Starting time of the time slot. # :endTime: Ending time of the time slot. 
# """ # # serviceType = models.OneToOneField( # # Service, # # blank=False, # # null=False, # # on_delete=models.CASCADE, # # primary_key=True # # ) # date = models.DateField() # startTime = models.TimeField() # endTime = models.TimeField() class Appointment(models.Model): """ This model contains information about the booking of a service at a certain time. PK: Business Partner, Service Type, Time Slot. Meaning that the combination of a business, a service and a certain time slot determines a single booking. :timeSlot: Foreign key inherited from TimeSlot model. :status: Current status of the booking. :creation_date: The time when the Appointment has been made (potentionally use for notification) """ customer = models.ForeignKey(User, limit_choices_to={'isUser': True}, on_delete=models.DO_NOTHING) partner = models.ForeignKey(Partner, on_delete=models.DO_NOTHING) service = models.ForeignKey(Service, on_delete=models.DO_NOTHING) # timeSlot = models.OneToOneField(TimeSlot, on_delete=models.PROTECT) STATUS = [ ('BOOKED', 'Booked'), ('CANCELLED', 'Cancelled') ] status = models.CharField(max_length=20, choices=STATUS, default='BOOKED') startTime = models.DateTimeField() endTime = models.DateTimeField() creationTime = models.DateTimeField(auto_now_add=True) @property def is_complete(self): if datetime.datetime.now() > self.endTime.astimezone(timezone('America/Chicago')).replace(tzinfo=None): return True return False def __str__(self): return '{}-{}-{}'.format(self.customer, self.partner, self.service)
true
92341c8acfddf22b4dde2e5adfd08a4b7a990e97
Python
ylc015/interview_boggle_h
/solution.py
UTF-8
9,756
3.109375
3
[]
no_license
#author Yik Lun Chan # 2/5/2016 import sys #used by build_tries to see if a word exists FIN = 'fin' TOP = 'top' L_TOP = 'left_top' L_BOT = 'left_bottom' BOT = 'bottom' R_TOP = 'right_top' R_BOT = 'right_bottom' NO_EDGE = 'no_edge' #customized queue class class Queue(object): def __init__(self): self.q = [] def get(self): if len(self.q) < 1: return None else: value = self.q[0] self.q = self.q[1:] return value def pop(self): if len(self.q) == 0: return None return self.q[0] def put(self, data): self.q.append(data) def empty(self): return len(self.q) == 0 def tail(self): if len(self.q) == 0: return None return self.q[-1] def size(self): return len(self.q) def toString(self): print "queue: *********" for obj in self.q: print obj.toString() print "****************" #cell class to present # # top # ___ # right_top / \ left_top # right_bottom \___/ left_bottom # bottom class Cell(object): #this class present a cell def __init__(self, value): self.value = value self.top = None self.left_top = None self.left_bottom = None self.bottom = None self.right_bottom = None self.right_top = None #used for DFS self.visited = False self.isPrefix = False def is_edge_full(self, is_first): if self.top is None: return False if self.left_top is None : return False if self.left_bottom is None: return False if self.bottom is None: return False if self.right_bottom is None: return False #this is a special case where the cell is the head of the loop # __ # / a\__ # \__/ \ # / \ / # like cell a if self.right_top is None and not is_first: return False return True #find next availabe edge def next_edge(self): #to handle the presendence of top and right top if self.top is None and self.left_top is None: return TOP elif self.left_top is None: return L_TOP elif self.left_bottom is None: return L_BOT elif self.bottom is None: return BOT elif self.right_bottom is None: return R_BOT elif self.right_top is None: return R_TOP elif self.top is None: return TOP else: return NO_EDGE def next_cw_edge(self, 
tag): if tag == TOP: return L_TOP elif tag == L_TOP: return L_BOT elif tag == L_BOT: return BOT elif tag == BOT: return R_BOT elif tag == R_BOT: return R_TOP elif tag == R_TOP: return TOP #add cell to this cell def set_adj_edge(self, other, tag): #find an available edge next_edge = '' if tag != '': next_edge = tag else: next_edge = self.next_edge() if next_edge == TOP: self.top = other other.bottom = self #check neighbors if self.right_top is not None: other.right_bottom = self.right_top self.right_top.left_top = other if self.left_top is not None: other.left_bottom = self.left_top self.left_top.right_top = other elif next_edge == L_TOP: self.left_top = other other.right_bottom = self if self.top is not None: other.right_top = self.top self.top.left_bottom = other if self.left_bottom is not None: other.bottom = self.left_bottom self.left_bottom.top = other elif next_edge == L_BOT: self.left_bottom = other other.right_top = self if self.left_top is not None: other.top = self.left_top self.left_top.bottom = other if self.bottom is not None: other.right_bottom = self.bottom self.bottom.left_top = other elif next_edge == BOT: self.bottom = other other.top = self if self.left_bottom is not None: other.left_top = self.left_bottom self.left_bottom.right_bottom = other if self.right_bottom is not None: other.right_top = self.right_bottom self.right_bottom.left_bottom = other elif next_edge == R_BOT: self.right_bottom = other other.left_top = self if self.bottom is not None: other.left_bottom = self.bottom self.bottom.right_top = other if self.right_top is not None: other.top = self.right_top self.right_top.bottom = other elif next_edge == R_TOP: self.right_top = other other.left_bottom = self if self.right_bottom is not None: other.bottom = self.right_bottom self.right_bottom.top = other if self.top is not None: other.left_top = self.top self.top.right_bottom = other return next_edge def toString(self): top = '' left_top = '' left_bottom = '' bottom = '' right_bottom = '' 
right_top = '' if self.top is None: top = '?' else: top = self.top.value if self.left_top is None: left_top = '?' else: left_top = self.left_top.value if self.left_bottom is None: left_bottom = '?' else: left_bottom = self.left_bottom.value if self.bottom is None: bottom = '?' else: bottom = self.bottom.value if self.right_bottom is None: right_bottom = '?' else: right_bottom = self.right_bottom.value if self.right_top is None: right_top = '?' else: right_top = self.right_top.value print "-----" print " %s " % (top) print "%s %s %s" % (right_top, self.value, left_top) print "%s %s" % (right_bottom, left_bottom) print " %s " % (bottom) print "-----" def __str__(self): return self.value def build_prefixes_words(data): prefixes = set() words = [] for word in data: words.append(word) prefix = '' for letter in word: prefix += letter prefixes.add(prefix) return words, prefixes # build the honeycomb map def build_comb(data): #first number is always the number of layers layers = 1 #this will be the heart cell of the comb head = Cell(data[1]) #this is list that stores all the cells cells = [] cells.append(head) #old candidates old_can = Queue() #holding candidates for next round temp_q = Queue() for letters in data[2:]: candidates = Queue() #this indicates whether all the cells has been put on the comb loop_closed = False #push all cells into a queue for letter in letters: cell = Cell(letter) candidates.put(cell) temp_q.put(cell) #to indicate if this is the first cell of the loop is_first = True # we need to avoid base case if layers == 1: is_first = False while not loop_closed: #head is the cell that we want to connect our new candidates(cell) with #we would append new cells to head until its full #however, there is one base case where head is the first node of the cycle. 
while not head.is_edge_full(is_first): next_cell = None if candidates.empty(): loop_closed = True break else: next_cell = candidates.get() head.set_adj_edge(next_cell, '') #fetch next head from old candidates if old_can.empty(): loop_closed = True #now we need to connect the last cell of the loop to the first cell # the direction is always right bottom temp_q.pop().set_adj_edge(next_cell, R_BOT) #update old candidates info while not temp_q.empty(): cell = temp_q.get() old_can.put(cell) cells.append(cell) if not old_can.empty(): head = old_can.get() else: #choose next head head = old_can.get() if is_first: is_first = False layers += 1 return cells #perform dfs to find words given a starting point def findwords(start_cell, words, prefixes): stack = [ start_cell ] str = '' result = [] counter = 0 while stack: current_cell = stack.pop() #set as a delimited to indicate that all neight of this node has #been visited, strip off the last element of the string # for instance, S->T->D->K->F where DKF is the neighthours of cell T # the stack should maintain the form [S,T,D,K,F] where T isPrefix is set to false # once we have visted cell, D, k, F. 
we should strip 'T' from the str and start # from string 'S' again if current_cell.isPrefix: current_cell.isPrefix = False str = str[:-1] continue str += current_cell.value current_cell.visited = True if str in words: result.append(str) if str in prefixes: current_cell.isPrefix = True stack.append(current_cell) if current_cell.top is not None and not current_cell.top.visited: stack.append(current_cell.top) if current_cell.left_top is not None and not current_cell.left_top.visited: stack.append(current_cell.left_top) if current_cell.left_bottom is not None and not current_cell.left_bottom.visited: stack.append(current_cell.left_bottom) if current_cell.bottom is not None and not current_cell.bottom.visited: stack.append(current_cell.bottom) if current_cell.right_bottom is not None and not current_cell.right_bottom.visited: stack.append(current_cell.right_bottom) if current_cell.right_top is not None and not current_cell.right_top.visited: stack.append(current_cell.right_top) else: current_cell.visited = False str = str[:-1] test = '' for cell in stack: test += cell.value return result def findAllwords(cells, words, prefixes): result = [] for cell in cells: result += findwords(cell, words, prefixes) # reset all node to unvisited for cell in cells: cell.visited = False return result def main(argv): #read both files from terminal honey_txt = [] with open(argv[0], "r") as ins: for line in ins: honey_txt.append(line.strip('\n')) dictionary = [] with open(argv[1], "r") as ins: for line in ins: dictionary.append(line.strip('\n')) cells = build_comb(honey_txt) words, prefixes = build_prefixes_words(dictionary) result = findAllwords(cells, words, prefixes) result = set(result) for word in sorted(result): print word if __name__ == "__main__": main(sys.argv[1:])
true
1153836ab3b446370fd909c13cbd68416387b192
Python
JohnyLi/UEMSystem
/util/Time.py
UTF-8
1,129
3.21875
3
[]
no_license
# coding=utf-8 import time # 输出本地时间 从年到秒 举例 "2018-07-20 20:00:00" def from_year_to_second(): return time.strftime("%Y-%m-%d %H:%M:%S",time.localtime()) # 输出本地时间 从年到秒 举例 "2018-07-20_20:00:00" def from_year_to_second_secure(): return time.strftime("%Y-%m-%d_%H:%M:%S",time.localtime()) # 输出本地时间 从年到分 举例 "2018-07-20 20:00" def from_year_to_minute(): return time.strftime("%Y-%m-%d %H:%M", time.localtime()) def get_year(): return time.strftime("%Y",time.localtime()) # 将天转换成秒 def change_day_to_second(day): seconds = float(day * 24 * 60 * 60) return seconds # 输出本地时间 从年到天 举例 "2018-07-20" def from_year_to_day(): return time.strftime("%Y-%m-%d", time.localtime()) # 获取现在的本地时间 def get_unix_time(): return time.time() def get_unix_time_by_format(it_time): good_time = time.strptime(it_time,"%Y-%m-%d %H:%M:%S") return time.mktime(good_time) def get_unix_time_by_date(date): good_time = time.strptime(date, "%Y-%m-%d") return time.mktime(good_time)
true
b203606b9141338e7cf841125f920fc41483de0f
Python
dhruvshah0208/EE-214-Digital-Lab
/Expt2/code/test_generation.py
UTF-8
290
2.609375
3
[]
no_license
in_len = 8 out_len = 8 f = open('TRACEFILE.txt', 'w') for i in range(2**in_len): in_vec = "{:b}".format(i).zfill(8) A = int(in_vec[:4], 2) B = int(in_vec[4:], 2) out_vec = "{:b}".format(A*B).zfill(8) stri = in_vec + ' ' + out_vec + ' ' + '11111111' f.write(stri + '\n') f.close()
true
2483a47799e9010b7f9bca92964f724672551476
Python
c235gsy/Sustech_AI_B
/weeks7-8/prac4_Pacman/ghostAgents.py
UTF-8
6,072
2.9375
3
[]
no_license
# ghostAgents.py # -------------- # Licensing Information: You are free to use or extend these projects for # educational purposes provided that (1) you do not distribute or publish # solutions, (2) you retain this notice, and (3) you provide clear # attribution to UC Berkeley, including a link to http://ai.berkeley.edu. # # Attribution Information: The Pacman AI projects were developed at UC Berkeley. # The core projects and autograders were primarily created by John DeNero # (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu). # Student side autograding was added by Brad Miller, Nick Hay, and # Pieter Abbeel (pabbeel@cs.berkeley.edu). from game import Agent from game import Actions from game import Directions import random from util import manhattanDistance import util class GhostAgent( Agent ): def __init__( self, index ): self.index = index def getAction( self, state ): dist = self.getDistribution(state) if len(dist) == 0: return Directions.STOP else: return util.chooseFromDistribution( dist ) def getDistribution(self, state): "Returns a Counter encoding a distribution over actions from the provided state." util.raiseNotDefined() class RandomGhost( GhostAgent ): "A ghost that chooses a legal action uniformly at random." def getDistribution( self, state ): dist = util.Counter() for a in state.getLegalActions( self.index ): dist[a] = 1.0 dist.normalize() return dist class DirectionalGhost( GhostAgent ): "A ghost that prefers to rush Pacman, or flee when scared." 
def __init__( self, index, prob_attack=0.8, prob_scaredFlee=0.8 ): self.index = index self.prob_attack = prob_attack self.prob_scaredFlee = prob_scaredFlee def getDistribution( self, state ): # Read variables from state ghostState = state.getGhostState( self.index ) legalActions = state.getLegalActions( self.index ) pos = state.getGhostPosition( self.index ) isScared = ghostState.scaredTimer > 0 speed = 1 if isScared: speed = 0.5 actionVectors = [Actions.directionToVector( a, speed ) for a in legalActions] newPositions = [( pos[0]+a[0], pos[1]+a[1] ) for a in actionVectors] pacmanPosition = state.getPacmanPosition() # Select best actions given the state distancesToPacman = [manhattanDistance(pos, pacmanPosition ) for pos in newPositions] if isScared: bestScore = max( distancesToPacman ) bestProb = self.prob_scaredFlee else: bestScore = min( distancesToPacman ) bestProb = self.prob_attack bestActions = [action for action, distance in zip( legalActions, distancesToPacman ) if distance == bestScore] # Construct distribution dist = util.Counter() for a in bestActions: dist[a] = bestProb / len(bestActions) for a in legalActions: dist[a] += ( 1-bestProb ) / len(legalActions) dist.normalize() return dist class MinimaxGhost(GhostAgent): """ Your minimax agent (question 1) useage: python2 pacman.py -p ExpectimaxAgent -l newLayout -g MinimaxGhost -a depth=4 python2 pacman.py -l newLayout -g MinimaxGhost """ "*** YOUR CODE HERE ***" def getDistribution(self, state): ActionScore = [] numAgent = 3 depth = 2 def rmStop(List): return [x for x in List if x != 'Stop'] def evaluationFunction(state, sindex): dis1 = manhattanDistance(state.getPacmanPosition(),state.getGhostPosition(1)) dis2 = manhattanDistance(state.getPacmanPosition(),state.getGhostPosition(2)) if sindex == 1: dis = dis1 else: dis = dis2 if manhattanDistance(state.getGhostPosition(1), state.getGhostPosition(2)) <= 2: return 0 if dis2+dis1 > dis1*dis1: return 1000 - dis2+dis1 - dis else: return 1000 - dis1*dis1 - dis 
def get_index(k): if k == 1: return 2 if k == 2: return 1 def miniMax(s, iterCount, sindex): if iterCount >= depth * numAgent or s.isWin () or s.isLose (): return evaluationFunction(s,sindex) if iterCount % numAgent == 2: # Pacman min result = 1e10 for a in rmStop(s.getLegalActions(0)): sdot = s.generateSuccessor(0, a) result = min(result, miniMax (sdot, iterCount + 1, sindex)) return result elif iterCount % numAgent == 0: result = -1e10 for a in rmStop (s.getLegalActions(sindex)): sdot = s.generateSuccessor(sindex, a) result = max(result, miniMax (sdot, iterCount + 1, sindex)) if iterCount == 0: ActionScore.append (result) return result elif iterCount % numAgent == 1: result = -1e10 for a in rmStop(s.getLegalActions(get_index(sindex))): sdot = s.generateSuccessor(get_index(sindex), a) result = max(result, miniMax(sdot, iterCount + 1, sindex)) return result miniMax(state, 0, self.index) print ActionScore print state.getLegalActions (self.index) #while len(state.getLegalActions(self.index)) dre = state.getLegalActions(self.index)[ActionScore.index(max(ActionScore))] print dre dist = util.Counter () for a in state.getLegalActions(self.index): dist[a] = 0 dist[dre] = 1 dist.normalize() return dist def betterEvaluationFunctionGhost(currentGameState): """ Ghost evaluation function """ # Abbreviation ghostEval = betterEvaluationFunctionGhost
true
5ffd9ff793d3d97ed724b90ff7fc38f6d2d3c938
Python
sonqdinh/WB_week1
/Week1/isPowerOfTwo.py
UTF-8
264
2.84375
3
[]
no_license
class Solution: def isPowerOfTwo(self, n: int) -> bool: # All power of two has binary forms: # 10000000 so clear the MSB of power of two will return 0 if n == 0: return False else: return n & (n - 1) == 0
true
5f611c0e23293bcb077ac2a976177e1d4cec3fe4
Python
BloodWorkXGaming/WarframeRelicReader
/warframe/image_cleanser.py
UTF-8
779
2.84375
3
[]
no_license
import numpy as np from PIL import Image r_text = 189 g_text = 169 b_text = 102 black = np.array([0, 0, 0], dtype=np.uint8) white = np.array([255, 255, 255], dtype=np.uint8) def cleanse_numpy(img: Image) -> Image: def test(v): # print(v) if abs(r_text - v[0]) < 70 and abs(g_text - v[1]) < 70 and abs(b_text - v[2]) < 70: return black return white arr = np.asarray(im) l = np.apply_along_axis(test, 2, arr) # arr = list(im.getdata()) # l = list(map(test, arr)) out = Image.fromarray(l) return out # out.show() # change all pixels that don't match color to black ##data[np.logical_not(mask)] = black im: Image = Image.open("C:/Users/jonas/Documents/Github/Warframe/src/warframesc1587918403370823000.png")
true
20b20dfecab9a24519213d9e085551d210d3e11d
Python
jacekzema/Simple_HJ_Method
/HJ_Method.py
UTF-8
4,142
3.234375
3
[]
no_license
import time start = time.clock() t=5 #dlugosc kroku b=0.1 #wspolczynnik korygujacy s=0.001 #dokladnosc obliczen d=1 #kierunek x1=-8 # punkty poczatkowe x2=-89 x1p=x1 x2p=x2 def funkcja(x1,x2): global y y=(2*(x1-25.4)**2)+(4*(x2-7.5)**4) return y def funkcjaX1add(): global x1 x1=x1+t*d return x1 def funkcjaX2add(): global x2 x2=x2+t*d return x2 def funkcjaX1sub(): global x1 x1=x1-t*d return x1 def funkcjaX2sub(): global x2 x2=x2-t*d return x2 while t>=s: #print(x1,x2) q1=funkcja(x1, x2) # poczatkowa funkcjaX1add() q2=funkcja(x1, x2) # po dodaniu do x1 if q2<q1: print("X1 +",x1) x1min=True funkcjaX2add() # dodajemy do x2 q3=funkcja(x1,x2) # wartosc funkcji dla x1 i x2+ if q2<q3: # jezeli funkcja x1,x2+ jest wieksza od poczatkowej funkcjaX2sub() funkcjaX2sub() q4=funkcja(x1,x2) if q2<q4: # jezeli po odjeciu funkcja jest wieksza to funkcjaX2add() print("X2 MIN",x2) x2min=False elif q4<q2: print("X2 - o",x2) x2min=True elif q3<q2: print("X2 + o",x2) x2min=True elif q1<q2: # jezeli po dodaniu jest wieksza to rob to funkcjaX1sub() funkcjaX1sub() q3=funkcja(x1,x2) if q3<q1: print("X1 -",x1) x1min=True funkcjaX2add() # dodajemy do x2 q4=funkcja(x1,x2) # wartosc funkcji dla x1 i x2+ if q3<q4: # jezeli funkcja x1,x2+ jest wieksza od poczatkowej funkcjaX2sub() funkcjaX2sub() q5=funkcja(x1,x2) if q3<q5: # jezeli po odjeciu funkcja jest wieksza to funkcjaX2add() print("X2 MIN",x2) x2min=False elif q5<q3: print("X2 - o",x2) x2min=True elif q4<q3: print("X2 + o",x2) x2min=True elif q1<q3: funkcjaX1add() q3=funkcja(x1,x2) print("X1 MIN",x1) x1min=False funkcjaX2add() # dodajemy do x2 q4=funkcja(x1,x2) # wartosc funkcji dla x1 i x2+ if q3<q4: # jezeli funkcja x1,x2+ jest wieksza od poczatkowej funkcjaX2sub() funkcjaX2sub() q5=funkcja(x1,x2) if q3<q5: # jezeli po odjeciu funkcja jest wieksza to funkcjaX2add() print("X2 MIN",x2) x2min=False elif q5<q3: print("X2 - o",x2) x2min=True elif q4<q3: print("X2 + o",x2) x2min=True else: break if (x1min and x2min)==True: x1p=(2*x1)-x1p 
x22=(2*x2)-x2p #print("X1p i X2p zmienione") elif x1min==True and x2min==False : x1p=(2*x1)-x1p #print("X1p Zmienione") elif x1min==False and x2min==True : x2p=(2*x2)-x2p #print("X2p zmienione") elif t>s and (x1min and x2min)==False : t=t*b print("zmiejszam T",t) x1min=True x2min=True else: print("Blad") break q1=funkcja(x1, x2) print("Znaleziono minimum funkcji ktorej wartosc wynosi",q1," w punktach x1=",x1," oraz x2=",x2) end= time.clock() total= end-start print("Czas wykonywania",total) wcisnij=input("Wcisnij klawisz")
true
a1821e37b696ce3f364ee0ea51565e5b2b7d5ca5
Python
T0X1C-B15H0P/battle_script
/main.py
UTF-8
5,662
3.296875
3
[]
no_license
import random from Classes.game import person, bcolors from Classes.magic import Spell from Classes.Inventory import Items # Create Black Magic Fire = Spell("Fire", 10, 100, "Black") Thunder = Spell("Thunder", 10, 100, "Black") Blizzard = Spell("Blizzard", 10, 100, "Black") Meteor = Spell("Meteor", 20, 200, "Black") Quake = Spell("Quake", 15, 150, "Black") # Create White Magic Cure = Spell("Cure", 12, 120, "White") Cura = Spell("Cura", 18, 200, "White") # Create some Item potion = Items("Potion", "potion", "Heals for 50 HP", 50) hipotion = Items("Hi-Potion", "potion", "Heals for 100 HP", 100) superportion = Items("Super-Potion", "potion", "Heals for 500 HP", 500) elixir = Items("Elixir", "elixir", "Full restores HP/MP of One party Member", 9999) hielixir = Items("Super-Elixir", "elixir", "Fully restores party's HP/MP", 9999) grenade = Items("Grenade", "attack", "deals 500 damage", 500) player_magic = [Thunder, Blizzard, Meteor, Cura, Cure, Quake] # initialized player Magics list player_spells = [{"item": potion, "quantity": 15}, {"item": hipotion, "quantity": 5}, {"item": superportion, "quantity": 3}, {"item": elixir, "quantity": 2}, {"item": hielixir, "quantity": 1}, {"item": grenade, "quantity": 1}] # initialized player Items list player1 = person("ORION :", 460, 65, 60, 34, player_magic, player_spells) # Player initialization player2 = person("T0X1C :", 460, 65, 60, 34, player_magic, player_spells) # Player2 initialization player3 = person("B15H0P:", 460, 65, 60, 34, player_magic, player_spells) # Player3 initialization enemy = person("ENEMY :", 2400, 65, 45, 25, [], []) # Enemy initialization players = [player1, player2, player3] running = True i = 0 print("\n\n") print("===========================================") print(bcolors.FAIL + bcolors.BOLD + "T0X1C COMMANDLINE BATTLE GAME" + bcolors.ENDC) while running: print("===========================================") print("\n\n") print("NAME: HP: MP:") for player in players: player.get_stats() enemy.enemy_stat() 
print("\n") for player in players: player.choose_action() player_choice = input("Select Action: ") action_index = int(player_choice) - 1 if action_index == 0: player_dmg = player.generat_damage() enemy.take_damage(player_dmg) print("You attacked for", player_dmg, "Points of damage.") elif action_index == -1: continue elif action_index == 1: player.choose_magic() spell_index = int(input("Select Magic: ")) - 1 spell = player.magic[spell_index] print("Name:", spell.name, "Spell ,", "Charges:", spell.cost) if player.get_mp() <= int(spell.cost): print(bcolors.FAIL + "you don't have enough Magic Power cos your Magic Power is :", str(player.get_mp()) + bcolors.ENDC) continue player.reduce_mp(spell.cost) if spell.type == "white": spell_dmg = spell.generate_spell_damage() player.heal(spell_dmg) print(bcolors.OKBLUE + "/n" + spell.name + "heals for", str(spell_dmg), "HP." + bcolors.ENDC) elif spell.type == "Black": spell_dmg = spell.generate_spell_damage() player.reduce_mp(spell.cost) enemy.take_damage(spell.dmg) print("You attacked for", spell_dmg, "Points of damage.") elif action_index == 2: player.choose_Items() Item_choice = int(input("Choose item: ")) - 1 items = player.items[Item_choice]["item"] if player.items[Item_choice]["quantity"] == 0: print(bcolors.FAIL + "\n" + "None left..." + bcolors.ENDC) continue quantity = player.items[Item_choice]["quantity"] - 1 player.items[Item_choice]["quantity"] = quantity index = Item_choice if index == -1: continue if items.type == "potion": player.heal(items.prop) print(bcolors.OKGREEN + "\n" + items.name + " heals for", str(items.prop), "HP" + bcolors.ENDC) elif items.type == "elixir": player.hp = player.hp_max player.mp = player.mp_max print(bcolors.OKGREEN + "\n" + items.name + " Fully restores HP/MP." 
+ bcolors.ENDC) elif items.type == "attack": enemy.take_damage(items.prop) print(bcolors.FAIL + "\n" + items.name + " deals", str(items.prop), "points of damage" + bcolors.ENDC) else: print("Invalid Action!!!") continue enemy_choice = 1 enemy_target = random.randrange(0, 3) enemy_dmg = enemy.generat_damage() players[enemy_target].take_damage(enemy_dmg) print("Enemy attacked for", enemy_dmg, "Points of damage.") print("========================================================") if player.get_hp() == 0: print(bcolors.FAIL + bcolors.BOLD + "You lost the battle to your Enemy, Try harder next time Loser!!!" + bcolors.ENDC) players.get_stats() enemy.get_stats() break elif enemy.get_hp() == 0: print(bcolors.OKGREEN + "Long Live the King. Amazing you won the battle Motherfucker!!!" + bcolors.ENDC) players.get_stats() enemy.get_stats() break else: continue
true
dbb37bc8c80eab1355d949f4c94f59fe48c0c93d
Python
lesclaz/curso-qt-pyside-udemy
/Proyectos/Proyecto 05/programa_final.py
UTF-8
4,185
3.21875
3
[]
no_license
from PySide6.QtWidgets import ( QApplication, QWidget, QGridLayout, QLCDNumber, QPushButton) from functools import partial from helpers import * # https://doc.qt.io/qt-6/qlcdnumber.html # https://docs.python.org/3/library/functools.html#functools.partial class Calculadora(QLCDNumber): def __init__(self): # definimos unos dígitos y un estilo por defecto al panel LCD super().__init__(digitCount=12, segmentStyle=QLCDNumber.Flat) self.texto = '0' self.reiniciar = False # deberíamos definir también operación inicalmente self.operacion = None def escribir(self, caracter): # La función escribir añade caracteres al panel LCD # después de un cálculo comprobamos si debemos limpiar el panel LCD if self.reiniciar: self.limpiar() # no permitiremos que se pueda añadir más de una coma para decimales if caracter == "." and self.texto.count('.') == 1: return # si la longitud del texto no se ha superado añadimos el caracter if len(self.texto) <= 12: self.texto += caracter # borraremos los posibles 0 a la izquierda del número self.display(self.texto.lstrip("0")) def preparar(self, operacion): # La función preparar almacena en memoria el número y la operación self.operacion = operacion self.memoria = float(self.texto) self.limpiar() def calcular(self): # La función calcular utiliza la operación y el número en memoria # para operarlo en conjunto al número actual del LCD resultado = 0.0 if self.operacion == "+": resultado = self.memoria + float(self.texto) elif self.operacion == "-": resultado = self.memoria - float(self.texto) elif self.operacion == "×": resultado = self.memoria * float(self.texto) elif self.operacion == "÷": resultado = self.memoria / float(self.texto) # es muy importante redondear los decimales self.texto = str(round(resultado, 2)) # si el resultado no cabe en el panel LCD mostamos un error if len(self.texto) > 12: self.texto = 'Error' # mostramos el texto en el panel self.display(self.texto) # reiniciamos self.reiniciar = True def limpiar(self): # La función limpiar 
establece el texto del panel a cero self.texto = '0' self.display(self.texto) self.reiniciar = False class Window(QWidget): def __init__(self): super().__init__() self.setFixedWidth(480) self.setFixedHeight(360) self.setWindowTitle("Calculadora") with open(absPath("Scalcula.qss")) as styles: self.setStyleSheet(styles.read()) # Creamos un layout cuadricular y añadimos la calduladora LCD arriba self.setLayout(QGridLayout()) self.calculadora = Calculadora() self.layout().addWidget(self.calculadora, 0, 0, 1, 0) # Definimos todos los botones de la calculadora simbolos = [['7', '8', '9', '÷'], ['4', '5', '6', '×'], ['1', '2', '3', '-'], ['.', '0', '=', '+']] # Recreamos los botones usando un par de bucles anidados for i, fila in enumerate(simbolos): for j, simbolo in enumerate(fila): boton = QPushButton(simbolo) boton.setStyleSheet("height:50px;font-size:25px") # pasamos el simbolo al boton usando un objeto parcial boton.clicked.connect(partial(self.boton_clicado, simbolo)) # Sumamos 1 al offset de las filas para dejar espacio al LCD self.layout().addWidget(boton, i + 1, j) def boton_clicado(self, simbolo): if simbolo.isdigit() or simbolo == '.': self.calculadora.escribir(simbolo) elif simbolo == '=': self.calculadora.calcular() else: self.calculadora.preparar(simbolo) if __name__ == '__main__': app = QApplication() window = Window() window.show() app.exec_()
true
2ba39c2ab6e1cefea23261c990e4eeffd9c179c7
Python
dazzyddos/pycodes
/reverse_ip.py
UTF-8
367
2.78125
3
[]
no_license
# Usage: ./reverse_ip.py <site> # Example: ./reverse_ip.py facebook.com import requests import socket import sys site = "http://api.hackertarget.com/reverseiplookup" if len(sys.argv) != 2: print "Usage: reverse_ip.py <site>\n" sys.exit(0) url = sys.argv[1] ip = socket.gethostbyname(url) data = {'q':ip} resp = requests.get(site, params=data) print resp.text
true
f453e96c05bb51ab7ff5db530e69795e79465dd7
Python
nguyennam9696/Learn_Python_The_Hard_Way
/ex5.py
UTF-8
246
3.625
4
[ "MIT" ]
permissive
# Difference between format strings %r vs %s. # %r is used for debugging to see the actual raw data. %s is used to display to users. print "I said: %r" % "There are %d types of people" % 10 print "I said: %s" % "There are %d types of people" % 10
true
5fbe1a6de15e1cedde449f5c7326294124acedca
Python
sers88/FF_Cheker
/FF_Checker.py
UTF-8
2,086
2.8125
3
[ "Apache-2.0" ]
permissive
import re import requests from faker import Faker fake = Faker() headers = {'user-agent':fake.user_agent()} check_lists = list() proxies = {'http':'http://176.74.140.5:57476'} separ_newline = '\n' with open('bookmarks.html', 'r', encoding='utf8') as file: in_file = file.read() links_list = list(in_file.split(separ_newline))#преобразую из хтмла в список comp = re.compile(r'((http|https)://[A-Za-z0-9\.\?\&_\s=/-]+)')#доработать!! не воспринимает русские ссылки и пробелы for box_str in links_list: link_n = comp.search(box_str)#извлекает из списка регулярным выражением ссылки if link_n: check_lists.append(link_n.group()) else: check_lists.append(' ') for idx, value in enumerate(check_lists): if ' ' in value: continue else: try: req = requests.request('GET', value, timeout=10, proxies=proxies, headers=headers)# allow_redirects=False на всякий случай! if req.status_code >= 200 and req.status_code < 300:#проверка ссылок continue elif req.status_code >= 300 and req.status_code < 400: print("3xx возможна блокировка ", value) elif req.status_code == 401: print("проверить 401 ", idx," ",value) elif req.status_code >= 400 and req.status_code < 500: del links_list[idx] print("delete ", value) elif req.status_code >= 500: print("ошибка сервера ", req.status_code, " ", idx, " ",value) else: print("unknow error", value) except requests.ConnectionError: del links_list[idx] print("Error delete ", value) except requests.exceptions.Timeout: print("Error time", value) out_file = separ_newline.join(links_list) with open('bookmarks2.html', 'wt', encoding='utf8') as file: file.write(out_file)
true
bb19545ca6ab1cb1a3b6f7451f0a47d71f594cab
Python
aa4334574/test
/my_email_test/my_first.py
UTF-8
3,427
3.40625
3
[]
no_license
# -*- coding: utf-8 -*- from numpy import * from sklearn.datasets import load_iris from sklearn.linear_model import LogisticRegression iris = load_iris() trainX = iris.data trainY = iris.target #测试 clf = LogisticRegression(penalty='l2', dual=False, tol=1e-4, C=1.0, fit_intercept=True, intercept_scaling=1, class_weight=None, random_state=None, solver='liblinear', max_iter=100, multi_class='ovr', verbose=0, warm_start=False, n_jobs=1) ''' @param penalty: 指定正则化策略 @param dual: 是否求解对偶形式 @param C: 惩罚项系数的倒数,越大,正则化项越小 @param fit_intercept: 是否拟合截距 @param intercept_scaling: 当solver='liblinear'、fit_intercept=True时,会制造出一个恒为1的特征,权重为b,为了降低这个人造特征对正则化的影响,可以将其设为1 @param class_weight: 可以是一个字典或'balanced'。字典:可以指定每一个分类的权重;'balanced':可以指定每个分类的权重与该分类在训练集中的频率成反比 @param max_iter: 最大迭代次数 @param random_state: 一个整数或一个RandomState对象或None @param solver: 指定求解最优化问题的算法: 'newton-cg':牛顿法; 'lbfgs':拟牛顿法; 'liblinear':使用liblinear;(适用于小数据集) 'sag':使用Stochastic Average Gradient Descent算法(适用于大数据集) @param tol: 指定迭代收敛与否的阈值 @param multi_class: 'ovr': 采用one-vs-rest策略 'multi_class': 采用多类分类Logistic回归 @param verbose: 是否开启迭代过程中输出日志 @param warm_start: 是否使用前一次的训练结果继续训练 @param n_jobs: 任务并行时指定使用的CPU数,-1表示使用所有可用的CPU @attribute coef_: 权重向量 @attribute intercept_: 截距b @attribute n_iter_: 实际迭代次数 @method fit(X,y[,sample_weight]): 训练模型 @method predict(X): 预测 @method predict_log_proba(X): 返回X预测为各类别的概率的对数 @method predict_proba(X): 返回X预测为各类别的概率 @method score(X,y[,sample_weight]): 计算在(X,y)上的预测的准确率 ''' clf.fit(trainX, trainY) print ("权值:"+str(clf.coef_)) print ("截距:"+str(clf.intercept_)) print ("分数:"+str(clf.score(trainX, trainY))) print (clf.predict(trainX)) print (trainY) ''' C:\Anaconda2\lib\site-packages\sklearn\datasets 权值:[[ 0.41498833 1.46129739 -2.26214118 -1.0290951 ] [ 0.41663969 -1.60083319 0.57765763 -1.38553843] [-1.70752515 -1.53426834 2.47097168 2.55538211]] 截距:[ 0.26560617 1.08542374 -1.21471458] 分数:0.96 [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 
1 1 1 1 1 1 1 1 1 2 1 1 1 2 1 1 1 1 1 1 1 1 1 1 1 2 2 2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2] [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2] '''
true
6b60c7baaac15b87828fc04abcb6bb86dc9df4ef
Python
godseey/kgc-rec
/data_utils.py
UTF-8
7,620
2.671875
3
[]
no_license
import pickle as pkl import numpy as np from sklearn.metrics import ndcg_score np.random.seed(0) def evaluate(pred_rating_matrix, training_matrix, ground_truth_matrix, user_specific=False, for_csv=False): """ Prepare negative item indices to sample 99 items for each positive in testing For each user get testing items, For each testing item, paring with 99 negative samples Calculate relevant eval metric Average the evaluation score over all users as defined in the paper Parameters ----------------------- user_specific: mean of user specific average scores (scores for multiple items of a user is averaged) for_csv: only print csv files if True for dataframe analysis """ user_size = pred_rating_matrix.shape[0] item_size = pred_rating_matrix.shape[1] item_indices = list(range(item_size)) # print("user size:{}, item size:{}".format(user_size, item_size)) # Get negative item indices # Get testing items (ground truth - training) hr5 = 0. hr10 = 0. hr20 = 0. mrr = 0. ndcg5 = 0. ndcg10 = 0. ndcg20 = 0. valid_testing_size = 0 valid_testing_pairs = list() # how many 100 (one pos, 99 neg) pairs tested for i in range(user_size): # per user hr5_ = 0. hr10_ = 0. hr20_ = 0. mrr_ = 0. ndcg5_ = 0. ndcg10_ = 0. ndcg20_ = 0. 
# if i % 50 == 0: # print("{}-th user".format(i)) # negative indices for the current user # neg_indices = list(set(item_indices)-set(ground_truth_matrix[i])) # neg_indices = [_ for _ in item_indices if _ not in np.where(ground_truth_matrix[i]>0)[0]] neg_indices = np.where(ground_truth_matrix[i] == 0)[0] # testing indices for the current user training_indices = np.where(training_matrix[i] > 0)[0] testing_indices = [x for x in np.where(ground_truth_matrix[i] > 0)[0] if x not in training_indices] # print("testing indices:{}".format(testing_indices)) if len(testing_indices) > 0: for ti in testing_indices: # get testing size*99 items from neg_indices selected_neg_indices = np.random.choice(np.array(neg_indices), 99, replace=False) # print(selected_neg_indices) indices = np.array(list(selected_neg_indices)+[ti]) # valid_testing_pairs.append(np.stack((np.array([i] * 100), indices), axis=1)) indices_sorted = np.argsort(pred_rating_matrix[i][indices]) # print(valid_testing_size, pred_rating_matrix[i][indices]) # print(indices_sorted) ground_truth_indices = [99] # ground_truth_indices = list(range(len(indices)-1, len(indices)-1-len(testing_indices), -1)) hr5_ex = 1. if len(intersection(ground_truth_indices, indices_sorted[-5:])) > 0 else 0. hr5_ += hr5_ex hr10_ex = 1. if len(intersection(ground_truth_indices, indices_sorted[-10:])) > 0 else 0. hr10_ += hr10_ex hr20_ex = 1. if len(intersection(ground_truth_indices, indices_sorted[-20:])) > 0 else 0. 
hr20_ += hr20_ex index = np.max([np.where(indices_sorted == ind) for ind in ground_truth_indices][0][0]) # sorted is reversed rank for mrr rank = len(indices_sorted) - index mrr_ex = (1./rank) mrr_ += mrr_ex # NDCG@K y_true = np.asarray([[0.]*len(selected_neg_indices)+[1]]) y_pred = np.asarray([pred_rating_matrix[i][indices]]) # print(y_true.shape, y_pred.shape) ndcg5_ex = ndcg_score(y_true, y_pred, k=5) ndcg5_ += ndcg5_ex ndcg10_ex = ndcg_score(y_true, y_pred, k=10) ndcg10_ += ndcg10_ex ndcg20_ex = ndcg_score(y_true, y_pred, k=20) ndcg20_ += ndcg20_ex valid_testing_size += 1 print(i, len(training_indices), len(testing_indices), hr5_ex, hr10_ex, hr20_ex, ndcg5_ex, ndcg10_ex, ndcg20_ex, mrr_ex) if user_specific: num_examples = float(len(testing_indices)) hr5 += hr5_/num_examples hr10 += hr10_/num_examples hr20 += hr20_/num_examples ndcg5 += ndcg5_/num_examples ndcg10 += ndcg10_/num_examples ndcg20 += ndcg20_/num_examples mrr += mrr_/num_examples else: hr5 += hr5_ hr10 += hr10_ hr20 += hr20_ ndcg5 += ndcg5_ ndcg10 += ndcg10_ ndcg20 += ndcg20_ mrr += mrr_ # Store testing pairs # np.save("testing_pairs", np.array(valid_testing_pairs), allow_pickle=False) if not for_csv: if user_specific: print("valid testing size:{}".format(user_size)) print( "hr@5:{:7.4f} hr@10:{:7.4f} hr@20:{:7.4f} mrr:{:7.4f} ndcg@5:{:7.4f} ndcg@10:{:7.4f} ndcg@20:{:7.4f}".format( hr5 / user_size, hr10 / user_size, hr20 / user_size, mrr / user_size, ndcg5 / user_size, ndcg10 / user_size, ndcg20 / user_size )) else: print("valid testing size:{}".format(valid_testing_size)) print("hr@5:{:7.4f} hr@10:{:7.4f} hr@20:{:7.4f} mrr:{:7.4f} ndcg@5:{:7.4f} ndcg@10:{:7.4f} ndcg@20:{:7.4f}".format( hr5/valid_testing_size, hr10/valid_testing_size, hr20/valid_testing_size, mrr/valid_testing_size, ndcg5/valid_testing_size, ndcg10/valid_testing_size, ndcg20/valid_testing_size )) def intersection(lst1, lst2): lst3 = [value for value in lst1 if value in lst2] return lst3 if __name__ == "__main__": # Ground truth 
training_matrix_f = './data/adjacency_matrix.p' ground_truth_matrix_f = './data/user_action.p' # Prediction file pred_matrix_f = './output/m_rating_pred_bestmrr.p' # ------------------------------------------- # Load matrix with open(training_matrix_f, 'rb') as f: training_matrix = pkl.load(f) if not isinstance(training_matrix, np.matrix): training_matrix = training_matrix.todense() else: training_matrix = np.array(training_matrix) # np.save("training_matrix", training_matrix, allow_pickle=False) # with open("./MOOCCube/data-for-kgcrec/negative.p", 'rb') as f: # negative = pkl.load(f) # np.save("negative", negative, allow_pickle=False) with open(ground_truth_matrix_f, 'rb') as f: ground_truth_matrix = pkl.load(f) if not isinstance(ground_truth_matrix, np.matrix): ground_truth_matrix = ground_truth_matrix.todense() else: ground_truth_matrix = np.array(ground_truth_matrix) with open(pred_matrix_f, 'rb') as f: pred_matrix = pkl.load(f) # print(pred_matrix) # Evaluation evaluate(pred_matrix, training_matrix, ground_truth_matrix, user_specific=False, for_csv=False)
true
e74ec54d8b9ccb4b13eaeecac280ee9c6265d47c
Python
alleri91/Python
/Pole.py
UTF-8
205
3.421875
3
[]
no_license
dlugosc = int(input('Wprowadz dlugosc \n')) szerokosc = int(input ('Wprowadz szerokosc \n')) pole_powierzchni = dlugosc * szerokosc print('Pole powierzchni wynosi :{} '.format(pole_powierzchni))
true
93adb1dabe39b1e5f02d0c9bc0efc2fab3f8c161
Python
horace1024/ble
/run_obs.py
UTF-8
1,789
2.890625
3
[]
no_license
# Creates a basic BLE Observer # Check the Observer.py class for more info import sys import time import signal from datetime import datetime, timedelta from Observer import Observer # Create pretty hex string from bytearray def bytes2Str(byteArray): return(''.join("{0:02X}".format(x) for x in byteArray)) # Observe def main(): dt = datetime.now() print("Started at: %s" % dt) # Create Observer obs = Observer() obs.setupBleStack() # Matching address adr = 'DC:A6:32:63:50:9A' adr = None # Counter rx = 0 while(True): # Get received advertisements ad_structs, rssi, addr = obs.rxParse(match_addr=adr) # Check for address match and valid reception if rssi is not None and ad_structs is not None and addr is not None: # From our Pi only if includes: Name, TX Power name = next((ad['data'] for ad in ad_structs if ad['type'] == 0x09), None) tx_power = next((ad['data'] for ad in ad_structs if ad['type'] == 0x0A), None) # Detect parity bit if tx_power is not None: if tx_power[0] > 0x80: tx_power = tx_power[0] - 0x80 else: tx_power = tx_power[0] # Log if name is not None and tx_power is not None: print('%d %s "%s", TX: %ddBm, RSSI: %ddBm' % (rx, addr, name, tx_power, rssi)) else: print('%d %s, RSSI: %ddBm' % (rx, addr, rssi)) rx += 1 # Program start if __name__ == "__main__": try: main() except KeyboardInterrupt: pass finally: print("CTRL-C Detected") print("Stopped at: %s" % datetime.now()) sys.exit()
true
4a51cb9e0f73e4c67a04a2049c5afbf6b16c875f
Python
willzyz/nn_pred
/dev/python/readTrack.py
UTF-8
705
2.65625
3
[]
no_license
from pylab import * file = '/home/wzou/apc/track/cpu_track_2012-08-15T00:48:55.284500.txt'; #file = '/home/wzou/apc/track/mem_track_2012-08-14T23:40:20.554979.txt'; f = open(file, 'r'); title = f.readline();title=title.split('\n')[0]; s1=[]; s2=[]; t =f.readline(); count = 1; while 1: t = f.readline(); print t.split(', '); if size(t.split(', '))>1: diff = float(t.split(', ')[1].split('\n')[0]); print diff; s1.append(float(t.split(', ')[0])); s2.append(diff); if (not t) or (count>3600): break; count = count + 1; print s1; #print s2; plot(s1, s2, linewidth=1.0); show(); xlabel('time'); ylabel('metric');
true
1d93b30ad7cd07686447e3e9475c609b443e6b22
Python
Manasi0801/Game
/Guess.py
UTF-8
908
3.703125
4
[]
no_license
import random import time Plangs = ("python", "php", "javascript", "perl","java", "ruby" , "cobol", "fortran", "pascal", "basic", "swift") answer = random.choice(Plangs) correct = answer rumble = "" print("\nLet's play") while answer: position = random.randrange(len(answer)) rumble += answer[position] answer = answer[:position] + answer[(position + 1):] start_time = time.perf_counter() print("The Word Is:" ,rumble) guess = input("Guess This Programming Language:- ") guess = guess.lower() while (guess != correct) or (guess == ""): print("That is not the correct answer") guess = input("Guess This Programming Language:- ") guess = guess.lower() if guess == correct: print("\nCongratulation Correct answer!") print("\n--- %s seconds ---" % (time.perf_counter() - start_time)) input("\n Press any to Exit!")
true
48e0a03ba0c7b4d603fed4e982eae8acfad1705b
Python
vjuranek/jenkins-disk-usage
/lib/python/tex_top_jobs.py
UTF-8
786
2.546875
3
[]
no_license
# -*- coding: utf-8 -*- import db_queries as db import utils as u def generate(file_path, limit): CAPTION = "Top %i disk consumers"%limit jobs = db.top_jobs(limit) ft = open(file_path,"w") ft.write("\\begin{table}[h!]\n") ft.write("\\centering\n") ft.write("\\begin{tabular}{|c|c|}\n") ft.write("\t\\hline\n") ft.write("\t\\textbf{Job name} & \\textbf{Size [GB]} \\\\ \n") ft.write("\t\\hline\n") ft.write("\t\\hline\n") for item in jobs: print "%s\t%i"%(item[0],item[1]) ft.write("\t\t %s \t & \t %3.1f"%(u.tex_fix_str(item[0]), item[1]) + " \\\\\n") ft.write("\t\\hline\n") ft.write("\\end{tabular}\n") ft.write("\t\\caption{%s}\n"%CAPTION) ft.write("\\end{table}\n") ft.close()
true
6930e6f9257daedea07cfd86ea5d53f359d0a88e
Python
ajcrosbie/Pong
/Objects.py
UTF-8
2,660
3.15625
3
[]
no_license
import random import pygame class paddle(): def __init__(self, pos, player, size, side=0): self.colour = 255, 255, 255 self.pos = pos self.player = player self.speed = 0 self.size = size def move(self, keys, speed): for event in pygame.event.get(): if event.type == pygame.QUIT: pygame.quit() Cspeed = 0 if keys[pygame.K_DOWN] and self.player == 1: Cspeed = speed elif keys[pygame.K_UP] and self.player == 1: Cspeed = -speed elif keys[pygame.K_s] and self.player == 0: Cspeed = speed elif keys[pygame.K_w] and self.player == 0: Cspeed = -speed self.pos = (self.pos[0], self.pos[1] + Cspeed) self.speed = Cspeeds def move1(self, action): for event in pygame.event.get(): if event.type == pygame.QUIT: pygame.quit() Cspeed = 0 if action == 0: Cspeed = 8 elif action == 1: Cspeed = -8 self.pos = (self.pos[0], self.pos[1] + Cspeed) self.speed = Cspeed def highMove(self, keys, speed): for event in pygame.event.get(): if event.type == pygame.QUIT: pygame.quit() if keys[pygame.K_DOWN] and self.player == 1: self.speed = speed//10 + self.speed elif keys[pygame.K_UP] and self.player == 1: self.speed = -speed//10 + self.speed elif keys[pygame.K_s] and self.player == 0: self.speed = speed//10 + self.speed elif keys[pygame.K_w] and self.player == 0: self.speed = -speed//10 + self.speed self.pos = (self.pos[0], self.pos[1] + self.speed) def draw(self, surface): pygame.draw.rect(surface, self.colour, ((self.pos), (self.size))) class ball(): def __init__(self, pos): self.pos = pos self.colour = 255, 255, 255 self.dir = 0 self.size = 10 self.dir1 = 20 self.colourf = 0 def move(self): self.pos = (self.pos[0] + self.dir1, self.pos[1] + self.dir) def draw(self, surface): pygame.draw.circle(surface, self.colour, self.pos, self.size) def bounce(self, paddle): if paddle == 0: self.dir = self.dir * -1 elif paddle.player == 'block': self.dir = self.dir * -1 elif paddle.player == 'block1': self.dir1 = self.dir1 * -1 else: self.dir = self.dir + paddle.speed self.dir1 = self.dir1 * -1 def bounce1(self): 
self.dir1 = self.dir1 * -1
true
408652d31c204481541d9df2f77ebf75af07a2db
Python
jingong171/jingong-homework
/夏丽平/10-6-10-7.py
UTF-8
991
4.1875
4
[]
no_license
print("Give me two number,and I will add them") print("enter 'q' to quit") ##10-6 ##number1=input("\nenter the first number:") ##try: ## number1=int(number1) ##except ValueError: ## print("sorry,you didn't enter a number") ##number2=input("\nenter the second number:") ##try: ## number2=int(number2) ##except ValueError: ## print("sorry,you didn't enter a number") ##answer=number1+number2 ##print("the answer is "+str(answer)) ##10-7 flag = True while flag: number1=input("\nenter the first number:") if number1 == 'q': break try: number1=int(number1) except ValueError: print("sorry,you didn't enter a number") continue number2=input("\nenter the first number:") if number2 == 'q': break try: number2=int(number2) except ValueError: print("sorry,you didn't enter a number") continue answer = number1 + number2 print(answer)
true
3734d57efd30475b4cbadafc77b8782cc27f21c1
Python
jbpaduan/MB-Mapping
/mysite/mbdb/views.py
UTF-8
2,162
2.5625
3
[]
no_license
from django.http import HttpResponse from django.shortcuts import get_object_or_404, render from django.template import loader from .models import Expedition, Mission # Create your views here. #--------------- # 1. index view ## using a shortcut: render(), tutorial #3 def index(request): latest_expedition_list = Expedition.objects.order_by('-start_date')[:5] context = {'latest_expedition_list': latest_expedition_list} return render(request, 'mbdb/index.html', context) #--------------- # 2. detail view for an expedition def detail(request, expedition_id): expedition = get_object_or_404(Expedition, pk=expedition_id) return render(request, 'mbdb/detail.html', {'expedition': expedition}) ## early part of tutorial #3 (= "stub methods") # def detail(request, expedition_id): # return HttpResponse("You're looking at expedition %s." % expedition_id) #-------------- # 3. results view ... mission detail # def mission(request, mission_id): # mission = get_object_or_404(Mission, pk=expedition_id) # return render(request, 'mbdb/mission.html', {'mission': mission}) # response = "You're looking at the missions of expedition %s." return HttpResponse(response % mission_id) ## omitted cuz we aren't voting # def vote(request, question_id): # return HttpResponse("You're voting on question %s." % question.id) ## from tutorial #1 # def index(request): # return HttpResponse("Hello, world. 
You're at the mbdb index.") ## with page's design hard-coded in the view, mid-tutorial #3 # def index(request): # latest_expedition_list = Expedition.objects.order_by('-start_date')[:5] # output = ', '.join([q.expedition_name for q in latest_expedition_list]) # return HttpResponse(output) ## output: Sur Ridge, Extravert Cliff ## using a template with loader.get_template, later in tutorial #3 # def index(request): # latest_expedition_list = Expedition.objects.order_by('-start_date')[:5] # template = loader.get_template('mbdb/index.html') # context = { # 'latest_expedition_list': latest_expedition_list, # } # return HttpResponse(template.render(context, request))
true
89a1947ff87ef527dacb1afee25203ebf24d4d70
Python
zhenyakeg/contest5
/J.py
UTF-8
385
3.125
3
[]
no_license
n = int(input()) A = list(map(int,input().split())) num5 = A.count(5) num10 = A.count(10) num50 = A.count(50) num100 = A.count(100) num_cur,num_min = 0,0 for i in range (n): if A[i] == 5: num_cur+=1 else: if A[i]//5-1 >= num_cur: num_min += A[i]//5 - 1 - num_cur num_cur = 0 else: num_cur -= A[i]//5-1 print(num_min)
true
456e53ca4aa49ca27b6f6599a8647803a761e7d5
Python
DigoShane/Python
/Basic/ReturnStatement.py
UTF-8
463
4.21875
4
[]
no_license
# The return statement is useful to return something from the calculation # of the function back to the main function. ## A function to fid the power of a no. and return it def powwow(num1,num2): return num1**num2 print ("Code") #Note that the return function transfer the flow of the program #back to the main function. Thus the print ("Code") will never #get printed. num = powwow(2,0.5) print (num) # If we do print(cube(3,3)) then the o/p is none.
true
d54f64e09da921befad5cdc4b562cdf07e308762
Python
Cloema/Polar_plotter
/main.py
UTF-8
1,656
3.5
4
[]
no_license
# -*- coding: utf-8 -*- #main.py #author: Drawbot team CRI - Marion Ficher #date : october 2019 #version : V1.0 import serial from drawbot import Drawbot def ask_function(): try: choice = int(input("Enter 1 for a line, 2 for a square ")) except ValueError: print ("Not a digit") return ask_function() if choice < 1 or choice > 2: print ("Not valid") return ask_function() return choice def ask_coordinates(): try: x = int(input("Enter the x coordinates of the choosen point ")) y = int(input("Enter the y coordinates of the choosen point ")) except ValueError: print ("Not a digit") return ask_function() if choice < 1 or choice > 2: print ("Not valid") return ask_function() return choice test_drawbot = Drawbot() coordX = 0 coordY = 0 choice = ask_function() if choice ==1: test_drawbot.drawStraightline(300, 300, 1000, 0) test_drawbot.reinit_drawing() if choice ==2: test_drawbot.drawSquare (300,300,100) test_drawbot.reinit_drawing() ''' a = serial.Serial("/dev/ttyUSB1",9600,timeout=1) b = a.readline() print (b) print ("Enter 1 to ON LED and 0 to OFF LED") while 1: input_data =input() print ("you entered", input_data) if (input_data == '1'): a.write("1".encode()) print ("LED ON") if (input_data == '0'): a.write("0".encode()) print ("LED OFF") '''
true
fd31a884cbcbb384693e763994c5e58764e8af23
Python
csuchite/sqlalchemy-challenge
/app.py
UTF-8
2,386
2.8125
3
[]
no_license
#1. import Flask
import numpy as np
import pandas as pd
import datetime as dt
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, desc
from flask import Flask, jsonify

engine = create_engine('sqlite:///hawaii.sqlite', echo=False)

# Declare a Base using `automap_base()`
Base = automap_base()

# Reflect Database into ORM classes
Base.prepare(engine, reflect=True)

# Save a reference to the measurenment table as 'Measurement'
Measurement = Base.classes.measurement

# Save a reference to the station table as 'Station'
# NOTE(review): the comment calls the table "station" but this reflects
# `stations` -- confirm the actual table name in hawaii.sqlite.
Station = Base.classes.stations

# Create our session (link) from Python to the DB
session = Session(engine)

# 2. Create an app, being sure to pass __name__
app = Flask(__name__)


# 3. Define what to do when a user hits the index route
@app.route("/")
def home():
    """List of available routes."""
    # BUG FIX: the original return parenthesis was never closed, which made
    # the whole module a syntax error.
    return (
        "Hawaii Precipitation and Weather data<br/><br/>"
        "Pick from the available routes below:<br/><br/>"
        "Precipiation from 2016-08-23 to 2017-08-23.<br/>"
        "/api/v1.0/precipitation<br/><br/>"
        "A list of all the weather stations in Hawaii.<br/>"
        "/api/v1.0/stations<br/><br/>"
    )


# 4. Precipitation for the last year of data.
@app.route("/api/v1.0/precipitation")
def PRCP():
    """Query the dates and precipitation observations from the last year and
    return them as a list of {"Date": ..., "Precipitation": ...} objects."""
    # BUG FIX: `begin.date` was undefined; derive the cutoff as one year
    # before the most recent date present in the measurement table.
    last_date = session.query(func.max(Measurement.date)).scalar()
    begin_date = (dt.datetime.strptime(last_date, "%Y-%m-%d")
                  - dt.timedelta(days=365)).strftime("%Y-%m-%d")

    results = session.query(Measurement.date, Measurement.prcp).\
        filter(Measurement.date > begin_date).\
        order_by(Measurement.date).all()

    # BUG FIX: the original shadowed the accumulator list with the loop
    # variable, wrote keys on an undefined `prcp_data_dict` and appended to an
    # undefined `precipitation_data`.
    precipitation_data = []
    for row in results:
        precipitation_data.append({
            "Date": row.date,
            "Precipitation": row.prcp,
        })
    return jsonify(precipitation_data)


@app.route("/api/v1.0/stations")
def stations():
    """Return a JSON list of all weather station identifiers."""
    # BUG FIX: this route and /api/v1.0/tobs both defined a view function
    # named `about`, so Flask raised on the duplicate endpoint at import
    # time; this one also returned a placeholder page instead of data.
    results = session.query(Station.station).all()
    return jsonify([row[0] for row in results])


@app.route("/api/v1.0/tobs")
def tobs():
    """Temperature observations endpoint.

    NOTE(review): kept as the original placeholder body; renamed only to
    resolve the duplicate `about` endpoint. Implement the real tobs query.
    """
    print("Server received request for 'About' page...")
    return "Welcome to my 'About' page!"


if __name__ == "__main__":
    app.run(debug=True)
true
ca3f218cfb015fefd852bfeabf9afb63ff514a36
Python
Muhammad-Ibrahiem-Abdelhameed/Python-for-distributed-systems
/client_rest_api.py
UTF-8
434
2.515625
3
[]
no_license
import sys import json import requests import fileinput res = requests.get('http://127.0.0.1:5000/') print(res.content) print("===================") student = requests.get('http://127.0.0.1:5000/student').json() print(student) print("===================") res_file = requests.get('http://127.0.0.1:5000/file', stream=True) with open('ff.pptx', 'wb') as f: f.write(res_file.content) print(type(res_file.content))
true
abd6edb0fdcd8c6ab9bfd7bd4e83881b631ad438
Python
natgrimm/mammothSim
/game.py
UTF-8
5,952
3.984375
4
[]
no_license
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 13 12:32:07 2019

@author: richardg71
"""

"""
Course: CS101
File: Game
Project: project04
Author: Mac Allen
Description: A "Choose Your Own Adventure" game about life after high school.
"""

"""
Assignment requirements (condensed from the hand-out):
1) Every decision offers exactly two options, entered as "1" and "2".
2) The player makes at least three choices before the game ends.
3) The game has exactly two endings (win / lose).
4) Endings print a decorated banner, not a bare message.
5) Choices dispatch to functions.
"""

#functions
def openingChoice() :
    """Entry point: ask whether to go to college right away and branch."""
    openingOptions = ['yes', 'no']
    choice1 = ''
    # Re-prompt until the player types one of the accepted answers.
    while choice1 not in openingOptions :
        print('Welcome to the Adventure of Life!')
        print('You have just graduated high school.')
        print('It is time for your next adventure!')
        choice1 = input('Would you like to go to college immediately? (yes/no) ')
    if choice1 == openingOptions[0] : #yes
        college()
    elif choice1 == openingOptions[1] : #no
        nocollege()
    return

def college() :
    """College branch: choose between community college and a university."""
    print("This is going to be adventure, I hope you're ready!")
    choice1aOptions = ['1', '2']
    choice2 = ''
    while choice2 not in choice1aOptions :
        choice2 = input('Will you go to a community college (1) or a 4-year university (2)? ')
    if choice2 == choice1aOptions[0] : # community college
        communityCollege()
    elif choice2 == choice1aOptions[1] :
        university()
    return

def communityCollege() :
    """Community-college branch; both housing choices lead to a win."""
    print('You are going to save a ton of money!')
    choice2aOptions = ['1', '2']
    choice3 = ''
    while choice3 not in choice2aOptions :
        choice3 = input("""You have two options:
(1) Live at home
(2) Get your own apartment
What will you do? """)
    # Either housing option counts as a winning path.
    if choice3 == choice2aOptions[0] :
        winning()
    elif choice3 == choice2aOptions[1] :
        winning()
    return

def university() :
    """University branch: one school wins, the other loses."""
    print('You are ambitious!')
    choice2bOptions = ['1', '2']
    choice3 = ''
    while choice3 not in choice2bOptions :
        choice3 = input("""We all know that there are truly only two options for college.
And you have to choose one of them.
(1) BYU-Idaho (a.k.a. the best university)
(2) another state school
What is your choice? """)
    if choice3 == choice2bOptions[0] :
        winning()
    elif choice3 == choice2bOptions[1] :
        losing()
    return

def nocollege() :
    """No-college branch: choose between the military and civilian work."""
    print("You dont care what people think, you're gonna make it big!")
    choice1bOptions = ['1', '2']
    choice2 = ''
    while choice2 not in choice1bOptions :
        choice2 = input("""There are many routes you can take, but lets say you can only make one of these two.
(1) Join the military
(2) Get a job as a civilian
Which would you choose? """)
    if choice2 == choice1bOptions[0] :
        military()
    elif choice2 == choice1bOptions[1] :
        civilian()
    return

def military() :
    """Military branch: retire after 20 years (win) or get discharged (lose)."""
    print("I'm glad you chose to serve the U.S.A.!")
    choice2cOptions = ['1', '2']
    choice3 = ''
    while choice3 not in choice2cOptions :
        choice3 = input("""Now that you are in the military there are a myriad of options,
like which branch you will be in or what job you will have.
I am only curious about one thing though.
Will you choose to
(1) Stay in the military for 20 years and retire, or
(2) Get dishonorably discharged.
What is your choice? """)
    if choice3 == choice2cOptions[0] :
        winning()
    elif choice3 == choice2cOptions[1] :
        losing()
    return

def civilian() :
    """Civilian branch: move up the ranks (win) or stagnate (lose)."""
    print("There's nothing better than manking money besides making more money.")
    choice2dOptions = ['1', '2']
    choice3 = ''
    while choice3 not in choice2dOptions :
        choice3 = input("""Some people don't need college if they make the right decisions.
Obviosly you know that. It's time for you to make the right decision.
If you only had the option of these two which would you choose?
(1) Work at discount tire and move up the ranks, or
(2) Work at a grocery store and never get promoted.
Which route will you take? """)
    if choice3 == choice2dOptions[0] :
        winning()
    elif choice3 == choice2dOptions[1] :
        losing()
    return

def winning() :
    """Winning ending: print the banner, then offer a replay (recursive)."""
    print('{:#^42}'.format(''))
    print('#{: ^40}#'.format('You have won the game of life!'))
    print('{:#^42}'.format(''))
    winnerOptions = ['yes', 'no']
    playAgain = ''
    while playAgain not in winnerOptions :
        playAgain = input('Would you like to play again? (yes/no) ')
    # Replay restarts the game by calling the entry point again.
    if playAgain == winnerOptions[0] :
        openingChoice()
    elif playAgain == winnerOptions[1] :
        print('Thank you for playing.')
    return

def losing() :
    """Losing ending: print the banner, then offer a replay (recursive)."""
    print('{:#^42}'.format(''))
    print('#{: ^40}#'.format('You have lost at life!'))
    print('#{: ^40}#'.format('Better luck next time...'))
    print('{:#^42}'.format(''))
    loserOptions = ['yes', 'no']
    playAgain = ''
    while playAgain not in loserOptions :
        playAgain = input('Would you like to try to win again? (yes/no) ')
    if playAgain == loserOptions[0] :
        openingChoice()
    elif playAgain == loserOptions[1] :
        print('You are eternally a loser!')
    return

#main code
openingChoice()
true
61ee1819baf172ccb79356b756f6895854cda006
Python
nackwon/Python_Algorithm
/BOJ_Step03/BOJ_11721.py
UTF-8
97
3.171875
3
[]
no_license
str1 = input() result = '\n'.join(str1[i:i+10] for i in range(0, len(str1), 10)) print(result)
true
9d8d92fc2e55c20249dd984394faac14af74064e
Python
TusharKanjariya/python-practicles
/8.py
UTF-8
252
4.25
4
[]
no_license
str1 = input("enter string") str2 = input("enter string") if len(str1) != len(str2): print("Length Not Equal") print(tuple(zip(str1, str2))) for x, y in zip(str1, str2): if x != y: print("Not Equal") else: print("Equal")
true
dfad8e905470a5c5d8f6431bee8417763e3756f5
Python
ameyaditya/SudokuSolver
/Development files/test6.0.py
UTF-8
3,890
2.546875
3
[]
no_license
from imutils.perspective import four_point_transform
from imutils import contours
import imutils
import numpy as np
import cv2

# Locate a sudoku grid in a photograph, warp it to a top-down 600x600 view,
# and attempt to recognise each cell's digit by template matching against the
# images no1.png .. no9.png.

image = cv2.imread("sudoku.jpg")
'''
ratio = image[0]/300.0
print(ratio)
orig = image.copy()
image = imutils.resize(image,height=300)
'''
# Smooth noise while keeping edges, binarise adaptively, then edge-detect.
#blurred = cv2.pyrMeanShiftFiltering(image,31,91)
blurred = cv2.medianBlur(image, 9)
gray = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)
#blurred = cv2.medianBlur(gray,5)
th3 = cv2.adaptiveThreshold(gray, 200, cv2.ADAPTIVE_THRESH_MEAN_C,
                            cv2.THRESH_BINARY_INV, 25, 3)
edged = cv2.Canny(th3, 100, 200)

# BUG FIX: the original unpacked into a local named `contours`, shadowing the
# imported `imutils.contours` module; renamed to `found_cnts`.
_, found_cnts, _ = cv2.findContours(edged, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
found_cnts = sorted(found_cnts, key=cv2.contourArea, reverse=True)[0:10]

# The largest contour that approximates to 4 vertices is taken as the board.
screenCnt = None
for c in found_cnts:
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.02 * peri, True)
    if len(approx) == 4:
        screenCnt = approx
        break

# Order the 4 corners as top-left, top-right, bottom-right, bottom-left:
# smallest x+y is TL, largest is BR; smallest y-x is TR, largest is BL.
pts = screenCnt.reshape(4, 2)
rect = np.zeros((4, 2), dtype='float32')
s = pts.sum(axis=1)
rect[0] = pts[np.argmin(s)]
rect[2] = pts[np.argmax(s)]
diff = np.diff(pts, axis=1)
rect[1] = pts[np.argmin(diff)]
rect[3] = pts[np.argmax(diff)]
#rect *= ratio
(tl, tr, br, bl) = rect

# Target size of the warped board: the longest opposing edge in each axis.
widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
maxWidth = max(int(widthA), int(widthB))
maxHeight = max(int(heightA), int(heightB))

# Destination quad for the "birds eye" perspective transform.
dst = np.array([
    [0, 0],
    [maxWidth - 1, 0],
    [maxWidth - 1, maxHeight - 1],
    [0, maxHeight - 1]], dtype="float32")

M = cv2.getPerspectiveTransform(rect, dst)
warp = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
res = cv2.resize(warp, (600, 600), interpolation=cv2.INTER_AREA)

# Walk the 9x9 cells of the warped board.
# BUG FIX: the inner loops reused `i`, shadowing the module-level counter;
# renamed to cell_y/cell_x for clarity.
CELL = 600 // 9
for cell_y in range(0, 600 - CELL, CELL):
    for cell_x in range(0, 600 - CELL, CELL):
        roi = res[cell_y:cell_y + CELL, cell_x:cell_x + CELL]
        grey = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
        _, thresh = cv2.threshold(grey, 150, 255, cv2.THRESH_BINARY)
        _, cnts, _ = cv2.findContours(thresh.copy(), cv2.RETR_TREE,
                                      cv2.CHAIN_APPROX_SIMPLE)
        if (len(cnts) > 1):
            # Second-largest contour: the digit (the largest is the cell box).
            cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[1]
            area = cv2.contourArea(cnts, True)
            if area > 100 and area < 1000:
                # Centroid of the digit contour, used to crop a fixed window.
                moments = cv2.moments(cnts)
                cx = int(moments['m10'] / moments['m00'])
                cy = int(moments['m01'] / moments['m00'])
                cropped = thresh[cy - 22:cy + 22, cx - 20:cx + 20]
                # Try each digit template 1..9 against the cropped cell.
                for k in range(1, 10):
                    imagepath = "no" + str(k) + ".png"
                    template = cv2.imread(imagepath)
                    template = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
                    cv2.imshow("temp", template)
                    cv2.imshow("thresh", cropped)
                    cv2.waitKey(0)
                    template = cv2.resize(template, (CELL, CELL),
                                          interpolation=cv2.INTER_AREA)
                    result = cv2.matchTemplate(cropped, template,
                                               cv2.TM_CCOEFF_NORMED)
                    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
                    print(max_val)
                    # BUG FIX: the original tested `res > 0.9`, comparing the
                    # whole 600x600 image array instead of the match score.
                    if max_val > 0.9:
                        print(k)
                cv2.imshow("cropped", thresh)
                cv2.waitKey(0)

cv2.drawContours(image, [screenCnt], -1, (0, 0, 255), 3)
cv2.imshow("image", image)
cv2.imshow("warped", res)
cv2.waitKey(0)
cv2.destroyAllWindows()
2b62d8ee912b14db20266b9628c7a285daf62d36
Python
JeffersonLab/HDEventStore
/src/EventStoreToolkit/gen_util.py
UTF-8
2,222
3.140625
3
[]
no_license
#!/usr/bin/env python
#
# Ported to GlueX by Sean Dobbs (s-dobbs@northwestern.edu), 2014
#
# Copyright 2004 Cornell University, Ithaca, NY 14853. All rights reserved.
#
# Author: Valentin Kuznetsov, 2004
#
"""A set of usefull utilities independent from EventStore"""

# NOTE(review): this is Python 2 code (long literals `0x...L`,
# `dict.has_key`); it will not run under Python 3 without porting.
import os, sys, string, time

def dayAhead():
    """Form a day ahead in the YYYYMMDD format. To form such day we ask for
    seconds since epoch time.time(), add one day 60*60*24, convert to tuple in
    UTC format and send it for formating to time.strftime:
    int( time.strftime( "%Y%m%d",time.gmtime(time.time()+60*60*24) ) )"""
    return int( time.strftime( "%Y%m%d",time.gmtime(time.time()+60*60*24) ) )

def lowerUpperBitsOfUID(uid):
    """Return lower and upper bits of 64-bit number as a (lower, upper) tuple."""
    # read uid and check if we need to initialize lower(upper) bits
    lowerMostId = long(uid) & 0x00000000FFFFFFFFL  # lowerMost 32 bit
    upperMostId = long(uid) >> 32                  # upperMost 32 bit
    return (lowerMostId,upperMostId)

def form64BitNumber(lower,upper):
    """Form a 64-bit number from two 32-bit ones (upper in the high word)."""
    number = (upper<<32) | lower
    return number

def changeFileName(fileName,fromField,toField):
    """Change portion of file name from 'fromField' to 'toField'. It uses
    string replace mechnism to make a change. Return original name if no
    'fromField' and 'toField' provided."""
    # No-op when both fields are empty or identical.
    if (not fromField and not toField) or fromField==toField:
        return fileName
    newFileName = string.replace(fileName,fromField,toField)
    return newFileName

def printExcept():
    """print exception type, value and traceback on stderr"""
    # Delegate formatting to the installed excepthook.
    sys.excepthook(sys.exc_info()[0],sys.exc_info()[1],sys.exc_info()[2])

def printListElements(iList,msg=""):
    """Loop over elements in a list and print one in a time on stdout.
    An optional msg is printed as a '### msg:' header first."""
    if msg:
        print
        print "### %s:"%msg
    for item in iList:
        print item

def addToDict(iDict,key,value):
    """Add value as a list to the dictionary for given key. If dictionary
    contains such key, update its list with given value.
    Return dictionary itself."""
    # Python 2 idiom; iDict is mutated in place and also returned.
    if iDict.has_key(key):
        iList = iDict[key]+[value]
        iDict[key] = iList
    else:
        iDict[key]=[value]
    return iDict
true
73939a9874de858e7c02ac59c6bfce96a4fd2f0e
Python
yuukidach/The-Python-Challenge
/level9.py
UTF-8
532
2.71875
3
[]
no_license
#! /usr/bin/env python3 # -*- coding: utf-8 -8- __author__ = 'Yuuki_Dach' import requests import re from PIL import Image, ImageDraw webUrl = 'http://www.pythonchallenge.com/pc/return/good.html' webContent = requests.get(webUrl, auth=('huge','file')).text print(webContent) pattern = re.compile(r"(\d{2,3})") nums = re.findall(pattern, webContent) nums = list(map(int,nums)) nums.remove(nums[0]) nums.remove(nums[0]) print(nums) img = Image.new('RGB', (800,800)) draw = ImageDraw.Draw(img) draw.polygon(nums, 'white') img.show()
true
e2b2b01e5f580bdcf1bb83ba233250bd871c02bc
Python
joacolerer/fiuba
/Algo 2/Trabajos Practicos/tp3/BibliotecaGrafos.py
UTF-8
2,724
3.46875
3
[]
no_license
from grafo import Grafo
from cola import Cola


# Receives a graph and a list of vertices; if a precedence relation exists
# among them, returns an ordered list, or None when there is a cycle or a
# vertex does not exist.
def orden_topologico_subgrafo(grafo, lista):
    """Topological order of the subgraph induced by `lista`, or None."""
    subgrafo = Grafo(True)
    for vertice in lista:
        if not grafo.existe_vertice(vertice):
            return None
        subgrafo.agregar_vertice(vertice)
    for vertice in lista:
        # NOTE(review): adjacents outside `lista` are passed to
        # agregar_arista even though they were never added to the subgraph;
        # confirm Grafo tolerates (or ignores) edges to unknown vertices.
        for adyacente in grafo.obtener_adyacentes(vertice):
            subgrafo.agregar_arista(vertice, adyacente)
    return orden_topologico(subgrafo)


# Kahn's algorithm (BFS-like): returns a topological order of the whole
# graph, or None when a cycle prevents one.
def orden_topologico(grafo):
    """Topological order of `grafo` via in-degree counting."""
    grados = {}
    resultados = []
    for v in grafo:
        grados[v] = 0
    for v in grafo:
        for w in grafo.obtener_adyacentes(v):
            grados[w] += 1
    cola = Cola()
    for v in grafo:
        if grados[v] == 0:
            cola.encolar(v)
    while not cola.esta_vacia():
        v = cola.desencolar()
        resultados.append(v)
        # BUG FIX: the original called `grafo.obtener_adyecentes(v)`
        # (misspelled), which would raise AttributeError; every other
        # function in this module uses `obtener_adyacentes`.
        for w in grafo.obtener_adyacentes(v):
            grados[w] -= 1
            if grados[w] == 0:
                cola.encolar(w)
    # Fewer results than vertices means a cycle was present.
    if len(resultados) == len(grafo):
        return resultados
    return None


def calcular_diametro(grafo):
    """Largest shortest-path distance (in edges) between any two vertices."""
    diametro = 0
    for v in grafo:
        distancias = caminos_minimos(grafo, v)
        for w in distancias:
            if distancias[w] > diametro:
                diametro = distancias[w]
    return diametro


def caminos_minimos(grafo, origen):
    """BFS distances (edge counts) from `origen` to every reachable vertex."""
    cola = Cola()
    visitados = set()
    distancia = {}
    distancia[origen] = 0
    visitados.add(origen)
    cola.encolar(origen)
    while not cola.esta_vacia():
        v = cola.desencolar()
        # CONSISTENCY FIX: the original called `grafo.adyacentes(v)` while
        # the rest of the module uses `obtener_adyacentes`.
        for w in grafo.obtener_adyacentes(v):
            if w not in visitados:
                distancia[w] = distancia[v] + 1
                cola.encolar(w)
                visitados.add(w)
    return distancia


def distancia_exacta(grafo, origen, n):
    """Number of vertices at BFS distance exactly `n` from `origen`."""
    #if n > len(grafo): return 0
    cola = Cola()
    cola.encolar(origen)
    distancia = {}
    distancia[origen] = 0
    lista = []
    while not cola.esta_vacia():
        v = cola.desencolar()
        for w in grafo.obtener_adyacentes(v):
            if w not in distancia:
                distancia[w] = distancia[v] + 1
                cola.encolar(w)
                if distancia[w] == n:
                    lista.append(w)
                # BFS visits vertices in non-decreasing distance, so once a
                # distance beyond n appears no more level-n vertices exist.
                if distancia[w] > n:
                    return len(lista)
    return len(lista)
true
b50d04b440b66e25847d8456674c63280c090da8
Python
chunwei0831/porcupine
/binding/python/porcupine.py
UTF-8
6,002
2.59375
3
[ "LicenseRef-scancode-proprietary-license", "Apache-2.0" ]
permissive
#
# Copyright 2018-2020 Picovoice Inc.
#
# You may not use this file except in compliance with the license. A copy of the license is located in the "LICENSE"
# file accompanying this source.
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#

import os
from ctypes import *
from enum import Enum


class Porcupine(object):
    """
    Python binding for Porcupine wake word engine. It detects utterances of given keywords within an incoming stream
    of audio in real-time. It processes incoming audio in consecutive frames and for each frame emits the detection
    result. The number of samples per frame can be attained by calling `.frame_length`. The incoming audio needs to
    have a sample rate equal to `.sample_rate` and be 16-bit linearly-encoded. Porcupine operates on single-channel
    audio.
    """

    class PicovoiceStatuses(Enum):
        # Mirrors the status codes returned by the native library's functions.
        SUCCESS = 0
        OUT_OF_MEMORY = 1
        IO_ERROR = 2
        INVALID_ARGUMENT = 3
        STOP_ITERATION = 4
        KEY_ERROR = 5
        INVALID_STATE = 6

    # Maps each non-success native status to the Python exception raised for it.
    _PICOVOICE_STATUS_TO_EXCEPTION = {
        PicovoiceStatuses.OUT_OF_MEMORY: MemoryError,
        PicovoiceStatuses.IO_ERROR: IOError,
        PicovoiceStatuses.INVALID_ARGUMENT: ValueError,
        PicovoiceStatuses.STOP_ITERATION: StopIteration,
        PicovoiceStatuses.KEY_ERROR: KeyError,
        PicovoiceStatuses.INVALID_STATE: ValueError,
    }

    class CPorcupine(Structure):
        # Opaque handle to the native Porcupine object; its fields are never
        # accessed from Python, so the structure body is intentionally empty.
        pass

    def __init__(self, library_path, model_path, keyword_paths, sensitivities):
        """
        Constructor. Loads the dynamic library, validates all paths and
        sensitivities, and initializes a native Porcupine instance.

        :param library_path: Absolute path to Porcupine's dynamic library.
        :param model_path: Absolute path to the file containing model parameters.
        :param keyword_paths: Absolute paths to keyword model files.
        :param sensitivities: Sensitivities for detecting keywords. Each value should be a number within [0, 1]. A
        higher sensitivity results in fewer misses at the cost of increasing the false alarm rate.
        :raises IOError: if the library, model or any keyword file is missing.
        :raises ValueError: if list lengths differ or a sensitivity is out of range.
        """

        if not os.path.exists(library_path):
            raise IOError("Couldn't find Porcupine's dynamic library at '%s'." % library_path)

        library = cdll.LoadLibrary(library_path)

        if not os.path.exists(model_path):
            raise IOError("Couldn't find model file at '%s'." % model_path)

        # One sensitivity is required per keyword model.
        if len(keyword_paths) != len(sensitivities):
            raise ValueError("Number of keywords does not match the number of sensitivities.")

        for x in keyword_paths:
            if not os.path.exists(os.path.expanduser(x)):
                raise IOError("Couldn't find keyword file at '%s'." % x)

        for x in sensitivities:
            if not (0 <= x <= 1):
                raise ValueError('A sensitivity value should be within [0, 1].')

        # Declare the native init function's signature before calling it.
        init_func = library.pv_porcupine_init
        init_func.argtypes = [
            c_char_p,
            c_int,
            POINTER(c_char_p),
            POINTER(c_float),
            POINTER(POINTER(self.CPorcupine))]
        init_func.restype = self.PicovoiceStatuses

        self._handle = POINTER(self.CPorcupine)()

        # Marshal the Python lists into C arrays and create the native object.
        status = init_func(
            model_path.encode('utf-8'),
            len(keyword_paths),
            (c_char_p * len(keyword_paths))(*[os.path.expanduser(x).encode('utf-8') for x in keyword_paths]),
            (c_float * len(keyword_paths))(*sensitivities),
            byref(self._handle))
        if status is not self.PicovoiceStatuses.SUCCESS:
            raise self._PICOVOICE_STATUS_TO_EXCEPTION[status]()

        # Keep bound references to the native functions used after init.
        self._delete_func = library.pv_porcupine_delete
        self._delete_func.argtypes = [POINTER(self.CPorcupine)]
        self._delete_func.restype = None

        self.process_func = library.pv_porcupine_process
        self.process_func.argtypes = [POINTER(self.CPorcupine), POINTER(c_short), POINTER(c_int)]
        self.process_func.restype = self.PicovoiceStatuses

        version_func = library.pv_porcupine_version
        version_func.argtypes = []
        version_func.restype = c_char_p
        self._version = version_func().decode('utf-8')

        # Engine constants queried once from the library.
        self._frame_length = library.pv_porcupine_frame_length()

        self._sample_rate = library.pv_sample_rate()

    def delete(self):
        """Releases resources acquired by Porcupine."""

        self._delete_func(self._handle)

    def process(self, pcm):
        """
        Processes a frame of the incoming audio stream and emits the detection result.

        :param pcm: A frame of audio samples. The number of samples per frame can be attained by calling
        `.frame_length`. The incoming audio needs to have a sample rate equal to `.sample_rate` and be 16-bit
        linearly-encoded. Porcupine operates on single-channel audio.
        :return: Index of observed keyword at the end of the current frame. Indexing is 0-based and matches the
        ordering of keyword models provided to the constructor. If no keyword is detected then it returns -1.
        """

        if len(pcm) != self.frame_length:
            raise ValueError("Invalid frame length. expected %d but received %d" % (self.frame_length, len(pcm)))

        # The detected keyword index is written through this out-parameter.
        result = c_int()
        status = self.process_func(self._handle, (c_short * len(pcm))(*pcm), byref(result))
        if status is not self.PicovoiceStatuses.SUCCESS:
            raise self._PICOVOICE_STATUS_TO_EXCEPTION[status]()

        return result.value

    @property
    def version(self):
        """Version"""

        return self._version

    @property
    def frame_length(self):
        """Number of audio samples per frame."""

        return self._frame_length

    @property
    def sample_rate(self):
        """Audio sample rate accepted by Picovoice."""

        return self._sample_rate
37d0d74d6de529708d2a0c6181bea16ae58a7d88
Python
daniel-amos/T-201-GSKI-Vor-2020
/(ADT)Trees/General_Tree.py
UTF-8
3,364
4.1875
4
[]
no_license
class GeneralTreeNode:
    """One node of a general (n-ary) tree: a payload plus any children."""

    def __init__(self, data = None):
        self.data = data        # payload stored at this node
        self.children = []      # child GeneralTreeNode objects, in order


class GeneralTree:
    """Interactive general tree with traversal, count and replace helpers."""

    def __init__(self):
        # The tree only needs a reference to its root node.
        self.root = None

    def _populate_tree_recur(self, level = 0):
        """Read one node from stdin, then recursively read its children.

        An empty input line ends the current child list (returns None).
        """
        data_str = input()
        if data_str == '':
            return None
        level += 1
        node = GeneralTreeNode(data_str)
        while True:
            print(level*" |"+"--NODE :", end=" ")
            child = self._populate_tree_recur(level)
            if child is None:
                break
            node.children.append(child)
        return node

    def populate_tree(self):
        """Kick off the recursive stdin-driven population at the root."""
        print("ROOT :", end=" ")
        self.root = self._populate_tree_recur()

    def _print_preorder_recur(self, node):
        """Pre-order: print the node first, then each subtree."""
        if node is None:
            return
        print(str(node.data), end=" ")
        for child in node.children:
            self._print_preorder_recur(child)

    def print_preorder(self):
        """Print the whole tree in pre-order, ending with a newline."""
        self._print_preorder_recur(self.root)
        print("")

    def _print_postorder_recur(self, node):
        """Post-order: print each subtree first, then the node."""
        if node is None:
            return
        for child in node.children:
            self._print_postorder_recur(child)
        print(str(node.data), end=" ")

    def print_postorder(self):
        """Print the whole tree in post-order, ending with a newline."""
        self._print_postorder_recur(self.root)
        print("")

    def _value_counter_recur(self, value, node, counter = 0):
        """Count occurrences of `value` in the subtree rooted at `node`."""
        if node is None:
            return counter
        for child in node.children:
            counter = self._value_counter_recur(value, child, counter)
        return counter + 1 if node.data == value else counter

    def count_val(self):
        """Prompt for a value and report how many times it appears."""
        print("Value Counter, input a value and I \nwill find how many there are in the tree")
        value = input("Insert: ")
        num = self._value_counter_recur(value, self.root)
        print(value, "Appeared", num, "Times")

    def _replace_val_recur(self, node, val_1, val_2):
        """Replace every `val_1` payload with `val_2` below `node`."""
        if node is None:
            return
        for child in node.children:
            self._replace_val_recur(child, val_1, val_2)
        if node.data == val_1:
            node.data = val_2

    def replace_val(self):
        """Prompt for two values, rewrite the tree, then show the result."""
        print("Value repalcer")
        val_1 = input("The value to be replaced: ")
        val_2 = input("The value that takes it's place: ")
        self._replace_val_recur(self.root, val_1, val_2)
        self.print_preorder()


if __name__ == "__main__":
    gt = GeneralTree()
    gt.populate_tree()
    gt.count_val()
    print("Postorder: ", end=" ")
    gt.print_postorder()
    print("Preorder: ", end=" ")
    gt.print_preorder()
    gt.replace_val()
bca0c7ce48c9220050588b95963e99888bdd8291
Python
PythonIntec/python.basico
/variables_tipos_de_datos.py
UTF-8
1,415
4.28125
4
[ "MIT" ]
permissive
#coding: utf-8 print "Saludos aprendiz, a continuación una variable" variable = "Soy una variable" print variable print "Las variables pueden tener cualquier nombre" aguacate = "Soy un aguacate" print aguacate print "Los nombres pueden tener cualquier longitud" print "Cuando el nombre incluye más de un sustantivo, separar con _" nombre_largo = "Tengo un nombre largo" print nombre_largo print "Pero no es conveniente que sean tan largos" el_nombre_mas_largo_de_todos = "Yeah" print el_nombre_mas_largo_de_todos print "También pueden contener cualquier tipo de dato, nativo del lenguaje y creado por el usuario (se verá luego)" entero = 21 decimal = 3.2 caracter = 'o' cadena_caracteres = "Esto es interesante" print entero, decimal, caracter, cadena_caracteres print "No se pueden crear variables sin ningún valor, descomenta la siguiente línea y corre el script" print "Resultará en un error del tipo NameError: name is not defined" print "no_tengo_valor" print "Al proceso de asignarle un valor a la variable se le llama asignación" tengo_valor = 1 print tengo_valor print "Las variables son muy codiciosas y aceptan cualquier valor que se le asigne" tengo_valor = "otro valor" print tengo_valor #Imprime "otro valor" tengo_valor = 3.9 print tengo_valor #Imprime "3.9" print "Se pueden crear varias variables en una misma línea" uno, dos, tres, cuatro = 1, 2, 3, 4 print uno, dos, tres, cuatro
true
5bcbc9e6bd9b0d3a742a142cd83e5808d5fe6b39
Python
harip/python_visualizations
/min_max.py
UTF-8
5,372
3.40625
3
[]
no_license
"""Coursera data science course""" import calendar as cl import matplotlib.pyplot as plt import pandas as pd DATA_URL = "https://res.cloudinary.com/harip/raw/upload/v1521364790/site/python/minmax.csv" XLIM = [1, 365] YLIM = [-40, 50] def prep_initial_data(): """Drop unused column and group by columns""" df_main = pd.read_csv(DATA_URL) df_main = df_main.drop(['ID'], axis=1) # Group by date, create seperate dataframes for tmin and tmax and merge groupby_fields = ['Date', 'Element'] df_max1 = df_main[df_main['Element'] == "TMAX"].groupby(groupby_fields).agg({'Data_Value':'max'}).reset_index() df_min2 = df_main[df_main['Element'] == "TMIN"].groupby(groupby_fields).agg({'Data_Value':'min'}).reset_index() df_main = pd.concat([df_max1, df_min2]).reset_index() return df_main def basic_chart_settings(chart_plot): """Basic chart settings""" chart_plot.imshow([[.5, .5], [.6, .6]], interpolation='bicubic', cmap=plt.cm.copper, extent=(1, 365, -40, 50), alpha=1, aspect='auto') # Basic chart settings chart_plot.set_xticklabels([]) chart_plot.set_ylim(YLIM) chart_plot.set_xlim(XLIM) chart_plot.tick_params(axis='x', length=0, which='minor') chart_plot.tick_params(axis='y', which='major', labelsize=7.5) # Set the grid and x, y labels, chart title and Scatter plot legend chart_plot.grid(color='lightgrey', linewidth=0.5) plt.ylabel("Temperature (celsius)", fontsize=9) plt.title("Record high and low temperatures (2005-2014)", fontsize=12) plt.legend(loc=4, fontsize=6.5) def weather_pattern(): """Function that creates the chart""" df_main = prep_initial_data() # Create a pivot, tmax and tmin will become columns, split date in year, month, day # Determine the day of the year df_main = df_main.groupby(['Date', 'Element'])['Data_Value'].min().unstack('Element').reset_index() df_main["TMAX"] = df_main["TMAX"]/10 df_main["TMIN"] = df_main["TMIN"]/10 df_main[["Year", "Month", "Day"]] = df_main["Date"].str.split("-", expand=True).astype('int') df_main["Day_Of_Year"] = 
(pd.to_datetime(df_main["Date"])).dt.dayofyear # Remove leap year data leap_df = df_main.query('Month == 2 & Day == 29') df_main = df_main.drop(leap_df.index.values) # Copy 2015 data into a dataframe, and years less than 2015 into another frame df_2015 = df_main[df_main["Year"] == 2015].reindex() df_main = df_main[df_main["Year"] < 2015] df_main = df_main.drop(['Date', 'Year'], axis=1) # Merge df again based on day of the year df_main = df_main.groupby(['Day_Of_Year']).agg({'TMAX':'max', 'TMIN':'min'}).reset_index() # Get 2015 record breaking data df_record = pd.DataFrame(columns=['X', 'Y']) for k, row_val in df_2015.iterrows(): # Get 2015 values for TMAX for this day and month tmax_value_2000_2014 = df_main[(df_main["Day_Of_Year"] == row_val["Day_Of_Year"])]["TMAX"].values[0] tmin_value_2000_2014 = df_main[(df_main["Day_Of_Year"] == row_val["Day_Of_Year"])]["TMIN"].values[0] if row_val["TMAX"] > tmax_value_2000_2014: df_record.loc[len(df_record)] = [row_val["Day_Of_Year"], row_val["TMAX"]] if row_val["TMIN"] < tmin_value_2000_2014: df_record.loc[len(df_record)] = [row_val["Day_Of_Year"], row_val["TMIN"]] # Get x-tick positon at the change of the month (major ticks) # Get the minor ticks (middle of the month) and create labels as momths x_ticks = [row_val[1]['Day_Of_Year'] for row_val in df_2015.iterrows() if row_val[1]['Day'] == 1] x_minor_ticks = [(val+x_ticks[idx-1])/2 for idx, val in enumerate(x_ticks) if idx > 0] x_minor_ticks.append((max(x_ticks)+365)/2) x_minor_labels = [month[:3] for month in cl.month_name if month != ''] # Get the line series max_values = df_main['TMAX'].values min_values = df_main['TMIN'].values x_values = range(1, len(max_values)+1) # Plot the line data and apply a gradient to the entire chart chart_fig, chart_plot = plt.subplots(nrows=1, ncols=1) chart_fig.canvas.draw() chart_plot.plot(x_values, max_values, zorder=1, linewidth=0) chart_plot.plot(x_values, min_values, zorder=1, linewidth=0) # Add the scatter plot 
chart_plot.scatter(df_record["X"].values, df_record["Y"].values, zorder=2, color='b', label="Record 2015 Temperature", alpha=0.75) # Hide the major tick labels (ticks are visible) # Hide the minor ticks (tick labels which are months are visible) chart_plot.set_xticklabels(x_minor_labels, minor=True, fontsize=7.5) chart_plot.set_xticks(x_ticks) chart_plot.set_xticks(x_minor_ticks, minor=True) basic_chart_settings(chart_plot) # Fill area from the TMAX line to the top of chart with white color # Fill area from the TMIN line to the bottom of chart with white color # This will cause the gradient to showup only between the TMAX and TMIN max_y = [50 for min_value in min_values] min_y = [-40 for min_value in min_values] plt.fill_between(x_values, max_values, max_y, facecolor='white', alpha=1) plt.fill_between(x_values, min_values, min_y, facecolor='white', alpha=1) # Show the plot plt.show() weather_pattern()
true
311489115c91ae710a9969c29991e24946387d47
Python
AjinkyaTaranekar/AlgorithmX
/Codeforces/266/B.py
UTF-8
98
2.75
3
[]
no_license
n, t = input().split() s = input() for i in range(int(t)): s = s.replace("BG", "GB") print(s)
true
a02fdfc68bc813032e884deed848327efc842c4d
Python
gavanderlinden/IntroToFlask
/example2/blueprints.py
UTF-8
667
2.671875
3
[]
no_license
from flask import Blueprint, render_template_string, abort templates = { "index": """ <title>flask intro</title> <p>flask app using blueprints</p> <a href="./hello">hello</a> """, "hello": """ <title>hello page</title> <p>Hello! Welcome to this Flask introduction</p> <a href="./">return to index</a> """, "anything": "test" } def get_index_app(): app = Blueprint("index_app", __name__) @app.route("/", defaults={"page": "index"}) @app.route("/<page>") def show(page): template = templates.get(page) if template: return render_template_string(template) else: abort(404) return app
true
8698e53f043b5577b6c398ca9baf43a1dc3c368b
Python
kzagorulko/client-server
/03/server.py
UTF-8
2,380
2.796875
3
[]
no_license
import re
from flask import Flask, render_template, request

# Small Flask demo: three static pages plus a JSON API that classifies
# tokens in free text and manages an in-memory student list.
app = Flask(__name__, template_folder='templates/')
# Monotonically increasing id handed to newly added students.
app.config['student_id'] = 0

# Optionally signed integer token (whole-string match).
RE_INT = re.compile(r'^[-+]?[0-9]+$')
# Decimal like "1.23", or a bare number preceded by the word "angle ".
RE_FLOAT = re.compile(r'\d+\.\d+|(?<=angle\s)\d+')

# In-memory store; lost on restart.
# NOTE(review): not thread-safe — concurrent requests can race on this
# list and on app.config['student_id']; confirm the deployment is
# single-threaded before relying on it.
students = []


@app.route('/', methods=['GET'])
def index():
    """Render the landing page."""
    return render_template('index.html')


@app.route('/about', methods=['GET'])
def about():
    """Render the about page."""
    return render_template('about.html')


@app.route('/table', methods=['GET'])
def table():
    """Render the student-table page."""
    return render_template('table.html')


@app.route('/api/distribution', methods=['POST'])
def distribute():
    """Split the posted 'text' into integer, float and other tokens.

    Expects JSON {"text": "..."}; returns {"result": "<summary string>"}.
    """
    data = request.json
    text = data.get('text')
    divided = text.split()
    integer_values = list(filter(lambda x: RE_INT.match(x), divided))
    float_values = list(filter(lambda x: RE_FLOAT.match(x), divided))
    # Everything that matched neither regex above.
    another = list(filter(
        lambda x: x not in integer_values + float_values,
        divided
    ))
    result = (
        f'integers: {integer_values},\tfloat: {float_values},'
        f'\t another{another}'
    )
    return {
        'result': result,
    }


@app.route('/api/getStudents', methods=['GET'])
def get_students():
    """Return the full in-memory student list."""
    return {
        'students': students,
    }


@app.route('/api/addStudent', methods=['POST'])
def add_student():
    """Append a student from JSON {name, city, birthday}; return its new id."""
    data = request.json
    name = data.get('name')
    city = data.get('city')
    birthday = data.get('birthday')
    app.config['student_id'] += 1
    students.append({
        'id': app.config['student_id'],
        'name': name,
        'city': city,
        'birthday': birthday,
    })
    return {
        'id': app.config['student_id']
    }


@app.route('/api/updateStudent', methods=['POST'])
def update_student():
    """Overwrite name/city/birthday of every student matching the given id."""
    data = request.json
    id_ = int(data.get('id'))
    name = data.get('name')
    city = data.get('city')
    birthday = data.get('birthday')
    for i in range(len(students)):
        if students[i]['id'] == id_:
            students[i]['name'] = name
            students[i]['city'] = city
            students[i]['birthday'] = birthday
    return ''


@app.route('/api/removeStudent', methods=['POST'])
def delete_student():
    """Remove students whose id matches the posted id.

    NOTE(review): this mutates ``students`` while iterating by index, which
    skips the element following each removal; ids are unique in practice
    (at most one match), so it works — confirm before reusing the pattern.
    """
    data = request.json
    id_ = int(data.get('id'))
    for i in range(len(students)):
        if students[i]['id'] == id_:
            students.remove(students[i])
    return ''


if __name__ == "__main__":
    app.run(host='0.0.0.0', port=5005)
true
aa18f6f51fcced2bd4d79d1b7b9d14f6073a8d49
Python
mcv-m6-video/mcv-m6-2021-team5
/detectron2_tools/io.py
UTF-8
7,513
2.578125
3
[]
no_license
from detectron2.structures import BoxMode
from detectron2.structures import Instances, Boxes
import xml.etree.ElementTree as ET
import torch
import os
import math
from utils.bb import BB
import random
import numpy as np


class detectronReader():
    """Reads AICity ground-truth XML into detectron2 dataset dicts and
    converts detectron2 predictions back into this project's BB format."""

    def __init__(self, xmlfile):
        """Parse the annotation XML and build one dataset dict per frame.

        The frames are assumed to live next to the XML under
        AICity_data/train/S03/c010/frames/ — TODO confirm for other sequences.
        """
        # Parse XML file
        tree = ET.parse(xmlfile)
        root = tree.getroot()
        image_path = os.path.split(xmlfile)[0] + '/AICity_data/train/S03/c010/frames/'

        # Read all the boxes from each track and sort by frame number.
        # root[2:] skips the XML header elements before the tracks.
        detections = []
        for track in root[2:]:
            for box in track:
                det = [int(box.attrib['frame']), track.attrib['label'], int(track.attrib['id']),
                       float(box.attrib['xtl']), float(box.attrib['ytl']),
                       float(box.attrib['xbr']), float(box.attrib['ybr'])]
                detections.append(det)
        detections = sorted(detections, key=lambda x: x[0])

        # Create a dict for every frame (detections are frame-sorted, so a
        # frame-number change marks a record boundary).
        self.dataset_dicts = []
        last_frame = -1
        for i, det in enumerate(detections):
            # If frame has changed, restart record
            if det[0] != last_frame or i==0:
                if i != 0:
                    self.dataset_dicts.append(record)
                last_frame = det[0]
                record = {}
                # Frame files are 1-based and zero-padded to 4 digits.
                record["file_name"] = image_path + 'frame_' + str(det[0]+1).zfill(4) + '.png'
                record["image_id"] = det[0]
                record["annotations"] = []
                record["width"] = 1920
                record["height"] = 1080
            # Add box to annotations (bikes are excluded)
            if det[1]!='bike':
                box = {}
                box["bbox"] = det[3:7]
                box["bbox_mode"] = BoxMode.XYXY_ABS
                box["category_id"] = 0 if det[1] == 'car' else 1 #3 if det[1] == 'car' else 2
                box["track"] = det[2]
                record["annotations"].append(box)
            if i==len(detections)-1:
                self.dataset_dicts.append(record)

    def get_dict_from_xml(self, mode, train_ratio=0.25, K=0):
        """Return the dataset dicts for 'train' or 'val'.

        K = 0..3 selects one of four contiguous cross-validation folds;
        K = 4 draws a random train_ratio split.  Side effect: stores the
        chosen index lists in self.range_train / self.range_val (the
        latter is later read by detectron2converter).
        """
        N = len(self.dataset_dicts)
        N_train = math.floor(N*train_ratio)
        # Set the range
        if K == 0:
            self.range_train = list(range(0,N_train))
            self.range_val = list(range(N_train,N))
            #self.range_val = list(range(N_train,N_train+10))
        elif K == 1:
            self.range_train = list(range(N_train,2*N_train))
            self.range_val = list(range(0,N_train)) + list(range(2*N_train,N))
            #self.range_val = list(range(0,10)) + list(range(2*N_train,2*N_train+10))
        elif K == 2:
            self.range_train = list(range(2*N_train,3*N_train))
            self.range_val = list(range(0,2*N_train)) + list(range(3*N_train,N))
        elif K == 3:
            self.range_train = list(range(3*N_train,N))
            self.range_val = list(range(0,3*N_train))
        elif K == 4:
            # Random data: 25% (train_ratio) for training
            lin = np.linspace(0,N-1,N)
            lin = list(lin.astype(int))
            random.shuffle(lin)
            self.range_train = list(lin[0:N_train])
            self.range_val = list(lin[N_train:N])
            #self.range_train = list(lin[0:10])
            #self.range_val = list(lin[N_train:N_train+10])
        else:
            print('Invalid K value, enter a K between 0 and 3')
            return
        if mode == 'train':
            return [self.dataset_dicts[i] for i in self.range_train]
        elif mode == 'val':
            return [self.dataset_dicts[i] for i in self.range_val]
        else:
            print('Invalid mode: either train or val')

    # def get_range_for_k(self, K, train_ratio=0.25):
    #     N = len(self.dataset_dicts)
    #     N_train = math.floor(N*train_ratio)
    #     # Set the range
    #     if K == 0:
    #         range_train = list(range(0,N_train))
    #         range_val = list(range(N_train,N)) # TODO: Just for testing (N)
    #     elif K == 1:
    #         range_train = list(range(N_train,2*N_train))
    #         range_val = list(range(0,N_train)) + list(range(2*N_train,N))
    #     elif K == 2:
    #         range_train = list(range(2*N_train,3*N_train))
    #         range_val = list(range(0,2*N_train)) + list(range(3*N_train,N))
    #     elif K == 3:
    #         range_train = list(range(3*N_train,N))
    #         range_val = list(range(0,3*N_train))
    #     else:
    #         print('Invalid K value, enter a K between 0 and 3')
    #         return
    #     return [self.dataset_dicts[i] for i in range_train], [self.dataset_dicts[i] for i in range_val]

    def detectron2converter(self, input_pred, coco=False):
        """Convert detectron2 predictions into lists of project BB boxes.

        Assumes input_pred is ordered like self.range_val (set by a prior
        get_dict_from_xml call) — TODO confirm at the call site.  With
        coco=True only COCO class 2 ('car') detections are kept.
        """
        output_pred = []
        frame_num = 0
        for pred in input_pred:
            #print("Inference for frame: " + str(int(self.range_val[frame_num])))
            print(pred["instances"])  # NOTE(review): debug print left in
            pred_classes = pred["instances"].pred_classes.to("cpu")
            pred_scores = pred["instances"].scores.to("cpu")
            pred_boxes = pred["instances"].pred_boxes.to("cpu")
            pred_boxes = list(pred_boxes)
            box_list = []
            for i in range(0, len(pred_classes)):
                if coco:
                    if pred_classes[i] == 2: # class 2 = car
                        box = BB(int(self.range_val[frame_num]), 0, 'car', float(pred_boxes[i][0]), float(pred_boxes[i][1]),
                                 float(pred_boxes[i][2]), float(pred_boxes[i][3]), pred_scores[i])
                        box_list.append(box)
                else:
                    box = BB(int(self.range_val[frame_num]), 0, 'car', float(pred_boxes[i][0]), float(pred_boxes[i][1]),
                             float(pred_boxes[i][2]), float(pred_boxes[i][3]), pred_scores[i])
                    box_list.append(box)
            output_pred.append(box_list)
            frame_num += 1
        return output_pred


def read_detections_file(filename):
    """Read a comma-separated detections file (frame,id,x,y,w,h,score,...)
    into a list of per-frame dicts holding detectron2 Instances.

    Boxes are converted from x,y,w,h to x1,y1,x2,y2; every detection gets
    class 0.  Assumes the file is sorted by frame — TODO confirm.
    """
    output = []  # NOTE(review): unused; kept to avoid touching behavior
    with open(filename, 'r') as f:
        detections = f.readlines()
    # Create a dict for every frame
    dataset_dicts = []
    last_frame = -1
    for i, det in enumerate(detections):
        det = det.split(',')
        # If frame has changed, restart record
        if int(det[0]) != last_frame or i==0 or i==len(detections)-1:
            if i != 0:
                record["instances"].set("pred_classes",torch.CharTensor(pred_classes))
                record["instances"].set("scores",torch.Tensor(scores))
                record["instances"].set("pred_boxes",Boxes(torch.Tensor(pred_boxes)))
                dataset_dicts.append(record)
            last_frame = int(det[0])
            record = {}
            pred_boxes = []
            pred_classes = []
            scores = []
            record["instances"] = Instances((1920,1080))
        # Add box to instances
        pred_boxes.append([float(det[2]),float(det[3]),float(det[2])+float(det[4]),float(det[3])+float(det[5])])
        pred_classes.append(0)
        scores.append(float(det[6]))
    return dataset_dicts
true
02fcd57ae510ae3aea5f551e58f18ef16dfb2c67
Python
zwzwtao/Deep-Learning
/Tensorflow1.x/NLP/Penn Treebank Dataset(PTB)/vocabulary_generation.py
UTF-8
1,485
2.796875
3
[]
no_license
import codecs
import collections
from operator import itemgetter

# Build a frequency-sorted vocabulary file for one of three corpora.
# set MODE to "PTB" or "TRANSLATE_EN" or "TRANSLATE_ZH"
MODE = "PTB"

if MODE == "PTB":
    RAW_DATA = "PTB_data/ptb.train.txt"
    VOCAB_OUTPUT = "ptb.vocab"
    # BUGFIX: PTB never defined VOCAB_SIZE, so the truncation check below
    # raised NameError; None means "keep the full vocabulary".
    VOCAB_SIZE = None
elif MODE == "TRANSLATE_ZH":
    RAW_DATA = "TED_data/train.txt.zh"
    VOCAB_OUTPUT = "zh.vocab"
    VOCAB_SIZE = 4000
elif MODE == "TRANSLATE_EN":
    RAW_DATA = "TED_Data/train.txt.en"
    VOCAB_OUTPUT = "en.vocab"
    VOCAB_SIZE = 10000

# count the frequency of each word
counter = collections.Counter()
with codecs.open(RAW_DATA, "r", "utf-8") as f:
    for line in f:
        for word in line.strip().split():
            counter[word] += 1

# sort words on frequency (descending)
sorted_word_cnt = sorted(counter.items(), key=itemgetter(1), reverse=True)
sorted_words = [x[0] for x in sorted_word_cnt]

if MODE == "PTB":
    # add <eos> at text line break
    sorted_words = ["<eos>"] + sorted_words
elif MODE in ["TRANSLATE_EN", "TRANSLATE_ZH"]:
    # BUGFIX: was "TRANSLATE_ZN", so the ZH branch could never match.
    # Translation vocabularies also need <unk> and <sos>;
    # BUGFIX: the unknown token was written as bare "unk" instead of "<unk>".
    sorted_words = ["<unk>", "<sos>", "<eos>"] + sorted_words

# then delete words with low frequency (translate modes only)
if VOCAB_SIZE is not None and len(sorted_words) > VOCAB_SIZE:
    sorted_words = sorted_words[: VOCAB_SIZE]

with codecs.open(VOCAB_OUTPUT, 'w', 'utf-8') as file_output:
    for word in sorted_words:
        file_output.write(word + '\n')
true
c7302b96b7c4596929f44ef7bb187d9c82b2deb8
Python
massadraza/Python-Learning
/multiple_key_sort.py
UTF-8
576
3.515625
4
[]
no_license
from operator import itemgetter

# Demo: sorting a list of dicts on more than one key at once.
# itemgetter('fname', 'lname') returns a (first, last) tuple per record,
# so records tied on first name are ordered by last name.
Stock = [
    {"fname": 'Amanda', 'lname': "Roberts"},
    {"fname": 'Bobby', 'lname': "Williams"},
    {"fname": 'John', 'lname': "Allen"},
    {"fname": 'Elon', 'lname': "Williams"},
    {"fname": 'Don', 'lname': "Jones"},
    {"fname": 'Bobby', 'lname': "Allen"},
    {"fname": 'Nick', 'lname': "Hayes"},
    {"fname": 'Bobby', 'lname': "Jones"},
]

full_name_key = itemgetter('fname', 'lname')
for person in sorted(Stock, key=full_name_key):
    print(person)
true
6dc3ff16853d5e4f7ce31274625885994dbc0836
Python
timoButtenbruck/microRay
/gui/graphicItems/symbols/derivativeFunction.py
UTF-8
1,441
2.59375
3
[]
no_license
# -*- encoding: utf-8 -*-
from PyQt4 import QtGui, QtCore


class DerivativeFunctionBlock(QtGui.QGraphicsItem):
    """50x50 block-diagram symbol drawing the Laplace derivative operator "s".

    Exposes north/west/south/east connection points in scene coordinates.
    """

    def __init__(self, parent=None):
        super(DerivativeFunctionBlock, self).__init__(parent)

        # Outline of the block; reused for painting.
        self.boundingRectPath = QtGui.QPainterPath()
        self.boundingRectPath.addRect(0, 0, 50, 50)

        # rect for text positions
        self.sRect = QtCore.QRectF(0, 0, 50, 50)
        self.font = QtGui.QFont("sans-serif", 12)

        self.pen = QtGui.QPen()
        self.pen.setWidth(2)
        self.pen.setCosmetic(True)  # keep line width constant under zoom

    def paint(self, QPainter, QStyleOptionGraphicsItem, QWidget_widget=None):
        # QPainter.setRenderHint(QtGui.QPainter.Antialiasing)
        QPainter.setPen(self.pen)
        QPainter.drawPath(self.boundingRectPath)
        QPainter.setFont(self.font)
        # BUGFIX: Qt alignment flags must be combined with bitwise "|";
        # the original used the boolean "or", which silently evaluated to
        # just the first flag (AlignCenter).
        QPainter.drawText(self.sRect,
                          QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter,
                          QtCore.QString("s"))

    def boundingRect(self):
        """Item-local bounds required by QGraphicsItem."""
        return QtCore.QRectF(0, 0, 50, 50)

    @property
    def northCoordinates(self):
        """Scene position of the top-center connection point."""
        return self.mapToScene(QtCore.QPoint(25, 0))

    @property
    def westCoordinates(self):
        """Scene position of the left-center connection point."""
        return self.mapToScene(QtCore.QPoint(0, 25))

    @property
    def southCoordinates(self):
        """Scene position of the bottom-center connection point."""
        return self.mapToScene(QtCore.QPoint(25, 50))

    @property
    def eastCoordinates(self):
        """Scene position of the right-center connection point."""
        return self.mapToScene(QtCore.QPoint(50, 25))
true
5ae892d0dc560a294ec7ce6a81b3a0267171bf2e
Python
gahakuzhang/PythonCrashCourse-LearningNotes
/7.user input and while loops/7.2.3 using a flag.py
UTF-8
324
3.8125
4
[]
no_license
# 7.2.3 Using a flag to control a while loop.
# Note: in Python the booleans are capitalized: True / False.
prompt = ("\nTell me something, and I will repeat it back to u: "
          "\nEnter 'quit' to end of this program. ")

keep_running = True
while keep_running:
    message = input(prompt)
    # Flip the flag when the user types the quit command; echo otherwise.
    keep_running = message != 'quit'
    if keep_running:
        print(message)
true
40ef4231c25cff57f47fa72f91dc6af96d25cd71
Python
jiivan/crypto-tax
/crypto_tax/parse_nbp.py
UTF-8
2,253
2.984375
3
[ "MIT" ]
permissive
#/usr/bin/env python3 import csv import datetime import decimal import itertools from crypto_tax import db from crypto_tax import models def import_path(p): state = 'header' data = [] currency_names = [] amounts = [] with open(p, newline='', encoding='cp1250') as f: reader = csv.reader(f, delimiter=';') for row in reader: assert not row or row[-1] == '' if state == 'header': state = 'second_header' continue if state == 'second_header': state = 'data' continue if state == 'data': if not row: state = 'footer' continue data.append(row[:-3]) continue if state == 'footer': assert row[0] == 'kod ISO' currency_names.extend(row[1:-1]) state = 'footer_names' continue if state == 'footer_names': assert row[0] == 'nazwa waluty' state = 'footer_amounts' continue if state == 'footer_amounts': assert row[0] == 'liczba jednostek' amounts.extend(row[1:-1]) state = 'finished' continue if state == 'finished': print(f'extra row: {row}') print(f'state {state} data: {len(data)} currencies: {currency_names}') mapped = {} for key, value in itertools.zip_longest(currency_names, amounts): mapped[key] = {'amount': int(value)} for entry in data: date = datetime.datetime.strptime(entry[0], '%Y%m%d').date() for currency, value in itertools.zip_longest(currency_names, entry[1:]): model = models.FiatExchangeRate( authority='NBP', authority_currency='PLN', currency=currency, amount=mapped[currency]['amount'], valid_date=date, exchange_rate = str(decimal.Decimal(value.replace(',', '.'))), ) db.session.add(model) db.session.commit() if __name__ == '__main__': import sys import_path(sys.argv[1])
true
81988727f22b111a8a834dea104264af6aa72828
Python
MTN111/AWO121-Full
/LinkedinScraper/demographics.py
UTF-8
3,205
2.59375
3
[]
no_license
# NOTE: Python 2 code (print statement, dict.iteritems).
import requests
import json
import datetime
import codes
from linkedin_hist_data import db_settings

# Linkedin page credentials
# NOTE(review): live API key/secret/token committed in source — should be
# moved to configuration/secrets storage.
LINKEDIN_KEY='77v2ofy2by41vh'
LINKEDIN_SECRET='763AeuKRTgaq6rjN'
LINKEDIN_PAGE_ID=5005117
API_URL = 'https://api.linkedin.com/v1/companies/{0}/company-statistics'
API_FOLLOWS_URL = 'https://api.linkedin.com/v1/companies/{0}/num-followers'
token = 'AQWMr0yoLD7TdOElkwyE78lgGXZhdQJiTy2VHVARVafxJmCmr1c-SCB38YwsTRYmrtohEes2sLfgzKUgbMBgLNa-ej8QtDIqYxne93MjiaUPoN4ogE2HvMS8l8MfrT2UdMihlzpSkM75bkxfjZzRgjB7PMSOjArL_i8i02HcGSUh-QN7YiA'
headers = {
    'Authorization': 'Bearer '+token,
}
params = {
    'format':'json'
}


def get_data():
    """Fetch LinkedIn follower demographics for the page and persist them.

    Pulls company-statistics once, then stores per-category rows
    (company size, function, seniority, industry) and per-country
    follower counts, upserting on (date, name/country, category).
    """
    response = requests.get(API_URL.format(LINKEDIN_PAGE_ID), params=params, headers=headers)
    data = json.loads(response.text)
    company_sizes = data['followStatistics']['companySizes']['values']
    functions = data['followStatistics']['functions']['values']
    seniorities = data['followStatistics']['seniorities']['values']
    industries = data['followStatistics']['industries']['values']
    countries = data['followStatistics']['countries']['values']

    def to_dict(data, category):
        # transforming data to dict, with changing some fields according to numerical constraints
        # (entry keys are numeric codes except for COMPANY_SIZE, which keeps
        # its string key; codes.CODES translates them to display names)
        to_return = []
        for x in data:
            if category != 'COMPANY_SIZE':
                k = int(x['entryKey'])
            else:
                k = x['entryKey']
            to_return.append({
                'name': codes.CODES[category][k],
                'value': x['entryValue'],
                'category': category,
                'date': datetime.date.today()
            })
        return to_return

    def save_to_db(data):
        # saves data to db: upsert keyed on (date, name, category)
        session = db_settings.session
        for x in data:
            key = {}
            key['date'] = x['date']
            key['name'] = x['name']
            key['category'] = x['category']
            ins = session.query(db_settings.LinkedinDemographics).filter_by(**key).first()
            if ins:
                obj = ins
            else:
                obj = db_settings.LinkedinDemographics()
            for k, v in x.iteritems():
                setattr(obj, k, v)
            session.add(obj)
        session.commit()

    def save_countries(data):
        # one num-followers API call per country code, then upsert the count
        session = db_settings.session
        date = datetime.date.today()
        # selecting only country codes for requests
        query_res = db_settings.engine.execute("select * from linkedin_geography_codes where code like '__.__';")
        query_res = query_res.fetchall()
        params = {
            'format': 'json',
        }
        # NOTE(review): only the first 3 codes are queried ([:3]) —
        # presumably a leftover debugging limit; confirm before relying on it.
        for k in query_res[:3]:
            params['geos'] = k[0]
            # getting data from API for selected data
            response = requests.get(API_FOLLOWS_URL.format(LINKEDIN_PAGE_ID), params=params, headers=headers)
            value = int(response.text)
            key = {}
            key['date'] = date
            key['country'] = k[0][3:].upper()
            ins = session.query(db_settings.FollowersByCountry).filter_by(**key).first()
            print key['country']
            # checking if certain country data for current day is present
            if ins:
                obj = ins
            else:
                # creating new db object
                obj = db_settings.FollowersByCountry()
                obj.date = key['date']
                obj.country = key['country']
            obj.value = value
            session.add(obj)
        session.commit()

    company_sizes = to_dict(company_sizes,'COMPANY_SIZE')
    functions = to_dict(functions,'FUNCTION')
    seniorities = to_dict(seniorities,'SENIORITY')
    industries = to_dict(industries,'INDUSTRY')

    # saving demographics data
    save_to_db(company_sizes)
    save_to_db(functions)
    save_to_db(seniorities)
    save_to_db(industries)

    # saving country data
    save_countries(countries)


get_data()
true
b0de7065eec0f5f1902dc3a584bc4518d33413af
Python
Aasthaengg/IBMdataset
/Python_codes/p02995/s019692508.py
UTF-8
194
2.828125
3
[]
no_license
from math import gcd

# Count the integers in [A, B] divisible by neither C nor D, via
# inclusion-exclusion over the multiples of C, of D, and of lcm(C, D).


def _multiples_between(lo, hi, step):
    # number of multiples of `step` inside [lo, hi]
    return hi // step - (lo + step - 1) // step + 1


A, B, C, D = map(int, input().split())
lcm_cd = C * D // gcd(C, D)
total = B - A + 1
print(total
      - _multiples_between(A, B, C)
      - _multiples_between(A, B, D)
      + _multiples_between(A, B, lcm_cd))
true
9e581e556d2c37ab99ad36b92703c8394d3b7621
Python
jflaboe/Project-Euler
/Euler9.py
UTF-8
125
3.15625
3
[]
no_license
# Project Euler 9: for the Pythagorean triplet(s) with a + b + c = 1000,
# print the product a*b*c.  Brute force over a and b; c is implied.
# (Each unordered triplet is found twice, once per (a, b) ordering,
# matching the original's output.)
for a in range(1, 500):
    for b in range(1, 500):
        c = 1000 - a - b
        if a * a + b * b == c * c:
            print(a * b * c)
true
e0411f97356b9f4cc79da26acb71b915e55bedae
Python
gchetrick/honeyports
/honeyports-0.5.py
UTF-8
6,319
2.796875
3
[]
no_license
#!/usr/bin/python
# Author: Paul Asadoorian, PaulDotCom/Black Hills Information Security
# Contributors: Benjamin Donnelly, BHIS;Greg Hetrick, PaulDotCom
# Date: 7/28/13
# Description: This script listens on a port. When a full connection is made to that port
# Honeyports will create a firewall rule blocking the source IP address.
# Currently works on Windows, Linux and OS X with the built-in firewalls.
# You no longer need Netcat on your system!
# TODO:
# Create own chain for easy management in linux
# List all interfaces and define the interface to listen on
# Listen on multiple ports
# Add ability for a whilelist - Listening port and localhost or file input
# Syslog blocks in daemon mode
# Set/config a lifetime for the FW rule that is created
# Add options for protocol specific banners or taunting messages.
# Verification it is running as root/administrator
# Change Log v0.5
# Added support for multi-threading
# fix options section to actually catch a miss and improper port
# Fix menuing system
# Added Daemon option
# NOTE: Python 2 code (print statement, raw_input, thread/SocketServer modules).

# Import the stuff we need
import threading
import thread
import SocketServer
import socket               # Import socket module
import platform             # Import platform module to determine the os
from subprocess import call # Import module for making OS commands (os.system is deprecated)
import sys, getopt          # Import sys and getopt to grab some cmd options like port number
import os                   # Import os because on Linux to run commands I had to use popen


class ThreadingTCPServer (SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    # One handler thread per incoming connection.
    pass


class ServerHandler (SocketServer.StreamRequestHandler):
    # Handles a single honeyport connection: sends the banner, then adds a
    # platform-appropriate firewall rule blocking the client's address.
    # Relies on module-level globals: daemon, host, platform, nasty_msg.
    def handle(self):
        if not daemon:
            print "Got Connection from: ", self.client_address
            print "Blocking the address: ", self.client_address[0]
        hostname = self.client_address[0]
        # Never block ourselves.
        if hostname == host:
            print 'DANGER WILL ROBINSON: Source and Destination IP addresses match!'
            return
        thread = threading.current_thread()
        self.request.sendall(nasty_msg)
        # If there is a full connection, create a firewall rule
        # Run the command associated with the platform being run:
        if platform == "Windows":
            if not daemon:
                print "Creating a Windows Firewall Rule\n"
            fw_result = call('netsh advfirewall firewall add rule name="honeyports" dir=in remoteip= ' + hostname + ' localport=any protocol=TCP action=block > NUL', shell=True)
        elif platform == "Linux":
            if not daemon:
                print "Creating a Linux Firewall Rule\n"
            else:
                # daemon mode: record the block via syslog instead of stdout
                log='logger -t honeyports "blocked %s -- stupid bastard"' % hostname
                os.popen(log)
            command = '/sbin/iptables -A INPUT -s ' + hostname + ' -j REJECT'
            fw_result = os.popen(command)
            # normalize popen's file object into a 0/1 status code
            if fw_result:
                fw_result=0
            else:
                fw_result=1
        elif platform == "Darwin":
            if not daemon:
                print "Creating a Mac OS X Firewall Rule\n"
            fw_result = call(['ipfw', '-q add deny src-ip ' + hostname])
        if not daemon:
            if fw_result:
                print 'Crapper, firewall rule not added'
            else:
                print 'I just blocked: ', hostname
        return

    def finish(self):
        # Called after handle(); say goodbye and note the disconnect.
        print self.client_address, 'disconnected!'
        self.request.send('bye ' + str(self.client_address) + '\n')


def StartServer (threadName, host, port):
    """Start the threaded TCP listener on (host, port).

    NOTE(review): serve_forever() is CALLED in the target= argument, so it
    blocks right here and the Thread below never actually starts — it
    happens to work only because this function itself runs in its own
    thread.  Probably intended: target=server.serve_forever (no call).
    """
    server = SocketServer.ThreadingTCPServer((host,port) , ServerHandler)
    ip,port = server.server_address
    serverthread = threading.Thread(target=server.serve_forever())
    serverthread.daemon = True
    serverthread.start()


def MenuInteraction (threadName):
    # Listen for user to type something then give options to
    # Flush all firewall rules, quit or print the rules
    while True:
        mydata = raw_input('Enter Commands (q=quit f=flush rules l=list rules): ')
        if mydata == "f":
            print "Flushing firewall rules..."
            print 'Flush command is: ', str(flush)
            call(flush[0] + ' ' + flush[1], shell=True)
        elif mydata == "l":
            print 'Here is what your rules look like:'
            call(fwlist[0] + ' ' + fwlist[1], shell=True)
        elif mydata == "q":
            print "Goodbye and thanks for playing..."
            # hard exit: kills all threads immediately
            os._exit(1)
        else:
            print "What?"


port=''  # Reserve a port for your service, user must use -p to specify port.
version="0.5"
name="Honeyports"
USAGE="""
USAGE %s -p <port>
-p, --port    Required <port>, port to start the listner on (1-65535)
-h, --host    Required <host>, host/IP address to run the listner on
-D, --daemon  Optionally run in daemon mode (supresses interactive mode)
""" % sys.argv[0]

# Message to send via TCP to connected addresses
# Can be used to mimic a banner
nasty_msg = "\n***** This Fuck you provided by the fine folks at PaulDotCom, Hack Naked!*****\n"

try:
    myopts, args = getopt.gnu_getopt(sys.argv[1:], "h: p: D", ["host", "port", "daemon"])
except getopt.GetoptError as error:
    print (str(error))
    print USAGE
    sys.exit(2)

#initial check to verify we have any options since port num is required.
if not myopts:
    print USAGE
    sys.exit(2)

for o, a in myopts:
    if (o == '-p' or o == '-port') and int(a) < 65536:
        port=int(a)
    elif (o == '-p' or o == '-port' ) and int(a) > 65535:
        print("Not a valid port number")
        print USAGE
        sys.exit(2)
    # NOTE(review): daemon is reset on every option, so its final value
    # depends on the LAST option parsed — confirm intended behavior.
    if o == '-D' or o == '-daemon':
        daemon = 1
    else:
        daemon = 0
    if (o == '-h' or o == '-host'):
        if not a:
            print "Forgot to provide an IP"
            print USAGE
        else:
            host = a

if not daemon:
    print name, 'Version:', version
    print 'I will listen on TCP port number: ', port

# Determine which platform we are running on
platform = platform.system()
if not daemon:
    print 'Honeyports detected you are running on: ', platform

# Per-platform flush / list command pairs used by MenuInteraction.
if platform == "Darwin":
    flush = 'ipfw', '-q flush'
    fwlist = 'ipfw', 'list'
elif platform == "Linux":
    flush = 'iptables', '-F'
    fwlist = 'iptables', '-nL'
else:
    flush = 'netsh', 'advfirewall reset'
    fwlist = "netsh", """ advfirewall firewall show rule name=honeyports | find "RemoteIP" """

host = ''  # bind on all interfaces
thread.start_new_thread( StartServer, ("Thread1", host, port))
if not daemon:
    thread.start_new_thread( MenuInteraction, ("Thread2",))
# Keep the main thread alive; worker threads do everything.
while 1:
    pass
true
49e2bb6b7ccb96d47b732dbcd2ebc9bb1d38adcb
Python
DerHabicht/RedROVOR
/redrovor/frameTypes.py
UTF-8
3,207
2.9375
3
[]
no_license
#!/usr/bin/python
import pyfits
import sys
import os
import os.path
from collections import defaultdict
import obsDB
from fitsHeader import isFits, getFrameType, getObjectName, splitByHeader


def getFrameLists(fileList):
    '''given an iterator of filenames, go through each one, get the
    the type of the frame and add it to the appropriate list
    return a dictionary containing lists of files for 'zero', 'dark',
    'object', and 'none'. The 'none' category will contain fits files
    that we can't determine the type for and files that we are unable
    to open'''
    results = {'zero':[],'dark':[],'flat':[],'object':[],'unknown':[]}
    for f in iter(fileList):
        try:
            imType = getFrameType(pyfits.getheader(f))
        except:
            # unreadable / non-FITS files end up as 'unknown'
            # NOTE(review): bare except also swallows KeyboardInterrupt
            imType = 'unknown'
        if imType is None:
            imType = 'unknown'
        results[imType].append(f)
    return results


def saveFrameLists(frameLists, zeroFile='zeros.lst',darkFile='darks.lst', flatFile='flats.lst',objectFile='objects.lst',unknownFile='unknown.lst'):
    '''Take the output from getFrameLists, and save them to files'''
    # one output file per frame category, one filename per line
    with open(zeroFile,'w') as zf:
        for frame in frameLists['zero']:
            zf.write('{0}\n'.format(frame))
    with open(darkFile,'w') as df:
        for frame in frameLists['dark']:
            df.write('{0}\n'.format(frame))
    with open(flatFile,'w') as ff:
        for frame in frameLists['flat']:
            ff.write('{0}\n'.format(frame))
    with open(objectFile,'w') as of:
        for frame in frameLists['object']:
            of.write('{0}\n'.format(frame))
    with open(unknownFile,'w') as uf:
        for frame in frameLists['unknown']:
            uf.write('{0}\n'.format(frame))


def makeObjectMap(files):
    '''create a dictionary with keys of the objects, and the values are
    lists of all the frames of that object '''
    result = defaultdict(list)
    for frame in iter(files):
        result[getObjectName(pyfits.getheader(frame))].append(frame)
    return result


def makeObjectList(files):
    '''create a list of all the objects observed'''
    return makeObjectMap(files).keys()


def printObjectList(objectlist,objectFile='objectList.lst'):
    '''create a file containing a list of all the objects in objectlist'''
    with open(objectFile,'w') as of:
        for obj in iter(objectlist):
            of.write(obj)
            of.write('\n')
    return


def printObjectMaps(objectMap, fileBase='obj_',ext='.lst'):
    '''for each object create file named fileBase+objName+ext which contains
    a single line header in the formate #(objname) followed a list of the
    frames of that object, one per line'''
    for obj, frames in objectMap.items():
        fname = fileBase + obj + ext #build name for the file
        with open(fname,'w') as olist:
            olist.write('#({0})\n'.format(obj)) #write header with object name
            for frame in frames:
                olist.write(frame)
                olist.write('\n')
    return


#main function
def main(fileList=None):
    '''Classify frames, group object frames by target, and write all lists.

    fileList defaults to every file in the current directory.
    '''
    if fileList is None:
        #default to everything in the folder
        fileList = os.listdir('.')
    #look at the frame types
    frameTypes = getFrameLists(fileList)
    #get object names
    objNames = makeObjectMap(frameTypes['object'])
    #now print out the files
    printObjectMaps(objNames)
    printObjectList(objNames.keys())
    saveFrameLists(frameTypes)


#run main if the script is directly executed
if __name__ == '__main__':
    main(sys.argv[1:])
true
5cc1c9c3d78dea04741925056f98ce47e6743f5d
Python
JamesBondOOO7/Competitive-Programming
/B/BurglarAndMatches.py
UTF-8
304
2.859375
3
[]
no_license
# Codeforces "Burglar and Matches": the bag holds up to n matchboxes; of m
# containers, greedily take boxes from the one with the most matches per box.
capacity, n_containers = map(int, input().split())
containers = [tuple(map(int, input().split())) for _ in range(n_containers)]

total = 0
# Visit containers in decreasing matches-per-box order.
for boxes, per_box in sorted(containers, key=lambda c: c[1], reverse=True):
    if capacity <= 0:
        break
    total += per_box * min(capacity, boxes)
    capacity -= boxes
print(total)
true
16555a00e36ada6a3b84021ae9d6869b87e72807
Python
gideon59a/Game-G4InRow
/PlayClient.py
UTF-8
4,089
2.609375
3
[]
no_license
'''
Created on Oct 2017, started with old "PlayerMain.py"

@author: Gideonmessage sent
This is a branch test1111
'''
import socket               # Import socket module
import sys
import queue

sys.path.append('../') #this has been added so can be found in cmd window
TCP_PORT=50002

from G4InRow.gls import gameover
import G4InRow.gls as gls
import G4InRow.game as game
import G4InRow.connect_tcp as connect_tcp
import G4InRow.pre_gameM as pre_gameM

debug_level=gls.debug_level #0 for no debug, 1 for basic debug, 2 for details


def connect2sever (server_ip_address,destination_port):
    """Open a TCP connection to the game server.

    Returns the connected socket, or -1 when the connection fails.
    """
    iserver_address = (server_ip_address, destination_port)
    # Create a TCP/IP socket
    isock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Connect the socket to the port where the server is listening
    print ('connecting to %s port %s' % iserver_address)
    try:
        isock.connect(iserver_address)
    except:
        # NOTE(review): bare except hides the actual connection error
        print ("error connecting to the server.")
        return -1
    else:
        if gls.debug_level >= 2:
            print ("connected to server: ", isock)
    return isock


#########################
#  Start of execution   #
#########################
#Prepare TCP listener
server_ip_address = 'localhost' #program argument
destination_port = TCP_PORT
sock=connect2sever(server_ip_address,destination_port) #connection ready
if sock == -1: #exit game
    print ("Exiting.")
    exit(1)

# Init pre-game
data_stream = str()          # accumulates partial TCP data between recv calls
rx_queue = queue.Queue(10)   # parsed inbound messages
##tx_queue = queue.Queue(10) #defined in gls
#read from keyboard (for now put static values)

# start game:
myPreGame = pre_gameM.player_pre_game(myName="gideon", myGame="4InRow") #create a pregame isntance
myPreGame.start_player_pre_game() #send the starting message to the server

### INIT:
#========
#????game.palyer_game_init()

#######################################
#              MAIN LOOP              #
#######################################
# loop while not end of game:
#   transmit_to_server if any
#   receive_from_server()
#   check for error
#   process the received
while gls.gameover==False: #game not over
    #1. Send all messages in tx queue as long as queue is not empty
    while not gls.tx_queue.empty():
        txstr=gls.tx_queue.get_nowait()
        if debug_level>1:
            print ('%s: sending "%s"' % (sock.getsockname(), txstr))
        sock.send(str.encode(txstr)) #the message must be byte encoded, utf8 is the default
        if gls.debug_level >=2:
            print ("message sent.")
    #2. Receive a single message - ***BLOCKING!***
    try:
        data = sock.recv(1024)
    except socket.error:
        print ("error in socket, should be closed")
        data="" #NOTE: Data is not read. socket will be closed later by the next if
    if debug_level>1:
        print ('%s: received "%s"' % (sock.getsockname(), data))
    if not data: #Server closed or error in socket
        print ('closing socket', sock.getsockname())
        sock.close()
        gls.gameover=True
    else:
        #3. Process the received string
        #XXXno queue is needed because each message is processed.
        rxstr=data.decode("utf-8")
        # split the byte stream into complete messages; leftovers stay in
        # data_stream until the next recv
        data_stream, rx_queue = connect_tcp.extract_message(rxstr, data_stream, rx_queue)
        while not rx_queue.empty():
            rx_message=rx_queue.get_nowait()
            if myPreGame.game_status == 1: # pre game, waiting for confirmation from server
                myPreGame.play_player_pre_game(rx_message)
                if myPreGame.game_status == 2:
                    game.palyer_game_init()
                if myPreGame.game_status == -1:
                    gls.gameover = True
                    print("Game rejected. Exiting.")
            elif myPreGame.game_status == 2: # in game
                game.play_player(rx_message) #PROCESS THE MESSAGE
            else: #error
                gls.gameover=True
                print ("Game status error. Exiting.")

#def wait_for_kbd ():
#    a=input ("enter something to continue")
#if debug_level>1:
#    print ("gameover=",gls.gameover,"wait...")
#wait_for_kbd()
print ("end.")
true
1eea8cc474561fe900cdea14bf7163c395e3501d
Python
gabrieltseng/datascience-projects
/natural_language_processing/machine_translation/translate/model/seq2seq.py
UTF-8
6,771
2.765625
3
[]
no_license
import torch from torch import nn import random from ..data import BOS_TOKEN, EOS_TOKEN class Attention(nn.Module): def __init__(self, rnn_hidden_size): super().__init__() self.hidden_linear = nn.Linear(in_features=rnn_hidden_size, out_features=rnn_hidden_size) self.total_linear = nn.Linear(in_features=rnn_hidden_size, out_features=1) def forward(self, transformed_encodings, hidden): """ Assumes the encodings have already been put through a linear layer, since there is no need to do this for every loop in the decoder """ x = torch.tanh(transformed_encodings + self.hidden_linear(hidden)) # first dimension is the sequences return nn.functional.softmax(self.total_linear(x), dim=1) class FrenchToEnglish(nn.Module): def __init__(self, fr_embedding_path, en_embedding_path, fr_dict, en_dict, max_en_length, rnn_hidden_size=256, embedding_size=300, embedding_mean=0, embedding_std=0.3, encoder_dropout=0.15, decoder_dropout=0.35, bidirectional=True, forcing_probability=0, attention=False): super().__init__() self.bidirectional = bidirectional self.fr_embedding = self.get_embedding(fr_embedding_path, fr_dict, embedding_size, embedding_mean, embedding_std) self.en_embedding = self.get_embedding(en_embedding_path, en_dict, embedding_size, embedding_mean, embedding_std) self.encoder = nn.GRU(input_size=embedding_size, hidden_size=rnn_hidden_size, num_layers=2, batch_first=True, dropout=encoder_dropout, bidirectional=bidirectional) # a linear transformation from the encoder to the decoder 2) self.hidden_transformer = nn.Linear(rnn_hidden_size * 2 if bidirectional else rnn_hidden_size, rnn_hidden_size) # add attention self.attention = attention if self.attention: self.attention_layer = Attention(rnn_hidden_size=rnn_hidden_size) self.rnn_transformer = nn.Linear((rnn_hidden_size * 2 if bidirectional else rnn_hidden_size) + embedding_size, embedding_size) self.encoding_transformer = nn.Linear(rnn_hidden_size * 2 if bidirectional else rnn_hidden_size, rnn_hidden_size) self.decoder = 
nn.GRU(input_size=embedding_size, hidden_size=rnn_hidden_size, num_layers=2, batch_first=True) self.decoder_dropout = nn.Dropout(p=decoder_dropout) self.to_vocab = nn.Linear(rnn_hidden_size, len(en_dict)) # the max length of the output self.max_length = max_en_length self.en_bos = en_dict[BOS_TOKEN] self.en_eos = en_dict[EOS_TOKEN] # although this can be set here, ideally it would be controlled by the scheduler self.forcing_probability = forcing_probability def get_embedding(self, embedding_path, language_dict, embedding_size=300, mean=0, std=0.3): """ Load a language embedding from fasttext vectors Arguments: embedding_path: a pathlib.Path of the embeddings' path language_dict: word to int embedding_size: the embedding size mean and std are the pre-trained embedding values, so that the randomly initialized layers are also handled """ embedding = nn.Embedding(num_embeddings=len(language_dict), embedding_dim=embedding_size, padding_idx=language_dict['_pad_']) # aligns with what we will fill it up as nn.init.normal_(embedding.weight, mean=mean, std=std) with torch.no_grad(): with embedding_path.open('r', encoding='utf-8', newline='\n', errors='ignore') as f: for line in f: tokens = line.rstrip().split(' ') if tokens[0] in language_dict: embedding.weight[language_dict[tokens[0]]] = torch.tensor(list(map(float, tokens[1:]))) return embedding def forward(self, fr, en=None): """ fr: the questions to be translated en: if not None, will be used with teacher forcing """ # first, get the embeddings for the french input questions batch_size = fr.shape[0] fr_emb = self.fr_embedding(fr) # we only care about the hidden output of the encoder encoding, hidden = self.encoder(fr_emb) if self.bidirectional: d1, _, d3 = hidden.shape # if bidirectional, d1 will definitely be divisible by 2 hidden = hidden.view(int(d1 / 2), batch_size, int(d3 * 2)) if self.attention: transformed_encoding = self.encoding_transformer(encoding) hidden = self.hidden_transformer(hidden) # generate a 
[batch_size, 1] dimensional tensor of the beginning of sentence tokens base = torch.ones(batch_size).long().unsqueeze(1) if self.en_embedding.weight.is_cuda: base = base.cuda() seq_tensor = self.decoder_dropout(self.en_embedding(base * self.en_bos)) en_questions = [] for i in range(self.max_length): if self.attention: attention = self.attention_layer(transformed_encoding, hidden[-1].unsqueeze(0).transpose(1, 0)) weighted_inputs = (attention * encoding).sum(1).unsqueeze(1) seq_tensor = self.rnn_transformer(torch.cat([weighted_inputs, seq_tensor], dim=-1)) output, hidden = self.decoder(seq_tensor, hidden) words = self.to_vocab(output) en_questions.append(words) # check if we should use forced teaching now if (en is not None) and (self.forcing_probability > 0): if random.random() < self.forcing_probability: if i < en.shape[1]: selected_words = en[:, i].unsqueeze(1) else: return torch.cat(en_questions, dim=1) else: selected_words = words.argmax(dim=-1) else: selected_words = words.argmax(dim=-1) # check we are not all at an end of sentence token if torch.eq(selected_words, self.en_eos).all(): return torch.cat(en_questions, dim=1) seq_tensor = self.decoder_dropout(self.en_embedding(selected_words)) return torch.cat(en_questions, dim=1)
true
9e28e02485e75cf0dc76d395c64624129a9e5cb6
Python
CatarinaBrendel/Lernen
/curso_em_video/Module 2/exer041.py
UTF-8
586
3.390625
3
[]
no_license
from datetime import date print "\033[1;32m-=\033[m" * 20 print" Bem-Vindo a Federacao de Natacao" print "\033[1;32m-=\033[m" * 20 print' ' print"Deixe-me selecionar sua categoria" idade = int(raw_input("Diga-me o ano em que nasceu: > ")) idadea = date.today().year - idade print "Voce agora tem {} anos".format(idadea) if idadea <= 9: print "Sua categoria e: MIRIM" elif idadea <=14: print "Sua categoria e: INFANTIL" elif idadea <= 19: print "Sua categoria e: JUNIOR" elif idadea <= 25: print "Sua categoria e: SENIOR" else: print "Sua categoria e: MASTER"
true
d30263b573979ff0c5fea820afe5dc5542edf124
Python
hardy-12/hardik-panchal
/all_tasks/day_4_tasks/function_multirtn.py
UTF-8
136
2.890625
3
[]
no_license
def getFullname(): fName = "Hardik" lName = "Panchal" return fName, lName fName, lName = getFullname() print(fName, lName)
true
51bb18ee5547b00ee5ef9301d453e4ad286e3f5b
Python
BomDia12/EstruturasDeDados
/Questionario4/01.py
UTF-8
227
2.546875
3
[]
no_license
def altura(raiz): def recursive(raiz): if raiz is None: return -1 return max(recursive(raiz.esq), recursive(raiz.dir)) + 1 result = recursive(raiz) return result if result > 0 else 0
true
4ad28239ab977793215b41bbfce578d7a5e52ba1
Python
michalzielinski913/PhysicsLab
/MonteCarlo/wykresy.py
UTF-8
1,430
3.0625
3
[]
no_license
import numpy as np import plotly.express as px from sklearn.linear_model import LinearRegression import plotly.graph_objects as go #Circumference x1 = np.array([7.4, 7.52, 8, 5.3, 4.86, 5.14, 25.84, 22.72, 46.08, 21.3]) #Diameter y1 = np.array([2.28, 2.52, 2.62, 1.68, 1.56, 1.52, 8.12, 7.16, 14.62, 6.52]) ex = [0.061, 0.061, 0.051, 0.051, 0.040, 0.040, 0.040, 0.061, 0.061, 0.072, ] ey=[0.061, 0.061, 0.061, 0.061, 0.040, 0.061, 0.061, 0.065, 0.061, 0.061, ] #Converting x matrix in order to fit tensor xr=np.array(x1).reshape((-1, 1)) yr=np.array(y1) #Passing x and y values to linear regression sklearn ML model # https://scikit-learn.org/stable/modules/linear_model.html model = LinearRegression(fit_intercept=False).fit(xr, yr) #Generating 100 points in range <0, biggest measured x> x_range = np.linspace(0, xr.max(), 100) #Preditcting y values for 100 points given above y_range = model.predict(x_range.reshape(-1, 1)) #coeficient is 1/Pi so we need to calculate (1/Pi)^-1 in order to get our result print("Predicted Pi value: ", model.coef_**-1) #Graph generation fig = px.scatter(x=x1, y=y1, error_x=ex, error_y=ey) fig.add_traces(go.Scatter(x=x_range, y=y_range, name='Linear regression')) fig.update_xaxes(showline=True, linewidth=2, linecolor='black', gridcolor='Grey', title_text='Circumference') fig.update_yaxes(showline=True, linewidth=2, linecolor='black', gridcolor='Grey', title_text='Diameter') fig.show()
true
e66e6f642e2359edda2edfd471cc67af7c6454d0
Python
vishantbhat/myfiles
/cats_with_hats.py
UTF-8
384
3.859375
4
[]
no_license
## Cats with Hats # 100 cats with hats ON cats = {} for i in range(1, 101): cats['cat' + str(i)] = "OFF" for round in range(1, 101): for cat in range(1, 101): if cat % round == 0: if cats['cat' + str(cat)] == "OFF": cats['cat' + str(cat)] = "ON" else: cats['cat' + str(cat)] = "OFF" for c, hats in cats.items(): if cats[c] == "ON": print(c + ' has a hat.')
true
8b5da4de8a819cf64a24c6e616a3842ac80315dc
Python
xstrengthofonex/social-networking-kata
/social_network/posts/use_cases/create_post.py
UTF-8
1,652
2.5625
3
[]
no_license
from dataclasses import dataclass from datetime import datetime from social_network.common import boundary from social_network.posts import post from social_network.posts import posts from social_network.users import user from social_network.users import users @dataclass(frozen=True) class Request(boundary.Request): user_id: str text: str @dataclass(frozen=True) class Response(boundary.Response): post_id: post.Id user_id: user.Id text: str created_on: datetime USER_DOES_NOT_EXIST = "User Does Not Exist." class UseCase(boundary.Input): def __init__(self, posts_repository: posts.Repository, users_repository: users.Repository, presenter: boundary.Output, clock: datetime): self.posts_repository = posts_repository self.users_repository = users_repository self.presenter = presenter self.clock = clock def execute(self, request: Request) -> None: if not self.users_repository.find_by_id(user.Id(request.user_id)): self.presenter.on_failure(USER_DOES_NOT_EXIST) else: new_post: post.Post = self.create_new_post_from(request) self.posts_repository.add(new_post) response = Response(new_post.id, new_post.user_id, new_post.text, new_post.created_on) self.presenter.on_success(response) def create_new_post_from(self, request: Request) -> post.Post: return post.Post( id=self.posts_repository.get_next_id(), user_id=user.Id(request.user_id), text=request.text, created_on=self.clock.now())
true
303d546ff6dbfb324d5fcb2716aff9fb05510116
Python
Joyer0099/Leetcode
/Medium/Python/SortingAndSearching/SearchRange.py
UTF-8
372
3.09375
3
[]
no_license
class Solution(object): def searchRange(self, nums, target): """ :type nums: List[int] :type target: int :rtype: List[int] """ try: left = nums.index(target) right = len(nums) - 1 - nums[::-1].index(target) except: return [-1, -1] return [left, right]
true
56ddb0e8e493abae54ab1be6c0ecfc6f5f211d44
Python
taariksiers/udemy-complete-python-bootcamp
/Section19_Python2_Material/Lecture140_LambdaExpressions.py
UTF-8
1,241
4.4375
4
[]
no_license
# create anonymous / ad-hoc function - for simple functionss # start off with standard def and refine def square(num): result = num**2 return result print 'square(2)={n}' . format(n=square(2)) def square2(num): return num**2 print 'square2(2)={n}' . format(n=square2(2)) def square3(num): return num**2 print 'square3(2)={n}' . format(n=square3(2)) square4 = lambda num: num**2 print 'square4 = lambda num: num**2' . format(n=square4(2)) even = lambda num: num % 2 == 0 print 'even = lambda num: num % 2 == 0 - 10 | {n}' . format(n=even(10)) print 'even = lambda num: num % 2 == 0 - 11 | {n}' . format(n=even(11)) first_char = lambda string: string[0] print 'first_char = lambda string: string[0] - \'hello\' | {f}' . format(f=first_char('hello')) rev = lambda s: s[::-1] print 'rev = lambda s: s[::-1] - \'Hello\' | {f}' . format(f=rev('Hello')) # multiple arguments def adder(x,y): return x+y print 'adder 10,11 = {n}' . format(n=adder(10,11)) adder2 = lambda x,y: x+y print 'adder2 = lambda x,y: x+y - 11, 13 | {n}' . format(n=adder2(11,13)) thelen = lambda item: len(item) the_string = 'hello over there' print 'thelen = lambda item: len(item) - \'{s}\' | {n}' . format(s=the_string,n=thelen(the_string))
true
46c08fbd10f6ade1477e337927d46289cdb49fc7
Python
winni2k/abeona
/tests/test_acceptance/test_subgraphs.py
UTF-8
1,904
2.609375
3
[ "Apache-2.0" ]
permissive
import collections from hypothesis import given, strategies as strat from abeona.test.drivers import SubgraphTestDriver SeqTup = collections.namedtuple('SeqTup', ['seq', 'kmers']) @given(strat.sets( strat.sampled_from([ SeqTup('AAAT', ('AAA', 'AAT')), SeqTup('ATCC', ('ATC', 'GGA')), SeqTup('CCCG', ('CCC', 'CCG')), ]), min_size=1, max_size=3)) def test_decompose(tmpdir, seqs): # given d = SubgraphTestDriver(tmpdir) for seq in seqs: d.with_dna_sequence(seq.seq) # when expect = d.run() # then for seq in seqs: expect.has_graph_with_kmers(*seq.kmers).has_meta_info('n_junctions', 0) expect.has_n_graphs(len(seqs)) def test_allows_initial_kmer_not_in_seqs(tmpdir): # given d = SubgraphTestDriver(tmpdir) seqs = [ SeqTup('AAAT', ('AAA', 'AAT')), SeqTup('ATCC', ('ATC', 'GGA')), SeqTup('CCCG', ('CCC', 'CCG')), ] for seq in seqs: d.with_dna_sequence(seq.seq) d.with_initial_contigs('AAAC') # when expect = d.run() # then expect.has_graph_with_kmers(*seqs[0].kmers) expect.has_n_graphs(1) def test_counts_one_junction_of_subgraphs(tmpdir): # given d = SubgraphTestDriver(tmpdir) d.with_dna_sequence('AAAT') d.with_dna_sequence('AAAC') # when expect = d.run() # then expect.has_graph_with_kmers('AAA', 'AAT', 'AAC').has_meta_info('n_junctions', 1) expect.has_n_graphs(1) def test_counts_two_junctions_of_subgraphs(tmpdir): # given d = SubgraphTestDriver(tmpdir) d.with_dna_sequence('AAAT') d.with_dna_sequence('AAAC') d.with_dna_sequence('AACA') d.with_dna_sequence('AACT') # when expect = d.run() # then expect \ .has_graph_with_kmers('AAA', 'AAT', 'AAC', 'ACA', 'ACT') \ .has_meta_info('n_junctions', 2) expect.has_n_graphs(1)
true
1942847fb4feb2be0337b477304bcccef659103a
Python
jgsimard/COMP767
/hw2/function_approximation.py
UTF-8
3,826
2.90625
3
[]
no_license
import numpy as np import gym class ApproximationFunction: #feature vector def get_state_feature_vector(self, s): pass def get_state_action_feature_vector(self, s): pass #value def get_state_value(self, s, w): pass def get_state_action_value(self, s, a, w): pass #grad def get_state_grad(self, s, w): pass def get_state_action_grad(self, s, a, w): pass class LinearApproximationFunction: def get_state_feature_vector(self, s): pass def get_state_action_feature_vector(self, s, a): pass def get_state_value(self, s, w): feature_vector = self.get_state_feature_vector(s) return w.T @ feature_vector def get_state_action_value(self, s, a, w): feature_vector = self.get_state_action_feature_vector(s, a) return w.T @ feature_vector def get_state_grad(self, s): return self.get_state_feature_vector(s) def get_state_action_grad(self, s, a): return self.get_state_action_feature_vector(s, a) class TileCoding(LinearApproximationFunction): def __init__(self, n_bins, n_tilings, observation_space, action_space = None): self.dims = observation_space.shape[0] self.n_bins = n_bins self.n_tilings = n_tilings normalization = n_tilings / ((n_bins - 1) * n_tilings + 1) self.tile = np.array([high - low for high, low in zip(observation_space.high, observation_space.low)]) * normalization self.offset_base_position = - observation_space.low if action_space is not None: self.action_space = action_space if isinstance(action_space, gym.spaces.box.Box): self.dims += action_space.shape[0] action_tile = np.array([high - low for high, low in zip(action_space.high, action_space.low)]) * normalization self.tile = np.concatenate((self.tile, action_tile)) self.offset_base_position = np.concatenate((self.offset_base_position, action_space.low)) else: self.dims += 1 self.tile = np.concatenate((self.tile, [action_space.n * normalization])) self.offset_base_position = np.concatenate((self.offset_base_position, [0])) self.observation_space = observation_space self.offset = self.tile / n_tilings self.tiling_size = 
n_bins**self.dims self.size = self.tiling_size * n_tilings def get_state_action_feature_vector(self, s, a): if isinstance(self.action_space, gym.spaces.box.Box): s_prime = np.concatenate((s, a)) else: s_prime = np.concatenate((s, [a])) return self.get_state_feature_vector(s_prime) def get_state_feature_vector(self, s): feature_vector = np.zeros(self.size) for tiling in range(self.n_tilings): s_prime = s + tiling * self.offset + self.offset_base_position index_in_tiling = np.floor(s_prime / self.tile).astype(int) index_in_tiling[index_in_tiling == self.n_bins] = self.n_bins - 1 # for cases at the edge if len(index_in_tiling) > 1: index_in_tiling = np.ravel_multi_index(index_in_tiling, (self.n_bins,) * self.dims) feature_vector[tiling * self.tiling_size + index_in_tiling] = 1 return feature_vector if __name__ == "__main__": import gym low = np.array([0.0, 0.0]) high = np.array([1.0, 1.0]) observation_space = gym.spaces.Box(low=low, high=high, dtype=np.float32) tile_coding = TileCoding(n_bins=2, n_tilings=2, observation_space=observation_space) ij = [0.1, 0.5, 0.9] for i in ij: for j in ij: print(i,j, tile_coding.get_state_feature_vector([i,j])) print(tile_coding.tile)
true
8da62ed4156f9e3c8807089d65e300c89fc86bdf
Python
daniel-reich/ubiquitous-fiesta
/Z8REdTE5P57f4q7dK_17.py
UTF-8
237
3.140625
3
[]
no_license
def collatz(n): collatz_lst = [] while(n > 1): if n%2 == 0: n = n//2 collatz_lst.append(n) else: n = 3*n+ 1 collatz_lst.append(n) collatz_lst.append(1) return (len(collatz_lst),max(collatz_lst))
true
02ed4e9c842f2a3926521e9c7fd38c4f45056be8
Python
monoidic/cryptopals-solutions
/part1/task05.py
UTF-8
407
3.328125
3
[]
no_license
#!/usr/bin/env python3 import sys from mystuff import repeating_key_xor # in: # "Burning 'em, if you ain't quick and nimble"$'\n'"I go crazy when I hear a cymbal" # 'ICE' # out: # 0b3637272a2b2e63622c2e69692a23693a2a3c6324202d623d63343c2a26226324272765272a282b2f20430a652e2c652a3124333a653e2b2027630c692b20283165286326302e27282f if len(sys.argv) != 3: print('Enter plaintext/ciphertext and key') exit(1) plain, key = bytes(sys.argv[1], 'UTF-8'), bytes(sys.argv[2], 'UTF-8') ciphertext = repeating_key_xor(plain, key) print(ciphertext.hex())
true
eec0d5b16cc50a9d4d8e3afe434e9c22d5e603a1
Python
aliceeli/Python_TTA
/Week 3/numbers write.py
UTF-8
453
4
4
[]
no_license
# Write program input 4 numbers and stores then # create txt file called numbers # input the numbers # write numbers to numbers.txt #close file num_1 = input("Please enter your first number: ") num_2 = input("Please enter your second number: ") num_3 = input("Please enter your third number: ") num_4 = input("Please enter your fourth number: ") myFile = open("numbers.txt", "w") myFile.write(f"{num_1}, {num_2}, {num_3}, {num_4}") myFile.close()
true
190558685a97b211d457f8fe7f338d86f8352d95
Python
mk-dir/python
/Python/dictionary.py
UTF-8
415
2.8125
3
[]
no_license
userDetails={ "Name":"Alex Hunter", "age": 16, "Languages":["Kisw","Eng","Meru","python"], "school":{ "pri":"ABC", "Seco":"123", "uni":"dy/dx" }, "parents":{ "father":{ "fName":"Papa", "lName":"Duck", }, "mother":{ "fname":"Mama", "lname":"Duck" } } } print(userDetails["Languages"][0])
true
dc42f9b84f73f20f9756bd3692c77c59a222bed8
Python
tsnaomi/chinese-mt
/code/ngram.py
UTF-8
4,651
3.3125
3
[]
no_license
import math import nltk from _ngrams import unigrams, bigrams, trigrams, total CORPUS = nltk.corpus.brown.sents(categories=nltk.corpus.brown.categories()) class Train: def __init__(self, corpus=CORPUS): self.unigrams = {'UNK': 0, } self.bigrams = {('UNK', 'UNK'): 0, } self.trigrams = {('UNK', 'UNK', 'UNK'): 0, } self.total = 0 self.train(corpus) self.dump() def train(self, corpus): '''Train a stupid backoff language model using trigrams.''' print 'Training takes forever.' for sent in corpus: # re-tokenize sentence to split punctuation from words, # e.g., Atlanta's -> Atlanta 's sent = nltk.word_tokenize(' '.join(sent)) # get part-of-speech tags sent = nltk.pos_tag(sent) # convert all non- proper nouns into lowercase sent = [w[0].lower() if 'NNP' not in w[1] else w[0] for w in sent] # don't need </S> because both the brown corpus and our corpus use # periods at the end of sentences sentence = ['<S>', ] + sent for i, word in enumerate(sentence): self.unigrams.setdefault(word, 0) self.unigrams[word] += 1 self.total += 1 if i > 0: bigram = (sentence[i-1], word) self.bigrams.setdefault(bigram, 0) self.bigrams[bigram] += 1 if i > 1: trigram = (sentence[i-2], sentence[i-1], word) self.trigrams.setdefault(trigram, 0) self.trigrams[trigram] += 1 def dump(self): '''Dump ngram data into file.''' with open('__ngrams.py', 'w') as f: print 'Writing unigrams to file...' unigrams_ = 'unigrams = {\n' for k, v in self.unigrams.iteritems(): unigrams_ += ' "%s": %s,\n' % (k, v) unigrams_ += ' }\n\n' f.write(unigrams_) print 'Writing bigrams to file...' bigrams_ = 'bigrams = {\n' for k, v in self.bigrams.iteritems(): bigrams_ += ' ("%s", "%s"): %s,\n' % (k[0], k[1], v) bigrams_ += ' }\n\n' f.write(bigrams_) print 'Writing trigrams to file...' 
trigrams_ = 'trigrams = {\n' for k, v in self.trigrams.iteritems(): if len(k) == 3: trigrams_ += ' ("%s", "%s", "%s"): %s,\n' % \ (k[0], k[1], k[2], v) else: trigrams_ += ' ("%s", "%s"): %s,\n' % (k[0], k[1], v) trigrams_ += ' }\n\n' f.write(trigrams_) print 'writing total to file...' total_ = 'total = %s' % self.total f.write(total_) class StupidBackoffTrigramLanguageModel: def __init__(self, corpus=CORPUS[:1000]): self.unigrams = unigrams self.bigrams = bigrams self.trigrams = trigrams self.total = total def score(self, sequence): '''Given a list of words, return a log-probability of the sequence.''' score = 0.0 for i, word in enumerate(sequence): C = word if i > 0: B = sequence[i-1] if i > 1: A = sequence[i-2] AB = (A, B) ABC = (A, B, C) ABC_count = self.trigrams.get(ABC, 0) if ABC_count: AB_count = self.bigrams[AB] score += math.log(ABC_count) score -= math.log(AB_count) continue BC = (B, C) BC_count = self.bigrams.get(BC, 0) if BC_count: B_count = self.unigrams[B] score += math.log(BC_count * 0.4) score -= math.log(B_count) continue C_count = self.unigrams.get(C, 0) + 1 score += math.log(C_count * 0.4) score -= math.log(self.total + len(self.unigrams)) return score if __name__ == '__main__': m = StupidBackoffTrigramLanguageModel() candidates = [ 'using chopsticks have a', 'using chopsticks has a', 'using chopsticks having a', 'using chopsticks had a', 'using chopsticks to have a', 'using chopsticks to have', ] for t in candidates: print '\t%s: %s' % (t, m.score(t.split()))
true
ec13f5fe96eac7e84d36162336ba4e1f277caec8
Python
juanPabloCesarini/cursoPYTHON2021
/Seccion 9/metodos_especiales.py
UTF-8
566
3.765625
4
[]
no_license
class Auto: def __init__(self, marca, km , modelo): #---> metodo constructor self.marca = marca self.km = km self.modelo = modelo print("Se creó el objeto Auto", self.marca) def __del__(self): #---> metodo destructor print("Se destruyó objeto auto", self.marca) def __str__(self): #---> metodo para mostrar el objeto return "Marca: {} - KM: {} - Modelo: {}".format(self.marca,self.km,self.modelo) def main(): miAuto = Auto("VW Gol", 150000, 1998) print(str(miAuto)) del(miAuto) main()
true
0858bea5581a8526dafbaf206f050bfa9ee2ae4c
Python
jmh9876/p1804_jmh
/1804/git/2-第二个月高级python/1/5-游标位置.py
UTF-8
1,021
3.4375
3
[]
no_license
f=open('5.txt','w') f.write('红豆生南国\n') f.write('春来发几枝\n') f.write('愿君多采霞\n') f.write('此物最相思\n') f.close() f=open('5.txt','r') c=f.readline() p=f.tell() print('读取第一行的内容是:%s'%c) print('读取第一行的游标位置是:%d'%p) c=f.readline() p=f.tell() print('读取第二行的内容是:%s'%c) print('读取第二行的游标位置是:%d'%p) c=f.readline() p=f.tell() print('读取第三行的内容是:%s'%c) print('读取第三行的游标位置是:%d'%p) c=f.readline() p=f.tell() print('读取第四行的内容是:%s'%c) print('读取第四行的游标位置是:%d'%p) ''' #定位seek f=open('1.txt','rb+') f.write('\n') f.close() c=f.readline()#c变量用于保存readline返回的一行内容 p=f.tell()#p变量用于保存tell返回的游标位置 print('读取的第一行内容是:%s'%c) print('读取第一行的游标位置是%d'%p) f.seek(0,0) print(f.readline())#重新返回第一行读取 f.seek(-3,2) print(f.tell()) f.close() '''
true
49bc9a884b49cb047ebbcab0e04970497b9dce55
Python
LKWBrando/CP1404
/practical2/exceptionExample.py
UTF-8
566
3.953125
4
[]
no_license
try: numerator = int(input("Enter the numerator: ")) denominator = int(input("Enter the denominator: ")) fraction = numerator / denominator except ValueError: print("Numerator and denominator must be valid numbers!") except ZeroDivisionError: print("Cannot divide by zero!") print("Finished.") #1. ValueError occurs when the input numerator and denominator is not an integer #2. ZeroDivisionError occurs when the denominator is 0 #3. Yes, an error checking loop can be used to print error messages should the input for the denominator equals 0
true
a613d3d89e104c5691e681c1cb42e0442fd4f7f3
Python
Aasthaengg/IBMdataset
/Python_codes/p03168/s137579656.py
UTF-8
469
2.796875
3
[]
no_license
import sys stdin = sys.stdin def ni(): return int(ns()) def na(): return list(map(int, stdin.readline().split())) def naa(N): return [na() for _ in range(N)] def ns(): return stdin.readline().rstrip() # ignore trailing spaces N = ni() p_array = list(map(float, stdin.readline().split())) dp = [0] * (N+1) dp[0] = 1 for p in p_array: for i in range(N, 0, -1): dp[i] = dp[i] * (1-p) + dp[i-1] * p dp[0] *= (1-p) print(sum(dp[(N+1)//2:]))
true
7de2ee9efdc4d7953904299f0450ce78886ba226
Python
nekohor/pondo2
/pondo2/tables/generate_tables.py
UTF-8
1,978
2.6875
3
[]
no_license
import pandas as pd import numpy as np import json # generate partTable df = pd.read_excel("partTable.xlsx") part_dict = {} part_dict["partTable"] = [] line_list = df["LINE"].unique() for i, line in enumerate(line_list): part_dict["partTable"].append({}) part_dict["partTable"][i]["line"] = int(line) part_dict["partTable"][i]["table"] = [] table = part_dict["partTable"][i]["table"] records = df.loc[df["LINE"] == line] for j, idx in enumerate(records.index): table.append({}) table[j]["part"] = records.loc[idx, "PART"] table[j]["dcafile"] = records.loc[idx, "DCAFILE"] # print(records.loc[idx, "SIGNAL"]) table[j]["signal"] = ( str(records.loc[idx, "SIGNAL"]).replace("\\\\", "\\")) # print(part_dict) with open("../Components/Tables/partTable.json", "w", encoding='utf-8') as jsfile: json.dump(part_dict, jsfile, indent=4, ensure_ascii=False) def cast_dataframe(df, col): cast_tag = "cast_" if col.startswith(cast_tag): new_col = col.replace(cast_tag, "") std_list = [1, 2, 3, 4, 5, 6, 7] for std in std_list: df[new_col + str(std)] = df[col].apply(lambda x: x + str(std)) df.drop([col], axis=1, inplace=True) else: pass # generate task_table df = pd.read_excel("factorTable.xlsx") df = df.fillna("nan") for col in df.columns: cast_dataframe(df, col) print(df) task_dict = {} task_dict["factorTable"] = [] tbl_list = task_dict["factorTable"] for i, col in enumerate(df.columns): tbl_list.append({}) tbl_list[i]["seriesName"] = col tbl_list[i]["factorList"] = [] task_list = tbl_list[i]["factorList"] for task in df[col]: if task.startswith("nan"): pass else: task_list.append(task) with open("../Components/Tables/factorTable.json", "w", encoding='utf-8') as jsfile: json.dump(task_dict, jsfile, indent=4, ensure_ascii=False)
true
0731d9ce50233fb80b3964c4e2c6d84099f885e8
Python
okada1220/100knock
/100knock_chapter4/100knock_chapter4_38.py
UTF-8
918
2.828125
3
[]
no_license
import re import pprint import collections import matplotlib.pyplot as plt import seaborn as sns morph = [] with open('neko.txt.mecab', 'r', encoding='utf-8') as f: for line in f: if not line == 'EOS\n': info = re.split(r'\t|,', line) key_list = {} key_list['surface'] = info[0] key_list['base'] = info[7] key_list['pos'] = info[1] key_list['pos1'] = info[2] morph.append(key_list) words = [] for i in range(len(morph)): words.append(morph[i]['base']) frequency_word = collections.Counter(words) most_frequency_word = frequency_word.most_common() frequency = [] for i in range(len(frequency_word)): frequency.append(most_frequency_word[i][1]) sns.distplot(frequency, kde=False, rug=False, bins=10000) plt.show() SNS = sns.distplot(frequency, kde=False, rug=False, bins=10000) SNS.set(xlim=(0, 20)) plt.show()
true
d5eb3f6ca955c1c348dc8b04ec37fddebd6f8b30
Python
heejung-choi/python
/public/public_mask.py
UTF-8
894
2.890625
3
[]
no_license
# pip install requests import requests from pprint import pprint # 깔끔하게 보이기 위해 pprint def mask(address, n=10): URL='https://8oi9s0nnth.apigw.ntruss.com/corona19-masks/v1/storesByAddr/json' params = f'?address={address}' response = requests.get(URL+params) #print(response.content) stores = response.json().get('stores')[:10] #print(stores) #for store in stores: #print(store) #print(store.get('name')) for store in stores: #print(remain_stat.get('remain_stat')) if store.get('remain_stat') == 'plenty': color = 'green' elif store.get('remain_stat') == 'some': color = 'yellow' elif store.get('remain_stat') == 'few': color = 'red' else: color :'grey' print(store.get('name')+color) mask('서울특별시 강남구 역삼동')
true
0ded6ce44cde7c316c6e82525ffa1034f6e01557
Python
adarsh415/Yelp-Sentiment-Analysis-TensorFlow
/word2vec_embed.py
UTF-8
3,594
2.640625
3
[]
no_license
import tensorflow as tf import numpy as np from process_data import process_data import os vocabulary_size=50000 # size of vocabulary embedding_size=300 # size of embedding batch_size=64 # batch size skip_window=1 # How many words to consider left and right num_skip=2 # How many time to reuse imput to generate label negative_sample=64 #how many negative example to sample LEARNING_RATE=1.0 NUM_TRAIN_STEPS=100000 SKIP_STEP = 1000 # how many steps to skip before reporting the loss def word2vec(batch_gen): graph=tf.Graph() with graph.as_default(): # input data. with tf.name_scope('input'): center_words=tf.placeholder(tf.int32,shape=[batch_size],name='center_word') target_words=tf.placeholder(tf.int32,shape=[batch_size,1],name='target_words') # Ops and variables pinned to the CPU because of missing GPU implementation with tf.device('/cpu:0'): # lookup for embedding for input with tf.name_scope('embedding'): embedding=tf.Variable(tf.random_uniform([vocabulary_size,embedding_size],-1.0,1.0)) embed=tf.nn.embedding_lookup(embedding,center_words) #Construct variable to NCE loss with tf.name_scope('weights'): nce_weights=tf.Variable(tf.truncated_normal([vocabulary_size,embedding_size], stddev=1.0 / np.math.sqrt(embedding_size))) with tf.name_scope('biases'): biases=tf.Variable(tf.zeros([vocabulary_size])) # Compute the average NCE loss for the batch. # tf.nce_loss automatically draws a new sample of the negative labels each # time we evaluate the loss. 
with tf.name_scope('loss'): loss=tf.reduce_mean( tf.nn.nce_loss( weights=nce_weights, biases=biases, inputs=embed, labels=target_words, num_sampled=negative_sample, num_classes=vocabulary_size,name='loss' ) ) #tf.summary.scalar('loss',loss) optimizer=tf.train.GradientDescentOptimizer(learning_rate=LEARNING_RATE).minimize(loss) make_dir('checkpoints') with tf.Session(graph=graph) as sess: sess.run(tf.global_variables_initializer()) total_loss=0.0 #we use this to calculate avarage loss writer=tf.summary.FileWriter('./graphs/',sess.graph) # Create model saving operation saver=tf.train.Saver({"embedding":embedding}) for index in range(NUM_TRAIN_STEPS): centers,targets=next(batch_gen) loss_batch,_=sess.run([loss,optimizer],feed_dict={center_words:centers,target_words:targets}) total_loss +=loss_batch if (index+1)% SKIP_STEP == 0: embedding_name= './'+'checkpoints'+'/'+'yulp_review_step_'+str(index)+'.ckpt' model_checkpoint_path=os.path.join(os.getcwd(),embedding_name) save_path=saver.save(sess,model_checkpoint_path) print ('embedding saved in {}'.format(save_path)) print ('Avg loss at step {}: {:5.1f}'.format(index,total_loss/SKIP_STEP)) total_loss=0.0 writer.close() def make_dir(name): if not os.path.exists(os.path.join(os.getcwd(),name)): os.mkdir(os.path.join(os.getcwd(),name)) else: pass def main(): batch_gen=process_data(vocabulary_size,batch_size,skip_window) word2vec(batch_gen) if __name__=='__main__' : main()
true
093383522e4594ddd6178a60184145aec698bd2d
Python
tiandiyijian/CTCI-6th-Edition
/03.06.py
UTF-8
1,122
3.65625
4
[]
no_license
import collections class AnimalShelf: def __init__(self): self._dog = collections.deque() self._cat = collections.deque() def enqueue(self, animal: List[int]) -> None: if animal[1] == 0: self._cat.append(animal) else: self._dog.append(animal) def dequeueAny(self) -> List[int]: if not self._dog and not self._cat: return [-1, -1] elif self._dog and self._cat: if self._dog[0][0] < self._cat[0][0]: return self._dog.popleft() else: return self._cat.popleft() else: return self._dog.popleft() if self._dog else self._cat.popleft() def dequeueDog(self) -> List[int]: return self._dog.popleft() if self._dog else [-1, -1] def dequeueCat(self) -> List[int]: return self._cat.popleft() if self._cat else [-1, -1] # Your AnimalShelf object will be instantiated and called as such: # obj = AnimalShelf() # obj.enqueue(animal) # param_2 = obj.dequeueAny() # param_3 = obj.dequeueDog() # param_4 = obj.dequeueCat()
true
d145a8c29dce076b26780eb4bc12e5f69b1b817e
Python
jedgar74/GAKMC
/Estacion.py
UTF-8
10,570
2.640625
3
[]
no_license
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar  8 11:41:13 2021

@author: tauger
"""

from Vehiculo import *
import statistics


class Estacion(object):
    # Rental station: holds the Vehiculo objects registered at one location,
    # plus the list of distinct dates (self.f) on which any status was recorded.

    def __init__(self, name, id_est=None, nv=0):
        self.nv = nv              # number of registered vehicles
        self.id_est = id_est      # station identifier
        self.name = name          # station name
        self.empresa = 0
        self.v = []               # Vehiculo objects
        self.mvaveh = []          # vehicle unique ids, parallel to self.v
        self.id_veh = []          # optional secondary vehicle ids (see add2)
        self.f = []               # distinct dates seen in the status records
        self.ninactive = 0

    # The `uid` parameter is a unique identifier for the vehicle,
    # which could be the chassis number or the MVA.
    def add(self, uid, tipo, fec, stat):
        # Register a new vehicle with its first (date, status) record.
        vv = Vehiculo(uid, tipo)
        vv.add(fec, stat)
        self.v.append(vv)
        self.mvaveh.append(uid)
        self.nv = self.nv+1
        # NOTE(review): list.index never returns -1 — it raises ValueError when
        # the item is missing, so only the except branch ever appends.  Net
        # effect: append fec if it is not already in self.f.
        try:
            if self.f.index(fec) == -1:
                self.f.append(fec)
        except:
            self.f.append(fec)

    def add2(self, uid, idv, tipo, fec, stat):
        # Same as add(), but also records a secondary vehicle id.
        vv = Vehiculo(uid, tipo)
        vv.add(fec, stat)
        self.v.append(vv)
        self.mvaveh.append(uid)
        self.id_veh.append(idv)
        self.nv = self.nv+1
        try:
            if self.f.index(fec) == -1:
                self.f.append(fec)
        except:
            self.f.append(fec)

    def addD(self, uid, fec, stat):
        # Append a (date, status) record to an already-registered vehicle.
        # Raises ValueError if uid is unknown.
        idv = self.mvaveh.index(uid)
        self.v[idv].add(fec, stat)
        try:
            if self.f.index(fec) == -1:
                self.f.append(fec)
        except:
            self.f.append(fec)

    def gets(self, uid):
        # True when uid is registered at this station.
        try:
            if self.mvaveh.index(uid) != -1:
                return True
            else:
                return False
        except:
            return False

    def compare(self, uid, fecha, stat):
        # Update the status of vehicle uid for a given date; silently ignore
        # unknown ids.
        try:
            idv = self.mvaveh.index(uid)
            if idv != -1:
                self.v[idv].setstatus(fecha, stat)
            else:
                # print('NO LIST: ', uid)
                pass
        except:
            # print('NO LIST: ', uid)
            pass

    def compare2(self, uid, fecha, stat):
        # Variant of compare() that delegates to Vehiculo.setstatus2.
        try:
            idv = self.mvaveh.index(uid)
            if idv != -1:
                self.v[idv].setstatus2(fecha, stat)
            else:
                # print('NO LIST: ', uid)
                pass
        except:
            # print('NO LIST: ', uid)
            pass

    def prints(self):
        # Dump the station and each vehicle's history to stdout.
        print('Nombre de estación ', self.name)
        print('Número de vehículos ', len(self.mvaveh))
        for j in range(len(self.mvaveh)):
            self.v[j].prints()

    def prints2(self):
        # Variant of prints() delegating to Vehiculo.prints2.
        print('Nombre de estación ', self.name)
        print('Número de vehículos ', len(self.mvaveh))
        for j in range(len(self.mvaveh)):
            self.v[j].prints2()

    def indtip(self):
        # Tally the distinct vehicle types (self.tip) and the number of
        # vehicles of each type (self.ntip).  Same index-raises-ValueError
        # idiom as add().
        print(" ")
        self.tip = []
        self.ntip = []
        for i in range(len(self.mvaveh)):
            ind = 0
            c = self.v[i]
            try:
                ind = self.tip.index(c.tipo)
                if ind != -1:
                    self.ntip[ind] = self.ntip[ind] + 1
                else:
                    self.tip.append(c.tipo)
                    self.ntip.append(1)
            except:
                self.tip.append(c.tipo)
                self.ntip.append(1)
            # if self.verveh(c) == 1:
            #     # print( '*** 1' )
            #     self.ntip[ind] = self.ntip[ind] - 1
            #     self.ninactive = self.ninactive + 1
            #     self.v[i].active = False
        print(self.tip, '\n', self.ntip)
        return [self.tip, self.ntip]

    def indtipc(self):
        # Per type: daily 'On Rent' counts across self.f, reduced to
        # max (tipc), mean (tipd) and stdev (tipe).  Requires indtip() first.
        print(" ")
        self.tipc = []
        self.tipd = []
        self.tipe = []
        dd = [0] * len(self.f)  # one counter per known date
        #print(self.f)
        for i in range(len(self.tip)):
            ind = 0
            for k in range(len(self.mvaveh)):
                c = self.v[k]
                #print(c.contipos(['On Rent']))
                if c.tipo == self.tip[i] and c.active == True:
                    ind = ind + 1
                    for j in range(len(c.fecha)):
                        #print(self.f, c.fecha)
                        e = self.f.index(c.fecha[j])
                        if c.status[j] == 'On Rent':
                            dd[e] = dd[e] + 1
            self.tipc.append(max(dd))
            self.tipd.append(round(sum(dd)/(len(self.f)), 3))
            self.tipe.append(round(statistics.stdev(dd), 3))
            dd = [0] * len(self.f)  # reset counters for the next type
        print(self.tipc, '\n', self.tipd, '\n', self.tipe)
        return [self.tipc, self.tipd, self.tipe]

    def indtipg(self, cond):
        # Generalisation of indtipc(): counts days whose status is in `cond`
        # instead of the fixed 'On Rent'.
        print(" ")
        self.tipc = []
        self.tipd = []
        self.tipe = []
        dd = [0] * len(self.f)
        #print(self.f)
        for i in range(len(self.tip)):
            ind = 0
            for k in range(len(self.mvaveh)):
                c = self.v[k]
                if c.tipo == self.tip[i] and c.active == True:
                    ind = ind + 1
                    for j in range(len(c.fecha)):
                        #print(self.f, c.fecha)
                        e = self.f.index(c.fecha[j])
                        for w in range(len(cond)):
                            if c.status[j] == cond[w]:
                                dd[e] = dd[e] + 1
            self.tipc.append(max(dd))
            self.tipd.append(round(sum(dd)/(len(self.f)), 3))
            self.tipe.append(round(statistics.stdev(dd), 3))
            dd = [0] * len(self.f)
        print(self.tipc, '\n', self.tipd, '\n', self.tipe)
        return [self.tipc, self.tipd, self.tipe]

    def indtiplap(self, cond):
        # Per type, gather the per-vehicle statistics returned by
        # Vehiculo.contipos(cond): totals, maxima, minima (excluding the first
        # lap), means, and interval counts.
        print(" ")
        vtotd = []
        vmaxd = []
        vmind = []
        vprod = []
        vintd = []
        for i in range(len(self.tip)):
            vtot = []
            vmax = []
            vmin = []
            vpro = []
            vint = []
            for k in range(len(self.mvaveh)):
                c = self.v[k]
                if c.tipo == self.tip[i] and c.active == True:
                    # NOTE(review): d's layout comes from Vehiculo.contipos —
                    # presumably [total, max, mean, laps-list]; confirm there.
                    d = c.contipos(cond)
                    vtot.append(d[0])
                    vmax.append(d[1])
                    vpro.append(d[2])
                    vint.append(len(d[3])-1)
                    if len(d[3]) == 1:
                        vmin.append(0)
                    else:
                        # Minimum over the sorted laps, ignoring the smallest entry.
                        e = []
                        e.extend(d[3])
                        e.sort()
                        vmin.append(min(e[1:]))
            vtotd.append(vtot)
            vmaxd.append(vmax)
            vmind.append(vmin)
            vprod.append(vpro)
            vintd.append(vint)
        print(len(self.f), '\n', vtotd, '\n', vmaxd, '\n', vmind, '\n', vprod, '\n', vintd)
        return [len(self.f), vtotd, vmaxd, vmind, vprod, vintd]

    # Check whether a car is effectively out of service, stolen, or missing:
    # returns 1 when more than 80% of its records match one of `cond`.
    def verveh(self, veh, cond=['Missing', 'Stolen']):
        # NOTE(review): mutable default argument — harmless here since cond is
        # never mutated, but fragile.
        auto = 0
        a = 0
        for j in range(len(veh.fecha)):
            for i in range(len(cond)):
                if veh.status[j] == cond[i]:
                    a = a + 1
        if (a/len(veh.fecha)) > 0.8:
            auto = 1
            print('*** 1', veh.uid)
        return auto

    # Evaluation function: sweep the rented and available vehicles per date and
    # flag each (scenario, type) pair whose counts exceed the estimated maximum
    # nkindof[ii][i] for that group.
    def ver(self, kindof, nkindof, fec):
        r = ['On Rent']
        m = ['CHEQUEO', 'In Maintenance', 'STOP']
        d = ['DISPONIBLE', 'AVAILABLE', '0', 'Non-Rev. Transfer']
        rr = []
        rd = []
        fo = []
        # return [fo, rr, rd]
        for ii in range(len(nkindof)):
            for i in range(len(kindof)):
                RR = 0
                RD = 0
                for w in range(len(fec)):
                    for j in range(len(self.mvaveh)):
                        c = self.v[j]
                        for ff in range(len(c.fecha)):
                            if fec[w] == c.fecha[ff]:
                                # NOTE(review): c.fecha[j]/c.status[j] index by
                                # the *vehicle* loop variable j, not ff — looks
                                # like a bug; confirm intended indexing.
                                if c.tipo == kindof[i] and c.active == True and fec[w] == c.fecha[j]:
                                    if c.status[j] in r:
                                        RR = RR + 1
                                    if c.status[j] in r or c.status[j] in m:
                                        RD = RD + 1
                                break
                rr.append(RR)
                rd.append(RD)
                if RR > nkindof[ii][i] or RD > nkindof[ii][i]:
                    fo.append(1)
                else:
                    fo.append(0)
        return [sum(fo), fo, rr, rd]

    def onRentToGraph(self):
        # Build per-type daily 'On Rent' series suitable for plotting:
        # returns [type names, dates, one count-list per type].
        # print(" ")
        tTipos = []
        tData = []
        #print(self.f)
        for i in range(len(self.tip)):
            ind = 0
            tTipos.append(self.tip[i])
            data = [0] * len(self.f)
            for k in range(len(self.mvaveh)):
                c = self.v[k]
                #print(c.contipos(['On Rent']))
                if c.tipo == self.tip[i] and c.active == True:
                    ind = ind + 1
                    for j in range(len(c.fecha)):
                        e = self.f.index(c.fecha[j])
                        if c.status[j] == 'On Rent':
                            data[e] = data[e] + 1
            tData.append(data)
        # print(tTipos, '\n', self.f, '\n', tData)
        return [tTipos, self.f, tData]
true
c0a48f105522c6e29784e0150af917aab4ee3dff
Python
kapitsa2811/homographynet
/predict.py
UTF-8
2,734
2.515625
3
[]
no_license
'''
Author: Richard Guinto
Project: DeepHomography
Dependencies: keras
Usage: python <this file>
'''

import os.path
#import glob
import numpy as np
from keras.models import load_model
#from keras.layers import Activation, Dense, Dropout, Conv2D, MaxPooling2D, Flatten, BatchNormalization, InputLayer
from keras import backend as K
from keras import optimizers


def euclidean_l2(y_true, y_pred):
    """Per-sample Euclidean (L2) distance between prediction and target.

    Registered as a custom object so the checkpointed model can be loaded.
    """
    return K.sqrt(K.sum(K.square(y_pred - y_true), axis=-1, keepdims=True))


def mean_square_error(y_true, y_pred):
    """Per-sample mean squared error.

    NOTE(review): not referenced below — presumably kept for parity with the
    training script; confirm before removing.
    """
    return K.mean(K.square(y_pred - y_true), axis=-1, keepdims=True)


# checkpoint
checkpoint = "/data/richard/model.hdf5"

# Dataset-specific
predict_data_path = '/home/samsung/richard/dataset/test/f7823b11-901a-457a-89a2-8c37dc4fef60.npz'
samples_per_archive = 9216

# load model
print('Loading model... ', checkpoint)
model = load_model(checkpoint, custom_objects={'euclidean_l2': euclidean_l2})

print('Loading archive... ', os.path.abspath(predict_data_path))
archive = np.load(os.path.abspath(predict_data_path))
print('keys: ', archive.files)
images = archive['images']
offsets = archive['offsets']

# Report prediction quality on the first 10 samples of the archive.
for idx in range(10):
    print('*************** SAMPLE ', idx)
    sample_image = images[idx]
    # Model expects a single (128, 128, 2) image pair as a batch of one.
    sample_image = sample_image.reshape(1, 128, 128, 2)
    sample_offset = offsets[idx]
    print('Sample Offset: ', sample_offset)
    # Pixels are normalised to [-1, 1]; corner offsets to [-1, 1] via /32.
    norm_sample_image = (sample_image - 127.5) / 127.5
    norm_sample_offset = sample_offset / 32.
    print('Normalize Sample Offset: ', norm_sample_offset)
    print('Predicting Offset...')
    norm_pred_offset = model.predict(norm_sample_image)
    print('Predicted Offset(Normalize): ', norm_pred_offset)
    pred_offset = norm_pred_offset * 32.
    print('Predicted Offset: ', pred_offset)
    norm_rmse = np.sqrt(np.sum(np.square(norm_pred_offset - norm_sample_offset), axis=-1, keepdims=True))
    print('Normalize RMSE: ', norm_rmse)
    norm_mse = np.mean(np.square(norm_pred_offset - norm_sample_offset), axis=-1, keepdims=True)
    print('Normalize MSE: ', norm_mse)
    mse = np.mean(np.square(pred_offset - sample_offset), axis=-1, keepdims=True)
    print('MSE: ', mse)
    norm_mae = np.mean(np.absolute(norm_pred_offset - norm_sample_offset), axis=-1, keepdims=True)
    print('Normalize MAE: ', norm_mae)
    mae = np.mean(np.absolute(pred_offset - sample_offset), axis=-1, keepdims=True)
    print('MAE: ', mae)
    # Average corner error: mean Euclidean distance over the (x, y) corner pairs.
    # was `sum`, which shadowed the builtin of the same name.
    corner_error = 0
    for i in range(0, len(sample_offset), 2):
        h = np.square(pred_offset[0][i] - sample_offset[i]) + np.square(pred_offset[0][i+1] - sample_offset[i+1])
        h = np.sqrt(h)
        print('h: ', h)
        corner_error = corner_error + h
    corner_error = corner_error / (len(sample_offset) / 2)
    print('Ave. Corner Error: ', corner_error)
true
e0d57587a32d2e7c36357b58592736a08a10b842
Python
junwei-wang/project-euler
/001-100/20/20.py
UTF-8
130
2.625
3
[ "MIT" ]
permissive
#!/usr/bin/env python from math import log v = reduce(lambda x, y: x*y, [i for i in range(1, 101)]) print sum(map(int, str(v)))
true
4d5c4c1f6976e524440b9c66584303578ab3dbb8
Python
AK-1121/code_extraction
/python/python_18467.py
UTF-8
88
2.640625
3
[]
no_license
# Python "in" operator speed list - Average: O(n) set/dict - Average: O(1), Worst: O(n)
true
b9abeeef757047eb589064cb9d367715c6882acf
Python
daniel-reich/ubiquitous-fiesta
/hzs9hZXpgYdGM3iwB_19.py
UTF-8
280
3.125
3
[]
no_license
def alternating_caps(txt): i, lst = 0, [] for x in txt.split(): s = '' for ch in x: if not i%2: s+=ch.upper() else: s+=ch.lower() i+=1 lst.append(s) ​ return ' '.join(lst)
true
60f235b7114d7b354c60731579380b419b5716bf
Python
Mark-Na/My_Code
/Python_Course/boolean_conditionallogic.py
UTF-8
1,023
4.40625
4
[]
no_license
# if condition is True: #do something #elif some other condition is True: # do something #else: #do something name = "Jon Snow" if name == "Arya Stark": print("Valar Morghulis") elif name == "Jon Snow": print("You know nothing") else: print("Carry on") # x=1 # x is 1 #True, truthy # x is 0 #False, falsy #natural false, empty strings, None, 0, empty objects animal = input("Enter your favourite animal: ") if animal: print(animal + " is my favourite too") else: print("YOU DIDN\'T SAY ANYTHING") # == truthy if a has same value of b # != truthy if a does not have same value as b # > truthy if a greater than b # < truth if a is less than b # >= truthy if a is greater than or equal to b # <= truthy if a is less than or equal to b # and truthy if both a and b are true # or truthy if either a or b are true # not truthy if the opposite of a is true #is vs == # not the same # a = [1,2,3] # b = [1,2,3] # a == b # returns true, checks the value # a is b #checks to see if they are placed in same memory
true
f81c05be3f8e9e6cde171098a314786b6e406565
Python
yichaogan/LPR
/models/svm/char_recognition.py
UTF-8
4,230
2.53125
3
[ "Apache-2.0" ]
permissive
import numpy as np
import os
import random
from PIL import Image
import string
# from sklearn import svm
import time
# from sklearn.externals import joblib

# NOTE(review): the sklearn/joblib imports above are commented out, yet
# create_svm/train/prediction use `svm` and `joblib` — those functions raise
# NameError as written.  NOTE(review): time.clock() was removed in Python 3.8.

classfies = []  # populated by open_dir() with the training subdirectory names


def get_labels():
    """Build the character-label tables for licence-plate recognition.

    Returns (dict_labels, list_labels): dict_labels maps the displayed
    character (digit, Latin capital, or Chinese province abbreviation) to the
    training-directory name; list_labels is those directory names in order.
    """
    dict_labels = {}
    list_labels = []
    for i in range(10):
        dict_labels[i] = i
    for word in string.ascii_uppercase:
        dict_labels[word] = word
    # Chinese province abbreviations -> training-set directory names.
    dict_labels["川"] = 'zh_cuan'
    dict_labels["鄂"] = 'zh_e'
    dict_labels["赣"] = 'zh_gan'
    dict_labels["甘"] = 'zh_gan1'
    dict_labels["贵"] = 'zh_gui'
    dict_labels["桂"] = 'zh_gui1'
    dict_labels["黑"] = 'zh_hei'
    dict_labels["沪"] = 'zh_hu'
    dict_labels["冀"] = 'zh_ji'
    dict_labels["津"] = 'zh_jin'
    dict_labels["京"] = 'zh_jing'
    dict_labels["吉"] = 'zh_jl'
    dict_labels["辽"] = 'zh_liao'
    dict_labels["鲁"] = 'zh_lu'
    dict_labels["蒙"] = 'zh_meng'
    dict_labels["闽"] = 'zh_min'
    dict_labels["宁"] = 'zh_ning'
    dict_labels["青"] = 'zh_qing'
    dict_labels["琼"] = 'zh_qiong'
    dict_labels["陕"] = 'zh_shan'
    dict_labels["苏"] = 'zh_su'
    dict_labels["晋"] = 'zh_sx'
    dict_labels["皖"] = 'zh_wan'
    dict_labels["湘"] = 'zh_xiang'
    dict_labels["新"] = 'zh_xin'
    dict_labels["豫"] = 'zh_yu'
    dict_labels["渝"] = 'zh_yu1'
    dict_labels["粤"] = 'zh_yue'
    dict_labels["云"] = 'zh_yun'
    dict_labels["藏"] = 'zh_zang'
    dict_labels["浙"] = 'zh_zhe'
    for value in dict_labels.values():
        list_labels.append(value)
    return dict_labels, list_labels


def get_label(_preds):
    """Map predicted class indices back to displayable characters (as strings)."""
    dict_labels, list_labels = get_labels()
    # Reverse lookup: index -> directory name -> displayed character.
    results = [list(dict_labels.keys())[list(dict_labels.values()).index(list_labels[pred])] for pred in _preds]
    results = [str(re) for re in results]
    return results


def open_dir():
    """Collect the class subdirectory names from the (hard-coded) training root."""
    global classfies
    for root, dirs, filename in os.walk('D:\workspace\EasyPR-python\data\easypr_train_data\chars', topdown=False):
        for dir in dirs:
            classfies.append(dir)


def init_data():
    """Load every training image as a binarised greyscale array.

    Returns (x_data, y_data): images as 0/1 arrays and their class indices.
    """
    open_dir()
    dict_labels, list_labels = get_labels()
    # print(len(list_labels))
    x_data = []
    y_data = []
    for index, label in enumerate(list_labels):
        print(label)
        for root, _, filename in os.walk(r'H:/毕业论文/car_recognition/chars/'+str(label), topdown=False):
            for name in filename:
                # print(name)
                # Greyscale, then threshold to {0, 1} via round(pixel/255).
                img = Image.open(os.path.join(root, name)).convert('L')
                img = np.round(np.array(img, 'i')/255)
                x_data.append(img)
                # print(os.path.join(root,name))
                y_data.append(index)
    # print(count)
    return x_data, y_data


def create_svm(dataMat, dataLabel, decision='ovo'):
    """Fit an RBF-kernel SVC and persist it to disk; returns the classifier."""
    clf = svm.SVC(kernel='rbf', decision_function_shape=decision)
    clf.fit(dataMat, dataLabel)
    joblib.dump(clf, r'D:\workspace\car_card_recognition\model\train_model.m')
    return clf


def train():
    """Train the SVM on all data and score it on 1000 randomly chosen samples.

    NOTE(review): the test samples are drawn from the training data itself,
    so the printed score overestimates generalisation.
    """
    tst = time.clock()
    x_data, y_data = init_data()
    x_test = []
    y_test = []
    rans = [i for i in range(len(x_data))]
    for i in range(1000):
        a = random.choice(rans)
        x_test.append(x_data[a])
        y_test.append(y_data[a])
    # Images are flattened to 400 features (20x20 pixels).
    x_train = np.array(x_data).reshape([len(x_data), 400])
    y_train = np.array(y_data).reshape([len(y_data), 1])
    clf = create_svm(x_train, y_train)
    pre_st = time.clock()
    print("Training Finshed! {:.3f}".format((pre_st - tst)))
    x_test = np.array(x_test).reshape([len(x_test), 400])
    y_test = np.array(y_test).reshape([len(y_test), 1])
    score = clf.score(x_test, y_test)
    print("Trained Score {:.3f}%".format(score*100))


def prediction(imgs):
    """Load the persisted SVM and print the predicted characters for `imgs`."""
    clf = joblib.load(r'D:\workspace\EasyPR-python\models\svm\model\train_model.m')
    print("Read Model Ready!")
    pre_start = time.clock()
    _preds = []
    for img in imgs:
        # Same binarisation + flattening as used during training.
        image = np.round(img/255)
        image = image.reshape([1, 400])
        _pred = clf.predict(image)
        _preds.append(_pred[0])
    results = get_label(_preds)
    pre_end = time.clock()
    print("Predtion:"+''.join(results))
    # print("Predtion spent time: {:.4f}s.".format((pre_end-pre_start)))


if __name__ == '__main__':
    # train
    train()
    # predict
    # init_data()
true
98d9dca9e7b963185358ce73494625a122e66527
Python
koliveiraba/TheInternetAutomationChallenge
/scroll_infinito.py
UTF-8
494
2.8125
3
[]
no_license
from selenium import webdriver from selenium.webdriver.common.keys import Keys def find_the_end(browser: webdriver): page_loaded = browser.execute_script("return document.readyState") if page_loaded == "complete": body = browser.find_element_by_css_selector('body') body.send_keys(Keys.PAGE_DOWN) driver = webdriver.Chrome(executable_path='C:\\chromedriver.exe') driver.get("https://the-internet.herokuapp.com/infinite_scroll") while(True): find_the_end(driver)
true
1fe9e44c63a0d3469a07831f3bbd8b3b7d77617f
Python
mikegordo/lottery-checker-py
/freqa.py
UTF-8
1,030
3.0625
3
[]
no_license
class FrequencyAnalyser(object): def analyse(self, data): numbers = [[0 for __ in range(0, 6)] for __ in range(0, 76)] mballs = [0 for __ in range(0, 16)] for item in data: for pos, num in enumerate(item['numbers']): numbers[num][pos+1] += 1 mballs[item['mega']] += 1 maximum = float(len(data)) for i, j in enumerate(numbers): for pos in range(0, len(j)): numbers[i][pos] = numbers[i][pos] / maximum * 100.0 for i in range(0, len(mballs)): mballs[i] = mballs[i] / maximum * 100.0 self.cache = {'numbers': numbers, 'mb': mballs} return self.cache def check(self, set): normal = [] for pos, number in enumerate(set): pos += 1 tempset = self.cache['numbers'][number] right = float(tempset[pos]) for qq in tempset: if qq > right: normal.append(pos - 1) return normal
true
c2b550695a95542d3e001c47e13033cdfe64c799
Python
ryhanahmedtamim/pattern_recognition
/lab3/yest_plot.py
UTF-8
1,778
3.078125
3
[]
no_license
from scipy.io import arff from operator import itemgetter import math import statistics import matplotlib.pyplot as plt allK = [1, 5, 10, 20, 30] data = arff.loadarff("yeast_train" + '.arff') global data_frame data_frame = data[0] global instances def knn(k): instances = 0 correct = 0 train_data_frame = data_frame.copy() for test_data_row in data_frame: instances += 1 original_class = test_data_row[-1].decode("utf-8") distances = [] for train_data_row in train_data_frame: flag = False for i in range(len(train_data_row)): if test_data_row[i] != train_data_row[i]: flag = True break if flag: distance = 0 for i in range(len(train_data_row)-1): distance += ((test_data_row[i] - train_data_row[i]) ** 2) distance = math.sqrt(distance) train_data_class = train_data_row[-1].decode("utf-8") distances.append({'class': train_data_class, 'distance': distance}) classes = [] for i in range(k): obj = min(distances, key=itemgetter('distance')) classes.append(obj['class']) distances.remove(obj) try: predicted_class = statistics.mode(classes) except statistics.StatisticsError as e: predicted_class = classes[0] if predicted_class == original_class: correct += 1 return correct result = [] item = 0 for v_K in allK: result.append(knn(v_K)) print("Correctly classified for k = " + str(v_K) + " : " + str(result[item])) item += 1 plt.plot(allK, result) plt.xlabel('K') plt.ylabel('Error') plt.show()
true
2f7360396d9a3623e5eab1034829fe9db79fab3a
Python
KaluEmeKalu/cracking_the_coding_interview_python_solutions
/ch1_arrays_and_strings/q1_is_permutation.py
UTF-8
914
3.5
4
[]
no_license
import unittest class isPerm(): def run1(self, word1, word2): sorted_word1 = sorted(word1) sorted_word2 = sorted(word2) if sorted_word1 == sorted_word2: return True return False def run2(self, word1, word2): def make_arr(word): arr = [0] * 128 for letter in word: num = ord(letter) arr[num] += 1 return arr return make_arr(word1) == make_arr(word2) class testPerm(unittest.TestCase): def setUp(self): self.isperm = isPerm() def test_is_perm1(self): self.assertFalse(self.isperm.run1('bog', 'god')) self.assertTrue(self.isperm.run1('dog', 'god')) def test_is_perm2(self): self.assertFalse(self.isperm.run2('bog', 'god')) self.assertTrue(self.isperm.run2('dog', 'god')) if __name__ == "__main__": unittest.main()
true
619dd8a1e28146866beee738dff55a628a912bab
Python
jtharris85/CCI_Baseline.py
/CCI_Baseline/CCI_Baseline.py
UTF-8
8,065
2.65625
3
[]
no_license
import pandas as pd
import numpy as np
from google.cloud import bigquery
from .constants import *

version = '1.0.0'


class QueryRuns:
    # Thin wrapper around a BigQuery client for a fixed project/query pair.

    def __init__(self, project, query):
        self.query = query
        self.client = bigquery.Client(project=project)

    def run_query(self):
        # Execute the query and block until the job finishes.
        query_job = self.client.query(self.query)
        query_result = query_job.result()
        return query_result

    def run_query_return_df(self):
        # Same as run_query, materialised as a DataFrame with NaN for NULLs.
        return self.run_query().to_dataframe().fillna(np.nan)


class BaselineBuilds:
    # Builds a baseline-characteristics table (CCI, demographics) for a
    # patient cohort stored in BigQuery and writes it to an Excel workbook.

    def __init__(self, project, cohort_table, output_file, baseline='12 MONTH', year_start=2016,
                 year_end=2020, splits={'gender': 'M/F', 'age': None}, patient_age_col=None,
                 return_base=False):
        # NOTE(review): `splits` is a mutable default argument — shared across
        # instances if ever mutated.
        self.cohort = f'`{cohort_table}`'
        self.baseline = baseline          # look-back window, BigQuery INTERVAL syntax
        self.output_file = output_file
        self.splits = splits
        self.year_start = year_start
        self.year_end = year_end
        self.project = project
        self.patient_age = patient_age_col
        self.return_base = return_base
        # Silence SettingWithCopyWarning: the code below assigns via .loc on slices.
        pd.options.mode.chained_assignment = None
        if not return_base:
            base = self.data_pull()
            self.final_build(base)

    def data_pull(self):
        # Pull the cohort joined to demographics and to de-duplicated
        # comorbidities found within the baseline window before index_date.
        # The zip code is taken from the year-specific Zip_<year> column.
        cases = []
        for y in list(range(2020, 2009, -1)):
            cases.append(f'WHEN EXTRACT(YEAR FROM index_date) = {y} THEN Zip_{y}')
        cases = '\n'.join(cases)
        q = f'''
        WITH patients AS (
            SELECT
                patient_key,
                {INDEX_DATE},
                {"patient_age" if self.patient_age else f"EXTRACT(YEAR FROM index_date) - SAFE_CAST(patient_birth_year AS INT64) AS patient_age"},
                CASE
                    WHEN {COL_GENDER} = '1' THEN 'M'
                    WHEN {COL_GENDER} = '2' THEN 'F'
                    ELSE 'U'
                END AS {COL_GENDER},
                CASE
                    {cases}
                END AS zip
            FROM {self.cohort} AS patients
            LEFT JOIN {DEMO_DATA} AS demo_data
                USING (patient_key)
        ),
        all_comorbidities AS (
            SELECT
                patient_key,
                claim_number,
                diagnosis,
                comorbidity,
                weight,
                year_of_service
            FROM {DIAGNOSES}
            INNER JOIN {ICD_ID}
                ON (left(diagnosis, 3) = diagnosis_3
                    OR left(diagnosis, 4) = diagnosis_4)
            RIGHT JOIN {self.cohort}
                USING (patient_key)
            WHERE year_of_service BETWEEN DATE_SUB(index_date, INTERVAL {self.baseline})
                AND DATE_SUB(index_date, INTERVAL 1 DAY)
        ),
        unique_co AS (
            SELECT DISTINCT
                patient_key,
                comorbidity,
                weight
            FROM all_comorbidities
        )
        SELECT *
        FROM patients
        LEFT JOIN unique_co
            USING (patient_key)
        '''
        print(q)
        table_build = QueryRuns(self.project, q).run_query_return_df()
        print(f"Total Patients: {len(table_build.patient_key.unique().tolist())}")
        if self.return_base:
            # Keep the raw pull and dump it to CSV for inspection.
            self.base = table_build
            table_build.to_csv(f"{self.output_file}_base.csv")
        return table_build

    def add_splits(self, base):
        # Derive the stratification columns: region (from 3-digit zip/state),
        # index year, age group, optional over/under-age split, and CCI category.
        zips = pd.read_csv(ZIPS, usecols=['zip', 'state_id'], converters={'zip': str})
        zips['zip'] = zips['zip'].str.slice(0, 3)
        zips = zips.drop_duplicates()
        df = base.merge(zips, how='left', on='zip')
        df['age'] = df['patient_age']
        df['region'] = np.nan
        for k, v in REGIONS.items():
            df['region'].loc[df.state_id.isin(v)] = k
        df['year'] = pd.to_datetime(df['index_date']).dt.year
        df['age_group'] = np.nan
        for k, v in AGE_BRACKETS.items():
            df['age_group'].loc[df.age.between(v[0], v[1])] = k
        if self.splits['age']:
            # Binary over/under split at the requested age cut-off.
            age = self.splits['age']
            df['age_large'] = np.nan
            df['age_large'].loc[df.age >= age] = f'Over {age}'
            df['age_large'].loc[df.age < age] = f'Under {age}'
        # Charlson Comorbidity Index: sum of distinct comorbidity weights per patient.
        cci = df[['patient_key', 'comorbidity', 'weight']].drop_duplicates().groupby('patient_key').sum()[
            ['weight']].reset_index()
        cci.columns = ['patient_key', 'CCI']
        df2 = df.merge(cci, how='left', on='patient_key')
        df2['cci_cat'] = '0'
        for k, v in CCI_CATS.items():
            # Keys like '3+' are open-ended upper categories.
            if '+' in k:
                df2['cci_cat'].loc[df2.CCI >= v] = k
            else:
                df2['cci_cat'].loc[df2.CCI == v] = k
        return df2

    def final_build(self, base):
        # Assemble the baseline table: one column per cohort stratum, one row
        # block per category (year, age group, CCI, ...), formatted "n (pct%)",
        # written to <output_file>.xlsx and returned.
        df2 = self.add_splits(base)
        breakdown = {}
        breakdown['All Patients'] = df2
        if self.splits['gender']:
            breakdown['Male'] = df2[df2[COL_GENDER] == 'M']
            breakdown['Female'] = df2[df2[COL_GENDER] == 'F']
        if self.splits['age']:
            age_break = df2['age_large'].unique().tolist()
            for age in age_break:
                breakdown[f'Males {age}'] = df2[(df2['age_large'] == age) & (df2[COL_GENDER] == 'M')]
                breakdown[f'Females {age}'] = df2[(df2['age_large'] == age) & (df2[COL_GENDER] == 'F')]
        writer = pd.ExcelWriter(f'{self.output_file}.xlsx', engine='xlsxwriter')
        dfs = []
        combo = dict(zip(CATS, TITLES))
        for c, t in combo.items():
            # Drop the accumulator from the previous category, if any.
            try:
                del df_new
            except:
                pass
            for k, v in breakdown.items():
                v = v
                if c == 'year':
                    # The 'year' block also carries the total patient count header.
                    df_total = pd.DataFrame({'patient_key': [v['patient_key'].nunique(), '']},
                                            index=['Total Patients', 'Index Year, n(%)'])
                    df = pd.concat([df_total, v.groupby(c).nunique()[['patient_key']]])
                elif c == 'age_group':
                    mean = round(v.drop_duplicates(subset=['patient_key'])['age'].mean(), 1)
                    std = round(v.drop_duplicates(subset=['patient_key'])['age'].std(), 1)
                    df_total = pd.DataFrame({'patient_key': [f'{mean} ({std})', '']},
                                            index=['Age, Mean (SD)', t])
                elif c == 'cci_cat':
                    mean = round(v.drop_duplicates(subset=['patient_key'])['CCI'].mean(), 2)
                    std = round(v.drop_duplicates(subset=['patient_key'])['CCI'].std(), 2)
                    df_total = pd.DataFrame({'patient_key': [f'{mean} ({std})']},
                                            index=['CCI Score, Mean (SD)', t])
                else:
                    df_total = pd.DataFrame({'patient_key': ''}, index=[t])
                # NOTE(review): indentation reconstructed from a mangled source —
                # this concat appears to apply to every branch, which overwrites
                # the 'year' branch's own concat above; confirm intended layout.
                df = pd.concat([df_total, v.groupby(c).nunique()[['patient_key']],
                                pd.DataFrame({'patient_key': ''}, index=[''])])
                df.columns = [k]
                df[k] = pd.to_numeric(df[k], errors='ignore', downcast='integer')
                # Merge this stratum's column into the category accumulator.
                try:
                    df_new = df_new.merge(df, how='left', left_index=True, right_index=True)
                except:
                    df_new = df
            dfs.append(df_new)
        segments = pd.concat(dfs)
        percents = segments
        # Percent of the column's 'Total Patients' for every numeric cell;
        # non-numeric cells fall through to NaN.
        percents_new = {}
        for col in list(percents):
            percents_check = []
            for row in percents.index:
                try:
                    percents_check.append(round(percents[col][row] / percents[col]['Total Patients'] * 100, 1))
                except:
                    percents_check.append(np.nan)
            percents_new[col] = percents_check
        percentages = pd.DataFrame(percents_new, index=percents.index)
        # Render "n (pct%)", strip trailing '.0', and blank the NaN artefacts.
        formatted = (segments.astype(str).replace(r'\.0$', '', regex=True) + ' (' + percentages.astype(str) + '%)')
        formatted = formatted.replace(r' \(nan%\)', '', regex=True).replace('nan', np.nan)
        formatted.to_excel(writer, sheet_name='Full Cohort')
        writer.save()
        return formatted
true
5cf4a7e80ea6e8d81290b4e80bda636b1a3d32fa
Python
kopok2/CodeforcesSolutionsPython
/src/682A/test_cdf_682A.py
UTF-8
2,371
2.59375
3
[ "MIT" ]
permissive
import unittest from unittest.mock import patch from cdf_682A import CodeforcesTask682ASolution class TestCDF682A(unittest.TestCase): def test_682A_acceptance_1(self): mock_input = ['6 12'] expected = '14' with patch('builtins.input', side_effect=mock_input): Solution = CodeforcesTask682ASolution() Solution.read_input() Solution.process_task() actual = Solution.get_result() self.assertEquals(expected, actual) def test_682A_acceptance_2(self): mock_input = ['11 14'] expected = '31' with patch('builtins.input', side_effect=mock_input): Solution = CodeforcesTask682ASolution() Solution.read_input() Solution.process_task() actual = Solution.get_result() self.assertEquals(expected, actual) def test_682A_acceptance_3(self): mock_input = ['1 5'] expected = '1' with patch('builtins.input', side_effect=mock_input): Solution = CodeforcesTask682ASolution() Solution.read_input() Solution.process_task() actual = Solution.get_result() self.assertEquals(expected, actual) def test_682A_acceptance_4(self): mock_input = ['3 8'] expected = '5' with patch('builtins.input', side_effect=mock_input): Solution = CodeforcesTask682ASolution() Solution.read_input() Solution.process_task() actual = Solution.get_result() self.assertEquals(expected, actual) def test_682A_acceptance_5(self): mock_input = ['5 7'] expected = '7' with patch('builtins.input', side_effect=mock_input): Solution = CodeforcesTask682ASolution() Solution.read_input() Solution.process_task() actual = Solution.get_result() self.assertEquals(expected, actual) def test_682A_acceptance_6(self): mock_input = ['21 21'] expected = '88' with patch('builtins.input', side_effect=mock_input): Solution = CodeforcesTask682ASolution() Solution.read_input() Solution.process_task() actual = Solution.get_result() self.assertEquals(expected, actual) if __name__ == "__main__": unittest.main()
true
fe2a7fc265c83a7347481f8fa004f21140421c64
Python
Neeky/chanceClient
/chanceClient/spiders/chinaclear.py
UTF-8
865
2.671875
3
[]
no_license
import scrapy from datetime import datetime now =datetime.now() class InvestorOverviewItem(scrapy.Item): newlyAddInvestors=scrapy.Field() endInvestors =scrapy.Field() pushDate =scrapy.Field() def convert(self): datas=dict(self) res ={} assert len(datas['newlyAddInvestors']) ==1,'exception in InvestorOverviewItem.convert len != 1' assert len(datas['endInvestors']) ==1,'exception in InvestorOverviewItem.convert len != 1' newlyAddInvestors =datas['newlyAddInvestors'][0].replace(',','') endInvestors =datas['endInvestors'][0].replace(',','') res['newlyAddInvestors']=float(newlyAddInvestors) res['endInvestors'] =float(endInvestors) res['pushDate'] ='{0:0>#4}-{1:0>#2}-{2:0>#2}'.format(now.year,now.month,now.day) return res
true
bdfff961d5d5d3890acf21addd0254a32775e2ca
Python
deepio/pybagit
/examples/newbag.py
UTF-8
3,464
2.78125
3
[ "CC-BY-4.0", "MIT", "CC-BY-3.0" ]
permissive
# The MIT License # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell import pybagit.bagit # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # sample code for how to create and update a bag. import sys, os import pybagit.bagit from optparse import OptionParser def create_new_bag(bagname): """ Takes a path to a bag. Returns a BagIt object. 
""" bag = pybagit.bagit.BagIt(bagname) return bag def make_test_files(bagobject): print("Creating Test Files...") # create some test directories os.makedirs(os.path.join(bagobject.data_directory, "subdir", "subsubdir1")) os.makedirs(os.path.join(bagobject.data_directory, "subdir", "subsubdir2")) os.makedirs(os.path.join(bagobject.data_directory, "subdir", "subsubdir3")) os.makedirs(os.path.join(bagobject.data_directory, "subdir", "subsubdir4")) # set up some random file contents t1 = "afdklsjanas.sm,nf.as,dfsdflsjkdhfalskjdfhasdfa" t2 = "zxm,v.cnzxclkjfsdiouaafdskjhasdfhalskdjfhasldkfja" t3 = "a0s98dfyasdfhaslkj938asiudhflaksjhdp9q8yiaudflaksd" t4 = "zx/v.,mzxc';lsdf;laksdjhfapda098dp9a78erewqkl;asdfj" testfiles = [t1, t2, t3, t4] i = 1 for contents in testfiles: f = open( os.path.join( bagobject.data_directory, "subdir", "subsubdir{0}".format(i), "testfile.txt", ), "w", ) f.write(contents) f.close() i += 1 print("Done!") if __name__ == "__main__": parser = OptionParser() parser.add_option("-p", "--path", help="Path to save the new bag") parser.add_option("-b", "--bag", help="Bag Name") parser.add_option( "-c", "--compress", help="Whether to compress the bag", action="store_true" ) (options, args) = parser.parse_args() if not options.path and options.bag: print("You need to specify a path and a bag name.") sys.exit(0) # instantiate the bag. mybag = create_new_bag(os.path.join(options.path, options.bag)) # set up some test files. make_test_files(mybag) # update the bag mybag.update() # validate the bag mybag.validate() # print some information about the bag. mybag.show_bag_info() # compress the bag (tgz is the default) if options.compress: print("Compressing.") pack = mybag.package(options.path) print("Package created at {0}".format(pack)) print("Done!") # and we're done!
true
1664cc7654af7c04e4ba8006ef313e17b5d62f03
Python
yizhongw/tf-qanet
/src/vocab.py
UTF-8
4,515
2.734375
3
[ "MIT" ]
permissive
#!/usr/bin/env python # -*- coding: utf-8 -*- # author: yizhong # created_at: 17-5-2 下午3:47 import h5py import json import numpy as np class Vocab(object): def __init__(self, filename=None, lower=False): self.id2token = {} self.token2id = {} self.token_cnt = {} self.use_lowercase = lower self.embed_dim = None self.embeddings = None self.pad_token = '<pad>' self.unk_token = '<unk>' self.add(self.pad_token) # <pad> -> 0 self.add(self.unk_token) # <unk> -> 1 if filename is not None: self.load_file(filename) def size(self): return len(self.id2token) def load_file(self, filename): for line in open(filename, 'r'): token = line.rstrip('\n') self.add(token) def get_id(self, key): key = key.lower() if self.use_lowercase else key if key in self.token2id: return self.token2id[key] elif key.lower() in self.token2id: return self.token2id[key.lower()] else: return self.token2id[self.unk_token] def get_token(self, idx): try: return self.id2token[idx] except KeyError: return self.unk_token def add(self, label, cnt=1): label = label.lower() if self.use_lowercase else label if label in self.token2id: idx = self.token2id[label] else: idx = len(self.id2token) self.id2token[idx] = label self.token2id[label] = idx if cnt > 0: if label in self.token_cnt: self.token_cnt[label] += cnt else: self.token_cnt[label] = cnt return idx def filter_tokens_by_cnt(self, min_cnt): tokens_to_keep = [token for token in self.token2id if self.token_cnt[token] >= min_cnt] # rebuild the token x id map self.token2id = {} self.id2token = {} self.add(self.pad_token, 0) self.add(self.unk_token, 0) for token in tokens_to_keep: self.add(token, cnt=0) def load_pretrained_embeddings(self, embedding_path): trained_embeddings = {} if embedding_path.endswith('.hdf5'): with h5py.File(embedding_path, 'r') as fin: jstr = fin['vocab'][()].decode('ascii', 'strict') vocab = json.loads(jstr) embed = np.array(fin['embed']) for token_idx, token in enumerate(vocab): if token not in self.token2id: continue 
trained_embeddings[token] = embed[token_idx] if self.embed_dim is None: self.embed_dim = len(embed[token_idx]) else: with open(embedding_path, 'r') as fin: for line in fin: contents = line.strip().split(' ') token = contents[0] values = list(map(float, contents[1:])) if token in self.token2id: trained_embeddings[token] = values else: token = token.lower() if token in self.token2id and token not in trained_embeddings: trained_embeddings[token] = values if self.embed_dim is None: self.embed_dim = len(contents) - 1 filtered_tokens = trained_embeddings.keys() # rebuild the token x id map self.token2id = {} self.id2token = {} self.add(self.pad_token, 0) self.add(self.unk_token, 0) for token in filtered_tokens: self.add(token, cnt=0) # load embeddings self.embeddings = np.zeros([self.size(), self.embed_dim]) for token in self.token2id.keys(): if token in trained_embeddings: self.embeddings[self.get_id(token)] = trained_embeddings[token] def convert_to_ids(self, tokens): """Convert tokens to ids, use unk_token if the token is not in vocab.""" vec = [] vec += [self.get_id(label) for label in tokens] return vec def recover_from_ids(self, ids, stop_id=None): """Recover tokens from ids""" tokens = [] for i in ids: tokens += [self.get_token(i)] if stop_id is not None and i == stop_id: break return tokens
true
7c4cd5f280f413a5c9eb8442b7c6910999fcf79f
Python
jakehoare/leetcode
/python_1_to_1000/650_2_Keys_Keyboard.py
UTF-8
1,271
3.71875
4
[]
no_license
_author_ = 'jake' _project_ = 'leetcode' # https://leetcode.com/problems/2-keys-keyboard/ # Initially on a notepad only one character 'A' is present. You can perform two operations on this notepad for each step: # Copy All: You can copy all the characters present on the notepad (partial copy is not allowed). # Paste: You can paste the characters which are copied last time. # Given a number n. You have to get exactly n 'A' on the notepad by performing the minimum number of steps permitted. # Output the minimum number of steps to get n 'A'. # If n is prime, we can only make it by copying 'A' and pasting it n - 1 times. # Consider breaking n down to make a single 'A'. If we find a divisor d of n, then from n // d we can take d steps to # make n 'A's. Repeat this process as many times as possible with each divisor starting from 2, until only 1 'A' left. # Solution is the sum of the factors (apart from 1). # Time - O(n) # Space - O(1) class Solution(object): def minSteps(self, n): """ :type n: int :rtype: int """ steps = 0 divisor = 2 while n > 1: while n % divisor == 0: steps += divisor n //= divisor divisor += 1 return steps
true
b1b7856bcf1143416443e0f9143fde31c5cd0feb
Python
ankit-119/acadview-assignements
/important methods/ques1.py
UTF-8
151
3.59375
4
[]
no_license
lst1=[] n=int(input("enter the no of elements")) for i in range(0,n): x=input("enter the elements") lst1.append(x) lst1.reverse() print(lst1)
true