blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
a3af0b321d0afa09b77e6a77ffcb3dbae99ecc62 | Python | devangverma/CTCI | /stacks_and_queues/3.6.py | UTF-8 | 2,620 | 3.46875 | 3 | [] | no_license |
class MyQueue:
    """Queue built from two stacks (CTCI-style).

    stack1 holds items in enqueue order; stack2 holds them reversed so the
    queue's front sits on top.  Each operation first shifts every item onto
    whichever stack it needs.
    """

    def __init__(self):
        self.stack1 = []
        self.stack2 = []

    def enqueue(self, val):
        """Append val at the back of the queue."""
        # Restore enqueue order onto stack1 before pushing the new item.
        while self.stack2:
            self.stack1.append(self.stack2.pop())
        self.stack1.append(val)

    def dequeue(self):
        """Pop and return the front item (IndexError when empty)."""
        while self.stack1:
            self.stack2.append(self.stack1.pop())
        return self.stack2.pop()

    def peek(self):
        """Return the front item without removing it, or -1 when empty."""
        while self.stack1:
            self.stack2.append(self.stack1.pop())
        return self.stack2[-1] if self.stack2 else -1

    def isEmpty(self):
        """True when the queue holds no items."""
        return not (self.stack1 or self.stack2)
class AnimalShelter:
    """Animal shelter (CTCI 3.6): adopt the oldest animal overall, or the
    oldest dog/cat specifically.

    Each species has its own queue plus a combined arrival-order queue; the
    caller is expected to mirror every put("dog"/"cat", id) with a
    put("", id) into the combined queue (see the demo script below).
    """

    def __init__(self):
        self.dogs = MyQueue() #queue.Queue()
        self.cats = MyQueue()
        self.animals = MyQueue()

    def put(self, type, id):
        # Any type other than "dog"/"cat" feeds the combined arrival queue.
        if type == "dog":
            self.dogs.enqueue(id)
        elif type == "cat":
            self.cats.enqueue(id)
        else:
            self.animals.enqueue(id)

    def get(self, type):
        result = None
        temp = MyQueue()
        if type == "dog":
            if self.dogs.peek() != -1:
                if self.dogs.peek() == self.animals.peek():
                    # Oldest animal overall is the oldest dog: pop both queues.
                    self.dogs.dequeue()
                    return self.animals.dequeue()
                else:
                    # Rotate through the combined queue looking for the dog's id.
                    while not self.animals.isEmpty():
                        if self.dogs.peek() == self.animals.peek():
                            # NOTE(review): when a match is found this dequeues the
                            # matching animal AND then immediately dequeues the next
                            # one into temp, consuming an extra entry (and raising
                            # IndexError if the queue just became empty) — looks
                            # like a bug; an else/continue is probably intended.
                            result = self.animals.dequeue()
                        temp.enqueue(self.animals.dequeue())
                    self.dogs.dequeue()
                    # Restore the skipped animals back into the combined queue.
                    while not temp.isEmpty():
                        self.animals.enqueue(temp.dequeue())
            else:
                print("No more dogs left.")
        elif type == "cat":
            if self.cats.peek() != -1:
                if self.cats.peek() == self.animals.peek():
                    self.cats.dequeue()
                    return self.animals.dequeue()
                else:
                    # Same rotation as the dog branch (same double-dequeue caveat).
                    while not self.animals.isEmpty():
                        if self.cats.peek() == self.animals.peek():
                            result = self.animals.dequeue()
                        temp.enqueue(self.animals.dequeue())
                    self.cats.dequeue()
                    while not temp.isEmpty():
                        self.animals.enqueue(temp.dequeue())
            else:
                print("No more cats left.")
        else:
            # Any other type: adopt the oldest animal overall and keep the
            # species queues in sync.
            result = self.animals.dequeue()
            if result == self.dogs.peek():
                self.dogs.dequeue()
            elif result == self.cats.peek():
                self.cats.dequeue()
        return result
# Manual exercise of the shelter: every animal is enqueued twice — once in
# its species queue and once (via the "" type) in the combined
# arrival-order queue, which the get() logic relies on.
animal = AnimalShelter()
animal.put("dog", 1)
animal.put("", 1)
animal.put("cat", 2)
animal.put("", 2)
animal.put("cat", 3)
animal.put("", 3)
animal.put("cat", 4)
animal.put("", 4)
animal.put("dog", 5)
animal.put("", 5)
animal.put("cat", 6)
animal.put("", 6)
# "animal" is neither "dog" nor "cat", so these take the oldest overall.
print("animal:",animal.get("animal"))
print("animal:",animal.get("animal"))
print("animal:",animal.get("dog"))
print("animal:",animal.get("dog"))
| true |
9a54844f513f5ccc914b8c2727538508eb711ada | Python | hafiyyanabdulaziz/cekos-collaborativefiltering-notebook | /cf.py | UTF-8 | 1,844 | 2.71875 | 3 | [] | no_license | import pandas as pd
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
# Load the raw interaction log, then pivot it into a user x property matrix
# of ratings; users/properties with no interaction become 0.
property_interaction_df = pd.DataFrame(pd.read_csv('dataset_interaction_excel.csv',index_col=0))
interactions_matrix = pd.pivot_table(property_interaction_df, values='ratings', index='user_id', columns='property_id', aggfunc=np.max).fillna(0)
def similar_users(user_id, interactions_matrix):
    """Rank all users of *interactions_matrix* by cosine similarity to *user_id*.

    Returns (most_similar_users, similarity_score), both sorted most-similar
    first, with the target user removed.
    """
    similarity = []
    for user in range(0, interactions_matrix.shape[0]):
        # Cosine similarity between the target user's rating row and this user's.
        sim = cosine_similarity([interactions_matrix.loc[user_id]], [interactions_matrix.loc[interactions_matrix.index[user]]])
        similarity.append((interactions_matrix.index[user], sim))
    similarity.sort(key=lambda x: x[1], reverse=True)
    most_similar_users = [tup[0] for tup in similarity]
    similarity_score = [tup[1] for tup in similarity]
    # NOTE(review): this assumes the target user sorts first (self-similarity
    # of 1.0), so removing user_id from one list and the first score from the
    # other keeps them aligned — confirm no other user ties at 1.0.
    most_similar_users.remove(user_id)
    similarity_score.remove(similarity_score[0])
    return most_similar_users, similarity_score
def recommendations(user_id, num_of_property, user_item_interactions):
    """Recommend up to *num_of_property* property_ids for *user_id*.

    Walks users most similar to the target (via similar_users) and collects
    properties they interacted with that the target has not seen yet.
    """
    most_similar_users = similar_users(user_id, user_item_interactions)[0]
    # BUGFIX: this function previously ignored its user_item_interactions
    # parameter and read the module-level interactions_matrix instead.
    # Properties the target user has already interacted with:
    property_ids = set(list(user_item_interactions.columns[np.where(user_item_interactions.loc[user_id] > 0)]))
    recommended = []
    already_interacted = property_ids.copy()
    for similar_user in most_similar_users:
        if len(recommended) < num_of_property:
            similar_user_property_ids = set(list(user_item_interactions.columns[np.where(user_item_interactions.loc[similar_user] > 0)]))
            recommended.extend(list(similar_user_property_ids.difference(already_interacted)))
            already_interacted = already_interacted.union(similar_user_property_ids)
        else:
            break
    return recommended[:num_of_property]
# Demo: top-5 property recommendations for one user id.
print(recommendations('5bfaed099a291d00012035eb', 5, interactions_matrix))
68c9e7e3db57a1192c71f8bf89dcfc22f87afd37 | Python | xulzee/LeetCodeProjectPython | /54. Spiral Matrix.py | UTF-8 | 2,005 | 3.609375 | 4 | [] | no_license | # -*- coding: utf-8 -*-
# @Time : 2019/3/15 18:42
# @Author : xulzee
# @Email : xulzee@163.com
# @File : 54. Spiral Matrix.py
# @Software: PyCharm
from typing import List
class Solution:
    """LeetCode 54: return the elements of a matrix in spiral order."""

    def spiralOrder(self, matrix: List[List[int]]) -> List[int]:
        """Collect matrix entries ring by ring, outermost first."""
        if not matrix:
            return []
        res: List[int] = []
        top, left = 0, 0
        bottom, right = len(matrix) - 1, len(matrix[0]) - 1
        # Shrink the ring bounds inwards until they cross.
        while left <= right and top <= bottom:
            self.printEdge(matrix, top, left, bottom, right, res)
            top, left = top + 1, left + 1
            bottom, right = bottom - 1, right - 1
        return res

    def printEdge(self, matrix: list, start_row: int, start_column: int, end_row: int, end_column: int, res: List):
        """Append the ring bounded by the given corners to res, clockwise."""
        if start_row == end_row:
            # Degenerate ring: a single row.
            res.extend(matrix[start_row][start_column:end_column + 1])
        elif start_column == end_column:
            # Degenerate ring: a single column.
            for row in range(start_row, end_row + 1):
                res.append(matrix[row][start_column])
        else:
            res.extend(matrix[start_row][start_column:end_column])      # top edge, left -> right
            for row in range(start_row, end_row):                       # right edge, top -> bottom
                res.append(matrix[row][end_column])
            res.extend(matrix[end_row][end_column:start_column:-1])     # bottom edge, right -> left
            for row in range(end_row, start_row, -1):                   # left edge, bottom -> top
                res.append(matrix[row][start_column])
if __name__ == '__main__':
    # A = [[5, 1, 9, 11],
    #      [2, 4, 8, 10],
    #      [13, 3, 6, 7],
    #      [15, 14, 12, 16]]
    # Single-row matrix smoke test.
    A = [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]
    res = []
    res = Solution().spiralOrder(A)
    print(res)
| true |
93b2934a5f1b1417ad65c27f4b6890131f59309a | Python | sjdv1982/seamless | /seamless/imperative/Transformer.py | UTF-8 | 6,832 | 2.75 | 3 | [
"MIT"
] | permissive | """Imperative transformers"""
import inspect
from copy import deepcopy
import functools
import multiprocessing
class Transformer:
    def __init__(self, func, is_async, **kwargs):
        """Imperative transformer.

        Imperative transformers can be called as normal functions, but
        the source code of the function and the arguments are converted
        into a Seamless transformation.
        The transformer may be asynchronous, which means that calling it
        creates a coroutine.

        The following properties can be set:
        - local. If True, transformations are executed in the local
          Seamless instance.
          If False, they are delegated to remote job servers.
          If None (default), remote job servers are tried first
          and local execution is a fallback.
        - blocking. Only for non-async transformers.
          If True, calling the function executes it immediately,
          returning its value.
          If False, it returns an imperative Transformation object.
          Imperative transformations can be queried for their .value
          or .logs. Doing so forces their execution.
          As of Seamless 0.11, forcing one transformation also forces
          all other transformations.
        - celltypes. Returns a wrapper where you can set the celltypes
          of the individual transformer pins.
          The syntax is: Transformer.celltypes.a = "text"
          (or Transformer.celltypes["a"] = "text")
          for pin "a"."""
        from ..highlevel.Environment import Environment
        from . import getsource, serialize, calculate_checksum, _get_semantic
        # Capture and checksum the wrapped function's source; the semantic
        # checksum identifies the code independently of its exact buffer form.
        code = getsource(func)
        codebuf = serialize(code, "python")
        code_checksum = calculate_checksum(codebuf)
        semantic_code_checksum = _get_semantic(code, code_checksum)
        signature = inspect.signature(func)
        self._semantic_code_checksum = semantic_code_checksum
        self._signature = signature
        self._codebuf = codebuf
        self._code_checksum = code_checksum
        self._is_async = is_async
        # One celltype per parameter pin, plus the result pin; all "mixed"
        # until the user overrides them via .celltypes.
        self._celltypes = {k: "mixed" for k in signature.parameters}
        self._celltypes["result"] = "mixed"
        self._modules = {}
        self._blocking = True
        self._environment = Environment(self)
        self._environment_state = None
        if "meta" in kwargs:
            self._meta = deepcopy(kwargs["meta"])
        else:
            self._meta = {"transformer_path": ["tf", "tf"]}
        self._kwargs = kwargs
        # Make the wrapper look like the wrapped function (__name__, __doc__, ...).
        functools.update_wrapper(self, func)

    @property
    def celltypes(self):
        # Mutations go through CelltypesWrapper.__setitem__, which validates
        # the celltype name.
        return CelltypesWrapper(self._celltypes)

    @property
    def modules(self):
        return ModulesWrapper(self._modules)

    @property
    def environment(self) -> "Environment":
        """Computing environment to execute transformations in"""
        return self._environment

    def __call__(self, *args, **kwargs):
        from .Transformation import Transformation
        from .module import get_module_definition
        from . import _run_transformer, _run_transformer_async
        from .. import database_sink
        # Raises TypeError early if the call doesn't match the wrapped signature.
        self._signature.bind(*args, **kwargs)
        if multiprocessing.current_process().name != "MainProcess":
            # We are already inside a transformation subprocess.
            if self._is_async:
                raise NotImplementedError # no plans to implement this...
            if not database_sink.active:
                raise RuntimeError("Running @transformer inside a transformation requires a Seamless database")
        runner = _run_transformer_async if self._is_async else _run_transformer
        if not self._blocking:
            # Non-blocking: hand the runner a callback that fills in the
            # Transformation object returned to the caller.
            tr = Transformation()
            result_callback = tr._set
        else:
            result_callback = None
        modules = {}
        for module_name, module in self._modules.items():
            module_definition = get_module_definition(module)
            modules[module_name] = module_definition
        result = runner(
            self._semantic_code_checksum,
            self._codebuf,
            self._code_checksum,
            self._signature,
            self._meta,
            self._celltypes,
            modules,
            result_callback,
            args,
            kwargs,
            env=self._environment._to_lowlevel()
        )
        if self._blocking:
            return result
        else:
            return tr

    @property
    def meta(self):
        return self._meta

    @meta.setter
    def meta(self, meta):
        # NOTE(review): self._meta is a dict, and dicts do not support slice
        # assignment — `self._meta[:] = meta` raises TypeError at runtime.
        # This likely should be self._meta.update(meta) or a rebind; confirm
        # whether the setter is ever exercised.
        self._meta[:] = meta

    @property
    def local(self):
        return self.meta.get("local")

    @local.setter
    def local(self, value:bool):
        self.meta["local"] = value

    @property
    def blocking(self):
        return self._blocking

    @blocking.setter
    def blocking(self, value:bool):
        if not isinstance(value, bool):
            raise TypeError(value)
        if (not value) and self._is_async:
            raise ValueError("non-blocking is meaningless for a coroutine")
        self._blocking = value

    def __setattr__(self, attr, value):
        # Allow private attributes and known properties only; anything else is
        # probably a typo, so fail loudly instead of silently storing it.
        if attr.startswith("_") or hasattr(type(self), attr):
            return super().__setattr__(attr, value)
        raise AttributeError(attr)
class CelltypesWrapper:
    """Wrapper around an imperative transformer's celltypes.

    Supports both attribute access (w.a) and item access (w["a"]); writes
    are validated against the known celltype names.
    """

    def __init__(self, celltypes):
        self._celltypes = celltypes

    def __getattr__(self, attr):
        return self._celltypes[attr]

    def __getitem__(self, key):
        return self._celltypes[key]

    def __setattr__(self, attr, value):
        # Private attributes go straight to the instance; public names are
        # routed through the validating item setter.
        if attr.startswith("_"):
            return super().__setattr__(attr, value)
        return self.__setitem__(attr, value)

    def __setitem__(self, key, value):
        from ..core.cell import celltypes
        if key not in self._celltypes:
            raise AttributeError(key)
        allowed = list(celltypes.keys()) + ["silk"]
        if value not in allowed:
            raise TypeError(value, allowed)
        self._celltypes[key] = value

    def __dir__(self):
        return sorted(self._celltypes)

    def __str__(self):
        return str(self._celltypes)

    def __repr__(self):
        return self.__str__()
class ModulesWrapper:
    """Wrapper around an imperative transformer's imported modules.

    Attribute reads look modules up by name; attribute writes only accept
    actual module objects.
    """

    def __init__(self, modules):
        self._modules = modules

    def __getattr__(self, attr):
        return self._modules[attr]

    def __setattr__(self, attr, value):
        from types import ModuleType
        # Private attributes bypass the module check.
        if attr.startswith("_"):
            return super().__setattr__(attr, value)
        if not isinstance(value, ModuleType):
            raise TypeError(type(value))
        self._modules[attr] = value

    def __dir__(self):
        return sorted(self._modules)

    def __str__(self):
        return str(self._modules)

    def __repr__(self):
        return self.__str__()
| true |
2ea2bef9479fd0abc6fd921ebbadd973a0ab87ea | Python | estraviz/codewars | /6_kyu/longest_palindrome/python/solution.py | UTF-8 | 416 | 3.734375 | 4 | [] | no_license | """longest_palindrome"""
def longest_palindrome(s):
    """Return the length of the longest palindromic substring of s."""
    best = 0
    for start in range(len(s)):
        # Try candidates from the longest downwards; give up on this start
        # as soon as no remaining candidate could beat the current best.
        for stop in range(len(s), start, -1):
            if best >= stop - start:
                break
            if is_palindrome(s[start:stop]):
                best = stop - start
                break
    return best


def is_palindrome(s):
    """True when s reads the same forwards and backwards."""
    return s == s[::-1]
| true |
def piramideNumCrescente(n):
    """Print an increasing-number pyramid: row k shows 1..k-1, blank-line separated."""
    a = 1
    b = 1
    for i in range(n+1):
        # Print the numbers 1..b-1 for this row.
        while a < b:
            print(a, end = " ")
            a +=1
        # Note: print("\n") emits TWO newlines (the string plus print's own).
        print("\n")
        b +=1
        a = 1
piramideNumCrescente(5)
def two_params(aname, acolor):
    """Echo back the name and color the user entered."""
    return f"You have entered '{aname}' for a name and '{acolor}' for a color.\n"


def one_param(alocation):
    """Echo back the location the user entered."""
    return f"You have entered '{alocation}' for location.\n"
def isPalindrome(str1):
    """True when str1 reads the same forwards and backwards."""
    # Compare each character with its mirror from the other end.
    lenn=int(len(str1)/2)
    for i in range(0,lenn):
        if str1[i]!=str1[len(str1)-1-i]:
            return False
    return True

# Read n words (second token of each line), then count how many of the n*n
# ordered concatenations form a palindrome.
n=int(input())
list1=[]
for i in range(0,n):
    list1.append(input().split()[1])
list2=[]
for i in range(0,n):
    for j in range(0,n):
        list2.append(list1[i]+list1[j])
count=0
for i in list2:
    if isPalindrome(i):
        count+=1
print(count)
95891e6ff37851cdf08b9cae68832d50962e8aec | Python | GabrielCernei/codewars | /kyu7/Remove_The_Minimum.py | UTF-8 | 840 | 4.375 | 4 | [] | no_license | # https://www.codewars.com/kata/remove-the-minimum/train/python
'''
Given an array of integers, remove the smallest value. Do not mutate the original array/list.
If there are multiple elements with the same value, remove the one with a lower index. If you get
an empty array/list, return an empty array/list.
Don't change the order of the elements that are left.
'''
def remove_smallest(numbers):
    """Return a copy of *numbers* without the first occurrence of its minimum.

    The input list is never mutated; an empty list yields an empty list.
    """
    if not numbers:
        return []
    smallest_at = numbers.index(min(numbers))
    return numbers[:smallest_at] + numbers[smallest_at + 1:]
# Manual checks; the expected results live in the trailing comments.
print(remove_smallest([1, 2, 3, 4, 5]))#, [2, 3, 4, 5], "Wrong result for [1, 2, 3, 4, 5]")
print(remove_smallest([5, 3, 2, 1, 4]))#, [5, 3, 2, 4], "Wrong result for [5, 3, 2, 1, 4]")
print(remove_smallest([1, 2, 3, 1, 1]))#, [2, 3, 1, 1], "Wrong result for [1, 2, 3, 1, 1]")
print(remove_smallest([]))#, [], "Wrong result for []")
# For each test case, print the rotation count of a left-rotated sorted
# array (i.e. the index of the minimum element).
for _ in range(int(input())):
    n = int(input())
    arr = list(map(int, input().split()))
    minVal = min(arr)
    maxVal = max(arr)
    indVal1 = arr.index(minVal)   # first position of the minimum
    indVal2 = arr.index(maxVal)   # first position of the maximum
    count_max = arr.count(maxVal)
    listlen = len(arr)
    maxind = listlen - 1
    if indVal1 == 0 and arr[maxind] == maxVal:
        # Array is already sorted: no rotation.
        # BUGFIX: a stray debug print('sdbjan') preceded this answer and
        # corrupted the expected output; it has been removed.
        print(0)
    elif indVal1 == 0 and arr[maxind] == minVal:
        # Minimum appears both at the front and the back (duplicates).
        print(indVal2 + count_max)
    else:
        print(indVal1)
9a1e98add784d971db2d08ff0aa0c777bea2438a | Python | jonjon1010/RockPaperScissor | /Main.py | UTF-8 | 3,388 | 3.921875 | 4 | [] | no_license | '''
Created on Jun 22, 2021
Program: Rock, Paper, Scissor Game
@author: Jonathan Nguyen
'''
import random
import math
# Best-of-N rock/paper/scissors against the computer: a player wins the
# match once they take the majority (ceil(rounds/2)) of the rounds.
play = "yes"
while play.lower() == "yes":
    print("This is a game of rock, paper, and scissor")
    rounds = input("How many rounds would you like to play: ")
    while rounds.isdigit() == False:
        print("The input enter was not a number.")
        rounds = input("Please enter how many rounds would you like to play again: ")
    userWins = 0
    compWins = 0
    while userWins < math.ceil(int(rounds)/2) and compWins < math.ceil(int(rounds)/2):
        print("----------------------------------------------------------------------------------------------")
        userChoice = input("Enter your choose of either rock, paper, or scissor: ")
        print("Your choose", userChoice.lower())
        # BUGFIX: the last comparison previously read `userChoice.lower != "scissor"`
        # (missing call parentheses), comparing the bound method to a string — always
        # True — so entering "scissor" was rejected forever.
        while userChoice.lower() != "rock" and userChoice.lower() != "paper" and userChoice.lower() != "scissor":
            print("\nYou have entered a wrong input.")
            userChoice = input("Please enter your choose of either rock, paper, or scissor again: ")
            print("You choose", userChoice.lower())
        ListChoice = ["rock", "paper", "scissor"]
        compChoose = random.choice(ListChoice)
        print("The computer choose", compChoose)
        # On a draw only the computer re-picks, until the choices differ.
        while userChoice == compChoose:
            print("\nIt's a draw, please enter your choose of either rock, paper, or scissor again: ")
            print("You choose", userChoice.lower())
            compChoose = random.choice(ListChoice)
            print("The computer choose", compChoose)
        if userChoice.lower() == "rock" and compChoose == "paper":
            print("paper bets rock, the winner is the computer.")
            compWins += 1
        elif userChoice.lower() == "paper" and compChoose == "rock":
            print("paper bets rock, the winner is the user.")
            userWins += 1
        elif userChoice.lower() == "paper" and compChoose == "scissor":
            print("scissor bets paper, the winner is the computer.")
            compWins += 1
        elif userChoice.lower() == "scissor" and compChoose == "paper":
            print("scissor bets paper, the winner is the user.")
            userWins += 1
        elif userChoice.lower() == "scissor" and compChoose == "rock":
            print("rock bets scissor, the winner is the computer.")
            # BUGFIX: this branch previously read `compWins +- 1`, an expression
            # statement (compWins + (-1)) that never recorded the computer's win.
            compWins += 1
        elif userChoice.lower() == "rock" and compChoose == "scissor":
            print("rocks bets scissor, the winner is the user.")
            userWins += 1
        print("The number of wins between the user and computer is:", "user -", userWins, ": computer -", compWins)
        print("----------------------------------------------------------------------------------------------")
    if userWins > compWins:
        print("The winner of the game is the user!!!")
    else:
        print("The winner of the game is the computer!!")
    play = input("Do you want to play again? choose either yes or no:")
    while play.lower() != "yes" and play.lower() != "no":
        play = input("You entered and invalid input.Do you want to play again? choose either yes or no: ")
    print("----------------------------------------------------------------------------------------------")
print("Thank you for playing!!!")
| true |
940dae119184842df5c65a3bda9215c74413cca4 | Python | nrkfeller/masterpy | /BeyondTheBasics/pytest/assignment.py | UTF-8 | 1,117 | 3.171875 | 3 | [] | no_license | import os
class ConfigKeyError(Exception):
    """Raised when a ConfigDict lookup misses; remembers the available keys."""

    def __init__(self, this, key):
        self.key = key
        self.keys = this.keys()

    def __str__(self):
        return f'{self.key} not found{self.keys}'
class ConfigDict(dict):
    """A dict persisted to a simple key=value text file.

    The file is created on first use, loaded on construction, and rewritten
    whenever a key is assigned.
    """

    def __init__(self, filename):
        self._filename = filename
        if not os.path.isfile(self._filename):
            try:
                open(self._filename, 'w').close()
            except IOError:
                raise IOError("arg to ConfigDict is not a valid pathname")
        with open(self._filename) as fh:
            for line in fh:
                # Split on the first '=' only, so values may contain '='.
                key, value = line.rstrip().split('=', 1)
                dict.__setitem__(self, key, value)

    def __getitem__(self, key):
        if key not in self:
            raise ConfigKeyError(self, key)
        return dict.__getitem__(self, key)

    def __setitem__(self, key, value):
        dict.__setitem__(self, key, value)
        # BUGFIX: the file was previously truncated ('w' mode) and only the
        # key being assigned was written, so every assignment discarded all
        # other persisted entries.  Rewrite the full mapping instead.
        with open(self._filename, 'w') as fh:
            for k, v in self.items():
                fh.write('{}={}\n'.format(k, v))
d2032956931cc8bbb4ed6a0c8fb44a6260269283 | Python | r4vi/weboutlook | /weboutlook/scraper.py | UTF-8 | 8,521 | 2.875 | 3 | [] | no_license | """
Microsoft Outlook Web Access scraper
Retrieves full, raw e-mails from Microsoft Outlook Web Access by
screen scraping. Can do the following:
* Log into a Microsoft Outlook Web Access account with a given username
and password.
* Retrieve all e-mail IDs from the first page of your Inbox.
* Retrieve the full, raw source of the e-mail with a given ID.
* Delete an e-mail with a given ID (technically, move it to the "Deleted
Items" folder).
The main class you use is OutlookWebScraper. See the docstrings in the code
and the "sample usage" section below.
This module does no caching. Each time you retrieve something, it does a fresh
HTTP request. It does cache your session, though, so that you only have to log
in once.
Updated by Greg Albrecht <gba@gregalbrecht.com>
Based on http://code.google.com/p/weboutlook/ by Adrian Holovaty <holovaty@gmail.com>.
"""
# Documentation / sample usage:
#
# # Throws InvalidLogin exception for invalid username/password.
# >>> s = OutlookWebScraper('https://webmaildomain.com', 'username', 'invalid password')
# >>> s.login()
# Traceback (most recent call last):
# ...
# scraper.InvalidLogin
#
# >>> s = OutlookWebScraper('https://webmaildomain.com', 'username', 'correct password')
# >>> s.login()
#
# # Display IDs of messages in the inbox.
# >>> s.inbox()
# ['/Inbox/Hey%20there.EML', '/Inbox/test-3.EML']
#
# # Display IDs of messages in the "sent items" folder.
# >>> s.get_folder('sent items')
# ['/Sent%20Items/test-2.EML']
#
# # Display the raw source of a particular message.
# >>> print s.get_message('/Inbox/Hey%20there.EML')
# [...]
#
# # Delete a message.
# >>> s.delete_message('/Inbox/Hey%20there.EML')
# Copyright (C) 2006 Adrian Holovaty <holovaty@gmail.com>
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59 Temple
# Place, Suite 330, Boston, MA 02111-1307 USA
import re, socket, urllib, urlparse, urllib2, socket
from cookielib import CookieJar
import requests
from Cookie import SimpleCookie
import logging
from logging.handlers import *
__version__ = '0.1.2'
__author__ = 'Ravi Kotecha <kotecha.ravi@g'  # NOTE(review): author e-mail looks truncated

# Module-level logger that echoes INFO and above to the console.
logger = logging.getLogger('weboutlook')
logger.setLevel(logging.INFO)
consolelogger = logging.StreamHandler()
consolelogger.setLevel(logging.INFO)
logger.addHandler(consolelogger)

# Abort any network operation that stalls for more than 15 seconds.
socket.setdefaulttimeout(15)
class InvalidLogin(Exception):
    """Raised when Outlook Web Access rejects the supplied credentials."""
    pass
class RetrievalError(Exception):
    """Raised when a page or message cannot be fetched or parsed as expected."""
    pass
def top_url(url):
    """Return the scheme://host/ root of *url* (e.g. 'https://example.com/')."""
    logger.debug(locals())
    p = urlparse.urlparse(url)
    return '%s://%s/' % (p.scheme, p.netloc)
class CookieScraper(object):
    "Scraper that keeps track of getting and setting cookies."

    def __init__(self):
        logger.debug(locals())
        # self.username / self.password must already be set by the subclass
        # (see OutlookWebScraper.__init__) before this runs.
        self._auth = (self.username, self.password)
        self._cookies = CookieJar()

    # NOTE(review): `headers={}` is a mutable default argument; harmless here
    # because it is never mutated, but worth replacing with None eventually.
    def get_page(self, url, post_data=None, headers={}):
        """
        Helper method that gets the given URL, handling the sending and storing
        of cookies. Returns the requested page as a string.
        """
        logger.debug(locals())
        if not post_data:
            # NOTE(review): GET sends basic auth but ignores the headers
            # argument, while POST sends headers but no auth — confirm this
            # asymmetry is intentional.
            request = requests.get(url, cookies=self._cookies, auth=self._auth)
        else:
            request = requests.post(url, data=post_data, cookies=self._cookies, headers=headers)
        return request.content
class OutlookWebScraper(CookieScraper):
    """Screen-scraper for a Microsoft Outlook Web Access account.

    Note: this module is Python 2 code (`raise X, "msg"`, `except X, e`,
    urllib.quote/urlencode).
    """

    def __init__(self, domain, username, password):
        logger.debug(locals())
        self.domain = domain
        self.username, self.password = username, password
        self.is_logged_in = False
        self.base_href = None
        # CookieScraper.__init__ reads self.username/self.password, so the
        # credentials must be assigned before calling it.
        super(OutlookWebScraper, self).__init__()

    def login(self):
        """Authenticate against OWA; raises InvalidLogin on bad credentials."""
        logger.debug(locals())
        destination = urlparse.urljoin(self.domain, 'exchange/')
        url = destination
        html = self.get_page(url=url)
        if 'You could not be logged on to Outlook Web Access' in html:
            raise InvalidLogin
        #import pdb; pdb.set_trace()
        matcher = re.compile(r'(?i)<BASE href="([^"]*)">', re.IGNORECASE)
        m = matcher.search(html)
        if not m:
            # No <base href> on the page: fall back to forms-based login.
            forms_dest = urlparse.urljoin(self.domain, 'CookieAuth.dll?Logon')
            post_data = {
                'destination': urlparse.urljoin(self.domain, 'exchange'),
                'flags':'0',
                'username':self.username,
                'password':self.password,
                'SubmitCreds':'Log On',
                'forcedownlevel': '0',
                'trusted':'4'}
            header = {'Content-Type':'application/x-www-form-urlencoded'}
            html = self.get_page(forms_dest, post_data, headers=header)
            m = matcher.search(html)
            # import pdb; pdb.set_trace()
        if not m:
            raise RetrievalError, "Couldn't find <base href> on page after logging in."
        # All subsequent folder/message URLs are relative to this base.
        self.base_href = m.group(1)
        self.is_logged_in = True

    def inbox(self):
        """
        Returns the message IDs for all messages on the first page of the
        Inbox, regardless of whether they've already been read.
        """
        logger.debug(locals())
        return self.get_folder('/Inbox')

    def get_folder(self, folder_name, page=1):
        """
        Returns the message IDs for all messages on the first page of the
        folder with the given name, regardless of whether the messages have
        already been read. The folder name is case insensitive.
        """
        logger.debug(locals())
        if not self.is_logged_in: self.login()
        url = self.base_href + urllib.quote(folder_name) + '/?Cmd=contents&View=Messages&Page=%s' % page
        html = self.get_page(url)
        # OWA clamps the Page form field to the last page; if the reported
        # page is smaller than the one requested, the request overshot.
        page_info = re.search(r'<[^<>]+name="Page"[^<>]+>', html)
        if page_info:
            page_num = re.search(r'value="(\d+)', page_info.group())
            if page_num and int(page_num.group(1)) < page:
                raise RetrievalError("Invalid page number: %s is too large." % page)
        message_urls = re.findall(r'(?i)NAME=MsgID value="([^"]*)"', html)
        return message_urls

    def get_folder_all_messages(self, folder_name):
        """
        Returns the message IDs for all messages in the entire folder
        of a given name, regardless of whether or not the messages have
        already been read. The folder name is case insensitive.
        """
        # Walk pages until get_folder raises the "Invalid page" error.
        page = 1
        email_set = set()
        emails = []
        while True:
            try:
                message_ids = self.get_folder(folder_name, page)
            except RetrievalError, e:
                if not e.message.startswith('Invalid page'):
                    raise
                break
            else:
                for message_id in message_ids:
                    # NOTE(review): email_set is never added to, so this
                    # membership test is always True and duplicates are not
                    # actually filtered — confirm and fix upstream.
                    if message_id not in email_set:
                        emails.append(message_id)
                page += 1
        return emails

    def get_message(self, msgid):
        "Returns the raw e-mail for the given message ID."
        logger.debug(locals())
        if not self.is_logged_in: self.login()
        # Sending the "Translate=f" HTTP header tells Outlook to include
        # full e-mail headers. Figuring that out took way too long.
        return self.get_page(self.base_href + msgid + '?Cmd=body', headers=[('Translate', 'f')])

    def delete_message(self, msgid):
        "Deletes the e-mail with the given message ID."
        logger.debug(locals())
        if not self.is_logged_in: self.login()
        return self.get_page(self.base_href + msgid, urllib.urlencode({
            'MsgId': msgid,
            'Cmd': 'delete',
            'ReadForm': '1',
        }))

    def delete_message_list(self, folder, msgid_list):
        "Deletes every e-mail in msgid_list from the given folder."
        logger.debug(locals())
        if not self.is_logged_in: self.login()
        return self.get_page(self.base_href + folder,
            urllib.urlencode({'Cmd': 'delete','ReadForm': '1'})+'&'+urllib.urlencode([('MsgId',x) for x in msgid_list]))
| true |
785a26e51d78a1e7c4a02fdb4595782b0dc7e3c2 | Python | SamruddhiShetty/Sudoku_player | /sudoku_solver.py | UTF-8 | 4,393 | 3.234375 | 3 | [] | no_license | import pygame
#module use to execute get and post request
import requests
#initialising the width and the background of the board displayed
WIDTH=550
background_color=(251,247,245)
#just to distinct the original numbers
original_grid_num_color=(52, 31, 151)
buffer=5
#getting the values from an API suGOku
response=requests.get("https://sugoku.herokuapp.com/board?difficulty=easy")
grid=response.json()['board'] #this is the api part
#crearting another grid with same value just to compare
grid_original=[[grid[x][y] for y in range(len(grid[0]))] for x in range(len(grid))]
def isEmpty(num):
    """Return True when a cell value represents an empty square (0)."""
    return num == 0
def isValid(position, num):
    """Check whether placing num at position (row, col) keeps the grid valid.

    Enforces the three sudoku constraints against the module-level grid:
    the number may not already appear in the same row, the same column,
    or the same 3x3 box.
    """
    row, col = position
    size = len(grid[0])
    # Row and column constraints.
    if any(grid[row][i] == num for i in range(size)):
        return False
    if any(grid[i][col] == num for i in range(size)):
        return False
    # Box constraint: locate the box's top-left corner first.
    box_row = row // 3 * 3
    box_col = col // 3 * 3
    for i in range(3):
        for j in range(3):
            if grid[box_row + i][box_col + j] == num:
                return False
    return True
# Global completion flag: set to 1 once the grid is fully solved so the
# recursion can unwind without undoing its work.
solved=0
def sudoku_solver(win):#main algo
    """Backtracking solver that animates each candidate digit onto *win*."""
    myFont=pygame.font.SysFont('Comic Sans MS', 35)
    # Traverse the grid recursively, trying digits in the first empty cell.
    for i in range(0, len(grid[0])):
        for j in range(0, len(grid[0])):
            if (isEmpty(grid[i][j])):
                for k in range(1, 10):
                    if isValid((i, j), k):
                        grid[i][j]=k
                        #pygame.draw.rect(win, background_color, ((j+1)*50 + buffer, (i+1)*50+ buffer,50 -2*buffer , 50 - 2*buffer))
                        value=myFont.render(str(k), True, (0,0,0))
                        win.blit(value, (((j+1)*50+15, (i+1)*50)))
                        pygame.display.update()
                        pygame.time.delay(15)
                        sudoku_solver(win)
                        global solved
                        if (solved==1):
                            return
                        # If we arrive at a position where we realise k is
                        # invalid, clear the cell and erase it from the screen.
                        grid[i][j]=0
                        pygame.draw.rect(win, background_color, ((j+1)*50 + buffer, (i+1)*50+ buffer,50 -2*buffer , 50 - 2*buffer))
                        pygame.display.update()
                # No digit fits in this empty cell: backtrack.
                return
    # No empty cell left: the puzzle is solved.
    solved=1
def main():
    """Set up the pygame board, draw the starting grid, then animate the solve."""
    #initialising pygame
    pygame.init()
    #setting the window/display
    win=pygame.display.set_mode((WIDTH, WIDTH))
    #setting the caption
    pygame.display.set_caption("Sudoku")
    #filling the window with the background-color
    win.fill(background_color)
    #font of the numbers
    myFont=pygame.font.SysFont('Comic Sans MS', 35)
    #setting the grid
    for i in range(0, 10):
        if (i%3)==0:
            #every 3rd line should be a bold line to separate boxes
            pygame.draw.line(win, (0, 0, 0), (50+50*i, 50), (50+50*i, 500), 4)
            pygame.draw.line(win, (0, 0, 0), (50, 50+50*i), (500, 50+50*i), 4)
        #line_drawing(window, black, starting_cordinate, ending_cor, line_width)
        pygame.draw.line(win, (0, 0, 0), (50+50*i, 50), (50+50*i, 500), 2)#vertical
        pygame.draw.line(win, (0, 0, 0), (50, 50+50*i), (500, 50+50*i), 2)#horizontal
    pygame.display.update()
    # Draw the puzzle's given digits in their distinct colour.
    for i in range(0, len(grid[0])):
        for j in range(0, len(grid[0])):
            if (0<grid[i][j]<10):
                value=myFont.render(str(grid[i][j]), True, original_grid_num_color)
                win.blit(value, ((j+1)*50+15, (i+1)*50))
                #here x coordinate has to move horizontally - hence 'j';
                #y should move vertically - hence 'i'.
                #+15 is the buffer value / offset; +1 because indices start at 0.
    pygame.display.update()
    sudoku_solver(win)
    # Keep the window open until the user closes it.
    while True:
        for event in pygame.event.get():
            #quitting the window
            if event.type==pygame.QUIT:
                pygame.quit()
                return
main()
| true |
7fa5fcc5a61395ed8f600332ff2c52d6cb23717d | Python | CarlosMiraGarcia/Honours-project | /point_cloud_ops/point_cloud_ops.py | UTF-8 | 3,943 | 2.921875 | 3 | [] | no_license | import open3d as o3d
import numpy as np
def remove_outliers(pcd, neighbors, ratio):
    """Drop statistical outliers from the point cloud *pcd*.

    neighbors: how many neighbours are considered when estimating each
    point's mean distance; ratio: the standard-deviation threshold beyond
    which a point counts as an outlier.  Returns the cleaned point cloud.
    """
    _, kept_indices = pcd.remove_statistical_outlier(nb_neighbors=neighbors,
                                                     std_ratio=ratio)
    return pcd.select_by_index(kept_indices)
def array_to_point_cloud(pc_array):
    """Wrap a numpy array of points in an open3d PointCloud (no normals)."""
    cloud = o3d.geometry.PointCloud()
    cloud.points = o3d.utility.Vector3dVector(pc_array)
    return cloud
def array_to_point_cloud_with_normals(pc_array_points, pc_array_normals):
    """Wrap numpy point and normal arrays in an open3d PointCloud."""
    cloud = o3d.geometry.PointCloud()
    cloud.points = o3d.utility.Vector3dVector(pc_array_points)
    cloud.normals = o3d.utility.Vector3dVector(pc_array_normals)
    return cloud
def save_as_pcd(save_file_path, point_cloud):
    """Saves point cloud to a given path (binary, compressed PCD format)."""
    o3d.io.write_point_cloud(save_file_path, point_cloud, write_ascii=False, compressed=True)
def append_points_to_array(nparray1, npaarray2):
    """Stack the rows of the second point array under the first and return the result."""
    return np.vstack((nparray1, npaarray2))
def crop_using_plane(array_points, floor_a, floor_b, floor_c, floor_d):
    """From a point cloud, keep only the data points lying above a given plane.

    array_points rows hold the xyz coordinates followed by the normal vector;
    points closer than a small margin above plane ax + by + cz + d are dropped.
    """
    margin = 5  # threshold distance above the plane below which points are removed
    kept_rows = [row for row in array_points
                 if -((floor_a * row[0]) + (floor_b * row[1]) + (floor_c * row[2])) >= floor_d + margin]
    kept_array = np.asarray(kept_rows)
    # Split each row into its point half and its normal half.
    kept_points, kept_normals = np.hsplit(kept_array, 2)
    return array_to_point_cloud_with_normals(kept_points, kept_normals)
def leaves_segmentation(point_cloud, eps_value, min_points_value, saving_path_postprocessing):
    """Segments a point cloud containing leaves into individual leaf clusters\n
    \tUses DBSCAN [Martin Ester, 1996]\n
    \thttp://www.open3d.org/docs/latest/tutorial/Basic/pointcloud.html
    """
    # creates a label por each of the points depending on the cluster they are in,
    # with a -1 for whatever is considered noise
    labels = np.array(point_cloud.cluster_dbscan(eps=eps_value, min_points=min_points_value, print_progress=False))
    total_labels = labels.max() # Finds the number of labels
    pc_points = [] # List with the different point clouds points
    pc_normals = [] # List with the different point clouds normals
    # For each label created, this function appends the data points to each of the lists
    # where the label from each set of data points matches the current cluster label
    # NOTE: noise points (label -1) are deliberately excluded by starting at label 0.
    for label in range (total_labels + 1):
        pc_points.append(np.asarray(point_cloud.points)[labels == label])
        pc_normals.append(np.asarray(point_cloud.normals)[labels == label])
    # Saves each leaf into a point cloud file
    for i in range (len(pc_points)):
        save_as_pcd(saving_path_postprocessing + 'leaf_' + str(i + 1) + '.pcd', array_to_point_cloud_with_normals(pc_points[i], pc_normals[i]))
17fd0950cf7e115c8c7206416837e80805f3857e | Python | heyzeng/Awesome-Python | /Python-Basic/python30.py | UTF-8 | 817 | 4.4375 | 4 | [] | no_license | # -*- coding:UTF-8 -*-
# 30道python基本入门小练习
# 1. 重复元素判定
# 以下方法可以检查给定列表是不是存在重复元素,它会使用 set() 函数来移除所有重复元素
def all_unique(lst):
    """Return True when no element of lst occurs more than once."""
    return len(set(lst)) == len(lst)
# Demo: x contains a duplicate (prints False), y does not (prints True).
x = [1, 2, 2, 3, 4]
y = [1, 2]
print(all_unique(x))
print(all_unique(y))
print("-----------")
# 2. 字符元素组成判定
# 检查两个字符串的组成元素是不是一样的。
from collections import Counter
def fs(first, second):
    """Return True when the two strings are anagrams (same multiset of characters)."""
    return Counter(second) == Counter(first)
print(fs("ssa", "ass"))
print("-------------")
# 3. Memory usage
# The following code checks how much memory the variable `value` occupies.
import sys
value = 1
print(sys.getsizeof(value))
print("------------------")
# 4
# 5. dict.get with a default: key 'c' is missing, so the fallback 3 is returned.
d = {'a': 1, 'b': 2}
print(d.get('c', 3))
| true |
455bbbee5245c6899ec883c349f428f87cca4f0b | Python | akinoriosamura/Semantic-Segmentation-Suite | /loss.py | UTF-8 | 548 | 2.671875 | 3 | [] | no_license | import tensorflow as tf
def dice_loss(input, target):
    """Soft Dice loss for binary segmentation.

    input  -- predicted values tensor, any shape (flattened internally)
    target -- ground-truth mask of the same total size; cast to float32

    Returns 1 - Dice coefficient; the +1 smoothing term in numerator and
    denominator avoids division by zero on empty masks.
    (Removed: leftover debug print of the product shape and dead
    commented-out rescaling code.)
    """
    smooth = 1.
    iflat = tf.reshape(input, [-1])
    tflat = tf.cast(tf.reshape(target, [-1]), tf.float32)
    intersection = tf.reduce_sum(iflat * tflat)
    return 1 - ((2. * intersection + smooth) / (tf.reduce_sum(iflat) + tf.reduce_sum(tflat) + smooth))
| true |
0ac5ef0c1e8b45510b5d511816107d7018091321 | Python | hackergong/Python-TrainingCourseLearning | /day008/8-访问限制/访问限制.py | UTF-8 | 1,926 | 3.984375 | 4 | [] | no_license | class Person(object):
#创建对象的时候自动咨执行
    def __init__(self,name,age,height,weight,money):
        """Runs automatically when the object is created; stores the attributes."""
        # define the public attributes
        self.name = name
        self.height = height
        self.weight = weight
        # double leading AND trailing underscores: a "dunder" name, still accessible from outside
        self.__age__ = age
        # two leading underscores make the attribute private (name-mangled to _Person__money)
        self.__money = money #Person__money
    def run(self):
        """Methods inside the class may read the private attribute directly."""
        print(self.__money)
#通过内部的方法,取修改私有属性
#通过自定义的方法时间对私有属性的赋值和取值
    def setMoney(self,money):
        """Assign the private __money attribute, filtering out invalid values."""
        # data filtering: clamp negative amounts to zero
        if money < 0:
            money = 0
        self.__money = money
    def getMoney(self):
        """Return the value of the private __money attribute."""
        return self.__money
per = Person("tom",20,70,29,10000)
# per.age = 10
# print(per.age)
# To keep an attribute from being accessed directly from outside, prefix it with two underscores (__).
# In Python, two leading underscores turn an attribute into a private attribute.
# per.__money = 0
# print(per.__money) # this would still print, because a dynamic language can add attributes at any time
# (the assignment above would simply create a brand-new per.__money attribute)
# print(per.__money) # the private attribute cannot be accessed directly from outside
# per.run()
per.setMoney(10000)
print(per.getMoney())
# per.__Money
# would raise an error: the private attribute cannot be accessed directly because the
# Python interpreter renames __Money to _Person__money; _Person__money still works,
# but is strongly discouraged - different interpreters may mangle names differently.
per._Person__money = 1
print(per.getMoney())
# In Python, __XX__ names are special variables and can be accessed directly:
# self.__age__ = age
print(per.__age__)
# In Python, _XXXX instance variables are also accessible from outside.
# By convention such a variable means: "although I can be accessed,
# please treat me as private - do not access me directly."
| true |
1592e28f72b673f4c0b696457cf972f21253aade | Python | codemaster-22/Compiler | /src/Compiler.py | UTF-8 | 42,547 | 2.546875 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[184]:
import sys
# In[185]:
def generatelist(code):
    """Split a line of Jack source into whitespace-separated words, keeping
    each double-quoted string constant together as a single element.

    Raises AssertionError when a string constant has no closing quote.
    """
    if '"' not in code:
        return code.split()
    start = code.find('"')
    words = code[:start].split()
    end = code[start + 1:].find('"')
    assert end != -1, "error Invalid String type"
    closing = start + end + 2  # index one past the closing quote
    return words + [code[start:closing]] + generatelist(code[closing:])
# In[186]:
def tokenisation(contents):
    """Tokenise Jack source lines into XML-style token strings.

    contents -- list of raw source lines (one string per line)

    Returns [final, contents] where final is the flat token list wrapped in
    <tokens>...</tokens> and contents pairs each surviving line number with
    its list of token strings (used later for error reporting).
    """
    # Pair every line with its 1-based line number.
    contents_1=[]
    for i in range(len(contents)):
        contents_1.append([i+1,contents[i]])
    # Strip newline and tab characters in place.
    for i in range(len(contents_1)):
        while('\n' in contents_1[i][1]):
            j=contents_1[i][1].find('\n')
            contents_1[i][1]=contents_1[i][1][:j]+contents_1[i][1][j+1:]
        while('\t' in contents_1[i][1]):
            j=contents_1[i][1].find('\t')
            contents_1[i][1]=contents_1[i][1][:j]+contents_1[i][1][j+1:]
    # Remove /* ... */ block comments; b is True while we are OUTSIDE a block comment.
    b=True
    i=0
    while(i<len(contents_1)):
        if(b):
            if('/*' in contents_1[i][1]):
                j=contents_1[i][1].find('/*')
                if('*/' in contents_1[i][1][j+2:]):
                    k=contents_1[i][1][j+2:].find('*/')
                    contents_1[i][1]=contents_1[i][1][:j]+contents_1[i][1][j+k+4:]
                    # Re-scan the same line: it may hold another comment.
                    i=i-1
                else:
                    contents_1[i][1]=contents_1[i][1][:j]
                    b=False
        else:
            if('*/' in contents_1[i][1]):
                p=contents_1[i][1].find('*/')
                contents_1[i][1]=contents_1[i][1][p+2:]
                b=True
                i=i-1
            else:
                contents_1[i][1]=''
        i=i+1
    # Remove // line comments.
    for i in range(len(contents_1)):
        if('//' in contents_1[i][1]):
            j=contents_1[i][1].find('//')
            contents_1[i][1]=contents_1[i][1][:j]
    # Keep only the lines that still have content.
    contents=[]
    for i in contents_1:
        if(i[1]!=''):
            contents.append(i)
    keywords=['class','constructor','method','function','int','boolean','char','void','var','static','field','let','do','if']
    keywords+=['else','while','return','true','false','null','this']
    symbols=['(',')','[',']','{','}',',',';','=','.','+','-','*','/','&','|','~','<','>']
    # Split each line into words, keeping quoted string constants intact.
    for i in range(len(contents)):
        if('"'not in contents[i][1]):
            contents[i][1]=contents[i][1].split()
        else:
            contents[i][1]=generatelist(contents[i][1])
    # Classify each word into keyword / identifier / integer / string / symbol tokens.
    for i in range(len(contents)):
        a=[]
        for j in contents[i][1]:
            if(j[0]=='"'):
                a=a+['<stringConstant>'+' '+j[1:-1]+' '+'</stringConstant>']
            else:
                p=''
                for k in j:
                    if('_'==k):
                        p+=k
                    elif(k.isalpha()):
                        if(len(p)>0 and p[0].isnumeric()):
                            assert(1==2),"Invalid StringConstant"
                        else:
                            p+=k
                    elif(k.isnumeric()):
                        p+=k
                    else:
                        # A symbol terminates the pending word: flush it first.
                        if(len(p)>0):
                            if(p.isnumeric()):
                                a=a+['<integerConstant>'+' '+p+' '+'</integerConstant>']
                            elif(p in keywords):
                                a=a+['<keyword>'+' '+p+' '+'</keyword>']
                            else:
                                a=a+['<identifier>'+' '+p+' '+'</identifier>']
                            p=''
                        # Escape XML special characters so the token file stays well-formed.
                        if(k=='<'):
                            k='&lt;'
                        if(k=='>'):
                            k='&gt;'
                        if(k=='&'):
                            k='&amp;'
                        a=a+['<symbol>'+' '+k+' '+'</symbol>']
                # NOTE(review): b is leftover block-comment state (True unless the file
                # ended inside /* */); this trailing flush presumably should be
                # unconditional - confirm.
                if(b):
                    if(len(p)>0):
                        if(p.isnumeric()):
                            a=a+['<integerConstant>'+' '+p+' '+'</integerConstant>']
                        elif(p in keywords):
                            a=a+['<keyword>'+' '+p+' '+'</keyword>']
                        else:
                            a=a+['<identifier>'+' '+p+' '+'</identifier>']
        contents[i][1]=a
    final=['<tokens>']
    for i in contents:
        final+=i[1]
    final+=['</tokens>']
    return [final,contents]
# In[187]:
def typeof(a):
    """True when token a can serve as a Jack type: int, char, boolean or a class name."""
    return any(t in a for t in ('<identifier>', 'int', 'char', 'boolean'))
# In[188]:
def statement(a):
    """True when token a starts one of the five Jack statement kinds."""
    return any(k in a for k in ('let', 'while', 'if', 'do', 'return'))
# In[189]:
def op(a):
    """True when token a is a binary-operator symbol token.

    Bug fix: the operator is matched surrounded by spaces (token strings
    look like '<symbol> X </symbol>'), because the XML tags themselves
    contain '<', '>' and '/' - the original raw substring test therefore
    matched '>' inside '<symbol>' and classified EVERY token as an
    operator. The escaped forms &amp;, &lt; and &gt; are included because
    that is how the tokeniser emits '&', '<' and '>'.
    """
    for i in ['+', '-', '*', '/', '&', '|', '<', '>', '=', '&amp;', '&lt;', '&gt;']:
        if ' ' + i + ' ' in a:
            return True
    return False
# In[190]:
def KeywordConstant(a):
    """True when token a holds one of the Jack keyword constants."""
    return any(k in a for k in ('true', 'false', 'null', 'this'))
# In[191]:
def unaryOp(a):
    """True when token a contains one of the unary operators '-' or '~'."""
    return any(u in a for u in '-~')
# In[192]:
def CompilevarDec(a,p):
    """Parse a varDec: 'var' type varName (',' varName)* ';'.

    a -- token list (pairs of token string and source line number)
    p -- indentation prefix for the XML parse-tree output

    Returns [xml_lines, tokens_consumed]; raises AssertionError on a
    malformed declaration.
    """
    b=[]
    index=0
    b+=[(p+'<varDec>',-1)]
    b+=[(p+' '+a[index][0],a[index][1])]
    index+=1
    if (not typeof(a[index][0])):
        assert(1==2),"Error in Line varDec"+str(a[index][1])+"expected type"
    else:
        b+=[(p+' '+a[index][0],a[index][1])]
        index+=1
    if('identifier' not in a[index][0]):
        assert(1==2),"Error in Line varDec"+str(+a[index][1])+"expected identifier"
    else:
        b+=[(p+' '+a[index][0],a[index][1])]
        index+=1
    # Additional variable names separated by commas.
    while(',' in a[index][0]):
        b+=[(p+' '+a[index][0],a[index][1])]
        index+=1
        if('identifier' not in a[index][0]):
            assert(1==2),"Error in Line varDec "+str(a[index][1])+"expected identifier"
        else:
            b+=[(p+' '+a[index][0],a[index][1])]
            index+=1
    if(';' not in a[index][0]):
        assert(1==2),"Error in Line varDec "+str(a[index][1])+"expected ;"
    else:
        b+=[(p+' '+a[index][0],a[index][1])]
        index+=1
    b+=[(p+'</varDec>',-1)]
    return [b,index]
# In[193]:
def CompileexpressionList(a,p):
    """Parse a (possibly empty) comma-separated expression list.

    Returns [xml_lines, tokens_consumed]. An immediately-following ')'
    token means the list is empty.
    """
    b=[]
    b+=[(p+'<expressionList>',-1)]
    index=0
    if(')' not in a[index][0]):
        d=Compileexpression(a[index:],p+' ')
        b+=d[0]
        index+=d[1]
        while(',' in a[index][0]):
            b+=[(p+' '+a[index][0],a[index][1])]
            index+=1
            d=Compileexpression(a[index:],p+' ')
            b+=d[0]
            index+=d[1]
    b+=[(p+'</expressionList>',-1)]
    return [b,index]
# In[194]:
def CompilesubroutineCall(a,p):
    """Parse a subroutine call: name '(' exprList ')' or name '.' name '(' exprList ')'.

    a -- token list (pairs of token string and source line number)
    p -- indentation prefix for the XML parse-tree output

    Returns [xml_lines, tokens_consumed]; raises AssertionError on a
    malformed call. (Removed: two bare no-op `a[index]` expression
    statements left over from debugging.)
    """
    b=[]
    index=0
    b+=[(p+a[index][0],a[index][1])]
    index+=1
    if('.' in a[index][0]):
        b+=[(p+a[index][0],a[index][1])]
        index+=1
        assert('identifier'in a[index][0]),"Error in Line:subroutineCall "+str(a[index][1])+"Expecting Identifier"
        b+=[(p+a[index][0],a[index][1])]
        index+=1
    assert('(' in a[index][0]),"Error in Line subroutineCall "+str(a[index][1])+"Expecting Symbol ("
    b+=[(p+a[index][0],a[index][1])]
    index+=1
    d=CompileexpressionList(a[index:],p)
    b+=d[0]
    index+=d[1]
    assert(')'in a[index][0]),"Error in Line subroutineCall "+str(a[index][1])+"Expecting Symbol )"
    b+=[(p+a[index][0],a[index][1])]
    index+=1
    return [b,index]
# In[195]:
def Compileterm(a,p):
    """Parse a Jack term: constant, keyword constant, unary op + term,
    variable, array access, subroutine call, or parenthesised expression.

    Returns [xml_lines, tokens_consumed]; raises AssertionError when the
    token cannot start a term.
    """
    b=[]
    index=0
    b+=[(p+'<term>',-1)]
    if('integerConstant' in a[index][0]):
        b+=[(p+' '+a[index][0],a[index][1])]
        index+=1
    elif('stringConstant' in a[index][0]):
        b+=[(p+' '+a[index][0],a[index][1])]
        index+=1
    elif(KeywordConstant(a[index][0])):
        b+=[(p+' '+a[index][0],a[index][1])]
        index+=1
    elif(unaryOp(a[index][0])):
        b+=[(p+' '+a[index][0],a[index][1])]
        index+=1
        d=Compileterm(a[index:],p+' ')
        b+=d[0]
        index+=d[1]
    elif('identifier' in a[index][0]):
        # Look ahead one token to distinguish plain variable, call and array access.
        if ('(' in a[index+1][0] or '.' in a[index+1][0]):
            d=CompilesubroutineCall(a[index:],p+' ')
            b+=d[0]
            index+=d[1]
        elif('[' in a[index+1][0]):
            b+=[(p+' '+a[index][0],a[index][1]),(p+' '+a[index+1][0],a[index][1])]
            index+=2
            d=Compileexpression(a[index:],p+' ')
            b+=d[0]
            index+=d[1]
            assert(']' in a[index][0]),"Error in Line: term "+str(a[index][1])+"Expected ]"
            b+=[(p+' '+a[index][0],a[index][1])]
            index+=1
        else:
            b+=[(p+' '+a[index][0],a[index][1])]
            index+=1
    elif('(' in a[index][0]):
        b+=[(p+' '+a[index][0],a[index][1])]
        index+=1
        d=Compileexpression(a[index:],p+' ')
        b+=d[0]
        index+=d[1]
        # NOTE(review): this message interpolates the token text (a[index][0]);
        # the line number a[index][1] was probably intended - confirm.
        assert(')' in a[index][0]),"Error in Line: term "+str(a[index][0])+"Expected )"
        b+=[(p+' '+a[index][0],a[index][1])]
        index+=1
    else:
        assert(1==2),"Error in line term "+str(a[index][1])+"Expecting term"
    b+=[(p+'</term>',-1)]
    return [b,index]
# In[196]:
def Compileexpression(a,p):
    """Parse an expression: term (op term)*.

    Returns [xml_lines, tokens_consumed].
    """
    b=[]
    index=0
    b+=[(p+'<expression>',-1)]
    d=Compileterm(a[index:],p+' ')
    b+=d[0]
    index+=d[1]
    while(op(a[index][0])):
        b+=[(p+' '+a[index][0],a[index][1])]
        index+=1
        d=Compileterm(a[index:],p+' ')
        b+=d[0]
        index+=d[1]
    b+=[(p+'</expression>',-1)]
    return [b,index]
# In[197]:
def CompilewhileStatement(a,p):
    """Parse a while statement: 'while' '(' expression ')' '{' statements '}'.

    Returns [xml_lines, tokens_consumed]; raises AssertionError on a
    missing delimiter.
    """
    b=[]
    index=0
    b+=[(p+'<whileStatement>',-1)]
    b+=[(p+' '+a[index][0],a[index][1])]
    index+=1
    if('(' not in a[index][0]):
        assert(1==2),"Error in Line whileStatement "+str(a[index][1])+"Expecting ("
    else:
        b+=[(p+' '+a[index][0],a[index][1])]
        index+=1
    d=Compileexpression(a[index:],p+' ')
    b+=d[0]
    index+=d[1]
    if(')' not in a[index][0]):
        assert(1==2),"Error in Line whileStatement "+str(a[index][1])+"Expecting )"
    else:
        b+=[(p+' '+a[index][0],a[index][1])]
        index+=1
    if('{' not in a[index][0]):
        assert(1==2),"Error in Line whileStatement "+str(a[index][1])+"Expecting {"
    else:
        b+=[(p+' '+a[index][0],a[index][1])]
        index+=1
    d=Compilestatements(a[index:],p+' ')
    b+=d[0]
    index+=d[1]
    if ('}' not in a[index][0]):
        assert(1==2),"Error in Line whileStatement "+str(a[index][1])+"Expecting }"
    else:
        b+=[(p+' '+a[index][0],a[index][1])]
        index+=1
    b+=[(p+'</whileStatement>',-1)]
    return [b,index]
# In[198]:
def CompileReturnStatement(a,p):
    """Parse a return statement: 'return' expression? ';'.

    Returns [xml_lines, tokens_consumed].
    """
    b=[]
    index=0
    b+=[(p+'<returnStatement>',-1)]
    b+=[(p+' '+a[index][0],a[index][1])]
    index+=1
    # A ';' right after 'return' means there is no return expression.
    if (';' not in a[index][0]):
        d=Compileexpression(a[index:],p+' ')
        b+=d[0]
        index+=d[1]
        if(';' not in a[index][0]):
            assert(1==2),"Error in Line ReturnStatement"+str(a[index][1])+"Expecting ;"
        else:
            b+=[(p+' '+a[index][0],a[index][1])]
            index+=1
    else:
        b+=[(p+' '+a[index][0],a[index][1])]
        index+=1
    b+=[(p+'</returnStatement>',-1)]
    return [b,index]
# In[199]:
def CompiledoStatement(a,p):
    """Parse a do statement: 'do' subroutineCall ';'.

    Returns [xml_lines, tokens_consumed].
    """
    b=[]
    index=0
    b+=[(p+'<doStatement>',-1)]
    b+=[(p+' '+a[index][0],a[index][1])]
    index+=1
    d=CompilesubroutineCall(a[index:],p+' ')
    b+=d[0]
    index+=d[1]
    if(';' not in a[index][0]):
        assert(1==2),"Error in Line doStatement"+str(a[index][1])+"Expecting ;"
    else:
        b+=[(p+' '+a[index][0],a[index][1])]
        index+=1
    b+=[(p+'</doStatement>',-1)]
    return [b,index]
# In[200]:
def CompileifStatement(a,p):
    """Parse an if statement with optional else clause:
    'if' '(' expression ')' '{' statements '}' ('else' '{' statements '}')?.

    Returns [xml_lines, tokens_consumed]; raises AssertionError on a
    missing delimiter.
    """
    b=[]
    index=0
    b+=[(p+'<ifStatement>',-1)]
    b+=[(p+' '+a[index][0],a[index][1])]
    index+=1
    if('(' not in a[index][0]):
        assert(1==2),"Error in Line ifStatement "+str(a[index][1])+"Expected ("
    else:
        b+=[(p+' '+a[index][0],a[index][1])]
        index+=1
    d=Compileexpression(a[index:],p+' ')
    b+=d[0]
    index+=d[1]
    if(')' not in a[index][0]):
        assert(1==2),"Error in Line ifStatement "+str(a[index][1])+"Expected )"
    else:
        b+=[(p+' '+a[index][0],a[index][1])]
        index+=1
    if('{' not in a[index][0]):
        assert(1==2),"Error in Line ifStatement "+str(a[index][1])+"Expected symbol {"
    else:
        b+=[(p+' '+a[index][0],a[index][1])]
        index+=1
    d=Compilestatements(a[index:],p+' ')
    b+=d[0]
    index+=d[1]
    if('}' not in a[index][0]):
        assert(1==2),"Error in Line ifStatement "+str(a[index][1])+"Expected Symbol }"
    else:
        b+=[(p+' '+a[index][0],a[index][1])]
        index+=1
    # Optional else clause.
    if('else' in a[index][0]):
        b+=[(p+' '+a[index][0],a[index][1])]
        index+=1
        if('{' not in a[index][0]):
            assert(1==2),"Error in Line ifStatement "+str(a[index][1])+"Expected Symbol {"
        else:
            b+=[(p+' '+a[index][0],a[index][1])]
            index+=1
        d=Compilestatements(a[index:],p+' ')
        b+=d[0]
        index+=d[1]
        if('}' not in a[index][0]):
            assert(1==2),"Error in Line ifStatement "+str(a[index][1])+"Expected Symbol }"
        else:
            b+=[(p+' '+a[index][0],a[index][1])]
            index+=1
    b+=[(p+'</ifStatement>',-1)]
    return [b,index]
# In[201]:
def CompileletStatement(a,p):
    """Parse a let statement: 'let' varName ('[' expression ']')? '=' expression ';'.

    Returns [xml_lines, tokens_consumed]; raises AssertionError on a
    malformed statement.
    """
    b=[]
    index=0
    b+=[(p+'<letStatement>',-1)]
    b+=[(p+' '+a[index][0],a[index][1])]
    index+=1
    if('identifier' not in a[index][0]):
        assert(1==2),"Error in Line letStatement"+str(a[index][1])+"expected identifier"
    else:
        b+=[(p+' '+a[index][0],a[index][1])]
        index+=1
    # Optional array subscript on the assignment target.
    if('[' in a[index][0]):
        b+=[(p+' '+a[index][0],a[index][1])]
        index+=1
        d=Compileexpression(a[index:],p+' ')
        b+=d[0]
        index+=d[1]
        if(']' not in a[index][0]):
            assert(1==2),"Error in Line letStatement"+str(a[index][1])+"expected identifier"
        else:
            b+=[(p+' '+a[index][0],a[index][1])]
            index+=1
    if('=' not in a[index][0]):
        assert(1==2),"Error in Line letStatement"+str(a[index][1])+"expected identifier"
    else:
        b+=[(p+' '+a[index][0],a[index][1])]
        index+=1
    d=Compileexpression(a[index:],p+' ')
    b+=d[0]
    index+=d[1]
    if (';' not in a[index][0]):
        assert(1==2),"Error in Line letStatement"+str(a[index][1])+"expected ;"
    else:
        b+=[(p+' '+a[index][0],a[index][1])]
        index+=1
    b+=[(p+'</letStatement>',-1)]
    return [b,index]
# In[202]:
def Compilestatements(a,p):
    """Parse a statement sequence, dispatching on the leading keyword
    until a non-statement token is reached.

    Returns [xml_lines, tokens_consumed].
    """
    b=[]
    index=0
    b+=[(p+'<statements>',-1)]
    while(statement(a[index][0])):
        if ('let' in a[index][0]):
            d=CompileletStatement(a[index:],p+' ')
            b+=d[0]
            index+=d[1]
        elif('while' in a[index][0]):
            d=CompilewhileStatement(a[index:],p+' ')
            b+=d[0]
            index+=d[1]
        elif ('if' in a[index][0]):
            d=CompileifStatement(a[index:],p+' ')
            b+=d[0]
            index+=d[1]
        elif ('do' in a[index][0]):
            d=CompiledoStatement(a[index:],p+' ')
            b+=d[0]
            index+=d[1]
        else:
            d=CompileReturnStatement(a[index:],p+' ')
            b+=d[0]
            index+=d[1]
    b+=[(p+'</statements>',-1)]
    return [b,index]
# In[203]:
def CompileparameterList(a,p):
    """Parse a (possibly empty) parameter list: (type varName (',' type varName)*)?.

    Returns [xml_lines, tokens_consumed].
    """
    b=[]
    index=0
    b+=[(p+'<parameterList>',-1)]
    # A leading type token means the list is non-empty.
    if (typeof(a[0][0])):
        b+=[(p+' '+a[0][0],a[0][1])]
        if('<identifier>' not in a[1][0]):
            assert(1==2),"Error in line parameterList"+str(a[1][1])+"expected identifier"
        else:
            b+=[(p+' '+a[1][0],a[1][1])]
        index=2
        while(',' in a[index][0]):
            b+=[(p+' '+a[index][0],a[index][1])]
            index+=1
            if(not typeof(a[index][0])):
                assert(1==2),"Error in line parameterList"+str(a[index][0])+"missing type of Argument"
            else:
                b+=[(p+' '+a[index][0],a[index][1])]
                index+=1
            if('<identifier>' not in a[index][0]):
                assert(1==2),"Error in line parameterList"+str(a[index][1])+"expected identifier"
            else:
                b+=[(p+' '+a[index][0],a[index][1])]
                index+=1
    b+=[(p+'</parameterList>',-1)]
    return [b,index]
# In[204]:
def CompilesubroutineBody(a,p):
    """Parse a subroutine body: '{' varDec* statements '}'.

    Returns [xml_lines, tokens_consumed].
    """
    b=[]
    index=0
    b+=[(p+'<subroutineBody>',-1)]
    if('{' not in a[0][0]):
        assert(1==2),"Error in line subroutineBody "+str(a[0][1])+"expected {"
    else:
        b+=[(p+' '+a[0][0],a[0][1])]
        index=1
    # All local variable declarations come before the statements.
    while('var' in a[index][0]):
        d=CompilevarDec(a[index:],p+' ')
        index+=d[1]
        b+=d[0]
    d=Compilestatements(a[index:],p+' ')
    b+=d[0]
    index+=d[1]
    if('}' not in a[index][0]):
        assert(1==2),"Error in line"+str(a[index][1])+"expected"
    else:
        b+=[(p+' '+a[index][0],a[index][1])]
        index+=1
    b+=[(p+'</subroutineBody>',-1)]
    return [b,index]
# In[205]:
def CompilesubroutineDec(a,p):
    """Parse a subroutine declaration:
    ('constructor'|'function'|'method') ('void'|type) name '(' parameterList ')' body.

    Returns [xml_lines, tokens_consumed].

    Bug fix: the first emitted tuple stored the token text twice; its
    second element is now the source line number a[index][1], matching
    every other tuple in the parse tree.
    """
    b=[]
    index=0
    b+=[(p+'<subroutineDec>',-1)]
    b+=[(p+' '+a[index][0],a[index][1])]
    index+=1
    # NOTE(review): a missing return type only prints and neither appends the
    # token nor advances index, so parsing continues misaligned - confirm intent.
    if(not typeof(a[index][0]) and ('void' not in a[index][0])):
        print("Error in line subroutineDec",a[index][1],"return type missing for subroutine")
    else:
        b+=[(p+' '+a[index][0],a[index][1])]
        index+=1
    if('<identifier>' not in a[index][0]):
        assert(1==2),"Error in line subroutineDec "+str(a[index][1])+"expected subroutine name"
    else:
        b+=[(p+' '+a[index][0],a[index][1])]
        index+=1
    if('(' not in a[index][0]):
        assert(1==2),"Error in line subroutineDec "+str(a[index][1])+"expected ("
    else:
        b+=[(p+' '+a[index][0],a[index][1])]
        index+=1
    d=CompileparameterList(a[index:],p+' ')
    index+=d[1]
    b+=d[0]
    if(')' not in a[index][0]):
        assert(1==2),"Error in line subroutineDec "+str(a[index][1])+"expected )"
    else:
        b+=[(p+' '+a[index][0],a[index][1])]
        index+=1
    d=CompilesubroutineBody(a[index:],p+' ')
    index+=d[1]
    b+=d[0]
    b+=[(p+'</subroutineDec>',-1)]
    return [b,index]
# In[206]:
def CompileclassVarDec(a,p):
    """Parse a class variable declaration:
    ('static'|'field') type varName (',' varName)* ';'.

    Returns [xml_lines, tokens_consumed].

    Bug fixes: the type check used the builtin `type(...)` (always truthy,
    so `not type(...)` could never fire) instead of the grammar helper
    `typeof(...)`; and the final assert message concatenated an int line
    number to a str, raising TypeError whenever it triggered.
    """
    b=[]
    index=0
    b+=[(p+'<classVarDec>',-1)]
    b+=[(p+' '+a[0][0],a[0][1])]
    if(not typeof(a[1][0])):
        assert(1==2),"Error in line classVarDec"+str(a[1][1])+"type missing"
    else:
        b+=[(p+' '+a[1][0],a[1][1])]
    if('<identifier>' not in a[2][0]):
        assert(1==2),"Error in line classVarDec"+str(a[2][1])+"expected identifier"
    else:
        b+=[(p+' '+a[2][0],a[2][1])]
        index=3
    while(',' in a[index][0]) :
        b+=[(p+' '+a[index][0],a[index][1])]
        index+=1
        if('<identifier>' not in a[index][0]):
            assert(1==2),"Error in line classVarDec"+str(a[index][1])+"expected identifier"
        else:
            b+=[(p+' '+a[index][0],a[index][1])]
            index+=1
    if(';' not in a[index][0]):
        assert(1==2),"Error in line classVarDec"+str(a[index][1])+"expected ;"
    else:
        b+=[(p+' '+a[index][0],a[index][1])]
    b+=[(p+'</classVarDec>',-1)]
    index+=1
    return [b,index]
# In[207]:
def CompileClass(a):
    """Parse a whole Jack class: 'class' name '{' classVarDec* subroutineDec* '}'.

    a -- flat token list (pairs of token string and source line number)

    Returns the XML parse tree as a list of (line_text, source_line) pairs.
    """
    p=''
    b=[]
    index=0
    if (a[0][0]!='<keyword> class </keyword>'):
        assert(1==2),"Error in line"+str(a[index][1])+" keyword class missing"
    else:
        b+=[(p+'<class>',-1)]
        b+=[(p+' '+a[index][0],a[index][1])]
        index+=1
    if('<identifier>' not in a[index][0]):
        assert(1==2),"Error in line"+str(a[1][1])+"Expected identifier"
    else:
        b+=[(p+' '+a[index][0],a[index][1])]
        index+=1
    if('{' not in a[index][0]):
        assert(1==2),"Error in line"+str(a[index][1])+"Expected Symbol {"
    else:
        b+=[(p+' '+a[index][0],a[index][1])]
        index+=1
    # Class variable declarations come first, then the subroutines.
    while('static' in a[index][0] or 'field' in a[index][0]):
        d=CompileclassVarDec(a[index:],p+' ')
        b+=d[0]
        index+=d[1]
    while('constructor' in a[index][0] or 'method'in a[index][0] or 'function' in a[index][0]):
        d=CompilesubroutineDec(a[index:],p+' ')
        b+=d[0]
        index+=d[1]
    if('}' not in a[index][0]):
        assert(1==2),"Error in line"+str(a[index][1])+"Expected Symbol }"
    else:
        b+=[(p+' '+a[index][0],a[index][1])]
        b+=[(p+'</class>',-1)]
    return b
# In[208]:
def parser(a):
    """Run the recursive-descent parser over the token pairs in a.

    Returns [pretty_lines, contents] where pretty_lines is the XML parse
    tree text and contents pairs each line with its source line number.
    """
    contents = CompileClass(a)
    final = [entry[0] for entry in contents]
    return [final, contents]
# In[209]:
def compileTerm(a,classSymboltable,subroutineSymboltable,subroutineKind,classname):
    """Generate VM code for one parsed <term> node.

    a              -- parse-tree lines (pairs of line text and source line)
    subroutineKind -- 'constructor' | 'function' | 'method' (field access is
                      rejected inside a 'function')

    Returns [vm_lines, parse_lines_consumed]; raises AssertionError when an
    undeclared variable is used. (Removed: two leftover debug prints that
    dumped every string constant and its length during compilation.)
    """
    i=1
    b=[]
    c=a[i][0].split()
    if(c[0]=='<symbol>'):
        if(c[1]=='('):
            i+=1 # for (
            d=compileExpression(a[i:],classSymboltable,subroutineSymboltable,subroutineKind,classname)
            b+=d[0]
            i+=d[1]
            i+=1 # for )
        else:
            i+=1 # for Unaryop
            d=compileTerm(a[i:],classSymboltable,subroutineSymboltable,subroutineKind,classname)
            b+=d[0]
            i+=d[1]
            if(c[1]=='-'):
                b+=['neg']
            else:
                b+=['not']
    elif(c[0]=='<integerConstant>'):
        b+=['push'+' '+'constant'+' '+c[1]]
        i+=1 # for integerConstant
    elif(c[0]=='<keyword>'):
        i+=1 # for keyword
        if(c[1]=='this'):
            b+=['push pointer 0']
        else:
            b+=['push constant 0']
        if(c[1]=='true'):
            b+=['not']
    elif(c[0]=='<identifier>'):
        kind=''
        index=''
        typ=''
        flag=[True,-1]
        # Subroutine scope shadows class scope when resolving the name.
        if(c[1] in set(subroutineSymboltable.keys())):
            kind=subroutineSymboltable[c[1]][0]
            typ=subroutineSymboltable[c[1]][1]
            index=str(subroutineSymboltable[c[1]][2])
        elif(c[1] in set(classSymboltable.keys())):
            kind=classSymboltable[c[1]][0]
            if(kind=='field'and subroutineKind=='function'):
                flag=[False,i] #print("Error Not declared ")
            else:
                if(kind=='field'):
                    kind='this'
                typ=classSymboltable[c[1]][1]
                index=str(classSymboltable[c[1]][2])
        else:
            flag=[False,i] #print("Error not declared variable")
        i+=1 # for <identifier>
        if(len(a[i][0].split())>1):
            if('['==(a[i][0].split())[1]):
                i+=1 # for '['
                d=compileExpression(a[i:],classSymboltable,subroutineSymboltable,subroutineKind,classname)
                b+=d[0]
                i+=d[1]
                i+=1 # for ']'
                if(not flag[0]):
                    assert(1==2),"Variable"+str(a[flag[1]][0].split()[1])+"is not declared present in line"+str(a[flag[1]][1])
                else:
                    b+=['push'+' '+kind+' '+index]
                b+=['add','pop pointer 1','push that 0']
            elif('('==(a[i][0].split())[1]):
                b+=['push pointer 0']
                i+=1 # for (
                d=compileExpressionList(a[i:],classSymboltable,subroutineSymboltable,subroutineKind,classname)
                b+=d[0]
                nP=d[1]
                i+=d[2]
                i+=1 # for )
                b+=['call'+' '+classname+'.'+c[1]+' '+str(nP+1)]
            elif('.'==(a[i][0].split())[1]):
                i+=1
                id2=(a[i][0].split())[1]
                if(flag[0]):
                    b+=['push'+' '+kind+' '+index]
                i+=1 # for (
                d=compileExpressionList(a[i:],classSymboltable,subroutineSymboltable,subroutineKind,classname)
                b+=d[0]
                nP=d[1]
                i+=d[2]
                i+=1 # for )
                if(flag[0]):
                    b+=['call'+' '+typ+'.'+id2+' '+str(nP+1)]
                else:
                    b+=['call'+' '+c[1]+'.'+id2+' '+str(nP)]
        else:
            b+=['push'+' '+kind+' '+index]
            if(not flag[0]):
                assert(1==2),"Variable"+str(a[flag[1]][0].split()[1])+"is not declared present in line"+str(a[flag[1]][1])
    else:
        # String constant: build a String object character by character.
        j=a[i][0].find('<')
        string=a[i][0][j+17:-18]
        length=len(string)
        b+=['push'+' '+'constant'+' '+str(length)]
        b+=['call String.new 1']
        for ii in string:
            b+=['push'+' '+'constant'+' '+str(ord(ii))]
            b+=['call String.appendChar 2']
        i+=1 # for <stringConstant>
    i+=1 # for</term>
    return [b,i]
# In[210]:
def call(op):
    """Map a binary-operator token payload to its VM command list.

    Handles both the raw operators and the XML-escaped forms ('&amp;',
    '&lt;', '&gt;') that the tokeniser actually emits for '&', '<' and '>'.
    Returns None for anything unrecognised (matching the original
    fall-through behaviour).
    """
    commands = {
        '+': ['add'],
        '-': ['sub'],
        '&': ['and'],
        '&amp;': ['and'],
        '|': ['or'],
        '>': ['gt'],
        '&gt;': ['gt'],
        '<': ['lt'],
        '&lt;': ['lt'],
        '=': ['eq'],
        '*': ['call Math.multiply 2'],
        '/': ['call Math.divide 2'],
    }
    return commands.get(op)
# In[211]:
def compileExpression(a,classSymboltable,subroutineSymboltable,subroutineKind,classname):
    """Generate VM code for one parsed <expression> node.

    Emits each term's code in order; the pending operator is emitted right
    after its second operand (left-to-right evaluation, no precedence).

    Returns [vm_lines, parse_lines_consumed]. (Removed: two leftover debug
    prints that fired on every term and on every '<' operator.)
    """
    b=[]
    i=1
    op=''
    while('</expression>'!=(a[i][0].split())[0]):
        if('<term>'==(a[i][0].split())[0]):
            d=compileTerm(a[i:],classSymboltable,subroutineSymboltable,subroutineKind,classname)
            b+=d[0]
            i+=d[1]
            if(op!=''):
                b+=call(op)
                op=''
        elif('<symbol>'==(a[i][0].split())[0]):
            if('('==(a[i][0].split())[1]):
                i+=1
                d=compileExpression(a[i:],classSymboltable,subroutineSymboltable,subroutineKind,classname)
                b+=d[0]
                i+=d[1]
                i+=1
                if(op!=''):
                    b+=call(op)
                    op=''
            else:
                op=(a[i][0].split())[1]
                i+=1
        else:
            i+=1
    i+=1
    return [b,i]
# In[212]:
def compileExpressionList(a,classSymboltable,subroutineSymboltable,subroutineKind,classname):
    """Generate VM code for a parsed <expressionList> node.

    Returns [vm_lines, n_expressions, parse_lines_consumed]; n_expressions
    is the argument count for the enclosing subroutine call.
    """
    vm_code = []
    n_expressions = 0
    i = 1
    while (a[i][0].split())[0] != '</expressionList>':
        if (a[i][0].split())[0] == '<expression>':
            code, used = compileExpression(a[i:], classSymboltable, subroutineSymboltable, subroutineKind, classname)
            vm_code += code
            i += used
            n_expressions += 1
        else:
            i += 1  # skip the ',' separator tokens
    return [vm_code, n_expressions, i + 1]  # +1 consumes '</expressionList>'
# In[213]:
def compiledoStatement(a,classSymboltable,subroutineSymboltable,subroutineKind,classname):
    """Generate VM code for a parsed <doStatement> node.

    Distinguishes method calls on the current object (no '.'), method calls
    on a variable, and function/constructor calls on a class name. The
    returned value is always discarded with 'pop temp 0'.

    Returns [vm_lines, parse_lines_consumed].
    """
    b=[]
    i=2
    id1=(a[i][0].split())[1]
    i+=1
    if('.'!=(a[i][0].split())[1]):
        # Call on the current object: pass `this` as the hidden argument.
        b+=['push pointer 0']
        i+=1 # for (
        d=compileExpressionList(a[i:],classSymboltable,subroutineSymboltable,subroutineKind,classname)
        b+=d[0]
        nP=d[1]
        i+=d[2]
        b+=['call'+' '+classname+'.'+id1+' '+str(nP+1),'pop temp 0']
        i+=1 # for )
    else:
        i+=1 # for '.'
        kind=''
        typ=''
        id2=(a[i][0].split())[1]
        i+=1
        # If id1 is a known variable this is a method call on it; otherwise
        # id1 is treated as a class name (kind stays '').
        if(id1 in set(subroutineSymboltable.keys())):
            kind=subroutineSymboltable[id1][0]
            typ=subroutineSymboltable[id1][1]
            index=subroutineSymboltable[id1][2]
            b+=['push'+' '+kind+' '+str(index)]
        elif(id1 in set(classSymboltable.keys())):
            kind=classSymboltable[id1][0]
            typ=classSymboltable[id1][1]
            index=classSymboltable[id1][2]
            if(kind=='field'):
                if(subroutineKind=='function'):
                    print("Error can't access field variables")
                else:
                    b+=['push'+' '+'this'+' '+str(index)]
            else:
                b+=['push'+' '+'static'+' '+str(index)]
        i+=1 # for (
        d=compileExpressionList(a[i:],classSymboltable,subroutineSymboltable,subroutineKind,classname)
        b+=d[0]
        nP=d[1]
        i+=d[2]
        i+=1 # for )
        if(kind==''):
            b+=['call'+' '+id1+'.'+id2+' '+str(nP)]
        else:
            b+=['call'+' '+typ+'.'+id2+' '+str(nP+1)]
        b+=['pop temp 0']
    i+=2 #for ; </doStatement>
    return [b,i]
# In[214]:
def compilereturnStatement(a,classSymboltable,subroutineSymboltable,subroutineKind,classname):
    """Generate VM code for a parsed <returnStatement> node.

    A bare 'return;' pushes constant 0 first, per the Jack VM void-return
    convention. Returns [vm_lines, parse_lines_consumed].
    """
    vm_code = []
    i = 2
    if (a[i][0].split())[0] == '<expression>':
        code, used = compileExpression(a[i:], classSymboltable, subroutineSymboltable, subroutineKind, classname)
        vm_code += code
        i += used
        vm_code.append('return')
    else:
        vm_code += ['push constant 0', 'return']
    return [vm_code, i + 2]  # +2 consumes ';' and '</returnStatement>'
# In[215]:
def compilewhileStatement(a,classSymboltable,subroutineSymboltable,subroutineKind,classname):
    """Generate VM code for a parsed <whileStatement> node.

    Uses the module-level labelNum counter to mint two unique labels:
    loop entry (g) and loop exit (g+1). Returns [vm_lines, parse_lines_consumed].
    """
    global labelNum
    g=labelNum
    b=['label'+' '+classname+'.'+str(g)]
    labelNum+=2
    i=3
    d=compileExpression(a[i:],classSymboltable,subroutineSymboltable,subroutineKind,classname)
    b+=d[0]
    i+=d[1]
    i+=2 # for ) {
    # Jump out when the condition is false.
    b+=['not']
    b+=['if-goto'+' '+classname+'.'+str(g+1)]
    d=compilestatements(a[i:],classSymboltable,subroutineSymboltable,subroutineKind,classname)
    b+=d[0]
    i+=d[1]
    i+=1 # for }
    b+=['goto'+' '+classname+'.'+str(g)]
    b+=['label'+' '+classname+'.'+str(g+1)]
    i+=1 # for </whileStatement>
    return [b,i]
# In[216]:
def compileifStatement(a,classSymboltable,subroutineSymboltable,subroutineKind,classname):
    """Generate VM code for a parsed <ifStatement> node (with optional else).

    Uses the module-level labelNum counter to mint two unique labels:
    else/false branch (g) and statement end (g+1).
    Returns [vm_lines, parse_lines_consumed].
    """
    global labelNum
    b=[]
    i=3
    g=labelNum
    labelNum+=2
    d=compileExpression(a[i:],classSymboltable,subroutineSymboltable,subroutineKind,classname)
    b+=d[0]
    i+=d[1]
    # Jump to the false branch when the condition does not hold.
    b+=['not']
    b+=['if-goto'+' '+classname+'.'+str(g)]
    i+=2 # for { )
    d=compilestatements(a[i:],classSymboltable,subroutineSymboltable,subroutineKind,classname)
    b+=d[0]
    i+=d[1]
    i+=1 # for }
    b+=['goto'+' '+classname+'.'+str(g+1)]
    b+=['label'+' '+classname+'.'+str(g)]
    if(len(a[i][0].split())>1 and 'else'==(a[i][0].split())[1]):
        i+=2 # for else and {
        d=compilestatements(a[i:],classSymboltable,subroutineSymboltable,subroutineKind,classname)
        b+=d[0]
        i+=d[1]
        i+=1 # for }
    b+=['label'+' '+classname+'.'+str(g+1)]
    i+=1 # for </ifStatement>
    return [b,i]
# In[217]:
def compileletStatement(a,classSymboltable,subroutineSymboltable,subroutineKind,classname):
    """Generate VM code for a parsed <letStatement> node.

    Resolves the target variable (subroutine scope first, then class scope;
    field access is forbidden inside a 'function'), then pops the value
    either directly or through `that` for an array element.
    Returns [vm_lines, parse_lines_consumed]; raises AssertionError for an
    undeclared target.
    """
    varname=(a[2][0].split())[1]
    kind=''
    index=''
    b=[]
    i=0
    if(varname in set(subroutineSymboltable.keys())):
        kind=subroutineSymboltable[varname][0]
        index=str(subroutineSymboltable[varname][2])
    elif(varname in set(classSymboltable.keys())):
        if(classSymboltable[varname][0]=='static'):
            kind='static'
        else:
            if(subroutineKind=='function'):
                assert(1==2),"Variable"+varname+"not declared and used in line"+str(a[2][1])
            else:
                kind='this'
        index=str(classSymboltable[varname][2])
    else:
        assert(1==2),"Variable"+varname+"not declared and used in line"+str(a[2][1])
    i=4
    d=compileExpression(a[i:],classSymboltable,subroutineSymboltable,subroutineKind,classname)
    b+=d[0]
    i+=d[1]
    i+=1
    # a[3] is '=' for a plain assignment, '[' for an array-element target.
    if('='==(a[3][0].split())[1]):
        b+=['pop'+' '+kind+' '+index]
    else:
        b+=['push'+' '+kind+' '+index]
        b+=['add']
        i+=1
        d=compileExpression(a[i:],classSymboltable,subroutineSymboltable,subroutineKind,classname)
        b+=d[0]
        i+=d[1]
        i+=1
        # Save the value, point `that` at the element, then store.
        b+=['pop temp 0','pop pointer 1','push temp 0','pop that 0']
    return [b,i]
# In[218]:
def compilestatements(a,classSymboltable,subroutineSymboltable,subroutineKind,classname):
    """Generate VM code for a parsed <statements> node, dispatching each
    child statement to its dedicated compiler.

    Returns [vm_lines, parse_lines_consumed].
    """
    i=1
    b=[]
    while('</statements>'!=(a[i][0].split())[0]):
        if('<letStatement>'==(a[i][0].split())[0]):
            d=compileletStatement(a[i:],classSymboltable,subroutineSymboltable,subroutineKind,classname)
            b+=d[0]
            i+=d[1]
        elif('<ifStatement>'==(a[i][0].split())[0]):
            d=compileifStatement(a[i:],classSymboltable,subroutineSymboltable,subroutineKind,classname)
            b+=d[0]
            i+=d[1]
        elif('<whileStatement>'==(a[i][0].split())[0]):
            d=compilewhileStatement(a[i:],classSymboltable,subroutineSymboltable,subroutineKind,classname)
            b+=d[0]
            i+=d[1]
        elif('<returnStatement>'==(a[i][0].split())[0]):
            d=compilereturnStatement(a[i:],classSymboltable,subroutineSymboltable,subroutineKind,classname)
            b+=d[0]
            i+=d[1]
        elif('<doStatement>'==(a[i][0].split())[0]):
            d=compiledoStatement(a[i:],classSymboltable,subroutineSymboltable,subroutineKind,classname)
            b+=d[0]
            i+=d[1]
        else:
            i+=1
    i+=1
    return [b,i]
# In[219]:
def compilevarDec(a,subroutineSymboltable,local_count,total_count):
    """Collect the locals declared by a parsed <varDec> node into a symbol table.

    a                     -- parse-tree lines (pairs of line text and source line)
    subroutineSymboltable -- existing table, checked for redeclarations
    local_count           -- next free 'local' segment index
    total_count           -- running total of subroutine symbols

    Returns [new_entries, local_count, total_count, parse_lines_consumed];
    each entry maps name -> ['local', type, index]. Raises AssertionError
    on a redeclared name. (Removed: leftover debug `print(b)` that dumped
    every declared token during compilation.)
    """
    Symboltable={}
    i=2
    typeofvar=''
    while('</varDec>'!=(a[i][0].split())[0]):
        if(typeofvar==''):
            # First non-'var' token is the declared type.
            typeofvar=(a[i][0].split())[1]
            i+=1
        elif(','==(a[i][0].split())[1]):
            i+=1
        elif(';'==(a[i][0].split())[1]):
            i+=1
        else:
            b=a[i][0].split()
            if(b[1] in set(subroutineSymboltable.keys())):
                assert(1==2),"Error redeclared variable"+b[1]+"in line"+str(a[i][1])
            else:
                Symboltable[b[1]]=['local',typeofvar,local_count]
                local_count+=1
                total_count+=1
            i+=1
    i+=1
    return [Symboltable,local_count,total_count,i]
# In[220]:
def compilesubroutineBody(a,subroutineSymboltable,classSymboltable,f,local_count,total_count):
    """Generate VM code for a parsed <subroutineBody> node.

    f packs [classname, subroutinename, subroutinekind, field_count,
    argument_count]. Local varDecs extend subroutineSymboltable (mutated in
    place) before the 'function' header is emitted with the final local
    count; constructors allocate the object and methods re-anchor `this`.

    Returns [vm_lines, parse_lines_consumed]. (Removed: leftover debug
    print of the symbol table, plus the unused locals classname,
    subroutinename and argument_count.)
    """
    i=1
    b=[]
    subroutinekind=f[2]
    field_count=f[3]
    while('</subroutineBody>'!=(a[i][0].split())[0]):
        if('<varDec>'==(a[i][0].split())[0]):
            d=compilevarDec(a[i:],subroutineSymboltable,local_count,total_count)
            subroutineSymboltable.update(d[0])
            local_count=d[1]
            total_count=d[2]
            i+=d[3]
        elif('<statements>'==(a[i][0].split())[0]):
            # All varDecs precede <statements>, so local_count is final here.
            b+=['function '+f[0]+'.'+f[1]+' '+str(local_count)]
            if(subroutinekind=='constructor'):
                b+=['push'+' '+'constant'+' '+str(field_count)]
                b+=['call Memory.alloc 1']
                b+=['pop pointer 0']
            elif(subroutinekind=='method'):
                b+=['push argument 0']
                b+=['pop pointer 0']
            d=compilestatements(a[i:],classSymboltable,subroutineSymboltable,subroutinekind,f[0])
            b+=d[0]
            i+=d[1]
        else:
            i+=1
    return [b,i]
# In[221]:
def compileparameterList(a, argument_count, total_count):
    """Build 'argument' symbol-table entries from a parsed <parameterList> node.

    Returns [new_entries, argument_count, total_count, parse_lines_consumed];
    each entry maps name -> ['argument', type, index].
    """
    symbol_table = {}
    i = 1
    while (a[i][0].split())[0] != '</parameterList>':
        token = (a[i][0].split())[1]
        if token == ',':
            i += 1
            continue
        arg_type = token
        var_name = (a[i + 1][0].split())[1]
        i += 2
        symbol_table[var_name] = ['argument', arg_type, argument_count]
        argument_count += 1
        total_count += 1
    return [symbol_table, argument_count, total_count, i + 1]  # +1 consumes the closing tag
# In[222]:
def compilesubroutineDec(a,classSymboltable,classname,field_count):
    """Compile one <subroutineDec> region (constructor/function/method).

    a: list of [xml_token_line, source_line_number] pairs starting at the
       '<subroutineDec>' line; fixed offsets 1..3 hold kind, return type, name.
    Returns [vm_command_list, consumed_line_count].
    """
    subroutineSymboltable={}
    subroutinekind=(a[1][0].split())[1]   # 'constructor' | 'function' | 'method'
    subroutinetype=(a[2][0].split())[1]   # declared return type token
    subroutinename=(a[3][0].split())[1]
    argument_count=0
    local_count=0
    total_count=0
    b=[]
    i=5  # skip past kind/type/name and the '(' line
    if(subroutinekind=='constructor' and subroutinetype!=classname):
        # A Jack constructor must return its own class type.
        assert(1==2),"Error invalid return type for constructor"
    else:
        if(subroutinekind=='method'):
            # Methods implicitly receive the object as argument 0 ('this').
            subroutineSymboltable['this']=['argument',classname,argument_count]
            argument_count+=1
            total_count+=1
        while('</subroutineDec>'!=(a[i][0].split())[0]):
            if('<parameterList>'==(a[i][0].split())[0]):
                d=compileparameterList(a[i:],argument_count,total_count)
                subroutineSymboltable.update(d[0])
                argument_count=d[1]
                total_count=d[2]
                i+=d[3]
            elif('<subroutineBody>'==(a[i][0].split())[0]):
                # Bundle the per-subroutine context for body compilation.
                f=[classname,subroutinename,subroutinekind,field_count,argument_count]
                d=compilesubroutineBody(a[i:],subroutineSymboltable,classSymboltable,f,local_count,total_count)
                b+=d[0]
                i+=d[1]
            else:
                i+=1
    i+=1  # step past the closing '</subroutineDec>' line
    return [b,i]
# In[223]:
def compileclassVarDec(a,static_count,field_count,total_count,classSymboltable):
    """Parse one <classVarDec> region ('static' / 'field' declarations).

    a: list of [xml_token_line, source_line_number] pairs starting at the
       '<classVarDec>' line.
    classSymboltable: already-known class symbols (read only, for
       redeclaration checks).
    Returns [Symboltable, static_count, field_count, total_count, consumed]
    where Symboltable maps name -> [kind, type, index], with separate index
    sequences for static and field variables.

    Raises AssertionError on a redeclared variable (raised explicitly so the
    check survives `python -O`, which strips `assert` statements).
    """
    Symboltable={}
    i=1
    keywordtype=''
    kind=''
    while('</classVarDec>' != (a[i][0].split())[0]):
        b=a[i][0].split()
        if(kind==''):
            kind=b[1]          # first token: 'static' or 'field'
        elif(keywordtype==''):
            keywordtype=b[1]   # second token: the declared type
        elif(b[0]=='<identifier>'):
            if b[1] in classSymboltable:
                raise AssertionError(
                    "Error redeclared variable " + b[1] + " in line " + str(a[i][1]))
            if(kind=='static'):
                Symboltable[b[1]]=[kind,keywordtype,static_count]
                static_count+=1
            else:
                Symboltable[b[1]]=[kind,keywordtype,field_count]
                field_count+=1
            total_count+=1
        # commas/semicolons fall through: only the shared index advance below
        i+=1
    i+=1  # consume the closing '</classVarDec>' line as well
    return [Symboltable,static_count,field_count,total_count,i]
# In[224]:
def compileClass(a):
    """Compile a whole <class> parse tree into a list of VM command strings.

    a: list of [xml_token_line, source_line_number] pairs for the full class;
       the class name sits at fixed offset 2 ('<identifier> Name </identifier>').
    Returns the accumulated VM command list.
    """
    b=[]
    classSymboltable={}
    static_count=0
    field_count=0
    total_count=0
    j=len(a)
    i=1
    classname=(a[2][0].split())[1]
    while(i!=j):
        if('<classVarDec>' == (a[i][0].split())[0]):
            # Class-level variables extend the class symbol table.
            d=compileclassVarDec(a[i:],static_count,field_count,total_count,classSymboltable)
            classSymboltable.update(d[0])
            static_count=d[1]
            field_count=d[2]
            total_count=d[3]
            i+=d[4]
        elif('<subroutineDec>'==(a[i][0].split())[0]):
            # Each subroutine contributes its own VM commands.
            d=compilesubroutineDec(a[i:],classSymboltable,classname,field_count)
            b+=d[0]
            i+=d[1]
        else:
            i+=1
    return b
# In[225]:
# Global counter used to generate unique VM labels; reset to 0 per input file in main().
labelNum=0
# In[226]:
def main():
    """Drive the full Jack-to-VM pipeline for every file named on the command line.

    Usage: script <count> <file1.jack> ... <fileN.jack>
    For each input file this writes <name>T.xml (token stream), <name>.xml
    (parse tree) and <name>.vm (VM code); on failure it writes <name>.err
    with the error message and moves on to the next file.
    """
    count=int(sys.argv[1])
    files=[]
    global labelNum
    for i in range(count):
        files.append(sys.argv[2+i])
    for file in files:
        labelNum=0  # label numbering restarts for every compiled file
        with open(file) as myfile:
            contents=myfile.readlines()
        filename=file[:-5]  # strip the '.jack' extension
        try:
            contents=tokenisation(contents)
        except IndexError:
            with open(filename+'.err',mode='w') as f1:
                f1.write('Program terminated with no class declaration')
            continue
        except AssertionError as e:
            with open(filename+'.err',mode='w') as f2:
                f2.write(str(e))
            continue
        else:
            final=contents[0]
            contents=contents[1]
            with open(filename+'T.xml',mode='w') as myfile:
                myfile.write('\n'.join(final)+'\n')
            final=[] # important tuple
            # Flatten into a list of (token_xml_line, source_line_number) pairs.
            for i in contents:
                ii=i[0]
                for j in i[1]:
                    final+=[(j,ii)]
            try:
                contents=parser(final)
            except IndexError:
                with open(filename+'.err',mode='w') as f1:
                    f1.write('Program terminated with no class declaration')
                continue
            except AssertionError as e:
                with open(filename+'.err',mode='w') as f2:
                    f2.write(str(e))
                continue
            else:
                final=contents[0]
                contents=contents[1]
                with open(filename+'.xml',mode='w') as myfile:
                    myfile.write('\n'.join(final)+'\n')
                try:
                    contents=compileClass(contents)
                except IndexError:
                    with open(filename+'.err',mode='w') as f1:
                        f1.write('Program terminated with no class declaration')
                    continue
                except AssertionError as e:
                    with open(filename+'.err',mode='w') as f2:
                        f2.write(str(e))
                else:
                    with open(filename+'.vm',mode='w') as myfile:
                        myfile.write('\n'.join(contents)+'\n')
# In[227]:
# Entry point when executed as a script (not when imported).
if __name__ == "__main__":
    main()
# In[ ]:
| true |
f0e2996baa5e49d6f68bf8dd355711606d529dc2 | Python | krystal2694/star_wars_sud | /A3/test_attack.py | UTF-8 | 2,075 | 3.109375 | 3 | [] | no_license | from unittest import TestCase
from unittest.mock import patch
from battle import attack
from imperial import imperial_forces
import io
class TestAttack(TestCase):
    """Unit tests for battle.attack.

    battle.randint is patched with side_effect=[first_roll, second_roll]:
    the first value presumably decides hit/miss and the second the damage
    dealt — confirm against battle.py. Decorators apply bottom-up, so the
    randint mock is the first method argument. sys.stdout is captured to
    assert the exact battle narration.
    """
    @patch('sys.stdout', new_callable=io.StringIO)
    @patch('battle.randint', side_effect=[1, 3])
    def test_attack_print_output_when_miss(self, mock_randint, mock_stdout):
        # Low first roll (1): the enemy evades and no damage line is printed.
        expected_output = "You strike!\n" \
                          "The Stormtrooper evaded the attack!\n\n"
        attack(0)
        self.assertEqual(expected_output, mock_stdout.getvalue())
    @patch('sys.stdout', new_callable=io.StringIO)
    @patch('battle.randint', side_effect=[13, 2])
    def test_attack_print_output_when_hit_and_enemy_survives(self, mock_randint, mock_stdout):
        # High first roll hits for 2 damage; the enemy's remaining HP is reported.
        expected_output = "You strike!\n" \
                          "The Shocktrooper has taken a 2 point hit!\n" \
                          "Their HP has dropped to 3.\n\n"
        attack(1)
        self.assertEqual(expected_output, mock_stdout.getvalue())
    @patch('sys.stdout', new_callable=io.StringIO)
    @patch('battle.randint', side_effect=[14, 5])
    def test_attack_print_output_when_hit_and_enemy_dies(self, mock_randint, mock_stdout):
        # Damage (5) is enough to kill; the victory message is printed instead of HP.
        expected_output = "You strike!\n" \
                          "The Imperial Officer has taken a 5 point hit!\n\n" \
                          "You have defeated the Imperial Officer.\n" \
                          "We are one step closer to peace in the galaxy!\n\n"
        attack(2)
        self.assertEqual(expected_output, mock_stdout.getvalue())
    @patch('battle.randint', side_effect=[15, 3])
    def test_attack_when_hit_damage_is_reflected_in_new_hp(self, mock_randint):
        # A hit mutates the shared imperial_forces record in place.
        hp_before_attack = imperial_forces[3]["HP"]
        attack(3)
        self.assertEqual(imperial_forces[3]["HP"], hp_before_attack - 3)
    @patch('battle.randint', side_effect=[4, 3])
    def test_attack_when_miss_hp_unchanged(self, mock_randint):
        # A miss leaves the enemy's HP untouched.
        hp_before_attack = imperial_forces[4]["HP"]
        attack(4)
        self.assertEqual(imperial_forces[4]["HP"], hp_before_attack)
| true |
3022d157d83f77763a17445fe41c3096c010d135 | Python | juma011/programming-advance-assignment | /Own type built in types.py | UTF-8 | 775 | 4.09375 | 4 | [] | no_license | class Dog:
    pass  # first, empty version of the class (its header is on the previous line)
class Dog:
    # second version: instances can speak and walk
    def speak(self):
        print("bark")
    def walk(self):
        print("walk")
d = Dog()
print(d.walk())  # prints "walk", then None (walk() returns nothing)
d2 = Dog()
print(d2.walk())
# attributes can be attached to individual instances dynamically
d.name = "jim"
d2.name = "dwight"
print(d2.name)
d.age = 30
d2.age = 35
print(type(d))  # the instance's type is the Dog class
# a plain module-level function, unrelated to the class above
def speak():
    print('speak')
d1 = 20
d2 = 60  # NOTE: rebinds d2 from a Dog instance to an int
print(d2/10)
class Dog:
    # third version: getter/setter-style accessors
    def bark(self):
        print("bark")
    def set_breed(self, breed):
        self.breed = breed
    def get_breed(self):
        return self.breed
d = Dog()
print(d.bark())  # prints "bark", then None
d.set_breed("german shepherd")
print(d.get_breed())
class Dog:
    # final version: attributes initialised via __init__
    def __init__(self, name, age, color):
        self.name = name
        self.age = age
        self.color = color
d = Dog("Micheal", 20, "brown" )
print(d.color)
print(d.age)
print(d.name)
| true |
3795dcbdbae92b11805359fd02acf0cb7c795350 | Python | jaitjacob/Smart-Library | /mp/book.py | UTF-8 | 451 | 3 | 3 | [] | no_license |
class Book:
    """Simple record describing a library book.

    Kept with explicit accessor methods for backward compatibility with
    existing callers; a __repr__ is added for easier debugging/logging.
    """

    def __init__(self, bookid: int, title: str, author: str, publisheddate: str):
        self.bookid = bookid              # unique numeric identifier
        self.title = title
        self.author = author
        self.publisheddate = publisheddate  # date kept as a plain string

    def get_bookid(self) -> int:
        """Return the book's identifier."""
        return self.bookid

    def get_title(self) -> str:
        """Return the book's title."""
        return self.title

    def get_author(self) -> str:
        """Return the book's author."""
        return self.author

    def get_publisheddate(self) -> str:
        """Return the publication date string."""
        return self.publisheddate

    def __repr__(self) -> str:
        # Purely additive; no existing caller depends on the old default repr.
        return (f"Book(bookid={self.bookid!r}, title={self.title!r}, "
                f"author={self.author!r}, publisheddate={self.publisheddate!r})")
| true |
7eaa035c654f839e48fccc06a6cf57cb472d32e8 | Python | lobo1233456/footlbotestproj | /footlbolib/IndependentDecoration/dirFunc.py | UTF-8 | 1,488 | 2.921875 | 3 | [] | no_license | import os
import settings
class dirFunc():
    """File-management helpers that operate on the current working directory,
    plus access to the project root configured in settings."""
    def __init__(self):
        directory = "./dir"
        # os.chdir(directory)  # switch into the target directory `directory`
        cwd = os.getcwd()  # fetch the current directory (i.e. the dir directory)
        # NOTE(review): `directory` and `cwd` are computed but never stored or
        # used — every method below re-reads os.getcwd() at call time.
    def deleteBySize(self,minSize):
        """Delete files smaller than minSize (unit: KB)."""
        files = os.listdir(os.getcwd())  # list the files in the current directory
        for file in files:
            if os.path.getsize(file) < minSize * 1000:
                os.remove(file)  # delete the file
                print(file + " deleted")
        return
    def deleteNullFile(self):
        '''Delete every file whose size is 0 bytes.'''
        files = os.listdir(os.getcwd())
        for file in files:
            if os.path.getsize(file) == 0:  # query the file size
                os.remove(file)
                print(file + " deleted.")
        return
    def deleteTarget(self,Name):
        '''
        Delete the specified file.
        :param Name: name of the file to delete
        :return: confirmation message when found (implicitly None otherwise)
        '''
        files = os.listdir(os.getcwd())  # list the files in the current directory
        for file in files:
            if file == Name:
                os.remove(file)  # delete the file
                msg = Name + " deleted"
                return msg
    def rootProjecct(self):
        '''
        :return: the project root directory from settings (the original
                 docstring states it ends with a path separator — confirm).
                 Method name keeps its original spelling for compatibility.
        '''
        return settings.PROJECT_ROOT_DIR
# Quick manual check when run as a script.
if __name__ == '__main__':
    print(dirFunc().rootProjecct())
| true |
5605216b4be0a9dfec1d5551c9d0bb92baab976a | Python | portscher/SIFT-SURF-HOG | /hog.py | UTF-8 | 2,847 | 3.015625 | 3 | [] | no_license | from skimage.feature import hog
from sklearn.cluster import MiniBatchKMeans
from sklearn.base import TransformerMixin, BaseEstimator
import cv2
class HogTransformer(BaseEstimator, TransformerMixin):
    """
    Provides functionality to use the HoG for training clusters
    and computing histograms of oriented gradients.

    Fits an sklearn Pipeline: fit() learns a MiniBatchKMeans clustering over
    HOG descriptors, transform() emits one HOG descriptor per image.
    """
    def __init__(self, cluster_k):
        super(HogTransformer, self).__init__()
        # Default values for the HoG according to the values given at the paper of Navneet Dalal and Bill Triggs.
        # https://hal.inria.fr/inria-00548512/document
        self.cellsPerBlock = (2, 2)
        self.TransformSqrt = True
        self.cluster_k = cluster_k
        # init_size > n_clusters, as MiniBatchKMeans requires for its init sample.
        self.cluster = MiniBatchKMeans(n_clusters=cluster_k, init_size=3 * cluster_k, random_state=0, batch_size=6)
        self.features = []  # NOTE(review): never populated after init — possibly dead state
    def fit(self, X, y=None, **kwargs):
        """
        Learns the cluster by computing HoG feature descriptors.
        Returns self, per the sklearn estimator convention.
        """
        print("Identify descriptors...");
        descriptors = []
        for img in self.prepareData(X):
            # we must only change the cells per block to (2, 2) and the transform sqrt to true to be conform with the paper.
            fd = hog(img, cells_per_block=self.cellsPerBlock, block_norm='L2-Hys', transform_sqrt=self.TransformSqrt)
            descriptors.append(fd)
        print("Clustering data...")
        self.cluster.fit(descriptors)
        print("Clustered " + str(len(self.cluster.cluster_centers_)) + " centroids")
        return self
    def transform(self, X, y=None, **kwargs):
        """
        Creates the histograms for the given data.
        The HOG implementation of skimage returns the
        feature descriptor of HOG. Therefore the feature descriptor
        is equal to the produced histogram of oriented gradients.
        """
        print("Calculating histograms for " + str(len(X)) + " items.")
        histograms = []
        for img in self.prepareData(X):
            fd = hog(img, cells_per_block=self.cellsPerBlock, block_norm='L2-Hys', transform_sqrt=self.TransformSqrt)
            histograms.append(fd)
        return histograms
    def prepareData(self, X):
        """
        Prepares the data for the HOG transformer.
        The images will be resized to 64 by 128 pixels, according to the paper of Navneet Dalal and Bill Triggs.
        This step results in a better performance of the HOG feature, according to
        the source https://learnopencv.com/histogram-of-oriented-gradients/
        """
        print("Prepare data for HoG")
        preparedData = []
        for img in X:
            preparedData.append(cv2.resize(img, (64, 128)))
        return preparedData
    def fit_transform(self, X, y=None, **kwargs):
        # Overrides TransformerMixin.fit_transform. fit() returns self, so the
        # rebinding of `self` below is redundant but harmless.
        print("fit and transforming...")
        self = self.fit(X, y)
        return self.transform(X, y)
| true |
61ef1ce82480165d8bbaf77b3eb4ca0e554e2c5a | Python | frantzmiccoli/artemisia | /src/artemisia/test/arff_exporter_test.py | UTF-8 | 1,837 | 2.640625 | 3 | [
"MIT",
"LicenseRef-scancode-other-permissive"
] | permissive | import unittest
from artemisia.exporter.arff_exporter import ArffExporter
class ArffExporterTest(unittest.TestCase):
    """Exercises ArffExporter: ARFF header generation for nominal (string)
    and numeric columns, including exclusion of unselected columns."""
    def test_export(self):
        exporter = ArffExporter()
        # Only these three columns are exported; 'width' must be omitted.
        exporter.set_columns('problem,iteration,weight'.split(','))
        file_data = self._get_fake_file_data()
        exporter.export(file_data)
        output = exporter.get_output()
        assert_done = 0
        for line in output.split("\n"):
            if '@ATTRIBUTE problem' in line:
                message = 'This line should contains something like ' + \
                          '"{tsp_solution, othersolution}"'
                # A string column becomes a nominal attribute listing both observed values.
                self.assertTrue('tsp_solution' in line, message)
                self.assertTrue('other_solution' in line, message)
                self.assertTrue('{' in line, message)
                self.assertTrue('}' in line, message)
                assert_done += 4
        # Guards against the '@ATTRIBUTE problem' line being missing entirely
        # (and against it appearing more than once).
        self.assertEqual(4, assert_done)
        self.assertTrue('@ATTRIBUTE iteration numeric' in output)
        self.assertTrue('@ATTRIBUTE weight numeric' in output)
        self.assertTrue('@ATTRIBUTE width numeric' not in output)
    def _get_fake_file_data(self):
        # Two problems x two records each; 'width' is present in the data but
        # deliberately excluded from the exported columns by test_export.
        file_data = [{'problem': 'tsp_solution',
                      'width': 150,
                      'iteration': 213,
                      'weight': -15},
                     {'problem': 'tsp_solution',
                      'width': 153,
                      'iteration': 250,
                      'weight': 14},
                     {'problem': 'other_solution',
                      'width': 15,
                      'iteration': 213,
                      'weight': -15},
                     {'problem': 'other_solution',
                      'width': 12,
                      'iteration': 250,
                      'weight': 11}]
        return file_data
27b01f8dcc52904feeec3056cbe5d7c569f29257 | Python | aminosninatos/GitUnderHood | /gittips.py | UTF-8 | 5,571 | 2.515625 | 3 | [] | no_license | Turn paged output
--------------------------
for git branch :
git config --global pager.branch false
for git log :
git config --global pager.log false
Status in silent mode
-------------------------------------------------------------------
git status -s
will show a brief summary about the status of the files instead of long messages
Renaming & removing files
-----------------------------
git mv old_file new_file
git rm filename
then you have to commit
Create a branch and switch to it the delete it
-----------------------------------------------------
git checkout -b branch_name
git branch -d branch_name
See unstages & staged changes
-----------------------------------------
git diff shows only unstaged changes
git diff --staged shows only staged changes
git diff HEAD shows unstaged & staged changes
Explore objects content
----------------------------------
git cat-file -p SHA1-hash
git show
----------------------------------------------------------------------------------------
is equivalent to git show HEAD, i.e. the latest commit in the current branch
Creating a tag
--------------------------------------------------------------------------
whenever you push something to production you have to tag it:
git tag v1.0.0
the it will be visible when you execute git log or git show v1.0.0
Cancel changes
------------------------
git reset --soft HEAD^ undo last commit, put changes into staging
git commit --amend -m "New message" change the last commit
git reset --hard HEAD^ undo last commit and all changes
git reset --hard HEAD^^ undo last 2 commits and all changes
See changes in a commit
---------------------------------------
git show git show 9e2546
or if you want to have a summary :
git show --shortstat 9e2546
Checkout
-----------------------
git checkout filename
to cancel the last changes & modification made to the file
Diff
--------
git diff --color-words
or
gif diff --word-diff
to get differences of small changes like words
Fetch
-------------------------------------------
git fetch
only downloads new data from a remote repository - but it does not integrate any of this new data into your working files.
Show non-pushed commits
---------------------------------
git log origin/master..HEAD
Show non-pulled commits
-------------------------------
git fetch origin master
then :
git diff master origin/master
Set your details
---------------------------------------------------------------
git config --global user.name "John Doe"
git config --global user.email "john@example.com"
See your settings
---------------------
git config --list
Carriage return & line feed settings
--------------------------------------------------
in windows you should use :
git config --global core.autocrlf true
in linux or mac you should use :
git config --global core.autocrlf input
Alias
---------------
create .gitconfig file to your home directory then add
[alias]
alias_name = log --oneline
then you can use the alias as follow
git alias_name
Go back to a specific commit then go back to the present
-------------------------------------------------------------------
git checkout a1b2c3
... and return to master with:
git checkout master
See the files that changed between two commits
---------------------------------------------------------
git diff --name-only COMMIT1_ID COMMIT2_ID
Change message of last commit
----------------------------------------
git commit --amend -m "New commit message"
Push local commits to remote branch
---------------------------------------------
git push origin master
See commit history printed in single lines
------------------------------------------------
git log --pretty=oneline
or
git log --online
Commits in a specified date range
---------------------------------------------
git log --since '2018-12-10 00:00:00'
git log --until '2018-12-10 00:00:00'
git log --after '2018-12-10 00:00:00'
git log --before '2018-12-10 00:00:00'
Remove files from staging area
----------------------------------------
git rm --cached filename
or
git reset filename
Add modified files to the staging area and commit them
-----------------------------------------------------------------
git commit -a -m "Do something once more"
Typical git workflow
-----------------------------------------------------------------
1- create a new branch
git branch new-branch
2- switch to that branch
git checkout new-branch
3- do all the modification in your project
4- add your files to staging area and commit them
git add -A
git commit -m "your comments"
5- push to a remote repository
git push -u origin new-branch
6- if everything is ok in the new branch
git checkout master
git merge new-branch
git push origin master
7- delete the new-branch from local & remote
git branch -d new-branch
git push origin --delete new-branch
See the files that changed in commits
----------------------------------------------
git log --stat
Create a tag view it and push it to remote repository
--------------------------------------------------------------
git tag tag_name
git tag
git push --tags
Password prompt whenever pushing or pulling
--------------------------------------------------------
git config --system --unset credential.helper
git config --global --unset credential.helper
git config --local --unset credential.helper
| true |
f77b7ab3af02d140fc96f21dfc86ba50cd6013f0 | Python | marcociccone/DAL | /DAL_digits_release/datasets/unaligned_data_loader.py | UTF-8 | 2,711 | 2.65625 | 3 | [] | no_license | import torch.utils.data
from builtins import object
import torchvision.transforms as transforms
from datasets_ import Dataset
class PairedData:
    """Iterates a source loader and a target loader in lockstep.

    Whichever loader runs out first is restarted, so every step yields one
    source batch and one target batch. Iteration stops once both loaders
    have wrapped around, or after max_dataset_size steps.
    """

    def __init__(self, data_loader_A, data_loader_B, max_dataset_size):
        self.data_loader_A = data_loader_A
        self.data_loader_B = data_loader_B
        self.stop_A = False
        self.stop_B = False
        self.max_dataset_size = max_dataset_size

    def __iter__(self):
        # Reset wrap-around flags, step counter and both underlying iterators.
        self.stop_A = False
        self.stop_B = False
        self.data_loader_A_iter = iter(self.data_loader_A)
        self.data_loader_B_iter = iter(self.data_loader_B)
        self.iter = 0
        return self

    def __next__(self):
        # Advance the source stream, restarting it when exhausted.
        try:
            src, src_labels = next(self.data_loader_A_iter)
        except StopIteration:
            self.stop_A = True
            self.data_loader_A_iter = iter(self.data_loader_A)
            src, src_labels = next(self.data_loader_A_iter)
        # Advance the target stream the same way.
        try:
            tgt, tgt_labels = next(self.data_loader_B_iter)
        except StopIteration:
            self.stop_B = True
            self.data_loader_B_iter = iter(self.data_loader_B)
            tgt, tgt_labels = next(self.data_loader_B_iter)
        # Stop once both streams have wrapped, or the step cap is exceeded.
        if (self.stop_A and self.stop_B) or self.iter > self.max_dataset_size:
            self.stop_A = False
            self.stop_B = False
            raise StopIteration()
        self.iter += 1
        return {'S': src, 'S_label': src_labels,
                'T': tgt, 'T_label': tgt_labels}
class UnalignedDataLoader():
    """Builds shuffled source/target DataLoaders over image/label dicts and
    pairs them via PairedData (images resized to `scale`, normalized to
    roughly [-1, 1] per channel)."""
    def __init__(self, source, target, bs_source, bs_target, scale=32):
        # source/target: dicts with 'imgs' and 'labels' entries — presumably
        # array-likes accepted by Dataset; confirm in datasets_.Dataset.
        transform = transforms.Compose([
            transforms.Resize(scale),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])
        self.dataset_s = Dataset(source['imgs'], source['labels'], transform=transform)
        self.dataset_t = Dataset(target['imgs'], target['labels'], transform=transform)
        loader_s = torch.utils.data.DataLoader(
            self.dataset_s, batch_size=bs_source,
            shuffle=True, num_workers=4)
        loader_t = torch.utils.data.DataLoader(
            self.dataset_t, batch_size=bs_target,
            shuffle=True, num_workers=4)
        # No step cap: PairedData stops only after both loaders wrap around.
        self.paired_data = PairedData(loader_s, loader_t, float("inf"))
    def name(self):
        return 'UnalignedDataLoader'
    def load_data(self):
        return self.paired_data
    def __len__(self):
        # min(..., inf) never binds, so this is effectively the larger dataset size.
        return min(max(len(self.dataset_s), len(self.dataset_t)), float("inf"))
| true |
dcdf6edbeb9892c1d5851b1f112fb45c608aed05 | Python | Rpinto02/DPhi_MachineLearning_bootcamp | /Assignment 3/app/main.py | UTF-8 | 4,238 | 2.875 | 3 | [] | no_license | import streamlit as st
import pandas as pd
import numpy as np
import joblib
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler
def transform_input(scaler, Gender, Married, SelfEmployed, PropertyArea, Dependents, Education, CreditHistory, ApplicantIncome, CoapplicantIncome, LoanAmount, LoanAmountTerm):
    """Encode one applicant's answers into the model's feature frame.

    Builds a single-row DataFrame with the exact training-time column layout:
    one-hot flags for the categorical answers plus the four raw numeric
    fields, which are then standardized with the fitted `scaler`.
    Returns the 1x16 DataFrame ready for model.predict().
    """
    columns = ['ApplicantIncome', 'CoapplicantIncome', 'LoanAmount',
               'Loan_Amount_Term', 'Gender_Male', 'Gender_nan', 'Credit_History_1.0',
               'Credit_History_nan', 'Married_Yes', 'Dependents_1', 'Dependents_2', 'Dependents_3+',
               'Education_Not Graduate', 'Self_Employed_Yes', 'Property_Area_Semiurban', 'Property_Area_Urban']
    frame = pd.DataFrame([[0] * len(columns)], columns=columns)

    # (condition, column) pairs for every one-hot categorical flag. The
    # baseline categories (e.g. 'Female', 'Rural', '0') leave all flags at 0.
    one_hot = [
        (Gender == 'Male', 'Gender_Male'),
        (Gender == 'Other', 'Gender_nan'),
        (Married == 'Yes', 'Married_Yes'),
        (SelfEmployed == 'Yes', 'Self_Employed_Yes'),
        (PropertyArea == 'Semiurban', 'Property_Area_Semiurban'),
        (PropertyArea == 'Urban', 'Property_Area_Urban'),
        (Dependents == '1', 'Dependents_1'),
        (Dependents == '2', 'Dependents_2'),
        (Dependents == '3+', 'Dependents_3+'),
        (Education == 'Not Graduate', 'Education_Not Graduate'),
        (CreditHistory == 'Yes', 'Credit_History_1.0'),
        (CreditHistory == 'Does not know', 'Credit_History_nan'),
    ]
    for active, column in one_hot:
        if active:
            frame.loc[0, column] = 1

    # Raw numeric answers go in unscaled first...
    frame.loc[0, 'ApplicantIncome'] = ApplicantIncome
    frame.loc[0, 'CoapplicantIncome'] = CoapplicantIncome
    frame.loc[0, 'LoanAmount'] = LoanAmount
    frame.loc[0, 'Loan_Amount_Term'] = LoanAmountTerm

    # ...then are standardized with the scaler fitted at training time.
    numeric_columns = ['ApplicantIncome', 'CoapplicantIncome', 'LoanAmount', 'Loan_Amount_Term']
    scaled = frame[numeric_columns].copy()
    scaled.loc[:, numeric_columns] = scaler.transform(scaled)
    frame[numeric_columns] = scaled
    return frame
def load_scaler():
    """Load the fitted feature scaler (joblib-pickled) from the model folder."""
    scaler = joblib.load('model/scaler.pkl')
    return scaler
def load_model():
    """Load the trained classifier from disk — presumably the random-forest
    grid-search result, per the filename; confirm with the training code."""
    model = joblib.load('model/loan_model_rfc_gs.dat')
    return model
def main():
    """Render the Streamlit loan-approval UI: collect the applicant's answers,
    encode them with transform_input, and show the model's decision on demand."""
    st.title('Loan Approval')
    st.markdown("---")
    st.text("This is an automatic loan approver according to the clients data.")
    # Categorical answers (the option strings must match transform_input's checks).
    Gender = st.selectbox(
        'What is your gender?',
        ('Male', 'Female', 'Other')
    )
    Married = st.selectbox(
        'Are you married?',
        ('Yes', 'No')
    )
    Dependents = st.selectbox(
        'How many dependents do you have?',
        ('0', '1', '2','3+')
    )
    Education = st.selectbox(
        'What is your level of education?',
        ('Graduate', 'Not Graduate')
    )
    SelfEmployed = st.selectbox(
        'Are you self employed?',
        ('Yes', 'No')
    )
    CreditHistory = st.selectbox(
        'Do you have all your debts paid?',
        ('Yes', 'No', 'Does not know')
    )
    PropertyArea = st.selectbox(
        'How is the region where you live?',
        ('Semiurban', 'Urban', 'Rural')
    )
    # Numeric answers, passed straight through to the scaler.
    ApplicantIncome = st.number_input('What is you income?')
    CoapplicantIncome = st.number_input('What is the income of your coapplicant?')
    LoanAmount = st.number_input('What is the amount of the intended loan?')
    LoanAmountTerm = st.number_input('What is the term of that loan?')
    if st.button('Approve Loan'):
        # Encode exactly like the training features, then predict (1 == approved).
        df = transform_input(load_scaler(),Gender, Married, SelfEmployed, PropertyArea, Dependents, Education, CreditHistory, ApplicantIncome, CoapplicantIncome, LoanAmount, LoanAmountTerm)
        predict = load_model().predict(df)
        if predict == 1:
            st.write('Loan Approved')
        else:
            st.write('Loan Not Approved')
    st.sidebar.title("Useful Links")
    st.sidebar.markdown("---")
    st.sidebar.markdown("[Github]"
                        "(https://github.com/Rpinto02/)")
    st.sidebar.markdown("[Linkedin]"
                        "(https://www.linkedin.com/in/rpinto02/)")
    st.sidebar.markdown("[DPhi]"
                        "(https://DPhi.tech)")
# Streamlit executes this module as a script.
if __name__ == '__main__':
    main()
c06579ffb9b9b7b065f721d5b78a5070559bc1d6 | Python | AndrewKhamov/Python-Tutorials | /Задание 48. Сортировка (неделя 6).py | UTF-8 | 240 | 3.203125 | 3 | [] | no_license | n = int(input())
a = list(map(int, input().split()))  # the array itself, space-separated on one line
# n (read above) is the declared element count; mismatches are rejected.
if len(a) == n:
    # NOTE(review): prints the Python list repr (brackets/commas) — confirm
    # the exercise checker does not expect space-separated numbers instead.
    print(sorted(a))
else:
    print('error')
# Sort the given array using the built-in sort.
| true |
34f59a9419d57f2b1181c160af4429e1f37fd21a | Python | xfdywy/vrsgd | /manual/resnet_cifar100/cifarnetdef.py | UTF-8 | 3,376 | 2.59375 | 3 | [] | no_license | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 6 20:29:23 2017
@author: yuewang
"""
import tensorflow as tf
import numpy as np
import keras
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
import tensorflow.contrib.slim.nets
import tflearn
class cifarnetdef():
    """ResNet-style CIFAR classifier built with tflearn.

    `n` is the total layer count and must satisfy (n - 2) % 6 == 0; the model
    uses (n - 2) / 6 residual blocks per stage (16/32/64 filters).
    """
    def __init__(self,imagesize,n,num_class=10):
        # self.images = tf.placeholder('float32',[None,imagesize,imagesize ,3])
        self.dropout_keep_prob = tf.placeholder('float32',[])
        self.n = (n-2)/6  # float here; cast back to int in buildnet()
        assert((n-2) % 6 == 0 )
        self.num_class = num_class
    def buildnet(self):
        n=int(self.n)
        print('self n: ',self.n)  # NOTE(review): debug output — consider removing
        # Building Residual Network
        net = tflearn.input_data(shape=[None, 32, 32, 3] )
        net = tflearn.conv_2d(net, 16, 3, regularizer='L2', weight_decay=0.0005)
        net = tflearn.residual_block(net, n, 16)
        net = tflearn.residual_block(net, 1, 32, downsample=True)
        net = tflearn.residual_block(net, n-1, 32)
        net = tflearn.residual_block(net, 1, 64, downsample=True)
        net = tflearn.residual_block(net, n-1, 64)
        net = tflearn.batch_normalization(net)
        net = tflearn.activation(net, 'relu')
        net = tflearn.global_avg_pool(net)
        # Regression
        self.logits = tflearn.fully_connected(net, self.num_class)
        model = tflearn.DNN(self.logits)
        self.images = model.inputs[0]  # expose the graph's input placeholder
#
#class cifarnetdef_resnet_slim():
# def __init__(self,imagesize,n,num_class=10):
# self.images = tf.placeholder('float32',[None,imagesize,imagesize ,3])
# self.dropout_keep_prob = tf.placeholder('float32',[])
# self.n = (n-2)/6
# assert((n-2) % 6 == 0 )
# self.num_class = num_class
# def buildnet(self):
#
# resnet = tensorflow.contrib.slim.nets.resnet_v1
#
# model = resnet.resnet_v1_50(inputs=self.images,num_classes=self.num_class)
# net = model[0]
# # Building Residual Network
#
# # Regression
# self.logits = tflearn.fully_connected(net, self.num_class)
# model = tflearn.DNN(self.logits)
# self.images = model.inputs[0]
class cifarnetdef_simple():
    """Small VGG-like CIFAR-10 classifier mixing Keras layers with raw TF dropout."""
    def __init__(self,imagesize):
        self.images = tf.placeholder('float32',[None,imagesize,imagesize ,3])
        self.dropout_keep_prob = tf.placeholder('float32',[])
    def buildnet(self):
        # two conv blocks (32 then 64 filters), each followed by pooling + dropout
        net = Conv2D(32, (3, 3), padding='same',activation='relu' )(self.images)
        net = Conv2D(32, (3, 3), padding='same',activation='relu' )(net)
        net = MaxPooling2D((2, 2) , padding='same')(net)
        net = tf.nn.dropout(net,self.dropout_keep_prob)
        net = Conv2D(64, (3, 3), padding='same',activation='relu' )(net)
        net = Conv2D(64, (3, 3), padding='same',activation='relu' )(net)
        net = MaxPooling2D((2, 2) , padding='same')(net)
        net = tf.nn.dropout(net,self.dropout_keep_prob)
        net = Flatten()(net)
        net = Dense(512,activation='relu')(net)
        net = tf.nn.dropout(net,self.dropout_keep_prob)
        self.logits = Dense(10)(net)  # 10 CIFAR-10 classes
        self.prob = tf.nn.softmax(self.logits)
        self.net = net  # penultimate features, kept for reuse
6d73e0b98e489b49ec6dc9610c969172e4fc1b94 | Python | erickmiller/AutomatousSourceCode | /AutonomousSourceCode/data/raw/squareroot/852115a1-f869-4dc3-a693-5c19c28748b5__problem-009.py | UTF-8 | 307 | 3.546875 | 4 | [] | no_license | #!/usr/bin/python3
import math
def square_root(n):
    """Return the exact integer square root of n if n is a perfect square, else None.

    Uses math.isqrt, which is exact for arbitrarily large ints; the original
    int(math.sqrt(n)) relied on float rounding and can mis-classify large
    perfect squares.
    """
    s = math.isqrt(n)
    if s * s == n:
        return s
    else:
        return None
# Project Euler 9: find the Pythagorean triple a < b < c with
# a + b + c == 1000 and print the product a*b*c.
for a in range(1, 1000):
    for b in range(a + 1, 1000):
        c = square_root(a * a + b * b)
        # c is None unless a*a + b*b is a perfect square (i.e. a valid triple)
        if c and a + b + c == 1000:
            print(a * b * c)
| true |
c046d8195b000e2c94897581df7cf2825d0c2744 | Python | armsky/Preps | /Amazon/Minimum Depth of a Binary Tree.py | UTF-8 | 875 | 3.953125 | 4 | [] | no_license | """
Given a binary tree, find its minimum depth. The minimum depth is the number of
nodes along the shortest path from root node down to the nearest leaf node.
Note that the path must end on a leaf node. For example, minimum height of below
Binary Tree is also 2.
10
/
5
"""
class Solution:
    """
    @param root: The root of binary tree
    @return: An integer
    """
    def minDepth(self, root):
        """Return the number of nodes on the shortest root-to-leaf path.

        An empty tree has depth 0. A path must end on a real leaf, so a node
        with a single child cannot terminate a path through its missing side.
        """
        if not root:
            return 0
        if root.left and root.right:
            return 1 + min(self.minDepth(root.left), self.minDepth(root.right))
        # One subtree is empty: descend into whichever child exists
        # (the depths sum because the missing side contributes 0).
        return 1 + self.minDepth(root.left) + self.minDepth(root.right)

    def getDepth(self, root, depth):
        """Backward-compatible helper: nearest-leaf depth below root, offset
        by `depth`; preserves the original sentinel for a missing node."""
        if not root:  # None is not a leaf
            return 0xffffffff
        return depth + self.minDepth(root)
| true |
72635c64e06cbf4f9061019ed72b2423466109e2 | Python | ChangShuaibin/Learning | /read_time.py | UTF-8 | 85 | 2.6875 | 3 | [] | no_license | import datetime
# Capture the current local time once, at import.
time = datetime.datetime.now()
# Build a 'YYYY-MM-DD-HH:MM:SS' stamp. strftime states the format explicitly,
# unlike the original slicing of str(time), which depended on datetime's
# string layout (and silently breaks if that layout ever changes).
name = time.strftime('%Y-%m-%d-%H:%M:%S')
e091ffe41b51e5314c7ae6785472885eb73ae073 | Python | nyu-mll/jiant | /tests/tasks/lib/test_sst.py | UTF-8 | 4,460 | 3.015625 | 3 | [
"MIT"
] | permissive | import os
from collections import Counter
import numpy as np
from jiant.tasks.retrieval import create_task_from_config_path
from jiant.utils.testing.tokenizer import SimpleSpaceTokenizer
# Raw SST-style examples exactly as read from the toy dataset
# (guid, original text with trailing space, string label).
TRAIN_EXAMPLES = [
    {"guid": "train-0", "text": "hide new secretions from the parental units ", "label": "0"},
    {"guid": "train-1", "text": "contains no wit , only labored gags ", "label": "0"},
    {
        "guid": "train-2",
        "text": "that loves its characters and communicates something rather beautiful about "
        "human nature ",
        "label": "1",
    },
    {
        "guid": "train-3",
        "text": "remains utterly satisfied to remain the same throughout ",
        "label": "0",
    },
    {
        "guid": "train-4",
        "text": "on the worst revenge-of-the-nerds clich\u00e9s the filmmakers could dredge up ",
        "label": "0",
    },
]
# Expected result of whitespace tokenization of TRAIN_EXAMPLES
# (labels converted from strings to integer ids).
TOKENIZED_TRAIN_EXAMPLES = [
    {
        "guid": "train-0",
        "text": ["hide", "new", "secretions", "from", "the", "parental", "units"],
        "label_id": 0,
    },
    {
        "guid": "train-1",
        "text": ["contains", "no", "wit", ",", "only", "labored", "gags"],
        "label_id": 0,
    },
    {
        "guid": "train-2",
        "text": [
            "that",
            "loves",
            "its",
            "characters",
            "and",
            "communicates",
            "something",
            "rather",
            "beautiful",
            "about",
            "human",
            "nature",
        ],
        "label_id": 1,
    },
    {
        "guid": "train-3",
        "text": ["remains", "utterly", "satisfied", "to", "remain", "the", "same", "throughout"],
        "label_id": 0,
    },
    {
        "guid": "train-4",
        "text": [
            "on",
            "the",
            "worst",
            "revenge-of-the-nerds",
            "clich\u00e9s",
            "the",
            "filmmakers",
            "could",
            "dredge",
            "up",
        ],
        "label_id": 0,
    },
]
# Expected featurization of the first example at max_seq_length=10:
# <cls>/<sep>-wrapped tokens, ids padded to length 10, mask marking the
# 9 real positions, single-segment ids.
FEATURIZED_TRAIN_EXAMPLE_0 = {
    "guid": "train-0",
    "input_ids": np.array([1, 4, 5, 6, 7, 8, 9, 10, 2, 0]),
    "input_mask": np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 0]),
    "segment_ids": np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
    "label_id": 0,
    "tokens": ["<cls>", "hide", "new", "secretions", "from", "the", "parental", "units", "<sep>"],
}
def test_featurization_of_task_data():
    """End-to-end check of the SST task pipeline: raw examples -> tokenized
    examples -> featurized example, against the fixtures defined above."""
    # Test reading the task-specific toy dataset into examples.
    task = create_task_from_config_path(
        os.path.join(os.path.dirname(__file__), "resources/sst.json"), verbose=True
    )
    examples = task.get_train_examples()
    for example_dataclass, raw_example_dict in zip(examples, TRAIN_EXAMPLES):
        assert example_dataclass.to_dict() == raw_example_dict
    # Testing conversion of examples into tokenized examples
    # the dummy tokenizer requires a vocab — using a Counter here to find that vocab from the data:
    token_counter = Counter()
    for example in examples:
        token_counter.update(example.text.split())
    token_vocab = list(token_counter.keys())
    tokenizer = SimpleSpaceTokenizer(vocabulary=token_vocab)
    tokenized_examples = [example.tokenize(tokenizer) for example in examples]
    for tokenized_example, expected_tokenized_example in zip(
        tokenized_examples, TOKENIZED_TRAIN_EXAMPLES
    ):
        assert tokenized_example.to_dict() == expected_tokenized_example
    # Testing conversion of a tokenized example to a featurized example
    feat_spec = tokenizer.get_feat_spec(max_seq_length=10)
    featurized_examples = [
        tokenized_example.featurize(tokenizer=tokenizer, feat_spec=feat_spec)
        for tokenized_example in tokenized_examples
    ]
    featurized_example_0_dict = featurized_examples[0].to_dict()
    # not bothering to compare the input_ids because they were made by a dummy tokenizer.
    assert "input_ids" in featurized_example_0_dict
    assert featurized_example_0_dict["guid"] == FEATURIZED_TRAIN_EXAMPLE_0["guid"]
    assert (
        featurized_example_0_dict["input_mask"] == FEATURIZED_TRAIN_EXAMPLE_0["input_mask"]
    ).all()
    assert (
        featurized_example_0_dict["segment_ids"] == FEATURIZED_TRAIN_EXAMPLE_0["segment_ids"]
    ).all()
    assert featurized_example_0_dict["label_id"] == FEATURIZED_TRAIN_EXAMPLE_0["label_id"]
    assert featurized_example_0_dict["tokens"] == FEATURIZED_TRAIN_EXAMPLE_0["tokens"]
| true |
491924ff15657c1c74008b866396e1b9dd0a4e14 | Python | leverans/async_curses_game | /star_animation.py | UTF-8 | 799 | 2.796875 | 3 | [] | no_license | import asyncio
import curses
from random import randint
# захотелось отделить сценарий анимации от логики, так вроде гораздо удобнее
from utilities import sleep
# Blink animation script: (duration_in_ticks, curses display attribute) pairs,
# cycled forever by blink() below — dim, normal, bold, normal.
STAR_ANIMATION_STEPS = (
    (20, curses.A_DIM),
    (3, 0),
    (5, curses.A_BOLD),
    (3, 0),
)
# Total ticks in one full blink cycle; used to randomize star phase offsets.
BLINK_LENGTH = sum(step[0] for step in STAR_ANIMATION_STEPS)
async def blink(canvas, row, column, symbol='*', animation_offset=0):
# задерживаем фазу звезды на случайное число кадров в пределах длины цикла анимации
await sleep(animation_offset)
while True:
for step in STAR_ANIMATION_STEPS:
canvas.addstr(row, column, symbol, step[1])
await sleep(step[0]) | true |
937e908305af796fba82cc0c0b2ce873c14899f1 | Python | JudyCoelho/exerciciosCursoPython | /aula13/aula13.py | UTF-8 | 262 | 3.953125 | 4 | [] | no_license | usuario = input('Digite seu usuário: ')
# Username must be at least 6 characters long.
qtd_caracteres = len(usuario)
#print(usuario, qtd_caracteres, type(qtd_caracteres))
if qtd_caracteres < 6:
    print('Você precisa digitar pelo menos 6 caracteres')
else:
    print('Você foi cadastrado no sistema.')
| true |
644a38bfc127a8d2c47c72cf5756bbc9db6f9e33 | Python | sudosays/weatherwatch | /weatherwatch_proj/weatherwatch/views.py | UTF-8 | 714 | 2.734375 | 3 | [] | no_license | from django.http import HttpResponse
from django.template import RequestContext
from django.shortcuts import render_to_response
def index(request):
    """Render the landing page, passing a greeting as 'boldmessage' in the context."""
    context = RequestContext(request)
    context_dict = {'boldmessage':'Hi Joan! :)'}
    return render_to_response('weatherwatch/index.html', context_dict, context)
def about(request):
    """Render the about page with the first ten Fibonacci numbers.

    Builds the string ' 1 1 2 3 5 8 13 21 34 55' (leading space preserved
    from the original formatting) and passes it to the template as 'fib'
    together with the count 'n'.

    Fixes: the original assigned an unused ``context = RequestContext(request)``
    local and then built a second RequestContext for the response; the dead
    local is removed.
    """
    # quick fibonacci
    fib_string = ''
    a, b = 1, 1
    n = 0
    while n < 10:
        fib_string += ' ' + str(a)
        a, b = b, a + b
        n += 1
    var_dict = {'fib': fib_string, 'n': n}
    return render_to_response('weatherwatch/about.html', var_dict, RequestContext(request))
| true |
d9e3c21e1b565b96fb2317380bd869161822838e | Python | willhunt/silvia | /silvia/silviacontrol/display.py | UTF-8 | 2,415 | 2.609375 | 3 | [
"MIT"
] | permissive | from django.conf import settings as django_settings
from silviacontrol.utils import debug_log
from board import SCL, SDA
import busio
import board
import adafruit_ssd1306
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
import time
class SilviaDisplay():
    """
    Display using newer Adafruit circuit python library

    Drives a 128x64 SSD1306 OLED over I2C, drawing frames with Pillow and
    pushing them to the panel.  Fonts are loaded from Django's STATIC_ROOT.
    """

    def __init__(self, i2c_address):
        # i2c_address: I2C bus address of the SSD1306 panel.
        self.i2c_address = i2c_address
        # Large font for the temperature reading, small font for labels.
        self.font_data = ImageFont.truetype(django_settings.STATIC_ROOT + '/silviacontrol/fonts/Roboto-Regular.ttf', 30)
        self.font_sub = ImageFont.truetype(django_settings.STATIC_ROOT + '/silviacontrol/fonts/Roboto-Regular.ttf', 12)
        i2c = board.I2C()
        self.display = adafruit_ssd1306.SSD1306_I2C(128, 64, i2c, addr=self.i2c_address)
        # Start from a cleared screen.
        self.showBlank()

    def getDisplay(self):
        """Return the display handle created in __init__."""
        # i2c = board.I2C()
        # return adafruit_ssd1306.SSD1306_I2C(128, 64, i2c, addr=self.i2c_address)
        return self.display

    def welcome(self):
        """Show the logo splash for 2 seconds, then clear.  Blocks the caller."""
        self.showWelcome()
        time.sleep(2)
        self.showBlank()

    def showWelcome(self):
        """Draw the Silvia logo, scaled to the panel and converted to 1-bit."""
        display = self.getDisplay()
        image = Image.open(django_settings.STATIC_ROOT + '/silviacontrol/display/silvia_logo_128x64_inverted.png') \
            .resize((display.width, display.height), Image.ANTIALIAS) \
            .convert('1')
        display.image(image)
        display.show()

    def showTemperature(self, T, T_set):
        """Render current temperature T (None shown as '-') and setpoint T_set."""
        display = self.getDisplay()
        image = Image.new('1', (display.width, display.height))
        # Get drawing object to draw on image.
        drawing = ImageDraw.Draw(image)
        padding_x = 2
        padding_y = 2
        drawing.text((padding_x, padding_y),
            "Temperature:", font=self.font_sub, fill=255)
        if T is None:
            # No reading available yet.
            drawing.text((padding_x + 4, 22),
                "- {0}C".format(u'\N{DEGREE SIGN}'), font=self.font_data, fill=255)
        else:
            drawing.text((padding_x + 4, 22),
                "{0:.0f}{1}C".format(T, u'\N{DEGREE SIGN}'), font=self.font_data, fill=255)
        # Setpoint shown small, in brackets, to the right.
        drawing.text((85, 35),
            "[{0:.0f}{1}C]".format(T_set, u'\N{DEGREE SIGN}'), font=self.font_sub, fill=255)
        display.image(image)
        display.show()

    def showBlank(self):
        """Clear the panel (all pixels off)."""
        display = self.getDisplay()
        display.fill(0)
        display.show()
| true |
a7cf37677f75607d3dbe5c6cc761abbfe2df4c0e | Python | dyylanl/TPs-Numerico | /TP 1/Codigo Ejercicios/Ejercicio 3/BusquedaRaicesSecante.py | UTF-8 | 2,265 | 3.1875 | 3 | [] | no_license | import math
import sympy as sym
import numpy as np
def busqueda_raiz_secante(funcion, semilla1, semilla2, error, paso_a_paso=False, iteracionesForzadas = None):
    """Secant-method root search for the sympy expression ``funcion`` in x.

    Starting from the seeds ``semilla1``/``semilla2``, iterates until the step
    size drops to <= ``error``, or for exactly ``iteracionesForzadas``
    iterations when that argument is given.  With ``paso_a_paso`` each step is
    printed via mostrar_informacion.  Returns a tuple
    (root, last_step_error, iteration_count, historia) where ``historia`` is
    an array of (iteration, value, step_error) rows.
    """
    x= sym.Symbol('x')
    y= sym.Symbol('y')  # NOTE(review): y is never used
    dato_viejo1 = semilla1
    dato_viejo2 = semilla2
    # will break if more than 50 iterations are needed: the history buffer
    # is a fixed-size array (translated from the original Spanish comment)
    historia = np.zeros((50, 3))
    historia[0] = (0, dato_viejo1, None)
    historia[1] = (1, dato_viejo2, None)
    iteraciones = 2
    # First secant step: x1 - f(x1)*(x1 - x2) / (f(x1) - f(x2))
    dato = dato_viejo1 - (((funcion.subs(x,dato_viejo1)) * (dato_viejo1 - dato_viejo2))/ ((funcion.subs(x, dato_viejo1)) - funcion.subs(x, dato_viejo2) ) )
    if (iteracionesForzadas == None):
        # Iterate until the change between successive iterates is <= error.
        while not (abs(dato - dato_viejo1) <= error):
            historia[iteraciones] = (iteraciones, dato, abs(dato-dato_viejo1))
            iteraciones+=1
            if paso_a_paso:
                mostrar_informacion(dato, abs(dato-dato_viejo1), iteraciones)
            dato_viejo2= dato_viejo1
            dato_viejo1 = dato
            dato = dato_viejo1 - (((funcion.subs(x,dato_viejo1)) * (dato_viejo1 - dato_viejo2))/ ((funcion.subs(x, dato_viejo1)) - funcion.subs(x, dato_viejo2) ))
    else:
        # Fixed number of iterations, regardless of convergence.
        while (iteraciones < iteracionesForzadas):
            historia[iteraciones] = (iteraciones, dato, abs(dato-dato_viejo1))
            iteraciones+=1
            if paso_a_paso:
                mostrar_informacion(dato, abs(dato-dato_viejo1), iteraciones)
            dato_viejo2= dato_viejo1
            dato_viejo1 = dato
            dato = dato_viejo1 - (((funcion.subs(x,dato_viejo1)) * (dato_viejo1 - dato_viejo2))/ ((funcion.subs(x, dato_viejo1)) - funcion.subs(x, dato_viejo2) ))
    # Record the final iterate and trim the unused tail of the buffer.
    historia[iteraciones] = (iteraciones, dato, abs(dato-dato_viejo1))
    historia = historia[:iteraciones + 1]
    return (dato, abs(dato- dato_viejo1), iteraciones - 1, historia)
def mostrar_informacion(valor, error, iteracion):
    """Print a progress report (iteration number, current value, current error)."""
    report = ("Iteracion: " + str(iteracion),
              "Valor: " + str(valor),
              "Error actual: " + str(error),
              "--")
    for line in report:
        print(line)
def mostrar_resultados(resultados):
print("RESULTADOS:")
print("Iteraciones: "+ str(resultados[2]))
print("Raiz aproximada: "+ str(resultados)) | true |
324ab5ef867d6fc47a158c9af149eb9f90c45668 | Python | malcolmrite-dsi/JSE_Researcher | /Company_List_Generator.py | UTF-8 | 1,919 | 2.59375 | 3 | [
"MIT"
] | permissive | from request_web import SensGetter
import csv
import requests
from bs4 import BeautifulSoup
class CompanyGenerator():
    """Scrape JSE company/sector listings via SensGetter and write them to CSV.

    Fixes over the original: the methods were declared without ``self``/``cls``
    and therefore only worked when invoked on the class object itself; they are
    now explicit ``@staticmethod``s (class-level calls keep working, instance
    calls become valid too).  Output files are opened with ``with`` so they are
    closed even if a request fails, the builtin name ``list`` is no longer
    shadowed, and the unused ``codeCount`` locals are gone.
    """

    @staticmethod
    def get_all_companies():
        """Write every listed company (share code, short name) to JSE_company_list.csv.

        Returns the output filename.
        """
        with open("JSE_company_list.csv", "w") as df:
            csv_writer = csv.writer(df)
            csv_writer.writerow(["Share Code", "Short Name"])
            # ShareData groups its share index by leading character: '4' plus A-Z.
            all_comp = ["4", "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M",
                        "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"]
            for index in all_comp:
                url = f"https://www.sharedata.co.za/V2/Controls/Shares/ShareIndex/SIJSONData.aspx?indextype={index}&sortfield=FULLNAME"
                companies = SensGetter.get_company_list(SensGetter.get_html(url))
                print(f'{index}' + " Loaded")
                # The scraped list alternates: code, name, code, name, ...
                for i in range(0, len(companies) - 1, 2):
                    csv_writer.writerow([companies[i], companies[i + 1]])
        return "JSE_company_list.csv"

    @staticmethod
    def get_top_40():
        """Write the TOP40 index constituents to TOP40_company_list.csv; return the filename."""
        with open("TOP40_company_list.csv", "w") as df:
            csv_writer = csv.writer(df)
            csv_writer.writerow(["Share Code", "Short Name"])
            url = "https://www.sharedata.co.za/V2/Controls/Shares/ShareIndex/SIJSONData.aspx?indextype=TOP40&sortfield=FULLNAME"
            companies = SensGetter.get_company_list(SensGetter.get_html(url))
            for i in range(0, len(companies) - 1, 2):
                csv_writer.writerow([companies[i], companies[i + 1]])
        return "TOP40_company_list.csv"

    @staticmethod
    def get_jse_sectors():
        """Write (share code, ICB code) pairs to Sector_List.csv; return the filename."""
        with open("Sector_List.csv", "w") as df:
            csv_writer = csv.writer(df)
            csv_writer.writerow(["Share Code", "ICB Code"])
            url = "https://www.jse.co.za/services/indices/ICBSector"
            text_list, icb_codes = SensGetter.get_sector_list(SensGetter.get_html(url))
            # Skips index 0 — presumably a header row; matches the original loop.
            for i in range(1, len(text_list)):
                csv_writer.writerow([text_list[i], icb_codes[i]])
        return "Sector_List.csv"
| true |
f90017150e79f3714f4d36c0c7f81e44bc818157 | Python | xiaoyaowudi-extreme/2019-nCoV | /test.py | UTF-8 | 806 | 3.25 | 3 | [] | no_license | from scipy import log as log
import numpy
from scipy import log
from scipy.optimize import curve_fit
def func(x, a, b):
    """Logarithmic model a*log(x) + b used as the curve_fit target."""
    return a * log(x) + b
def polyfit(x, y, degree):
    """Fit the logarithmic model ``func`` to (x, y) with scipy's curve_fit.

    Returns {'polynomial': fitted (a, b) parameters, 'determination': R^2}.
    NOTE(review): the ``degree`` argument is unused — the model is always the
    two-parameter a*log(x)+b (the numpy.polyfit line below is commented out).
    """
    results = {}
    #coeffs = numpy.polyfit(x, y, degree)
    popt, pcov = curve_fit(func, x, y)
    results['polynomial'] = popt
    # r-squared
    # Model predictions at the input points.
    yhat = func(x ,popt[0] ,popt[1] )  # or [p(z) for z in x]
    ybar = numpy.sum(y)/len(y)  # or sum(y)/len(y)
    # Relies on numpy scalar/array broadcasting: y may be a plain list here.
    ssreg = numpy.sum((yhat-ybar)**2)  # or sum([ (yihat - ybar)**2 for yihat in yhat])
    sstot = numpy.sum((y - ybar)**2)  # or sum([ (yi - ybar)**2 for yi in y])
    results['determination'] = ssreg / sstot
    return results
x=[ 1 ,2 ,3 ,4 ,5 ,6]
y=[ 2.5 ,3.51 ,4.45 ,5.52 ,6.47 ,7.51]
z1 = polyfit(x, y, 2)
print(z1) | true |
15b9b7007496354cb5452880b0cb889509464614 | Python | therealaleksandar/OOP-farma | /zito_klasa.py | UTF-8 | 825 | 2.6875 | 3 | [] | no_license | from usev_klasa import *
class Zito(Usev):
    """Wheat crop ("Zito"): a Usev with growth parameters (1, 3, 5).

    The positional meaning of (1, 3, 5) is defined by Usev.__init__ —
    presumably required light, required water and growth rate; verify
    against usev_klasa.

    Fixes over the original: removed a block of commented-out dead code
    inside rasti() and added documentation; behavior is unchanged.
    """

    def __init__(self):
        super().__init__(1, 3, 5)
        self._tip = "Zito"

    def rasti(self, svetlost, voda):
        """Advance the crop by one day with the given light and water.

        Growth (by self._brzina_rasta) happens only when both resources meet
        the crop's requirements; the day counter and status always advance.
        """
        if svetlost >= self._potrebna_svetlost and voda >= self._potrebna_voda:
            self._rast += self._brzina_rasta
        self._dani_rasta += 1
        self._azuriraj_status()
def main():
    # Smoke test: create a wheat crop and grow it twice by hand, printing the
    # status report after each step.  rucni_rast comes from usev_klasa via the
    # star import — presumably applies one manual growth step; verify there.
    zito1=Zito()
    print(zito1.izvestaj())
    rucni_rast(zito1)
    print(zito1.izvestaj())
    rucni_rast(zito1)
    print(zito1.izvestaj())
if __name__=='__main__':
main() | true |
85d2d32d7c69016b4064c77e700dd110b4cd4eaa | Python | KKosukeee/CodingQuestions | /LeetCode/334_increasing_triplet_subsequence.py | UTF-8 | 1,841 | 4.125 | 4 | [] | no_license | """
Solution for 334. Increasing Triplet Subsequence
https://leetcode.com/problems/increasing-triplet-subsequence/
"""
class Solution:
"""
Runtime: 40 ms, faster than 75.02% of Python3 online submissions for Increasing Triplet
Subsequence.
Memory Usage: 13.5 MB, less than 6.80% of Python3 online submissions for Increasing Triplet
Subsequence.
"""
def increasingTriplet(self, nums):
"""
Given an unsorted array return whether an increasing subsequence of length 3 exists or not
in the array.
Formally the function should:
Return true if there exists i, j, k
such that arr[i] < arr[j] < arr[k] given 0 ≤ i < j < k ≤ n-1 else return false.
Note: Your algorithm should run in O(n) time complexity and O(1) space complexity.
Example 1:
Input: [1,2,3,4,5]
Output: true
Example 2:
Input: [5,4,3,2,1]
Output: false
Args:
nums:
Returns:
"""
seq = []
# loop for each number in nums array
for num in nums:
# check if seq array is empty or num is greather than its previous
if not seq or num > seq[-1]:
# if so, append the num in seq array
seq.append(num)
# if you found i, j and k such that i < j < k, the return True
if len(seq) == 3:
return True
# check if ith element in seq is greater than current num
elif num <= seq[0]:
# if so, replace that element
seq[0] = num
# check if jth element is seq is greater than current num
elif num <= seq[1]:
# if so, replace that element
seq[1] = num
return False
| true |
04bd2683671687e669a0b2df68a6949d41242577 | Python | SaretMagnoslove/Python_3_Basics_Tutorial_Series-Sentdex | /Lesson44_ftplib.py | UTF-8 | 694 | 2.828125 | 3 | [] | no_license | from ftplib import FTP
# in order for this code to work you will have to fill in your credentials
ftp = FTP('DomainName.com')
ftp.login(user='username', passwd='password')
ftp.cwd('/specific_domain/') # depends on the site you connect to
# downloading a file:
def grabFile():
    """Download 'filename.extention' from the module-level FTP connection.

    NOTE(review): ftp.quit() ends the shared session before the local file is
    closed, so any later use of ``ftp`` (e.g. placeFile) will fail — confirm
    this is intended for a one-shot script.
    """
    filename = 'filename.extention'
    localfile = open(filename, 'wb') # write binary
    ftp.retrbinary('RETR ' + filename, localfile.write, 1024) # buffer size is 1024
    ftp.quit() # closing the connection
    localfile.close() # closing the file
# uploading files to the ftp server
def placeFile():
filename = 'fileName.extention'
ftp.storbinary('STOR ' + filename, open(filename, 'rb')) # read binary | true |
c9cd8edca6ea4f360bc2d266b201f733169af2d5 | Python | garylvov/summer-2020---learning_ROS-Gazebo | /learning_stage/scripts/navHallway.py | UTF-8 | 1,712 | 2.625 | 3 | [] | no_license | #!/usr/bin/env python
import rospy
import math
from sensor_msgs.msg import LaserScan
from std_msgs.msg import String
from geometry_msgs.msg import Twist
rangeAhead = 0.0
maxRange = 0.0
minRange = 0.0
slightLeft = 0.0
slightRight = 0.0
left = 0.0
right = 0.0
def rangeCallback(msg):
    """LaserScan callback: cache key range readings in module-level globals.

    Stores the ray straight ahead, overall min/max, rays 20 indices either
    side of center, and the extreme left/right rays, for use by velocity().
    """
    global rangeAhead, maxRange, minRange, slightLeft, slightRight, left, right
    rangeAhead = msg.ranges[int(len(msg.ranges)/2)]  # center ray
    maxRange = max(msg.ranges)
    minRange = min(msg.ranges)
    slightLeft = msg.ranges[int(len(msg.ranges)/2) + 20]
    slightRight = msg.ranges[int(len(msg.ranges)/2) - 20]
    left = msg.ranges[len(msg.ranges)-1]  # last ray — assumed leftmost; confirm scan orientation
    right = msg.ranges[0]                 # first ray — assumed rightmost
def velocity():
    """Main control loop: steer toward the longer-range side of the hallway.

    Runs until ROS shutdown, reading the globals updated by rangeCallback
    (which runs on the subscriber thread).  The inner while loops publish a
    small rotation until the chosen side reading approaches the center-ray
    reading.  NOTE(review): ``publ`` is created but never used.
    """
    pub = rospy.Publisher('/cmd_vel', Twist, queue_size = 10)
    publ = rospy.Publisher('rangeReading', String, queue_size=10)
    blockVelocity = Twist()
    while not rospy.is_shutdown():
        if(slightLeft > slightRight):
            # Rotate clockwise until the left-of-center ray nears the center ray.
            while(slightLeft < rangeAhead and slightLeft + .1 < rangeAhead):
                blockVelocity.angular.z = -.1
                pub.publish(blockVelocity)
        else:
            # Rotate counter-clockwise until the right-of-center ray nears it.
            while(slightRight < rangeAhead and slightRight + .1 < rangeAhead):
                blockVelocity.angular.z = .1
                pub.publish(blockVelocity)
        blockVelocity.angular.z = 0
        if (minRange > .25):
            # Clear of obstacles: creep forward.
            blockVelocity.linear.x = .1
        else:
            # Too close to something: stop and turn away from the nearer side.
            blockVelocity.linear.x = 0
            blockVelocity.angular.z = -.25
            if(left>right):
                blockVelocity.angular.z = .25
        pub.publish(blockVelocity)
if __name__ == '__main__':
rospy.init_node('navCircle')
rangeSub = rospy.Subscriber('/base_scan', LaserScan, rangeCallback)
velocity()
| true |
f6f3d6e5bff3115ce8a9d28c1bf49720f67f3e91 | Python | willianflasky/growup | /python/day07/循环的socket客户端.py | UTF-8 | 396 | 2.890625 | 3 | [] | no_license | import socket
# Simple blocking TCP echo client: read a line from stdin, send it, print reply.
client=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
client.connect(('192.168.12.110',8080)) # dial the server (translated: "make the call")
while True:
    msg=input('>>: ')
    if not msg:continue  # skip empty input instead of sending it
    client.send(msg.encode('utf-8')) # client sends a message
    print('====>has send')
    data=client.recv(1024) # client receives a message (blocks; up to 1024 bytes)
    print('=====>has recv')
    print(data)
client.close() #关闭 | true |
4b0cc7770147fee7d01ad57c4956730fed8e6b3f | Python | strengthen/LeetCode | /Python3/1238.py | UTF-8 | 1,114 | 3.046875 | 3 | [
"MIT"
] | permissive | __________________________________________________________________________________________________
sample 180 ms submission
class Solution:
    """Reflected-Gray-code answer to LeetCode 1238 (Circular Permutation)."""

    def circularPermutation(self, n: int, start: int) -> List[int]:
        """Return the n-bit Gray-code cycle rotated so it begins at ``start``.

        G(i) = i ^ (i >> 1) enumerates the reflected Gray code — the exact
        sequence the recursive doubling in the original produced — so
        consecutive entries (including the wrap-around pair) differ in one bit.
        """
        sequence = [i ^ (i >> 1) for i in range(1 << n)]
        offset = sequence.index(start)
        return sequence[offset:] + sequence[:offset]
__________________________________________________________________________________________________
sample 184 ms submission
class Solution:
    """Iterative Gray-code construction for LeetCode 1238 (Circular Permutation)."""

    def circularPermutation(self, n: int, start: int) -> List[int]:
        """Return an n-bit Gray-code cycle rotated to begin at ``start``."""
        if n == 0:
            return [0]
        if n == 1:
            # Two-element cycle: only the starting point varies.
            return [0, 1] if start == 0 else [1, 0]
        cycle = [0, 1]
        for bit in range(1, n):
            high = 1 << bit
            # Reflect the current prefix and set the new top bit on the mirror.
            cycle = cycle + [high + value for value in reversed(cycle)]
        pivot = cycle.index(start)
        return cycle[pivot:] + cycle[:pivot]
__________________________________________________________________________________________________
| true |
ea885c686991db638e67624c1e6f1f716798c5e4 | Python | JosephLevinthal/Research-projects | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/225/users/4000/codes/1592_1805.py | UTF-8 | 263 | 3.40625 | 3 | [] | no_license | xa = float(input("coordenadas do ponto a"))
xb = float(input("coordenadas do ponto a"))
ya = float(input("coordenadas do ponto b"))
yb = float(input("coordenadas do ponto b"))
pontox = xb + xa /2
pontoy = yb + ya /2
print(round(pontox, 1))
print(round(pontoy, 1)) | true |
70d55974a4a8ed78e362f8b1669cd9a732ce5446 | Python | andrewbeattycourseware/pands2021 | /code/week10-objects/lecture/codeForSlides.py | UTF-8 | 1,430 | 3.53125 | 4 | [] | no_license | # this is code I am using the lecture slides
# This program does not do anything: it is lecture scratch code showing three
# ways to model the same "person" records (loose variables, parallel lists,
# and dictionaries) before introducing classes.
import datetime

# Way 1: one set of loose variables per person — does not scale.
firstname = 'Andrew'
lastname =' Beatty'
dob = datetime.date(2010, 1, 1)
height = 180
weight = 100
person1firstname = 'Andrew'
person1lastname = ' Beatty'
person1dob = datetime.date(2010, 1, 1)
person1height = 180
person1weight = 100
person21firstname = 'Joe'
person21lastname = ' bloggs'
person21dob = datetime.date(2010, 2, 2)
person21height = 170
person21weight = 80

# Way 2: parallel lists, one entry per person at the same index.
firstnames = ['andrew', 'joe', 'mary']
lastnames = ['beatty', 'bloggs', 'meeseeks']
dobs = [
    datetime.date(2010, 1, 1),
    datetime.date(2010, 2, 2),
    datetime.date(2013, 3, 3)]
heights = [180, 170, 165]
weights = [100, 80, 55]

def gethealthstats(firstname, lastname, dob, height, weight):
    # Placeholder for the lecture — intentionally empty.
    pass
gethealthstats(firstnames[2], lastnames[2],dobs[2],heights[2],weights[2])

def displayperson(firstname, lastname, dob, height, weight):
    # Placeholder for the lecture — intentionally empty.
    pass

# Way 3: one dictionary per person, collected in a list.
person1 = {
    'firstname' : 'Andrew',
    'lastname' : 'Beatty',
    'dob' : datetime.date(2010, 1, 1),
    'height' : 180,
    'weight' : 100
}
person2 = {
    'firstname': 'Joe',
    'lastname': 'Bloggs',
    'dob': datetime.date(2010, 2, 2),
    'height': 170,
    'weight': 80
}
person3 = {
    'firstname': 'Mary',
    'lastname': 'Meeseeks',
    'dob': datetime.date(2015, 3, 3),
    'height': 165,
    'weight': 55
}
persons = [person1, person2, person3]
| true |
abb22188a2c45807c403b9c9e028ba5b31f03088 | Python | gougo/ip_address_update | /read_sort_ipfile.py | UTF-8 | 2,725 | 2.75 | 3 | [] | no_license | # !/usr/bin/env python
# -*- coding: utf-8 -*-
'''
读取排序的ip文件.
排除一个ip多个地址的情况,并且将可以合并的ip段合并
Created on 2014-9-2
@author: tianfei
'''
import logging
import sys
import os
logging.basicConfig(filename = os.path.join(os.getcwd(), 'sort_ipfile.log'),
level = logging.DEBUG,
format = '%(asctime)s - %(levelname)s: %(message)s')
class GenIPAddressMap(object):
    '''
    Generate the ip address map: read sorted "ip`address" lines, drop any ip
    that maps to more than one address, and merge adjacent ip ranges that
    share the same address into "head_ip`tail_ip`address" output rows.

    NOTE(review): uses the Python 2 ``long()`` builtin — this module is
    Python 2 only.
    '''
    # merge threshold: consecutive ip values within this gap may be merged
    MERGE_STANDARD = 1
    def __init__(self, input, output):
        # input: path to the sorted "ip`address" file; output is opened for writing.
        self.input_file=input
        self.output=open(output, 'w')
    def run(self):
        """Single pass over the input, writing merged ranges as we go."""
        with open(self.input_file,"r") as inf:
            bf_ip = ""        # previous ip seen
            bf_address = []   # addresses recorded for the current ip/range
            discard_ip = ""   # ip known to have conflicting addresses
            head_ip = ""      # first ip of the range being accumulated
            for line in inf:
                ip, address = line.strip().split('`',1)
                if ip==discard_ip:
                    continue
                if ip==bf_ip:# ip same
                    if address in bf_address:
                        continue
                    else: # same ip, different address: remember it and discard this ip
                        bf_address.append(address)
                        discard_ip = ip
                        continue
                else:
                    if len(bf_address) == 1:
                        if address == bf_address[0]:
                            diff = long(ip)-long(bf_ip)
                            if diff>self.MERGE_STANDARD:
                                # Gap too large: flush the current range, start a new one.
                                logging.info("diff:%s", diff)
                                self.output.write("%s`%s`%s\n" % (head_ip, bf_ip, bf_address[0]))
                                head_ip=ip
                                bf_ip=ip
                                # NOTE(review): when the gap is small (mergeable) bf_ip is
                                # NOT advanced, so the next diff is measured from the old
                                # tail — confirm this is the intended merge semantics.
                        else:
                            # Address changed: flush the previous range (TODO in original)
                            self.output.write("%s`%s`%s\n" % (head_ip, bf_ip, bf_address[0]))
                            head_ip=ip
                            bf_ip=ip
                            bf_address=[address]
                    else: # previous ip had multiple addresses, or initial state: restart tracking
                        logging.debug("%s has multi address . discard.", bf_ip)
                        head_ip=ip
                        bf_ip=ip
                        bf_address=[address]
            # Final flush of the last accumulated range (TODO in original)
            if len(bf_address) == 1:
                self.output.write("%s`%s`%s\n" % (head_ip, bf_ip, bf_address[0]))
            else:
                logging.debug("%s has multi address . discard.", bf_ip)
if __name__=="__main__":
    # Usage: python read_sort_ipfile.py <input_file> <output_file>
    '''
    1 input
    2 output
    '''
    ip_map=GenIPAddressMap(sys.argv[1], sys.argv[2])
ip_map.run() | true |
e0530ecc38a94ad4b617386f00aa88c46c93e80f | Python | Helsinki-NLP/OpusFilter | /opusfilter/filters.py | UTF-8 | 21,434 | 2.921875 | 3 | [
"MIT"
] | permissive | """Corpus filtering"""
import difflib
import itertools
import logging
import math
import os
import string
from typing import Iterator, List, Tuple
import regex
from . import FilterABC, ConfigurationError, CLEAN_LOW, CLEAN_HIGH, CLEAN_BETWEEN, CLEAN_TRUE, CLEAN_FALSE
from .lm import CrossEntropyFilter, CrossEntropyDifferenceFilter, LMClassifierFilter # pylint: disable=W0611 # noqa: F401
from .util import check_args_compability
from .word_alignment import WordAlignFilter # pylint: disable=W0611 # noqa: F401
from .embeddings import SentenceEmbeddingFilter # pylint: disable=W0611 # noqa: F401
logger = logging.getLogger(__name__)
class LengthFilter(FilterABC):
    """Filter sentence pairs by segment length.

    Each segment is measured in words or characters (per-segment ``unit``);
    a pair is accepted when every length lies within the inclusive
    [min_length, max_length] bounds.  With ``pass_empty`` enabled, pairs
    whose segments are all empty are accepted unconditionally.
    """

    score_direction = CLEAN_BETWEEN

    def __init__(self, min_length=1, max_length=100, unit='word', pass_empty=False, **kwargs):
        # Validate the options and expand scalars to one value per segment.
        min_length, max_length, unit = check_args_compability(
            min_length, max_length, unit,
            required_types=[(int, float), (int, float), str],
            choices=[None, None, ('word', 'char', 'character')],
            names=['min_length', 'max_length', 'unit'])
        self.min_length = min_length
        self.max_length = max_length
        self.unit = unit
        self.pass_empty = pass_empty
        super().__init__(**kwargs)

    def get_length(self, segment, idx):
        """Return length of the segment in index"""
        return len(segment.split()) if self.unit[idx] == 'word' else len(segment)

    def score(self, pairs):
        for pair in pairs:
            lengths = []
            for pos, segment in enumerate(pair):
                lengths.append(self.get_length(segment, pos))
            yield lengths

    def accept(self, score):
        if self.pass_empty and not any(score):
            return True
        return all(self.min_length[pos] <= length <= self.max_length[pos]
                   for pos, length in enumerate(score))
class LengthRatioFilter(FilterABC):
    """Filter pairs whose segment lengths differ too much.

    The score is the ratio of the longest to the shortest segment length
    (in words or characters per the ``unit`` option); a pair is accepted
    when the ratio is strictly below ``threshold``.  An all-empty pair
    scores 0; a pair mixing empty and non-empty segments scores infinity.
    """

    score_direction = CLEAN_LOW

    def __init__(self, threshold=3, unit='word', **kwargs):
        self.threshold = threshold
        # Validate the unit choice(s), expanding a scalar to one per segment.
        self.unit = check_args_compability(
            unit, required_types=[str], choices=[('word', 'char', 'character')], names=['unit'])
        super().__init__(**kwargs)

    def get_length(self, segment, idx):
        """Return length of the segment in index"""
        return len(segment.split()) if self.unit[idx] == 'word' else len(segment)

    def score(self, pairs):
        for pair in pairs:
            sizes = [self.get_length(segment, pos) for pos, segment in enumerate(pair)]
            shortest = min(sizes)
            longest = max(sizes)
            if shortest == 0:
                yield 0 if longest == 0 else float('inf')
            else:
                yield longest / shortest

    def accept(self, score):
        return score < self.threshold
class LongWordFilter(FilterABC):
    """Reject pairs containing a word longer than the per-segment threshold.

    A segment's score is the length of its longest whitespace-separated
    token (0 for an empty segment).
    """

    score_direction = CLEAN_LOW

    def __init__(self, threshold=40, **kwargs):
        self.threshold = check_args_compability(threshold, required_types=[(int, float)], names=['threshold'])
        super().__init__(**kwargs)

    def score(self, pairs):
        for pair in pairs:
            longest = []
            for segment in pair:
                longest.append(max(map(len, segment.split()), default=0))
            yield longest

    def accept(self, score):
        return all(value < self.threshold[pos] for pos, value in enumerate(score))
class AverageWordLengthFilter(FilterABC):
    """Filter pairs by the mean word length of each segment.

    Empty segments score 0.  With ``pass_empty`` enabled, a pair whose
    segments are all empty is accepted regardless of the bounds.
    """

    score_direction = CLEAN_BETWEEN

    def __init__(self, min_length=2, max_length=20, pass_empty=False, **kwargs):
        # Validate the bounds and expand scalars to one value per segment.
        min_length, max_length = check_args_compability(
            min_length, max_length, required_types=[(int, float), (int, float)], names=['min_length', 'max_length'])
        self.min_length = min_length
        self.max_length = max_length
        self.pass_empty = pass_empty
        super().__init__(**kwargs)

    @staticmethod
    def _average_word_len(sentence):
        """Mean token length of the sentence; 0 when there are no tokens."""
        tokens = sentence.split()
        if not tokens:
            return 0
        return sum(len(token) for token in tokens) / len(tokens)

    def score(self, pairs):
        for pair in pairs:
            yield [self._average_word_len(segment) for segment in pair]

    def accept(self, score):
        if self.pass_empty and not any(score):
            return True
        return all(self.min_length[pos] <= value <= self.max_length[pos]
                   for pos, value in enumerate(score))
class HtmlTagFilter(FilterABC):
    """HTML tag filter: reject pairs in which any segment contains HTML markup."""

    score_direction = CLEAN_FALSE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    @staticmethod
    def check(segment):
        """Return whether segment has HTML tags (or something that breaks bs4)"""
        # Imported lazily so bs4 is only required when this filter is used.
        import bs4
        try:
            # find() returns the first tag parsed from the segment, if any.
            found = bool(bs4.BeautifulSoup(segment, 'html.parser').find())
        except (TypeError, UnboundLocalError, NotImplementedError,
                AssertionError, bs4.builder.ParserRejectedMarkup) as err:
            # Segments that crash the parser are conservatively treated as tagged.
            logger.warning("BeautifulSoup parsing failed for %s: %s", repr(segment), err)
            found = True
        return found

    def score(self, pairs):
        # One boolean per segment: True when HTML markup was detected.
        for pair in pairs:
            yield [self.check(sent) for sent in pair]

    def accept(self, score):
        return not any(score)
class RegExpFilter(FilterABC):
    """Accept or reject pairs based on regular-expression matches.

    One pattern may be given for all languages or one per language; the
    ``regex`` library performs the search.  With ``accept_match=False``
    (default) a pair passes only if no segment matches its pattern; with
    ``accept_match=True`` it passes only if every segment matches.
    """

    def __init__(self, regexps=None, accept_match=False, **kwargs):
        # Validate the pattern(s) and expand a scalar to one per segment.
        self.regexps = check_args_compability(regexps, required_types=[str], names=['regexps'])
        self.accept_match = accept_match
        super().__init__(**kwargs)

    @property
    def score_direction(self):
        """Hint for which score values indicate accept"""
        if self.accept_match:
            return CLEAN_TRUE
        return CLEAN_FALSE

    def score(self, pairs):
        for pair in pairs:
            flags = []
            for pos, segment in enumerate(pair):
                flags.append(regex.search(self.regexps[pos], segment) is not None)
            yield flags

    def accept(self, score):
        return all(score) if self.accept_match else not any(score)
class AlphabetRatioFilter(FilterABC):
    """Score segments by the share of characters that are alphabetic.

    Each segment gets the ratio of alphabetic characters to all characters
    (optionally ignoring whitespace); empty segments score 1.0.  A pair is
    accepted when every ratio reaches its per-segment threshold.
    """

    score_direction = CLEAN_HIGH

    def __init__(self, threshold=0.75, exclude_whitespace=False, **kwargs):
        # Expand/validate threshold to one numeric value per segment.
        self.threshold = check_args_compability(threshold, required_types=[(float, int)], names=['threshold'])
        self.exclude_whitespace = exclude_whitespace
        self.re_whitespace = regex.compile(r'\s')
        self.re_not_alphas = regex.compile(r'\p{Alphabetic=No}')
        super().__init__(**kwargs)

    def score(self, pairs):
        for pair in pairs:
            ratios = []
            for segment in pair:
                text = self.re_whitespace.sub('', segment) if self.exclude_whitespace else segment
                if text:
                    # Strip everything that is not alphabetic and compare lengths.
                    alphabetic = self.re_not_alphas.sub('', text)
                    ratios.append(len(alphabetic) / len(text))
                else:
                    ratios.append(1.0)
            yield ratios

    def accept(self, score):
        return all(ratio >= limit for ratio, limit in zip(score, self.threshold))
class CharacterScoreFilter(FilterABC):
    """Proportion of alphabetic characters that are in the given script

    For a list of valid scripts, see e.g.
    https://www.regular-expressions.info/unicode.html
    """

    score_direction = CLEAN_HIGH

    def __init__(self, scripts=None, thresholds=None, **kwargs):
        # scripts: one Unicode script name per language; thresholds default to
        # 1 (i.e. all alphabetic characters must be in the script).
        if scripts is None:
            raise ConfigurationError("A list of language scripts needs to be defined")
        self.scripts = scripts
        self.thresholds = [1] * len(scripts) if thresholds is None else thresholds
        if len(self.scripts) != len(self.thresholds):
            raise ConfigurationError(
                f"Mismatch in number of scripts ({len(self.scripts)}) and thresholds ({len(self.thresholds)})")
        # Patterns matching characters that are NOT alphabetic / NOT in the script.
        self.re_not_alphas = regex.compile(r'\p{Alphabetic=No}')
        self.re_not_script = [regex.compile(fr'\p{{^Script={script}}}')
                              for script in self.scripts]
        super().__init__(**kwargs)

    def score(self, pairs):
        for pair in pairs:
            if len(pair) != len(self.scripts):
                raise ValueError(f"Mismatch in number of scripts ({len(self.scripts)}) and sentences ({len(pair)})")
            scores = []
            for idx, sent in enumerate(pair):
                # Keep only alphabetic characters, then count how many of
                # those belong to the configured script.
                alphas = self.re_not_alphas.sub('', sent)
                if alphas:
                    script = self.re_not_script[idx].sub('', alphas)
                    scores.append(len(script) / len(alphas))
                else:
                    # No alphabetic characters at all: treat as fully in-script.
                    scores.append(1.0)
            yield scores

    def accept(self, score):
        return all(ratio >= threshold for ratio, threshold in zip(score, self.thresholds))
class LanguageIDFilter(FilterABC):
    """Language identification confidence filter

    Currently this supports three methods:

    * langid (default): see :cite:`lui-baldwin-2012-langid`
    * cld2: see https://github.com/CLD2Owners/cld2
    * fasttext: see :cite:`joulin-etal-2016-fasttext` and :cite:`joulin-etal-2017-bag`
    """

    score_direction = CLEAN_HIGH

    def __init__(self, languages=None, id_method='langid', thresholds=None,
                 fasttext_model_path=None, langid_languages=None, cld2_options=None,
                 **kwargs):
        super().__init__(**kwargs)
        if languages is None:
            raise ConfigurationError("A list of language codes needs to be defined")

        # fasttext options — each method-specific option is rejected when it is
        # supplied for a different id_method.
        if id_method == 'fasttext':
            if not fasttext_model_path:
                raise ConfigurationError("FastText language ID method was choosen without specifying "
                                         "any path to fasttext model")
            import fasttext
            self.fasttext_model = fasttext.load_model(os.path.join(self.workdir, fasttext_model_path))
        else:
            if fasttext_model_path:
                raise ConfigurationError("FastText language ID method was not choosen but fasttext "
                                         "path to model was set")
            self.fasttext_model = None

        # langid options
        if id_method == 'langid':
            from langid.langid import LanguageIdentifier, model
            self.identifier = LanguageIdentifier.from_modelstring(model, norm_probs=True)
            if langid_languages:
                # Restrict classification to the given candidate languages.
                self.identifier.set_languages(langid_languages)
        else:
            if langid_languages:
                raise ConfigurationError(
                    "langid_languages option is supported only by the method langid")
            self.identifier = None

        # cld2 options
        if id_method == 'cld2':
            self.cld2_options = cld2_options if cld2_options else {}
        else:
            if cld2_options:
                raise ConfigurationError("cld2_options is supported only by the method cld2")
            self.cld2_options = None

        # global options
        self.languages = languages
        self.id_method = id_method
        # Default threshold 0 per language: any positive confidence passes.
        self.thresholds = [0] * len(self.languages) if thresholds is None else thresholds

    def confidence(self, sentence: str, lan: str) -> float:
        """Return confidence of the identifier

        The confidence is forced to 0.0 when the predicted language differs
        from the expected language ``lan``.
        """
        if not sentence:
            # Prevent filtering empty lines
            return 1.0

        if self.id_method == 'cld2':
            import pycld2
            try:
                clddetails = pycld2.detect(sentence, **self.cld2_options)
            except pycld2.error as err:
                # Fall back to "unknown, zero confidence" on parser errors.
                logger.warning("pycld2 could not process '%s' due to: %s", sentence, err)
                clddetails = (0, 0, ((0, 'un', 0.0), 0))
            cldlan = clddetails[2][0][1]
            # cld2 reports percentages; normalize to [0, 1].
            cldconf = round(clddetails[2][0][2]/100, 2)
            if cldlan != lan:
                cldconf = 0.0
            return cldconf

        if self.id_method == 'langid':
            lidetails = self.identifier.classify(sentence)
            lilan, liconf = lidetails[0], round(lidetails[1], 2)
            if lilan != lan:
                liconf = 0.0
            return liconf

        if self.id_method == 'fasttext':
            lang, confidence = self._fasttext_predict_lang(sentence)
            if lang != lan:
                liconf = 0.0
            else:
                liconf = confidence
            return liconf

        raise ValueError(f"Unknown language identification method '{self.id_method}'")

    def score(self, pairs: List[Tuple[str, str]]) -> Iterator[List[float]]:
        # One confidence per segment, checked against the expected language.
        for pair in pairs:
            yield [self.confidence(sent, self.languages[idx]) for idx, sent in enumerate(pair)]

    def accept(self, score: Tuple[float, float]) -> bool:
        # Strictly-greater comparison: a confidence equal to the threshold fails.
        return all(conf > threshold for conf, threshold in zip(score, self.thresholds))

    def _fasttext_predict_lang(self, texts: List[str]) -> Tuple[str, float]:
        # NOTE(review): despite the name/annotation, confidence() calls this
        # with a single sentence string — confirm the intended input type.
        output = self.fasttext_model.predict(texts, k=1)
        confidence = output[1][0]
        # Slice drops the leading '__label__' prefix (9 characters) that
        # fasttext prepends to predicted labels.
        label = output[0][0][9:]
        return label, confidence
class TerminalPunctuationFilter(FilterABC):
    """Penalty for mismatched or repeated terminal punctuation in a pair.

    Counts '.', '?', '!' and '…' in both segments; the raw penalty is the
    absolute count difference plus any repetitions beyond the first in
    either segment, reported as -log(penalty + 1).  Bilingual input only.

    See :cite:`vazquez-etal-2019-university`
    """

    score_direction = CLEAN_HIGH

    def __init__(self, threshold=-2, **kwargs):
        self.threshold = threshold
        super().__init__(**kwargs)

    def score(self, pairs):
        marks = ('.', '?', '!', '…')
        for pair in pairs:
            if len(pair) != 2:
                raise ValueError("Only bilingual input supported by TerminalPunctuationFilter")
            source, target = pair
            src_count = sum(1 for char in source if char in marks)
            tgt_count = sum(1 for char in target if char in marks)
            penalty = abs(src_count - tgt_count)
            if src_count > 1:
                penalty += src_count - 1
            if tgt_count > 1:
                penalty += tgt_count - 1
            yield -math.log(penalty + 1)

    def accept(self, score):
        return score >= self.threshold
class NonZeroNumeralsFilter(FilterABC):
    """Similarity of the non-zero digit sequences of the segments.

    Zeros are stripped and the remaining digits of each segment are compared
    pairwise with difflib's ratio.  With ``require_all`` every pairwise
    similarity must reach the threshold; otherwise one is enough (no effect
    for bilingual input).

    See :cite:`vazquez-etal-2019-university`
    """

    score_direction = CLEAN_HIGH

    def __init__(self, threshold=0.5, require_all=True, **kwargs):
        self.threshold = threshold
        self.require_all = require_all
        super().__init__(**kwargs)

    def score(self, pairs):
        for pair in pairs:
            digit_seqs = []
            for sent in pair:
                digit_seqs.append([int(char) for char in sent
                                   if char in string.digits and char != '0'])
            similarities = [difflib.SequenceMatcher(None, seq_a, seq_b).ratio()
                            for seq_a, seq_b in itertools.combinations(digit_seqs, 2)]
            yield similarities

    def accept(self, score):
        check = all if self.require_all else any
        return check(ratio >= self.threshold for ratio in score)
class LongestCommonSubstringFilter(FilterABC):
    """Ratios of longest common substring to the shorter of the strings

    If require_all is True, all ratios (for pairs of n segments) have
    to be below the threshold; otherwise at least one the ratios have
    to be below the threshold. For bilingual input, it has no effect.
    """

    score_direction = CLEAN_LOW

    def __init__(self, threshold=0.9, require_all=True, **kwargs):
        self.threshold = threshold
        self.require_all = require_all
        super().__init__(**kwargs)

    def score(self, pairs):
        """Yield, per input tuple, the pairwise LCS-to-shorter-string ratios."""
        for segments in pairs:
            ratios = []
            for first, second in itertools.combinations(segments, 2):
                matcher = difflib.SequenceMatcher(isjunk=None, a=first, b=second)
                _, _, size = matcher.find_longest_match(0, len(first), 0, len(second))
                shorter = min(len(first), len(second))
                # Empty segments would divide by zero; score them 0.
                ratios.append(size / shorter if shorter else 0)
            yield ratios

    def accept(self, score):
        """Accept when the ratios stay below the threshold (all or any, per require_all)."""
        check = all if self.require_all else any
        return check(ratio < self.threshold for ratio in score)
class SimilarityFilter(FilterABC):
    """Filter on string/sequence similarity

    Uses Levenshtein distance implemented in the RapidFuzz library.
    The weights parameter can be used to change the costs of the three
    operations (insertion, deletion, substitution).

    If require_all is True, all ratios (for pairs of n segments) have
    to be below the threshold; otherwise at least one the ratios have
    to be below the threshold. For bilingual input, it has no effect.
    """

    score_direction = CLEAN_LOW
    VALID_UNITS = ('word', 'char', 'character')

    def __init__(self, threshold=0.9, weights=(1, 1, 1), unit='char', lowercase=False,
                 require_all=True, **kwargs):
        if unit not in self.VALID_UNITS:
            raise ConfigurationError(
                f"Value of 'unit' are not one of the allowed choices {self.VALID_UNITS}: {unit}")
        self.threshold = threshold
        self.weights = weights
        self.unit = unit
        self.lowercase = lowercase
        self.require_all = require_all
        super().__init__(**kwargs)

    def similarity(self, seq1, seq2):
        """Return normalized similarity between the sequences"""
        import rapidfuzz
        if self.lowercase:
            seq1, seq2 = seq1.lower(), seq2.lower()
        if self.unit == 'word':
            # Compare token sequences instead of character sequences.
            seq1, seq2 = seq1.split(), seq2.split()
        return rapidfuzz.distance.Levenshtein.normalized_similarity(
            seq1, seq2, weights=self.weights)

    def score(self, pairs):
        """Yield, per input tuple, the pairwise similarities of its segments."""
        for segments in pairs:
            yield [self.similarity(first, second)
                   for first, second in itertools.combinations(segments, 2)]

    def accept(self, score):
        """Accept when the similarities are below the threshold (all or any)."""
        check = all if self.require_all else any
        return check(ratio < self.threshold for ratio in score)
class RepetitionFilter(FilterABC):
    """Filter segments with repeated content
    Filter segments with substrings of min_length to max_length
    characters that are repeated at least threshold number of times.
    The first occurrence is not counted to the threshold, i.e.,
    threshold 2 means that the substring has to occur three times.
    There may be optional space character(s) between the repeated
    strings that are not counted to the length. The repeated string
    cannot start with a whitespace character but is not limited
    otherwise.
    """
    score_direction = CLEAN_LOW
    def __init__(self, threshold=2, min_length=3, max_length=100, **kwargs):
        if threshold < 1:
            raise ConfigurationError("threshold for RepetitionFilter has to be at least one")
        if min_length < 1:
            raise ConfigurationError("min_length for RepetitionFilter has to be at least one")
        # Stored privately; exposed read-only through the properties below.
        self._threshold = threshold
        self._min_length = min_length
        self._max_length = max_length
        # Compile once at construction; the pattern depends on all three settings.
        self._regexp = self._get_regexp()
        super().__init__(**kwargs)
    @property
    def min_length(self):
        """Minimum number of characters in pattern"""
        return self._min_length
    @property
    def max_length(self):
        """Maximum number of characters in pattern"""
        return self._max_length
    @property
    def threshold(self):
        """Threshold for the number of repetitions"""
        return self._threshold
    def _get_regexp(self):
        """Return compiled regexp for finding repetitions"""
        # Group 1: a non-space-initiated run of min_length..max_length chars
        # (non-greedy); it must then repeat >= threshold more times, each
        # repeat optionally preceded by spaces that do not count to length.
        rstring = f'(\\S.{{{self.min_length-1},{self.max_length}}}?)(?: *\\1){{{self.threshold},}}'
        return regex.compile(rstring)
    def get_repetitions(self, segment):
        """Return the number of repetitions and the repeated string
        Returns the number of repetitions and the repeated string for
        the first match of at least self.threshold number of
        repetitions. The segment may contain longer repetitions than
        the one returned. If there no matched repetitions, zero and
        None are returned.
        """
        match = self._regexp.search(segment)
        if match:
            full = match.group(0)
            repeated = match.group(1)
            # Subtract one: the first occurrence does not count as a repetition.
            return full.count(repeated) - 1, repeated
        return 0, None
    def score(self, pairs):
        # Score of a tuple is the worst (largest) repetition count of its segments.
        for pair in pairs:
            yield max(self.get_repetitions(sent)[0] for sent in pair)
    def accept(self, score):
        # Fewer repetitions than the threshold means the pair is clean.
        return score < self.threshold
| true |
0cf1e98771a83a773954c3d8c8003ca97d72c26d | Python | swapnanildutta/Python-programs | /Duplicates.py | UTF-8 | 230 | 3.328125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 9 05:55:10 2019
@author: aot
"""
# Read whitespace-separated integers and print every value that occurs
# more than once.
a = [int(i) for i in input("Enter the tuple:").split()]
a = tuple(a)
from collections import Counter  # local import: this short script has no import section
# Counter gives each value's frequency in one O(n) pass; the original
# called tuple.count() per distinct value, which is O(n^2) overall.
for value, count in Counter(a).items():
    if count > 1:
        print(value, end=" ")
90d87d66fc91a4e725cf49f4b5b486c178a9926d | Python | whitbit19/practice-problems | /stock_prices.py | UTF-8 | 1,430 | 3.921875 | 4 | [] | no_license |
def max_profits(stock_prices):
    """
    Return max profit using brute force method. O(n^2) solution.

    >>> max_profits([10, 7, 5, 11, 9])
    6
    >>> max_profits([1, 2, 3, 5])
    4
    >>> max_profits([])
    0
    """
    best = 0
    # Try every buy index against every later sell index and keep the
    # largest positive gain seen so far.
    for buy in range(len(stock_prices)):
        for sell in range(buy + 1, len(stock_prices)):
            gain = stock_prices[sell] - stock_prices[buy]
            if gain > 0 and gain > best:
                best = gain
    return best
def get_max_profit(stock_prices):
    """
    Return max profits using the greedy approach (O(n)).
    Simply update the best answer so far.

    >>> get_max_profit([10, 7, 5, 11, 9])
    6
    >>> get_max_profit([1, 2, 3, 5])
    4
    >>> get_max_profit([3, 3, 3, 3])
    0
    >>> get_max_profit([4, 3, 2, 1])
    -1
    """
    if len(stock_prices) < 2:
        raise IndexError('Getting a profit requires more than 2 stock prices.')
    # Track the cheapest buy seen so far and the best sell-minus-buy spread.
    # Seeding with the first spread lets an all-decreasing input return a
    # negative "profit" (the least bad trade) instead of zero.
    min_price = stock_prices[0]
    max_profit = stock_prices[1] - stock_prices[0]
    for current_price in stock_prices[1:]:
        max_profit = max(max_profit, current_price - min_price)
        min_price = min(min_price, current_price)
    return max_profit
if __name__ == '__main__':
    import doctest
    doctest.testmod()  # run the >>> examples embedded in the docstrings above
70146ea534ee51280038d750740999034e69f13a | Python | B-Kroske/MatasanoCrypto | /Set1/Challenge8.py | UTF-8 | 1,198 | 3.046875 | 3 | [] | no_license | from CryptoLib import everyNth, validate
from collections import Counter
import binascii
def calcScore(array):
    """Return the summed frequency of the (up to) three most common elements.

    Used to rate how "repetitive" a byte column is: ECB-encrypted data
    repeats block content, so its columns score higher than random hex.
    """
    counter = Counter(array)
    # most_common(3) simply returns fewer entries when there are fewer
    # than three distinct values; the original indexed [0..2] blindly and
    # raised IndexError in that case.
    return sum(count for _, count in counter.most_common(3))
def main():
    """Find and print the ECB-encrypted line in the input file.

    AES-ECB maps identical 16-byte plaintext blocks to identical
    ciphertext blocks, so the line whose bytes repeat the most per block
    position is almost certainly the ciphertext rather than random hex.
    """
    inputFile = "assets/Set1/inputC8.txt"
    bestScore = 0
    cipherLine = None
    # Determine how many characters on each line encrypted with the same
    # key are the same; the highest-scoring line is likely ciphertext.
    with open(inputFile, "r") as handle:  # the original leaked the file handle
        for line in handle:
            posCipher = binascii.unhexlify(line.rstrip())
            lineScore = 0
            # Repeat for every block of bytes that would be encoded with
            # the same key.
            # NOTE(review): the loop variable is unused, so the same
            # everyNth(posCipher, 16) score is summed 16 times — the
            # original did the same; an offset per iteration may have been
            # intended.  The constant factor does not change the argmax.
            for _ in range(0, 16):
                lineScore += calcScore(everyNth(posCipher, 16))
            if lineScore > bestScore:
                cipherLine = line
                bestScore = lineScore
    print(cipherLine)
if __name__ == "__main__":
    main()  # run the ECB-detection challenge when executed as a script
| true |
78507cd67df1ff21a650b368277b630716f88ad8 | Python | JonasWard/facade_p2p | /img_to_facade/img_to_objs.py | UTF-8 | 2,845 | 2.625 | 3 | [] | no_license | from PIL import Image
import numpy as np
# Named crop rectangles, each as a pair of corner points in pixels.
# NOTE(review): the corner order / axis convention is not established in
# this file — confirm against img_to_obj, which normalises coordinates by
# the loaded image's shape.
dict_rectangles = {
    "bathroom" : [
        (48, 813),
        (129, 676)
    ],
    "window_1_left" : [
        (298, 423),
        (555, 113)
    ],
    "window_1_right" : [
        (785, 423),
        (1039, 111)
    ],
    "window_0_right" : [
        (780, 943),
        (1039, 628)
    ],
    "door" : [
        (290, 1092),
        (539, 602)
    ]
}
def img_to_obj(img, rec=None, file_name=None):
    """Write a single textured quad as a Wavefront OBJ file.

    img       -- path (or file object) accepted by PIL's Image.open
    rec       -- [(x0, y0), (x1, y1)] corner pair in pixels; defaults to the
                 whole image
    file_name -- output basename (".obj" is appended); defaults to "default"

    The quad's vertex positions use the pixel coordinates directly, while
    the texture coordinates are the same corners normalised by the image
    size (with the vertical axis flipped).
    """
    img = Image.open(img)
    x, y, _ = np.array(img).shape
    # NOTE(review): numpy's shape is (rows, cols, channels), i.e. x here is
    # the pixel HEIGHT and y the WIDTH — confirm this matches the intended
    # normalisation below (preserved unchanged from the original).
    if rec is None:
        rec = [(0., 0.), (x, y)]
    rec = [tuple(float(v) for v in vals) for vals in rec]
    # The two remaining corners of the axis-aligned rectangle.
    rec_b = [(rec[0][0], rec[1][1]), (rec[1][0], rec[0][1])]
    # Texture coordinates: normalised to [0, 1] with the v axis flipped.
    rec_t_s = [(i/x, 1.-j/y) for i, j in rec]
    rec_t_bs = [(i/x, 1.-j/y) for i, j in rec_b]
    obj_str_list = [
        "# JonasWard\n",
        "mtllib vp.mtl",
        "usemtl anImage",
        "v {} {} 0.0".format(*rec[0]),
        "v {} {} 0.0".format(*rec_b[1]),
        "v {} {} 0.0".format(*rec[1]),
        "v {} {} 0.0".format(*rec_b[0]),
        "vt {} {}".format(*rec_t_s[0]),
        "vt {} {}".format(*rec_t_bs[1]),
        "vt {} {}".format(*rec_t_s[1]),
        "vt {} {}".format(*rec_t_bs[0]),
        "vn 0. 0. 1.",
        "vn 0. 0. 1.",
        "vn 0. 0. 1.",
        "vn 0. 0. 1.",
        "f 1/1/1 2/2/2 3/3/3 4/4/4",
    ]
    # Pick the output name once; the original duplicated the open() call
    # per branch and never closed the handle.
    out_path = "{}.obj".format(file_name) if file_name is not None else "default.obj"
    with open(out_path, 'w') as f:
        f.write('\n'.join(obj_str_list))
    print('\n'.join(obj_str_list))
if __name__ == "__main__":
import os
# wondelgem = "/Users/jonas/Desktop/Wondelgem.png"
# allwirlen = "/Users/jonas/Desktop/Allwirlen.png"
# img_to_obj(wondelgem, file_name="wondelgem")
# img_to_obj(allwirlen, file_name="allwirlen")
oberhausen = "/Users/jonas/Desktop/oberhausen.png"
img_to_obj(oberhausen, "oberhausen")
# img_to_obj("/Users/jonas/Documents/reps/facade_p2p/img_to_facade/image.png")
# for name, rec in dict_rectangles.items():
# img_to_obj("/Users/jonas/Documents/reps/facade_p2p/img_to_facade/image.png", rec, name) | true |
84d4661e99da23cd2ced9b4b39abc153608b874a | Python | RajC9/Simple-Calculator | /Main.py | UTF-8 | 576 | 3.515625 | 4 | [] | no_license | number_one=int(input('please enter the first number'))
operator = input('please enter the operator:')
number_two = int(input('please enter the second number'))
# Map every supported operator symbol to its display template and its
# computation; the original spelled each branch out as an if/elif chain.
operations = {
    '+': ('{}+{}= ', lambda a, b: a + b),
    '-': ('{}-{}=', lambda a, b: a - b),
    '*': ('{}*{}=', lambda a, b: a * b),
    '/': ('{}/{}=', lambda a, b: a / b),
}
if operator in operations:
    template, compute = operations[operator]
    print(template.format(number_one, number_two))
    print(compute(number_one, number_two))
else:
    print('invalid')
| true |
0ef34541f73e0fac6e06755758436eb615571276 | Python | Songyang2017/Python-Learn-record | /py基础/输入和输出/1.py | UTF-8 | 2,238 | 4.53125 | 5 | [] | no_license | # str() 返回用户易读
# repr() 返回解释器易读
import math
s = 'Hello Shasha!'
print('str():', str(s))
print('repr():', repr(s))
x = 1.25 * 10
y = 100 * 300
s = "x的值为:" + str(x)+", y的值为:"+repr(y)+"...."
print(s)
# range(start, stop, step) 用于创建整数列表,一般用于for循环中
# start 计数开始,默认从0开始,range(5)等价range(0, 5)
# stop 结束,不包含stop
# step 步数,默认为1
s2 = range(1, 11)
print(s2[-1])
for x in s2:
print('s2:', x)
print('-----------')
s3 = range(0, 20, 3)
for x in s3:
print('s3:', x)
print('-----------平方表-立方表-1')
for x in range(1, 15):
print(str(x).rjust(2), str(x*x).rjust(5), repr(x*x*x).rjust(5))
print('-----------平方表-立方表-2')
for x in range(1, 15):
print('{0:2d} {1:3d} {2:4d}'.format(x, x*x, x*x*x))
# str.format()方法
# {}及里面的字符会被format()中参数替换,{}中数字用于指向传入对象在format()中位置
print('-----------format()---1')
print('{0} 和 {1}'.format(1, 2))
# 若传入format中的对象使用了关键字,则值会指向使用该名字的参数
print('-----------format()---2')
print('名字:{name} 年龄:{age}'.format(name='莎拉', age=29))
# 位置和关键字参数可混用
print('-----------format()---2')
print('{desc}:{0} {1}'.format(105, 179, desc='体重身高'))
print('-----------math--pi-')
print('常量PI近似值:{}.'.format(math.pi))
print('常量PI近似值:{!r}.'.format(math.pi))
# 冒号:和格式标识符可以跟字段名,对值进行更好的格式化
print('-----------冒号:-')
print('常量PI保留3位小数:{0:.3f}'.format(math.pi)) # 保留3位小数
# 冒号:传入整数可以保证区域宽度,用于美化表格
table = {'name': '张三', 'sort': 3, 'score': 92}
print(table.items())
print('name: {0[name]}, sort: {0[sort]}, score: {0[score]}'.format(table))
# 在 table 变量前使用 '**'
print('name: {name:2}, sort: {sort:2} ,score: {score:2}'.format(**table))
for key, value in table.items():
# print(key, value)
print('{0:5} ===> {1:5}'.format(key, value))
# 读取键盘
print('--------读取键盘----')
str1 = input('请输入:')
print('您输入的值是', str1)
| true |
e249fbe512804ea8cc0edc3771fd14920d099c1a | Python | CY2020/Project-1 | /program.py | UTF-8 | 2,164 | 3.359375 | 3 | [] | no_license | #Mobile Emergency
#MakeSPPrep Project
#Heart Health
print "Welcome, please insert your health information."
print "Please select one of the following conditions:"
print "1. arrhythmia"
print "2. high blood pressure"
print "3. history cardic arrest"
print "4. heart murmurs"
print "5. coronary hartry disease"
print "6. congestive heart failure"
print "7. periphial artery disease"
print "8. stroke"
print "9. prior heart attacks"
print "10. cogenital heart disease"
information = raw_input()
if information == 'arrhythmia':
print "We will monitor you heart rate, in case of emergency, we will contact the paramedics."
elif information == 'high blood pressure':
print "We will remind you what to avoid and any habits you fall victim to."
elif information == 'history cardiac arrest':
print "We will constantly make sure your heart is stable."
elif information == 'heart murmurs':
print "We recommend that you visit your doctor often"
elif information == 'coronary artery disease':
print "We recommend that you try to lower your blood pressure to prevent any heart attacks."
elif information == 'congestive heart failure':
print "Go see your doctor to finalize the cause of you problem."
elif information == 'periphial artery disease':
print "Lessen use of tobacco, get more exercise, eat healthy."
elif information == 'stroke':
print "Go take blood thinners as well as tPa."
elif information == 'prior heart attacks':
print "What is recommended is that you take aspirin to prevent blood clots that could lead to a heart attack."
elif information == 'cogenital heart disease':
print "what you should do is to lower your blood pressure by any means"
else:
print "I'm sorry, that is not an option."
print "What is your current mood? Stressed or calm?"
mood = raw_input()
if mood == 'stressed':
print "We recommend that you take cation of your health and be vigilant of any symptoms of diseases."
elif mood == 'calm':
print "Then you are not in danger of any conditions or high blood pressure."
else:
print "I'm sorry, that is not an option."
print "For more information contact: yanezc20@students.spprep.org"
| true |
607221182bf594c512cc0247e5faa9a26b8f8a92 | Python | apuya/python_crash_course | /Part_1_Basics/Chapter_10_Files_Exceptions/exercise10_6.py | UTF-8 | 631 | 4.09375 | 4 | [] | no_license | # Python Crash Course: A Hands-On, Project-Based Introduction To Programming
#
# Name: Mark Lester Apuya
# Date:
#
# Chapter 10: Files And Exceptions
#
# Exercise 10.6 Addition:
# One common problem when prompting for numerical input occurs when people provide text instead of numbers. When you try
# to convert the input to an int, you’ll get a ValueError. Write a program that prompts for two numbers. Add them together
# and print the result. Catch the ValueError if either input value is not a number, and print a friendly error message.
# Test your program by entering two numbers and then by entering some text instead of a number. | true |
47127fe92f48ffc611dcc54e1a7b0d00b671724d | Python | muztim/100-days-of-code-python | /Projects/Day_25/us-states-game/main.py | UTF-8 | 1,096 | 3.5625 | 4 | [] | no_license | import turtle
import pandas as pd
ALIGNMENT = "center"
FONT = ("Arial", 12, "bold")
states_df = pd.read_csv("50_states.csv")
screen = turtle.Screen()
screen.title("U.S. States Game")
image = "blank_states_img.gif"
screen.addshape(image)
turtle.shape(image)
pointer = turtle.Turtle()
pointer.penup()
pointer.hideturtle()
guesses = []
while len(guesses) != 50:
answer_state = screen.textinput(title=f"{len(guesses)}/50 States Correct", prompt="What's another state's name?").title()
if answer_state == "Exit":
break
#Check if guess is among 50 states
if answer_state in states_df.values and answer_state not in guesses:
x = int(states_df[states_df.state == answer_state]["x"])
y = int(states_df[states_df.state == answer_state]["y"])
pointer.goto(x,y)
pointer.write(f"{answer_state}", False, align=ALIGNMENT, font=FONT)
guesses.append(answer_state)
#States to learn.csv
states_to_learn_df = states_df.loc[~states_df.state.isin(guesses)].set_index("state").reset_index()
states_to_learn_df.state.to_csv("states_to_learn.csv") | true |
d3595fb1ffb51ad688c743202909f80f1abdc54d | Python | Lusarom/progAvanzada | /ejercicio75.py | UTF-8 | 224 | 3.984375 | 4 | [] | no_license | n = int(input('Ingrese un numero entero positivo:'))
m = int(input('Ingrese un numero entero positivo:'))
import math  # local import: this short script has no import section at the top
# math.gcd runs in O(log min(n, m)) versus the original linear count-down,
# and it handles a 0 input (the original raised ZeroDivisionError when the
# smaller number was 0).
d = math.gcd(n, m)
print("El mayor divisor comun es", n, 'y', m, "es", d)
289e7e5091c0be229f5cbe72ef2c5f1b26056795 | Python | lixuewen1999/app | /appiumcloud/appiumclound.py | UTF-8 | 3,404 | 2.5625 | 3 | [] | no_license | '''
@author: lixuewen
@file: appiumclound.py
@time: 2020/9/23 14:04
@desc: 云测试平台
'''
import socket,os,subprocess,threading
from appium import webdriver
from time import sleep
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
class AppiumCloud:
    """Spin up one Appium server per connected adb device and run a
    calculator smoke test against each."""

    def __init__(self):
        pass

    def build_devices(self):
        """Return one config dict (udid/port/bport/version) per adb device.

        Ports are probed starting at 5000 (server) and 8000 (bootstrap) so
        that every device gets a free, distinct pair.
        """
        list = []
        port = 5000
        bport = 8000
        # NOTE(review): '\r\n' splitting assumes Windows adb output.
        devices = subprocess.check_output('adb devices').decode().strip().split('\r\n')
        # First line of `adb devices` is the header, so start at 1.
        for i in range(1, len(devices)):
            udid = devices[i].split('\t')[0]
            if udid != '':
                dic = {}
                version = subprocess.check_output('adb -s {} shell getprop ro.build.version.release'.format(udid)).decode().strip()
                port = self.find_port(port)
                bport = self.find_port(bport)
                dic['udid'] = udid
                dic['port'] = port
                dic['bport'] = bport
                dic['version'] = version
                list.append(dic)
                port += 1
                bport += 1
        return list

    def find_port(self, port):
        """Return the first free port at or above *port*."""
        while True:
            if self.check_port(port):
                break
            else:
                port += 1
        return port

    def check_port(self, port):
        """Return True when *port* on localhost is free (connect fails)."""
        s = socket.socket()
        try:
            s.connect(('127.0.0.1', port))
            return False
        except OSError:  # the original used a bare except:
            return True
        finally:
            s.close()  # BUG FIX: the original leaked one socket per probe

    def start_appium(self, port, bport, udid, version):
        """Launch an Appium server for *udid* in the background, then test it."""
        log_file = os.path.abspath('.')+'\log\{}'.format(udid.replace(':', '_'))
        cmd = r'start /b appium -a 127.0.0.1 -p {} -bp {} --udid {} --platform-version {} --log {} --log-level info --log-timestamp'.format(port, bport, udid, version, log_file)
        print(cmd)
        os.system(cmd)
        self.test(udid, version, port)

    def test(self, udid, version, port):
        """Drive the MIUI calculator on *udid* and print pass/fail for 1+1."""
        desired_caps = {}
        desired_caps['platformName'] = 'Android'
        desired_caps['platformVersion'] = version
        desired_caps['deviceName'] = 'Appium'
        desired_caps['noReset'] = True
        desired_caps['appPackage'] = 'com.miui.calculator'
        desired_caps['appActivity'] = '.cal.CalculatorActivity'
        desired_caps['udid'] = udid
        desired_caps['unicodekeyboard'] = True
        driver = webdriver.Remote('http://127.0.0.1:{}/wd/hub'.format(port), desired_caps)
        driver.find_element_by_id('com.miui.calculator:id/btn_1_s').click()
        driver.find_element_by_id('com.miui.calculator:id/btn_plus_s').click()
        driver.find_element_by_id('com.miui.calculator:id/btn_1_s').click()
        driver.find_element_by_id('com.miui.calculator:id/btn_equal_s').click()
        result = WebDriverWait(driver, 10, 0.5).until(EC.presence_of_element_located((By.ID, 'com.miui.calculator:id/result')))
        if result.get_attribute('text') == '= 2':
            print('pass')
        else:
            print('fail')
if __name__ == '__main__':
    # Kill any leftover Appium/node servers before starting fresh ones.
    os.system('taskkill /IM node.exe /F')
    ac = AppiumCloud()
    devices = ac.build_devices()
    print(devices)
    # One background thread per device: each starts its own Appium server
    # and runs the calculator smoke test against it.
    for i in devices:
        threading.Thread(target=ac.start_appium, args=(i['port'], i['bport'], i['udid'], i['version'])).start()
accb9abf424d8b3796b3bbeb2c5d54e7c7ba232a | Python | ChrisLMartin/cal_data_processing | /create_shrinkage_csv.py | UTF-8 | 1,313 | 2.859375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 31 10:14:20 2018
@author: christopher.martin
"""
import datetime
import pandas as pd
# Build a CSV listing the shrinkage specimens that are due for a reading
# today, based on a fixed schedule of test days after casting.
# Set excel shrinkage spreadsheet path
path_in = "S:\Current Projects\R&D\Shrinkage.xlsx"
# Read records into pandas dataframe
df = pd.read_excel(path_in, "Records", index_col="Reading ID")
# Select only relevant columns
df = df[["Mix", "Specimen"]]
# Duplicate Mix column to be used for date
df["Date"] = df["Mix"]
# Convert date column to datetime objects
# NOTE(review): assumes the Mix identifier starts with a YYYYMMDD prefix —
# confirm against the spreadsheet's Mix naming convention.
df["Date"] = pd.to_datetime(df["Date"].astype(str).str[:8], format='%Y%m%d')
# Convert datetime objects to dates
df["Date"] = df["Date"].dt.date
# Combine Mix and Specimen columns into single column of strings
df["SpecimenID"] = df["Mix"].astype(str) + df["Specimen"]
# Drop Mix and Specimen columns
df = df[["SpecimenID", "Date"]]
# Drop all duplicate rows
df = df.drop_duplicates("SpecimenID")
# Create array of test days: daily for 1-14, every 2nd day to 28,
# then weekly to 56 days after casting.
days = [x for x in range(1, 15)] + \
       [y for y in range(16, 29, 2)] + \
       [z for z in range(35, 57, 7)]
dates = [datetime.date.today() - datetime.timedelta(x) for x in days]
# Filter for samples within previous 56 days
df = df[df["Date"].isin(dates)]
# Set output path for CSV
path_out = "S:\Current Projects\R&D\ShrinkageCSV.csv"
# Create CSV
df["SpecimenID"].to_csv(path_out, index=False)
| true |
6e197fa2b8fcec9b9542f2c5c40e2869f6f67c7a | Python | LelandYan/sklearn_tensorflow | /chapter04/model_learning.py | UTF-8 | 3,356 | 3.15625 | 3 | [] | no_license | # _*_ coding: utf-8 _*_
import numpy as np
import matplotlib.pyplot as plt
# X = 2 * np.random.rand(100, 1)
# y = 4 + 3 * X + np.random.randn(100, 1)
#
# plt.plot(X, y, "b.")
# plt.xlabel("$x_1$", fontsize=18)
# plt.ylabel("$y$", rotation=0, fontsize=18)
# plt.axis([0, 2, 0, 15])
# # plt.show()
#
# X_b = np.c_[np.ones((100,1)),X]
# theta_best = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y)
#
# X_new = np.array([[0],[2]])
# X_new_b = np.c_[np.ones((2,1)),X_new]
# y_predict = X_new_b.dot(theta_best)
# plt.plot(X_new, y_predict, "r-")
# plt.plot(X, y, "b.")
# plt.axis([0, 2, 0, 15])
# # plt.show()
#
# eta = 0.1
# n_iterations = 1000
# m = 100
# theta = np.random.randn(2,1)
#
# for iteration in range(n_iterations):
# gradients = 2/m * X_b.T.dot(X_b.dot(theta) - y)
# theta = theta - eta * gradients
# 多项式回归
# Synthetic quadratic dataset: y = 0.5*x^2 + 2 plus unit Gaussian noise,
# with x drawn uniformly from [-3, 3).
m = 100
X = 6 * np.random.rand(m, 1) - 3
y = 0.5 * X ** 2 + 2 + np.random.randn(m, 1)
#
# # 数据可视化
# plt.scatter(X,y)
# plt.xlabel("$x_1$",fontsize=18)
# plt.ylabel("$y$",fontsize=18)
# plt.axis([-3,3,0,10])
#
# from sklearn.preprocessing import PolynomialFeatures
# poly_features = PolynomialFeatures(degree=2,include_bias=False)
# X_ploy = poly_features.fit_transform(X)
# from sklearn.linear_model import LinearRegression
# lin_reg = LinearRegression()
# lin_reg.fit(X_ploy,y)
# Evenly spaced query points used for plotting fitted curves below.
X_new = np.linspace(-3,3,100).reshape(100,1)
# X_new_poly = poly_features.transform(X_new)
# y_new = lin_reg.predict(X_new_poly)
# plt.plot(X_new,y_new,"r-")
# plt.show()
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
def plot_learning_curve(model, X, y):
    """Plot train/validation RMSE against training-set size for *model*.

    Refits the model on the first m training samples for every m and adds
    both RMSE curves to the current matplotlib figure (call plt.show()
    afterwards to display).
    """
    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2)
    train_errors, val_errors = [], []
    # Start at 1: fitting on zero samples is not defined.
    for m in range(1, len(X_train)):
        model.fit(X_train[:m], y_train[:m])
        y_train_predict = model.predict(X_train[:m])
        y_val_predict = model.predict(X_val)
        # Train error is measured only on the m samples actually fitted.
        train_errors.append(mean_squared_error(y_train_predict, y_train[:m]))
        val_errors.append(mean_squared_error(y_val_predict, y_val))
    plt.plot(np.sqrt(train_errors), 'r-+', linewidth=2, label="train")
    plt.plot(np.sqrt(val_errors), 'b-', linewidth=2, label="val")
    plt.legend(loc="upper right", fontsize=14)  # not shown in the book
    plt.xlabel("Training set size", fontsize=14)  # not shown
    plt.ylabel("RMSE", fontsize=14)
# lin_reg = LinearRegression()
# plot_learning_curve(lin_reg,X,y)
# plt.axis([0, 80, 0, 3])
# plt.show()
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.pipeline import Pipeline
for style, width, degree in (("g-", 1, 300), ("b--", 2, 2), ("r-+", 2, 1)):
polybig_features = PolynomialFeatures(degree=degree, include_bias=False)
std_scaler = StandardScaler()
lin_reg = LinearRegression()
polynomial_regression = Pipeline([
("poly_features", polybig_features),
("std_scaler", std_scaler),
("lin_reg", lin_reg),
])
polynomial_regression.fit(X,y)
y_newbig = polynomial_regression.predict(X_new)
plt.plot(X_new,y_newbig,style,label=str(degree),linewidth=width)
plt.plot(X, y, "b.", linewidth=3)
plt.legend(loc="upper left")
plt.xlabel("$x_1$", fontsize=18)
plt.ylabel("$y$", rotation=0, fontsize=18)
plt.axis([-3, 3, 0, 10])
plt.show()
| true |
12e2a3f03fa1d9648595bf5f7e36da64af533a42 | Python | thomasdf/Computer-Vision-Project | /learning/SlidingWindow.py | UTF-8 | 1,031 | 3.125 | 3 | [] | no_license | import numpy as np
from image.Image import Img
from image import Image
def slidingwindowclassify(image: Img, stride: int, height: int, width: int, classifier: callable):
    """Slide a height*width window over *image* in steps of *stride* and run
    *classifier* on every window; return the list of classifier results."""
    image.normalize()
    imgwidth = image.shape[1]
    imgheight = image.shape[0]
    result = []
    # BUG FIX: the original used 'and', so a window exceeding only one
    # dimension was accepted and produced out-of-range crops.
    if height > imgheight or width > imgwidth:
        raise Exception("Image smaller than classifier")
    # BUG FIX: np.floor returns a float, which range() rejects with a
    # TypeError; integer floor division gives the same step counts.
    horizontal = (imgwidth - width) // stride
    vertical = (imgheight - height) // stride
    # NOTE(review): iterating exactly 'vertical'/'horizontal' times skips
    # the final fitting window position; preserved from the original.
    ymin = 0
    ymax = height
    for _row in range(vertical):
        xmin = 0
        xmax = width
        for _col in range(horizontal):
            # Crop a fresh copy per window and classify it.
            window = Img.from_image(image.image).crop(xmin, ymin, xmax, ymax)  # not efficient at all...
            result.append(classify(window, classifier))
            xmin = xmin + stride
            xmax = xmax + stride
        ymin = ymin + stride
        ymax = ymax + stride
    return result
def classify(image: Img, classifier: callable):
    """runs the classifier on the image"""
    # The classifier consumes the image's flattened 1-D pixel array.
    return classifier(image.arr1d)
| true |
575d77ca79381f176b25c9229afc97535f5bb545 | Python | JarvixHsj/Py_Grammar_exercises | /oop/oop_simplestclass.py | UTF-8 | 243 | 3.65625 | 4 | [] | no_license | class Person:
pass #一个空的代码块
class Person2:
    """Minimal demo class exposing a single greeting method."""

    def say_hi(self, name = 'Jarvix'):
        """Print a greeting addressed to *name* (defaults to 'Jarvix')."""
        greeting = 'hi {0},how are you?'.format(name)
        print(greeting)
p = Person();
print(p)  # Person defines no __repr__, so this prints the default <...Person object at 0x...>
print('-------Person2---------')
p2 = Person2()
p2.say_hi('hsj')
d946a30e785309b401e0e85efb429979c4fe1150 | Python | chandrakant100/Assignments_Python | /Practical/assignment4/product.py | UTF-8 | 548 | 3.640625 | 4 | [] | no_license | import math
count = input("Enter total numbers want to multiply(max = 4):")
if count.isnumeric() == 0:
print("It is a string!!!")
exit()
count = int(count)
countNum = 1
def multiply(num1 = 1, num2 = 1, num3 = 1, num4 = 1):
return num1*num2*num3*num4
if count > 4:
print("Maximux intput is 4")
exit()
for i in range(count):
num = input("Enter number {0}:".format(i+1))
if num.isnumeric == False:
print("It is a string!!!")
exit()
num = int(num)
countNum *= multiply(num)
print(countNum) | true |
c43f5822fab1496737dbb7a07ac1ce013dd67084 | Python | BrunoLQS/Projects_and_ITA | /Z-Transform/IZ_transform/iztrans_15.Py | UTF-8 | 509 | 2.890625 | 3 | [] | no_license | title="""Simétrico da Fórmula 6"""
# Inverse Z-transform demo: evaluate the sequence for n in [-N, N] and
# stem-plot it.  NOTE(review): Piecewise, Sum, z, a, n, oo, iztrans,
# nparange, figure, stem and show are not defined here — they presumably
# come from star imports elsewhere in the project; confirm before reuse.
N=6
X=Piecewise((z/(-a + z), True), (-Sum(a**n*z**(-n), (n, -oo, -1)), True))
X=X.subs(a,2)
# 'a' must be replaced by an INTEGER!
#######
m=0.999
#######
results=[ iztrans(X,i,m) for i in nparange(-N,N+1)]
fig = figure()
ax1 = fig.add_subplot(111)
ax1.set_ylabel('Valor',fontsize=15)
ax1.set_xlabel('N',fontsize=15)
ax1.set_title(title,fontsize=18)
stem(nparange(-N,N+1),results, use_line_collection=True,linefmt=None,
     markerfmt=None, basefmt=None)
show()
39dadbcb094eb81fe86c09e9c048938792a2882c | Python | VaibhavD143/Coding | /leet_reorder_list_2.py | UTF-8 | 670 | 3.375 | 3 | [] | no_license | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def reorderList(self, head: 'ListNode') -> None:
        """
        Reorder L0->L1->...->Ln in place into L0->Ln->L1->Ln-1->...

        Do not return anything, modify head in-place instead.

        O(n) time / O(1) extra space: find the middle, reverse the second
        half, then interleave the two halves.  (The original re-walked to
        the current tail for every node, which was O(n^2).)
        The 'ListNode' annotation is quoted because the class is only
        provided by the judge environment.
        """
        if not head or not head.next:
            return
        # 1. Advance slow to the end of the first half (middle for odd
        #    lengths) using the classic slow/fast pointer scan.
        slow, fast = head, head
        while fast.next and fast.next.next:
            slow = slow.next
            fast = fast.next.next
        # 2. Detach the second half and reverse it.
        second = slow.next
        slow.next = None
        prev = None
        while second:
            nxt = second.next
            second.next = prev
            prev = second
            second = nxt
        # 3. Interleave: the first half is never shorter than the reversed
        #    second half, so the merge terminates when 'second' runs out.
        first, second = head, prev
        while second:
            first_next, second_next = first.next, second.next
            first.next = second
            second.next = first_next
            first = first_next
            second = second_next
| true |
bb58aa94276cc0a02844da0c4183eb360200b8b0 | Python | vedrankolka/APR | /dz4/ga/population/selection.py | UTF-8 | 2,109 | 3.453125 | 3 | [
"Apache-2.0"
] | permissive | import numpy as np
class Selection:
    """Abstract interface for GA parent-selection strategies."""
    def select_parents(self):
        # Return two parents chosen from the current population.
        pass
    def set_population(self, population):
        # Replace the population this strategy draws parents from.
        pass
    def update(self, population):
        # Refresh cached statistics after the population changed.
        pass
class RouletteWheelSelection(Selection):
def __init__(self, n, minimum_effective_fitnes=0.0):
self.minimum_effective_fitness = minimum_effective_fitnes
self.population = np.zeros(n)
self.effective_fitnesses = np.zeros(n)
self.probabilities = np.zeros(n)
def select_parents(self):
index_of_momma = RouletteWheelSelection.choose(self.probabilities, 1)
momma_probability = self.probabilities[index_of_momma]
if not isinstance(momma_probability, float):
print('wtf')
self.probabilities[index_of_momma] = 0.0
scale = 1 - momma_probability
index_of_poppa = RouletteWheelSelection.choose(self.probabilities, scale)
self.probabilities[index_of_momma] = momma_probability
return self.population[index_of_momma], self.population[index_of_poppa]
def set_population(self, population):
self.population = population
self.update(population)
def update(self, old_population):
fitness_sum = 0.0
f_worst = min(old_population).fitness - self.minimum_effective_fitness
for i in range(len(old_population)):
self.effective_fitnesses[i] = old_population[i].fitness - f_worst
fitness_sum += self.effective_fitnesses[i]
self.probabilities = self.effective_fitnesses / fitness_sum
if not isinstance(self.probabilities[0], float):
print('jebo me pas')
@staticmethod
def choose(probabilities, scale):
acc = 0.0
try:
p = np.random.uniform(0, scale, 1)
except ValueError:
print(scale)
print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
for i in range(len(probabilities)):
acc += probabilities[i]
if p <= acc:
return i
raise RuntimeError('No one was chosen!\nprobabilities:', probabilities, '\nscale', scale)
| true |
8ecc7fa0d01f0e0f73ff9dc3d58372dffc43af72 | Python | krzysztof-piasecki/python-hackerrank | /HackerRank/MediumTasks/LivingWithoutLoopsSort.py | UTF-8 | 955 | 3.28125 | 3 | [] | no_license | def partitionhelper(x, j, pivot, i):
    # Lomuto-style partition step expressed as recursion (the exercise
    # forbids loops): i = last index known to hold a value < pivot,
    # j = scan cursor over the rest of the list.
    if j < len(x) - 1:
        if x[j + 1] < pivot:
            # x[j+1] belongs left of the pivot: swap it into the "smaller"
            # region and grow that region by one.
            x[j + 1], x[i + 1] = x[i + 1], x[j + 1]
            i += 1
            j += 1
            return partitionhelper(x, j, pivot, i)
        j += 1
        return partitionhelper(x, j, pivot, i)
    else:
        # Scan finished: return the partitioned list and the final indices.
        return x, i, j, pivot
def quickmove(x):
    """Recursively quick-sort *x* (no loops — recursion only) and return
    a new sorted list."""
    # Base case: empty and single-element lists are already sorted.
    if len(x) <= 1:
        return x
    pivot = x[0]
    # Partition around the first element, then swap the pivot into its
    # final position i.
    x, i, j, pivot = partitionhelper(x, 0, pivot, 0)
    x[0], x[i] = x[i], x[0]
    smaller = quickmove(x[:i])
    larger = quickmove(x[i + 1:])
    smaller.append(x[i])
    return smaller + larger
def stringtoint(list, i):
    # Recursively convert list[i:] from strings to ints, mutating the list
    # in place (and also returning it once the end is reached).
    # NOTE(review): the parameter shadows the built-in name 'list'.
    if i < len(list):
        list[i] = int(list[i])
        i += 1
        return stringtoint(list, i)
    else:
        return list
# Read whitespace-separated numbers, convert them to ints in place, then
# print them sorted.
list = input().split()
i = 0
stringtoint(list, i)
# The original also called quickmove(list) here and discarded the result;
# quickmove returns a new list, so that pass had no effect on the output.
print(*quickmove(list))
| true |
fd91e5f4127b050ad7b667fa9f7c71d715fa9b75 | Python | veralake777/toolkitten | /summer-of-code/week-03/nltkCh1.py | UTF-8 | 617 | 2.953125 | 3 | [
"MIT"
] | permissive | import nltk
#for line in text1:
# for word in line.split():
# if word.endswith('ing'):
# print(word)
#text1.concordance("man")
#text1.concordance("woman")
#text1.dispersion_plot(["man", "woman", "child"])
#print(len(text1))
#prints dictionary of vocab in text
##print(sorted(set(text1)))
#prints number of unique words in text
##print(len(set(text1)))
#Lexical richenss of text
##lex_rich = len(set(text1)) / len(text1)
##print(lex_rich)
##def lex_diversity(text):
##return len(set(text)) / len(text)
##def precentage(count, total):
##return 100 * count / total
#nltk.chat.chatbots() | true |
aafd6ef840aeaf79862a4eafe3e5208cc6601fd5 | Python | maggieaschneider/WarGame---BAMG | /screen_end.py | UTF-8 | 1,065 | 3.484375 | 3 | [] | no_license | from tkinter import *
class Screen_End(Frame):
def __init__(self, master, call_on_next, winner):
super(Screen_End, self).__init__(master)
self.call_on_selected = call_on_next
if winner=="p":
self.winner="You"
if winner=="c":
self.winner="Computer"
self.create_widgets()
self.grid()
def create_widgets(self):
Label(self, text = "Thank you for playing!", fg = "Red").grid(row = 0, column = 0, sticky = N)
Label(self, text = ("%s won the game! Congratulations to %s!" % (self.winner, self.winner.lower())),
font="system 18").grid(row=1, column=0, sticky=N)
image = PhotoImage(file="cardImages/back_cards-07.png")
c = Label(self, image=image)
c.photo = image
c.grid(row=2, column=0)
Button(self, text = "Click To Exit! Play again!", fg = "blue2", font="system 16", bg="cyan2",
command=self.exit_clicked).grid(row = 3, column = 0, sticky = N)
def exit_clicked(self):
self.call_on_selected() | true |
2914bf4b980a3a68445a8b48432469af6d34d3d0 | Python | m0sk/MFTI | /01_turtle/003.py | UTF-8 | 590 | 3.5625 | 4 | [] | no_license | #!C:\UDISK\ProgramFiles\python\3.6.2\python.exe
'''
by coordinate (x,y), define in which quarter point is
^y
II | I
|
----+---->
|0 x
III | IV
'''
x = int(input()) # intput() - enter from input (keyboard)
y = int(input())
'''
x>0, y>0 = I
x<0, y<0 = II
x<0, y<0 = III
x>0, y<0 = IV
'''
if x>0:
if y>0:
print('I (First quarter)')
else:
print('IV (Fourth quarter)')
else:
if y>0:
print('II (Second quarter)')
else:
print('III (Third quarter)')
input() | true |
48ce3f277d1e416a79299a9009bdfe9e683fdb10 | Python | njyaron/SMOP | /analizer/analizer/resultsParagraphAnalysis.py | UTF-8 | 1,812 | 2.8125 | 3 | [] | no_license | #### Paragraph analysis ####
import trigraphArray
import matplotlib.pyplot as plt
people = trigraphArray.load_all()
# distribution of similarity in sessions from the same person, for some users
names = ['Nir Yaron', 'Adi Asher', 'Yovel Rom', 'Guy Levanon']
for name in names:
person = [p for p in people if p.name == name][0]
plt.hist(person.vars)
plt.show()
# distribution of similarity in sessions from the same person, for all users
varsSimilar = [var for person in people for var in person.vars]
plt.hist(varsSimilar)
plt.show()
# distribution of similarity in sessions of different people
varsDifferent = []
for person in people:
people2 = [p for p in people if p != person]
sessions = [s for person1 in people2 for s in person1.sessions]
for session1 in person.sessions:
for session2 in sessions:
if trigraphArray.can_compare(session1, session2):
varsDifferent.append(trigraphArray.compare_sessions(session1, session2))
plt.hist(varsSimilar, normed=True)
plt.hist(varsDifferent, normed=True)
plt.show()
# distribution of common trigraphs for same person sessions
common_trigraphs = [c for p in people for c in p.get_common_trigraphs()]
plt.hist(common_trigraphs)
plt.show()
#sucess rates of this method
learned_people = [p for p in people if len(p.sessions) > 1]
sessions = [(person1.name,s) for person1 in learned_people for s in person1.sessions]
for name, session in sessions:
print(name, name==trigraphArray.best_fit(people,session))
successes = [name==trigraphArray.best_fit(people,session) for name, session in sessions]
print(sum(successes)/len(successes))
#gets 0.8837209302325582 (fails on Greenberg and Caspi because of typos)
#if i change trigraphArray.MINIMAL_COMMON_KEYS = 10:
#gets 0.9767441860465116 (fails only once with Guy=Efi)
| true |
0fd064a50f701a9036f3e113e3ca64fc6765d7a1 | Python | BossaMelon/python_tetris | /rotate_system/rotate_srs.py | UTF-8 | 1,711 | 3.140625 | 3 | [] | no_license | from tetromino import Tetromino
class SRS:
# TODO anti clockwise
# coordinate origin in left up corner
wall_kick_JLSTZ_clockwise = (((0, 0), (-1, 0), (-1, -1), (0, 2), (-1, 2)),
((0, 0), (1, 0), (1, 1), (0, -2), (1, -2)),
((0, 0), (1, 0), (1, -1), (0, 2), (1, 2)),
((0, 0), (-1, 0), (-1, 1), (0, -2), (-1, -2)))
wall_kick_I_clockwise = (((0, 0), (-2, 0), (1, 0), (-2, 1), (1, -2)),
((0, 0), (-1, 0), (2, 0), (-1, -2), (2, 1)),
((0, 0), (2, 0), (-1, 0), (2, -1), (-1, 2)),
((0, 0), (1, 0), (-2, 0), (1, 2), (-2, -1)))
wall_kick_JLSTZ = {'clockwise': wall_kick_JLSTZ_clockwise, 'anticlockwise': wall_kick_JLSTZ_clockwise}
wall_kick_I = {'clockwise': wall_kick_I_clockwise, 'anticlockwise': wall_kick_I_clockwise}
wall_kick_table = {'J': wall_kick_JLSTZ,
'L': wall_kick_JLSTZ,
'S': wall_kick_JLSTZ,
'T': wall_kick_JLSTZ,
'Z': wall_kick_JLSTZ,
'I': wall_kick_I}
def __init__(self, tetromino, clockwise=True):
self.tetromino = tetromino
# clockwise or anticlockwise
self.rotate_direction = 'clockwise' if clockwise is True else 'anticlockwise'
def check_rotate(self):
if self.tetromino.name == 'O':
return (0, 0),
return self.wall_kick_table[self.tetromino.name][self.rotate_direction][self.tetromino.rotation]
if __name__ == '__main__':
s = SRS(tetromino=Tetromino(3, 0, 2))
a = s.check_rotate()
print(a)
| true |
d1b97a9678290136bb886329683f683f61656746 | Python | trunksio/Supbot2 | /supbot/results.py | UTF-8 | 438 | 2.53125 | 3 | [
"MIT"
] | permissive | from enum import Enum
class GotoStateResult(Enum):
"""
When goto state successfully does its operation,
even if it doesnt reach the `to` state (as it might just take one step sometimes`
"""
SUCCESS = 0,
"""
When element is not found
"""
ELEMENT_NOT_FOUND = 1,
"""
When check fails
"""
CHECK_FAILED = 2
class ActionStatus(Enum):
WAITING = 0,
SUCCESS = 1,
UNSUCCESS = 2
| true |
17da5a2536348093407bfc9f2b25b261f970386c | Python | AEliu/QK_learn | /05 数据持久化/上课代码/01 文件操作/02 文件的读取.py | UTF-8 | 234 | 2.953125 | 3 | [] | no_license | """
r 读取文件
"""
with open('hello.txt', mode='r', encoding='utf-8') as f:
# text = f.read()
text = f.readline()
print(text)
text = f.readline()
print(text)
text = f.readlines()
print(text) | true |
6861e25631aa9d7cd51e0b770b4a872a789e9ca7 | Python | NJ-zero/LeetCode_Answer | /string/isLongPressedName.py | UTF-8 | 1,492 | 3.890625 | 4 | [] | no_license | # coding=utf-8
# Time: 2019-10-19-17:15
# Author: dongshichao
'''
你的朋友正在使用键盘输入他的名字 name。偶尔,在键入字符 c 时,按键可能会被长按,而字符可能被输入 1 次或多次。
你将会检查键盘输入的字符 typed。如果它对应的可能是你的朋友的名字(其中一些字符可能被长按),那么就返回 True。
示例 1:
输入:name = "alex", typed = "aaleex"
输出:true
解释:'alex' 中的 'a' 和 'e' 被长按。
示例 2:
输入:name = "saeed", typed = "ssaaedd"
输出:false
解释:'e' 一定需要被键入两次,但在 typed 的输出中不是这样。
示例 3:
输入:name = "leelee", typed = "lleeelee"
输出:true
示例 4:
输入:name = "laiden", typed = "laiden"
输出:true
解释:长按名字中的字符并不是必要的。
思路:
遍历typed,遇到和name 相同的数,pos +1
遍历结束,pos 应该和 name 长度一样
'''
class Solution(object):
def isLongPressedName(self, name, typed):
"""
:type name: str
:type typed: str
:rtype: bool
"""
pos = 0
i=0
while i < len(typed):
if typed[i] == name[pos]:
print(name[pos],pos)
pos += 1
if pos > len(name)-1:
break
i +=1
return pos == len(name)
s= Solution()
print(s.isLongPressedName("aleeexx","aaleeeexxx"))
a = "abc"
print(a[0]) | true |
3b7b7b40d2d726924ba455759b2cf497c17a72ec | Python | YIYANGLAI1997/leetcode | /past/python/Median_of_Two_Sorted_Arrays.py | UTF-8 | 883 | 3.734375 | 4 | [] | no_license | # 4 Median of Two Sorted Arrays Hard
# Should be optimized by the divide-conquer approach
class Solution:
# @return a float
def findMedianSortedArrays(self, A, B):
array = []
i = j = 0
while True:
if i == len(A):
while j < len(B):
array.append(B[j])
j+=1
break
elif j == len(B):
while i < len(A):
array.append(A[i])
i+=1
break
else:
if A[i] < B[j]:
array.append(A[i])
i+=1
else:
array.append(B[j])
j+=1
num = len(array)
if num % 2 == 0:
return (float(array[num/2]) + array[num/2-1])/2
else:
return array[num/2]
| true |
e772da07c3ddc9fecc60876b6c3c0df5b051d72a | Python | janakhpon/Datavisualization-plot | /ex002.py | UTF-8 | 639 | 3.09375 | 3 | [
"MIT"
] | permissive | import matplotlib.pyplot as plt
years = [1950, 1955, 1960, 1965, 1970, 1975, 1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2016, 2017, 2018, 2019]
pops = [0.00, 1.92, 2.14, 2.22, 2.37, 2.34, 2.26, 2.10, 1.71, 1.21, 1.25, 0.94, 0.67, 0.81, 0.69, 0.64, 0.61, 0.63]
deaths = [0.00, 0.77, 0.68, 0.54, 0.38, 0.23, 0.11, 0.02, 0.05, 0.12, 0.18, 0.25, 0.31, 0.35, 0.37, 0.38]
plt.plot(years, pops, '--', color = (255/255, 100/255, 100/255))
plt.plot(years, deaths, color = (255/255, 50/255, 80/255))
plt.ylabel("Population percentage rate")
plt.xlabel("Population Growth")
plt.title("Myanmar Population Survey")
plt.show()
| true |
ec29402ab0456b6c3828cae955ae009866658649 | Python | l-boisson/cv-dockers | /dockerfiles/cv-devenv/test/hello_world.py | UTF-8 | 1,867 | 2.5625 | 3 | [
"MIT"
] | permissive | # # %%
# import cv2 as cv
# import sys
# img = cv.imread("lena.tif")
# if img is None:
# sys.exit("Could not read the image.")
# cv.imshow("Display window", img)
# k = cv.waitKey(0)
# # %%
# from __future__ import absolute_import, division, print_function, unicode_literals
# # Tensorflow imports
# import tensorflow as tf
# print("Tensorflow version: " + tf.__version__)
# from tensorboard.plugins.hparams import api as hp
# # Comment below in order to run on CPU
# physical_devices = tf.config.experimental.list_physical_devices('GPU')
# assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
# tf.config.experimental.set_memory_growth(physical_devices[0], True)
# %%
import numpy as np
import matplotlib
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
plt.ion()
# %%
I = np.eye(25, dtype="int8")
plt.figure()
plt.imshow(2*I)
figManager = plt.get_current_fig_manager()
figManager.window.showMaximized()
plt.show()
# %%
# import sys
# from PyQt5 import QtCore, QtWidgets
# from PyQt5.QtWidgets import QMainWindow, QLabel, QGridLayout, QWidget
# from PyQt5.QtCore import QSize
# class HelloWindow(QMainWindow):
# def __init__(self):
# QMainWindow.__init__(self)
# self.setMinimumSize(QSize(640, 480))
# self.setWindowTitle("Hello world - pythonprogramminglanguage.com")
# centralWidget = QWidget(self)
# self.setCentralWidget(centralWidget)
# gridLayout = QGridLayout(self)
# centralWidget.setLayout(gridLayout)
# title = QLabel("Hello World from PyQt", self)
# title.setAlignment(QtCore.Qt.AlignCenter)
# gridLayout.addWidget(title, 0, 0)
# if __name__ == "__main__":
# app = QtWidgets.QApplication(sys.argv)
# mainWin = HelloWindow()
# mainWin.show()
# sys.exit( app.exec_() )
# %%
| true |
b77a0742ab8913b3a48ef5a72045f4349fc61020 | Python | etxyc/academic-network-scraping-and-analysis | /Analysis/CNAnalysis.py | UTF-8 | 4,412 | 2.9375 | 3 | [] | no_license | '''
This file is used to generate the citation network from the MAG data in the Mongo database
'''
import pymongo as mg
import re
import networkx as nx
import numpy as np
import pickle
from collections import Counter
from DrawGraph import drawHistogramSequence
'''
This function is to get a particular category from mag_papers_all
- mongo_url: the url used to connect to the mongo database
- category: type(string), the category selected to construct the citation network
'''
def loadMagPaperAll(mongo_url, category): # load from collection: mag_papers_all (non field-specific)
edges = []
nodes = []
edge_pickle_file = "../pickle_file/CN_edges_{0}.pkl".format(category)
node_pickle_file = "../pickle_file/CN_nodes_{0}.pkl".format(category)
try:
client = mg.MongoClient(mongo_url, serverSelectionTimeoutMS = 1)
results = client.arxiv.mag_papers_all.find({'fos': re.compile(category), 'year': {'$gte': 2007}, "authors": {'$exists': True, '$ne': None}}, no_cursor_timeout = True)
except Exception as err:
raise err
return results
def loadDiGraphList(edges, nodes): # don't need to connect with the mongo database again and delete the mongo_url
graph = nx.DiGraph()
graph.add_nodes_from(nodes)
graph.add_edges_from(edges)
return graph
def loadDiGraphPickle(f_edges, f_nodes):
edges = pickle.load(open(f_edges, 'rb'))
nodes = pickle.load(open(f_nodes, 'rb'))
graph = loadDiGraphList(edges, nodes)
return graph
# load from the the field-specific collection
def loadSpecDatabase(mongo_url, collection_name):
try:
client = mg.MongoClient(mongo_url, serverSelectionTimeoutMS = 1)
db = client.arxiv
collection = db[collection_name]
# get all the information stored in the database
results = collection.find(no_cursor_timeout = True)
except Exception as err:
print(err)
return results
def loadDiGraphData(results):
edges = []
nodes = []
edges_m = {}
nodes_m = {}
for record in results:
if 'references' in record.keys():
ref = record['references']
for item in ref:
if (record['_id'], item) not in edges_m.keys():
edges_m[(record['_id'], item)] = 0
if item not in nodes_m.keys():
nodes_m[item] = 0
if record['_id'] not in nodes_m.keys():
nodes_m[record['_id']] = 0
for item in edges_m.keys():
edges.append(item)
for item in nodes_m.keys():
nodes.append(item)
return edges, nodes
def main(): #pragma: no cover
category = "Astrophysics" # Machine learning # Astrophysics # Computer Science # Statistics
mongo_url = "mongodb://xxx:xxx@xxx:xxx/arxiv"
'''
res = loadMagPaperAll(mongo_url, category)
edges, nodes = loadDiGraphData(res)
edge_pickle_file = "../pickle_file/CN_edges_{0}_2007.pkl".format(category)
node_pickle_file = "../pickle_file/CN_nodes_{0}_2007.pkl".format(category)
graph_file = ("../pickle_file/CN_graph({0})_2007.pkl").format(category)
'''
############ above - load from the mag_paper_all and have some limitation
############ below - load from the specific database
collection_name = "computer_science" # machine_learning # astrophysics # computer_science # statistics
edge_pickle_file = "../pickle_file/mag_spec_cn_edges_{0}.pkl".format(collection_name)
node_pickle_file = "../pickle_file/mag_spec_cn_nodes_{0}.pkl".format(collection_name)
graph_file = ("../pickle_file/mag_spec_cn_graph_{0}.pkl").format(collection_name)
res = loadSpecDatabase(mongo_url, collection_name)
edges, nodes = loadDiGraphData(res)
############ Analysis and store the edges, nodes, and graph
#store the edges and nodes
with open(edge_pickle_file, 'wb') as f:
pickle.dump(edges, f)
with open(node_pickle_file, 'wb') as f:
pickle.dump(nodes, f)
# load the edges and nodes
edges = None
nodes = None
with open(edge_pickle_file, 'rb') as f:
edges = pickle.load(f)
with open(node_pickle_file, 'rb') as f:
nodes = pickle.load(f)
graph = loadDiGraphList(edges, nodes)
#store the graph
with open(graph_file, 'wb') as f:
pickle.dump(graph, f)
if __name__ == "__main__":
main()
| true |
17ec3a5a73ea4295c7b2bd185f5e4c702c9574ac | Python | sevetseh28/csv_proc_async | /producer/producer.py | UTF-8 | 4,398 | 3.0625 | 3 | [] | no_license | import argparse
import csv
import json
import logging
import logging.config
import os
import re
from typing import Generator, Tuple
import chardet
from tasks import insert_person_db
def setup_logging(
default_path='logging.json',
default_level=logging.INFO,
env_key='LOG_CFG'
):
"""
Setup logging configuration
"""
path = default_path
value = os.getenv(env_key, None)
if value:
path = value
if os.path.exists(path):
with open(path, 'rt') as f:
config = json.load(f)
logging.config.dictConfig(config)
else:
logging.basicConfig(level=default_level)
class Producer(object):
"""
Class that represents the Producer of tasks sent to the broker
"""
def __init__(self):
self.logger = logging.getLogger('producer')
def __get_csv_fullnames_emails(self, filepath: str) -> Generator[Tuple[str, str], None, None]:
"""
Generator function that reads a CSV file and returns fullname and email if found.
- Email is checked against a regex and 254 char limit (http://www.rfc-editor.org/errata_search.php?rfc=3696&eid=1690)
- Fullname is the first value in a row that doesnt match the email regex and is under 256 chars
(should be enough for a fullname).
** If one of both is not found then the row is filtered out.
:param filepath: abs path of CSV location
:return: tuple (fullname, email)
"""
try:
# Guess the encoding first
rawdata = open(filepath, "rb").read(1024)
result = chardet.detect(rawdata)
self.logger.info(f'Encoding detected for {filepath}: {result}')
with open(filepath, encoding=result['encoding'], newline='', mode='r') as f:
try:
# Sniff the file to guess the dialect (separator, newline, etc)
dialect = csv.Sniffer().sniff(f.readline()) # TODO check that the line is not too big
# Reset the position of reading
f.seek(0)
reader = csv.reader(f, dialect=dialect)
for row in reader:
fullname = None
email = None
for val in row:
val = val.strip()
if email is None and re.match(r"[^@]+@[^@]+\.[^@]+", val) and len(val) <= 254:
self.logger.debug(f'Found email: {val}')
email = val
elif fullname is None and len(val) <= 256: # Check the length of fullname
self.logger.debug(f'Found fullname: {val}')
fullname = val
if fullname is not None and email is not None:
yield fullname, email
break # We have found both
except csv.Error: # File appears not to be in CSV format; move along
self.logger.exception(f'Generic error reading the CSV file: {filepath}')
except UnicodeDecodeError:
self.logger.exception(msg='')
except FileNotFoundError:
self.logger.exception('CSV file was not found')
def process_csv(self, path):
"""
Reads an arbitrary CSV file and sends tasks to a worker process that inserts (fullname, email) into
a database table if found.
:return:
"""
count = 0
for count, (fullname, email) in enumerate(self.__get_csv_fullnames_emails(path), start=1):
insert_person_db.apply_async(kwargs={'fullname': fullname, 'email': email})
self.logger.info(f'Task sent to broker: FULLNAME:{fullname} - EMAIL: {email}')
if count == 0:
self.logger.info(f'No fullnames/emails found in {path}')
if __name__ == '__main__':
setup_logging()
parser = argparse.ArgumentParser(
description='Reads an arbitrary CSV file from the filesystem and sends tasks to a worker process that inserts (fullname, email) \
into a database table if found.')
parser.add_argument('-p', '--path', type=str, help='path of the CSV file to process')
args = parser.parse_args()
p = Producer()
p.process_csv(args.path)
| true |
f8fe92b5ec2fef3c750d1f021f8feac037b18d71 | Python | ChandanShukla/Hackerrank_Solved | /Merge_The_Tools.py | UTF-8 | 210 | 2.640625 | 3 | [] | no_license | from collections import OrderedDict
def merge_the_tools(string, k):
chunks = [string[i:i+k] for i in range(0, len(string), k)]
for chunk in chunks:
print ("".join(OrderedDict.fromkeys(chunk))) | true |
0ca9e24404185874b6655e2fb6980840cc603306 | Python | by777/python-mooc-ml-pratice | /分类/上证指数涨跌预测-svm.py | UTF-8 | 2,651 | 3.640625 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 27 22:24:54 2020
@author: Xu Bai
上证指数涨跌预测
实验目的:
根据给出时间前150天的历史数据,预测当天上证指数的涨跌
技术路线:
sklearn.svm.SVC
选取5列特征:
收盘价、最高价、最低价、开盘价、成交量
------------------------------
交叉验证思想
先将数据集D划分为k个大小相似的互斥子集,每个子集都尽可能保持数据分布的一致性,
即从D中通过分层采样得到。
然后,每次用k-1个子集的并集作为训练集,余下的那个子集作为测试集;
这样就可以获得k组训练/测试集,从而可以进行k次训练和测试,最终返回的是这个k个测试结果的均值
。通常把交叉验证称为‘K者交叉验证“,K最常用的取值是10
"""
import pandas as pd
import numpy as np
from sklearn import svm
# 在0.18以上的sklearn版本中,cross_validation sklearn.learning_curve
# sklearn.grid_search,都在model_selection下面
from sklearn.model_selection import train_test_split
data = pd.read_csv('000777.csv',encoding='gbk',parse_dates=[0],index_col=0)
# 按照事件升序
data.sort_index(0,ascending=True,inplace=True)
print(data)
# 选取150天
dayfeature = 150
# 选取的特征
featurenum = 5 * dayfeature
# 150天的5个特征值
# data.shape[0] - dayfeature意思是因为我们要用150天数据做训练,
# 对于条目为200条的数据,只有50条数据是前150天的数据来训练的,所以训练集大小就是
# 200 - 150 ,对于每一条数据,它的特征是前150天的所有特征数据,即150 * 5 , +1是
# 将当天开盘价引入作为一条特征数据
x = np.zeros((data.shape[0]-dayfeature,featurenum+1))
# 记录涨或跌
y = np.zeros((data.shape[0]-dayfeature))
for i in range(0,data.shape[0]-dayfeature):
x[i,0:featurenum]=np.array(data[i:i+dayfeature] \
[[u'收盘价',u'最高价',u'最低价',u'开盘价',u'成交量']]).reshape((1,featurenum))
x[i,featurenum]=data.ix[i+dayfeature][u'开盘价']
for i in range(0,data.shape[0]-dayfeature):
if data.ix[i+dayfeature][u'收盘价']>=data.ix[i+dayfeature][u'开盘价']:
y[i]=1
else:
y[i]=0
# 如果当天收盘价高于开盘价 , y[i]=1代表涨否则0跌
# 默认rbf,linear,poly,sigmoid
clf=svm.SVC(kernel='rbf')
result = []
for i in range(5):
# 切分训练集测试集
x_train, x_test, y_train, y_test = \
train_test_split(x, y, test_size = 0.2)
clf.fit(x_train, y_train)
result.append(np.mean(y_test == clf.predict(x_test)))
print("svm classifier accuacy:")
print(result) | true |
5ce05a1e61cd3b0d2b68e083afa716469ba397f5 | Python | tech-with-veena/snake-games | /snake.py | UTF-8 | 1,345 | 3.625 | 4 | [] | no_license | from turtle import Turtle
startingpositions=[(0,0),(-20,0),(-40,0)]
movedistance=20
up=90
down=270
left=180
right=0
class Snake:
def __init__(self):
self.segments=[]
self.createsnake()
self.head=self.segments[0]
def createsnake(self):
for position in startingpositions:
self.addsegment(position)
def addsegment(self,position):
newsegment = Turtle("square")
newsegment.color("white")
newsegment.penup()
newsegment.goto(position)
self.segments.append(newsegment)
def extend(self):
self.addsegment(self.segments[-1].position())
def move(self):
for segnum in range(len(self.segments) - 1, 0, -1):
newx = self.segments[segnum - 1].xcor()
newy = self.segments[segnum - 1].ycor()
self.segments[segnum].goto(newx, newy)
self.head.forward(movedistance)
def up(self):
if self.head.heading() !=down:
self.head.setheading(up)
def down(self):
if self.head.heading() != up:
self.head.setheading(down)
def left(self):
if self.head.heading() != right:
self.head.setheading(left)
def right(self):
if self.head.heading() != left:
self.head.setheading(right)
| true |
1a0c6e8ce11a16b50bac1d9db20ae213fbbfaa3e | Python | AthenaTsai/Python | /python_K_exercise/exercise_kevin.py | UTF-8 | 1,214 | 3.125 | 3 | [] | no_license |
##ex1
##
##member_with_error = ['sam_liao@payez.com.tw',
## 'kevin_chen@payez.com.tw',
##‘I_LOVE_SAM',
## 'jc_wang@payez.com.tw', '456']
##
## ['sam_liao@payeasy.com.tw', 'kevin_huang@payeasy.com.tw',
## 'jc_wang@payeasy.com.tw']
##
##1. create list variable member_without_error to store data
##2. for loop member_with_error to iterate string
##3. use if else to find out which string should be processed
##4. replace string to correct one
##5. append correct string to member_without_error
member_with_error = ['sam_liao@payez.com.tw', 'kevin_chen@payez.com.tw', 'I_LOVE_SAM', 'jc_wang@payez.com.tw', '456']
print ('original data:')
print (member_with_error)
member_without_error = []
for i in member_with_error:
if '@' in i:
member_without_error.append(i)
print ('filtered data:')
print (member_without_error)
##ex2
## staffs = [['sam', 613, 'sam_liao@payez.com.tw'],
## ['charlotte', 712, 'charlotte_wang@payeasy.com.tw'],
## ['kevin', 617, 'kevin_huang@康迅.公司.台灣']]
## ['payez.com.tw', 'payeasy.com.tw', '康迅.公司.台灣']
staffs = [['sam', 613, 'sam_liao@payez.com.tw'],
['charlotte', 712, 'charlotte_wang@payeasy.com.tw'],
['kevin', 617, 'kevin_huang@康迅.公司.台灣']]
print ('original data:')
print (staffs)
staffs_domain = []
for i in staffs:
staffs_domain.append(i[2].split('@')[1])
print ('extract domain:')
print (staffs_domain)
| true |
0b4a723a8c38d3404dec74821306afda62e39ab8 | Python | murakumo512/aaaa | /6.py | UTF-8 | 77 | 3 | 3 | [] | no_license | max = 0
for a in [10,9,13,17,2,1]:
if a > max:
max = a
print(max) | true |
374c6f28ec3061ec24174c4cd2d07bd6f2402425 | Python | galviset/hen-manager | /henmanager/rcontrol.py | UTF-8 | 1,841 | 3.078125 | 3 | [] | no_license | import RPi.GPIO as GPIO
import Adafruit_DHT
class Device():
def __init__(self, label, *args):
"""
Add a device controlled by relay(s).
:param label: name of the device (string)
:param *args: GPIO pin number(s) (int)
"""
self.name = label
self.relays = []
for pin in args:
self.relays.append(Relay(pin))
for relay in self.relays:
relay.deactivate()
def enable(self, rl=0):
self.relays[rl].activate()
def disable(self, rl=0):
self.relays[rl].deactivate()
class Relay():
def __init__(self, p):
"""
:param p: GPIO pin number
:var on: status of the relay
"""
self.pin = p
self.on = False
GPIO.setup(self.pin, GPIO.OUT)
def activate(self):
GPIO.output(self.pin, 0)
self.on = True
def deactivate(self):
GPIO.output(self.pin, 1)
self.on = False
class Sensor():
def __init__(self, p):
self.pin = p
GPIO.setup(self.pin, GPIO.IN)
def get_data(self):
return GPIO.input(self.pin)
class SensorDHT():
types = { '11' : Adafruit_DHT.DHT11,
'22' : Adafruit_DHT.DHT22,
'2302' : Adafruit_DHT.AM2302 }
def __init__(self, p, stype):
"""
:param p: WARNING : use GPIO number over pin number
:param stype: '11', '22' or '2302'
"""
self.pin = p
self.st = SensorDHT.types[stype]
def read_data(self):
"""
:return: humidity (int), temperature (int)
"""
#Both humidity and temperature are None if read failed
data = Adafruit_DHT.read_retry(self.st, self.pin)
if data[0] == None:
print("DHT read failed !")
data = [0, 0]
return data
| true |
18d2d8615d2a126dcd35585c8f491705b7c2c3da | Python | clarelaroche/python_AtlasOEM_lib | /OEM_PH_example.py | UTF-8 | 915 | 2.96875 | 3 | [
"MIT"
] | permissive | from AtlasOEM_PH import AtlasOEM_PH
import time
def main():
PH = AtlasOEM_PH() # create an OEM PH object
PH.write_active_hibernate(1) # tell the circuit to start taking readings
while True:
if PH.read_new_reading_available(): # if we have a new reading
pH_reading = PH.read_PH_reading() # get it from the circuit
print("OEM pH reading: " + str(pH_reading)) # print the reading
PH.write_new_reading_available(0) # then clear the new reading register
# so the circuit can set the register
# high again when it acquires a new reading
else:
#print("waiting")
time.sleep(.5) #if theres no reading, wait some time to not poll excessively
if __name__ == '__main__':
main() | true |
929300ab9483ef896d94a7f2c30b3fdc454e2b71 | Python | Tony-Y/cgnn | /src/cgnn.py | UTF-8 | 10,101 | 2.546875 | 3 | [
"Apache-2.0"
] | permissive | # Copyright 2019 Takenori Yamamoto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The main program of CGNN."""
import time
import json
import os
import copy
import numpy as np
import pandas as pd
import torch
import torch.optim as optim
from torch.utils.data import DataLoader, Subset, SubsetRandomSampler
from models import GGNN
from model_utils import Model
from data_utils import GraphDataset, graph_collate
def use_setpLR(param):
    """Return True when the scheduler config selects StepLR.

    By convention a negative first milestone means "use StepLR with a
    step size of abs(milestones[0])" instead of MultiStepLR.
    """
    return param["milestones"][0] < 0
def create_model(device, model_param, optimizer_param, scheduler_param):
    """Build a GGNN wrapped with its optimizer and learning-rate scheduler.

    Args:
        device: torch device the network is moved to.
        model_param: keyword arguments forwarded to ``GGNN``.
        optimizer_param: optimizer settings; must contain ``optim`` (one of
            "sgd", "adam", "amsgrad") and ``clip_value``; the remaining
            entries are forwarded to the optimizer constructor.
        scheduler_param: scheduler settings; must contain
            ``cosine_annealing`` (bool), ``milestones`` and ``gamma``; the
            remaining entries are forwarded to the scheduler constructor.

    Returns:
        A ``Model`` wrapper holding the device, network, optimizer,
        scheduler, and gradient clip value.

    Raises:
        NameError: if ``optimizer_param["optim"]`` is not supported.
    """
    # Work on shallow copies so the pop() calls below do not mutate the
    # caller's configuration dicts.
    optimizer_param = dict(optimizer_param)
    scheduler_param = dict(scheduler_param)

    model = GGNN(**model_param).to(device)

    clip_value = optimizer_param.pop("clip_value")
    optim_name = optimizer_param.pop("optim")
    if optim_name == "sgd":
        # SGD always uses Nesterov momentum with a fixed momentum of 0.9.
        optimizer = optim.SGD(model.parameters(), momentum=0.9,
                              nesterov=True, **optimizer_param)
    elif optim_name == "adam":
        optimizer = optim.Adam(model.parameters(), **optimizer_param)
    elif optim_name == "amsgrad":
        optimizer = optim.Adam(model.parameters(), amsgrad=True,
                               **optimizer_param)
    else:
        raise NameError("optimizer {} is not supported".format(optim_name))

    use_cosine_annealing = scheduler_param.pop("cosine_annealing")
    if use_cosine_annealing:
        # Cosine annealing reuses milestones[0] as T_max and gamma as eta_min.
        params = dict(T_max=scheduler_param["milestones"][0],
                      eta_min=scheduler_param["gamma"])
        scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, **params)
    elif use_setpLR(scheduler_param):
        # A negative first milestone selects StepLR; its magnitude is the
        # step size (see use_setpLR).
        scheduler_param["step_size"] = abs(scheduler_param["milestones"][0])
        scheduler_param.pop("milestones")
        scheduler = optim.lr_scheduler.StepLR(optimizer, **scheduler_param)
    else:
        scheduler = optim.lr_scheduler.MultiStepLR(optimizer, **scheduler_param)
    return Model(device, model, optimizer, scheduler, clip_value)
def main(device, model_param, optimizer_param, scheduler_param, dataset_param, dataloader_param,
         num_epochs, seed, load_model):
    """Run the full train / validate / test pipeline.

    Loads the graph dataset, splits it into train/val/test sets (from a
    JSON split file when available, otherwise a fixed 256/32/32 split),
    builds the model, optionally restores saved weights, trains for
    ``num_epochs`` epochs, and writes the per-graph test predictions to
    ``test_predictions.csv``.

    Args:
        device: torch device used for training and evaluation.
        model_param: GGNN constructor settings (see ``create_model``).
        optimizer_param: optimizer settings (see ``create_model``).
        scheduler_param: LR scheduler settings (see ``create_model``).
        dataset_param: dict with ``dataset_path``, ``target_name`` and
            ``split_file`` (path to a JSON file mapping "train"/"val"/
            "test" to index lists, or None).
        dataloader_param: extra keyword arguments for ``DataLoader``;
            its ``collate_fn`` entry is overwritten here.
        num_epochs: number of training epochs; with 0 the model is only
            evaluated and no checkpoint is saved.
        seed: RNG seed passed to ``torch.manual_seed``.
        load_model: if True, restore weights from "model.pth" before
            training.
    """
    print("Seed:", seed)
    print()
    # Seed before any model/dataloader construction so weight init and
    # sampling are reproducible.
    torch.manual_seed(seed)
    # Batches are graphs, so the custom collate function is mandatory.
    dataloader_param["collate_fn"] = graph_collate
    # Create dataset
    dataset = GraphDataset(dataset_param["dataset_path"], dataset_param["target_name"])
    # split the dataset into training, validation, and test sets.
    split_file_path = dataset_param["split_file"]
    if split_file_path is not None and os.path.isfile(split_file_path):
        with open(split_file_path) as f:
            split = json.load(f)
    else:
        # Fallback: first 320 graphs split 256/32/32.
        print("No split file. Default split: 256 (train), 32 (val), 32 (test)")
        split = {"train": range(256), "val": range(256, 288), "test": range(288, 320)}
    print(" ".join(["{}: {}".format(k, len(x)) for k, x in split.items()]))
    # Create a CGNN model
    model = create_model(device, model_param, optimizer_param, scheduler_param)
    if load_model:
        print("Loading weights from model.pth")
        model.load()
    # Train: random sampling within the train/val index subsets.
    train_sampler = SubsetRandomSampler(split["train"])
    val_sampler = SubsetRandomSampler(split["val"])
    train_dl = DataLoader(dataset, sampler=train_sampler, **dataloader_param)
    val_dl = DataLoader(dataset, sampler=val_sampler, **dataloader_param)
    model.train(train_dl, val_dl, num_epochs)
    if num_epochs > 0:
        model.save()
    # Test: deterministic order over the test subset.
    test_set = Subset(dataset, split["test"])
    test_dl = DataLoader(test_set, **dataloader_param)
    outputs, targets = model.evaluate(test_dl)
    names = [dataset.graph_names[i] for i in split["test"]]
    df_predictions = pd.DataFrame({"name": names, "prediction": outputs, "target": targets})
    df_predictions.to_csv("test_predictions.csv", index=False)
    print("\nEND")
if __name__ == '__main__':
    import argparse
    # CLI entry point: every model/optimizer/scheduler/dataset/dataloader
    # setting is a flag; after parsing, the flags are split into the
    # per-component parameter dicts that main() expects.
    parser = argparse.ArgumentParser(description="Crystal Graph Neural Networks")
    parser.add_argument("--device", type=str, default="cuda")
    parser.add_argument("--n_node_feat", type=int, default=4)
    parser.add_argument("--n_hidden_feat", type=int, default=16)
    parser.add_argument("--n_graph_feat", type=int, default=32)
    parser.add_argument("--n_conv", type=int, default=3)
    parser.add_argument("--n_fc", type=int, default=2)
    parser.add_argument("--activation", type=str, default="softplus")
    parser.add_argument("--use_batch_norm", action='store_true')
    parser.add_argument("--node_activation", type=str, default="None")
    parser.add_argument("--use_node_batch_norm", action='store_true')
    parser.add_argument("--edge_activation", type=str, default="None")
    parser.add_argument("--use_edge_batch_norm", action='store_true')
    parser.add_argument("--n_edge_net_feat", type=int, default=16)
    parser.add_argument("--n_edge_net_layers", type=int, default=0)
    parser.add_argument("--edge_net_activation", type=str, default="elu")
    parser.add_argument("--use_edge_net_batch_norm", action='store_true')
    parser.add_argument("--use_fast_edge_network", action='store_true')
    parser.add_argument("--fast_edge_network_type", type=int, default=0)
    parser.add_argument("--use_aggregated_edge_network", action='store_true')
    parser.add_argument("--edge_net_cardinality", type=int, default=32)
    parser.add_argument("--edge_net_width", type=int, default=4)
    parser.add_argument("--use_edge_net_shortcut", action='store_true')
    parser.add_argument("--n_postconv_net_layers", type=int, default=0)
    parser.add_argument("--postconv_net_activation", type=str, default="elu")
    parser.add_argument("--use_postconv_net_batch_norm", action='store_true')
    parser.add_argument("--conv_bias", action='store_true')
    parser.add_argument("--edge_net_bias", action='store_true')
    parser.add_argument("--postconv_net_bias", action='store_true')
    parser.add_argument("--full_pooling", action='store_true')
    parser.add_argument("--gated_pooling", action='store_true')
    parser.add_argument("--conv_type", type=int, default=0)
    parser.add_argument("--batch_size", type=int, default=8)
    parser.add_argument("--optim", type=str, default="adam")
    parser.add_argument("--lr", type=float, default=1e-3)
    parser.add_argument("--weight_decay", type=float, default=0)
    parser.add_argument("--clip_value", type=float, default=0)
    parser.add_argument("--milestones", nargs='+', type=int, default=[10])
    parser.add_argument("--gamma", type=float, default=0.1)
    parser.add_argument("--cosine_annealing", action='store_true')
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--dataset_path", type=str, default="datasets/Nomad2018")
    parser.add_argument("--target_name", type=str, default="formation_energy_ev_natom")
    parser.add_argument("--split_file", type=str, default="datasets/Nomad2018/split.json")
    parser.add_argument("--num_workers", type=int, default=0)
    parser.add_argument("--seed", type=int, default=12345)
    parser.add_argument("--load_model", action='store_true')
    parser.add_argument("--use_extension", action='store_true')
    options = vars(parser.parse_args())
    # Fall back to CPU when CUDA is not available on this machine.
    if not torch.cuda.is_available():
        options["device"] = "cpu"
    print("Device:", options["device"])
    print()
    device = torch.device(options["device"])
    # Model parameters
    model_param_names = [
        "n_node_feat", "n_hidden_feat", "n_graph_feat", "n_conv", "n_fc",
        "activation", "use_batch_norm", "node_activation", "use_node_batch_norm",
        "edge_activation", "use_edge_batch_norm", "n_edge_net_feat",
        "n_edge_net_layers", "edge_net_activation", "use_edge_net_batch_norm",
        "use_fast_edge_network", "fast_edge_network_type",
        "use_aggregated_edge_network", "edge_net_cardinality", "edge_net_width",
        "use_edge_net_shortcut", "n_postconv_net_layers", "postconv_net_activation",
        "use_postconv_net_batch_norm", "conv_bias", "edge_net_bias", "postconv_net_bias",
        "full_pooling", "gated_pooling", "conv_type", "use_extension"]
    model_param = {k : options[k] for k in model_param_names if options[k] is not None}
    # On the CLI the string "None" means "no extra activation" for these two.
    if model_param["node_activation"].lower() == 'none':
        model_param["node_activation"] = None
    if model_param["edge_activation"].lower() == 'none':
        model_param["edge_activation"] = None
    print("Model:", model_param)
    print()
    # Optimizer parameters
    optimizer_param_names = ["optim", "lr", "weight_decay", "clip_value"]
    optimizer_param = {k : options[k] for k in optimizer_param_names if options[k] is not None}
    # clip_value == 0 means gradient clipping is disabled.
    if optimizer_param["clip_value"] == 0.0:
        optimizer_param["clip_value"] = None
    print("Optimizer:", optimizer_param)
    print()
    # Scheduler parameters
    scheduler_param_names = ["milestones", "gamma", "cosine_annealing"]
    scheduler_param = {k : options[k] for k in scheduler_param_names if options[k] is not None}
    print("Scheduler:", scheduler_param)
    print()
    # Dataset parameters
    dataset_param_names = ["dataset_path", "target_name", "split_file"]
    dataset_param = {k : options[k] for k in dataset_param_names if options[k] is not None}
    print("Dataset:", dataset_param)
    print()
    # Dataloader parameters
    dataloader_param_names = ["num_workers", "batch_size"]
    dataloader_param = {k : options[k] for k in dataloader_param_names if options[k] is not None}
    print("Dataloader:", dataloader_param)
    print()
    main(device, model_param, optimizer_param, scheduler_param, dataset_param, dataloader_param,
         options["num_epochs"], options["seed"], options["load_model"])
| true |
c13e7bf89f403b5683157fca74e549b9c3cb61fe | Python | shanavas786/coding-fu | /python/foobar/tree.py | UTF-8 | 475 | 3.28125 | 3 | [
"LicenseRef-scancode-public-domain"
] | permissive | #!/usr/bin/env python3
# Memoization table for par_index, keyed by (root, i).  The original cache
# was keyed by i alone and — worse — never written to, so it was dead code.
cache = {}


def par_index(root, i):
    """Return the index of the parent of node ``i`` in a perfect binary tree
    labelled in post-order, where ``root`` is the root's index.

    Returns -1 when ``i`` is the root itself.

    :param root: post-order index of the (sub)tree root
    :param i: post-order index of the node whose parent is wanted
    :return: the parent's index, or -1 for the root
    """
    key = (root, i)
    if key in cache:
        return cache[key]
    if i == root:
        result = -1
    else:
        # In post-order labelling of a perfect tree rooted at `root`,
        # the left child's subtree occupies indices 1..root//2.
        left = root // 2
        if i == left or i == root - 1:
            # i is a direct child of the root.
            result = root
        elif i < left:
            # i lies inside the left subtree.
            result = par_index(left, i)
        else:
            # The right subtree has the same shape as the left one,
            # shifted by `left`.
            result = left + par_index(left, i - left)
    cache[key] = result
    return result
def get_index(h, i):
    """Print the parent index of every node listed in ``i`` for a perfect
    binary tree of height ``h`` (post-order labelling, root = 2**h - 1)."""
    root = 2 ** h - 1
    parents = []
    for node in i:
        parents.append(par_index(root, node))
    print(parents)


get_index(3, [3, 4, 7])
# print(par_index(7, 4))
| true |
a9ce1021ce950743d58844233131aae62cdddbda | Python | mcastrov78/ucr-ml-lab3 | /mnist_digits.py | UTF-8 | 8,912 | 3.21875 | 3 | [] | no_license | import read_idx
import lab3
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import classification_report
# use None to process all images
NUMBER_OF_IMAGES_TO_PROCESS = 500
class TwoLayerNN(nn.Module):
    """Fully connected network with one hidden layer and a softmax output.

    Pipeline: Linear(input_n -> hidden_n) -> activation_fn ->
    Linear(hidden_n -> output_n) -> Softmax over dim 1.
    """

    def __init__(self, input_n, hidden_n, output_n, activation_fn):
        """Build the layers.

        :param input_n: number of input features
        :param hidden_n: number of hidden neurons
        :param output_n: number of output classes
        :param activation_fn: activation module applied after the hidden layer
        """
        super(TwoLayerNN, self).__init__()
        print("\n*** TwoLayerNN i: %s - h: %s - o: %s ***" % (input_n, hidden_n, output_n))
        self.hidden_linear = nn.Linear(input_n, hidden_n)
        self.hidden_activation = activation_fn
        self.output_linear = nn.Linear(hidden_n, output_n)
        self.softmax = nn.Softmax(1)

    def forward(self, input):
        """Run one forward pass and return per-class probabilities.

        :param input: batch of flattened inputs, shape (batch, input_n)
        :return: softmax probabilities, shape (batch, output_n)
        """
        activated = self.hidden_activation(self.hidden_linear(input))
        return self.softmax(self.output_linear(activated))
def train_nn(iterations, nn_model, optimizer, nn_loss_fn, tensor_x, tensor_y, input_n, output_n):
    """Run a simple full-batch training loop, updating nn_model in place.

    :param iterations: number of full-batch optimization steps (epochs)
    :param nn_model: NN model to optimize
    :param optimizer: optimizer updating nn_model's parameters
    :param nn_loss_fn: loss function comparing predictions with tensor_y
    :param tensor_x: input tensor; reshaped to (-1, input_n) before use
    :param tensor_y: target labels tensor
    :param input_n: number of input features per sample
    :param output_n: number of outputs (kept for signature compatibility)
    :return: None
    """
    print("\n*** TRAINING NN ***")
    inputs = tensor_x.view(-1, input_n)
    for step in range(1, iterations + 1):
        predictions = nn_model(inputs)
        loss = nn_loss_fn(predictions, tensor_y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Report progress every 100 steps.
        if step % 100 == 0:
            print("N: %s\t | Loss: %f\t" % (step, loss))
def get_images_and_labels_tensors(images_filename, labels_filename):
    """Load MNIST images and labels from IDX files into tensors.

    Images become float rows of 28*28 values scaled from [0, 255] into
    [0, 1]; labels become a long tensor.

    :param images_filename: path of the IDX images file
    :param labels_filename: path of the IDX labels file
    :return: (images_tensor, labels_tensor)
    """
    raw_images, _ = read_idx.read(images_filename, NUMBER_OF_IMAGES_TO_PROCESS)
    raw_labels, _ = read_idx.read(labels_filename, NUMBER_OF_IMAGES_TO_PROCESS)
    # One flattened row per image, normalized to [0, 1].
    images_tensor = torch.tensor(raw_images, dtype=torch.float).view((-1, 28 * 28))
    images_tensor /= 255
    labels_tensor = torch.tensor(raw_labels).long()
    return images_tensor, labels_tensor
def print_digit(image_tensor, label):
    """Dump one digit to the console and save it as a PNG.

    :param image_tensor: flattened 28x28 image with values in [0, 1]
    :param label: true digit label (also used as the file name)
    :return: nothing
    """
    print("\nlabel: %s" % label)
    # Scale back to integer 0-255 pixel values for display.
    pixels = (image_tensor * 255).type(torch.int)
    print("data: %s" % (pixels.view(28, 28)))
    # NOTE(review): the backslash makes this output path Windows-specific.
    lab3.show_image(pixels, "sample_images\{}.png".format(label), scale=lab3.SCALE_OFF)
def train(trainingdataf, traininglabelf, model, learning_rate):
    """Fit ``model`` on the MNIST training files with Adam + cross entropy.

    Also prints a few sample digits and the first predictions.

    :param trainingdataf: training images filename
    :param traininglabelf: training labels filename
    :param model: model to be trained (updated in place)
    :param learning_rate: Adam learning rate
    :return: (train_data, train_labels) tensors
    """
    print("\n--------------- TRAINING - --------------")
    # Load the training set.
    train_data, train_labels = get_images_and_labels_tensors(trainingdataf, traininglabelf)
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)
    loss_fn = nn.CrossEntropyLoss()
    print("\nmodel: %s" % model)
    train_nn(1000, model, optimizer, loss_fn, train_data, train_labels, 28 * 28, 10)
    predictions = model(train_data).max(1).indices
    # Show a few digits together with their true labels.
    for sample_index in range(3):
        print_digit(train_data[sample_index], train_labels[sample_index])
    print("\ntrain_labels (%s): %s ..." % (len(train_labels), train_labels[:100]))
    print("train_predic (%s): %s ..." % (len(predictions), predictions[:100]))
    return train_data, train_labels
def generate_classification_report(test_labels, test_predictions):
    """Print confusion matrix, per-class precision/recall and a full report.

    :param test_labels: ground-truth labels
    :param test_predictions: predicted labels
    :return: nothing (everything goes to stdout)
    """
    matrix = confusion_matrix(test_labels, test_predictions)
    print("\nconf_matrix (%s): \n%s" % (len(matrix), matrix))
    per_class_precision = precision_score(test_labels, test_predictions, average=None)
    print("\nprecision (%s): \n%s" % (len(per_class_precision), per_class_precision))
    per_class_recall = recall_score(test_labels, test_predictions, average=None)
    print("\nrecall (%s): \n%s" % (len(per_class_recall), per_class_recall))
    report = classification_report(test_labels, test_predictions)
    print("\nclasif_report (%s): \n%s" % (len(report), report))
def validate(testdataf, testlabelf, model):
    """Evaluate a trained model on the test files and print metrics.

    :param testdataf: test images filename
    :param testlabelf: test labels filename
    :param model: trained model to validate
    :return: (test_data, test_labels) tensors
    """
    print("\n--------------- VALIDATION - --------------")
    # Load the test set.
    test_data, test_labels = get_images_and_labels_tensors(testdataf, testlabelf)
    # Predict with the trained model; the argmax over dim 1 is the class.
    scores = model(test_data)
    predicted = scores.max(1).indices
    print("\ntest_y_pred (%s): %s ..." % (scores.size(), scores[:10, :]))
    print("\ntest_labels (%s): %s ..." % (len(test_labels), test_labels[:100]))
    print("test_predic (%s): %s ..." % (len(predicted), predicted[:100]))
    generate_classification_report(test_labels, predicted)
    return test_data, test_labels
def analyze_weights(model):
    """Print every named parameter and save hidden-layer weights as images.

    Each row of ``hidden_linear.weight`` holds one hidden neuron's 28*28
    input weights, so it can be rendered like a digit image.

    :param model: model whose parameters are inspected
    :return: nothing
    """
    print("model: %s" % model)
    named_parameters = model.named_parameters()
    print("\nNAMED PARAMETERS: %s" % named_parameters)
    for name, param, in named_parameters:
        print("\nname: %s" % name)
        print("tensor(%s): %s" % (param.size(), param.data))
        if name != "hidden_linear.weight":
            continue
        for neuron, weights in enumerate(param.data):
            image = (weights * 255).type(torch.int)
            lab3.show_image(image, "neuron_weight_images/{}.png".format(neuron), scale=lab3.SCALE_OFF)
def main(trainingdataf="train-images.idx3-ubyte", traininglabelf="train-labels.idx1-ubyte",
         testdataf="t10k-images.idx3-ubyte", testlabelf="t10k-labels.idx1-ubyte"):
    """Train and validate one sigmoid and one ReLU two-layer network.

    :param trainingdataf: training images data filename
    :param traininglabelf: training labels data filename
    :param testdataf: testing images data filename
    :param testlabelf: testing labels data filename
    :return: nothing
    """
    # Keep tensor rows on a single printed line.
    torch.set_printoptions(linewidth=300)
    # Same experiment with two hidden activations; the ReLU model's weights
    # are additionally dumped as images.  (Earlier runs: 50 hidden neurons
    # and lr=1e-3 worked best; 1e-4 could cause errors.)
    for activation, inspect in ((nn.Sigmoid(), False), (nn.ReLU(), True)):
        model = TwoLayerNN(28 * 28, 50, 10, activation)
        train(trainingdataf, traininglabelf, model, 1e-3)
        validate(testdataf, testlabelf, model)
        if inspect:
            analyze_weights(model)


if __name__ == "__main__":
    main()
| true |
02b7ed4222112fcb147cc0fcab6dc6b85d5e875e | Python | bloXroute-Labs/bxcommon | /test/unit/utils/test_expiration_queue.py | UTF-8 | 4,187 | 3.171875 | 3 | [
"MIT"
] | permissive | import time
import unittest
from mock import MagicMock
from bxcommon.utils.expiration_queue import ExpirationQueue
class ExpirationQueueTests(unittest.TestCase):
    """Tests for ExpirationQueue: TTL-based expiry, oldest-item access,
    targeted removal, and limited batch removal.

    NOTE(review): several tests replace ``time.time`` with a MagicMock and
    never restore it, so the fake clock leaks into any test running later in
    the same process — consider ``mock.patch`` with ``addCleanup``.
    """
    def setUp(self):
        # Fresh 60-second-TTL queue and an empty sink for removal callbacks.
        self.time_to_live = 60
        self.queue = ExpirationQueue(self.time_to_live)
        self.removed_items = []
    def test_expiration_queue(self):
        # adding 2 items to the queue with 1 second difference
        item1 = 1
        item2 = 2
        self.queue.add(item1)
        time_1_added = time.time()
        # advance the (mocked) clock by one second before adding item2
        time.time = MagicMock(return_value=time.time() + 1)
        self.queue.add(item2)
        time_2_added = time.time()
        self.assertEqual(len(self.queue), 2)
        self.assertEqual(int(time_1_added), int(self.queue.get_oldest_item_timestamp()))
        self.assertEqual(item1, self.queue.get_oldest())
        # check that nothing is removed from queue before the first item expires
        self.queue.remove_expired(time_1_added + self.time_to_live / 2, remove_callback=self._remove_item)
        self.assertEqual(len(self.queue), 2)
        self.assertEqual(len(self.removed_items), 0)
        # check that first item removed after first item expired
        self.queue.remove_expired(time_1_added + self.time_to_live + 1, remove_callback=self._remove_item)
        self.assertEqual(len(self.queue), 1)
        self.assertEqual(len(self.removed_items), 1)
        self.assertEqual(self.removed_items[0], item1)
        self.assertEqual(int(time_2_added), int(self.queue.get_oldest_item_timestamp()))
        self.assertEqual(item2, self.queue.get_oldest())
        # check that second item is removed after second item expires
        self.queue.remove_expired(time_2_added + self.time_to_live + 1, remove_callback=self._remove_item)
        self.assertEqual(len(self.queue), 0)
        self.assertEqual(len(self.removed_items), 2)
        self.assertEqual(self.removed_items[0], item1)
        self.assertEqual(self.removed_items[1], item2)
    def test_remove_oldest_item(self):
        # Fill the queue with 0..9, then pop-oldest while refilling with
        # 1000..1009; items must come out in insertion (FIFO) order.
        items_count = 10
        for i in range(items_count):
            self.queue.add(i)
        self.assertEqual(items_count, len(self.queue))
        removed_items_1 = []
        for i in range(items_count):
            self.assertEqual(i, self.queue.get_oldest())
            self.queue.remove_oldest(removed_items_1.append)
            self.queue.add(1000 + i)
        for i in range(items_count):
            self.assertEqual(i, removed_items_1[i])
        self.assertEqual(items_count, len(self.queue))
        removed_items_2 = []
        for i in range(items_count):
            self.assertEqual(i + 1000, self.queue.get_oldest())
            self.queue.remove_oldest(removed_items_2.append)
        for i in range(items_count):
            self.assertEqual(i + 1000, removed_items_2[i])
        self.assertEqual(0, len(self.queue))
    def test_remove_not_oldest_item(self):
        # adding 2 items to the queue with 1 second difference
        item1 = 9
        item2 = 5
        self.queue.add(item1)
        time_1_added = time.time()
        time.time = MagicMock(return_value=time.time() + 1)
        self.queue.add(item2)
        self.assertEqual(len(self.queue), 2)
        self.assertEqual(int(time_1_added), int(self.queue.get_oldest_item_timestamp()))
        self.assertEqual(item1, self.queue.get_oldest())
        # removing the newer item must leave the oldest item untouched
        self.queue.remove(item2)
        self.assertEqual(len(self.queue), 1)
        self.assertEqual(int(time_1_added), int(self.queue.get_oldest_item_timestamp()))
        self.assertEqual(item1, self.queue.get_oldest())
    def test_remove_oldest_items_with_limits(self):
        # Stagger 20 items 5 seconds apart, expire them all, then check the
        # `limit` argument caps how many are removed per call.
        time.time = MagicMock(return_value=time.time())
        for i in range(20):
            self.queue.add(i)
            time.time = MagicMock(return_value=time.time() + 5)
        self.assertEqual(20, len(self.queue))
        time.time = MagicMock(return_value=time.time() + self.time_to_live)
        self.queue.remove_expired(limit=5)
        self.assertEqual(15, len(self.queue))
        self.queue.remove_expired()
        self.assertEqual(0, len(self.queue))
    def _remove_item(self, item):
        # Removal callback used by the expiry tests to record what came out.
        self.removed_items.append(item)
345c90a4d884c90e78039a8b7b37903cea45879c | Python | victor8504/Password-Locker | /tester_test.py | UTF-8 | 3,577 | 3.203125 | 3 | [
"MIT"
] | permissive | import pyperclip # Impporting the pyperclip module
import unittest # Importing the unittest module
from tester import User #Importing the user class
class TestUser(unittest.TestCase):
'''
Test class that defines test cases for the user class behaviours.
Args:
unittest.Testcase: TestCase class that helps in creating test cases
'''
def setUp(self):
'''
Set up method to run before each test cases
'''
self.new_account = User("Facebook", "victormoore254@gmail.com", "victor8504") # create user object
def tearDown(self):
'''
tearDown method that does clean up after each test case has run.
'''
User.user_list = []
def test_init(self):
'''
test_init test case to test if the object is initialized properly
'''
self.assertEqual(self.new_account.account_name,"Facebook")
self.assertEqual(self.new_account.email_address,"victormoore254@gmail.com")
self.assertEqual(self.new_account.password,"victor8504")
def test_save_account(self):
'''
test_save_account test case to test if the account object is saved into
the user list
'''
self.new_account.save_account() # saving the new account
self.assertEqual(len(User.user_list),1)
def test_save_multiple_account(self):
'''
test_save_multiple_account to check if we can save multiple account
objects to our user_list
'''
self.new_account.save_account()
test_account = User("Twitter", "victormoore254@gmail.com", "michybats") # new account
test_account.save_account()
self.assertEqual(len(User.user_list),2)
def test_delete_account(self):
'''
test_delete_account to test if we can remove an account from our user_list
'''
self.new_account.save_account()
test_account = User("Twitter", "victormoore254@gmail.com", "michybats") # new account
test_account.save_account()
self.new_account.delete_account() # Deleting an account object
self.assertEqual(len(User.user_list),1)
def test_find_account_by_account_name(self):
'''
test to check if we can find an account by account_name and display information
'''
self.new_account.save_account()
test_account = User("Twitter", "victormoore254@gmail.com", "michybats") # new account
test_account.save_account()
found_account = User.find_by_account_name("Twitter")
self.assertEqual(found_account.email_address,test_account.email_address)
def test_account_exists(self):
'''
test to check if we can return a Boolean if we cannot find an account.
'''
self.new_account.save_account()
test_account = User("Twitter", "michybats@gmail.com", "michybats") # new account
test_account.save_account()
account_exists = User.account_exists("Twitter")
self.assertTrue(account_exists)
def test_display_all_accounts(self):
'''
method that returns a list of all accounts saved
'''
self.assertEqual(User.display_accounts(),User.user_list)
def test_copy_email_address(self):
'''
Test to confirm that we are copying the email address from a found account.
'''
self.new_account.save_account()
User.copy_email_address("Victor Njuguna")
self.assertEqual(self.new_account.email_address,pyperclip.paste())
if __name__ == '__main__':
unittest.main()
| true |
99c10717fb45b38ed64a5b91c294c5996eccd2e4 | Python | kobaltkween/python2 | /Lesson 03 - Test Driven Development/testadder.py | UTF-8 | 997 | 4.09375 | 4 | [] | no_license | """
Demonstrates the fundamentals of unittest.
adder() is a function that lets you 'add' integers, strings, and lists.
"""
from adder import adder # keep the tested code separate from the tests
import unittest
class TestAdder(unittest.TestCase):
    """Unit tests for adder(): numeric, string, list and mixed-type operands."""
    def testNumbers(self):
        # int + int
        self.assertEqual(adder(3,4), 7, "3 + 4 should be 7")
    def testStrings(self):
        # str + str concatenates
        self.assertEqual(adder('x', 'y'), 'xy', "'x' + 'y' should be 'xy'")
    def testLists(self):
        # list + list concatenates
        self.assertEqual(adder([1,2], [3,4]), [1,2,3,4], "[1,2] + [3,4] should be [1,2,3,4]")
    def testNumberNString(self):
        # mixed number + string yields a string
        self.assertEqual(adder(1, 'two'), '1two', "1 + 'two' should be '1two'")
    def testNumberNList(self):
        # non-list operand is appended to the list (result comes back sorted
        # in these expectations — confirm against adder's implementation)
        self.assertEqual(adder(4, [1, 2, 3]), [1, 2, 3, 4], "4 + [1, 2, 3] should be [1, 2, 3, 4]")
    def testStringNList(self):
        self.assertEqual(adder('x', [1, 2, 3]), [1, 2, 3, 'x'], "'x' + [1, 2, 3] should be [1, 2, 3, 'x']")
if __name__ == "__main__":
    unittest.main()
08967275e0b70c4b93b0c517a738217cfdbc15d2 | Python | rehrler/rguard | /src/sensor/capture_data.py | UTF-8 | 1,351 | 2.75 | 3 | [] | no_license | import sqlite3
import time
import datetime
from scd30_i2c import SCD30
class SensorInterface(object):
    """Reads temperature/humidity/CO2 from an SCD30 sensor and stores the
    readings in the local SQLite database."""

    def __init__(self):
        """Configure the SCD30 and start its periodic measurements."""
        self.scd30 = SCD30()
        self.scd30.set_measurement_interval(2)
        self.scd30.set_auto_self_calibration(active=True)
        time.sleep(5)  # give the sensor time to apply the settings
        self.scd30.start_periodic_measurement()
        time.sleep(2)

    @staticmethod
    def _write_to_db(temp: float, hum: float, co2: float):
        """Insert one reading (local timestamp, temp, hum, co2) into DHT_DATA.

        Uses a parameterized statement. The connection is now always closed,
        even when the INSERT raises (the original leaked it on error), and
        the transaction is rolled back on failure via the connection's
        context manager.
        """
        conn = sqlite3.connect("src/db/rguard_db.db")
        try:
            with conn:  # commits on success, rolls back on exception
                conn.execute(
                    "INSERT INTO DHT_DATA values(datetime('now','localtime'), (?), (?), (?))",
                    (temp, hum, co2),
                )
        finally:
            conn.close()

    def start_measurement(self):
        """Poll the sensor forever, logging and persisting each reading."""
        while True:
            if self.scd30.get_data_ready():
                measurement = self.scd30.read_measurement()
                if measurement is not None:
                    # measurement order from the driver: (co2, temp, hum)
                    temp, hum, co2 = round(measurement[1], 3), round(measurement[2], 3), round(measurement[0], 3)
                    print("[INFO]: " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\t" + str(
                        temp) + "\t" + str(hum) + "\t" + str(co2))
                    self._write_to_db(temp=temp, hum=hum, co2=co2)
            time.sleep(2)


if __name__ == "__main__":
    sensor = SensorInterface()
    sensor.start_measurement()
| true |
df0d7e15319dc251c1dff76bdd51cb33c45c72bc | Python | jarvyii/techroom | /techroom.py | UTF-8 | 1,820 | 2.984375 | 3 | [] | no_license | import os
import sqlite3
from database import *
from login import *
from sqlite3 import Error
from modeltechroom import *
#This is to use function from login.py files
def create_connection(db_file):
    """Open a connection to the SQLite database at ``db_file``.

    Prints the connection object on success, or the error on failure.

    :param db_file: path of the database file
    :return: sqlite3 Connection object, or None when connecting fails
    """
    try:
        connection = sqlite3.connect(db_file)
    except Error as error:
        print(error)
        return None
    print(connection)
    return connection
#To create the connection with SQLite.
#DataBase db/techroom.db
def connectDB():
    """Open the db/techroom.db SQLite database and return the connection.

    :return: sqlite3 Connection object (or None — see note below)
    """
    database = "db/techroom.db"
    # create a database connection
    conn = create_connection(database)
    if not conn:
        # NOTE(review): CreateDB() (from database.py) presumably creates the
        # database file, but `conn` is still None afterwards, so the
        # conn.cursor() call below would raise AttributeError — confirm and
        # reconnect after CreateDB() if that is the intent.
        CreateDB()
    # cursor is created but currently unused (sample queries are commented out)
    curs = conn.cursor()
    #curs.execute("SELECT * FROM status order by idstatus")
    #rows = curs.fetchall()
    #for row in rows:
    #    print(row)
    return conn
##
def Menu():
    """Clear the screen, show the option menu and read the user's choice.

    Keeps prompting until one of '0'-'5' is entered.

    :return: the selected option as a one-character string
    """
    valid_choices = ('1', '2', '3', '4', '5', '0')
    choice = '9'  # sentinel: never a valid choice, forces at least one prompt
    while choice not in valid_choices:
        os.system('cls')
        print("\n\nOption Menu\n")
        print("1 - Add new Equipment")
        print("2 - Modify Equipment")
        print("3 - Delete Equipment")
        print("4 - See your Daily Production")
        print("5 - Show your Work")
        print("0 - Quit")
        choice = input("\nPlease select your Option: ")
    return choice
# Entry point: connect to the database, authenticate, then loop over the menu
# dispatching each choice to its handler until the user picks 0 (Quit).
conn = connectDB()
# Login returns the authenticated user, or 0 on failure — TODO confirm
# against login.py.
User = Login(conn)
os.system('cls')
if User == 0:
    quit()
flag = True
while flag != '0':
    flag = Menu()
    os.system('cls')
    if flag == '1':
        AddEquipment(conn, User)
    elif flag == '2':
        ModifyEquipment(conn, User)
    elif flag == '3':
        DeleteEquipment(conn, User)
    elif flag == '4':
        GraphInfo(conn, User)
    elif flag == '5':
        ShowWork(conn, User)
74bcf28aa7815bd795d632a6e06ebf3ede2b1156 | Python | qangelot/streamlitNyc | /apps/app2.py | UTF-8 | 1,791 | 2.953125 | 3 | [] | no_license | import pydeck as pdk
from utils.utils import timed, df, st
import numpy as np
# CREATING FUNCTION FOR MAPS
@timed
def map(data, lat, lon, zoom):
    """Render a pydeck hexagon-density map of pickups into the Streamlit app.

    NOTE(review): this name shadows the builtin ``map``; renaming would
    require updating the callers in app().

    :param data: rows with pickup_longitude / pickup_latitude columns
    :param lat: initial view latitude
    :param lon: initial view longitude
    :param zoom: initial zoom level
    """
    st.write(pdk.Deck(
        map_style="mapbox://styles/mapbox/light-v9",
        initial_view_state={
            "latitude": lat,
            "longitude": lon,
            "zoom": zoom,
            "pitch": 50,
        },
        layers=[
            pdk.Layer(
                "HexagonLayer",
                data=data,
                get_position=["pickup_longitude", "pickup_latitude"],
                radius=100,
                elevation_scale=4,
                elevation_range=[0, 1000],
                pickable=True,
                extruded=True,
            ),
        ]
    ))
def app():
    """Streamlit page: filter pickups by hour and show three density maps
    (whole city, La Guardia, JFK)."""
    st.header("**Filtered by hour**")
    hours_selected = st.slider("Select hours of pickup", 0, 23, (12, 14))
    # FILTERING DATA BY HOUR SELECTED (inclusive range from the slider)
    data = df[df['hour_pickup'].between(hours_selected[0],hours_selected[1])]
    st.write("")
    st.header("**Geospatial analysis**")
    st.write("")
    # LAYING OUT THE MIDDLE SECTION OF THE APP WITH THE MAPS
    # SETTING THE ZOOM LOCATIONS FOR THE AIRPORTS
    la_guardia= [40.7900, -73.8700]
    jfk = [40.6650, -73.7821]
    zoom_level = 12
    # Center the city-wide map on the mean pickup location of the filtered data.
    midpoint = (np.average(data["pickup_latitude"]), np.average(data["pickup_longitude"]))
    print("**All New York City from %i:00 and %i:00**" % (hours_selected[0], (hours_selected[1]) % 24)) if False else st.write("**All New York City from %i:00 and %i:00**" % (hours_selected[0], (hours_selected[1]) % 24))
    map(data, midpoint[0], midpoint[1], 11)
    st.write("")
    # Two side-by-side columns for the airport close-ups.
    row2_3, row2_4 = st.columns((1,1))
    with row2_3:
        st.write("**La Guardia Airport**")
        map(data, la_guardia[0],la_guardia[1], zoom_level)
    with row2_4:
        st.write("**JFK Airport**")
        map(data, jfk[0],jfk[1], zoom_level)