blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
77ff9339b3dc4ae2f998087301703b615aa3d0a7 | Python | JACKEY22/Ref_Image_Processing_Basic | /2.3.bilinearInterpolation.py | UTF-8 | 1,029 | 3.9375 | 4 | [] | no_license | import numpy as np
def bilinear1d(x, y, ratio):
    """1-D linear interpolation: blend x and y, with weight ``ratio`` on y."""
    w = ratio
    return (1 - w) * x + w * y
def bilinear2d(point_4, x_ratio, y_ratio):
    """2-D bilinear interpolation over the four corner values in ``point_4``.

    ``point_4[i][j]`` is the value at corner (i, j) of the unit square; the
    result is the bilinearly weighted average at (x_ratio, y_ratio).
    """
    wx0, wx1 = 1 - x_ratio, x_ratio
    wy0, wy1 = 1 - y_ratio, y_ratio
    return (wx0 * wy0 * point_4[0][0]
            + wx1 * wy0 * point_4[1][0]
            + wx0 * wy1 * point_4[0][1]
            + wx1 * wy1 * point_4[1][1])
def bilinear_interpolation(point_4, x_ratio, y_ratio):
    """Print the value interpolated at (x_ratio, y_ratio), computed two ways.

    Given the corner values at (0,0), (0,1), (1,0), (1,1) in ``point_4``, the
    2-D result is obtained both by composing 1-D interpolations and by the
    direct 2-D formula; both results are printed so they can be compared.
    """
    corners = np.float32(point_4)
    # Compose 1-D interpolations: along x on both edges, then along y.
    edge_lo = bilinear1d(corners[0, 0], corners[1, 0], x_ratio)
    edge_hi = bilinear1d(corners[0, 1], corners[1, 1], x_ratio)
    composed = bilinear1d(edge_lo, edge_hi, y_ratio)
    # Direct 2-D bilinear formula.
    direct = bilinear2d(corners, x_ratio, y_ratio)
    print(f'result using 1d bilinear: {composed}')
    print(f'result using 2d bilinear: {direct}')
if __name__ == "__main__":
    # Demo: interpolate at the centre of a 2x2 grid holding the values 0..3.
    point_4 = np.arange(0, 4).reshape((2, 2))
    bilinear_interpolation(point_4, 0.5, 0.5)
| true |
7b2134b50aee1ec0b3a0f41fe254aa5e41231370 | Python | bellalee01/leetcode | /offer20.py | UTF-8 | 667 | 2.953125 | 3 | [] | no_license | #!/usr/bin/env python
# coding=utf-8
'''
Date: 2021-01-15 09:39:44
Github: https://github.com/bellalee01
LastEditors: lixuefei
LastEditTime: 2021-01-15 09:40:03
FilePath: /leetcode/offer20.py
Description:
请实现一个函数用来判断字符串是否表示数值(包括整数和小数)。
例如,字符串"+100"、"5e2"、"-123"、"3.1416"、"-1E-16"、"0123"都表示数值,
但"12e"、"1a3.14"、"1.2.3"、"+-5"及"12e+5.4"都不是。
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/biao-shi-shu-zhi-de-zi-fu-chuan-lcof
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
'''
| true |
6a21dacbf2be85e7d78c015ba990c5aa14c16f98 | Python | ColinKennedy/USD-Cookbook | /concepts/variant_set_in_stronger_layer/python/variant_set.py | UTF-8 | 2,184 | 3.09375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""A module that shows how to author Variant Sets from stronger USD Layers.
Important:
All of the Stages here are created in-memory to avoid writing to
disk. Because of that, we use identifiers to refer to those Stages.
In production code, these identifiers should actually be paths to
files or some kind of URI that USD can resolve into a consumable
resource.
"""
# IMPORT THIRD-PARTY LIBRARIES
from pxr import Usd, UsdGeom
def create_basic_stage():
    """Build an in-memory USD Stage with a sphere whose radius varies by variant.

    A "some_variant_set" VariantSet is authored on /SomeSphere with three
    variants that differ only in the sphere's radius (1, 2 and 3).  The
    selection is left on the last one authored, "variant_name_3".
    """
    stage = Usd.Stage.CreateInMemory()
    sphere = UsdGeom.Sphere.Define(stage, "/SomeSphere")
    stage.GetRootLayer().documentation = "A layer that authors some variant set"
    variants = sphere.GetPrim().GetVariantSets().AddVariantSet("some_variant_set")
    variants.AddVariant("variant_name_1")
    variants.AddVariant("variant_name_2")
    variants.AddVariant("variant_name_3")
    # Author a different radius inside each variant's edit context.
    variants.SetVariantSelection("variant_name_1")
    with variants.GetVariantEditContext():
        sphere.GetRadiusAttr().Set(1)
    variants.SetVariantSelection("variant_name_2")
    with variants.GetVariantEditContext():
        sphere.GetRadiusAttr().Set(2)
    variants.SetVariantSelection("variant_name_3")
    with variants.GetVariantEditContext():
        sphere.GetRadiusAttr().Set(3)
    return stage
def create_override_stage(identifier):
    """Build a stronger Stage that sublayers ``identifier`` and overrides it.

    Adds a brand-new VariantSet ("another") plus a new variant "foo" to the
    existing "some_variant_set", selecting it and authoring radius = 100.
    """
    stage = Usd.Stage.CreateInMemory()
    # NOTE(review): the result of this call is discarded - it appears to have
    # no effect before the sublayer is added; confirm whether it can go.
    stage.GetPrimAtPath("/SomeSphere")
    root = stage.GetRootLayer()
    root.subLayerPaths.append(identifier)
    sphere = UsdGeom.Sphere(stage.GetPrimAtPath("/SomeSphere"))
    # Here's an example of adding a completely new variant set
    sphere.GetPrim().GetVariantSets().AddVariantSet("another")
    variants = sphere.GetPrim().GetVariantSets().GetVariantSet("some_variant_set")
    variants.AddVariant("foo")
    variants.SetVariantSelection("foo")
    with variants.GetVariantEditContext():
        sphere.GetRadiusAttr().Set(100)
    return stage
def main():
    """Run the demo: author the base stage, override it, print the result."""
    basic_stage = create_basic_stage()
    stage = create_override_stage(basic_stage.GetRootLayer().identifier)
    print(stage.GetRootLayer().ExportToString())
if __name__ == "__main__":
    main()
| true |
93dba1eacff304d5a7717d907f0ce73573e10c34 | Python | sunshot/LeetCode | /22. Generate Parentheses/solution1.py | UTF-8 | 756 | 3.6875 | 4 | [
"MIT"
] | permissive | from typing import List
class Solution:
    def generateParenthesis(self, n: int) -> List[str]:
        """Return all strings of n balanced parenthesis pairs.

        Uses the recurrence: every balanced string is '(A)B' where A has i
        pairs and B has n-1-i pairs, for i = 0..n-1.  The explicit n == 1 and
        n == 2 special cases of the original were redundant - the recurrence
        with the single n == 0 base case produces the same lists in the same
        order - so they have been removed.
        """
        if n == 0:
            return ['']
        ans = []
        for i in range(n):
            for left in self.generateParenthesis(i):
                for right in self.generateParenthesis(n - 1 - i):
                    ans.append('({}){}'.format(left, right))
        return ans
if __name__== '__main__':
    # Quick manual check: print all balanced strings for n = 3 and n = 4.
    solution = Solution()
    n = 3
    ans = solution.generateParenthesis(n)
    print(ans)
    n = 4
    ans = solution.generateParenthesis(n)
    print(ans)
c1f7ad1d43472755edca748e023ab860df61350c | Python | Kmerck88/100DaysofCode | /100Days/Day5/mad_libs.py | UTF-8 | 832 | 4.03125 | 4 | [] | no_license | adjective1 = input("Enter an adjective1").lower()
# Mad Libs: collect the remaining words from the user, then print the story.
# Fixes: prompt typos ("adjective1" -> "adjective"), the duplicated
# "The rest of" sentence, and the "the the" typo in the story text.
game = input("Enter the name of an outdoor game: ").lower()
adjective2 = input("Enter another adjective: ").lower()
friend = input("Enter the name of a friend: ").capitalize()
verb = input("Enter a verb ending in ing: ").lower()
adjective3 = input("Enter one more adjective: ").lower()
story = (
    f'It was a {adjective1} summer day at the beach. My friends and I were in the water playing {game}. As a {adjective2} wave came closer, my friend {friend} yelled, '
    f'"Look! There\'s a jellyfish {verb}!" As we got closer, we saw that the jellyfish was indeed {verb}! {friend} ran out of the water and onto the sand. {friend} was afraid of {verb} jellyfish. '
    f'The rest of us stayed in the water playing {game} because {verb} jellyfish are {adjective3}.')
print(story)
| true |
a5872fe7f9a6c8fe6d10a502206ec46007cb8ba7 | Python | MicaelSousa15/Exercicios | /69.py | UTF-8 | 133 | 3.125 | 3 | [] | no_license | >>> idade = 18
>>> # Comparações com números
>>> idade > 15
True
>>> idade < 28
True
>>> idade >= 12
True
>>> idade <= 24
True
>>> | true |
c8bfb43e89e7e351487d20e115b2d8bf3efff515 | Python | FrankZhaoYX/mobileanalyticslogging | /util/file_util.py | UTF-8 | 3,538 | 2.59375 | 3 | [] | no_license | import csv
import os
import re
import uuid
import pandas as pd
from pathlib import Path
from util import shell_util
def get_project_name_list(csv_path):
    """Return the ProjectInfo column of a search-result CSV.

    The file is read with fixed column names (it carries its own header as
    the first data row, which is skipped by the trailing slice).
    """
    headers = ['Keyword', 'Layout', 'LoggingLibrary', 'ProjectInfo', 'SearchResult']
    frame = pd.read_csv(csv_path, names=headers)
    return frame.ProjectInfo.tolist()[1:]
def export_info_to_csv(list_to_export, csv_path):
    """Write repository-statistics rows to ``csv_path`` under a fixed header.

    Each element of ``list_to_export`` is one row matching the header below.
    """
    # newline='' is required by the csv module; without it, csv.writer
    # produces blank interleaved rows on Windows.
    with open(csv_path, "w", newline='') as f_output:
        csv_output = csv.writer(f_output)
        csv_output.writerow(['repo url', 'earliest committer', 'second earliest committer', 'latest committer',
                             'top three contributor', 'total commit'])
        csv_output.writerows(list_to_export)
def get_project_repo_url(csv_path: str):
    """Return every value of the 'repo url' column of the given CSV."""
    return pd.read_csv(csv_path)['repo url'].to_list()
def get_project_repo_id(csv_path: str, repo_url: str):
    """Return the 'repo id' Series of the rows whose 'repo url' matches.

    Note: this returns a pandas Series (possibly empty), not a scalar.
    """
    frame = pd.read_csv(csv_path)
    matching = frame[frame["repo url"] == repo_url]
    return matching['repo id']
def get_keyword_list(csv_path: str, repo_url: str):
    """Return the whitespace-separated keywords stored for ``repo_url``.

    The 'keyword' cell may contain embedded newlines; they are treated like
    spaces, and empty tokens are discarded.
    """
    frame = pd.read_csv(csv_path)
    row = frame[frame["repo url"] == repo_url]
    raw = row['keyword'].to_list()[0]
    flattened = raw.replace('\n', ' ').replace('\r', ' ')
    return [token for token in flattened.split(' ') if token]
def get_all_java_kotlin_files(repo_path: str) -> [Path]:
    """Return all Java/Kotlin source files under ``repo_path``, minus tests.

    Globbing can fail on exotic paths; in that case a shell ``find`` is used
    as a fallback (yielding path strings instead of Path objects).  Files
    whose names look like tests or mocks (see is_test_file) are excluded.
    """
    repo = Path(repo_path)
    try:
        file_list = []
        for pattern in ('**/*.java', '**/*.kt', '**/*.ktm', '**/*.kts'):
            file_list.extend(repo.glob(pattern))
    except Exception:
        # Fallback: let the shell enumerate the files.
        file_list = shell_util.run_command(
            'find {} -name "*.java" -o -name "*.kt" -o -name "*.ktm" -o -name "*.kts"'.format(repo_path)).split()
    # BUG FIX: the original removed entries from file_list while iterating
    # over it, which skips the element following each removal (consecutive
    # test files survived the filter).  Build a filtered list instead.
    return [f for f in file_list if not is_test_file(str(f))]
def generate_random_file_name_with_extension(file_extension: str) -> str:
    """Return a random, collision-resistant file name with the given extension."""
    return f"{generate_hex_uuid_4()}.{file_extension}"
def generate_hex_uuid_4() -> str:
    """Generate UUID (version 4) in hexadecimal representation.

    :return: 32-character lowercase hex string of a random version-4 UUID.
    """
    # uuid4().hex is already a str, so no explicit conversion is needed.
    return uuid.uuid4().hex
def is_test_file(file_path: str):
    """Heuristically decide whether ``file_path`` names a test or mock file.

    Matches base names that start or end with 'mock'/'Mock', or contain
    'test'/'Test' anywhere.
    """
    base_name = os.path.basename(file_path)
    pattern = '^[Mm]ock|[Mm]ock$|.*[Tt]est.*'
    return re.search(pattern, base_name) is not None
def is_java_or_kotlin_file(file_path: str):
    """Return True when ``file_path`` has a Java or Kotlin source extension."""
    # str.endswith accepts a tuple of suffixes - one call replaces the chain
    # of 'or'-ed comparisons, and the boolean result is returned directly.
    return file_path.endswith(('.java', '.kt', '.ktm', '.kts'))
def generate_file_from_blob(file_blob, file_extension):
    """Write a git blob's bytes to a new randomly-named file; return its path.

    ``file_blob`` is expected to expose ``data_stream`` (GitPython blob API
    - TODO confirm with callers).
    """
    target = Path(generate_random_file_name_with_extension(file_extension))
    target.write_bytes(file_blob.data_stream.read())
    return str(target.resolve())
def delete_if_exists(file_path: str):
    """Remove the file at ``file_path`` if present; otherwise do nothing."""
    target = Path(file_path)
    if target.exists():
        target.unlink()
def is_java_file(file_path: str):
    """Return True when ``file_path`` ends with the '.java' extension."""
    return file_path.endswith('.java')
e8b7b7c5ae80acba160c89210b9727441c52440c | Python | sakost/expiring_object | /expiring_object/expiring_object.py | UTF-8 | 1,518 | 3.15625 | 3 | [
"MIT"
] | permissive | from __future__ import print_function, with_statement
import time
import weakref
from threading import Thread
from collections import deque
class Dispatcher(Thread):
    """Background thread that expires queued objects after a fixed lifetime.

    Objects are appended together with their absolute expiry timestamp; the
    run loop pops them FIFO once the timestamp has passed, invoking an
    optional ``_handler`` attribute on the object just before dropping the
    reference.  Usable as a context manager (starts/stops the thread).
    """
    def __init__(self, expiring_time, maxlen=None):
        """
        :param expiring_time: lifetime (seconds) applied to every object
        :type expiring_time: int
        :param maxlen: maximum queue length (oldest entries evicted early)
        :type maxlen: int or None
        """
        super(Dispatcher, self).__init__()
        self.expiring_time = expiring_time
        # (object, expiry_timestamp) pairs kept in insertion (= expiry) order.
        self.container = deque(maxlen=maxlen)
        self._running = True
    def run(self):
        # NOTE(review): busy-wait loop - it spins the CPU while the queue is
        # empty or the head has not expired yet.
        while self._running:
            if len(self.container) < 1:
                continue
            if self.container[0][1] <= time.time():
                # Give the object a chance to clean up before it is dropped.
                if hasattr(self.container[0][0], '_handler'):
                    getattr(self.container[0][0], '_handler')()
                self.container.popleft()
    def add(self, obj):
        """Queue ``obj`` to expire ``expiring_time`` seconds from now."""
        self.container.append((obj, time.time() + self.expiring_time))
    def stop(self):
        """Ask the dispatcher thread to exit its run loop."""
        self._running = False
    def __enter__(self):
        self.start()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()
def object_factory(obj, dp: Dispatcher):
    """Register ``obj`` with the dispatcher and return a weak proxy to it.

    The dispatcher's queue then holds the only strong reference, so the
    caller must not keep other (non-weak) references to ``obj`` - otherwise
    the object outlives its expiry.
    """
    dp.add(obj)
    return weakref.proxy(obj)
| true |
c2ae6aea8228b7346a8b633665b85d4626272609 | Python | piyushgoyal1620/Python | /SumOfDigits.py | UTF-8 | 464 | 3.640625 | 4 | [] | no_license | '''
You're given an integer N. Write a program to calculate the sum of all the digits of N.
Input
The first line contains an integer T, total number of testcases. Then follow T lines, each line contains an integer N.
Output
Calculate the sum of digits of N.
Constraints
1 ≤ T ≤ 1000
1 ≤ N ≤ 1000000
Example
Input
3
12345
31203
2123
Output
15
9
8
'''
#CODE
# For each of T test cases, read an integer N and print the sum of its digits.
cases = int(input())
for _ in range(cases):
    digits = str(int(input()))
    print(sum(int(ch) for ch in digits))
| true |
45ffe9fddaac5045abd8e5a418b237865d3f3fb0 | Python | MarcelinoChagas/Python | /Android-IOS/012_EstruturaDados/iterandoLista.py | UTF-8 | 589 | 4.21875 | 4 | [] | no_license | # Não realiza a soma
# (The commented-out loop below does NOT change the list: 'item' is a copy
# of each element, so the += only rebinds the local name.)
# lista_numeros = [100,200,300,400]
# for item in lista_numeros:
#     item += 1000
# print(lista_numeros)
# Version using range over the indices (works, but less idiomatic)
# lista_numeros = [100,200,300,400,2]
# for item in range(len(lista_numeros)):
#     lista_numeros[item] += 1000
# print(lista_numeros)
# range/enumerate are lazy objects; list() materializes them for printing.
print(range(0,4))
print(list(range(0,4)))
print()
l = ['a','b','c','d']
print(enumerate(l))
print(list(enumerate(l)))
print()
# Version using enumerate instead of range: index and value in one loop.
lista_numeros = [100,200,300,400,2]
for idx,item in enumerate(lista_numeros): #
    lista_numeros[idx] += 1000
print(lista_numeros)
07c1b685ec6569a696598db068c5fb519ac4f05d | Python | zhaola/583DataCollection | /extractionpass/build_cfgs.py | UTF-8 | 1,724 | 2.546875 | 3 | [] | no_license | import json
import sys
import os
import errno
def try_mkdir(dirname):
    """Create ``dirname``; an already-existing directory is not an error."""
    try:
        os.mkdir(dirname)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            return  # already there - nothing to do
        raise
def build_cfgs(data_file, bb_dir):
    """Build per-function CFG adjacency lists from line-delimited JSON.

    Each line of ``data_file`` is a JSON object whose 'edgeToProb' maps
    "function,source_bb,dest_bb" keys to edge probabilities.  For every
    line, an adjacency list {source: [(dest, prob), ...]} is written to
    <bb_dir>/<function>/CFG.json.
    """
    with open(data_file, 'r') as infile:
        for line in infile:
            try:
                dicti = json.loads(line)
            except json.decoder.JSONDecodeError:
                # Skip malformed lines instead of aborting the whole run.
                continue
            adjacency_list = {}
            seen_bbs = set()
            for edge, prob in dicti['edgeToProb'].items():
                edge_split = edge.split(',')
                function = edge_split[0]
                source, dest = int(edge_split[1]), int(edge_split[2])
                seen_bbs.add(source)
                seen_bbs.add(dest)
                try:
                    adj_list = adjacency_list[source]
                except KeyError:
                    adjacency_list[source] = []
                    adj_list = adjacency_list[source]
                adj_list.append((dest, prob))
            # Blocks that only ever appear as destinations get empty lists.
            unseen_sources = seen_bbs - set(adjacency_list.keys())
            for source in unseen_sources:
                adjacency_list[source] = []
            if len(adjacency_list) > 0:
                # NOTE(review): 'function' here is whatever the last loop
                # iteration left behind - this assumes all edges on one line
                # belong to the same function.  TODO confirm with producer.
                local_bb_dir = bb_dir + '/' + function
                try_mkdir(local_bb_dir)
                with open(local_bb_dir + '/CFG.json', 'w') as outfile:
                    json.dump(adjacency_list, outfile)
def main():
    """Entry point: parse the CLI arguments, then build the CFG files."""
    infile, outdir = parse_args()
    build_cfgs(infile, outdir)
def parse_args():
    """Return (data_file, bb_dir) from the command line.

    :raises ValueError: unless exactly two arguments were supplied.
    """
    args = sys.argv[1:]
    if len(args) != 2:
        raise ValueError("Invalid args...")
    return args[0], args[1]

if __name__ == "__main__":
    main()
c8c79525e86cd6e950c05782012eb33cdf161746 | Python | Leodyfang/git_py | /IEMS 5703 NetworkCodingAndSystemDesign/week#7/cp_server.py | UTF-8 | 1,468 | 2.828125 | 3 | [] | no_license | import asyncio
import websockets
async def consumer_handler(websocket):
    """Forever receive messages on ``websocket`` and feed them to consumer()."""
    while True:
        incoming = await websocket.recv()
        await consumer(incoming)
# Producer handler
async def producer_handler(websocket):
    """Forever wait for producer() output and push it down ``websocket``."""
    while True:
        outgoing = await producer()
        await websocket.send(outgoing)
async def handler(websocket, path):
    """Run the consumer and producer loops concurrently for one connection.

    Whichever loop finishes first (e.g. the client disconnected) causes the
    remaining one to be cancelled.
    """
    tasks = [
        asyncio.ensure_future(consumer_handler(websocket)),
        asyncio.ensure_future(producer_handler(websocket)),
    ]
    # Return as soon as either task terminates.
    done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
    for leftover in pending:
        leftover.cancel()
# Running sum of every integer received from clients.
total = 0
async def consumer(message):
    """Parse ``message`` as an int and add it to the global running total."""
    global total
    total += int(message)
async def producer():
    """After a two-second delay, report the current running total."""
    global total
    await asyncio.sleep(2)
    return "Current total is {:d}".format(total)
# Bind the handler to a local WebSocket server and run the loop forever.
start_server = websockets.serve(handler, 'localhost', 50001)
loop = asyncio.get_event_loop()
loop.run_until_complete(start_server)
loop.run_forever()
9e51cc04d5583f9c1ebcf58320c6dda5a8790745 | Python | kim-hwi/Python | /해시/나는야포켓몬마스터이다솜.py | UTF-8 | 316 | 2.640625 | 3 | [] | no_license | D,Q = input().split()
# Two-way Pokedex lookup: index -> name and name -> index.
# (D and Q were read from stdin above.)
dogamnum = {}
dogamstr = {}
for i in range(int(D)):
    name = input()
    dogamstr[name] = i + 1
    dogamnum[i + 1] = name
for i in range(int(Q)):
    qu = input()
    try:
        # Numeric query: look the name up by its index.
        print(dogamnum[int(qu)])
    except ValueError:
        # Non-numeric query: it is a name - print its index.
        # BUG FIX: the original bare 'except:' also swallowed KeyError (an
        # out-of-range numeric query was silently re-tried as a name) and
        # even KeyboardInterrupt; only int() parse failures belong here.
        print(dogamstr[qu])
| true |
f6212e709038a9bf0d563f3468cf8b1693f3b7cc | Python | jayednahain/Essential-Functionality-Python | /ord_function.py | UTF-8 | 165 | 2.984375 | 3 | [] | no_license | """ord() function helps to find the unicode of character"""
# ord() returns the Unicode code point of a one-character string.
for character in ('B', 'b', 'C'):
    print("unicode of", character, "is", ord(character))
| true |
7d4d03a3ea8d41b34802acf633d0ece9e3d05acb | Python | neuropheno-org/DeepNMA | /utils_DL.py | UTF-8 | 3,110 | 2.546875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 2 09:03:31 2020
@author: adonay
"""
import numpy as np
import math
from matplotlib import pyplot as plt
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import (Dense, Flatten, Dropout, Conv1D,
MaxPooling1D, Input, concatenate)
import tensorflow as tf
from tensorflow.keras import Input, layers
def model_1D_Seq(X, y, ker_sz1=10, ker_sz2=10, n_ker1=40, n_ker2=40, n_flat=100):
    """Build a small 1-D CNN classifier sized from the data arrays.

    X: (samples, timesteps, features) inputs; y: one-hot labels.
    Returns an uncompiled Keras Sequential model.
    """
    n_timesteps, n_features, n_outputs = (X.shape[1], X.shape[2],
                                          y.shape[1])
    stack = [
        Conv1D(filters=n_ker1, kernel_size=ker_sz1, activation='relu',
               input_shape=(n_timesteps, n_features)),
        Conv1D(filters=n_ker2, kernel_size=ker_sz2, activation='relu'),
        Dropout(0.5),
        MaxPooling1D(pool_size=2),
        Flatten(),
        Dense(n_flat, activation='relu'),
        Dense(20, activation='relu'),
        Dense(n_outputs, activation='softmax'),
    ]
    model = Sequential()
    for layer in stack:
        model.add(layer)
    return model
def model_1d_low_high(X, y):
    """Build a deeper 1-D CNN via the Keras functional API.

    Five conv/pool stages (dropout after each pooling except the last),
    then two dense layers with dropout and a softmax output sized from y.
    X: (samples, timesteps, features); y: one-hot labels.
    Returns an uncompiled tf.keras.Model.
    """
    drop_out_rate = 0.501
    input_tensor = Input(shape=(X.shape[1:3]))
    x = input_tensor
    # (filters, kernel width) for each stage, shallow to deep.
    conv_specs = [(8, 11), (16, 7), (32, 5), (64, 5), (128, 3)]
    for stage, (n_filters, width) in enumerate(conv_specs):
        x = layers.Conv1D(n_filters, width, padding='valid',
                          activation='relu', strides=1)(x)
        x = layers.MaxPooling1D(2)(x)
        if stage < len(conv_specs) - 1:
            x = layers.Dropout(drop_out_rate)(x)
    x = layers.Flatten()(x)
    x = layers.Dense(256, activation='relu')(x)
    x = layers.Dropout(drop_out_rate)(x)
    x = layers.Dense(128, activation='relu')(x)
    x = layers.Dropout(drop_out_rate)(x)
    output_tensor = layers.Dense(y.shape[1], activation='softmax')(x)
    return tf.keras.Model(input_tensor, output_tensor)
def layer_viz(model):
    """Plot the learned kernels of every Conv layer in a Keras model.

    For each conv layer, a grid of subplots is created - one per
    (input-channel, output-filter) pair - and the kernel weights are drawn.
    """
    # summarize filter shapes
    for layer in model.layers:
        # only convolutional layers carry plottable kernels
        if 'conv' not in layer.name:
            continue
        # filters shape: (kernel_width, in_channels, out_filters) for Conv1D
        filters, biases = layer.get_weights()
        n_filt = filters.shape[-2:]
        print(layer.name, filters.shape)
        fig, axs = plt.subplots(n_filt[0], n_filt[1], sharex='col',
                                sharey='row', gridspec_kw={'hspace': 0})
        # NOTE(review): flattening assumes the (in, out) axes unravel in the
        # same order as axs.flat - confirm the subplot/kernel pairing.
        fil_flat = np.reshape(filters, [-1, np.prod(n_filt) ])
        for ax, fil in zip(axs.flat, fil_flat.T):
            ax.plot(fil.T)
        fig.suptitle(f"Name: {layer.name}, shape: {filters.shape}")
| true |
d0f2552ea6e9f690750e0c163a4b2d4910dd5b1f | Python | mohit-iitb/sagar | /IIT Guwahati _Python_DataScience/day3/194161013/194161013_q3a.py | UTF-8 | 834 | 3.109375 | 3 | [] | no_license | #fw=open('q3atextfile.txt','w+')
import tweepy
# SECURITY(review): hard-coded Twitter API credentials committed to version
# control - these should be moved to environment variables or a config file
# and the keys rotated.
consumer_key = "deU56q5KBq6zogXd4W9U88LD6"
consumer_secret = "1X0o2gbMcZ6jYuVL0ns1gsGGlRvlDLmq0p1xmzZCsEPjZ3jg54"
access_token = "2894833632-CKKOP0j1odN2NfBUOgmuUSWNyTBlqivSBT0brjI"
access_token_secret = "rjbh9cRC1TuRkHUQdIwdnXcqizbvhfqUjofU5T4oncqKb"
# Creating the authentication object
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
# Setting your access token and secret
auth.set_access_token(access_token, access_token_secret)
# Creating the API object while passing in auth information
api = tweepy.API(auth)
# Search for tweets matching a user-supplied keyword and print their text.
x=input('Enter Keyword\n')
#public_tweets = api.home_timeline()
tweets=api.search(q=x,count=500)
# foreach through all tweets pulled
for tweet in tweets:
    # printing the text stored inside the tweet object
    print (tweet.text)
    #fw.write(tweet.text)
| true |
f63d2c53c12ce69b1331248efd5553588f2c0268 | Python | AttilaAV/szkriptnyelvek | /nyegyedik_o/dia.py | UTF-8 | 673 | 3.484375 | 3 | [] | no_license | #!/usr/bin/env python3
def diamond(szam):
    """Print a centered star diamond of width ``szam`` (must be odd).

    For an even ``szam`` only an error message is printed (returning None);
    for odd input the diamond is printed and '' is returned.
    """
    if szam % 2 != 1:
        print("Kérlek, páratlan számot adj meg!")
    else:
        half = int(szam / 2) + 1
        # top half including the middle row: 1, 3, 5, ... stars
        stars = 1
        for _ in range(half):
            print(('*' * stars).center(szam))
            stars += 2
        stars -= 2
        # bottom half mirrors the top, without repeating the middle row
        for _ in range(half - 1):
            stars -= 2
            print(('*' * stars).center(szam))
        return ''
def main():
    """Ask the user for a size and print the corresponding diamond."""
    hanyas = int(input("Mekkora legyen a gyémántod? "))
    print(diamond(hanyas))
if __name__== '__main__':
    main()
9df2e343dcd5f697027d5a5c1a796a3d72aa4e15 | Python | slougn/PythonWF | /PythonCrashCourse/ch3/ex3.py | UTF-8 | 133 | 3.1875 | 3 | [] | no_license | names = ['xiaoming','xiaohong','dalei','hanmeimei']
# Demonstrate list indexing: whole list, individual elements, str.title().
# ('names' is defined on the preceding line.)
print(names)
print(names[0])
print(names[3])
print(names[0].title()+" ,welcom!")
334c2555361979cb2d87df2894660b00d2820f71 | Python | doosea/god_like | /myDataStructuresAndAlgorithms/BinarySearch/leetcode000.py | UTF-8 | 386 | 3.765625 | 4 | [] | no_license | """
二分查找升序数组
"""
def binary_search(nums, target):
    """Classic binary search on an ascending list.

    Returns the index of ``target`` in ``nums``, or -1 when absent.
    """
    lo, hi = 0, len(nums) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if nums[mid] == target:
            return mid
        if nums[mid] < target:
            lo = mid + 1
        else:
            hi = mid - 1
    return -1
if __name__ == '__main__':
    # Quick manual check: 2 sits at index 1 of [1, 2].
    nums = [1, 2]
    r = binary_search(nums, 2)
    print(r)
| true |
c951aceeba4c5f64a37578a7bde5bc7c9f804945 | Python | Orb-H/nojam | /source/nojam/2740.py | UTF-8 | 380 | 2.53125 | 3 | [] | no_license | n, m = map(int, input().split())
# Matrix product of a (n x m) and b (m x k), read from stdin.
# (n and m were read on the preceding line.)
a = [list(map(int, input().split())) for _ in range(n)]
m, k = map(int, input().split())
b = [list(map(int, input().split())) for _ in range(m)]
# r[i][j] = sum over h of a[i][h] * b[h][j]
r = [[sum(a[i][h] * b[h][j] for h in range(m)) for j in range(k)]
     for i in range(n)]
for row in r:
    print(' '.join(str(x) for x in row))
e99111ecad1255d285887610d9c2722e01b62ced | Python | drewblount/2014-2015 | /thesis/code/numpy_dace/ego.py | UTF-8 | 15,344 | 3.046875 | 3 | [] | no_license | ## an object-oriented EGO algorithm machine
from math import exp, pi, sqrt
import numpy as np
from scipy import linalg as la
from scipy.optimize import minimize
from scipy.stats import norm
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
from operator import add, sub
import logging
log = logging.getLogger('ego.log')
# 'lazyprop's are lazily-evaluated properties that save the results of
# certain matrix operations. also imports a lazyprop-deleter
from lazyprop import lazyprop, reset_lps
# func_handler contains tools for the communication between an EGO optimizer instance and an objective function
import func_handler
class egoist:
## The solver is initialized by passing it the starting
## inputs and outputs.
## X's type: 2d array of input k-vectors. Y is a 1d array of outputs.
## F is a k-to-1-dimensional function (sin_plus_quad)
    def __init__(self, X0=[[]], Y0=[], F = func_handler.sin_plus_quad(), logger=None):
        """Initialize the EGO solver with an initial sample of the objective.

        X0: 2-D array-like, n rows of k-dimensional input vectors.
        Y0: 1-D array-like of the n corresponding outputs.
        F:  the objective-function object (only the default is built here).
        NOTE(review): the mutable/instantiated defaults (X0=[[]], F=...) are
        shared across calls - confirm callers always pass their own data.
        """
        self.log = logger or logging.getLogger(__name__)
        logging.basicConfig(filename='ego.log', level=logging.INFO)
        self.log.info('new ego class initialized')
        self.X = np.array(X0)
        self.Y = np.array(Y0)
        # number of input dimensions k
        self.k = len(self.X[0])
        # number of evaluated points n
        self.n = len(self.X)
        # near-zero Q values lead to singular correlation matrices, hence eps
        self.eps = 1e-2
        ## temporary: regression parameters are initialized at default values
        ## (should eventually be chosen by max. likelihood)
        self.P = [1.3 for _ in range(self.k)]
        self.Q = [3.8 for _ in range(self.k)]
# adds an evaluated point:
def addpoint(self, x_new, y_new, verbose=False):
self.X = np.append(self.X, [x_new])
self.Y = np.append(self.Y, y_new)
if verbose:
print ('X = ' + str(E.X))
print ('Y = ' + str(E.Y))
# have to reset lazy properties
reset_lps(self)
    # samples the function f at the new point and saves the result to data
    def sample_point(self, f, x_new,verbose=False):
        """Evaluate ``f`` at ``x_new`` and record the pair as a data point."""
        self.addpoint(x_new, f(x_new),verbose)
# distance in input-space (x1 is an array; an input vector)
def dist(self, x1, x2):
return np.sum( [
self.Q[i] *
abs( x1[i] - x2[i] ) ** self.P[i]
for i in range( self.k )
] )
    # correlation between the function values at two points in input-space
    def corr(self, x1, x2):
        """Correlation kernel: exp(-dist(x1, x2))."""
        return exp(-self.dist(x1,x2))
    # The lazyprops below cache expensive linear-algebra products so that the
    # same computation is never performed twice.  Whenever the object changes
    # (e.g. P or Q are modified by hand), these cached values must be refreshed.
    def reset(self):
        # Drop every cached '_lazy_*' attribute so properties recompute.
        # NOTE(review): appears to duplicate reset_lps() - confirm intent.
        # can't change a dict while iterating through it, hence intermediate list
        lazy_keys = [k for k in self.__dict__ if (k[0:6] == '_lazy_') ]
        for key in lazy_keys:
            delattr(self, key)
    # R is the n*n matrix whose i,j-th entry is the correlation between the
    # i-th and j-th evaluated inputs.
    @lazyprop
    def R(self):
        #return np.fromiter( ( self.corr(self.X[i],self.X[j]) for i in range(self.n) for j in range(self.n) ), dtype=int).reshape(self.n,self.n)
        # would like to do the below without the intermediate non-numpy array:
        # ((how do you do 2d numpy array list comprehension? above is a failed attempt))
        return np.array([[self.corr(self.X[i],self.X[j]) for i in range(self.n)] for j in range(self.n)])
    # correlation of a new input against every evaluated one - like a column of R
    def corr_vector(self, x_new):
        return np.array([self.corr(self.X[i],x_new) for i in range(self.n)])
    # inverse of the correlation matrix (used throughout the predictor)
    @lazyprop
    def R_inv(self):
        return la.inv(self.R)
    # determinant of R (appears in the likelihood)
    @lazyprop
    def R_det(self):
        return la.det(self.R)
    # vector of n ones
    @lazyprop
    def ones(self):
        return np.ones(self.n)
    # 1' R^-1 - this one is used a bunch
    @lazyprop
    def ones_R_inv(self):
        return(self.ones.T.dot(self.R_inv))
    # 1' R^-1 y
    @lazyprop
    def ones_R_inv_Y(self):
        return self.ones_R_inv.dot(self.Y)
    # 1' R^-1 1
    @lazyprop
    def ones_R_inv_ones(self):
        return self.ones_R_inv.dot(self.ones)
    # best predictor of the mean mu, Jones eq 5
    @lazyprop
    def mu_hat(self):
        return self.ones_R_inv_Y / self.ones_R_inv_ones
    # residuals y - 1 * mu_hat
    @lazyprop
    def Y_min_mu(self):
        return self.Y - (self.ones*self.mu_hat)
    # R^-1 (y - 1 * mu_hat), reused by the predictor
    @lazyprop
    def R_inv_Y_min_mu(self):
        return self.R_inv.dot(self.Y_min_mu)
    # best predictor of the process variance, Jones eq 6
    @lazyprop
    def var_hat(self):
        return ( self.Y_min_mu.T.dot(self.R_inv_Y_min_mu) ) / self.n
# Jones eq 4 w/ 5, 6 inserted for mu, stdev
def conc_likelihood(self, new_P=None, new_Q=None):
if new_P!=None: self.P=new_P
if new_Q!=None: self.Q=new_Q
if new_P!=None or new_Q!=None: reset_lps(self)
inv_linear_term =(2.0 * pi * self.var_hat)**(self.n/2.0) * self.R_det ** (0.5)
return exp(self.n/2.0)/inv_linear_term
# Sets P and Q so as to maximize the above likelihood function
# param_range is a series of parenthesized min/max tuples:
# ((P[0]min,P[0]max),...(Q[n-1]min,Q[n-1]max))
# note that None is a valid upper/lower bound
'''
# works in 1d but WARNING: MIGHT ONLY WORK FOR 1D (because of )
def max_likelihood(self, param_range=((1.0,2.0),(1e15,None)), verbose=False):
# the function to be minimized. note that P is the first half of z, Q the second
def neg_conc(z): return (-1 * self.conc_likelihood(z[:self.k],z[self.k:]))
z0 = self.P + self.Q
res = minimize(neg_conc, z0, method='L-BFGS-B',bounds=param_range)
return res
# Sets P and Q so as to maximize the above likelihood function
# bounds is a tuple of min/max tuples:
# ( (P[0]min,P[0]max), (P[1]min,P[1]max), ..., (Q[n-1]min,Q[n-1]max) )
# note that None is a valid upper/lower bound on one dimension, but if bounds=None
# a default will be used
# eps is included because having a lower bound of 0 leads to singular matrices
'''
def max_likelihood(self, bounds=None, verbose=False):
# default parameter range is each p from 1 to 2, each q > 0
if not bounds:
p_bounds = [(1,2) for _ in range(self.k)]
q_bounds = [(self.eps,None) for _ in range(self.k)]
bounds = tuple(p_bounds+q_bounds)
# the function to be minimized. note that P is the first half of z, Q the second
def neg_conc(z): return (-1 * self.conc_likelihood(z[:self.k],z[self.k:]))
z0 = self.P + self.Q
res = minimize(neg_conc, z0, method='L-BFGS-B',bounds=bounds)
# now save the output to P and Q, and reset lazyprops
self.P = res.x[:self.k]
self.Q = res.x[self.k:]
reset_lps(self)
print('P and Q have been set to maximize the likelihood equation.')
return res
    # the so-called best linear unbiased predictor, Jones Eq. 7
    def predict(self, x_new):
        """Best linear unbiased prediction of y at ``x_new`` (Jones Eq. 7)."""
        # correlation between x_new and each evaluated x:
        #r = np.fromfunction(lambda i: self.corr(x_new, self.X[i]), self.n, dtype=int)
        # wanted: an rewrite of the below to not use an intermediate non-numpy list
        # ((how do you make numpy arrays w list comprehension??))
        r = self.corr_vector(x_new)
        return self.mu_hat + r.dot(self.R_inv_Y_min_mu)
    # the error of the predictor, Jones Eq. 9
    def pred_err(self, x_new):
        """Mean squared error s^2 of the predictor at ``x_new`` (Jones Eq. 9)."""
        r = self.corr_vector(x_new)
        R_inv_r = self.R_inv.dot(r)
        # was getting some weird tiny (magnitude) negative number float errors
        out = self.var_hat * (1 - r.dot(R_inv_r) + ( ( 1 - self.ones.dot( R_inv_r) )**2 / (self.ones_R_inv_ones)) )
        # clamp tiny negative round-off to zero
        return (max(out, 0.0))
    # The components below are required to maximize the expected improvement
    # function (Jones Eq. 15).
    # process standard deviation, sqrt of var_hat
    @lazyprop
    def stdev(self):
        return sqrt(self.var_hat)
    # current minimum function value (assume optimization problem is minimization)
    @lazyprop
    def f_min(self):
        return np.min(self.Y)
    # expected improvement function (Jones Eq. 15) (eps is included for floating point rounding errs)
    def exp_improvement(self, x_new):
        """Expected improvement of sampling at ``x_new`` over f_min (Jones Eq. 15)."""
        # should predict(x) be stored lazily? don't want to double-call the predictor function
        # (maybe unnecessary; is the predictor function ever explicitly used in the iterative process?)
        y = self.predict(x_new)
        # improvement over current minimum
        improvement = self.f_min - y
        # s^2 = predicted mean squared error at x_new
        err = self.pred_err(x_new)
        if (err < 0):
            print('Error: pred_err(x) < 0 for x = ' + str(x_new) + '; pred_err(x) = ' + str(err))
        st_dev = sqrt(self.pred_err(x_new))
        # catches when x_new is in X (already evaluated points, 100% certain of prediction)
        if (st_dev == 0.0): return(0.0)
        normed_improvement = improvement/st_dev
        return(improvement * norm.cdf(normed_improvement) + st_dev * norm.pdf(normed_improvement))
    ## In prototype stage: this is the function that when called, will return
    ## the next x value that should be evaluated by the black box function
    def iterate(self):
        # NOTE(review): generate_predictor is not defined anywhere visible in
        # this file - calling iterate() will raise AttributeError as-is.
        # works by choosing P, Q params to maximize likelihood equation
        self.generate_predictor()
# works by minimizing the ex
# assumes a 1d x-space
def pred_over(x_range):
return [ self.predict( [x]) for x in pred_range ]
    # does all the 1d plotting stuff without calling plt.show
    def plot1d_no_show(self, x_min=0.0, x_max=5.0, x_delta=0.01, y_min=0.0, y_max=1.0):
        """Draw the 1-D predictor, its confidence band, the expected
        improvement (on a twin axis) and the sampled points - without
        calling plt.show().
        """
        fig, ax = plt.subplots()
        plt.subplots_adjust(left=0.25, bottom=0.25)
        plt.xlabel('x')
        ax.set_ylabel('predicted y(x)')
        plt.title('Toy Problem')
        pred_range = np.arange(x_min, x_max, x_delta)
        preds = [ self.predict( [x]) for x in pred_range ]
        # errors are illustrated with 2x sigma^2 (~three standard deviations)
        # to illustrate a 95% confidence interval
        errors = [ 2*self.pred_err([x]) for x in pred_range ]
        # elem-wise sum/difference of above two arrays
        # NOTE(review): under Python 3, map() returns a lazy iterator - this
        # code looks ported from Python 2; confirm ax.plot accepts it.
        pl_errors = map(add, preds, errors)
        mi_errors = map(sub, preds, errors)
        # plot the predictor and +/- errors
        pred_line, = ax.plot(pred_range, preds)
        p_err_line, = ax.plot(pred_range, pl_errors, color="green")
        m_err_line, = ax.plot(pred_range, mi_errors, color="green")
        plt.axis([x_min, x_max, y_min, y_max])
        # make another axis (exp improv. is at a smaller scale than predictor)
        # plot the expected improvement
        ax2 = ax.twinx()
        imps = [ self.exp_improvement([x]) for x in pred_range ]
        exp_imp_line, = ax2.plot(pred_range, imps, color='r')
        ax2.set_ylabel('expected improvement', color='r')
        for tl in ax2.get_yticklabels():
            tl.set_color('r')
        # plot the actual sample points
        points = ax.plot([x[0] for x in self.X],self.Y, 'ko')
    # plots the predictor and its error:
    def plot1d(self, x_min=0.0, x_max=5.0, x_delta=0.01, y_min=0.0, y_max=1.0):
        """Draw the 1-D predictor, confidence band and EI, then show the figure."""
        self.plot1d_no_show(x_min,x_max,x_delta,y_min,y_max)
        plt.show()
# Performs the above, with sliders to manipulate P and Q
# show expected improvement
def plot1d_sliders(self, x_min=0.0, x_max=5.0, x_delta=0.01, y_min=0.0, y_max=1.0, P_min=1.0,P_max=2.0,Q_min=None,Q_max=10.0):
# cludge b/c default Q_min can't be a feature of self
if not Q_min: Q_min = self.eps
fig, ax = plt.subplots()
plt.subplots_adjust(left=0.25, bottom=0.25)
plt.xlabel('x')
ax.set_ylabel('predicted y(x)')
plt.title('Toy Problem')
pred_range = np.arange(x_min, x_max, x_delta)
preds = [ self.predict( [x]) for x in pred_range ]
# errors are illustrated with 2x sigma^2 (~three standard deviations)
# to illustrate a 95% confidence interval
errors = [ 2*self.pred_err([x]) for x in pred_range ]
# elem-wise sum/difference of above two arrays
pl_errors = map(add, preds, errors)
mi_errors = map(sub, preds, errors)
# plot the predictor and +/- errors
pred_line, = ax.plot(pred_range, preds)
p_err_line, = ax.plot(pred_range, pl_errors, color="green")
m_err_line, = ax.plot(pred_range, mi_errors, color="green")
plt.axis([x_min, x_max, y_min, y_max])
# make another axis (exp improv. is at a smaller scale than predictor)
# plot the expected improvement
ax2 = ax.twinx()
imps = [ self.exp_improvement([x]) for x in pred_range ]
exp_imp_line, = ax2.plot(pred_range, imps, color='r')
ax2.set_ylabel('expected improvement', color='r')
for tl in ax2.get_yticklabels():
tl.set_color('r')
# plot the actual sample points
points = ax.plot([x[0] for x in self.X],self.Y, 'ko')
# sets slider locations
axP = plt.axes([0.25, 0.05, 0.65, 0.03])
axQ = plt.axes([0.25, 0.1, 0.65, 0.03])
slidP = Slider(axP, 'P', P_min, P_max, valinit=self.P[0])
slidQ = Slider(axQ, 'Q', Q_min, Q_max, valinit=self.Q[0])
def update(val):
self.P = [slidP.val]
self.Q = [slidQ.val]
# all the lazyprops are inaccurate after P or Q change
reset_lps(self)
# re-draw each line (lazyprops will now be re-evaluated)
preds = [ self.predict( [x]) for x in pred_range ]
pred_line.set_ydata(preds)
errors = [ self.pred_err([x]) for x in pred_range ]
# elem-wise sum/difference of above two arrays
pl_errors = map(add, preds, errors)
p_err_line.set_ydata(pl_errors)
mi_errors = map(sub, preds, errors)
m_err_line.set_ydata(mi_errors)
imps = [ self.exp_improvement([x]) for x in pred_range ]
exp_imp_line.set_ydata(imps)
# redraw the sample points so they are on top layer
points = ax.plot([x[0] for x in self.X],self.Y, 'ko')
fig.canvas.draw_idle()
slidP.on_changed(update)
slidQ.on_changed(update)
plt.show()
| true |
417710346fdc87291d351916d46a53ab2a550b7e | Python | DiegoHeer/sec_web_scraping | /cik_ticker_mapping.py | UTF-8 | 659 | 3.34375 | 3 | [] | no_license | import requests
def get_cik_from_ticker(ticker):
    """Return the SEC CIK number (as a string) for a stock ticker.

    Downloads the official SEC ticker->CIK mapping file and looks the
    ticker up case-insensitively.

    Raises:
        KeyError: if the ticker is not present in the SEC mapping.
    """
    # Official SEC url that contains all the CIK x Ticker data
    base_url = 'https://www.sec.gov/include/ticker.txt'
    txt_content = requests.get(base_url).text

    mapping_dict = dict()
    for mapping in txt_content.split('\n'):
        # Bug fix: the file ends with a newline, so the last split element is
        # empty and the original raised IndexError on it. Skip any row that
        # is not a "ticker<TAB>cik" pair.
        parts = mapping.split('\t')
        if len(parts) < 2:
            continue
        mapping_dict[parts[0]] = parts[1]

    return mapping_dict[ticker.lower()]
# Only for testing purposes
if __name__ == '__main__':
    ticker = 'MSFT'
    cik_number = get_cik_from_ticker(ticker)
    # Fixed typo in the user-facing message: "sik" -> "cik".
    print(f'The SEC cik number of {ticker} is: ' + cik_number)
| true |
a8f4727744387f7b550c35830b20eee737a46334 | Python | python-provy/provy | /provy/more/debian/users/ssh.py | UTF-8 | 2,712 | 2.875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Roles in this namespace are meant to provide SSH keygen utilities for Debian distributions.
'''
from os.path import join
from Crypto.PublicKey import RSA
from provy.core import Role
class SSHRole(Role):
    '''
    Installs an SSH key pair for a user on a Debian server: the private key
    comes from a rendered template and the matching public key is derived
    from it with PyCrypto.

    Example:
    ::

        from provy.core import Role
        from provy.more.debian import SSHRole

        class MySampleRole(Role):
            def provision(self):
                with self.using(SSHRole) as role:
                    role.ensure_ssh_key(user='someuser', private_key_file="private-key")
    '''

    def ensure_ssh_key(self, user, private_key_file):
        '''
        Ensures that the given private ssh key exists on the remote server,
        together with a public key derived from it. The private key file must
        be a template accessible to the
        :meth:`Role.render <provy.core.roles.Role.render>` method.

        :param user: Owner of the keys.
        :type user: :class:`str`
        :param private_key_file: Template file for the private key.
        :type private_key_file: :class:`str`

        Example:
        ::

            from provy.core import Role
            from provy.more.debian import SSHRole

            class MySampleRole(Role):
                def provision(self):
                    with self.using(SSHRole) as role:
                        role.ensure_ssh_key(user='someuser', private_key_file="private-key")
        '''
        ssh_dir = join('/home/%s' % user, '.ssh')
        self.ensure_dir(ssh_dir, sudo=True, owner=user)
        private_key = self.render(private_key_file)
        rsa_key = RSA.importKey(private_key)
        public_key = rsa_key.publickey().exportKey(format='OpenSSH')
        self.__install_keys(user, private_key, public_key)

    def __install_keys(self, user, private_key, public_key):
        # Key pair lives in the user's ~/.ssh directory.
        ssh_dir = join('/home/%s' % user, '.ssh')
        public_path = join(ssh_dir, 'id_rsa.pub')
        private_path = join(ssh_dir, 'id_rsa')

        # End the public key line with a "user@host" comment, like ssh-keygen.
        host = self.execute_python('import os; print os.uname()[1]', stdout=False)
        public_text = '%s %s@%s' % (public_key, user, host)

        public_tmp = self.write_to_temp_file(public_text)
        private_tmp = self.write_to_temp_file(private_key)
        changed_pub = self.update_file(public_tmp, public_path, sudo=True, owner=user)
        changed_priv = self.update_file(private_tmp, private_path, sudo=True, owner=user)

        if changed_pub or changed_priv:
            self.log("SSH keys generated at server!")
            self.log("Public key:")
            self.log(public_text)
| true |
88432a33f4595c75c106fd12bfcbc9a40861e152 | Python | mdxys/library-feature-generation | /tests/transformation/gym/test_gym_create_data.py | UTF-8 | 2,671 | 2.75 | 3 | [] | no_license | import numpy as np
import pytest
from alphai_feature_generation.transformation import GymDataTransformation
from tests.transformation.gym.helpers import load_preset_config, gym_data_fixtures
@pytest.mark.parametrize("index", [0, 1, 2])
def test_create_data(index):
    """Check shapes and summary statistics of the generated training data."""
    n_samples = 49
    time_steps = {'hour_value': 5, 'temperature_value': 5, 'number_people_value': 5}
    n_symbols = 1
    n_features = 3
    n_bins = 5
    n_forecasts = 1

    config = load_preset_config(n_symbols, index)
    transformation = GymDataTransformation(config)
    train_x, train_y = transformation.create_train_data(gym_data_fixtures)

    assert len(train_x) == n_features
    if index < 2:
        assert set(train_x) == set(time_steps)

    # Shapes of the feature and target arrays.
    for feature in train_x:
        assert train_x[feature].shape == (n_samples, time_steps[feature], n_symbols)
    for target in train_y:
        assert train_y[target].shape == (n_samples, n_forecasts, n_symbols, n_bins)

    # Contents: configuration 2 uses resampled key names.
    if index == 2:
        x_key, y_key = 'hour_value_15T', 'number_people_value_150T'
    else:
        x_key, y_key = 'hour_value', 'number_people_value'

    exp_x_mean, exp_y_mean, expected_sample = _expected_results(index)

    x_mean = train_x[x_key].flatten().mean()
    assert np.isnan(x_mean) if np.isnan(exp_x_mean) else np.isclose(x_mean, exp_x_mean)
    assert np.isclose(train_y[y_key].flatten().mean(), exp_y_mean)

    if index == 0:
        # Feature ordering must be preserved; mimics the extraction in oracle.py
        stacked = np.stack(list(train_x.values()), axis=0)
        np.testing.assert_array_almost_equal(stacked.flatten()[:4], expected_sample)
def _expected_results(iteration):
return_value_list = [
{'x_mean': 8.06938775510204, 'y_mean': 0.2},
{'x_mean': 8.06938775510204, 'y_mean': 0.2}, # Test classification and normalisation
{'x_mean': -6.57070769676113e-17, 'y_mean': 0.2}, # Test length/resolution requests
]
try:
return_value = return_value_list[iteration]
expected_sample = [23., 0., 1., 6.]
return return_value['x_mean'], return_value['y_mean'], expected_sample
except KeyError:
raise ValueError('Requested configuration not implemented') | true |
e20ac63bb4a6254ee018535ec79331beb88b63e0 | Python | esther-soyoung/Coding-Challenge | /KakaoBlind2020/lock.py | UTF-8 | 3,092 | 3.390625 | 3 | [] | no_license | def solution(key, lock):
    """
    Decide whether `key` (M x M 0/1 grid), after some rotation and
    translation, can fill every hole of `lock` (N x N grid) without its
    leftover bumps landing inside the lock area.
    """
    M = len(key)
    N = len(lock)
    # Get the coordinates of holes in lock
    holes = []
    for i in range(N):
        for j in range(N):
            if lock[i][j] == 0:
                holes.append((i, j))
    # Get the coordinates of bumps in key
    bumps = []
    for i in range(M):
        for j in range(M):
            if key[i][j] == 1:
                bumps.append((i, j))
    # Not enough number of bumps
    if len(bumps) < len(holes):
        return False
    # No holes. Key should have no bumps
    if len(holes) == 0:
        if len(bumps) == 0:
            return True
        return False
    # All holes. Key should have all bumps
    # NOTE(review): this compares the key's bump count against N*N although
    # the key grid is M x M -- confirm the intended condition.
    if len(holes) == N * N:
        if len(bumps) == N * N:
            return True
        return False
    # Naive check
    if check(bumps, holes, N):
        return True
    # Rotate and Check
    # Try the three remaining 90-degree rotations of the key.
    i = 0
    while i < 3:
        bumps = rotate(bumps, M)
        i += 1
        if check(bumps, holes, N):
            return True
    return False
def check(bumps, holes, N):
    ''' Check if bumps can fill all the holes.

    Tries every bump as a "pivot" aligned onto the first hole; the induced
    translation must map some remaining bump onto every other hole, and any
    leftover bumps must fall outside the N x N lock area.
    '''
    bumps.sort(key=lambda x: (x[0], x[1]))
    holes.sort(key=lambda x: (x[0], x[1]))
    # Check relative locations (Move key)
    for b in bumps:  # pivot bump
        # Bug fix: the match counter must restart for every pivot; the
        # original initialised it once before the loop, so counts leaked
        # between pivot candidates.
        pivot_match = 0
        bumps_rest = bumps.copy()
        bumps_rest.remove(b)
        extra = bumps_rest.copy()
        # Translation that puts the pivot bump onto the first hole.
        gaps = (holes[0][0] - b[0], holes[0][1] - b[1])
        for i in range(1, len(holes)):
            h_x, h_y = holes[i]
            for r in bumps_rest:
                if (h_x - r[0], h_y - r[1]) == gaps:  # matching bump for holes[i]
                    pivot_match += 1
                    extra.remove(r)
                    break  # move on to next hole
            if pivot_match < i:  # hole doesn't have matching bump
                break  # wrong pivot bump
        if pivot_match != (len(holes) - 1):
            continue  # wrong pivot bump
        # All holes matched; leftover bumps must not touch the lock area.
        if not extra:
            return True
        conflict = False
        for e in extra:
            rel_loc = (e[0] + gaps[0], e[1] + gaps[1])
            # Bug fix: a leftover bump only conflicts when it lands INSIDE
            # the lock. The original tested only the upper bound, so bumps
            # at negative (off-grid) coordinates were wrongly rejected.
            if 0 <= rel_loc[0] < N and 0 <= rel_loc[1] < N:
                conflict = True
                break  # move on to next pivot bump
        if not conflict:
            return True
    # Explicitly signal failure (original fell through returning None).
    return False
def rotate(bumps, M):
    ''' Rotate the points of an M x M grid by 90 degrees.

    Each (x, y) maps to (y, M - 1 - x).
    '''
    return [(y, (M - 1) - x) for x, y in bumps]
if __name__ == "__main__":
key = [[0, 0, 0], [1, 0, 0], [0, 1, 1]]
lock = [[1, 1, 1], [1, 1, 0], [1, 0, 1]]
print(solution(key, lock)) # True
key = [[1, 0, 1], [0, 1, 0], [1, 0, 1]]
print(solution(key, lock)) # False
| true |
f9fc1da173dce4b537448b8ffc3fab195a689a19 | Python | OSHistory/wikidata2geojson | /wikifetcher.py | UTF-8 | 845 | 2.921875 | 3 | [
"MIT"
] | permissive |
import json
import urllib.request as request
class WikiFetcher():
    """Small helper around the Wikidata web API."""

    def __init__(self):
        # Template for the wbgetentities endpoint; {idlist} is a
        # pipe-separated list of Q-ids.
        self.id_query_templ = "https://www.wikidata.org/w/api.php?action=wbgetentities&ids={idlist}&format=json&languages=en"

    def get_json_resp(self, url):
        """GET ``url`` and decode the response body as JSON."""
        req = request.urlopen(url)
        try:
            cont = req.read().decode("UTF-8")
        finally:
            # Always release the HTTP connection (the original leaked it).
            req.close()
        return json.loads(cont)

    def get_items_from_query(self, url):
        """Return the 'items' of a query response as 'Q<id>' strings."""
        json_resp = self.get_json_resp(url)
        return ["Q" + str(item) for item in json_resp["items"]]

    def get_json_for_id_list(self, id_list):
        """Fetch entity data for an iterable of Q-ids in a single request."""
        return self.get_json_resp(self.id_query_templ.format(idlist="|".join(id_list)))
| true |
06555616862865657e87f45bc9b9e7c07555c391 | Python | chenQ1114/HR-BiLSTM | /deploy/server.py | UTF-8 | 3,025 | 2.734375 | 3 | [] | no_license | #!/usr/bin/env python3
"""
Very simple HTTP server in python for logging requests
Usage::
./server.py [<port>]
"""
from http.server import BaseHTTPRequestHandler, HTTPServer
import logging
import numpy as np
from keras.models import Model
import sys
import os
from io import BytesIO
import simplejson
sys.path.append('..')
from preprocess import process_one
from model_eval import model_construct
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
def load_model(weight_path='../my_model_weights.h5'):
    """Build the network architecture and restore its trained weights."""
    network = model_construct()
    network.load_weights(weight_path)
    return network
def predict(model, question_feature, relation_feature, relation_all_feature):
    """Score question/relation pairs and encode the scores for the HTTP reply.

    Returns:
        bytes: UTF-8 encoded ``str`` of ``{'similarity': <list of scores>}``.
    """
    question_feature = np.array(question_feature)
    relation_feature = np.array(relation_feature)
    relation_all_feature = np.array(relation_all_feature)
    result = model.predict([question_feature, relation_feature, relation_all_feature],
                           batch_size=1024)
    # The original wrapped the dict literal in a redundant dict(...) copy.
    return bytes(str({'similarity': result.tolist()}), encoding='utf-8')
class S(BaseHTTPRequestHandler):
    """HTTP handler: POST a question plus candidate relations, get scores."""

    def _set_response(self):
        # Boilerplate 200 / text-html response header.
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()

    def do_GET(self):
        logging.info("GET request,\nPath: %s\nHeaders:\n%s\n", str(self.path), str(self.headers))
        self._set_response()
        self.wfile.write("GET request for {}".format(self.path).encode('utf-8'))

    def do_POST(self):
        content_length = int(self.headers['Content-Length'])  # <--- Gets the size of data
        post_data = simplejson.loads(self.rfile.read(content_length))  # <--- Gets the data itself
        logging.info("POST request,\nPath: %s\nHeaders:\n%s\n\nBody:\n%s\n", str(self.path), str(self.headers), post_data)
        self._set_response()

        question_feature = list()
        relation_feature = list()
        relation_all_feature = list()
        for i in range(len(post_data['relation'])):
            q_f, r_f, r_a_f = process_one(post_data['question'], post_data['relation'][i])
            question_feature.append(q_f)
            relation_feature.append(r_f)
            relation_all_feature.append(r_a_f)

        # Bug fix: predict() already returns the UTF-8 encoded payload; the
        # original wrapped it in a second {'similarity': ...} dict, sending a
        # double-encoded response body.
        similarity = predict(model, question_feature, relation_feature, relation_all_feature)
        self.wfile.write("POST request for {}".format(self.path).encode('utf-8'))
        self.wfile.write(similarity)
def run(server_class=HTTPServer, handler_class=S, port=9000):
    """Serve HTTP on ``port`` until interrupted with Ctrl-C."""
    logging.basicConfig(level=logging.INFO)
    httpd = server_class(('', port), handler_class)
    logging.info('Starting httpd...\n')
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        pass
    httpd.server_close()
    logging.info('Stopping httpd...\n')
if __name__ == '__main__':
    from sys import argv

    # The handler reads this module-level model; load it before serving.
    model = load_model()
    # An optional single CLI argument overrides the default port.
    if len(argv) == 2:
        port_number = int(argv[1])
        run(port=port_number)
    else:
        run()
| true |
97fa38228ec387b0a8d4d604b76926052dd0c244 | Python | hermanwongkm/AlgoPratice | /Permutations/subetII.py | UTF-8 | 1,147 | 3.578125 | 4 | [] | no_license | # Given an integer array nums that may contain duplicates, return all possible subsets (the power set).
# The solution set must not contain duplicate subsets. Return the solution in any order.
# Example 1:
# Input: nums = [1,2,2]
# Output: [[],[1],[1,2],[1,2,2],[2],[2,2]]
# Example 2:
# Input: nums = [0]
# Output: [[],[0]]
class Solution:
    def subsetsWithDup(self, nums: List[int]) -> List[List[int]]:
        """Return every distinct subset of ``nums`` (may contain duplicates).

        Sorting groups equal values together; a repeated value only extends
        the subsets created in the previous step, which prevents duplicate
        subsets while keeping the original output order.
        """
        nums.sort()
        subsets = [[]]
        prev_size = 0
        for i, value in enumerate(nums):
            begin = prev_size if i > 0 and value == nums[i - 1] else 0
            prev_size = len(subsets)
            subsets += [subsets[j] + [value] for j in range(begin, prev_size)]
        return subsets
| true |
6d19163bc3d439584363cc58170f28af2d460f1f | Python | bayoishola20/Python-All | /Random/CartographicSoftwareAdaptationExercises/HW_1.py | UTF-8 | 2,573 | 4.6875 | 5 | [] | no_license | ##### Topic 1: Working with standard Python lists
# NOTE: Python 2 syntax (print statements) throughout this exercise script.
''' Create a random list
Create a nested list of 60 elements arranged in 10 sub-lists of 6 entries each to be filled with randomly distributed integer values between 0 and 15 '''
import random
# NOTE(review): the prose above asks for 10 sub-lists of 6 entries, but the
# code below builds 6 sub-lists of 10 entries -- confirm which was intended.
# using list comprehension
print "Nested list (list comprehension): ", [[random.randrange(0,15) for _ in range(10)] for _ in range(6)]
print "\n"
# using explicit for loops
outer = []
for _ in range(6):
    inner = []
    for _ in range(10):
        inner.append( random.randrange(0,15) )
    outer.append(inner)
print "Nested list (for loop): ", outer
print "\n"
''' Produce a histogram from the values
Then, propagate through the list of 60 integer elements and count how often each value exists within the nested list.
A dictionary of the type {value : frequency, value : frequency, value :
frequency} will be an ideal structure to take up the results.
'''
# Flatten the nested list into a single list of 60 integers.
flatten = []
for inner in outer:
    for i in inner:
        flatten.append(i)
print "Flattened: ", flatten
print "\n"
my_counter = {} # empty dictionary to store value: frequency
# first flatten list
for inner in outer:
    for i in inner:
        # my_counter.get(index, value). Value is optional. Returns the value of the item with the specified key
        # my_counter[i] = ... sets value associated with key i.
        # Putting 0 so it returns integer and plus 1 to continue else would give all zeros.
        my_counter[i] = my_counter.get(i, 0) + 1
print "Frequency: ", my_counter
print "\n"
''' Display the histogram
Produce a simple but well-formatted screen output to display the histogram.
It will be up to you to use a tabular form (formatted print) or, with a
higher effort, in a graphic form. The latter will require to have a look into
the package "Matplotlib" which provides a nearly unlimited number of
graphic options. For the option of formatted print, a clever shortcut is to
cast the integer values with the python repr()-Operator for easy
justification. '''
# Left-justified two-column table: value and its frequency.
print "{:<8} {:<8}".format('Value', 'Frequency')
for key, val in my_counter.items():
    frequency = val # not so necessary line of code
    print "{:<8} {:<8}".format(key, val)
print "\n"
''' Outlook
We can get the same data and structures in an even more efficient way by
storing the nested list as a NumPy array (but this is a topic for a later
session). '''
import numpy as np
# 0, 15 is the range
# size of 2D array as a tuple (6,10) [Could be an int] and casting to type int
print np.random.randint(0, 15, (6,10)).astype(int)
print "\n"
print np.random.randint(0,15,(6,10)).astype(int)
| true |
88fa3acd27055cf95e0717de5ab6766d31c5d258 | Python | caiohrgm/Projeto-PIBIC-UFCG-2019-2020---Machine-Learning-e-Apostas-Esportivas | /baseFunctions_NaiveBayes.py | UTF-8 | 4,909 | 3.28125 | 3 | [] | no_license | import pandas as pd
import xlrd
def createDataFrame():
    """Return an empty DataFrame with feature columns x1..x20."""
    column_names = ['x%d' % i for i in range(1, 21)]
    return pd.DataFrame(columns=column_names)
def treinaIndiceMandante(rodadas, dataSet):
    """For each of the 20 teams (columns 0..19), count home games, away
    games, home wins and total losses over the first ``rodadas`` rows.

    Column 20 holds "S" when the home side won that round; a cell value of
    1 marks the team as home side, -1 as away side.
    """
    stats = []
    for team in range(20):
        home = away = home_wins = 0
        for match in range(rodadas):
            flag = dataSet.iloc[match, team]
            if flag == 1:
                home += 1
                if dataSet.iloc[match, 20] == "S":
                    home_wins += 1
            elif flag == -1:
                away += 1
        # losses = all games played minus the home wins
        stats.append([home, away, home_wins, home + away - home_wins])
    return stats
def boss_victory_prob(list1):
    """Home-win frequency per team: wins / home games, rounded to 4 digits."""
    return [round(float(stat[2]) / float(stat[0]), 4) for stat in list1]
def boss_defeat_prob(list1):
    """Per team: (home games - home wins) / away games, rounded to 4 digits.

    Note: the numerator can be zero; the smoothing that was commented out in
    the original is intentionally left out to preserve behaviour.
    """
    return [round(float(stat[0] - stat[2]) / float(stat[1]), 4) for stat in list1]
def boss_win_probability(vitorias, derrotas):
    """Bayes update of the total home-win probability per team.

    P(S|V) = P(V|S)*0.33 / (P(V|S)*0.33 + P(V|~S)*0.67), where 0.33 is the
    prior probability that the home side wins.
    """
    return [(win * 0.33) / (win * 0.33 + loss * 0.67)
            for win, loss in zip(vitorias, derrotas)]
def winner_boss_probability_10rounds(listBoss, testData, round):
    """Evaluate the "bet on a home win when P > 0.65" rule over the 10 rows
    following row ``round``.

    Returns, per team, a list of booleans: True when the prediction matched
    the real outcome (column 20, "S" = home win).
    """
    df = testData
    beg = round
    end = beg + 10
    results = []
    for j in range(20):
        foreseen = ""
        outcomes = []
        # Generalised: iterate over the actual number of rows instead of the
        # hard-coded 380 (a full Brasileirao season).
        for i in range(len(df)):
            if beg < i <= end and df.iloc[i, j] == 1:
                real = df.iloc[i, 20]
                if listBoss[j] > 0.65:
                    foreseen = "S"  # bet on a home victory
                # NOTE(review): when listBoss[j] <= 0.65 ``foreseen`` keeps
                # its previous value; preserved from the original logic.
                outcomes.append(foreseen == real)
        results.append(outcomes)
    return results
def odd_rates(listBoss, testData, round):
    """Like ``winner_boss_probability_10rounds`` but records the bookmaker
    odd (column 21) next to each "Vitoria"/"Derrota" outcome.
    """
    df = testData
    beg = round
    end = beg + 10
    results = []
    for j in range(20):
        foreseen = ""
        rates = []
        # Generalised: len(df) instead of the hard-coded 380 rows.
        for i in range(len(df)):
            if beg < i <= end and df.iloc[i, j] == 1:
                real = df.iloc[i, 20]
                if listBoss[j] > 0.65:
                    foreseen = "S"
                    rates.append(df.iloc[i, 21])  # bookmaker odd for the bet
                # NOTE(review): an outcome is recorded even when no bet/odd
                # was appended for this row; preserved from the original.
                rates.append("Vitoria" if foreseen == real else "Derrota")
        results.append(rates)
    return results
def bets(listBoss, testData, rodada, bet):
    """Simulate flat-stake bets over the 10 rows after row ``rodada``.

    For each team/home game: bet on the home side ("S") when its win
    probability exceeds 0.5, otherwise on the opposite outcome ("F") at the
    complementary implied odd. Returns a flat list of ("G", profit) /
    ("P", -bet) tuples across all teams.
    """
    df = testData
    first, last = rodada, rodada + 10
    results = []
    for j in range(20):
        # Generalised: len(df) instead of the hard-coded 380 rows.
        for i in range(len(df)):
            if not (first < i <= last) or df.iloc[i, j] != 1:
                continue
            real = df.iloc[i, 20]
            if listBoss[j] > 0.5:
                foreseen = "S"
                odd = df.iloc[i, 21]
            else:
                foreseen = "F"
                # Complementary odd implied by the home-win odd.
                odd = 1 / (1 - float(1 / float(df.iloc[i, 21])))
            if foreseen == real:
                results.append(("G", round(bet * (odd - 1), 2)))
            else:
                results.append(("P", -bet))
    return results
| true |
1ac165c269ec578bd71d53b1e3f08aa8e6989133 | Python | kunyuan/ParquetMC | /utility/angle.py | UTF-8 | 2,879 | 3.140625 | 3 | [] | no_license | from scipy.special import eval_legendre
from scipy import integrate
import sys
import os
import numpy as np
import unittest
def mult_along_axis(A, B, axis):
    """
    return A[..., i, ...]*B[i],
    where B[i] is broadcast to all elements of A[..., i, ...]
    """
    A = np.array(A)
    B = np.array(B)
    # shape check
    if axis >= A.ndim:
        # Bug fix: the original raised a raw string r"{axis} is larger than
        # {A.ndim}", so the placeholders were never substituted.
        raise ValueError("{} is larger than {}".format(axis, A.ndim))
    if A.shape[axis] != B.size:
        raise ValueError(
            "'A' and 'B' must have the same length along the given axis")

    # Swap the given axis with the last one, to get the swapped
    # 'shape' tuple here (swapaxes only returns a view of the
    # supplied array, so no unnecessary copy)
    shape = np.swapaxes(A, A.ndim-1, axis).shape

    # Broadcast to an array with the shape as above (no new array created)
    B_brc = np.broadcast_to(B, shape)
    # Swap back the axes (again, this is only a view of the same array)
    B_brc = np.swapaxes(B_brc, A.ndim-1, axis)

    return A * B_brc
def LegendreCoeff(Data, AngleGrid, lList, axis=0):
    """
    Legendre coefficients  C_l = 1/2 * \\int_{-1}^{1} f(x) P_l(x) dx.

    Data: array with at least one angle axis
    AngleGrid: grid for cos(theta) in [-1, 1]
    lList: angular momentum quantum numbers to compute
    axis: axis along which the angle integration is performed
    """
    coefficients = {}
    for l in lList:
        weights = np.array([eval_legendre(l, x) for x in AngleGrid])
        weighted = mult_along_axis(np.copy(Data), weights, axis)
        coefficients[l] = integrate.trapz(weighted, AngleGrid, axis=axis) / 2.0
    return coefficients
def AngleFunc(Coeff, AngleGrid):
    """
    Reconstruct f(x) = sum_l (2l+1) * C_l * P_l(x) from Legendre coefficients.
    """
    total = np.zeros_like(AngleGrid)
    for l, c in Coeff.items():
        basis = np.array([eval_legendre(l, x) for x in AngleGrid])
        total += (2.0 * l + 1) * c * basis
    return total
class TestAngle(unittest.TestCase):
    def test_LegendreCoff(self):
        # f(x) = P_3(x): its l=3 coefficient times (2l+1) must be 1.
        grid = np.linspace(-1.0, 1.0, 128)
        l = 3
        legendre3 = lambda x: 0.5 * (5.0 * x ** 3 - 3.0 * x)
        sample = np.array([legendre3(x) for x in grid])
        data = np.stack([sample, sample * 2.0])
        coeff = LegendreCoeff(data, grid, [l, ], axis=1)[l] * (2.0 * l + 1.0)
        self.assertTrue(abs(coeff[0] - 1.0) < 1.0e-3)
        self.assertTrue(abs(coeff[1] - 2.0) < 1.0e-3)

    def test_backforth(self):
        # Projecting onto l=0..5 and reconstructing must reproduce x^3.
        grid = np.linspace(-1.0, 1.0, 128)
        orders = range(6)
        y = grid ** 3
        reconstructed = AngleFunc(LegendreCoeff(y, grid, orders), grid)
        self.assertTrue(np.amax(abs(y - reconstructed)) < 1.0e-3)
# Run the unit tests when this module is executed as a script.
if __name__ == '__main__':
    unittest.main()
| true |
630c398193967fe06d8b9eedc1c418e8df879e32 | Python | ahviplc/pythonLCDemo | /com/lc/demo/numpyDemo/numpyDemo.py | UTF-8 | 1,367 | 3.734375 | 4 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
numpyDemo
Version: 0.1
Author: LC
DateTime: 2018年6月17日21:41:55
一加壹博客最Top-一起共创1+1>2的力量!~LC
LC博客url: http://oneplusone.top/index.html
"""
import numpy
print ('使用列表生成一维数组')
data = [1,2,3,4,5,6]
x = numpy.array(data)
print (x) #打印数组
print (x.dtype) #打印数组元素的类型
print ('使用列表生成二维数组')
data = [[1,2],[3,4],[5,6]]
x = numpy.array(data)
print (x) #打印数组
print (x.ndim) #打印数组的维度
print (x.shape) #打印数组各个维度的长度。shape是一个元组
print ('使用zero/ones/empty创建数组:根据shape来创建')
x = numpy.zeros(6) #创建一维长度为6的,元素都是0一维数组
print (x)
x = numpy.zeros((2,3)) #创建一维长度为2,二维长度为3的二维0数组
print (x)
x = numpy.ones((2,3)) #创建一维长度为2,二维长度为3的二维1数组
print (x)
x = numpy.empty((3,3)) #创建一维长度为2,二维长度为3,未初始化的二维数组
print (x)
print ('使用arrange生成连续元素')
print (numpy.arange(6)) # [0,1,2,3,4,5,] 开区间
print (numpy.arange(0,6,2)) # [0, 2,4]
"""
python之numpy的基本使用 - cxmscb的博客 - CSDN博客
https://blog.csdn.net/cxmscb/article/details/54583415
注意:这里的代码python版本不是3.0+
"""
| true |
319882a52f0c9a9aa2122c17b2c676072606087c | Python | palaciossruben/acerto | /testing_webpage/basic_common.py | UTF-8 | 1,095 | 2.875 | 3 | [] | no_license | """
This a base file, that cannot import any models.
As it will refactor code among models. Its is used to solve the
circular dependency problem of having "common.py" import models and also refactor model code.
"""
import re
ADMIN_USER_EMAIL = 'admin@peaku.co'
def change_to_international_phone_number(phone, calling_code, add_plus=False):
    """Normalise ``phone`` to international form for ``calling_code``
    (e.g. '57'), optionally prefixing a '+'.

    Dashes and spaces are stripped; when the number does not already carry
    the country code, at most the last 10 digits are kept and the code is
    prepended.
    """
    plus_symbol = '+' if add_plus else ''
    if phone:
        phone = phone.replace('-', '').replace(' ', '')

        # Adds the '+' only
        if re.search(r'^' + calling_code + '.+', phone) is not None:
            phone = plus_symbol + phone
        elif phone[0] != '+':  # Adds the '+' and country code phone
            # TODO: change for other countries
            max_phone_length = 10
            if len(phone) > max_phone_length:
                # Bug fix: the original sliced phone[-10:-1], silently
                # dropping the last digit; keep the last 10 digits instead.
                phone = phone[-max_phone_length:]
            phone = plus_symbol + calling_code + phone

    return phone
def not_admin_user(request):
    """True when the request was not made by the admin account."""
    current_username = request.user.username
    return current_username != ADMIN_USER_EMAIL
def is_admin(user):
    """True when ``user`` is the admin account."""
    # The original called get_username() twice and ignored the first result.
    return user.get_username() == ADMIN_USER_EMAIL
dc9c7ae9dfba0ddf683f27aab5ed985a18734bc3 | Python | youhusky/Facebook_Prepare | /285. Inorder Successor in BST.py | UTF-8 | 561 | 3.59375 | 4 | [
"MIT"
] | permissive | # Given a binary search tree and a node in it, find the in-order successor of that node in the BST.
# Note: If the given node has no in-order successor in the tree, return null.
class Solution(object):
    def inorderSuccessor(self, root, p):
        """Return the in-order successor of node ``p`` in the BST ``root``,
        or None when ``p`` is the largest value."""
        succ = None
        while root:
            if p.val < root.val:
                succ = root          # root is a candidate successor
                root = root.left
            else:
                root = root.right
        return succ

    # follow up
    def inorderPrevsessor(self, root, p):
        """Return the in-order predecessor of node ``p``, or None when ``p``
        is the smallest value."""
        prev = None
        while root:
            # Bug fix: the original tested `p.val < root.val`, so when the
            # search reached p itself it descended right and recorded p as
            # its own predecessor. Equal values must go left.
            if p.val <= root.val:
                root = root.left
            else:
                prev = root          # root is a candidate predecessor
                root = root.right
        return prev
29492745c9bed224bad11640ad9263a1e634238c | Python | krishauser/reem | /reem/accessors.py | UTF-8 | 10,519 | 2.59375 | 3 | [
"Apache-2.0"
] | permissive | from __future__ import print_function
from threading import Thread, Lock
import redis
from .utilities import *
_ROOT_VALUE_READ_NAME = "{}ROOT{}".format(ROOT_VALUE_SEQUENCE, ROOT_VALUE_SEQUENCE)
_TYPEMAP = {
'object':dict,
'array':list,
'integer':int,
'number':float,
'boolean':bool
}
class MetadataListener:
    """Watches Redis keyspace notifications and flags registered readers
    whose keys have changed so they re-pull their metadata."""

    def __init__(self, client):
        self.client = client
        self.pubsub = self.client.pubsub()
        # Subscribe to every keyspace event on database 0.
        self.pubsub.psubscribe(['__keyspace@0__:*'])
        self.listeners = {}

    def add_listener(self, key_name, reader):
        """Register ``reader`` to be flagged whenever ``key_name`` changes."""
        channel_name = "__keyspace@0__:{}".format(key_name)
        self.listeners.setdefault(channel_name, []).append(reader)

    def flush(self):
        """Drain all pending keyspace messages, marking affected readers."""
        while True:
            message = self.pubsub.get_message()
            if message is None:
                break
            channel_name = message['channel'].decode("utf_8")
            for reader in self.listeners.get(channel_name, []):
                reader.pull_metadata = True
class KeyAccessor:
"""Main class for accessing sub-keys of a KeyValueStore."""
def __init__(self, parent, writer, reader, initial_path=[]):
self.parent = parent
self.writer = writer
self.reader = reader
self.path = initial_path
self.path_str = None #caches path string
def __str__(self):
if self.path_str is None:
self.path_str = key_sequence_to_path_ext(self.path)
return "reem.KeyAccessor({} {})".format(self.writer.top_key_name,self.path_str)
def __getitem__(self, key):
assert check_valid_key_name_ext(key),"{} is not a valid key under path {}".format(key,key_sequence_to_path(self.path) if self.path_str is None else self.path_str)
return self.__class__(self,self.writer,self.reader,self.path+[key])
def get(self, key, default_value = None):
"""Similar to dict's get() method, returns a default value if the key doesn't exist.
Essentially equivalent to
```
try:
value = self[key].read()
except:
value = default_value
```
"""
try:
return self[key].read()
except redis.exceptions.ResponseError:
return default_value
def __setitem__(self, key, value):
if isinstance(value,KeyAccessor):
#sometimes this happens on += / *= on subkeys
if value.parent is self:
return
else:
raise ValueError("Cannot set a KeyAccessor to another KeyAccessor... {}[{}] = {}".format(self.path,key,value.path))
assert check_valid_key_name_ext(key),"{} is not a valid key under path {}".format(key,key_sequence_to_path(self.path) if self.path_str is None else self.path_str)
if self.path_str is None:
self.path_str = key_sequence_to_path_ext(self.path)
if self.path_str.endswith('.'): #root
if isinstance(key,int):
path = '[%d]'%(key)
else:
path = self.path_str + key
else:
if isinstance(key,int):
path = self.path_str + '[%d]'%(key,)
else:
path = self.path_str + '.' + key
self.writer.send_to_redis(path, value)
def __delitem__(self,key):
assert check_valid_key_name_ext(key),"{} is not a valid key under path {}".format(key,key_sequence_to_path(self.path) if self.path_str is None else self.path_str)
if self.path_str is None:
self.path_str = key_sequence_to_path_ext(self.path)
if self.path_str.endswith('.'): #root
if isinstance(key,int):
path = '[%d]'%(key)
else:
path = self.path_str + key
else:
if isinstance(key,int):
path = self.path_str + '[%d]'%(key,)
else:
path = self.path_str + '.' + key
self.writer.delete_from_redis(path)
def read(self):
"""Actually read the value referred to by this accessor."""
if self.path_str is None:
self.path_str = key_sequence_to_path_ext(self.path)
server_value = self.reader.read_from_redis(self.path_str)
#if it's special, then its value is under _ROOT_VALUE_READ_NAME
try:
return server_value[_ROOT_VALUE_READ_NAME]
except Exception:
return server_value
def write(self, value):
"""Writes value to the path referred to by this accessor."""
if self.path_str is None:
self.path_str = key_sequence_to_path_ext(self.path)
self.writer.send_to_redis(self.path_str, value)
def _do_rejson_call(self,fn,*args):
assert isinstance(fn,str)
if self.path_str is None:
self.path_str = key_sequence_to_path_ext(self.path)
with self.writer.interface.INTERFACE_LOCK:
return getattr(self.writer.interface.client,fn)(self.writer.top_key_name,self.path_str,*args)
def type(self):
"""Returns the type of the object"""
t = self._do_rejson_call('jsontype')
return _TYPEMAP[t]
def __len__(self):
"""Returns the length of an array / number of keys in dict"""
try:
return self._do_rejson_call('jsonobjlen')
except:
return self._do_rejson_call('jsonarrlen')
def __iadd__(self,rhs):
"""Adds a value to an integer / float value, or concatenates a list
to an array.
Type checking is not performed, so the user should know what they're doing.
"""
if not hasattr(rhs,'__iter__'):
#treat as value
if not isinstance(rhs,(int,float)):
raise ValueError("+= can only accept int or float arguments")
self._do_rejson_call('jsonnumincrby',rhs)
else:
self._do_rejson_call('jsonarrappend',*rhs)
return self
def __isub__(self,rhs):
    """Subtracts a value from an integer / float value
    Type checking is not performed, so the user should know what they're doing.
    """
    # Delegates to __iadd__ with the negated operand; non-numeric rhs
    # therefore fails at the unary minus.
    self += -rhs
    return self
def __imul__(self,rhs):
    """Multiplies a value by an integer / float value.
    Type checking is not performed, so the user should know what they're doing.
    """
    # Only numeric factors make sense for the server-side multiply.
    if not isinstance(rhs,(int,float)):
        raise ValueError("*= can only accept int or float arguments")
    self._do_rejson_call('jsonnummultby',rhs)
    return self
def __idiv__(self,rhs):
    """Divides a value by an integer / float value
    Type checking is not performed, so the user should know what they're doing.

    NOTE(review): __idiv__ is the Python 2 hook; on Python 3 the /= operator
    looks up __itruediv__, so this is only reached when called explicitly --
    confirm which interpreter this module targets.
    """
    # Implemented as multiplication by the reciprocal.
    self *= 1.0/rhs
    return self
def append(self,rhs):
    """Appends a value to an array
    Type checking is not performed, so the user should know what they're doing.
    """
    self._do_rejson_call('jsonarrappend',rhs)
def __enter__(self):
    """Reads the value, returns the json struct which can then be manipulated,
    and writes back on __exit__. This is useful when you want to make many
    small reads/writes which would otherwise bog down Redis.
    Usage::
        kvs = KeyValueStore()
        with kvs['foo']['bar'] as val:
            #all this stuff is done client side with no communication
            for i in range(len(val['baz'])):
                val['baz'][i] += 1
            #now val will be written back to the KeyValueStore
    """
    # Cache the snapshot on self so __exit__ can write it back.
    self._value = self.read()
    return self._value
def __exit__(self,exc_type,exc_val,traceback):
    """Write the (possibly mutated) cached value back, then drop the cache.

    Per the contract documented in __enter__, the value is persisted only on
    a clean exit; an exception inside the with-block must not write back
    partial edits (the original condition was inverted).
    """
    if exc_type is None:
        self.write(self._value)
    # delattr() requires (object, name); the old delattr(self._value) call
    # was a guaranteed TypeError. Always clear the cached snapshot.
    del self._value
class WriteOnlyKeyAccessor(KeyAccessor):
    """A KeyAccessor restricted to writing: every reading or in-place
    mutating operation raises NotImplementedError."""

    def __init__(self, *args, **kwargs):
        KeyAccessor.__init__(self, *args, **kwargs)

    def _unsupported(self, *args, **kwargs):
        # Shared body for every operation this write-only view forbids.
        raise NotImplementedError()

    read = _unsupported
    __delitem__ = _unsupported
    __iadd__ = _unsupported
    __isub__ = _unsupported
    __imul__ = _unsupported
    __idiv__ = _unsupported
    append = _unsupported
class ActiveSubscriberKeyAccessor(KeyAccessor):
    """Read-only accessor backed by the parent's locally-mirrored copy;
    every mutating entry point is disabled."""
    def __init__(self,*args,**kwargs):
        KeyAccessor.__init__(self,*args,**kwargs)
    # NOTE(review): write() takes no `value` parameter here, unlike the base
    # class -- a normal write(value) call raises TypeError before reaching
    # the NotImplementedError. Confirm whether that is intended.
    def write(self):
        raise NotImplementedError()
    def __setitem__(self,key,value):
        raise NotImplementedError()
    def __delitem__(self,key):
        raise NotImplementedError()
    def __iadd__(self,rhs):
        raise NotImplementedError()
    def __isub__(self,rhs):
        raise NotImplementedError()
    def __imul__(self,rhs):
        raise NotImplementedError()
    def __idiv__(self,rhs):
        raise NotImplementedError()
    def append(self,rhs):
        raise NotImplementedError()
    def read(self):
        """Resolve this accessor's key path against the local mirror.

        Dicts are returned as a defensive copy so callers cannot mutate the
        mirror in place.
        """
        return_val = self.parent.local_copy
        if len(self.path) == 0:
            pass
        else:
            for key in self.path:
                return_val = return_val[key]
        if type(return_val) == dict:
            return copy_dictionary_without_paths(return_val, [])
        return return_val
class ChannelListener(Thread):
    """Background thread that psubscribes to one redis channel pattern and
    forwards each message to *callback_function*.

    The callback is invoked as callback_function(channel=..., message=...,
    **kwargs) for every message after the initial subscription confirmation.
    """
    def __init__(self, client, channel_name, callback_function, kwargs):
        Thread.__init__(self)
        self.client = client
        self.channel_name = channel_name
        self.callback_function = callback_function
        self.kwargs = kwargs
        # The first item pubsub yields is the subscription confirmation,
        # not a real message; this flag lets run() skip it.
        self.first_item_seen = False
    def run(self):
        """Blocking listen loop (runs until the pubsub connection ends)."""
        self.pubsub = self.client.pubsub()
        self.pubsub.psubscribe([self.channel_name])
        for item in self.pubsub.listen():
            # First Item is a generic message that we need to get rid of
            if not self.first_item_seen:
                self.first_item_seen = True
                continue
            # Normalize bytes -> str before unpacking the payload.
            item = json_recode_str(item)
            channel = item['channel']
            message = item['data']
            self.callback_function(channel=channel, message=message, **self.kwargs)
| true |
cda6e95660df620b50c37ef4611032d72abe8804 | Python | hillbs/Trigonometry-Programlets | /stdutils.py | UTF-8 | 3,516 | 3.4375 | 3 | [
"Apache-2.0"
] | permissive | # A module for various math-related functions
import sys
import math
from fractions import Fraction
# Special Unicode characters used when rendering equations for display.
specials = {'pi': 'π', 'degree': '°', 'theta': 'θ', 'approx': '≈', 'root': '√'}
cTheta = 'θ'  # single-character aliases for the same symbols
cPi = 'π'
cDegree = '°'
cRoot = '√'
cApprox = '≈'
# Constants for a user to convert values in input
D2R = math.pi/180  # degrees -> radians multiplier
SQRT = math.sqrt   # shorthand so interactive expressions can write SQRT(2)
PI = math.pi
# Formats equations using a dictionary of values
def prettyFunction(stringBase, values):
    """Replace the {pi}/{degree}/{approx}/{theta} placeholders in *stringBase*
    and substitute *values* via str.format with pretty-printed numbers.

    Key-name conventions: keys containing 'float' are always rendered as
    floats; keys containing 'add' get an explicit leading '+' when positive;
    non-integral values are wrapped in parentheses.

    NOTE(review): this definition is shadowed by the one-argument
    prettyFunction defined immediately below, so it is unreachable through
    the module namespace.
    """
    # Insert special characters
    newString = stringBase.replace("{pi}",specials['pi'])
    newString = newString.replace("{degree}",specials['degree'])
    newString = newString.replace("{approx}",specials['approx'])
    newString = newString.replace("{theta}",specials['theta'])
    sValues = {}
    for k,v in values.items():
        if 'float' in k:
            sValues[k] = str(float(v))
        elif 'add' in k and v > 0 and v % 1 == 0:
            sValues[k] = "+" + str(v)
        elif 'add' in k and v > 0:
            sValues[k] = '+ (' + str(v) + ')'
        elif (v % 1) == 0:
            sValues[k] = str(v)
        else:
            sValues[k] = "(" + str(v) + ")"
    newString = newString.format(**sValues) # Add dictionary values
    return newString
def prettyFunction(stringBase, values=None):
    """Replace the {pi}/{degree}/{approx}/{theta} placeholders in *stringBase*.

    When *values* is given, additionally substitute it via str.format with
    pretty-printed numbers (keys containing 'float' are always shown as
    floats; keys containing 'add' get a leading '+' when positive;
    non-integral values are wrapped in parentheses).

    This merges the two previous prettyFunction definitions: the second
    (one-argument) definition shadowed the first, which broke every
    two-argument call site. `values=None` keeps both call forms working.
    """
    # Insert special characters
    newString = stringBase.replace("{pi}", "π")
    newString = newString.replace("{degree}", "°")
    newString = newString.replace("{approx}", "≈")
    newString = newString.replace("{theta}", "θ")
    if values is None:
        return newString
    sValues = {}
    for k, v in values.items():
        if 'float' in k:
            sValues[k] = str(float(v))
        elif 'add' in k and v > 0 and v % 1 == 0:
            sValues[k] = "+" + str(v)
        elif 'add' in k and v > 0:
            sValues[k] = '+ (' + str(v) + ')'
        elif (v % 1) == 0:
            sValues[k] = str(v)
        else:
            sValues[k] = "(" + str(v) + ")"
    return newString.format(**sValues)  # Add dictionary values
def prettyFraction(frac, forceOp=False, forceFloat=False):
    """Render a number for display inside an equation string.

    forceFloat: always show the decimal form.
    forceOp: prefix positive values with an explicit '+'.
    Non-integral values are wrapped in parentheses.
    """
    if forceFloat:
        return str(float(frac))
    is_whole = frac % 1 == 0
    if forceOp and frac > 0:
        return "+" + str(frac) if is_whole else "+ (" + str(frac) + ")"
    return str(frac) if is_whole else "(" + str(frac) + ")"
# Obtain a list of arguments from the user
def inputAsDict(arguments):
    """Prompt for each name in *arguments* and return {name: Fraction(value)}.

    SECURITY NOTE: eval() executes arbitrary user input; acceptable only in a
    trusted interactive calculator session, never on untrusted data.
    """
    return {x: getFraction(float(eval(input(x+": ")))) for x in arguments}
# Rounds a value to 6 significant digits
def prettyFloat(value):
    """Format *value* with at most 6 significant digits."""
    return format(value, ".6g")
def inputFraction(prompt):
    """Prompt the user and return their answer as a Fraction.

    NOTE(review): the raw input string is passed straight to getFraction();
    numeric strings work because Fraction() accepts them, but no eval/float
    conversion is applied as in inputAsDict -- confirm that is intended.
    """
    return getFraction(input(prompt))
def getFraction(flt):
    """Convert *flt* to the nearest 'nice' Fraction (default
    limit_denominator bound)."""
    exact = Fraction(flt)
    return exact.limit_denominator()
def giveMenu(functionDict):
    """Present a numbered menu built from *functionDict* ({label: value})
    and return the value the user selects; exit(1) on an invalid pick.

    Fixes: each option is now printed on its own line (the options were
    previously concatenated without separators), and an out-of-range
    selection no longer raises KeyError before the 'invalid' check runs.
    """
    resDict = {}
    outStr = "Options:\n"
    x = 0
    for k,v in functionDict.items():
        x+=1
        resDict[x] = v
        outStr = outStr + "{}: {}\n".format(str(k),x)
    print(outStr)
    userInput = int(input("Selection: "))
    # .get() so an unknown number yields None and hits the guard below
    # (resDict[userInput] raised KeyError, making the guard dead code).
    final = resDict.get(userInput)
    if final is None:
        print("Invalid selection.")
        sys.exit(1)
    return final
primes = [2,3,5,6,7,8,10,11]  # candidate radicands tried by niceRoot; NOTE(review): despite the name, 6, 8 and 10 are not prime (and 8 is not square-free) -- confirm intent
class NiceValue:
    """A number of the form base * sqrt(root) [* pi], with base kept as a
    Fraction so it can be rendered exactly."""

    def __init__(self, base, root=1, pi=False):
        self.base = getFraction(base)
        self.root = root
        self.pi = pi

    def getFloat(self):
        """Numeric value of this expression."""
        factor = math.pi if self.pi else 1
        return self.base * math.sqrt(self.root) * factor

    def getString(self):
        """Human-readable rendering, e.g. '1/2√2π'."""
        text = str(self.base)
        if self.root != 1:
            text += specials['root'] + str(self.root)
        if self.pi:
            text += specials['pi']
        return text
def niceRoot(val):
    """Try to express *val* as q, q*sqrt(p), or q*sqrt(p)*pi for a small p
    from `primes`; fall back to a plain NiceValue approximation."""
    if isRational(val):
        return NiceValue(val)
    for base in primes:
        if isRational(val / (math.sqrt(base) * math.pi)):
            return NiceValue(val / (math.sqrt(base) * math.pi), root=base, pi=True)
        if isRational(val / math.sqrt(base)):
            return NiceValue(val / math.sqrt(base), root=base)
    return NiceValue(val)
def isRational(val):
    """Heuristic rationality test: *val* counts as rational when its best
    fraction approximation has a denominator under 1000."""
    best = Fraction(val).limit_denominator()
    return best.denominator < 1000
| true |
a8a677e3570024195332d61034fc2d4632aba8d3 | Python | Ronan-H/advent-of-code-2020 | /day-4/part-2.py | UTF-8 | 1,383 | 2.875 | 3 | [] | no_license | import re
def hgt_validator(hgt):
    """True when *hgt* is '<n>cm' with n in 150-193, or '<n>in' with n in 59-76."""
    quantity, unit = hgt[:-2], hgt[-2:]
    if unit == 'cm':
        return 150 <= int(quantity) <= 193
    if unit == 'in':
        return 59 <= int(quantity) <= 76
    return False
# Advent of Code 2020 day 4 part 2: count passports whose fields all validate.
hcl_pattern = re.compile(r'^#[0-9a-f]{6}$')   # hair colour: '#' + 6 hex digits
pid_pattern = re.compile(r'^[0-9]{9}$')       # passport id: exactly 9 digits
eye_colours = {'amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth'}
# One validator per required field; 'cid' is deliberately absent (optional).
field_validators = {
    'byr': lambda x: 1920 <= int(x) <= 2002,
    'iyr': lambda x: 2010 <= int(x) <= 2020,
    'eyr': lambda x: 2020 <= int(x) <= 2030,
    'hgt': lambda x: hgt_validator(x),
    'hcl': lambda x: hcl_pattern.match(x),
    'ecl': lambda x: x in eye_colours,
    'pid': lambda x: pid_pattern.match(x),
}
field_keys = field_validators.keys()
valid = 0
with open('./input') as input_file:
    valid_fields = set()
    for line in input_file:
        if line == '\n': # end of current passport fields
            if all(field in valid_fields for field in field_keys):
                valid += 1
            valid_fields = set()
        else:
            # Each line holds space-separated key:value pairs; record a key
            # only when its value validates.
            for key, value in [field.split(':') for field in line[:-1].split(' ')]:
                if key in field_keys and field_validators[key](value):
                    valid_fields.add(key)
    # I added an extra new line to the end of the input file so the 'if' branch wouldn't have to be repeated here
    # NOTE(review): the close() below is redundant -- the with-block already closes the file.
    input_file.close()
print(valid)
| true |
6dd8fe1a35a8133bcd87f90ab587085eeec3951f | Python | hyh2010/ECE1548-project | /Queuesim/testTrafficSource.py | UTF-8 | 2,007 | 2.796875 | 3 | [] | no_license | import unittest
import simpy
import numpy as np
from TrafficSource import TrafficSourceConstInterarrival
from Server import ServerConstServiceTime
class testTrafficSource(unittest.TestCase):
    """Integration tests: constant-interarrival sources feeding a
    constant-service-time server inside a simpy environment."""
    def setUp(self):
        # Fresh single-capacity server (service time 5) per test.
        service_time = 5
        env = simpy.Environment()
        self.__server = ServerConstServiceTime(env, capacity=1, service_time=service_time)
    def test_single_source(self):
        """One source, a packet every 2 time units: each response grows by the
        3-unit backlog (service 5 - interarrival 2)."""
        interarrival_time = 2
        number_of_packets = 5
        source = TrafficSourceConstInterarrival(self.__server, interarrival_time)
        source.add_traffic_generator_process(number_of_packets)
        self.__server.env().run()
        response_times = [x.response_time() for x in source.traffic()]
        expected_response_times = [5, 8, 11, 14, 17]
        np.testing.assert_almost_equal(response_times, expected_response_times)
    def test_multiple_sources(self):
        """Two interleaved sources share the one server; expected response
        times were computed by hand for this arrival pattern."""
        interarrival_time_source1 = 3
        number_of_packets_source1 = 3
        interarrival_time_source2 = 4
        number_of_packets_source2 = 2
        source1 = TrafficSourceConstInterarrival(self.__server,
                                                 interarrival_time_source1)
        source2 = TrafficSourceConstInterarrival(self.__server,
                                                 interarrival_time_source2)
        source1.add_traffic_generator_process(number_of_packets_source1)
        source2.add_traffic_generator_process(number_of_packets_source2)
        self.__server.env().run()
        response_times_source1 = [x.response_time() for x in source1.traffic()]
        response_times_source2 = [x.response_time() for x in source2.traffic()]
        expected_response_times_source1 = [5, 12, 19]
        expected_response_times_source2 = [9, 15]
        np.testing.assert_almost_equal(response_times_source1, expected_response_times_source1)
        np.testing.assert_almost_equal(response_times_source2, expected_response_times_source2)
if __name__ == '__main__':
    unittest.main()
| true |
8739c5a460633d1327e22f20c21ca596a45eb682 | Python | pucekdts12/Python2020 | /Zestaw04/main.py | UTF-8 | 2,238 | 3.4375 | 3 | [] | no_license | import argparse,ast,itertools as it,re
def zadanie3(args):
    """Parse args.list as a Python literal list of lists and print a list
    holding the sum of each sub-list."""
    sublists = ast.literal_eval(args.list)
    print([sum(sub) for sub in sublists])
def zadanie4(args):
    """Validate args.roman as a Roman numeral and print its decimal value.

    Uses a single right-padded pass: a symbol not smaller than its successor
    contributes positively (minus any accumulated subtractive prefix), a
    smaller symbol is accumulated for subtraction (e.g. the I of IV).
    """
    if not re.findall('^(M{0,3})(CM|CD|D{0,1}C{0,3})(XC|XL|L{0,1}X{0,3})(IX|IV|V{0,1}I{0,3})$',args.roman):
        print(f"{args.roman} nie poprawnie zapisa liczba")
        return
    values = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000, ' ': 0}
    # Pad with a space (value 0) so the final symbol always gets flushed.
    padded = args.roman + ' '
    pending = 0
    total = 0
    print(args.roman)
    for current, following in zip(padded, padded[1:]):
        if values[current] >= values[following]:
            total += values[current] - pending
            pending = 0
        else:
            pending += values[current]
    print(total)
def zadanie5(args):
    """Reverse the slice [args.left, args.right] of the list parsed from
    args.list, once iteratively (on a copy) and once recursively, printing
    the list before and after each variant."""
    data = ast.literal_eval(args.list)

    def reverse_iteratively(seq):
        # Swap symmetric pairs working inward from both ends of the slice.
        span = args.right - args.left + 1
        print(seq)
        for offset in range(span // 2):
            lo = args.left + offset
            hi = args.right - offset
            seq[lo], seq[hi] = seq[hi], seq[lo]
        print(seq)

    def reverse_recursively(seq, lo, hi):
        # Recurse to the innermost pair first, then swap on the way out.
        if lo >= hi:
            return
        reverse_recursively(seq, lo + 1, hi - 1)
        seq[lo], seq[hi] = seq[hi], seq[lo]

    print("WERSJA ITERACYJNA")
    reverse_iteratively(data.copy())  # copy so the recursive pass sees the original
    print("WERSJA REKURENCYJNA")
    print(data)
    reverse_recursively(data, args.left, args.right)
    print(data)
if __name__=="__main__":
    # CLI: one subcommand per exercise number, dispatched via a lookup table.
    arg_parser = argparse.ArgumentParser()
    subparsers = arg_parser.add_subparsers(help='numer zadania',dest='zadanie')
    zad3_p = subparsers.add_parser('3')
    zad3_p.add_argument("list",help="lista w formacie pythona [1,2,[3,4],...]",type=str)
    zad4_p = subparsers.add_parser('4')
    zad4_p.add_argument("roman",help="liczba w formacie rzymskim do odczytania",type=str)
    zad5_p = subparsers.add_parser('5')
    zad5_p.add_argument("list",help="lista w formacie pythona [1,2,[3,4],...]",type=str)
    zad5_p.add_argument("left",help="odkąd zacząć odwracać",type=int)
    zad5_p.add_argument("right",help="dokąd odwracać",type=int)
    args = arg_parser.parse_args()
    # Dispatch table: subcommand name -> handler function.
    zadania={"3":zadanie3,"4":zadanie4,"5":zadanie5}
    zadania[args.zadanie](args)
| true |
d911d72ca965641517a9217285ca4435eceb58fa | Python | 42Swampy/testdateien | /test_rss.py | UTF-8 | 534 | 2.9375 | 3 | [] | no_license | #! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Test von feedparser
import os
import feedparser
import time
# Declare the accumulator for the rendered feed text
ganzer_feed = ""
# Download the feed
print ("Lade RSS-Feed")
d=feedparser.parse("http://rss.kicker.de/news/2bundesliga")
print ("RSS-Feed geladen")
# Feed title first
ganzer_feed = ganzer_feed+(d.feed.title)+" --- "
# Then every entry title
for i in range(len(d['entries'])): # number of entries: len(d['entries'])
    ganzer_feed = ganzer_feed+(d.entries[i].title)+" --- "
print (ganzer_feed)
| true |
fe6b399d7b89ec5df3f31a1383067b31e83ae62a | Python | rizquadnan/codeForces | /14_stonesOnATable.py | UTF-8 | 413 | 3.234375 | 3 | [] | no_license | num_raw = int(input())
# Codeforces "Stones on the Table": the answer is the number of adjacent
# equal-colour pairs, i.e. the minimum number of stones to remove.
stones = list(input())
count = 0
# NOTE(review): this guard is redundant -- if every colour occurs at most
# once the loop below finds no equal neighbours anyway.
if stones.count("R") > 1 or stones.count("B") > 1 or stones.count("G") > 1:
    for idx, stone in enumerate(stones):
        if idx == 0:
            # Same comparison as the else branch below.
            if stone == stones[idx + 1]:
                count += 1
        elif idx == len(stones) - 1:
            pass  # last stone has no right neighbour
        else:
            if stone == stones[idx + 1]:
                count += 1
print(count)
| true |
38ed7a5f5c9d4b8373ba519bd0109172f2dcee67 | Python | BenMusch/social-chess | /social-chess/chessnouns/tournament.py | UTF-8 | 7,513 | 3.34375 | 3 | [
"Apache-2.0"
] | permissive | """
This class will keep track of an individual tournament
"""
import chessnouns
from . import slot
from . import player
from . import game
from datetime import date
from chessutilities import tiebreakers
import logging
import logging.config
logging.config.fileConfig('logging.conf')
logger = logging.getLogger('tournament')
class Tournament(object):
    """Tracks a single tournament: its schedule, per-player draws,
    leaderboard computation and playoff-candidate selection."""

    def __init__(self, schedule, tournament_name, tournament_date=None):
        """Store schedule/name/date and build the player-id -> draw lookup.

        tournament_date defaults to today when omitted.
        """
        # The draw dictionary has the player ids
        # as keys, and the draw objects as values
        if not tournament_date:
            self._event_date = date.today()
        else:
            self._event_date = tournament_date
        self._name = tournament_name
        self._schedule = schedule
        self._playoff = None  # This will just be a game
        self._winner = None  # This will be the id of the winner
        # Now we need to build a dictionary for the players,
        # where the key is the id, value is the draw
        self._tournament_draw_dict = {ind_player.get_id(): ind_player.get_draw() for ind_player in
                                      self._schedule.get_players()}

    def create_random_results_all(self):
        """Generate random results for every round in the schedule (test helper)."""
        rounds = self._schedule.get_rounds()
        count = 1
        # NOTE(review): count is never incremented, so this always logs round 1.
        logger.debug("Creating random results in round {}".format(count))
        for ind_round in rounds:
            self.create_random_results_for_round(ind_round)

    def create_random_results_for_round(self, tournament_round):
        """Assign a likely random result to each game of *tournament_round*."""
        for ind_game in tournament_round:
            logger.debug("Setting result for game: {} ".format(ind_game))
            ind_game.set_likely_random_result()
            # NOTE(review): the "{}" placeholder below is never filled in.
            logger.debug("Result: {} vs ")

    def return_result_numbers(self):
        """
        This method is just a check on the data.
        It will return wins, byes, losses, and draws for
        the tournament.
        If there are no draws, it should return
        40 wins, 40 losses for 40 games, etc.
        """
        wins = 0
        byes = 0
        losses = 0
        draws = 0
        # Each game is seen from each participant's draw, so wins and losses
        # across all draws should balance out.
        for player_key, draw in self._tournament_draw_dict.items():
            for ind_game in draw.get_games():
                if ind_game.was_drawn():
                    draws += 1
                elif ind_game.was_bye():
                    byes += 1
                elif ind_game.did_player_id_win(player_key):
                    wins += 1
                else:
                    losses += 1
        return wins, byes, losses, draws

    def get_total_number_of_games(self):
        """Total number of games in the schedule."""
        return self._schedule.get_total_number_of_games()

    def get_leaderboard(self, maximum_number=0):
        """
        This method will return a sorted list of Slot objects.
        We go through the draw dictionary, tally up each player's score, and
        then sort the resulting slots.
        :param maximum_number: how many top entries to return; 0 means all.
        """
        slot_list = []
        for player_key, draw in self._tournament_draw_dict.items():
            tourney_player = self._schedule.get_scheduled_player_for_id(player_key)
            raw_points = draw.get_total_raw_points()
            weighted_points = draw.get_total_weighted_score()
            rounds_completed = draw.get_number_of_rounds_completed()
            slot_list.append(slot.Slot(tourney_player, rounds_completed, raw_points,
                                       str(round(weighted_points, chessnouns.WEIGHTED_SCORE_DECIMAL_PRECISION))))
        if maximum_number == 0:
            # We are saying 0 means return all of them
            return sorted(slot_list)
        else:
            # So we only want the top X players
            return sorted(slot_list)[:maximum_number]

    def calculate_playoff_candidates(self):
        """
        Here we are trying to figure out the top two people,
        or, if there are ties, the people tied for the top
        two slots
        :return bool, list - we are returning True if we used
        tiebreaks, false if not, and then a list of finalists
        """
        # We want to track how often we use tiebreaks, so we
        # will return one of these values. We are using
        # these variable names to make the code more readable
        # and less subject to error
        we_used_tiebreaks = True
        did_not_use_tiebreaks = False
        finalists = []
        # First, let's get the list
        leader_list = sorted(self.get_leaderboard())
        logger.info("\nHere is the leaderboard going into this:\n")
        logger.info(leader_list)
        """ We are going to get the first person, who is
        going to have the highest score, but they might
        not be the only person with that score. """
        top_person = leader_list[0]
        top_score = top_person.get_weighted_score()
        logger.debug("Top score was: {}".format(top_score))
        """What we need to know now is if he's alone
        at the top of the leaderboard."""
        next_person = leader_list[1]
        next_score = next_person.get_weighted_score()
        logger.debug("Next score was: {}".format(next_score))
        third_person = leader_list[2]
        third_score = third_person.get_weighted_score()
        logger.debug("Third score was: {}".format(third_score))
        # Let us see if the first person is alone
        if top_score > next_score:
            # OK, leader is alone, so add him
            finalists.append(top_person)
            # But now we need to know if there are ties beneath him
            if next_score > third_score:
                # Great, then we can just add the second and be done
                finalists.append(next_person)
                return did_not_use_tiebreaks, finalists
            else:
                # So we are in a situation where the first guy is alone,
                # but the next two (or more) are tied
                # So let's pop off the first guy, and send the list
                # for processing
                leader_list.pop(0)
                # Let's get just the tied ones
                tied_list = tiebreakers.get_tied_list(leader_list)
                outstanding_list = tiebreakers.get_one_playoff_contender_from_all_tied(self._schedule, tied_list)
                if len(outstanding_list) == 1:
                    finalists.append(outstanding_list[0])
                    return we_used_tiebreaks, finalists
                else:
                    # So we still have more than one. Let's see if we can get just
                    # one by calculating against losses
                    loss_calculated_list = tiebreakers.extract_using_losses(1, outstanding_list)
                    return we_used_tiebreaks, loss_calculated_list
        # So now we are looking at the possibility of multiple people tied at the top
        elif top_score == next_score:
            # We need to see if the third one is lower,
            # simplifying things
            if next_score > third_score:
                finalists.append(top_person)
                finalists.append(next_person)
                return did_not_use_tiebreaks, finalists
            else:
                # Everyone is tied at the top
                # We will use a function to resolve and try to get 2
                # But first we need to just get the tied players at the top
                tied_list = tiebreakers.get_tied_list(leader_list)
                loss_calculated_list = tiebreakers.extract_using_losses(2, tied_list)
                return we_used_tiebreaks, loss_calculated_list
| true |
b6253d791848d2c0769e120db480265aa03c646f | Python | profnssorg/capm | /CAPM - Versão final (sem unittest)/Unit tests/data_collect.py | UTF-8 | 1,025 | 2.703125 | 3 | [] | no_license | # Packages importation
import pandas as pd
from yahoofinancials import YahooFinancials
from sgs import SGS
# Defining Tickers, Market Return and Risk-Free Rate for usage
# Assets under analysis: one stock ticker and the Bovespa index as market proxy.
Tickers = ['USIM5.SA']
MarketReturn = ['^BVSP']
RiskFree = SGS()  # Brazilian central bank time-series client
# Collecting Tickers Historical Data
Period = 'daily'
Start_Date = '2019-02-15'    # ISO dates for the Yahoo Financials API
End_Date = '2019-03-01'
RF_Start_Date = '15/02/2019' # dd/mm/yyyy dates for the SGS API
RF_End_Date = '01/03/2019'
RF_Code = 12                 # SGS series code for the risk-free rate
# Functions for collecting Tickers and Market Return data
def Get_Tickers(T):
    """Return Yahoo historical price data for ticker list *T* over the module's date range."""
    YF_Tickers = YahooFinancials(T)
    YF_Tickers_Hist = YF_Tickers.get_historical_price_data(Start_Date,End_Date,Period)
    return YF_Tickers_Hist
def Get_MarketReturn(MR):
    """Return Yahoo historical price data for the market index list *MR*."""
    YF_MarketReturn = YahooFinancials(MR)
    YF_MarketReturn_Hist = YF_MarketReturn.get_historical_price_data(Start_Date,End_Date,Period)
    return YF_MarketReturn_Hist
# Function for collecting Risk-Free Rate data
def Get_RiskFree():
    """Return the SGS risk-free series over the module's date range.
    NOTE(review): series code 12 is hard-coded here instead of using RF_Code.
    """
    YF_RiskFree_Hist = RiskFree.get_valores_series(12,RF_Start_Date,RF_End_Date)
    return YF_RiskFree_Hist
| true |
dc3f14819cbc3fe98b91bb64d9228b11e0b064ac | Python | realayo/mentorship-py | /week-1/checksum.py | UTF-8 | 262 | 3.796875 | 4 | [] | no_license | def CheckNums(num1, num2):
    # Contract: -1 when the numbers are equal, True when num2 is the larger,
    # False otherwise (mixed return types are part of the exercise spec).
    if num1 == num2:
        return - 1
    elif num2 > num1:
        return True
    else:
        return False
# Simple CLI driver: read two floats and print the comparison result.
num1 = float(input("Enter first number: "))
num2 = float(input("Enter second number: "))
print (CheckNums(num1, num2)) | true |
21572c35e9ceaf06dea5ebf46ea2383108c5068b | Python | dizid2539/embadded_software | /run.py | UTF-8 | 2,517 | 3.0625 | 3 | [] | no_license | #!/usr/bin/env python3
from ev3dev.ev3 import *
from time import sleep
# Temporary setup: drive straight for a fixed time before the main rig is built.
temp_left_wheel = LargeMotor('outA')
temp_right_wheel = LargeMotor('outD')
temp_led = Leds()
temp_led.all_off()
sleep(1)
temp_left_wheel.run_forever(speed_sp = 100)
temp_right_wheel.run_forever(speed_sp = 100)
sleep(5.5)
temp_left_wheel.stop(stop_action = 'brake')
temp_right_wheel.stop(stop_action = 'brake')
del temp_left_wheel
del temp_right_wheel
del temp_led
# Initial setup: build the Machine wrapper and attach sensors/motors to their ports.
from classMachine import *
from algorithm import *
machine = Machine()
machine.set_field()
machine.set_LCD()
machine.set_LED()
machine.set_color('in3', 'in2', 'in1')
machine.set_wheel('outA', 'outD')
machine.set_passenger('outB', 'outC')
machine.set_gyro('in4')
machine.direction = 90
machine.location_x = 0
machine.location_y = 0
machine.set_initial_direction()
machine.location_x = 0
machine.location_y = 0
# 1. Drive the field edge by edge, sampling the store colour at each cell.
for i in range(6):
    machine.set_store_color()
    machine.run(1, machine.wheel.RUN_SPEED)
machine.turn(0, machine.wheel.TURN_SPEED)
for i in range(4):
    machine.set_store_color()
    machine.run(1, machine.wheel.RUN_SPEED)
machine.turn(270, machine.wheel.TURN_SPEED)
for i in range(6):
    machine.set_store_color()
    machine.run(1, machine.wheel.RUN_SPEED)
machine.turn(180, machine.wheel.TURN_SPEED)
for i in range(3):
    machine.set_store_color()
    machine.run(1, machine.wheel.RUN_SPEED)
machine.turn(90, machine.wheel.TURN_SPEED)
for i in range(6):
    machine.set_store_color()
    machine.run(1, machine.wheel.RUN_SPEED)
short_move(machine, 0, 0)
machine.locate(-1.5, 0, 'x')
# Signal that the scanning phase is done.
machine.led.on('RED')
sleep(3)
machine.led.off()
sleep(1)
# 2. Move to the information square, read the target store colour, and
#    compute the shortest delivery route; show it on the LCD.
machine.locate(0, 0, 'x')
short_move(machine, 0, 4)
machine.locate(-2.25, 4, 'x')
target_color = machine.color.COLOR_WORD[machine.color.middle_get_value()]
route = TSP(machine.field.store_color, target_color)
route_display = ""
for [x,y] in route:
    route_display += "%c%d " % (x+65, 5-y)  # render a cell as e.g. "A1"
machine.lcd.print_text(route_display)
sleep(5)
# 3. Follow the route, dropping a passenger at each store of the target
#    colour, then head for the exit.
machine.locate(0, 4, 'x')
if machine.field.store_color[4][0] == target_color:
    machine.drop_passenger()
for i in range(1, len(route)):
    short_move(machine, route[i][0], route[i][1])
    if machine.field.store_color[route[i][1]][route[i][0]] == target_color:
        machine.drop_passenger()
machine.locate(7.5, 0, 'x') | true |
73fd361478de7a553f27256cc706725d61e9ecd6 | Python | aRToLoMiay/Special-Course | /Первые шаги/area_and_circumference.py | WINDOWS-1251 | 305 | 3.5 | 4 | [] | no_license | # -*- coding: utf-8 -*-
# Exercise 3: area and circumference of a circle of radius r (Python 2 script;
# NOTE(review): the original Cyrillic comments/strings were mojibaked by a
# WINDOWS-1251/UTF-8 mismatch).
from math import pi
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
r = 2
A = pi*r**2  # area
C = 2*pi*r   # circumference
print " = %f \n = %f" % (A, C) | true |
fed60295f19343edd2d06c2a9c3fd5538410010e | Python | tm-26/Enhancing-stock-price-prediction-models-by-using-concept-drift-detectors | /src/experiments/experiment7.py | UTF-8 | 2,784 | 3.3125 | 3 | [] | no_license | # Experiment 7 evaluates method 3
import csv
import matplotlib.pyplot
import os
import pandas
import sys
sys.path.append("..")
from main import main
if __name__ == "__main__":
    """
    Parameters:
        args[0] --> splitDays parameter
            Controls how many days the model waits before starting to train on the current distribution.
    """
    # Handle arguments: default 5, "all" sweeps the whole 3..30 range.
    splitDays = 5
    if len(sys.argv) >= 2:
        if sys.argv[1].lower() == "all":
            splitDays = -1
        else:
            try:
                splitDays = int(sys.argv[1])
                if not 3 <= splitDays <= 30:
                    raise ValueError
                # NOTE(review): this elif is unreachable -- 0 already fails
                # the range check above and raises.
                elif splitDays == 0:
                    splitDays = 5
            except ValueError:
                print("Argument error: " + str(sys.argv[1]) + " not a valid splitDays parameter")
                print("splitDays parameter needs to be an integer between 3 and 30")
                exit(-2)
    if splitDays == -1:
        # Sweep mode: run every splitDays value not already in the CSV,
        # persisting after each run, then plot ACC/MCC vs splitDays.
        if not os.path.exists("../../results"):
            os.makedirs("../../results")
        if not os.path.exists("../../results/method3.csv"):
            # NOTE(review): `file` shadows the (Python 2) builtin name.
            file = open("../../results/method3.csv", "w+")
            writer = csv.writer(file)
            writer.writerow(["splitDays", "ACC", "MCC"])
            file.close()
        results = pandas.read_csv("../../results/method3.csv")
        complete = []
        for i in results["splitDays"]:
            complete.append(int(i))
        for i in range(3, 31):
            if i not in complete:
                print("+------------------------------+")
                print("Starting splitDays parameter = " + str(i))
                print("+------------------------------+")
                acc, mcc = main(True, 3, i)
                # NOTE(review): DataFrame.append was removed in pandas 2.0.
                results = results.append({"splitDays": i, "ACC": acc, "MCC": mcc}, ignore_index=True)
                results.to_csv("../../results/method3.csv", index=False)
        splitDays = []
        acc = []
        mcc = []
        for i in results.iterrows():
            splitDays.append(int(i[1]["splitDays"]))
            # Stored values look like "<number> <extra>"; keep the number.
            acc.append(float(i[1]["ACC"].split(' ')[0]))
            mcc.append(float(i[1]["MCC"].split(' ')[0]))
        matplotlib.pyplot.plot(splitDays, acc)
        matplotlib.pyplot.xlabel("splitDays parameter")
        matplotlib.pyplot.ylabel("ACC")
        matplotlib.pyplot.show()
        matplotlib.pyplot.plot(splitDays, mcc)
        matplotlib.pyplot.xlabel("splitDays parameter")
        matplotlib.pyplot.ylabel("MCC")
        matplotlib.pyplot.show()
    else:
        # Single run for the requested splitDays value.
        print("+------------------------------+")
        print("Starting splitDays parameter = " + str(splitDays))
        print("+------------------------------+")
        acc, mcc = main(True, 3, splitDays)
| true |
acc740793e9528d3885c10bce9bb2a775f07d9d0 | Python | Aasthaengg/IBMdataset | /Python_codes/p03855/s915979030.py | UTF-8 | 3,020 | 2.9375 | 3 | [] | no_license | from collections import deque
from heapq import heapify,heappop,heappush,heappushpop
from copy import copy,deepcopy
from itertools import permutations,combinations
from collections import defaultdict,Counter
from pprint import pprint
def myinput():
    # Read one whitespace-separated line of integers from stdin.
    # NOTE: returns a lazy map object (Python 3); callers wrap it in list().
    return map(int,input().split())
def mycol(data, col):
    """Return column *col* of the 2-D list *data* as a flat list."""
    return [row[col] for row in data]
def mysort(data, col):
    """Sort *data* in place, ascending by column *col*, and return it."""
    data.sort(key=lambda row: row[col])
    return data
def mymax(data):
    """Maximum element of a 2-D list; -inf when *data* has no rows."""
    best = -1 * float("inf")
    for row in data:
        best = max(best, max(row))
    return best
def mymin(data):
    """Minimum element of a 2-D list; +inf when *data* has no rows."""
    best = float("inf")
    for row in data:
        best = min(best, min(row))
    return best
class UnionFind():
    """Disjoint-set forest with path compression and union by size.

    parents[i] holds i's parent, or -(component size) when i is a root.
    """

    def __init__(self, n):
        self.n = n
        self.parents = [-1] * n

    def find(self, x):
        """Return the root representative of x's group (compressing the path)."""
        if self.parents[x] < 0:
            return x
        self.parents[x] = self.find(self.parents[x])
        return self.parents[x]

    def union(self, x, y):
        """Merge the groups containing x and y; the larger group absorbs the smaller."""
        x, y = self.find(x), self.find(y)
        if x == y:
            return
        if self.parents[x] > self.parents[y]:
            x, y = y, x
        self.parents[x] += self.parents[y]
        self.parents[y] = x

    def size(self, x):
        """Number of elements in x's group."""
        return -self.parents[self.find(x)]

    def same(self, x, y):
        """True when x and y belong to the same group."""
        return self.find(x) == self.find(y)

    def members(self, x):
        """All elements of x's group, in ascending order."""
        root = self.find(x)
        return [i for i in range(self.n) if self.find(i) == root]

    def roots(self):
        """All root representatives."""
        return [i for i, parent in enumerate(self.parents) if parent < 0]

    def group_count(self):
        """Number of disjoint groups."""
        return len(self.roots())

    def all_group_members(self):
        """Mapping root -> list of that group's members."""
        return {root: self.members(root) for root in self.roots()}

    def __str__(self):
        # One "root: [members]" line per group.
        return '\n'.join('{}: {}'.format(root, self.members(root)) for root in self.roots())
# AtCoder-style driver: n cities, k roads, l railways; for each city report
# how many cities share both its road component and its rail component.
n,k,l = myinput()
pq = [ list(myinput()) for _ in range(k) ]  # road edges (1-based pairs)
rs = [ list(myinput()) for _ in range(l) ]  # rail edges (1-based pairs)
uf1 = UnionFind(n)
for i in range(k):
    p = pq[i][0]
    q = pq[i][1]
    uf1.union(p-1,q-1)
uf2 = UnionFind(n)
for i in range(l):
    r = rs[i][0]
    s = rs[i][1]
    uf2.union(r-1,s-1)
# Count cities per (road-root, rail-root) pair.
d = defaultdict(lambda: 0)
for i in range(n):
    d[(uf1.find(i),uf2.find(i))] += 1
# Each city's answer is the size of its pair's bucket.
for i in range(n):
    ans = d[(uf1.find(i),uf2.find(i))]
    print(ans)
7fe820cee2066161071adaac4b7fbb53b61ca523 | Python | Tommo2365/pythonMiscCode | /CSVWrite.py | UTF-8 | 399 | 3 | 3 | [] | no_license | import numpy
import numpy as np
import matplotlib.pyplot as plt
import csv
def CSVWrite(fileName, numpyArray):
    """Write each row of *numpyArray* (any iterable of rows) to *fileName*
    as comma-separated values, overwriting any existing file.
    """
    # Fixed the garbled progress message ('WritingFile File:') and removed
    # the dead commented-out counter.
    print('Writing file: ' + fileName)
    # newline='' hands line-ending control to the csv module, per its docs.
    with open(fileName, 'w', newline='') as csv_file:
        csv_writer = csv.writer(csv_file, delimiter=',')
        for row in numpyArray:
            csv_writer.writerow(row)
| true |
cc702e154322159bbc03e4b42cb9c0294dff0385 | Python | JKodner/gofish | /gofish.py | UTF-8 | 3,194 | 3.171875 | 3 | [] | no_license | import random
import sys
deck = []                # shared draw pile
ud = []                  # user's hand
udt = []                 # cards the user has matched/taken
cd = []                  # computer's hand
cdt = []                 # cards the computer has matched/taken
COUNT = " "              # cards per starting hand (set from user input below)
count = 1                # current round number
card_count = 0           # NOTE(review): never used in this script
first = ["user", "com"]  # NOTE(review): never used in this script
choices = ["", "deck", "draw", "exit", "com", "decks"]
def sep(s, n):
    # Print the string s repeated n times (visual separator; Python 2 print).
    print s * n
NUM = " "
sep(" ", 1)
sep("-*-", 1)
# Prompt until the user supplies a valid card-value range (>= 9).
while type(NUM) != int:
    try:
        NUM = int(raw_input("To What Range do You Want Your Numbers on Your Cards to be?"))
        if not NUM >= 9:
            NUM = " "
            print "Range Must be Greater or Equal to 9."
    except ValueError:
        None
COUNT = " "
# Prompt until the user supplies a valid hand size (>= 7).
while type(COUNT) != int:
    try:
        COUNT = int(raw_input("How Many Cards do You Want in Each Deck?"))
        if not COUNT >= 7:
            COUNT = " "
            print "Deck Count Must be Greater or Equal to 7."
    except ValueError:
        None
sep("-*-", 1)
num = range(1, NUM + 1)  # the possible card values
sep(" ", 1)
sep("-", 30)
print "Creating the Game Deck..."
sep("-", 30)
# Fill the deck with random cards, shuffle, and deal COUNT cards each.
for i in range(COUNT*2):
    deck.append(random.choice(num))
for i in range(3):
    random.shuffle(deck)
for i in range(COUNT):
    ud.append(deck.pop())
for i in range(COUNT):
    cd.append(deck.pop())
# Top the draw pile back up with 52 more random cards.
for i in range(52):
    deck.append(random.choice(num))
def ruc():
    # User draws one card from the deck into their hand.
    ud.append(deck.pop())
    print "You Have Drawn a %d" % ud[-1]
def rcc():
    # Computer draws one card from the deck into its hand.
    cd.append(deck.pop())
    print "The Computer has Drawn a %d." % cd[-1]
ask = " "  # last menu command entered; "" (plain Enter) means "play the turn"
def response():
    # Show the turn menu and handle the meta-commands (deck/draw/exit/com);
    # a plain [Enter] leaves ask == "" so the main loop plays the turn.
    global ask
    ask = " "
    global count
    while ask.lower() not in choices:
        sep(" ", 1)
        sep("-*-", 1)
        print "Press [Enter] if You Would Like to Continue."
        print "Type [Deck] if You Would Like to See Your Deck & Statistics."
        print "Type [Draw] if You Believe It is Necessary to Draw a Card."
        print "Type [Exit] to Exit the Program if Needed."
        ask = raw_input("")
        sep("-*-", 1)
        sep(" ", 1)
        if ask.lower() == choices[1]:
            # Show the user's sorted hand plus game statistics.
            sep("-", 30)
            ud.sort()
            print "Your Deck:"
            print ud
            sep("-", 30)
            sep(" ", 1)
            sep("#", 30)
            print "You Have %d cards in Your deck." % len(ud)
            print "You are Currently in Round #%d" % count
            print "The Game Deck Currently has %d Cards." % len(deck)
            sep("#", 30)
            sep(" ", 1)
        elif ask.lower() == choices[2]:
            # Voluntary draw.
            ruc()
            print "You Have Drawn: %d" % ud[-1]
            sep(" ", 1)
        elif ask.lower() == choices[3]:
            print "Exiting the Program..."
            # NOTE(review): `print sep(" ", 1)` also prints sep's None return.
            print sep(" ", 1)
            sys.exit()
        elif ask.lower() == choices[4]:
            # Debug view: show the computer's sorted hand.
            sep("-", 30)
            cd.sort()
            print "Computer's Deck:"
            print cd
            sep("-", 30)
            sep(" ", 1)
            sep("#", 20)
            # NOTE(review): reports len(ud) but is labelled as the computer's count.
            print "The Computer Has %d cards in Their deck." % len(ud)
            print "You are Currently in Round #%d" % count
            print "The Game Deck Currently has %d Cards." % len(deck)
            sep("#", 20)
            sep(" ", 1)
# Main game loop: keep playing while anyone still holds cards.
while len(ud) != 0 or len(cd) != 0 or len(deck) != 0:
    response()
    if ask.lower() == choices[0]:
        ask_card = " "
        # Ask until the user names a card actually present in their own hand.
        while ask_card not in ud:
            try:
                ask_card = int(raw_input("What Card Do You Wish to Call?"))
            except ValueError:
                None
        if ask_card in cd:
            print "Foo"
            for i in cd:
                if i == ask_card:
                    # NOTE(review): missing `print` -- this line is a no-op
                    # string expression.
                    "You Have Taken a %d From the Computer." % i
                    udt.append(i)
                    # NOTE(review): removing from cd while iterating it can
                    # skip duplicate matches.
                    cd.remove(i)
        else:
            rcc()
        com_ask_card = random.choice(num)
        print "The Computer has asked for all of your %d's" % com_ask_card
        if com_ask_card in ud:
            for i in ud:
                # NOTE(review): compares against ask_card; com_ask_card was
                # presumably intended -- confirm.
                if i == ask_card:
                    print "The Computer Has Taken a %d From You." % i
                    cdt.append(i)
                    ud.remove(i)
        else:
            ruc()
| true |
5a880150e2f5e807f5d8f7e30aa14848fdbce069 | Python | Superbeet/LeetCode | /Minimum_Height_Trees.py | UTF-8 | 1,800 | 3.359375 | 3 | [] | no_license | # 116, remove outdegree 0 nodes one by one until every node only has one adjcent node
class Solution(object):
def findMinHeightTrees(self, n, edges):
"""
:type n: int
:type edges: List[List[int]]
:rtype: List[int]
"""
adj_list = [set() for j in range(n)]
# store the tree
for s, e in edges:
adj_list[s].add(e)
adj_list[e].add(s)
leaves = [i for i in range(n) if len(adj_list[i])<=1]
while n>2:
n -= len(leaves)
new_leaves = []
for x in leaves:
for y in adj_list[x]:
adj_list[y].remove(x)
# calculate outdegrees
if len(adj_list[y])==1:
new_leaves.append(y)
leaves = new_leaves
return leaves
class Solution(object):
def findMinHeightTrees(self, n, edges):
"""
:type n: int
:type edges: List[List[int]]
:rtype: List[int]
"""
if n==1:
return [0]
res = []
indegree = [0 for i in range(n)]
adj_list = [set() for i in range(n)]
queue = []
for x,y in edges:
adj_list[x].add(y)
indegree[y] += 1
adj_list[y].add(x)
indegree[x] += 1
for i in xrange(n):
if indegree[i]==1:
queue.append(i)
while n>2:
size = len(queue)
for j in xrange(0, size):
node = queue.pop(0)
n -= 1
for i in adj_list[node]:
indegree[i] -= 1
if indegree[i]==1:
queue.append(i)
return queue
| true |
301747c7e6513e4c6b58dd4ed9681765d93414cb | Python | CatOfTheCannals/programming-language-paradigms | /practicas/resolucion_logica/Charly/ex2.py | UTF-8 | 1,633 | 3.375 | 3 | [] | no_license | I.
i)
FNC: p v (¬p)
FC: {p, ¬p}
negamos
{¬p}, {p}
el resolvente es []
la formula original es tutologia
ii)
FNC: ¬p v ¬q v p
FC: {¬p, ¬q, p}
negamos
{p}, {q}, {¬p}
resolvente de p y ¬p es []
es tautologia
iii)
FNC: (¬p v p) ^ (¬q v p)
FC: {¬p, p}, {¬q, p}
negamos
(p ^ ¬p) v (q ^ ¬p)
distribuimos
((p v (q ^ ¬p)) ^ (¬p v (q ^ ¬p)))
distribuimos
((p v q ) ^ (p v ¬p)) ^ ((¬p v q) ^ (¬p v ¬p))
FC: {p, q}, {p, ¬p}, {¬p, q}, {¬p, ¬p}
resolvente de {p, q} y {¬p, q} es {q}
{p, q}, {p, ¬p}, {¬p, q}, {¬p, ¬p}, {q}
resolvente de {p, ¬p} y {¬p, ¬p} es {¬p}
{p, q}, {p, ¬p}, {¬p, q}, {¬p, ¬p}, {q}, {¬p}
como no podemos calcular mas resolventes, no podemos afirmar que esta formula sea insatisfacible
con lo cual, la formula original no es tautologia. esto puede verse para el caso p = true ^ q = false
iv)
FNC: (¬p v p)
FC: {¬p, p}
negamos
p ^ ¬p
resolvente []
con lo cual la formula original es tautologia
II.
qvq: (¬p ⊃ q) ∧ (p ⊃ q) ∧ (¬p ⊃ ¬q) ⊃ (p ∧ q)
simplificar implicaciones
¬((p v q) ∧ (¬p v q) ∧ (p v ¬q)) v (p ∧ q)
negar
((p v q) ∧ (¬p v q) ∧ (p v ¬q)) ∧ ¬(p ∧ q)
pasar a FNC
(p v q) ∧ (¬p v q) ∧ (p v ¬q) ∧ (¬p v ¬q)
FC: {p, q}, {¬p, q}, {p, ¬q}, {¬p, ¬q}
resolvente de {p, q}, {¬p, q} es {q}
{p, q}, {¬p, q}, {p, ¬q}, {¬p, ¬q}, {q}
resolvente de {p, ¬q} y {¬p, ¬q} es {¬q}
{p, q}, {¬p, q}, {p, ¬q}, {¬p, ¬q}, {q}, {¬q}
resolvente de {q} y {¬q} es []
esta formula no es valida
la formula inicial es tautologia
podemos afirmar que la implicacion original es correcta
| true |
8b30945ddac420b2a86ae254dcc970f77bee936b | Python | Ghostkeeper/Luna | /plugins/configuration/configurationtype/configuration_error.py | UTF-8 | 1,294 | 2.984375 | 3 | [
"CC0-1.0"
] | permissive | #!/usr/bin/env python
#-*- coding: utf-8 -*-
#This software is distributed under the Creative Commons license (CC0) version 1.0. A copy of this license should have been distributed with this software.
#The license can also be read online: <https://creativecommons.org/publicdomain/zero/1.0/>. If this online license differs from the license provided with this software, the license provided with this software should be applied.
"""
Defines a class of exceptions used to denote a false state of being for
configuration.
"""
class ConfigurationError(Exception):
"""
This exception denotes that something went wrong in the configuration.
It is mostly a marker class, but also provides the type of configuration in
which something went wrong.
"""
def __init__(self, message, configuration_type):
"""
Creates a new ConfigurationError.
:param message: The message describing the error that occurred.
:param configuration_type: The configuration type with which the error
occurred.
"""
#Prepend the configuration type before the error message.
super(ConfigurationError, self).__init__("{configuration_type}: {message}".format(configuration_type=configuration_type, message=message))
self.configuration_type = configuration_type #Also store it here for debugging purposes. | true |
4fd30e2067ab752c9590d2289b25043f357463ec | Python | VeritasCurat/Synja | /project/dialog/simpleNLU_EN.py | UTF-8 | 1,295 | 2.84375 | 3 | [] | no_license | '''
Created on 17.05.2019
@author: Johannes
Problem: RASA NLU kann teilweise sogar nicht trainingsdaten zu intents parsen (>40%)
Beispiel: 'intent': {'name': 'gruss', 'confidence': 0.344466498747804}, 'entities': [], 'text': 'hi'}
Bildet die trainingsdaten auf intents ab. Als sicherung das einfachste Eingaben richtig erkannt werden.
'''
import os
import io
import json
verzeichnispfad = os.path.realpath(__file__)
class simpleNLU_EN(object):
intentreg = {}
def toleranz(self, text):
text = text.lower()
text = text.replace(",", "")
text = text.replace(".", "")
text = text.replace(";", "")
text = text.replace("!", "")
text = text.replace("?", "")
text = text.replace(" ", "")
return text
def toleranzpruefung(self,text):
text = self.toleranz(text)
if(text in self.intentreg.keys()):
return self.intentreg[text]
else: return ""
def laden(self):
path = os.path.join(os.path.dirname(verzeichnispfad), 'nlu', 'en','training_en.json')
with io.open(path) as f:
data = json.load(f)['rasa_nlu_data']['common_examples']
for l in data:
self.intentreg[self.toleranz(l['text'])] = l['intent']
def __init__(self):
self.laden()
| true |
fe5e7b215a96d58b90f31da24d5804ce2a699575 | Python | trucktrav/MATE_HELPER | /DB_Helper/select_attrs.py | UTF-8 | 670 | 2.578125 | 3 | [] | no_license | import tkinter as tk
import sqlite3
from sqlite3 import Error
import os
import pandas as pd
import pySelector
def get_header():
sql_headers = 'SELECT header FROM tbl_header;'
pwd = os.path.abspath(os.path.dirname(__file__))
database = pwd + '\\metrics_database.db'
db = sqlite3.connect(database)
head = pd.read_sql(sql_headers, con=db)
return head
def main():
header = get_header()
h_list = header['header'].tolist()
app = pySelector.ListApp()
data = {'intermediates': h_list, 'attributes': h_list, 'calcs': h_list}
app.set_data(list_data=data)
app.mainloop()
print('done')
if __name__ == "__main__":
main()
| true |
b2721795d9d15dd2adf364db3f36427de3bb78a4 | Python | DevinKlepp/barnsley-fern | /barnsleyfern.py | UTF-8 | 1,137 | 3.8125 | 4 | [] | no_license | # Program that produces barnsley fern image
# Devin Klepp July 21st 2018
import time
import graphics as g
import random as r
# Calculating time to see which program is faster
start_time = time.time()
xold = 0 # Initial points are at the origin
yold = 0
width = 700
height = 700
# Plotting surface
win = g.GraphWin("Barnsley Fern", width, height)
win.setBackground("black")
# −2.1820 < x < 2.6558 and 0 ≤ y < 9.9983 given plot ranges
# Looping for wanted number of points
for i in range(1, 50000):
prob = r.random()
if prob < 0.01:
xnew = 0
ynew = 0.16 * yold
elif prob < 0.86:
xnew = 0.85 * xold + 0.04 * yold
ynew = -0.04 * xold + 0.85 * yold + 1.6
elif prob < 0.93:
xnew = 0.20 * xold - 0.26 * yold
ynew = 0.23 * xold + 0.22 * yold + 1.6
else:
xnew = -0.15 * xold + 0.28 * yold
ynew = 0.26 * xold + 0.24 * yold + 0.44
xold = xnew
yold = ynew
win.plot(width / 2 + (xnew * 120),-1 * ynew * 65 + height, "green")
#print(prob, xnew, ynew)
print("Original took", time.time() - start_time, "seconds to run.")
| true |
db71bcd3022acd85dc5f06673c2e29fda63fc70e | Python | narang99/nbtex | /nbtex/core/operators/Operator.py | UTF-8 | 1,276 | 2.515625 | 3 | [
"MIT"
] | permissive | from nbtex.LatexInterface.LatexFormatters import LatexBasicFormatter
from functools import partial
class BasicOperator:
def __init__(self, precedence, combine):
self._precedence, self._combine = precedence, combine
def __call__(self, *args):
return self._combine(*args)
@property
def precedence(self):
return self._precedence
class InvertibleOperator(BasicOperator):
def __init__(self, precedence, combine, invert_combine=None):
super().__init__(precedence, combine)
self._invert_combine = invert_combine
def __call__(self, *args):
return super().__call__(*args)
def __invert__(self):
if self._invert_combine is not None:
return InvertibleOperator(
self.precedence, self._invert_combine, self._combine
)
else:
return self
def infix_combine(op):
return partial(LatexBasicFormatter.binary_operation_output, op)
class InvertibleInfixOperator(InvertibleOperator):
def __init__(self, precedence, op, invert_op=None):
c, invc = infix_combine(op), infix_combine(invert_op)
if invert_op is None:
super().__init__(precedence, c)
else:
super().__init__(precedence, c, invc)
| true |
9b5f1c1e2d2c09754e7c0a6c5075c685dec2a4c7 | Python | kapoor-rakshit/Miscellaneous | /Rotate array.py | UTF-8 | 846 | 3.859375 | 4 | [] | no_license | def leftrotate():
reverse(0,op-1) #reverse first op elements
reverse(op,n-1) #reverse remaining elements
reverse(0,n-1) #revese entire list
def rightrotate():
reverse(n-op,n-1) #reverse last op elements
reverse(0,n-op-1) #reverse remaining elements
reverse(0,n-1) #reverse entire list
def reverse(start,end):
tp=0
while start<end: #function to reverse
tp=a[start]
a[start]=a[end]
a[end]=tp
start+=1
end-=1
n=int(input())
a=list(map(int,input().split()))
op=int(input()) #no. of rotations
op=op%n #if rotations are greater than no. of elements
rightrotate() #call to rightrotate
print(*a)
#leftrotate()
#print(*a) #call to leftrotate
| true |
bc18ee74f7efd828fa1834c30ff5e94df86bd336 | Python | sgametrio/hockey-stick-monitor | /server/lab.py | UTF-8 | 312 | 2.640625 | 3 | [] | no_license | import numpy as np
import pandas as pd
from scipy import integrate
gen = np.random.RandomState(0)
x = gen.randn(100, 10)
names = [chr(97 + i) for i in range(10)]
df = pd.DataFrame(x, columns=names)
print(df.head())
df = df.apply(lambda x: np.insert(integrate.cumtrapz(x.values), 0, 0, axis=0))
print(df.head())
| true |
36ceae0a1d02d2cb76877650f817a4c9e6320a51 | Python | lllchen/python100daysscript | /08/08practice1.py | UTF-8 | 657 | 3.265625 | 3 | [] | no_license | #!/usr/bin/env python3
#-*- coding=utf-8 -*-
'''
定义一个类描述数字时钟
'''
__auther__ = 'BrilliantDawn'
import time
class numClock(object):
def __init__(self) -> None:
self.__hours = 0
self.__minutes = 0
self.__seconds = 0
def run(self):
while True:
self.__hours = time.localtime(time.time()).tm_hour
self.__minutes = time.localtime(time.time()).tm_min
self.__seconds = time.localtime(time.time()).tm_sec
print(f'{self.__hours}:{self.__minutes}:{self.__seconds}')
time.sleep(1)
if __name__ == '__main__':
lc = numClock()
lc.run() | true |
282578da6d2ab198b66b411ecbb3d39227593ee2 | Python | slickFix/Python_algos | /DS_ALGO/GFG_practise/Doubly_ll_merge_sort.py | UTF-8 | 4,642 | 3.6875 | 4 | [] | no_license | import random
class Node:
def __init__(self,data):
self.data = data
self.next_node = None
self.prev_node = None
class Doubly_ll:
def __init__(self):
self.head = None
def insert_l(self,data):
new_node = Node(data)
curr = self.head
if curr is None:
self.head = new_node
return
else:
while curr.next_node is not None:
curr = curr.next_node
curr.next_node = new_node
new_node.prev_node = curr
def print_ll_pos(self,first_node):
curr = first_node
while curr is not None:
print(curr.data, end= ' ')
curr = curr.next_node
print()
def print_ll(self):
curr = self.head
while curr is not None:
print(curr.data, end= ' ')
curr = curr.next_node
print()
def _length_nodes(self,pointer):
#print('head lenght pointer ',pointer.data)
count = 0
while pointer is not None:
#print(pointer.data,end=' ')
count+= 1
pointer = pointer.next_node
#print()
return count
def _get_left_right_head_nodes(self,first_node,length):
curr = first_node
count = 0
while count != length-1:
curr = curr.next_node
count +=1
right_head = curr.next_node
curr.next_node = None
right_head.prev_node = None
return first_node,right_head
def _merge_ll(self,left_head,right_head):
left_pointer = left_head
right_pointer = right_head
if left_pointer.data < right_pointer.data:
ret_head = left_pointer
else:
ret_head = right_pointer
while left_pointer is not None and right_pointer is not None:
if left_pointer.data < right_pointer.data:
if left_pointer.next_node:
if left_pointer.next_node.data <= right_pointer.data:
left_pointer = left_pointer.next_node
else:
temp = left_pointer.next_node
left_pointer.next_node = right_pointer
right_pointer.prev_node = left_pointer
left_pointer = temp
continue
else:
temp = left_pointer.next_node
left_pointer.next_node = right_pointer
right_pointer.prev_node = left_pointer
left_pointer = temp
continue
else:
if right_pointer.next_node:
if right_pointer.next_node.data <= left_pointer.data:
right_pointer = right_pointer.next_node
else:
temp = right_pointer.next_node
right_pointer.next_node = left_pointer
left_pointer.prev_node = right_pointer
right_pointer = temp
continue
else:
temp = right_pointer.next_node
right_pointer.next_node = left_pointer
left_pointer.prev_node = right_pointer
right_pointer = temp
continue
# self.print_ll_pos(ret_head)
return ret_head
def merge_sort_ll(self,first_node):
length = self._length_nodes(first_node)
if length == 1:
return first_node
if length >1:
left_head,right_head = self._get_left_right_head_nodes(first_node,length//2)
sorted_left = self.merge_sort_ll(left_head)
sorted_right = self.merge_sort_ll(right_head)
# print('left')
# self.print_ll_pos(sorted_left)
# print('right')
# self.print_ll_pos(sorted_right)
return self._merge_ll(sorted_left,sorted_right)
if __name__ == '__main__':
d_ll = Doubly_ll()
print('inserting random order node (10 nodes) ')
for i in range(10):
data = random.randint(10,99)
d_ll.insert_l(data)
# for i in [44,62,34,47,71,56,18,62,56,26]:
# data = random.randint(0, 100)
#
# d_ll.insert_l(i)
print('printing the nodes which are added randomly')
d_ll.print_ll()
# print(d_ll._length_nodes(d_ll.head))
print('merge sorting the d_ll')
d_ll.head = d_ll.merge_sort_ll(d_ll.head)
print('printing the merge sorted d_ll')
d_ll.print_ll() | true |
b7be81be771d87da097a7fac8bf8f8c240a63931 | Python | julie-ngu/Unit0-04 | /hello_world_international.py | UTF-8 | 615 | 2.921875 | 3 | [] | no_license | # Created by: Julie Nguyen
# Created on: Sept 2017
# Created for: ICS3U
# Daily Assignment - Unit0-04
# This program is the Hello, World! program, but as a GUI with 3 buttons
import ui
def english_touch_up_inside(sender):
# displays the English version
view['hello_world_label'].text = ('Hello, World!')
def french_touch_up_inside(sender):
# displays the French version
view['hello_world_label'].text = ('Bonjour, Monde!')
def spanish_touch_up_inside(sender):
# displays the Spanish version
view['hello_world_label'].text = ('iHola, Mundo!')
view = ui.load_view()
view.present('full_screen')
| true |
94b4edcce45a58fbd1ca21776c521f66bc15c8b1 | Python | baxpr/sct-singularity | /fmri_pipeline/make_gm_rois.py | UTF-8 | 3,453 | 2.8125 | 3 | [] | no_license | #!/opt/sct/python/envs/venv_sct/bin/python
#
# Load fmri space masks and create dorsal and ventral ROIs
import sys
import nibabel
import numpy
import scipy.ndimage
gm_file = sys.argv[1]
label_file = sys.argv[2]
# Load images
gm = nibabel.load(gm_file)
label = nibabel.load(label_file)
# Verify that geometry matches
if not (label.get_qform() == gm.get_qform()).all():
raise Exception('GM/LABEL mismatch in qform')
if not (label.get_sform() == gm.get_sform()).all():
raise Exception('GM/LABEL mismatch in sform')
if not (label.affine == gm.affine).all():
raise Exception('GM/LABEL mismatch in affine')
if not label.header.get_data_shape() == gm.header.get_data_shape():
raise Exception('GM/LABEL mismatch in data shape')
# Verify that orientation is RPI (as SCT calls it) or LAS (as nibabel calls it)
ort = nibabel.aff2axcodes(gm.affine)
if not ort == ('L', 'A', 'S'):
raise Exception('GM image orientation is not nibabel LAS')
# Split GM into horns, slice by slice at center of mass
gm_data = gm.get_data()
gm_data[gm_data>0] = 1
dims = gm.header.get_data_shape()
if not (dims[2]<dims[0] and dims[2]<dims[1]):
raise Exception('Third dimension is not slice dimension?')
nslices = dims[2]
horn_data = numpy.zeros(dims)
for s in range(nslices):
slicedata = numpy.copy(gm_data[:,:,s])
quadrants = numpy.zeros(dims[0:2])
com = [int(round(x)) for x in scipy.ndimage.center_of_mass(slicedata)]
# Label quadrants. For correct data orientation, these are
# 1 - left ventral
# 2 - right ventral
# 3 - left dorsal
# 4 - right dorsal
quadrants[com[0]+1:,com[1]+1:] = 1
quadrants[:com[0],com[1]+1:] = 2
quadrants[com[0]+1:,:com[1]] = 3
quadrants[:com[0],:com[1]] = 4
# Set centerline values to zero
slicedata[com[0]:com[0]+1,:] = 0
slicedata[:,com[1]:com[1]+1] = 0
# Label the four horns
horn_data[:,:,s] = numpy.multiply(slicedata,quadrants)
# Save labeled horns to file with CSV index
leveldict = {
1: "Lventral",
2: "Rventral",
3: "Ldorsal",
4: "Rdorsal"
}
horn = nibabel.Nifti1Image(horn_data,gm.affine,gm.header)
nibabel.save(horn,'fmri_gmcut.nii.gz')
with open('fmri_gmcut.csv','w') as f:
f.write("horn,label\n")
f.write(leveldict.get(1) + ",1\n")
f.write(leveldict.get(2) + ",2\n")
f.write(leveldict.get(3) + ",3\n")
f.write(leveldict.get(4) + ",4\n")
# Mask labels by gray matter and write to file
label_data = label.get_data()
gm_inds = gm_data>0
gm_data[gm_inds] = label_data[gm_inds]
gmmasked = nibabel.Nifti1Image(gm_data,gm.affine,gm.header)
nibabel.save(gmmasked,'fmri_gmlabeled.nii.gz')
# Label by level and horn:
# 301 - C3, left ventral
# 302 - C3, right ventral
# etc
label_data = numpy.multiply(label_data,horn_data>0)
horn_data = numpy.multiply(horn_data,label_data>0)
hornlevel_data = 100*label_data + horn_data
hornlevel = nibabel.Nifti1Image(hornlevel_data,gm.affine,gm.header)
nibabel.save(hornlevel,'fmri_gmcutlabel.nii.gz')
hvals=numpy.round(numpy.unique(hornlevel_data))
hvals = hvals[hvals!=0]
with open('fmri_gmcutlabel.csv','w') as f:
f.write("horn_level,horn,level,label\n")
for hval in hvals:
thishorn = str(int(hval))[-1]
thislevel = str(int(hval))[0:-2]
thishornlevel = "%s_%s" % (leveldict.get(int(thishorn)),thislevel)
f.write("%s,%s,%s,%d\n" % (thishornlevel,leveldict.get(int(thishorn)),thislevel,hval))
| true |
7f529d9518c6c3602b509eaba1d05e7501ef4cae | Python | est22/PS_algorithm | /스택,큐,덱/10828.py | UTF-8 | 575 | 3.359375 | 3 | [] | no_license | import sys
input = sys.stdin.readline
stack = []
for i in range(int(input())):
func = input().split()
if func[0] == 'push':
stack.append(func[1])
elif func[0] == 'pop':
if len(stack) == 0:
print(-1)
else:
print(stack.pop())
elif func[0] == 'size':
print(len(stack))
elif func[0] == 'empty':
if len(stack) == 0:
print(1)
else:
print(0)
elif func[0] == 'top':
if len(stack) == 0:
print(-1)
else:
print(stack[-1])
| true |
a0fc186f1a97fabdecde3288f79c0f74d42d9eb3 | Python | dhanin/Hangman | /Problems/Prime number/main.py | UTF-8 | 271 | 3.625 | 4 | [] | no_license | number = int(input())
if number > 1:
i = 2
while i * i <= number:
if number % i == 0:
print("This number is not prime")
exit()
i += 1
print("This number is prime")
elif number == 1:
print('This number is not prime') | true |
6ec8176571e52e9899413e7febc5220c7162e2a2 | Python | Tulip4attoo/tetris_python | /game_objects.py | UTF-8 | 4,413 | 3.09375 | 3 | [] | no_license | import numpy as np
import utils
import bricks
import cfg
import random
class Field():
"""
the field contains a numpy array that represents the field.
"""
def __init__(self):
"""
"""
self.padding_size = cfg.PADDING
self.field_render = np.zeros(cfg.FIELD_SHAPE)
self.field_padding = np.zeros((cfg.FIELD_SHAPE[0] + 2*cfg.PADDING, \
cfg.FIELD_SHAPE[1] + 2*cfg.PADDING))
def add_brick(self, brick_cl):
"""
when a brick hit the floor, it will be attached into the field
"""
self.field_render, self.field_padding = \
utils.calc_move(self.field_padding, brick_cl)
def check_and_clear_rows(self):
self.field_render = utils.clear_rows(self.field_render)
self.field_padding = utils.clear_rows(self.field_padding)
def revert_to_state(self, f_render, f_padding):
self.field_render = f_render.copy()
self.field_padding = f_padding.copy()
class Brick():
"""
the brick has 3 values:
- a dictionary that contains 4 numpy arrays size (4, 4)
as the 4 rotation renders of the brick itself.
- a (x_coord, y_coord) list that is the top-left coord of the above
numpy array.
- a number that show the rotation of the brick.
"""
def __init__(self, coord=cfg.DEFAULT_COORD):
self.brick = {"brick": {},
"coord": coord[:],
"rotation": 0}
self.next_brick = {"brick": {},
"coord": coord[:],
"rotation": 0}
self.random_brick()
self.random_next_brick()
self.dumb_brick = self.copy_brick(self.brick)
self.action_dict = {-1: self.do_nothing,
ord("w"): self.rotate,
ord("a"): self.move_left,
ord("d"): self.move_right,
ord("s"): self.move_down,
ord(" "): self.move_to_floor}
def copy_brick(self, brick_a):
"""
return brick_a.copy()
"""
new_brick = {"brick": brick_a["brick"].copy(),
"coord": brick_a["coord"].copy(),
"rotation": brick_a["rotation"]}
return new_brick
def create_new_brick(self):
self.brick = self.copy_brick(self.next_brick)
self.random_next_brick()
def random_brick(self):
brick_list = bricks.BRICKS_LIST[:]
random_brick = random.choice(brick_list)
self.brick["brick"] = random_brick
self.brick["rotation"] = random.choice(range(len(random_brick)))
return self.brick
def random_next_brick(self):
brick_list = bricks.BRICKS_LIST[:]
random_brick = random.choice(brick_list)
self.next_brick["brick"] = random_brick
self.next_brick["rotation"] = random.choice(range(len(random_brick)))
return self.next_brick
def control_brick(self, key, field):
"""
this function is to control the brick, includes rotation= and move
key is a number of ord(key)
will refactor by adding into a dict of moveset later
"""
self.dumb_brick = self.copy_brick(self.brick)
action = self.action_dict[key]
action(field)
def revert(self):
"""
in case there is a invalid move, we can revert to previous state.
"""
self.brick = self.copy_brick(self.dumb_brick)
def revert_to_state(self, saved_brick, saved_next_brick):
"""
revert into a saved state
"""
self.brick = self.copy_brick(saved_brick)
self.next_brick = self.copy_brick(saved_next_brick)
def do_nothing(self, field):
pass
def rotate(self, field):
"""
this function is to change the rotation value in self.brick
always turn right aka +1 to the value
"""
self.brick["rotation"] = (self.brick["rotation"] + 1) % 4
def move_left(self, field):
self.brick["coord"][1] -= 1
def move_right(self, field):
self.brick["coord"][1] += 1
def move_down(self, field):
self.brick["coord"][0] -= 1
def move_to_floor(self, field):
while utils.check_valid(field, self):
self.brick["coord"][0] -= 1
self.brick["coord"][0] += 1
| true |
a70a1f0f1c0789e8b9271c5028a7d5847a9fc6b3 | Python | anabcm/Social_Network_Analysis | /code/Social_network_analysis.py | UTF-8 | 20,467 | 2.765625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#Contruyendo la estructura de la red
#Este codigo explora la estructura y directorio de los datos de transparencia que se encuentran en el POT
#publicados por INAI en abril del 2016
#El código interactua con una base de datos en postgres, las consultas son basicas en SQL
#asi que es posible conectar otra base y sería necesario configurar el conector adecuado
#ESPECIFICACIONES
#Python 2 y 3
#Base de datos en Postgres
#database="dir"
#user="postgres"
#password="postgres"
#Crear una carpeta llamada "data" para almacenar la salida de las dependencias
#LIBRERIAS Y PAQUETES
import sys
#reload(sys)
#sys.setdefaultencoding('utf-8')
import json
import csv
import psycopg2
import networkx as nx
import os
#import matplotlib.pyplot as plt
from networkx.readwrite import json_graph
from igraph import *
#A la base de datos se integraron datos de SEP e INE como atributos para cada nodo
#=========conecta a una base de datos, regresa el cursor========================
def connect_database(dbname,u,p):
con=None
try:
con=psycopg2.connect(database=dbname, user=u, password=u)
return con
except psycopg2.DatabaseError, e:
print ('Error %s' % e )
sys.exit(1)
return 0
#==================Contruye red por depenendecia con file=======================
#costruye la red de datos
#Consulta la tabla estructura y construye la red basado en la jerarquia que ocupa
#cada servidor publico
def explorando_red(con):
f=open("output_redes.csv",'wb')
cursor=con.cursor()
#consulta para traer las diferentes dependencias
query="select id, id_cargos,id_dependencia from dir_clean order by id_dependencia ASC"
cursor.execute(query)
rows=cursor.fetchall()
#escribe en el archibo para cada servidor y su cargo superior
f.write("id,id_dependecia,id_cargos,superiores")
for row in rows:
lista=[]
lista=visited_node(row[1],row[2],lista,con)
f.write("\n"+str(row[0])+","+row[2]+","+row[1]+",")
lista=lista[:len(lista)-1]
c=0
for i in lista:
c=c+1
if c<len(lista):
f.write(str(i)+",")
else:
f.write(str(i))
#================visita cada nodo y busca el superior=======================
#funcion recursiva que trae los id de los puestos superiores, con base en la tabla estrucura
def visited_node(id,dependencia,lista,con):
cursor=con.cursor()
query="select id_cargo_superior from estructura where id_cargos like '"+str(id)+"' and id_dependencia like '"+str(dependencia)+"';"
cursor.execute(query)
rows=cursor.fetchall()
if len(rows)!=0:
if rows[0][0] not in lista:
lista.append(rows[0][0])
visited_node(rows[0][0],dependencia,lista,con)
return lista
#===========================Red por dependencia==============================
#Crea una red por dependencia, en la base de datos hay cerca de 277
def net_by_dependecia(con):
cursor=con.cursor()
edges=[]
#explorando el nodo superior aca cada nodo y trayendo el id
#todas las dependencias
query="Select distinct(id_dependencia) from estructura order by id_dependencia desc"
cursor.execute(query)
dependencias=cursor.fetchall()
#id de de dependencias, lista
for dep in dependencias:
query="select id from dir_clean where id_dependencia= '"+str(dep[0])+"';"
cursor.execute(query)
nodos=cursor.fetchall()
#Creamos el graph
G=nx.Graph()
a=[]
#Creamos los nodos para agregar a la red
for n in nodos:
#Funcion que genera los atributos para cada nodo
atributes=attributes(n[0],con) #genera el diccionario de atributos
#Agragando el id y los atributos
G.add_node(n[0],id_titulo=atributes['id_titulo'],id_institucion=atributes['id_institucion'],partido=atributes['partido'])#genera la lista de nodos y atributos
#obtenemos el cargo para cada nodo para poder buscar su superior
query="select id,id_cargos from dir_clean where id_dependencia= '"+str(dep[0])+"';"
cursor.execute(query)
nodos=cursor.fetchall()
#EDGES
#Ahora se generaran los edges
for n in nodos:
id_cargo_superior=who(n[1],dep[0],con)
#buscando al funcionario de cargo superior
query="select id from dir_clean where id_cargos='"+str(id_cargo_superior)+"' and id_dependencia ='"+str(dep[0])+"';"
cursor.execute(query)
ans=cursor.fetchall()
if len(ans)>0:
#para el nodo si riene un superior se calcula el peso
wei=weight(attributes(n[0],con),attributes(ans[0][0],con))
#Ya que fue calculado el peso se agrega al edge
G.add_edge(n[0],ans[0][0], weight=wei)
#si no tiene un superior se agrega un edge a su mismo con peso 0
else:
G.add_edge(n[0],n[0], weight=0)
#edges.append([n[0],n[0]], weight=wi)
#para cada dependencia se construye una red
#DEVELOP GRAPH
#agregando los atributos
cursor.execute("select dependencia from estructura where id_dependencia like '"+str(dep[0])+"'")
ans=cursor.fetchall()
nombre_dependencia=ans[0][0]
#llama la función de calcula las medidas
measure=measurements(G,con,nombre_dependencia.decode(encoding='UTF-8',errors='strict'),str(dep[0]))
#clasificando las carreras, reduciendo la lista con dedupe y calculando la distancia al más cercano.
#===============Calculando peso para el edge =====================================
def weight(list1,list2):
peso=0.5 #por pertenecer a la misma dependencia
if list1['id_titulo']==list2['id_titulo']:
peso=peso+0.15 #tienen la misma carrera(de hecho deberia ser mas fuerte si ademas estuvieron en la misma escuela)
if (list1['id_institucion']==list2['id_institucion']):
peso=peso+0.3 #estudiaron en la misma escuela
if (list1['partido']==list2['partido']):
peso=peso+0.05 # pertenecen al mismo partido, le reste importancia ya que no todos tienen afiliación política
return peso
#=================Calcula los clique de la red===================================
# Calculando medidas de clique,community
def community(G,con,nombre,id):
#Based on the algorithm published by Bron & Kerbosch (1973) [R198] as adapated by Tomita,
#Tanaka and Takahashi (2006) [R199] and discussed in Cazals and Karande (2008) [R200].
#The method essentially unrolls the recursion used in the references to avoid issues of recursion stack depth.
#This algorithm is not suitable for directed graphs.
#This algorithm ignores self-loops and parallel edges as clique is not conventionally defined with such edges.
#calcula todos los cliques
cliques=list(nx.find_cliques(G))
# Create the maximal clique graph of a graph.
#Finds the maximal cliques and treats these as nodes.
#The nodes are connected if they have common members in the original graph.
#Theory has done a lot with clique graphs, but I haven’t seen much on maximal clique graphs.
maximal=list(nx.make_max_clique_graph(G))
#This module provides functions and operations for bipartite graphs.
#Bipartite graphs B = (U, V, E) have two node sets U,V and edges in E
#that only connect nodes from opposite sets. It is common in the literature
#to use an spatial analogy referring to the two node sets as top and bottom nodes.
bipartite= list(nx.make_clique_bipartite(G, fpos=None, create_using=None, name=None))
#obtiene el número de cliques en la red
graph_clique_number=nx.graph_clique_number(G, cliques=None)
#Componentes conectados
component_connected_g=[]
compo=nx.connected_component_subgraphs(G)
for c in compo:
component_connected_g.append(list(c))
return cliques,maximal,bipartite,graph_clique_number,component_connected_g
#==============Calculo de medidas para la red===================================
def measurements(G,con,nombre,id):
#Configuración para el plot
#plt.figure(figsize=(10,10))
#plt.axis('off')
#plt.title(nombre)
nx.draw_spring(G, with_labels=True)
nx.draw(G, with_labels=True)
#calcula centrality
central,top_de,top_clos,bet_top,top_de_c=centrality(G,con)
#calculo del linkedin prediction
linkedin,jackard,pref=linkedin_prediction(G)
#calculo de clustering y cliques
cliques,maximal,bipartite,graph_clique_number,component_connected_g=community(G,con,nombre,id)#AGREGUE ESTO
#Generando archivos JSON para salida de datos
path=os.getcwd()+"/data/"+str(id)+".json"
with open(path, 'w') as outfile1:
outfile1.write(json.dumps(json_graph.node_link_data(G)))
path=os.getcwd()+"/data/"+str(id)+"_centrality.json"
with open(path, 'w') as outfile1:
outfile1.write(json.dumps(central))
path=os.getcwd()+"/data/"+str(id)+"_linkedin_prediction.json"
with open(path, 'w') as outfile1:
outfile1.write(json.dumps(linkedin))
path=os.getcwd()+"/data/"+str(id)+"_all.csv"
report(path,top_de,top_clos,bet_top,top_de_c,jackard,pref, cliques,maximal,bipartite,graph_clique_number,component_connected_g)
path=os.getcwd()+"/data/"+str(id)+".gexf"
#clustering(G,con,nombre,id)
#guardando la red con el id de la dependencia
nx.write_gexf(G, path,encoding='utf-8')
path=os.getcwd()+"/images/"+str(id)
#ploteando
#plt.savefig(path)
#plt.show()
def report(path,top_de,top_clos,bet_top,top_de_c,jackard,pref,cliques,maximal,bipartite,graph_clique_number,component_connected_g):
    """Write every computed network measure to one CSV report at *path*.

    Sections: centrality tops, community structure (cliques / components)
    and link prediction (Jaccard, preferential attachment).
    NOTE(review): mode "wb" is the Python 2 csv convention; on Python 3 this
    raises TypeError (use "w" with newline='' there).
    """
    with open(path, "wb") as f:
        writer = csv.writer(f)
        writer.writerow(["*Análisis de Redes*"])
        writer.writerow(["================Centrality(Elite)==========="])
        writer.writerow(["*Top degree centrality*"])
        writer.writerows(top_de)
        writer.writerow(["*Top closeness*"])
        writer.writerows(top_clos)
        writer.writerow(["*Top Betweetness*"])
        writer.writerows(bet_top)
        writer.writerow(["*Top degree centrality*"])
        writer.writerows(top_de_c)
        writer.writerow(["==================Community================"])
        writer.writerow(["*Cliques*"])
        writer.writerows(cliques)
        writer.writerow(["*Maximal clique*"])
        writer.writerow(maximal)
        writer.writerow(["*Bipartite clique*"])
        writer.writerow(bipartite)
        writer.writerow(["*Graph clique number*"])
        writer.writerow([graph_clique_number])
        writer.writerow(["*Component connected Graph*"])
        writer.writerows(component_connected_g)
        writer.writerow(["==================Prediction================"])
        writer.writerow(["*Linkedin Prediction*"])
        writer.writerow(["*Jackard*"])
        writer.writerows(jackard)
        writer.writerow(["*Preferencial*"])
        writer.writerows(pref)
#================Linkedin Prediction=======================================
#Cuales son los nodos más probables a tener conexión en el futuro
def linkedin_prediction(G):
    """Rank the node pairs of *G* most likely to connect in the future.

    Returns (json_dict, jaccard_rows, preferential_rows): the top-10 pairs
    by Jaccard coefficient and by preferential attachment, both as
    JSON-friendly dicts and as CSV-ready [pair, score] rows.
    """
    # Link prediction: Jaccard coefficient — pairs most likely to connect.
    jackard=[]
    preds_jc = nx.jaccard_coefficient(G)
    pred_jc_dict = {}
    for u, v, p in preds_jc:
        pred_jc_dict[(u,v)] = p
    Jaccard_Coefficient=[]
    # Keep only the ten best-scoring pairs.
    Jaccard_Coefficient_10=sorted(pred_jc_dict.items(), key=lambda x:x[1], reverse=True)[:10]
    for c in Jaccard_Coefficient_10:
        j={"Nodes":c[0],"probability":c[1]}
        Jaccard_Coefficient.append(j)
        jackard.append([c[0],c[1]])
    # Preferential attachment — pairs most "important"/relevant to connect.
    preds_pa = nx.preferential_attachment(G)
    pref=[]
    pred_pa_dict = {}
    for u, v, p in preds_pa:
        pred_pa_dict[(u,v)] = p
    preferential=[]
    preferential_10=sorted(pred_pa_dict.items(), key=lambda x:x[1], reverse=True)[:10]
    for c in preferential_10:
        j={"Nodes":c[0],"measure":c[1]}
        preferential.append(j)
        pref.append([c[0],c[1]])
    # 'dir' shadows the builtin; kept for compatibility with callers.
    dir={"Jaccard_Coefficient":Jaccard_Coefficient,"preferential":preferential}
    return dir,jackard,pref
#==============================Centrality=======================================
#Cuales son los nodos centrales en la red
def centrality(G,con):
    """Compute degree / closeness / betweenness / degree-centrality tops of *G*.

    Returns (json_dict, top_de, top_clos, bet_top, top_de_c); the list
    variants are CSV-ready rows.  Names of the raw-degree top nodes are
    resolved against the dir_clean table through *con*.
    NOTE(review): G.degree().items() is the networkx 1.x API; the SQL below
    is built by string concatenation (ids come from the local DB, not users).
    """
    cursor=con.cursor()
    # Top 5 nodes by raw degree.
    centre=sorted(G.degree().items(), key=lambda x:x[1], reverse=True)[:5]
    top_degree=[]
    top_de=[]
    # Resolve who those central nodes are in the database.
    for c in centre:
        query="select nombre,primer_apellido,segundo_apellido from dir_clean where id ="+str(c[0])+""
        cursor.execute(query)
        ans=cursor.fetchall()
        top={"nombre":ans[0][0],"primer_apellido":ans[0][1],"segundo_apellido":ans[0][2],"top":c[1]}
        top_degree.append(top)
        top_de.append([ans[0][0],ans[0][1],ans[0][2],c[1]])
    # Closeness centrality for the whole network.
    closeness_centrality = nx.closeness_centrality(G)
    closeness=[]
    # Top 5.
    closs_5= sorted(closeness_centrality.items(), key=lambda x: x[1], reverse=True)[:5]
    top_clos=[]
    for c in closs_5:
        clo={"id":c[0],"closeness":c[1]}
        closeness.append(clo)
        top_clos.append([c[0],c[1]])
    # Betweenness centrality, top 5.
    betweeness_centrality = nx.betweenness_centrality(G)
    betw_5=sorted(betweeness_centrality.items(), key=lambda x: x[1], reverse=True)[:5]
    betweeness_centrality=[]
    bet_top=[]
    for c in betw_5:
        be={"id":c[0],"betweeness":c[1]}
        betweeness_centrality.append(be)
        bet_top.append([c[0],c[1]])
    # Normalised degree centrality, top 5.
    degree_centrality = nx.degree_centrality(G)
    top_de_c=[]
    top_degree_centrality=[]
    top_degree_centrality_5=sorted(degree_centrality.items(), key=lambda x: x[1], reverse=True)[:5]
    for c in top_degree_centrality_5:
        t={"id":c[0],"top_degree_centrality":c[1]}
        top_degree_centrality.append(t)
        top_de_c.append([c[0],c[1]])
    # Bundle every measure into one JSON-friendly dict ('dir' shadows the builtin).
    dir={"centrality":top_degree,"closeness":closeness,"betweeness_centrality":betweeness_centrality,"top_degree_centrality":top_degree_centrality}
    return dir,top_de,top_clos,bet_top,top_de_c
#===================Calcula el grafo central===============================
def centrality_graph_degree(G):
    """Compute degree data for the degree-distribution plot (plot disabled).

    With every plt call commented out this currently has no observable
    effect: 'deg' is computed and immediately dropped.
    """
    #plt.figure(figsize=(10,10))
    #plt.axis('on')
    deg=nx.degree(G)
    #h=plt.hist(deg.values(),100)
    #plt.loglog(h[1][1:],h[0])
#=====================Elimina nodos con un treashold de grado==================
#Si se desea eliminar nodos con poca conección
def trim_degrees(g, degree=2):
    """Return a copy of *g* with every node of degree <= *degree* removed.

    Degrees are snapshotted before any removal, so this is a single pass
    over the original degrees: nodes that only fall below the threshold
    because a neighbour was removed are kept (same semantics as the
    networkx 1.x original).
    """
    g2=g.copy()
    # dict(...) freezes the degrees: on networkx >= 2, nx.degree returns a
    # live view that would change while nodes are being removed.
    d=dict(nx.degree(g2))
    # list(...) so the node container is not mutated while being iterated
    # (networkx >= 2 returns a live NodeView; the original crashed there).
    for n in list(g2.nodes()):
        if d[n]<=degree:
            g2.remove_node(n)
    return g2
#consulta los datos de cada nodo y genera un diccionario que será agregado como atributo a la red
#==============================Atributes======================================
#Consulta la base de datos para traer los atributos para cada nodo e incluirlos
def attributes( id,con):
    """Fetch node attributes (degree title, school, party) for official *id*.

    Missing rows default to empty strings; everything is returned as str in
    a dict ready to be attached to a graph node.
    NOTE(review): 'id' shadows the builtin and the SQL is concatenated --
    acceptable only because ids come from the local database.
    """
    cursor=con.cursor()
    # Professional-profile lookup (degree + school of origin).
    query="select id_titulo,id_institucion from sep_ascii where id like '"+str(id)+"'";
    cursor.execute(query)
    ans=cursor.fetchall()
    # If the information exists.
    if len(ans)>0:
        id_titulo=ans[0][0] # professional degree
        id_institucion=ans[0][1] # school of origin
    else:
        id_titulo=""
        id_institucion=""
    query="select partido from dir_clean where id="+str(id)+"";
    cursor.execute(query)
    ans=cursor.fetchall()
    # Political-party lookup.
    if len(ans)>0:
        partido=ans[0][0]
    else:
        partido=""
    attribute={'id_titulo':str(id_titulo),'id_institucion':str(id_institucion),'partido':str(partido)}
    return attribute
#==================================Quien es el nodo superior===============================
#acorde al cargo se consulta en la tabla estructura quien es el cargo superior
def who(id_cargos,dependencia,con):
    """Return the superior position id for *id_cargos* within *dependencia*.

    Looks up the 'estructura' table and returns the first row's
    id_cargo_superior.
    NOTE(review): raises IndexError when no row matches; SQL is built by
    concatenation (fine for trusted local ids only).
    """
    cursor=con.cursor()
    query="select id_cargo_superior from estructura where id_cargos like '"+str(id_cargos)+"' and id_dependencia like '"+str(dependencia)+"';"
    cursor.execute(query)
    ans=cursor.fetchall()
    return ans[0][0]
#===============================Genera Red con un archivo============================
#Si se tiene una lista de ids de funcionarios se genera la red a partir de esta lista
def develop_net_dependencia_with_FIlE(file,con):
    """Build and plot (igraph) the network for a CSV list of officials.

    Each CSV row is (id, id_dependencia, id_cargos); officials sharing the
    same post/dependency become edges, and vertices are coloured by party.
    NOTE(review): 'partidos' is appended once per matched colleague, not
    once per vertex, so g.vs["partidos"] may mismatch the vertex count --
    verify before trusting the colouring.  'file' shadows the py2 builtin.
    """
    cursor=con.cursor()
    g = Graph()
    edges=[]
    vertices=[]
    partidos=[]
    nombres=[]
    with open(file,'rb') as csvfile:
        reader=csv.reader(csvfile)
        for row in reader:
            # Everyone holding the same post in the same dependency.
            query="select id,nombre,primer_apellido,segundo_apellido from dir_clean where id_cargos like '"+row[2]+"' and id_dependencia like '"+row[1]+"';"
            vertices.append(str(row[0]))
            cursor.execute(query)
            result=cursor.fetchall()
            for r in result:
                #print r[0],r[1]
                if row[0]!=r[0]:
                    edges.append((str(row[0]),str(r[0])))
                    nombres.append(r[1])
                    # Try to resolve the colleague's political party by full name.
                    query="select id from partidos_ascii where nombre like upper('"+str(r[1])+"') and apellido_paterno like upper('"+r[2]+"') and apellido_materno like upper('"+r[3]+"');"
                    #print query
                    cursor.execute(query)
                    results=cursor.fetchall()
                    if len(results)>0:
                        query="select partido from partidos where id= "+str(results[0][0])
                        cursor.execute(query)
                        resu=cursor.fetchall()
                        if len(resu)>0:
                            partidos.append(resu[0][0])
                    else:
                        #print "no"
                        partidos.append("No")
            # The official was not found in the database.
            if len(result)==0:
                print ("No encontre este",row[0])
    g.add_vertices(vertices)
    g.add_edges(edges)
    g.vs
    #g.vs["nombres"]=vertices
    g.vs["partidos"]=partidos
    # Fruchterman-Reingold layout plus a party colour legend.
    layout = g.layout("fr")
    visual_style = {}
    visual_style["layout"] = layout
    visual_style["vertex_size"] = 25
    visual_style["label_size"]=9
    visual_style["vertex_label"] =g.vs["label"]=vertices
    n=["PRI","PAN","PRD","No","MCI","NA","MOR","PVE"]
    c=["red","BLUE","YELLOW","GRAY","ORANGE","TURQUOISE","BROWN","green"]
    visual_style["legend"]=[1, 95, n,c]
    color_dict={"PRI":"red","PAN":"BLUE","PRD":"YELLOW","No":"GRAY","MCI":"ORANGE","NA":"TURQUOISE","NA.":"TURQUOISE","MOR":"BROWN","PVE":"green"}
    #visual_style["vertex_label"] = g.vertices
    visual_style["vertex_color"] = [color_dict[partido] for partido in g.vs["partidos"]]
    visual_style["bbox"] = (1000, 1000)
    plot(g, **visual_style)
#==================================INicio del programa========================
#Se pueden decomentar las funciones que se desean ejecutar
def start():
    """Entry point: connect to the local 'dir' database and build all networks.

    NOTE(review): credentials are hard-coded; acceptable only on a local
    research machine.
    """
    database="dir"
    user="postgres"
    password="postgres"
    con=connect_database(database,user,password)
    #explorando_red(con)
    #cleaning_data_estados(con) # cleans and integrates the state-level data
    #archivo="infoteq.csv" # use this when a list of ids is available
    #develop_net_dependencia(archivo,con) # build the network from a file instead
    net_by_dependecia(con) # build every dependency's network and compute all measures
start()
| true |
774138d7482fb4c8889559eebf09e208759f589b | Python | IanMendozaJaimes/SuperTT | /MexpTokenizer/convertExpressions.py | UTF-8 | 610 | 2.609375 | 3 | [] | no_license | from NSequenceToLatex import Converter
import os
# Convert tokenized CROHME sequences ("<name>,<id id id ...>" per CSV row)
# into "<name>$<latex>" lines written to expressions.txt.
csv_file = '/Users/ianMJ/Downloads/CROHME_dataset_v5/tokenized.csv'
BEGIN = 1000  # start-of-sequence token id (sequence[0] is skipped below)
END = 1001    # end-of-sequence token id
c = Converter()
# 'with' guarantees the handle is closed; the original leaked both file
# objects and shadowed the Python 2 builtin name 'file'.
with open(csv_file, 'r') as src:
    info = src.read().split('\n')
with open('expressions.txt', 'w') as tokens:
    for moreInfo in info:
        if not moreInfo.strip():
            # split('\n') yields a trailing empty string; without this guard
            # temp[1] raised IndexError on the final (blank) line.
            continue
        temp = moreInfo.split(',')
        sequence = temp[1].split(' ')
        end_pos = 0
        # Convert ids to ints up to (not including) the END marker.
        for x in range(0, len(sequence)):
            if int(sequence[x]) == END:
                end_pos = x
                break
            sequence[x] = int(sequence[x])
        latex = c.seq2Lat(sequence[1:end_pos]).replace('\\right ', '').replace('\\left ', '')
        tokens.write(temp[0] + '$' + latex + '\n')
| true |
d4a8c872c42d460c85830e6ae0ae891f1241791f | Python | shizzard/veon-test-task | /opt/03_movie_reserve_test.py | UTF-8 | 2,048 | 2.546875 | 3 | [] | no_license | #!/usr/bin/env python
from common import *
import unittest
class ReservationAddTestCase(unittest.TestCase):
    """HTTP tests for POST /reserve/add (helpers come from `common`)."""
    def setUp(self):
        # Register a fresh movie with exactly one available seat so every
        # test starts from a known, isolated state.
        self.imdb_id = generate_random_string(16)
        self.screen_id = generate_random_string(16)
        self.available_seats = 1
        self.conn = get_connection()
        data = movie_register_req(imdb_id=self.imdb_id, screen_id=self.screen_id, available_seats=self.available_seats)
        response, body = do_request(self.conn, "/movie/store", data)
        self.assertEqual(response.status, 201)
    def tearDown(self):
        # Drop the per-test HTTP connection.
        self.conn.close()
    def test_01_can_reserve(self):
        # A valid reservation returns 201 plus the echoed ids and a ticket id.
        data = movie_reserve_req(imdb_id=self.imdb_id, screen_id=self.screen_id)
        response, body = do_request(self.conn, "/reserve/add", data)
        self.assertEqual(response.status, 201)
        self.assertEqual(body["imdbId"], self.imdb_id)
        self.assertEqual(body["screenId"], self.screen_id)
        self.assertTrue("reservationId" in body.keys())
    def test_02_can_get_conflict_on_reservations_exceeded(self):
        # Only one seat exists, so the second reservation must yield 409.
        data = movie_reserve_req(imdb_id=self.imdb_id, screen_id=self.screen_id)
        response, body = do_request(self.conn, "/reserve/add", data)
        self.assertEqual(response.status, 201)
        response, body = do_request(self.conn, "/reserve/add", data)
        self.assertEqual(response.status, 409)
    def test_03_cannot_reserve_seat_with_invalid_imdb_id(self):
        # Unknown imdb id -> 404.
        data = movie_reserve_req(imdb_id=generate_random_string(16), screen_id=self.screen_id)
        response, body = do_request(self.conn, "/reserve/add", data)
        self.assertEqual(response.status, 404)
    def test_04_cannot_reserve_seat_with_invalid_screen_id(self):
        # Unknown screen id -> 404.
        data = movie_reserve_req(imdb_id=self.imdb_id, screen_id=generate_random_string(16))
        response, body = do_request(self.conn, "/reserve/add", data)
        self.assertEqual(response.status, 404)
# Build and run the suite explicitly so the file works as a plain script.
suite = unittest.TestLoader().loadTestsFromTestCase(ReservationAddTestCase)
unittest.TextTestRunner(verbosity=2).run(suite)
| true |
def binary_search(arr, target):
    """Return the index of *target* in the sorted list *arr*, or -1 if absent."""
    min_idx = 0
    max_idx = len(arr) - 1
    # '<=' keeps the final one-element window [min_idx, max_idx] in play;
    # the original used '<' and reported boundary targets (e.g. the first
    # or last element) as missing.
    while min_idx <= max_idx:
        mid_idx = (min_idx + max_idx) // 2
        if target == arr[mid_idx]:
            return mid_idx
        elif arr[mid_idx] < target:
            min_idx = mid_idx + 1
        else:
            max_idx = mid_idx - 1
    return -1
# Quick smoke check: 26 sits at index 5 of the sorted sample array.
example_arr = [3, 4, 6, 7, 12, 26, 90, 104]
example_target = 26
print(binary_search(example_arr, example_target))
dec9ba9788986a0a595aecb3e402e8b90515decf | Python | HugoCotton/TKintwer-Converter | /conversion.py | UTF-8 | 9,944 | 2.796875 | 3 | [] | no_license | import tkinter
from tkinter import ttk
from tkinter import *
root = tkinter.Tk()
root.title('Conversion')
root.config(bg = 'gray15')
weightans = 'Please enter an answer'
tabControl = ttk.Notebook(root)
mainframe = Frame(tabControl)
mainframe.configure(bg = '#040000')
mainframe.pack(pady = 125, padx = 225)
mainframe.place()
bordercolour = Frame(root, background = 'black')
s = ttk.Style(root)
s.configure('TNotebook', background = 'gray20')
s.configure('TLabel', background = 'gray20', foreground = 'white')
s.configure('TFrame', background = 'gray20', foreground = 'white')
s.configure('TMenubutton', background = 'gray20', foreground = 'white')
s.configure('TEntry', background = 'gray20')
tab1 = ttk.Frame(tabControl)
tab2 = ttk.Frame(tabControl)
tab3 = ttk.Frame(tabControl)
tab4 = ttk.Frame(tabControl)
tab5 = ttk.Frame(tabControl)
tabControl.add(tab1, text='Weight')
tabControl.add(tab2, text='Distance')
tabControl.add(tab3, text='Denary, Binary and Hex')
tabControl.add(tab4, text='Temperature')
tabControl.add(tab5, text='Volume')
tabControl.pack(expand=1, fill="both")
# Weight conversion factors; rows/columns follow weightOPTIONS order
# (grams, kilograms, tonnes, ounces, pounds, stones):
# factors[i][j] converts a quantity in unit i into unit j.
weightconversionfactors = [
    [ 1, 0.001, 0.000001, 0.035274, 0.002205, 0.000157],
    [ 1000, 1, 0.001, 35.274, 2.205, 0.157],
    [ 1000000, 1000, 1, 35274, 2205, 157],
    [ 28.35, 0.02835, 0.00002835, 1, 0.0625, 0.004464],
    [ 454, 0.454, 0.000454, 16, 1, 0.07143],
    # Fixed: 1 stone = 6.35 kg (the table said 0.157, the kg->stone factor)
    # and 0.00635 tonnes (the table said 6.369).
    [ 6350, 6.35, 0.00635, 224, 14, 1]
    ]
weightOPTIONS = ['Grams', 'Kilograms', 'Tonnes', 'Ounces', 'Pounds', 'Stones']
# Distance conversion factors; rows/columns follow distOPTIONS order
# (mm, cm, m, km, inch, foot, yard, mile): factors[i][j] converts unit i
# into unit j.  Several entries are rounded (e.g. 1 ft = 304.8 mm stored
# as 305, 1 yd = 0.9144 m stored as 0.9141).
distconversionfactors = [
    [ 1, 0.1, 0.001, 0.000001, 0.0394, 0.00328, 0.0010941, 0.0000006215],
    [ 10, 1, 0.01, 0.00001, 0.394, 0.0328, 0.010941, 0.000006215],
    [ 1000, 100, 1, 0.001, 39.37, 3.281, 1.094, 0.0006215],
    [ 1000000, 100000, 1000, 1, 39370, 3281, 1094, 0.6215],
    [ 25.4, 2.54, 0.0254, 0.0000254, 1, 0.0833, 0.02778, 0.000015783],
    [ 305, 30.48, 0.3048, 0.0003048, 12, 1, 0.33333, 0.0001894],
    [ 914, 91.44, 0.9141, 0.0009141, 36, 3, 1, 0.0005682],
    [ 1609000, 160934, 1609, 1.609, 63360, 5280, 1760, 1],
    ]
distOPTIONS = ['Millimetres', 'Centimetres', 'Metres', 'Kilometres', 'Inches', 'Foot', 'Yard', 'Mile']
compOPTIONS = ['Denary', 'Binary', 'Hex']
tempOPTIONS = ['Celcius', 'Farenheit', 'Kelvin']
def weightcalculate():
    """Convert the weight entered on tab1 and show the result as a label.

    Reads the amount from weightentry1 and the from/to units from the two
    option menus; float() / list.index() raise ValueError if the entry is
    not numeric or no unit was picked yet.
    """
    weightask = float(weightentry1.get())
    weightfrom = weighttkvar.get()
    weightto = weight2tkvar.get()
    # Map the unit names onto row/column positions of the factor table.
    weightfromindex = weightOPTIONS.index(weightfrom)
    weighttoindex = weightOPTIONS.index(weightto)
    weightans = weightask * weightconversionfactors[weightfromindex][weighttoindex]
    # NOTE(review): each click places a new Label at the same spot; the old
    # labels are never destroyed, merely covered.
    weightans2 = ttk.Label(tab1, text = weightans)
    weightans2.place(relx = 0.5, rely = 0.7, anchor = CENTER)
def distcalculate():
    """Convert the distance entered on tab2 and show the result as a label.

    Mirrors weightcalculate: entry value times the from->to factor from
    distconversionfactors; raises ValueError on non-numeric input or when
    no unit was picked yet.
    """
    distask = float(distentry1.get())
    distfrom = disttkvar.get()
    distto = dist2tkvar.get()
    # Map the unit names onto row/column positions of the factor table.
    distfromindex = distOPTIONS.index(distfrom)
    disttoindex = distOPTIONS.index(distto)
    distans = distask * distconversionfactors[distfromindex][disttoindex]
    # NOTE(review): labels stack on top of each other; never destroyed.
    distans2 = ttk.Label(tab2, text = distans)
    distans2.place(relx = 0.5, rely = 0.7, anchor = CENTER)
def compcalculate():
    """Convert the tab3 entry between denary, binary and hex and display it.

    Fixes vs. the original: the entry is read as a string (float() broke
    int(x, base) for binary/hex input), the target is compared against the
    actual menu option 'Hex' (the old code tested 'Hexadecimal', which never
    matched compOPTIONS), the undefined 'compans = compans' branch is gone,
    and the result is now displayed like the other tabs do.
    """
    compask = compentry1.get().strip()
    compfrom = comptkvar.get()
    compto = comp2tkvar.get()
    # Parse the input into an integer using the base of the source system.
    bases = {'Denary': 10, 'Binary': 2, 'Hex': 16}
    denary = int(compask, bases[compfrom])
    # Render the value in the target system.
    if compto == 'Binary':
        compans = bin(denary)
    elif compto == 'Hex':
        compans = hex(denary)
    else:
        compans = denary
    compans2 = ttk.Label(tab3, text = compans)
    compans2.place(relx = 0.5, rely = 0.7, anchor = CENTER)
def tempcalculate():
    """Convert the tab4 entry between Celsius, Fahrenheit and Kelvin.

    Fixes vs. the original: the Kelvin and Fahrenheit branches assigned to
    a stray 'compans' variable, most from/to pairs were simply missing, and
    no result was ever displayed.  Conversion goes via Celsius so every
    pair of tempOPTIONS works (option strings keep the menu's spelling).
    """
    tempask = float(tempentry.get())
    tempfrom = temptkvar.get()
    tempto = temp2tkvar.get()
    # Normalise the input to Celsius.
    if tempfrom == 'Farenheit':
        celsius = (tempask - 32) * 5.0 / 9.0
    elif tempfrom == 'Kelvin':
        celsius = tempask - 273.15
    else:
        celsius = tempask
    # Convert Celsius into the requested target unit.
    if tempto == 'Farenheit':
        tempans = celsius * 9.0 / 5.0 + 32
    elif tempto == 'Kelvin':
        tempans = celsius + 273.15
    else:
        tempans = celsius
    tempans2 = ttk.Label(tab4, text = tempans)
    tempans2.place(relx = 0.5, rely = 0.7, anchor = CENTER)
def volcalculate():
    # Placeholder: volume conversion is not implemented yet (the Volume
    # tab's Submit button lands here); prints a debug marker instead of
    # converting — a volconversionfactors table would be needed first.
    print('ham')
##################################################################
weighttkvar = StringVar(tab1)
weight2tkvar = StringVar(tab1)
weightpopupMenu = OptionMenu(tab1, weighttkvar, *weightOPTIONS)
weightpopupMenu.config(bg = 'gray20', fg = 'white')
weightpopupMenu['menu'].config(bg = 'gray20', fg = 'white')
weightpopupMenu2 = OptionMenu(tab1, weight2tkvar, *weightOPTIONS)
weightpopupMenu2.config(bg = 'gray20', fg = 'white')
weightpopupMenu2['menu'].config(bg = 'gray20', fg = 'white', activebackground = 'gray20', activeforeground = 'white')
weighttkvar.set('Please pick an option')
weight2tkvar.set('Please pick an option')
weightpopupMenu.place(relx = 0.4, rely = 0.2, anchor = E)
to = ttk.Label(tab1, text = 'to')
to.place(relx = 0.5, rely = 0.2, anchor = CENTER)
weightpopupMenu2.place(relx = 0.6, rely = 0.2, anchor = W)
weightentry1 = Entry(tab1, justify = CENTER )
weightentry1.place(relx = 0.5, rely = 0.4, anchor = CENTER)
weightbutton1 = Button(tab1, text = 'Submit', justify = CENTER, command = weightcalculate)
weightbutton1.place(relx = 0.5, rely = 0.6, anchor = CENTER)
##################################################################
disttkvar = StringVar(tab2)
dist2tkvar = StringVar(tab2)
distpopupMenu = OptionMenu(tab2, disttkvar, *distOPTIONS)
distpopupMenu.config(bg = 'gray20', fg = 'white')
distpopupMenu['menu'].config(bg = 'gray20', fg = 'white')
distpopupMenu2 = OptionMenu(tab2, dist2tkvar, *distOPTIONS)
distpopupMenu2.config(bg = 'gray20', fg = 'white')
distpopupMenu2['menu'].config(bg = 'gray20', fg = 'white', activebackground = 'gray20', activeforeground = 'white')
disttkvar.set('Please pick an option')
dist2tkvar.set('Please pick an option')
distpopupMenu.place(relx = 0.4, rely = 0.2, anchor = E)
to = ttk.Label(tab2, text = 'to')
to.place(relx = 0.5, rely = 0.2, anchor = CENTER)
distpopupMenu2.place(relx = 0.6, rely = 0.2, anchor = W)
distentry1 = Entry(tab2, justify = CENTER )
distentry1.place(relx = 0.5, rely = 0.4, anchor = CENTER)
distbutton1 = Button(tab2, text = 'Submit', justify = CENTER, command = distcalculate)
distbutton1.place(relx = 0.5, rely = 0.6, anchor = CENTER)
##################################################################
comptkvar = StringVar(tab3)
comp2tkvar = StringVar(tab3)
comppopupMenu = OptionMenu(tab3, comptkvar, *compOPTIONS)
comppopupMenu.config(bg = 'gray20', fg = 'white')
comppopupMenu['menu'].config(bg = 'gray20', fg = 'white')
comppopupMenu2 = OptionMenu(tab3, comp2tkvar, *compOPTIONS)
comppopupMenu2.config(bg = 'gray20', fg = 'white')
comppopupMenu2['menu'].config(bg = 'gray20', fg = 'white', activebackground = 'gray20', activeforeground = 'white')
comptkvar.set('Please pick an option')
comp2tkvar.set('Please pick an option')
comppopupMenu.place(relx = 0.4, rely = 0.2, anchor = E)
to = ttk.Label(tab3, text = 'to')
to.place(relx = 0.5, rely = 0.2, anchor = CENTER)
comppopupMenu2.place(relx = 0.6, rely = 0.2, anchor = W)
compentry1 = Entry(tab3, justify = CENTER )
compentry1.place(relx = 0.5, rely = 0.4, anchor = CENTER)
compbutton1 = Button(tab3, text = 'Submit', justify = CENTER, command = compcalculate)
compbutton1.place(relx = 0.5, rely = 0.6, anchor = CENTER)
##################################################################
temptkvar = StringVar(tab4)
temp2tkvar = StringVar(tab4)
temppopupmenu = OptionMenu(tab4, temptkvar, *tempOPTIONS)
temppopupmenu.config(bg = 'gray20', fg = 'white')
temppopupmenu['menu'].config(bg = 'gray20', fg = 'white')
temppopupmenu2 = OptionMenu(tab4, temp2tkvar, *tempOPTIONS)
temppopupmenu2.config(bg = 'gray20', fg = 'white')
temppopupmenu2['menu'].config(bg = 'gray20', fg = 'white', activebackground = 'gray20', activeforeground = 'white')
temptkvar.set('Please pick an option')
temp2tkvar.set('Please pick an option')
temppopupmenu.place(relx = 0.4, rely = 0.2, anchor = E)
to = ttk.Label(tab4, text = 'to')
to.place(relx = 0.5, rely = 0.2, anchor = CENTER)
temppopupmenu2.place(relx = 0.6, rely = 0.2, anchor = W)
tempentry = Entry(tab4, justify = CENTER )
tempentry.place(relx = 0.5, rely = 0.4, anchor = CENTER)
tempbutton = Button(tab4, text = 'Submit', justify = CENTER, command = tempcalculate)
tempbutton.place(relx = 0.5, rely = 0.6, anchor = CENTER)
##################################################################
volOPTIONS = ['Millilitres', 'Litres', 'Centilitres', 'Decilitres', 'Pint', 'Quart', 'Gallon', 'Teaspoon', 'Tablespoon', 'Fluid ounce']
voltkvar = StringVar(tab5)
vol2tkvar = StringVar(tab5)
volpopupMenu = OptionMenu(tab5, voltkvar, *volOPTIONS)
volpopupMenu.config(bg = 'gray20', fg = 'white')
volpopupMenu['menu'].config(bg = 'gray20', fg = 'white')
volpopupMenu2 = OptionMenu(tab5, vol2tkvar, *volOPTIONS)
volpopupMenu2.config(bg = 'gray20', fg = 'white')
volpopupMenu2['menu'].config(bg = 'gray20', fg = 'white', activebackground = 'gray20', activeforeground = 'white')
voltkvar.set('Please pick an option')
vol2tkvar.set('Please pick an option')
volpopupMenu.place(relx = 0.4, rely = 0.2, anchor = E)
to = ttk.Label(tab5, text = 'to')
to.place(relx = 0.5, rely = 0.2, anchor = CENTER)
volpopupMenu2.place(relx = 0.6, rely = 0.2, anchor = W)
volentry = Entry(tab5, justify = CENTER )
volentry.place(relx = 0.5, rely = 0.4, anchor = CENTER)
volbutton = Button(tab5, text = 'Submit', justify = CENTER, command = volcalculate)
volbutton.place(relx = 0.5, rely = 0.6, anchor = CENTER)
##################################################################
root.mainloop() | true |
4dd3f0c40c78ddb9780e146698314e52435d4137 | Python | codio-content/Python_Maze-Logical_thinking_decompositions_and_algorithms_-functions | /.guides/tests/ch-3.py | UTF-8 | 779 | 2.65625 | 3 | [
"MIT"
] | permissive |
energy = 0
score = 0
steps = 1
def getEnergy():
global energy
return energy
def setEnergy(val):
global energy
energy = val
def setScore(val):
global score
score = val
def getSteps():
global steps
return steps
try:
execfile('/home/codio/workspace/public/py/ch-3.py')
hitEnergyEvent()
if energy != 5 or score != 25:
raise ValueError('incorrect values')
energy = 6
score = 0
steps = 1
hitMonsterEvent()
if energy != 1 or score != 5:
raise ValueError('incorrect values')
energy = 5
score = 0
steps = 1
s = calcScore()
if s != 25:
raise ValueError('incorrect values')
print 'well done'
exit(0)
except (IOError, SyntaxError, ValueError) as e:
pass
print 'Not quite right, try again!'
exit(1)
| true |
6b8143ee3eb834496fd93eca8f6a0a1f3d5332f5 | Python | amundmr/FYS2210-Report | /Plotters/Curren-Vg_10um.py | UTF-8 | 1,164 | 2.9375 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
import sys
#Reading file
#f = open(sys.argv[1],"r")
f1 = open("../Data/10um/w2_vg_1v_10um_50x50","r")
f2 = open("../Data/10um/w2_vg_2v_10um_50x50","r")
f3 = open("../Data/10um/w2_vg_3v_10um_50x50","r")
f4 = open("../Data/10um/w2_vg_4v_10um_50x50","r")
f5 = open("../Data/10um/w2_vg_5v_10um_50x50","r")
Allfiles = [f1, f2, f3, f4, f5]
Alldata = np.zeros((5,21))
i = 0
for file in Allfiles:
file.readline()
j=0
for line in file.readlines():
Alldata[i][j] = float(line.split()[2])
j+=1
i+=1
Vd = np.linspace(0,8,21)
#print(Vd)
#print(Alldata)
#Plotting read data
for i in range(len(Alldata)):
plt.scatter(Vd,Alldata[i]*1000,s = 20, label= r"$V_G =$ %d $V$"%float(i+1))
plt.plot(Vd,Alldata[i]*1000, linewidth=1)
plt.ylabel(r"Drain current, $I_d$ / [mA]")
plt.xlabel(r"Drain voltage, $V_d$, / [V]")
plt.tick_params(direction='in', top = 'true', right = 'true')
plt.title(r"Current with different gate voltages, $10um$, $50x50$")
#plt.yscale('log')
plt.ylim(0,13.5)
plt.legend(loc="upper left")
plt.savefig("../Figures/I-V_10um_50x50.png", bbox_inches='tight')
#plt.show()
| true |
45fae260f3ae471347747792610a79d46e3ea460 | Python | hbcbh1999/pydp | /tests/cluster_test.py | UTF-8 | 2,040 | 2.53125 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import sklearn.datasets as dss
from scipy.spatial.distance import euclidean
import src.cluster as cluster
import time
from mpl_toolkits.mplot3d import Axes3D
from __non_private_cluster__ import find_cluster
sample_number, dimension = 10000, 2
blobs = dss.make_blobs(sample_number, dimension, cluster_std=70)
blob = np.round(blobs[0], 2)
approximation, failure, eps, delta = 0.1, 0.1, 0.5, 2**-10
domain_end = max(abs(np.min(blob)), np.max(blob))
domain = (domain_end, 0.01)
desired_amount_of_points = 500
start_time = time.time()
radius, center = cluster.find(blob, dimension, domain, desired_amount_of_points,
approximation, failure, eps, delta, use_histograms=True)
end_time = time.time()
ball = [p for p in blob if euclidean(p, center) <= radius]
# blob = [p for p in blob if tuple(p) not in map(tuple, ball)]
fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
ax = fig.add_subplot(211, aspect='equal')
zipped_data = zip(*blob)
ax.scatter(*zipped_data, c='g')
zipped_ball = zip(*ball)
ax.scatter(*zipped_ball, c='r')
print "Good-radius: %d" % radius
print "Good-center: %s" % str(center)
print "Desired number of points in resulting ball: %d" % desired_amount_of_points
print "Number of points in the resulting ball: %d" % len(ball)
print "Run-time: %.2f seconds" % (end_time - start_time)
start_time = time.time()
test_radius, test_center = find_cluster(blob, desired_amount_of_points)
end_time = time.time()
ball = [p for p in blob if euclidean(p, test_center) <= test_radius]
zipped_data = zip(*blob)
ax2 = fig.add_subplot(212, aspect='equal')
ax2.scatter(*zipped_data, c='b')
zipped_ball = zip(*ball)
ax2.scatter(*zipped_ball, c='r')
print "Test-radius: %d" % test_radius
print "Test-center: %s" % str(test_center)
print "Desired number of points in resulting ball: %d" % desired_amount_of_points
print "Number of points in the resulting ball: %d" % len(ball)
print "Run-time: %.2f seconds" % (end_time - start_time)
plt.show()
| true |
4e419dd7be38adc40546bebea74f03146e71b403 | Python | anderslatif/alg | /util/commit_handling/commit_or_tag_parser.py | UTF-8 | 1,195 | 3.25 | 3 | [
"MIT"
] | permissive | import collections
def commit_or_tag_parser(raw, start=0, dictionary=None):
    """Parse a git commit/tag object body into an ordered key -> value map.

    Header fields become bytes values (repeated keys accumulate in a list,
    folded continuation lines get their leading space stripped); the
    free-form message after the blank line is stored under the key b''.
    """
    if not dictionary:
        # Created here rather than as a default so calls don't share state.
        dictionary = collections.OrderedDict()
    pos = start
    while True:
        spc = raw.find(b' ', pos)
        nl = raw.find(b'\n', pos)
        # A newline before any space marks the blank separator line: the
        # remainder of the buffer is the message.
        if spc < 0 or nl < spc:
            assert nl == pos
            dictionary[b''] = raw[pos + 1:]
            return dictionary
        key = raw[pos:spc]
        # The value runs until the first newline NOT followed by a space
        # (a following space means a folded continuation line).
        vend = pos
        while True:
            vend = raw.find(b'\n', vend + 1)
            if raw[vend + 1] != ord(' '):
                break
        value = raw[spc + 1:vend].replace(b'\n ', b'\n')
        # Repeated keys (e.g. several 'parent' headers) collect into a list.
        if key in dictionary:
            current = dictionary[key]
            if type(current) == list:
                current.append(value)
            else:
                dictionary[key] = [current, value]
        else:
            dictionary[key] = value
        pos = vend + 1
230a589e6a1fb902c77ed2c6e6b47e3088c1b31b | Python | zmahoor/TPR-1.0 | /analysis/group_mind.py | UTF-8 | 5,861 | 2.734375 | 3 | [] | no_license | '''
Author: Zahra Mahoor
My attempt in studying social influence in TPR, group mind.
'''
import sys
import matplotlib.pyplot as plt
sys.path.append('../bots')
from database import DATABASE
import numpy as np
from random import shuffle, choice
from copy import deepcopy
def find_repeats(mylist):
    """Count positions in *mylist* whose value equals the previous one."""
    return sum(1 for prev, cur in zip(mylist, mylist[1:]) if prev == cur)
def random_list(mylen):
    """Build a list of *mylen* fair coin flips encoded as 'y'/'n'."""
    flips = []
    for _ in range(mylen):
        flips.append('y' if np.random.uniform(0, 1) > 0.5 else 'n')
    return flips
db = DATABASE()
# sql="""SELECT displayID, count(*) as count FROM TwitchPlays.reward_log where
# reward='y' or reward='n' group by displayID having count=2 and displayID is not NULL;"""
# records = db.Execute_Select_Sql_Command(sql, ' ')
# total = 0
# disagreements = 0
# total_array = []
# double_vote = 0
# disagreements_with_yourself = 0
# for record in records:
# displayID = record['displayID']
# sql="""SELECT userName,reward FROM TwitchPlays.reward_log where
# (reward='y' or reward='n') and displayID=%d;"""%(displayID)
# feedback = db.Execute_Select_Sql_Command(sql, ' ')
# if feedback[0]['userName'] == feedback[1]['userName']:
# double_vote += 1
# if feedback[0]['reward'] != feedback[1]['reward']:
# disagreements_with_yourself += 1
# if feedback[0]['userName'] != feedback[1]['userName']:
# total += 1
# total_array.append([int(feedback[0]['reward']=='y'), int(feedback[1]['reward']=='y')])
# if feedback[0]['reward'] != feedback[1]['reward']:
# disagreements += 1
# print len(records), double_vote, disagreements_with_yourself, total, disagreements, float(disagreements)/total
# print np.array(total_array)
################################################################################
# sql="""SELECT displayID, count(*) as count FROM TwitchPlays.reward_log where
# reward='y' or reward='n' group by displayID having count>=2 and displayID is not NULL;"""
# records = db.Execute_Select_Sql_Command(sql, ' ')
# total = 0
# disagreements = 0
# reward_array = []
# true_agreement_count = 0
# for record in records:
# displayID = record['displayID']
# sql="""SELECT userName,reward, timeArrival FROM TwitchPlays.reward_log where
# (reward='y' or reward='n') and displayID=%d order by timeArrival ASC;"""%(displayID)
# feedback = db.Execute_Select_Sql_Command(sql, ' ')
# # print feedback
# userNames = set([ f['userName'] for f in feedback ])
# rewards = [ f['reward'] for f in feedback ]
# reward_array.append( rewards )
# if len(userNames) < len(feedback): continue
# true_agreement_count += find_repeats(rewards)
# # print rewards, true_agreement_count
# # print reward_array
# random_agreement_count = []
# for i in range(0, 1000):
# count = 0
# for rewards in reward_array:
# # shuffle(rewards)
# rewards = ['y' if np.random.uniform(0,1)> 0.5 else 'n' for i in range(len(rewards))]
# count += find_repeats(rewards)
# random_agreement_count.append(count)
# print "random count: ", np.percentile(random_agreement_count, 99), "true count: ", true_agreement_count
################################################################################
def count_disagreements(feedback):
    """Count entries of *feedback* whose votes hold more than one distinct value."""
    return sum(1 for votes in feedback.values() if len(set(votes)) > 1)
def shuffle_feedback(feedback):
    """Swap one random vote between two randomly chosen keys of *feedback*.

    Mutates *feedback* in place and also returns it; a single-key dict is
    returned untouched.
    NOTE(review): np.random.choice(feedback.keys(), ...) relies on keys()
    being a list (Python 2); on Python 3 wrap it in list() first.
    """
    if len(feedback) == 1:
        return feedback
    # print "before shuffle: ", feedback
    rand_keys = np.random.choice(feedback.keys(), 2, replace=False)
    key1, key2 = rand_keys
    index1 = np.random.randint(0, len(feedback[key1]))
    index2 = np.random.randint(0, len(feedback[key2]))
    feedback[key1][index1], feedback[key2][index2] = feedback[key2][index2], feedback[key1][index1]
    # print "after shuffle: ", feedback
    return feedback
# ---- Build 'data': per (robot, command), the per-display (user, reward)
# votes, keeping only displays with votes from at least two distinct users.
data = {}
sql = """SELECT robotID, cmdTxt from display where (numYes+numYes)>=2 group by robotID, cmdTxt;"""
# NOTE(review): "(numYes+numYes)" doubles the yes-count; presumably
# "(numYes+numNo)" (total votes) was intended — confirm against the schema.
records = db.execute_select_sql_command(sql, 'Failed fetch...')
for record in records:
    robotID, cmdTxt = record['robotID'], record['cmdTxt']
    # print record
    # All y/n rewards ever logged for this robot+command pair.
    sql = """SELECT r.displayID, r.reward, d.robotID, d.cmdTxt, r.userName FROM
    TwitchPlays.reward_log as r join TwitchPlays.display as d on d.displayID=r.displayID where
    robotID=%d and cmdTxt='%s' and (r.reward='y' or r.reward='n');"""%(robotID, cmdTxt)
    evaluations = db.execute_select_sql_command(sql, 'Failed fetch...')
    if len(evaluations) < 2:
        continue
    # print evaluations
    # print
    # Group the (user, reward) votes by the display they were cast on.
    robot_command_dict = {}
    for evals in evaluations:
        username = evals['userName']
        reward = evals['reward']
        displayID = evals['displayID']
        if displayID in robot_command_dict:
            robot_command_dict[displayID].append((username,reward))
        else: robot_command_dict[displayID] = [(username,reward)]
    print robotID, cmdTxt, robot_command_dict
    print
    # Drop displays with a single vote or two votes from the same user.
    # NOTE(review): Python 2 only — .items() is a list there; on Python 3
    # deleting while iterating the live view raises RuntimeError.
    for key, val in robot_command_dict.items():
        if len(val) == 1:
            del robot_command_dict[key]
        if len(val) == 2 and val[0][0] == val[1][0]:
            del robot_command_dict[key]
    if len(robot_command_dict) != 0:
        data[(robotID, cmdTxt)] = robot_command_dict
# print data
# true_disagreements = 0
# for key in data:
# true_disagreements += count_disagreements(data[key])
# print "true disagreements: ", true_disagreements
# random_disgreements = []
# for i in range(0, 1000):
# count = 0
# for key in data:
# temp = shuffle_feedback( deepcopy(data[key]) )
# count += count_disagreements(temp)
# random_disgreements.append(count)
# # print count
# print "random count: ", np.percentile(random_disgreements, 99)
| true |
6fe9930cb53d7e905bab099b646f25483a3face3 | Python | FengZhang-git/EJSC | /code/data_process/data_loader.py | UTF-8 | 4,911 | 2.578125 | 3 | [] | no_license | from data_process.sampling import EpisodeDescriptionSampler
from data_process.dataset_spec import DatasetSpecification
from data_process.config import EpisodeDescriptionConfig
from collections import defaultdict
import gin
import json
from data_process.learning_spec import Split
import numpy as np
def get_data(path):
    """Read a JSON-lines file and return its records as a list of dicts."""
    with open(path, 'r') as src:
        return [json.loads(line) for line in src]
def get_classes_infomation(vocabPath, filePath):
    """Load class metadata from a vocab file plus a JSON-lines data file.

    Returns (class_names, examples_per_class, examples, class_names_to_ids):
    class id -> name, class id -> example count, intent name -> examples,
    and name -> class id.
    """
    classes = []
    examples = defaultdict(list)
    with open(vocabPath, 'r') as src:
        for line in src:
            classes.append(line.split('\n')[0])
    datas = get_data(filePath)
    for line in datas:
        examples[line["intent"]].append(line)
    # Bug fix: validate BEFORE the counting loop below. Reading
    # examples[name] on a defaultdict inserts missing keys, so the old
    # post-loop check could never detect vocab classes absent from the data.
    if len(classes) != len(examples):
        print("Wrong vocab")
    class_names = {}
    examples_per_class = {}
    for i, name in enumerate(classes):
        class_names[i] = name
        examples_per_class[i] = len(examples[name])
    class_names_to_ids = {name: i for i, name in class_names.items()}
    return class_names, examples_per_class, examples, class_names_to_ids
def write_data(path, datas):
    """Write an episode's support+query examples to `path`, one JSON object per line."""
    with open(path, 'w') as fout:
        for record in datas['support'] + datas['query']:
            fout.write("%s\n" % json.dumps(record, ensure_ascii=False))
class Dataloader():
    """Few-shot episode loader: wraps an EpisodeDescriptionSampler over the
    classes/examples found in a vocab file and a JSON-lines data file."""
    def __init__(self, vocabPath, filePath):
        super(Dataloader, self).__init__()
        # Class metadata derived from the vocab and data files.
        self.class_names, self.examples_per_class, self.examples, self.class_names_to_ids = get_classes_infomation(vocabPath, filePath)
        self.dataset_spec = DatasetSpecification(
            name=None,
            images_per_class=self.examples_per_class,
            class_names=self.class_names,
            path=None,
            file_pattern='{}.tfrecords'
        )
        print(self.dataset_spec)
        # Episode-shape constraints (ways/support/query bounds) for sampling.
        self.config = EpisodeDescriptionConfig(
            num_ways = None,
            num_support = None,
            num_query = None,
            min_ways = 3,
            max_ways_upper_bound = 10,
            max_num_query = 20,
            max_support_set_size = 100,
            max_support_size_contrib_per_class = 20,
            min_log_weight = -0.69314718055994529, # np.log(0.5),
            max_log_weight = 0.69314718055994529, # np.log(2),
            min_examples_in_class = 2
        )
        self.sampler = EpisodeDescriptionSampler(
            dataset_spec=self.dataset_spec,
            split=Split.TRAIN,
            episode_descr_config=self.config,
            pool=None
        )
    def get_episode_datas(self):
        """Sample one episode; returns
        (num_classes, class_ids, num_support, num_query, episode_datas).

        Each sampled class description is assumed to be a
        (class_id, n_support, n_query) triple — TODO confirm against the
        sampler implementation.
        """
        class_descriptions = self.sampler.sample_episode_description()
        episode_datas = defaultdict(list)
        class_ids, num_support, support_examples, query_examples = [], [], [], []
        num_classes = len(class_descriptions)
        # NOTE(review): assumes every class uses the same query count as the
        # first one — confirm.
        num_query = class_descriptions[0][2]
        for class_des in class_descriptions:
            class_ids.append(class_des[0])
            num_support.append(class_des[1])
            examples_per_class = self.examples[self.class_names[class_des[0]]]
            # Reseed then shuffle in place (this mutates self.examples).
            np.random.seed(np.random.randint(0, 100000))
            np.random.shuffle(examples_per_class)
            support_per_class = examples_per_class[:class_des[1]]
            query_per_class = examples_per_class[class_des[1]: class_des[1]+class_des[2]]
            support_examples.extend(support_per_class)
            query_examples.extend(query_per_class)
        episode_datas['support'] = support_examples
        episode_datas['query'] = query_examples
        # print(f"Number of classes: {num_classes}")
        # print(f"List of class_ids: {str(class_ids)}")
        # print(f"List of support numbers: {str(num_support)}")
        # print(f"Number of query examples: {num_query}")
        # print(f"Support size: {len(support_examples)}")
        # print(f"Query size: {len(query_examples)}")
        # print(episode_datas)
        # write_data("code/slot_and_intent/data-process/test.json", episode_datas)
        return num_classes, class_ids, num_support, num_query, episode_datas
    def split_data(self, episode_datas):
        """Split an episode into label/text/slot lists.

        NOTE(review): support labels are numeric class ids while query labels
        are the raw intent strings — confirm this asymmetry is intended.
        """
        support_labels, support_text, query_labels, query_text, slots = [], [], [], [], []
        for line in episode_datas['support']:
            support_labels.append(self.class_names_to_ids[line['intent']])
            support_text.append(line['text_u'])
            slots.append(line["slots"].split(', '))
        for line in episode_datas['query']:
            query_labels.append(line["intent"])
            query_text.append(line["text_u"])
            slots.append(line["slots"].split(', '))
        return support_labels, support_text, query_labels, query_text, slots
| true |
039bcbe0adafa6b1788d17b527aa3f38dfaa7455 | Python | Viktor32-sours/Eiler | /26/26.py | UTF-8 | 1,354 | 3.921875 | 4 | [] | no_license | """
Взаимные циклы
Задача 26
Дробная единица содержит 1 в числителе. Десятичное представление дробных единиц с знаменателями от 2 до 10 дано:
1/2 = 0.5
1/3 = 0.(3)
1/4 = 0.25
1/5 = 0.2
1/6 = 0.1(6)
1/7 = 0.(142857)
1/8 = 0.125
1/9 = 0.(1)
1/10 = 0.1
Где 0,1 (6) означает 0,166666 ... и имеет повторяющийся цикл из 1 цифры. Можно видеть , что 1 / 7 имеет повторяющийся цикл 6 цифр.
Найдите значение d <1000, для которого 1 / d содержит самый длинный повторяющийся цикл в своей десятичной дробной части.
Ответ: 983
"""
import time
start = time.time()
def dec_period(p):
    """Length of the repeating cycle in the decimal expansion of 1/p.

    Factors of 2 and 5 only delay the cycle, so they are stripped first;
    the period is then the multiplicative order of 10 modulo what remains.
    Returns 0 for terminating decimals.
    """
    # Bug fix: use integer division — `/=` silently turned p into a float
    # and made the modular arithmetic below depend on exact float behaviour.
    while p % 2 == 0:
        p //= 2
    while p % 5 == 0:
        p //= 5
    if p <= 1:
        return 0
    m = 10 % p
    period = 1
    while m > 1:
        m = m * 10 % p
        period += 1
    return period
# Scan denominators 2..1000 and remember the first one whose decimal
# expansion of 1/d has the longest repeating cycle.
len_period = 0
key = 0
for candidate in range(2, 1001):
    cycle = dec_period(candidate)
    if cycle > len_period:
        len_period = cycle
        key = candidate
print(f'key : {key} - len_period: {len_period}')
print(time.time() - start)
| true |
5a5e0f0e236bf22291e193afa17d9dccac67591d | Python | lemcke/md_extraction_analysis | /analysis/eos/eos.py | UTF-8 | 3,625 | 3.5 | 4 | [] | no_license | import numpy as np
from scipy.optimize import curve_fit
from scipy.misc import derivative
class EnergyVolumeEOS:
    """
    Energy-volume equation of state (EOS).

    Attributes
    ----------
    volumes: <numpy.ndarray> of volumes
    energies: <numpy.ndarray> of energies
    fit_func: <callable> function to use as EOS
    fit_guess: <array-like> of guess parameters for EOS
    fit_covar: <numpy.ndarray> EOS covariance matrix
    fit_coeff: <numpy.ndarray> EOS coefficients

    Methods
    -------
    fit: fit `fit_func` to `volumes` and `energies`
         using <scipy.optimize.curve_fit>
    model: defined as `fit_func` with `fit_coeff`
    p_v: pressures p = -dE/dV from the fitted model
    ...as well as getters for all attributes,
    and setters for `volumes`, `energies`, `fit_func`,
    `fit_guess`, `fit_coeff`.

    Aliases
    -------
    `energies` => `e`
    `volumes` => `v`

    Example
    -------
    >>> s = EnergyVolumeEOS(volume_data, energy_data)
    >>> s.fit_func = EnergyVolumeEOS.poly_4
    >>> s.fit()
    >>> s.model(volume_data)
    """

    def __init__(self, volumes, energies, fit_func=None, guess=None):
        """
        Initialize <EnergyVolumeEOS> object.

        Input
        -----
        volumes:  <array-like> volume "x" data
        energies: <array-like> energy "y" data
        fit_func: <callable> energy-volume model
        guess:    <array-like> guess coefficients for fit function
        """
        self._v = np.asarray(volumes).flatten()
        self._e = np.asarray(energies).flatten()
        assert self._v.size == self._e.size
        self._fit_func = fit_func
        self._fit_guess = guess
        self._fit_coeff = None  # Calculated by fitting model to data.
        self._fit_covar = None  # ...

    # ---- fit model to data ---- #
    def fit(self):
        """Fit `fit_func` to the data; stores coefficients and covariance."""
        coeffs, cov = curve_fit(
            self._fit_func, self._v, self._e, self._fit_guess
        )
        self._fit_coeff = coeffs
        # Bug fix: this used to write `self._fit_cov`, so the `fit_covar`
        # property (backed by `_fit_covar`) stayed None forever.
        self._fit_covar = cov

    def model(self, volumes):
        """Evaluate the fitted EOS at `volumes` (requires a prior fit())."""
        return self._fit_func(volumes, *self._fit_coeff)

    # ---- built-in models ---- #
    @staticmethod
    def poly_4(x, x0, a, b, c, d, e):
        """Fourth-degree Taylor polynomial."""
        _x = x - x0
        return a + b*_x + c*_x*_x + d*_x*_x*_x + e*_x*_x*_x*_x

    @staticmethod
    def exponential(x, x0, a, b, c):
        """Exponential model."""
        return a * np.exp(b * (x - x0)) + c

    # ---- physical functions ---- #
    def p_v(self, volumes=None):
        """Return pressure(s) p = -dE/dV at `volumes` (default: fitted data).

        Bug fixes: the original signature `p_v(self, volumes=self._v)` raised
        NameError at class-definition time (`self` does not exist there), and
        `scipy.misc.derivative` (removed in modern SciPy, and used here with
        its very coarse default dx=1.0) is replaced by an explicit central
        difference with a volume-scaled step.
        """
        if volumes is None:
            volumes = self._v
        volumes = np.asarray(volumes, dtype=float)
        h = 1e-5 * np.maximum(1.0, np.abs(volumes))
        return -(self.model(volumes + h) - self.model(volumes - h)) / (2.0 * h)

    # ---- aliases/getters ---- #
    @property
    def v(self):
        return self._v
    @property
    def e(self):
        return self._e
    @property
    def volumes(self):
        return self._v
    @property
    def energies(self):
        return self._e
    @property
    def fit_func(self):
        return self._fit_func
    @property
    def fit_guess(self):
        return self._fit_guess
    @property
    def fit_coeff(self):
        return self._fit_coeff
    @property
    def fit_covar(self):
        return self._fit_covar

    # ---- setters ---- #
    @volumes.setter
    def volumes(self, value):
        self._v = value
    @energies.setter
    def energies(self, value):
        self._e = value
    @fit_func.setter
    def fit_func(self, func):
        self._fit_func = func
    @fit_guess.setter
    def fit_guess(self, value):
        self._fit_guess = value
    @fit_coeff.setter
    def fit_coeff(self, value):
        self._fit_coeff = value
2cb3cad2afff7ef1839f06d8139207e0fc490cd0 | Python | jdudley390/automation-and-practive | /Practice Projects/tax calculator.py | UTF-8 | 286 | 4 | 4 | [] | no_license | price = float(input("Enter the price of the item: $"))
while price != 0:
    # Flat 8% sales tax.
    tax = price * .08
    total = tax + price
    print("The full price of the item with tax is: $", format(total, '.2f'), "\n")
    # Typo fixes in the user-facing prompt: "proce" -> "price", "orS" -> "or".
    price = float(input("Enter price of another item or press 0 to exit: $"))
8b69f1da2d6b9595fd507d5d5fc12a2cd11f4773 | Python | koleon03/SKSoftware | /luefterOn.py | UTF-8 | 2,197 | 2.796875 | 3 | [] | no_license | import gpiozero
import time
import adafruit_bme280
import board
import busio
# Global variables: state flags, relay pin assignments (physical BOARD
# numbering), and the motor travel time.
isOpen = False       # set by openMotor()/closeMotor()
is2Open = False      # second-stage flag, toggled by afterOpening()
luefterOn = False    # fan relay currently on ("Luefter" = fan)
aufP = "BOARD36"     # "auf" (open) motor line — presumably plus; confirm wiring
aufM = "BOARD32"     # "auf" (open) motor line — presumably minus; confirm wiring
zuP = "BOARD31"      # "zu" (close) motor line
zuM = "BOARD33"      # "zu" (close) motor line
lPin = "BOARD37"     # fan relay pin
delay = 5            # seconds the motor relays stay energised per move
# Initialise the relays and the BME280 sensor (I2C, address 0x76).
relayAP = gpiozero.OutputDevice(pin=aufP, active_high=True, initial_value=False)
relayAM = gpiozero.OutputDevice(pin=aufM, active_high=True, initial_value=False)
relayZP = gpiozero.OutputDevice(pin=zuP, active_high=True, initial_value=False)
relayZM = gpiozero.OutputDevice(pin=zuM, active_high=True, initial_value=False)
relayL = gpiozero.OutputDevice(pin=lPin, active_high=True, initial_value=False)
i2c = busio.I2C(board.SCL, board.SDA)
tempSensor = adafruit_bme280.Adafruit_BME280_I2C(i2c, address = 0x76)
# Switch all motor relays off.
def clear():
    """De-energise all four motor relays."""
    for relay in (relayAM, relayAP, relayZP, relayZM):
        relay.off()
# Function to open the motor
def openMotor():
    """Energise the 'open' relays for `delay` seconds, then mark the state open.

    Bug fix: `isOpen` is a module-level flag; without the `global`
    declaration the assignment only created a local that was discarded.
    """
    global isOpen
    relayAM.on()
    relayAP.on()
    time.sleep(delay)
    clear()
    isOpen = True
# Function to close the motor
def closeMotor():
    """Energise the 'close' relays for `delay` seconds, then mark the state closed.

    Bug fix: added the missing `global isOpen` (same defect as openMotor).
    """
    global isOpen
    relayZM.on()
    relayZP.on()
    time.sleep(delay)
    clear()
    isOpen = False
# Function to read the temperature
def readTemp():
    """Return the current BME280 temperature reading, or None if the read fails."""
    try:
        reading = tempSensor.temperature
    except RuntimeError as err:
        print(err.args[0])
        return None
    print(reading)
    return reading
def afterOpening(oldValue):
    """Regulation loop run after the window was opened: every 60 s compare the
    new temperature to the previous one and drive the second motor stage and
    the fan accordingly; stops once the temperature drops below 30.

    Bug fixes versus the original:
    - was tail-recursive with no depth bound (eventual RecursionError);
      rewritten as a loop with identical branch logic,
    - `luefterOn` was left True in the `> 30` branch even though the fan was
      switched off, so the fan could never be re-enabled afterwards,
    - a failed sensor read (readTemp() -> None) crashed on the `>` compare;
      it now retries with the previous reference value.
    """
    global is2Open
    global luefterOn
    while True:
        time.sleep(60)
        newValue = readTemp()
        if newValue is None:
            # readTemp already logged the failure; try again next minute.
            continue
        if newValue > oldValue:
            # Still getting warmer: open the second stage and run the fan.
            if not is2Open:
                openMotor()
                is2Open = True
            if not luefterOn:
                relayL.on()
                luefterOn = True
        elif newValue > 30:
            # Warm but no longer rising: stay open, fan off.
            if not is2Open:
                openMotor()
                is2Open = True
            if luefterOn:
                relayL.off()
                luefterOn = False  # bug fix: flag used to stay True here
        elif newValue < 30:
            # Cooled down: close up, fan off, stop regulating.
            if is2Open:
                closeMotor()
                is2Open = False
            if luefterOn:
                relayL.off()
                luefterOn = False
            return
        else:
            # Exactly 30 and not rising: the original fell through and
            # returned without doing anything — preserved.
            return
        oldValue = newValue
# NOTE(review): module-level code that just switches the fan on and waits;
# afterOpening() is never called here — looks like a manual smoke test.
# Confirm this is the intended entry point.
relayL.on()
time.sleep(10)
| true |
97ea0abfdec33da9d35b82a76806fd0c52acbd0f | Python | ryfeus/lambda-packs | /Tensorflow/source/tensorflow/contrib/kernel_methods/python/mappers/random_fourier_features.py | UTF-8 | 6,609 | 2.625 | 3 | [
"MIT"
] | permissive | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Approximate kernel mapper for RBF kernel based on Random Fourier Features."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.contrib.kernel_methods.python.mappers import dense_kernel_mapper as dkm
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import math_ops
# TODO(sibyl-vie3Poto,felixyu): add an option to control whether the parameters in the
# kernel map are trainable.
class RandomFourierFeatureMapper(dkm.DenseKernelMapper):
  r"""Class that implements Random Fourier Feature Mapping (RFFM) in TensorFlow.

  The RFFM mapping is used to approximate the Gaussian (RBF) kernel:
  ```
  exp(-||x-y||_2^2 / (2 * sigma^2))
  ```

  The implementation of RFFM is based on the following paper:
  "Random Features for Large-Scale Kernel Machines" by Ali Rahimi and Ben Recht.
  (link: https://people.eecs.berkeley.edu/~brecht/papers/07.rah.rec.nips.pdf)

  The mapping uses a matrix `Omega \in R^{d x D}` and a bias vector `b \in R^D`
  where `d` is the input dimension (number of dense input features) and `D` is
  the output dimension (i.e., dimension of the feature space the input is mapped
  to). Each entry of `Omega` is sampled i.i.d. from a (scaled) Gaussian
  distribution and each entry of `b` is sampled independently and uniformly from
  [0, 2 * pi].

  For a single input feature vector x in R^d, its RFFM is defined as:
  ```
  sqrt(2/D) * cos(x * Omega + b)
  ```
  where `cos` is the element-wise cosine function and `x, b` are represented as
  row vectors. The aforementioned paper shows that the linear kernel of
  RFFM-mapped vectors approximates the Gaussian kernel of the initial vectors.
  """

  def __init__(self, input_dim, output_dim, stddev=1.0, seed=1, name=None):
    """Constructs a RandomFourierFeatureMapper instance.

    Args:
      input_dim: The dimension (number of features) of the tensors to be mapped.
      output_dim: The output dimension of the mapping.
      stddev: The standard deviation of the Gaussian kernel to be approximated.
        The error of the classifier trained using this approximation is very
        sensitive to this parameter.
      seed: An integer used to initialize the parameters (`Omega` and `b`) of
        the mapper. For repeatable sequences across different invocations of the
        mapper object (for instance, to ensure consistent mapping both at
        training and eval/inference if these happen in different invocations),
        set this to the same integer.
      name: name for the mapper object.
    """
    # TODO(sibyl-vie3Poto): Maybe infer input_dim and/or output_dim (if not explicitly
    # provided). input_dim can be inferred lazily, the first time map is called.
    # output_dim can be inferred from input_dim using heuristics on the error of
    # the approximation (and, by extension, the error of the classification
    # based on the approximation).
    self._input_dim = input_dim
    self._output_dim = output_dim
    self._stddev = stddev
    self._seed = seed
    self._name = name

  @property
  def name(self):
    """Returns a name for the `RandomFourierFeatureMapper` instance.

    If the name provided in the constructor is `None`, then the object's unique
    id is returned.

    Returns:
      A name for the `RandomFourierFeatureMapper` instance.
    """
    return self._name or str(id(self))

  @property
  def input_dim(self):
    return self._input_dim

  @property
  def output_dim(self):
    return self._output_dim

  def map(self, input_tensor):
    """Maps each row of input_tensor using random Fourier features.

    Args:
      input_tensor: a `Tensor` containing input features. It's shape is
      [batch_size, self._input_dim].

    Returns:
      A `Tensor` of shape [batch_size, self._output_dim] containing RFFM-mapped
      features.

    Raises:
      InvalidShapeError: if the shape of the `input_tensor` is inconsistent with
        expected input dimension.
    """
    input_tensor_shape = input_tensor.get_shape()
    if len(input_tensor_shape) != 2:
      raise dkm.InvalidShapeError(
          'The shape of the tensor should be 2. Got %d instead.' %
          len(input_tensor_shape))

    features_dim = input_tensor_shape[1]
    if features_dim != self._input_dim:
      raise dkm.InvalidShapeError(
          'Invalid dimension: expected %d input features, got %d instead.' %
          (self._input_dim, features_dim))

    # Add ops that compute (deterministically) omega_matrix and bias based on
    # the provided seed.
    # TODO(sibyl-vie3Poto): Storing the mapper's parameters (omega_matrix and bias) as
    # constants incurs no RPC calls to the parameter server during distributed
    # training. However, if the parameters grow too large (for instance if they
    # don't fit into memory or if they blow up the size of the GraphDef proto),
    # stroring them as constants is no longer an option. In this case, we should
    # have a heuristic to choose out of one of the following alternatives:
    # a) store them as variables (in the parameter server)
    # b) store them as worker local variables
    # c) generating on the fly the omega matrix at each step
    # Bug fix: use a self-contained RandomState seeded with `self._seed`
    # instead of np.random.seed(). The draws are identical (same legacy
    # MT19937 stream), but the process-global NumPy RNG is no longer
    # clobbered as a side effect of building the graph.
    rng = np.random.RandomState(self._seed)
    omega_matrix_shape = [self._input_dim, self._output_dim]
    bias_shape = [self._output_dim]

    omega_matrix = constant_op.constant(
        rng.normal(
            scale=1.0 / self._stddev, size=omega_matrix_shape),
        dtype=dtypes.float32)
    bias = constant_op.constant(
        rng.uniform(
            low=0.0, high=2 * np.pi, size=bias_shape),
        dtype=dtypes.float32)

    x_omega_plus_bias = math_ops.add(
        math_ops.matmul(input_tensor, omega_matrix), bias)
    return math.sqrt(2.0 / self._output_dim) * math_ops.cos(x_omega_plus_bias)
| true |
e2226e13c3e425d51d099c914c72e63cf63d73c1 | Python | andb0t/legofy | /legofy.py | UTF-8 | 3,624 | 3.09375 | 3 | [] | no_license | import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
from PIL import Image, ImageFilter
from sklearn.neighbors import NearestNeighbors
import src.utils as utils
def load_colors():
    """Scrape bricklink.com's colour catalogue and return a DataFrame of the
    colours considered usable (available in 2017, minus a hand-picked list of
    rare names), with `r`, `g`, `b` columns appended.

    NOTE(review): depends on the live page layout (fourth <table>, bgcolor
    attributes) — brittle if the site changes.
    """
    black_and_white = False  # debug switch: restrict palette to White/Black
    print('Getting available colors ...')
    # download the color table website (UA header to avoid bot blocking)
    headers = {'User-Agent': "Mozilla/5.0"}
    req = requests.get("https://www.bricklink.com/catalogColors.asp", headers=headers)
    # parse the information
    tree = BeautifulSoup(req.text, "lxml")
    # select the fourth table in the HTML for solid colors
    html_table = tree.select("table")[3].select("table")[0]
    # let pandas read the table
    color_table = pd.read_html(str(html_table), header=0)[0]
    color_table = color_table.drop(["Unnamed: 1", "Unnamed: 2"], axis=1)
    # select the RGB values from the HTML background color and build three new columns with them
    rgb_table = pd.DataFrame([utils.hextoint(td.attrs["bgcolor"]) for td in html_table.select("td[bgcolor]")],
                             columns=["r", "g", "b"])
    color_table = color_table.merge(rgb_table, left_index=True, right_index=True)
    # select colors which are available in 2017 and which are not rare to find
    current_colors = color_table[color_table["Color Timeline"].str.contains("2017")]
    current_colors = current_colors[~(current_colors["Name"].str.contains("Flesh")
                                      | current_colors["Name"].str.contains("Dark Pink")
                                      | (current_colors["Name"] == "Lavender")
                                      | current_colors["Name"].str.contains("Sand Blue"))]
    if black_and_white:
        current_colors = color_table[color_table["Name"].str.contains("White")
                                     | color_table["Name"].str.contains("Black")]
    print('Available colors:')
    pd.set_option('display.expand_frame_repr', False)
    print(current_colors)
    return current_colors
def quantize_colors(picture):
    """Map every pixel of `picture` to the nearest available LEGO colour and
    return the result as a new RGB PIL image."""
    current_colours = load_colors()
    print('Quantize colors ...')
    # fit the NN to the RGB values of the colours; only one neighbour is needed
    nn = NearestNeighbors(n_neighbors=1, algorithm='brute')
    nn.fit(current_colours[["r", "g", "b"]])
    # helper function; finds the nearest colour for a given pixel
    def legofy_pixels(pixel, neighbors, colours):
        new_pixel = neighbors.kneighbors(pixel.reshape(1, -1), return_distance=False)[0][0]
        return tuple(colours.iloc[new_pixel, -3:])
    # Quantize!
    picture = np.array(picture)
    picture = np.apply_along_axis(legofy_pixels, 2, picture, nn, current_colours)
    # Bug fix: convert the freshly quantized array (`picture`), not the
    # module-level `pixelated` global — the old code silently discarded the
    # quantization result and returned the un-quantized image.
    picture = Image.fromarray(np.uint8(picture), mode="RGB")
    return picture
def pixelate_picture(picture, size=None):
    """Median-smooth `picture` and downscale it to `size` = (width, height).

    `size` defaults to the module-level (2*w10, 2*h10) — i.e. 20% of the
    original dimensions — to stay compatible with the original global-based
    behaviour.
    """
    print('Pixelate image ...')
    if size is None:
        size = (2 * w10, 2 * h10)
    return picture.filter(ImageFilter.MedianFilter(7)).resize(size)
# Script driver: load the photo, pixelate + colour-quantize it, then shrink
# it so its longest side is 32 (one LEGO stud per pixel) and save the result.
print('Open image')
image = Image.open("heman.jpg")
print('Scale image')
# get a 10th of the image dimensions and the aspect ratio
w10 = int(image.size[0]/10)
h10 = int(image.size[1]/10)
ratio = image.size[0]/image.size[1]
# smooths the image and scales it to 20%
# pixelate
pixelated = pixelate_picture(image)
# quantize colors
pixelated = quantize_colors(pixelated)
print('Scale back')
# reduce the size of the image according to aspect ratio (32,x)
if ratio < 1:
    # portrait: height is the longer side
    h = 32
    w = int(32*ratio)
else:
    w = 32
    h = int(32/ratio)
print('Get final image')
final = pixelated.resize((w, h))
final.save('lego_heman.jpg')
print('Transform to numpy array')
final_arr = np.array(final) # image as array
| true |
c1cc3037694976b41a04e573b3a534a1f33d4411 | Python | ratakas/pythonAcademy | /appMusic/appv1/convertir.py | UTF-8 | 186 | 2.703125 | 3 | [] | no_license | import re
texto = 'Afaz Natural - "Quizás" LETRA (Video Lyric)'
# Delete every punctuation/special character via a str.translate table.
_SPECIAL_CHARS = "!@#$%^&*()\"[]{};:,./<>?\\|`~-=_+"
removeSpecialChars = texto.translate(str.maketrans("", "", _SPECIAL_CHARS))
print(removeSpecialChars)
dae13781a32056075e2db8fff99eff3651a11dc7 | Python | s-tefan/pygletgrejer | /pygletgrejer/tredeplotter.py | UTF-8 | 2,437 | 2.859375 | 3 | [] | no_license | import math
import numpy as np
import pyglet
from pyglet.gl import *
class PlotWindow(pyglet.window.Window):
    """Pyglet window that draws a wireframe batch (assigned to `batch` from
    module code after construction)."""
    def __init__(self):
        super(PlotWindow, self).__init__()
        # Redraw bookkeeping flag; reset by on_resize, set by on_draw.
        self.drawn = False
    # Class-level default; module code assigns a pyglet Batch here.
    batch = None
    def on_draw(self):
        # clear the screen
        #glClear(GL_COLOR_BUFFER_BIT)
        # we use this one instead (original Swedish note: "vi kör den här istället")
        #win.clear()
        #if not self.drawn:
        if True:
            self.batch.draw()
            print("Ritar!")
            self.drawn = True
        # Extra diagonal line with a red->blue colour gradient.
        pyglet.graphics.vertex_list(2,('v3f',(-1,-1,-1,1,1,1)),('c3b',(1,0,0,0,0,1))).draw(pyglet.gl.GL_LINES)
    # does not work after the subclassing (original Swedish note:
    # "funkar inte efter subklassningen")
    def on_resize(self, width, height):
        print("Resize")
        self.drawn = False
        # set the Viewport
        glViewport(0, 0, width, height)
        # using Projection mode
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        aspectRatio = width / height
        gluPerspective(35, aspectRatio, 1, 1000)
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()
        glScalef(100,100,0.1) # scaling i z here changes the clip [-1,1]
        glTranslatef(3, 2, 0) # translate before scaling
        # center is at origin (lower left) so need to translate
        gluLookAt(0.5,1,2, 0,0,0, 0,1,0) # eye, center, up
        #glRotatef(85,1,0,0)
        self.batch.draw()
    def on_activate(self):
        print("Activated!")
def mesh(fun):
    """Sample `fun` on a regular 5x5 grid over [-1, 1]^2.

    Returns (vertices, line_indices, quad_indices): a flat [x, y, z] vertex
    list plus index pairs for grid edges and index quadruples for grid cells.
    """
    xmin, xmax, ymin, ymax = -1, 1, -1, 1
    nx, ny = 5, 5
    dx = (xmax - xmin) / (nx - 1)
    dy = (ymax - ymin) / (ny - 1)
    vertices, lines, quads = [], [], []
    for ky in range(ny):
        y = ymin + ky * dy
        for kx in range(nx):
            x = xmin + kx * dx
            idx = ky * nx + kx
            vertices.extend([x, y, fun(x, y)])
            if kx > 0:
                # horizontal edge to the left neighbour
                lines.extend([idx - 1, idx])
            if ky > 0:
                # vertical edge to the neighbour below
                lines.extend([idx - nx, idx])
                if kx > 0:
                    # cell spanned by this vertex and its three predecessors
                    quads.extend([idx - nx - 1, idx - nx, idx, idx - 1])
    return vertices, lines, quads
def meshbatch():
    """Create a pyglet Batch holding the mesh edges as indexed GL_LINES."""
    vertices, line_indices, _quads = mesh(lambda x,y : 1)
    batch = pyglet.graphics.Batch()
    batch.add_indexed(len(vertices) // 3, pyglet.gl.GL_LINES, None, line_indices,
                      ('v3f', vertices))
    return batch
print(mesh(lambda x,y : 1))

win = PlotWindow()
win.batch = meshbatch()

# Bug fix: the original had a bare `@win.event` decorator directly above
# `pyglet.app.run()`, which is a SyntaxError (a decorator must be followed by
# a function/class definition), so the file could not even be parsed. The
# event handlers are already defined on the PlotWindow subclass, so the stray
# decorator is simply removed.
pyglet.app.run()
| true |
ede58ca6861e6946fa361c7e118436e56c430aec | Python | heidariank/PodiumScraper | /python/test.py | UTF-8 | 931 | 3.390625 | 3 | [] | no_license | import unittest
from sentiment import get_positivity_scores
import types
class SentimentTestCase(unittest.TestCase):
    """Type-level tests for sentiment.get_positivity_scores (Python 2 code:
    uses `types.ListType` and the `print` statement)."""
    def get_reviews(self):
        # Fixture: two batches of [text, username, title] review triples.
        return [
            [["I love everything!", "user1", "Title1"], ["I hate everything!", "user2", "Title2"]],
            [["I neither love nor hate everything.", "user3", "Title3"], ["I love everything but sometimes I also hate it.", "user4", "Title4"]]
        ]
    # NOTE(review): this bare string was probably meant to be the docstring of
    # the test method below; as written it is a no-op statement in the class
    # body. Left in place to keep behaviour identical.
    """Tests that get_positivity_scores() returns a list of tuples containing
    ([<review text>, <username>, <title>], <positivity score>)`."""
    def test_get_positivity_scores_type(self):
        reviews = self.get_reviews()
        result = get_positivity_scores(reviews)
        self.assertTrue(type(result) is types.ListType)
        print result
        for item in result:
            self.assertTrue(type(item) is types.TupleType)
            self.assertTrue(type(item[0]) is types.ListType)
            self.assertTrue(type(item[1]) is types.FloatType)
if __name__ == '__main__':
    unittest.main()
unittest.main() | true |
75b1167ee008f2c06e6b9ff5068c827e39a538fc | Python | bwargo/python-scripts | /brandNameCompAttrJsonOutputterOnlyBaseSku.py | UTF-8 | 1,303 | 2.609375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import json
from pyexcel_xls import get_data
import csv
import itertools as it
# Read the spreadsheet, pad/transpose its rows so each product (one column in
# the sheet) becomes a record keyed by attribute name, and dump the result as
# a JSON map keyed by SKU.
data = get_data("ProductSelectorFeatures_RJM_010317.xls")
rows = data["brandname Comparison Attributes"]
rows.pop(0) #removing first row of category names for now
#have to do this because rows may have different lengths
fixedStoreRows = []
for storeRow in rows:
    #print storeRow
    # Pad each row with '' to the length of the first row.
    diff_len = len(rows[0]) - len(storeRow)
    if diff_len < 0:
        # NOTE(review): AttributeError is an odd exception type for a data
        # validation failure (ValueError would fit better) — confirm callers
        # before changing.
        raise AttributeError('Length of row is too long')
    fixedStoreRows.extend([storeRow + [''] * diff_len])
#this groups together all attributes per product
#print fixedStoreRows
transposedProductAttributes = [[storeRow[j] for storeRow in fixedStoreRows] for j in range(len(fixedStoreRows[0]))]
#print '-------------------------'
#print transposedProductAttributes
# First transposed entry is the column of attribute names.
attributeNames = transposedProductAttributes.pop(0)
#print attributeNames
products = {}
for x in range(len(transposedProductAttributes)):
    #label each attribute
    productWithAttributeName = dict(zip(attributeNames,transposedProductAttributes[x]))
    #print productWithAttributeName
    products[productWithAttributeName['SKU']] = productWithAttributeName
#print products
#open up file to write results to
with open("productAttributes.json", "w") as outfile:
    json.dump(products, outfile, indent=1)
| true |
69f2a55e16399cb95d03617762e25cca7b0cb9e1 | Python | foobar999/Suchmaschine | /src/vector/cos_score_calculator.py | UTF-8 | 2,293 | 2.75 | 3 | [
"BSD-2-Clause"
] | permissive | import logging
from scipy.sparse import coo_matrix
from src.term import Term
from src.ranked_posting import RankedPosting
class CosScoreCalculator(object):
    """Cosine-similarity scoring over a term -> posting-list index, where each
    posting list exposes `.postings` and each posting `.docID` / `.rank`."""

    def fast_document_cosinus_scores(self, index, numdocs):
        """Return the dense numdocs x numdocs document-similarity matrix
        (M^T * M for the term/document weight matrix M) as nested lists."""
        logging.debug('creating matrix from index, {} terms, {} docs'.format(len(index), numdocs))
        index_mat = self._index_to_sparse_mat(index, numdocs)
        logging.debug('calculating document similarity scores')
        res = index_mat.T.dot(index_mat)
        return res.todense().tolist()

    # TODO: remove at some point (original German note: "irgendwann raus")
    def cosine_score(self, queryDoc, index, numdocs):
        """Score every document against a weighted query given as an iterable
        of (term, weight) pairs; returns one RankedPosting per document."""
        #logging.debug('calculating cosine(DxD), query {}, numdocs {}'.format(queryDoc, numdocs))
        scores = [0] * numdocs
        for termAndW in queryDoc:
            posting_list = index[termAndW[0]].postings
            for posting in posting_list:
                wf_t_d = posting.rank
                scores[posting.docID] += wf_t_d * termAndW[1]
        return [RankedPosting(docID,score) for docID,score in enumerate(scores)]

    def fast_cosine_score(self, query, index, numdocs):
        """Score every document against an unweighted query (iterable of raw
        terms); terms absent from the index contribute nothing."""
        logging.debug('calculating fast cosine, query {}, numdocs {}'.format(query, numdocs))
        scores = [0] * numdocs
        for term in query:
            logging.debug('fast cos: proc term {} (type {})'.format(term, type(term)))
            term_postings = index.get(Term(term))
            if term_postings is not None:
                for posting in term_postings.postings:
                    wf_t_d = posting.rank
                    scores[posting.docID] += wf_t_d
            else:
                logging.debug('term {} not present in this index'.format(term))
        return [RankedPosting(docID,score) for docID,score in enumerate(scores)]

    def _index_to_sparse_mat(self, index, numdocs):
        """Build the (num_terms x numdocs) CSR weight matrix from the index.

        Bug fix: the matrix shape is now passed explicitly. It used to be
        inferred from the largest docID present, so trailing documents with
        no postings were silently dropped and the similarity matrix came out
        smaller than numdocs x numdocs.
        """
        logging.debug('converting index to sparse matrix')
        rows, cols, values = [], [], []
        for i, posting_list in enumerate(index.values()):
            for posting in posting_list.postings:
                rows.append(i)
                cols.append(posting.docID)
                values.append(posting.rank)
        return coo_matrix((values, (rows, cols)), shape=(len(index), numdocs)).tocsr()
| true |
2a8a1976fb041b73d4cfad5e3496ed9f05c24b8d | Python | kran9910/OBSoft-Internship | /jasonplaceholder's API.py | UTF-8 | 3,507 | 3.5625 | 4 | [] | no_license | import requests
import json
# Getters to fetch data from jsonplaceholder's API
#Fetch all posts in the API
def get_all_posts():
    """Return every post from the jsonplaceholder API as a list of dicts."""
    response = requests.get('https://jsonplaceholder.typicode.com/posts')
    return response.json()
#Fetch a single post from the API using its id
def get_post_by_id(post_id):
    """Return the post with the given id."""
    url = 'https://jsonplaceholder.typicode.com/posts/' + str(post_id)
    response = requests.get(url)
    return response.json()
#Fetch all photos in the API
def get_all_photos():
    """Return every photo record from the jsonplaceholder API."""
    response = requests.get('https://jsonplaceholder.typicode.com/photos')
    return response.json()
#Fetch a single photo record from the API using its id
def get_photo_by_id(photo_id):
    """Return the photo record with the given id."""
    url = 'https://jsonplaceholder.typicode.com/photos/' + str(photo_id)
    response = requests.get(url)
    return response.json()
#Fetch all todos in the API
def get_all_todos():
    """Return every todo from the jsonplaceholder API."""
    url = 'https://jsonplaceholder.typicode.com/todos/'
    response = requests.get(url)
    return response.json()
#Fetch a single todo from the API using its id
def get_todo_by_id(post_id):
    """Return the todo with the given id."""
    url = 'https://jsonplaceholder.typicode.com/todos/' + str(post_id)
    response = requests.get(url)
    return response.json()
#Functions to Display Data fetched using the Getters
def print_three_posts():
    """Print the bodies of the first three posts."""
    print("\nThe first 3 bodies of the Posts that are fetched from the API will be printed below:\n")
    shown = 0
    for post in get_all_posts():
        if shown >= 3:
            return 0
        print(post.get('body'))
        print("\n")
        shown += 1
def print_five_todos():
    """Print the first five TODO records."""
    print("\nThe first 5 TODOs that are fetched from the API will be printed below:\n")
    shown = 0
    for todo in get_all_todos():
        if shown >= 5:
            return 0
        print(todo)
        shown += 1
def print_five_photos():
    """Print the URLs of the first five photos."""
    print("\nThe first 5 photos' URLs fetched from the API will be printed below:\n")
    shown = 0
    for photo in get_all_photos():
        if shown >= 5:
            return 0
        print(photo.get('url'))
        print("\n")
        shown += 1
def print_todo_by_id():
    """Prompt for a todo id and print the matching record."""
    requested = input("\nEnter the ID (integer between 1 and 200) of your todo and its details will be printed below: ")
    match = get_todo_by_id(requested)
    if match == {}:
        print("No TODO matches this ID")
    else:
        print(match)
def print_post_by_id():
    """Prompt for a post id and print the matching record."""
    requested = input("\nEnter the ID (integer between 1 and 100) of your post and its details will be printed below: ")
    match = get_post_by_id(requested)
    if match == {}:
        print("No post matches this ID")
    else:
        print(match)
def print_photo_by_id():
    """Prompt for a photo id and print the matching photo's URL."""
    requested = input("\nEnter the ID (integer between 1 and 5000) of your photo and its URL will be printed below: ")
    match = get_photo_by_id(requested)
    if match == {}:
        print("No photo matches this ID")
    else:
        print(match.get('url'))
# Demo driver: print samples from each endpoint, then prompt interactively.
print_three_posts()
print_five_todos()
print_five_photos()
print_todo_by_id()
print_post_by_id()
print_photo_by_id()
# NOTE(review): `exit` shadows the builtin; harmless here only because the
# script ends immediately afterwards.
exit = input("\nPress enter to exit...")
| true |
a2921282226c1cbe775b978060f33735c2c73580 | Python | Santiago2693/Calendario-2020 | /src/ejercicio24.py | UTF-8 | 6,052 | 3.296875 | 3 | [
"MIT"
] | permissive | PATH='puzzle_input/ejercicio24.txt'
PATH2 = 'puzzle_input/ejercicio24M.txt'
def procesarDatos(ruta):
    """Read the puzzle input and return one comma-separated move string per line.

    Every hex move (e, w, se, sw, ne, nw) ends in 'e' or 'w', so inserting a
    comma after each 'e'/'w' splits a line into individual moves; the trailing
    comma is stripped before appending.
    """
    movimientos = list()
    with open(ruta) as archivo:
        for line in archivo:
            auxiliar = line.strip()
            i = 0
            while i < len(auxiliar):
                if auxiliar[i] == "e" or auxiliar[i] == "w":
                    auxiliar = auxiliar[:i+1] + "," + auxiliar[i+1:]
                    i += 2
                else:
                    # Bug fix: the original else-branch built a throwaway
                    # string into a misspelled variable (`auxiliara`);
                    # only the index advance is needed here.
                    i += 1
            movimientos.append(auxiliar[:-1].strip())
    return movimientos
def volterarPorDias(baldosas,dias,adyacentes):
    """Run `dias` days of the hex-tile flipping rules on `baldosas` in place.

    baldosas:   dict {(x, y): "negro"/"blanco"} of known tiles (mutated).
    dias:       number of days to simulate.
    adyacentes: dict of the six hex-neighbour (dx, dy) offsets.

    Rules (all tiles flip simultaneously per day): a black tile with 0 or
    more than 2 black neighbours turns white; a white tile with exactly 2
    black neighbours turns black.
    """
    # One pass of the rules per day.
    for contador in range(dias):
        # Work on a snapshot so all tiles flip simultaneously; `baldosas`
        # receives the new states.
        diccionarioAuxiliar=baldosas.copy()
        # Process every tile already known in the dictionary.
        for clave in diccionarioAuxiliar:
            # Count black neighbours among the tiles defined in the snapshot.
            contadorNegro=0
            for i in adyacentes:
                # Coordinates of each tile adjacent to the current one.
                adyacenteX=adyacentes[i][0]+clave[0]
                adyacenteY=adyacentes[i][1]+clave[1]
                # Count it if it is defined and black.
                if (adyacenteX,adyacenteY) in diccionarioAuxiliar and diccionarioAuxiliar[adyacenteX,adyacenteY]=="negro":
                    contadorNegro+=1
            # Black tile with 0 or more than 2 black neighbours -> white.
            if diccionarioAuxiliar[clave]=="negro"and (contadorNegro==0 or contadorNegro>2):
                baldosas[clave]="blanco"
            # White tile with exactly 2 black neighbours -> black.
            if diccionarioAuxiliar[clave]=="blanco"and contadorNegro==2:
                baldosas[clave]="negro"
            # Now handle white tiles NOT yet defined in the dictionary: any
            # such tile that can flip must neighbour a black tile, so only
            # black tiles need to be used as anchors.
            if diccionarioAuxiliar[clave]=="negro":
                # Visit every neighbour of this black tile.
                for i in adyacentes:
                    adyacenteX=adyacentes[i][0]+clave[0]
                    adyacenteY=adyacentes[i][1]+clave[1]
                    # Only neighbours that are not defined yet.
                    if not(adyacenteX,adyacenteY) in diccionarioAuxiliar :
                        # Count the undefined tile's black neighbours.
                        contadorNegroVacios=0
                        for j in adyacentes:
                            # Count it if it exists in the snapshot and is black.
                            if (adyacenteX+adyacentes[j][0],adyacenteY+adyacentes[j][1]) in diccionarioAuxiliar and diccionarioAuxiliar[adyacenteX+adyacentes[j][0],adyacenteY+adyacentes[j][1]]=="negro":
                                contadorNegroVacios+=1
                        # Exactly 2 black neighbours: define it as black.
                        if contadorNegroVacios==2:
                            baldosas[adyacentes[i][0]+clave[0],adyacentes[i][1]+clave[1]]="negro"
def main(ruta):
    """Count black tiles for the input file `ruta`: once after applying the
    initial flip instructions, and again after simulating 100 days of the
    flipping rules. Results are printed, not returned.
    """
    # Maps each direction name to the (x, y) offset that must be added to
    # move from the centre of the current hexagon to the neighbour.
    valoresMovimiento=dict()
    # Represents every known tile and its current colour.
    baldosas=dict()
    listaMovimientos=procesarDatos(ruta)
    # Offsets applied when moving to a neighbouring hexagon.
    valoresMovimiento["e"]=(692,0)
    valoresMovimiento["w"]=(-692,0)
    valoresMovimiento["se"]=(346,-600)
    valoresMovimiento["sw"]=(-346,-600)
    valoresMovimiento["ne"]=(346,600)
    valoresMovimiento["nw"]=(-346,600)
    #print(listaMovimientos)
    # Process each tile-flipping instruction line.
    for movimiento in listaMovimientos:
        # Each element of this list is one move on the path to the tile
        # that must be flipped.
        listaAuxiliar=list()
        listaAuxiliar=movimiento.split(',')
        # Current coordinates on the tile map (start at the reference tile).
        coordenadaX=0
        coordenadaY=0
        #print(movimiento)
        for i in listaAuxiliar:
            # Each move advances the current coordinates by its offset.
            #print(valoresMovimiento[i][0],",",valoresMovimiento[i][1])
            coordenadaX+=valoresMovimiento[i][0]
            coordenadaY+=valoresMovimiento[i][1]
        #print(coordenadaX,",",coordenadaY)
        # If the target tile is already defined, flip its colour.
        if (coordenadaX,coordenadaY) in baldosas:
            if baldosas[coordenadaX,coordenadaY]=="negro":
                baldosas[coordenadaX,coordenadaY]="blanco"
            else:
                baldosas[coordenadaX,coordenadaY]="negro"
        # Otherwise define it and flip it straight to black.
        else:
            baldosas[coordenadaX,coordenadaY]="negro"
    # Part 1: count the black tiles after the initial flips.
    contador=0
    for clave in baldosas:
        if baldosas[clave]=="negro":
            contador+=1
    print ("El total de baldosas en color negro es de: ",contador)
    # Part 2: simulate 100 days and count again.
    volterarPorDias(baldosas,100,valoresMovimiento)
    contador=0
    for clave in baldosas:
        if baldosas[clave]=="negro":
            contador+=1
    print ("El total de baldosas en color negro despues de 100 dia es de: ",contador)
main(PATH2)
| true |
ace347c132f925125e90a05cc3f733df4173d384 | Python | stharrold/demo | /tests/test_utils/ARCHIVED/archived_test_utils.py | UTF-8 | 1,708 | 2.921875 | 3 | [
"MIT"
] | permissive | # #!/usr/bin/env python
# # -*- coding: utf-8 -*-
# r"""Archived pytests for demo/archived_utils.py
# """
# # Import standard packages.
# # Import __future__ for Python 2x backward compatibility.
# from __future__ import absolute_import, division, print_function
# import sys
# sys.path.insert(0, '.') # Test the code in this repository.
# # Import installed packages.
# import numpy as np
# import pandas as pd
# # Import local packages.
# import demo
# def test_shuffle_dataframe(
# df=pd.DataFrame(
# data=[('A', 'a', 0), ('B', 'b', 1), ('C', 'c', 2)],
# index=['r0', 'r1', 'r2'], columns=['c0', 'c1', 'c2']),
# seed_row=0, seed_col=0,
# ref_df_shuffled=pd.DataFrame(
# data=[(2, 'c', 'C'), (1, 'b', 'B'), (0, 'a', 'A')],
# index=['r2', 'r1', 'r0'], columns=['c2', 'c1', 'c0'])):
# r"""Pytest for demo/utils.py:
# shuffle_dataframe
# """
# test_df_shuffled = demo.utils.shuffle_dataframe(
# df=df, seed_row=seed_row, seed_col=seed_col)
# assert ref_df_shuffled.equals(test_df_shuffled)
# return None
# # TODO
# # def robust_cohen_d(arr1, arr2):
# # r"""Calculate Cohen d statistic for two arrays using rank-based statistics.
# # The Cohen d statistic is a measure of the size of an effect.
# # Rank-based statistics are robust (more tolerant of) outliers.
# # Args:
# # arr1 (numpy.ndarray):
# # arr2 (numpy.ndarray):
# # Arrays for the two data sets.
# # Returns:
# # cohen_d (float)
# # Notes:
# # * cohen_d = (mean(arr1) - mean(arr2)) / from Ch 2 [1]_.
# # References:
# # .. [1]
# # """
| true |
1ed9fb5123f8cd0bcac0d3028d46690e5a7212eb | Python | juarezpaulino/coderemite | /problemsets/Codeforces/Python/A994.py | UTF-8 | 150 | 2.734375 | 3 | [
"Apache-2.0"
] | permissive | """
*
* Author: Juarez Paulino(coderemite)
* Email: juarez.paulino@gmail.com
*
"""
i=lambda:input().split()
n,m=map(int,i())
a,b=i(),i()
print(*[x for x in a if x in b]) | true |
77fac98d215cf446b8b4a8b4910e0fbbd4b4ef4c | Python | SolDavidCloud/sre-bootcamp | /auth_api/python/methods.py | UTF-8 | 1,588 | 2.78125 | 3 | [] | no_license | import hashlib
import jwt
import mysql.connector
# Constants
# NOTE(review): secret is hard-coded and committed to source control —
# in production it should be loaded from the environment or a secret store.
JWT_SECRET = "my2w7wjd7yXF64FIADfJxNs1oupTGAuW"
class Token:
    """Issues JWTs for users whose credentials match the users table."""

    def generate_token(self, received_username, received_password):
        """Validate the credentials against the database.

        Returns a JWT (signed with JWT_SECRET, carrying the user's role)
        when the username exists and the salted SHA-512 hash of the given
        password matches the stored hash; returns None otherwise.

        Fixes over the original:
        - a missing username no longer raises NameError (`salt`/`role`
          were unbound when the query returned no rows);
        - the cursor and connection are closed even on exceptions;
        - debug prints that leaked the password hash and salt are removed.
        """
        # NOTE(review): credentials are hard-coded; move to config/env.
        database = mysql.connector.connect(
            host="bootcamp-tht.sre.wize.mx",
            database="bootcamp_tht",
            user="secret",
            password="noPow3r",
        )
        try:
            cursor = database.cursor()
            try:
                # Parameterized query — safe against SQL injection.
                query = "select password, salt, role from users where username = %s"
                cursor.execute(query, (received_username,))
                # Usernames are presumably unique, so one row suffices —
                # confirm against the schema.
                row = cursor.fetchone()
            finally:
                cursor.close()
        finally:
            database.close()
        if row is None:
            # Unknown username: invalid credentials.
            return
        password, salt, role = row
        # Recompute the salted SHA-512 hash of the supplied password.
        salted_password = received_password + salt
        hashed_salted_password = hashlib.sha512(salted_password.encode()).hexdigest()
        if password != hashed_salted_password:
            # Wrong password: invalid credentials.
            return
        # Valid credentials: issue a JWT carrying the user's role.
        return jwt.encode({"role": role}, JWT_SECRET, algorithm="HS256")
class Restricted:
    """Gatekeeper for data that requires a valid JWT."""

    def access_data(self, authorization):
        """Return the protected message when `authorization` is a JWT
        validly signed with JWT_SECRET; otherwise return "Invalid JWT".

        Catches jwt.InvalidTokenError — the base class of every PyJWT
        decode failure (bad signature, malformed token, expired token,
        ...) — instead of only InvalidSignatureError, so no decode
        error escapes to the caller as an unhandled exception.
        """
        try:
            jwt.decode(authorization, JWT_SECRET, algorithms=["HS256"])
        except jwt.InvalidTokenError:
            return "Invalid JWT"
        return "You are under protected data"
| true |
31e284bb6105b1ba35340643f5b6376bea2359cb | Python | Arturok/TEC | /Intrucción y Taller de Programación/Progras en Clase/Fibonacci_Ver1.0.py | UTF-8 | 224 | 3.609375 | 4 | [
"MIT"
] | permissive | def fib(n):
if isinstance(n, int) and n>=0:
return fib_aux(n)
else:
return "ERROR"
def fib_aux(n):
    """Return the n-th Fibonacci number, with fib_aux(0) == fib_aux(1) == 1.

    Iterative implementation: O(n) time and O(1) space instead of the
    original naive double recursion, which is exponential in n.
    """
    a, b = 1, 1
    for _ in range(n):
        a, b = b, a + b
    return a
| true |
642d75b64c7fb155508466b4a524a80a607e60ad | Python | Aasthaengg/IBMdataset | /Python_codes/p02948/s293551604.py | UTF-8 | 291 | 2.609375 | 3 | [] | no_license | from heapq import heappop,heappush
n,m=map(int,input().split())
L=[[] for _ in range(m)]
for _ in range(n):
a,b=map(int,input().split())
if a<=m:
L[m-a].append(b)
s=0
h=[]
for i in range(m-1,-1,-1):
for l in L[i][::-1]:
heappush(h,-l)
if len(h)>0:
s-=heappop(h)
print(s)
| true |
82379bad53f71da051eb4e8b981525c1dc0df24d | Python | Lagom92/algorithm | /0326/cart.py | UTF-8 | 781 | 2.953125 | 3 | [] | no_license | # 전기 카트
T = int(input())
def perm(n, k): # generate permutations
    """Recursively generate every permutation of the module-level list `p`
    (offices 2..N) by in-place swaps; each finished ordering is appended
    to the module-level `res` with office 1 added at both ends, since every
    route starts and ends at the company (office 1).
    """
    if k == n:
        res.append([1] + p + [1]) # add office 1 before and after the permutation
    else:
        for i in range(k, n):
            p[i], p[k] = p[k], p[i]
            perm(n, k+1)
            p[i], p[k] = p[k], p[i]  # backtrack: undo the swap
# For each test case: enumerate every route 1 -> (permutation of 2..N) -> 1
# and print the minimum total cost from the N x N cost matrix.
for tc in range(1, T+1):
    N = int(input())
    arr = [list(map(int, input().split())) for _ in range(N)]
    # Offices 2..N; office 1 (start/end) is prepended/appended by perm().
    p = [x for x in range(2, N+1)]
    res = []
    perm(N-1, 0)
    minV = 1000000
    # BUG FIX: iterate ALL (N-1)! generated routes, not just the first N-1
    # (the original used `range(N-1)`).
    for i in range(len(res)):
        s = 0
        for j in range(N):
            a, b = res[i][j], res[i][j+1]
            s += arr[a-1][b-1]
            if s > minV:
                # Prune: this route is already worse than the best so far.
                break
        if minV > s:  # keep the minimum route cost
            minV = s
    # BUG FIX: print the minimum cost, not `s` (the cost of the last —
    # possibly partially summed — route tried).
    print('#{} {}'.format(tc, minV))
| true |
5d181cc2d44672b47237da6cea9d91a6de8ba307 | Python | JianFengY/alien_invasion | /chapter14_scoring/alien_invasion.py | UTF-8 | 2,699 | 3.328125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
'''
Created on 2018年1月21日
@author: Jeff Yang
'''
import pygame
from pygame.sprite import Group
from chapter14_scoring.settings import Settings
from chapter14_scoring.ship import Ship
from chapter14_scoring import game_functions as gf
from chapter14_scoring.game_stats import GameStats
from chapter14_scoring.button import Button
from chapter14_scoring.scoreboard import Scoreboard
def run_game():
    """Initialize the game and create a screen object."""
    # Initialize the background settings so pygame can work correctly.
    pygame.init()
    # Create a display window called `screen`; every graphical element of
    # the game is drawn on it.
    # The (width, height) tuple sets the window size in pixels.
    # `screen` is a surface — a part of the screen; each game element
    # (ship, aliens, ...) is itself a surface.
    # display.set_mode() returns the surface representing the whole window.
    ai_settings = Settings()
    screen = pygame.display.set_mode((ai_settings.screen_width, ai_settings.screen_height))
    pygame.display.set_caption("Alien Invasion")
    # Create the Play button.
    play_button = Button(ai_settings, screen, "Play")
    # Create an instance for storing game statistics, and a scoreboard.
    stats = GameStats(ai_settings)
    sb = Scoreboard(ai_settings, screen, stats)
    # Create a ship.
    # Note: only one ship instance is created for the whole game run.
    ship = Ship(ai_settings, screen)
    # Create a group for storing bullets.
    bullets = Group()
    # Create a group for storing aliens.
    aliens = Group()
    # # Create a single alien
    # alien = Alien(ai_settings, screen)
    # Create the fleet of aliens.
    gf.create_fleet(ai_settings, screen, ship, aliens)
    # Start the main game loop.
    while True:
        # Watch for keyboard and mouse events.
        gf.check_events(ai_settings, screen, stats, sb,
                        play_button, ship, aliens, bullets) # check player input
        if stats.game_active: # only call these while the game is active
            ship.update() # update the ship's position
            # Update bullet-related state.
            gf.update_bullets(ai_settings, screen, stats, sb,
                              ship, aliens, bullets)
            # Update the aliens' positions; bullets are updated first so
            # bullet/alien collisions can be checked.
            gf.update_aliens(ai_settings, screen, stats, sb,
                             ship, aliens, bullets)
        # Redraw the screen.
        gf.update_screen(ai_settings, screen, stats, sb,
                         ship, aliens, bullets, play_button)
run_game()
| true |
0a7adaf1b5b7a3248a54d1d0997ebea28acb6a5c | Python | hjimce/tensorflow-nlp | /2_word_segment/segment.py | UTF-8 | 371 | 2.625 | 3 | [] | no_license |
import jieba
import codecs
def fenci(filename, outname):
    """Segment each line of `filename` with jieba (accurate mode, not
    full mode) and write the space-joined tokens to `outname`.
    Both files are read/written as UTF-8.

    Uses `with` blocks so both files are closed even on an exception
    (the original output handle leaked if reading or segmenting raised).
    """
    with codecs.open(outname, 'wb', "utf-8") as out:
        with codecs.open(filename, 'rb', "utf-8") as f:
            for l in f.readlines():
                seg_list = jieba.cut(l, cut_all=False)
                out.writelines(" ".join(seg_list))
fenci("../1_data_prepare/data.utf8",'segment.utf8') | true |
42ebe075c1c87d920954d82682c5b77814626b0d | Python | emreozb/HackerRank_Exercises | /breakingRecords.py | UTF-8 | 430 | 3.125 | 3 | [] | no_license | def breakingRecords(scores):
s_min = 0
s_max = 0
s_min = scores[0]
s_max = scores[0]
count_min = 0
count_max = 0
for score in scores[1:]:
if score > s_max:
s_max = score
count_max += 1
if score < s_min:
s_min = score
count_min += 1
return count_max, count_min
# Demo run: four new highs (4, 21, 36, 42) and no new lows -> (4, 0).
scores = [3,4,21,36,10,28,35,5,24,42]
print(breakingRecords(scores))
| true |
0432e7fecfdce14635f48a1baee4799bbaa446cb | Python | nocLyt/CIS700 | /P1/P0/ex.py | UTF-8 | 1,349 | 3 | 3 | [] | no_license | """
r.txt is init file
Non-anonymized dataset: edge.txt
Mapping: mapping.txt
Anonymized dataset: edge_id.txt
"""
class DirectedUnweightedGraph:
    """Skeleton of a directed, unweighted graph.

    dc -- dict, presumably meant to map node labels to integer ids
          (compare the standalone anonymize() logic) — confirm intent
    n  -- node count
    m  -- edge count

    NOTE(review): add_node / get_node_id / add_edge are unimplemented
    stubs; the class is never used in this file.
    """
    def __init__(self):
        self.dc = dict()
        self.n = 0
        self.m = 0
    def add_node(self, u):
        # TODO: register `u` (e.g. in self.dc) and update self.n.
        pass
    def get_node_id(self, u):
        # TODO: return the id assigned to `u`.
        pass
    def add_edge(self, u, v):
        # TODO: record the edge u -> v and update self.m.
        pass
    def clear(self):
        # Reset to the empty-graph state (same as __init__).
        self.dc = dict()
        self.n = 0
        self.m = 0
def tran2file_1(fname1="r.txt", fname2="edge.txt"):
    """Rewrite the space-separated edge list `fname1` as the
    comma-separated file `fname2`, one edge per line.

    The defaults preserve the original hard-coded file names, so existing
    callers are unaffected; `with` ensures both handles are closed even
    on an exception (the original leaked them on error).
    """
    with open(fname1) as f, open(fname2, 'w') as fout:
        for line in f:
            # split(' ') — not split() — to keep the original field
            # behaviour on runs of spaces.
            fout.write(','.join(line.strip().split(' ')) + "\n")
def anonymize(fname_in='edge.txt', fname_out='edge_id.txt', fname_map='mapping.txt'):
    """Replace node names in the comma-separated edge list `fname_in` with
    1-based integer ids assigned in order of first appearance.

    Writes the anonymized edges ("id1,id2") to `fname_out` and the
    "name,id" pairs (ordered by id) to `fname_map`. Defaults preserve the
    original hard-coded file names. `with` blocks close every handle even
    on error, and the mapping is written directly from `dc` instead of
    building the inverse dict the original used.
    """
    dc = dict()

    def add_node(u):
        # Assign the next id on first sight of `u`.
        if u not in dc:
            dc[u] = len(dc) + 1
        return dc[u]

    with open(fname_in, 'r') as fin, open(fname_out, 'w') as fout:
        for line in fin:
            n1, n2 = line.strip().split(',')
            fout.write("%d,%d\n" % (add_node(n1), add_node(n2)))
    with open(fname_map, 'w') as fmap:
        # ids were assigned 1..len(dc) in insertion order, so sorting by
        # id replays exactly that order.
        for name, node_id in sorted(dc.items(), key=lambda kv: kv[1]):
            fmap.write('%s,%d\n' % (name, node_id))
if __name__ == "__main__":
# tran2file_1()
# anonymize()
pass | true |