blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
81f455f95ed24cf73bea4314751a67bd3caf081c | Python | steph-meyering/DSandAlgorithms | /leetcode/1512.py | UTF-8 | 246 | 2.75 | 3 | [] | no_license | class Solution:
    def numIdenticalPairs(self, nums: List[int]) -> int:
        """Count "good pairs" (i, j) with i < j and nums[i] == nums[j].

        A value occurring val times contributes C(val, 2) pairs.
        NOTE(review): `List` and `Counter` are not imported here; LeetCode's
        runtime injects them -- confirm before running standalone.
        """
        count = Counter(nums)  # value -> number of occurrences
        res = 0
        for val in count.values():
            if val >= 2:
                # ((val-1)^2 + (val-1)) // 2 == val*(val-1)//2 == C(val, 2)
                res += ((val-1) ** 2 + val-1)//2
        return res | true |
eb2272045be8056c51150519580fec9d77d5ce5f | Python | amaurilopez90/SampledSoundSynth | /PCPrototype/code/tools.py | UTF-8 | 5,622 | 3.265625 | 3 | [
"MIT"
] | permissive | # ####################################################################################################
#
# => Contributors: Amauri Lopez, Darrien Pinkman
# => Course: Senior Project I
# => Semester: Fall 2017
# => Advisor: Dr. Anthony Deese
# => Project name: Polyphonic Sampled Sound Synthesizer
# => Description: This project aims to recreate such a music synthesizer that uses sampled sounds as
# WAV files downloaded from the internet and wave manipulation algorithms to remodel
# these sounds to desired outputs, using a raspberry pi as the base computational platform
# and keys built onto a breadboard to model a launchpad for synthesis.
# => Filename: tools.py
# => Description: This file holds all the tools necessary to put together the synthesizer. This "toolbox"
# includes the methods to create keyboard text files that contain the keys pressed by the user
# to outline their keyboard, read the created keyboard text files, and create the configuration files
# that holds the mappings between the keys and their corresponding .WAV files
# => Last Data modified on: 12/16/2017
#
# #####################################################################################################
import pygame
import csv
import os
import shutil
######################################################################################
"""
=> Definition: make_keyboard()
=> Description: This definition opens an interactive session that lets you hit the keys of
                your keyboard in the desired order. This definition saves the input into a text
                file at the file location specified. Press escape to finish.
=> Parameters:
=> - outputfile: Filepath in which the keyboard textfile will be saved to
=> Postcondition: Creates a file to the specified location by outputfile, containing the keys
input by the user for their keyboard
"""
######################################################################################
def make_keyboard(outputfile):
    """Interactively record a keyboard layout.

    Opens a pygame window and writes the name of every key the user
    presses (one per line) to *outputfile* until Escape is pressed.

    Parameters:
        outputfile: path the keyboard text file is written to.

    Returns:
        The path that was written (same as *outputfile*).
    """
    pygame.init()
    pygame.display.set_mode((640, 480))  # a window must exist to receive key events
    try:
        # `with` guarantees the file is flushed/closed even if pygame raises;
        # the original leaked the handle on any exception.
        with open(outputfile, 'w') as txt_file:
            while True:
                event = pygame.event.wait()
                if event.type != pygame.KEYDOWN:
                    continue
                if event.key == pygame.K_ESCAPE:
                    break
                name = pygame.key.name(event.key)
                print(name)
                txt_file.write(name + '\n')
    finally:
        pygame.quit()  # always release the display, even on error
    return outputfile
######################################################################################
"""
=> Definition: read_keyboard()
=> Description: This definition reads a keyboard file and returns a list of keys
=> Parameters:
=> - txt_file: Filepath in which the keyboard textfile will be read from
=> Precondition: Assumes that the input txt_file exists and is one created using the make_keyboard definition in tools.py
=> Postcondition: Returns a list of keys read from the keyboard text file
"""
######################################################################################
def read_keyboard(txt_file):
    """Read a keyboard file created by make_keyboard().

    Each line holds one entry; an entry may contain several key names
    separated by '|'.

    Parameters:
        txt_file: path of the keyboard text file.

    Returns:
        A list of lists, one inner list of key names per line.
    """
    # The original left the handle to be closed by the GC; a context
    # manager closes it deterministically.
    with open(txt_file, 'r') as fh:
        return [line.strip('\n').split('|') for line in fh]
######################################################################################
"""
=> Definition: make_conf()
=> Description: This definition creates a configuration file out of a samples folder and an input keyboard file.
This configuration file holds a mapping between the keys in the keyboard file and the .WAV files in the samples folder
=> Parameters:
=> - samplefolder: The input folder holding all of the .WAV or .ogg files to be played on the keyboard
- output: Filepath in which the configuration file will be saved to
- keyboardfile: A keyboard text file holding the keys that were earlier pressed by the user when making their keyboard
=> Precondition: Assumes that the input samplefolder exists. Also assumes that the keyboardfile exists and is one created using the
make_keyboard definition in tools.py
=> Postcondition: Creates a configuration file at the specified output filepath. Returns the name of that filepath
"""
######################################################################################
def make_conf(samplefolder, output, keyboardfile, startfile=0):
    """Write a CSV mapping keyboard keys to sample files.

    Each row is ``key, <samplefolder>/<filename>``, pairing the keys read
    from *keyboardfile* with the sorted .wav/.ogg files in *samplefolder*
    (skipping the first *startfile* files).

    Parameters:
        samplefolder: folder containing the .wav/.ogg samples.
        output:       path the configuration CSV is written to.
        keyboardfile: keyboard text file created by make_keyboard().
        startfile:    index of the first sample file to use.

    Returns:
        The *output* path.
    """
    keyslist = read_keyboard(keyboardfile)  # list of key-name lists
    files = sorted(
        f for f in os.listdir(samplefolder) if f.endswith(('.wav', '.ogg'))
    )[startfile:]
    if keyslist is None:
        # Defensive fallback (read_keyboard never actually returns None):
        # map every file to the placeholder key '#'.
        keyslist = len(files) * [['#']]
    # The original never closed the CSV file handle, which can leave the
    # last rows unflushed; a context manager closes it deterministically.
    with open(output, 'w') as out_fh:
        conf_file = csv.writer(out_fh, delimiter=',')
        for name, keys in zip(files, keyslist):
            for k in keys:
                conf_file.writerow([k, ' ' + '%s/%s' % (samplefolder, name)])
    return output
# def merge_two_folders(folder1, folder2, outputfolder):
# if not os.path.exists(outputfolder):
# os.makedirs(outputfolder)
# src_files = os.listdir(folder1)
# for file_name in src_files:
# full_file_name = os.path.join(folder1, file_name)
# if (os.path.isfile(full_file_name)):
# shutil.copy(full_file_name, outputfolder)
# src_files = os.listdir(folder2)
# for file_name in src_files:
# full_file_name = os.path.join(folder2, file_name)
# if (os.path.isfile(full_file_name)):
# shutil.copy(full_file_name, outputfolder) | true |
4554164e9cc132fcac3e2e324b644e279897c0e7 | Python | subhashl7/subhashpython | /voworconst.py | UTF-8 | 267 | 3.46875 | 3 | [] | no_license | #subbu#vowel or consonant:
# Classify a single typed letter as a vowel or a consonant.
# Works on Python 2 (raw_input) and Python 3 (input).
try:
    yy = raw_input()
except NameError:
    yy = input()
# Require exactly one ASCII letter: the original's bare range comparison
# let multi-character strings like "ab" through and called them consonants.
if len(yy) == 1 and (('a' <= yy <= 'z') or ('A' <= yy <= 'Z')):
    if yy in ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U']:
        print('Vowel')
    else:
        print('Consonant')
else:
    print('invalid')
| true |
5d6f74295d04aa03b2ba327ebf050c912487a58a | Python | liusy182/tensorflow-trial | /5_input_data.py | UTF-8 | 3,053 | 2.640625 | 3 | [] | no_license | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import pandas as pd
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets import mnist
tf.logging.set_verbosity(tf.logging.INFO)
FLAGS = {'directory': 'tmp/mnist', 'validation_size':5000}
def input_fn(data_set):
    """Build (feature_columns, labels) tensors for tf.contrib.learn.

    BUG(review): FEATURES and LABEL are referenced as module globals but
    are only defined as locals inside csv_input(), so calling this raises
    NameError -- they need to be hoisted to module level (or passed in).
    """
    # One constant tensor per feature column of the pandas DataFrame.
    feature_cols = {k: tf.constant(data_set[k].values)
                  for k in FEATURES}
    labels = tf.constant(data_set[LABEL].values)
    return feature_cols, labels
def csv_input():
    """Train and evaluate a DNNRegressor on the Boston housing CSVs.

    Prints the evaluation loss. NOTE(review): FEATURES and LABEL are local
    here, but input_fn() reads them as module globals -- as written the
    fit/evaluate calls raise NameError inside input_fn; confirm where the
    constants are meant to live. prediction_set is loaded but never used.
    """
    COLUMNS = ["crim", "zn", "indus", "nox", "rm", "age", "dis", "tax", "ptratio", "medv"]
    FEATURES = ["crim", "zn", "indus", "nox", "rm", "age", "dis", "tax", "ptratio"]
    LABEL = "medv"
    training_set = pd.read_csv("data/boston_train.csv", skipinitialspace=True,
                            skiprows=1, names=COLUMNS)
    test_set = pd.read_csv("data/boston_test.csv", skipinitialspace=True,
                        skiprows=1, names=COLUMNS)
    prediction_set = pd.read_csv("data/boston_predict.csv", skipinitialspace=True,
                                skiprows=1, names=COLUMNS)
    feature_cols = [tf.contrib.layers.real_valued_column(k)
                  for k in FEATURES]
    regressor = tf.contrib.learn.DNNRegressor(feature_columns=feature_cols,
                                            hidden_units=[10, 10],
                                            model_dir="tmp/boston_model")
    regressor.fit(input_fn=lambda: input_fn(training_set), steps=5000)
    # evaluation on the held-out test set (single step)
    ev = regressor.evaluate(input_fn=lambda: input_fn(test_set), steps=1)
    loss_score = ev["loss"]
    print("Loss: {0:f}".format(loss_score))
def convert_to(data_set, name):
    """Converts a dataset to tfrecords.

    Serializes every image/label pair in *data_set* into a single
    ``<name>.tfrecords`` file under FLAGS['directory'].

    Parameters:
        data_set: an mnist DataSet with .images (N, rows, cols, depth),
                  .labels and .num_examples attributes.
        name:     basename of the output .tfrecords file.

    Raises:
        ValueError: if the number of images and labels disagree.
    """
    import os  # local import: `os` is never imported at module level in this file

    # Feature-proto helpers; these were referenced but never defined in the
    # original file, so the function raised NameError at runtime.
    def _int64_feature(value):
        return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))

    def _bytes_feature(value):
        return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

    images = data_set.images
    labels = data_set.labels
    num_examples = data_set.num_examples
    if images.shape[0] != num_examples:
        raise ValueError('Images size %d does not match label size %d.' %
                        (images.shape[0], num_examples))
    rows = images.shape[1]
    cols = images.shape[2]
    depth = images.shape[3]
    filename = os.path.join(FLAGS['directory'], name + '.tfrecords')
    print('Writing', filename)
    writer = tf.python_io.TFRecordWriter(filename)
    try:
        for index in range(num_examples):
            image_raw = images[index].tostring()
            example = tf.train.Example(features=tf.train.Features(feature={
                    'height': _int64_feature(rows),
                    'width': _int64_feature(cols),
                    'depth': _int64_feature(depth),
                    'label': _int64_feature(int(labels[index])),
                    'image_raw': _bytes_feature(image_raw)}))
            writer.write(example.SerializeToString())
    finally:
        writer.close()  # close the record file even if serialization fails
def record_input():
    """Download MNIST and write train/validation/test .tfrecords files."""
    # Keep raw uint8 pixels and the (rows, cols, depth) shape so that
    # convert_to() can record the image geometry.
    data_sets = mnist.read_data_sets(FLAGS['directory'],
                                   dtype=tf.uint8,
                                   reshape=False,
                                   validation_size=FLAGS['validation_size'])
    convert_to(data_sets.train, 'train')
    convert_to(data_sets.validation, 'validation')
    convert_to(data_sets.test, 'test')
record_input() | true |
39303fc5a2b5528a2c2bdc5e61d2884747b242e8 | Python | aitorlopez98/iw-ejercicios-python | /Ejercicios/_8_Clases_objetos/Ejercicio 4.py | UTF-8 | 1,180 | 3.84375 | 4 | [] | no_license | import math
class clsTriangulo:
    """A triangle defined by its three side lengths (cat1, cat2, base)."""

    def __init__(self, cat1, cat2, base):
        self.cat1 = cat1  # first side
        self.cat2 = cat2  # second side
        self.base = base  # third side

    def area(self):
        """Return the area via Heron's formula.

        BUGFIX: Heron's formula needs the *semi*-perimeter
        s = (a + b + c) / 2; the original used the full perimeter, which
        produced wrong areas (e.g. ~77.8 instead of 6 for a 3-4-5 triangle).
        """
        a = self.cat1
        b = self.cat2
        c = self.base
        s = (a + b + c) / 2  # semi-perimeter
        return math.sqrt(s * (s - a) * (s - b) * (s - c))

    def forma(self):
        """Classify the triangle by its sides (labels kept in Spanish).

        BUGFIX: the original compared only cat1 == cat2, so triangles such
        as (3, 5, 3) were reported as irregular; any two equal sides now
        count as isosceles.
        """
        a = self.cat1
        b = self.cat2
        c = self.base
        if a == b == c:
            return "Triangulo equilatero"
        elif a == b or a == c or b == c:
            return "Triangulo isosceles"
        else:
            return "Triangulo irregular"

    def perimetro(self):
        """Return the perimeter (sum of the three sides)."""
        return self.cat1 + self.cat2 + self.base
# Interactive driver: read three side lengths, then print the requested
# property (a=area, f=shape, p=perimeter) of the triangle.
cateto1 = float(input("Dato cateto 1: "))
cateto2 = float(input("Dato cateto 2: "))
hipo = float(input("Dato base: "))
tr = clsTriangulo(cateto1, cateto2, hipo)
op = input("Area-a/Forma-f/Perimetro-p: ")
op = op.lower()
# NOTE(review): op.__eq__("a") is an unidiomatic spelling of op == "a".
if op.__eq__("a"):
    print(tr.area())
elif op.__eq__("f"):
    print(tr.forma())
elif op.__eq__("p"):
    print(tr.perimetro())
| true |
1bb48e404d22a8dcee29468a0d126487036dd2a0 | Python | NewtonLicciardiJr/persona | /persona/intent/model.py | UTF-8 | 2,566 | 2.703125 | 3 | [] | no_license | import numpy as np
from keras.models import Sequential
from keras.layers import Input, LSTM, Dense, Embedding
def IntentModel(model):
    """Resolve a model name to its intent-model class.

    Returns OneHotModel or EmbeddingsModel for the case-insensitive names
    "onehot"/"embeddings"; prints a message and returns None otherwise.
    """
    normalized = model.lower()
    if normalized == "onehot":
        return OneHotModel
    if normalized == "embeddings":
        return EmbeddingsModel
    print("{} does not exist".format(normalized))
    return None
class BaseIntentModel:
    """Shared train/decode plumbing for the Keras intent classifiers."""
    def __init__(self):
        self.x_train = None   # training inputs, assigned by subclasses
        self.y_train = None   # training labels, assigned by subclasses
        self.model = None     # Keras model, built by subclasses
        self.history = None   # keras History from the last train() call
    def train(self, optimizer='rmsprop', loss='categorical_crossentropy',
              batch_size=64, epochs=100, validation=0.0, summary=False):
        """Compile and fit self.model on the stored training data."""
        if summary:
            self.model.summary()
        self.model.compile(optimizer, loss)
        self.history = self.model.fit(
            self.x_train, self.y_train,
            batch_size=batch_size, epochs=epochs,
            validation_split=validation)
    def decode(self, input_sequence, output_word_model):
        """Predict the intent of one sequence.

        Returns (intent_word, confidence) where confidence is the highest
        softmax probability of the first prediction.
        """
        output_tokens = self.model.predict(input_sequence)
        token_index = np.argmax(output_tokens[0])
        intent = output_word_model.index2word[token_index]
        confidence = max(output_tokens[0])
        return intent, confidence
class OneHotModel(BaseIntentModel):
    """Intent classifier whose inputs are one-hot encoded token vectors."""
    def __init__(self, x_train, y_train,
                 input_len, output_len,
                 latent_dim=128, activation='relu'):
        # input_len/output_len are the one-hot widths of the input tokens
        # and the intent labels respectively.
        self.x_train = x_train
        self.y_train = y_train
        self.model = \
            self.build_model(input_len, output_len, latent_dim, activation)
    def build_model(self, input_len, output_len, latent_dim, activation):
        """Return an LSTM -> softmax Sequential classifier."""
        model = Sequential()
        model.add(
            LSTM(latent_dim, activation=activation,
                 input_shape=(None, input_len)))
        model.add(Dense(output_len, activation='softmax'))
        return model
class EmbeddingsModel(BaseIntentModel):
    """Intent classifier that learns token embeddings before the LSTM."""
    def __init__(self, x_train, y_train,
                 input_dim, output_dim,
                 latent_dim=128, activation='relu'):
        # input_dim is the vocabulary size; output_dim the number of intents.
        self.x_train = x_train
        self.y_train = y_train
        self.model = \
            self.build_model(input_dim, output_dim, latent_dim, activation)
    def build_model(self, input_dim, output_dim, latent_dim, activation):
        """Return an Embedding -> LSTM -> softmax Sequential classifier.

        NOTE(review): output_dim is used both as the embedding width and the
        softmax size, and the LSTM's input_shape names input_dim although the
        Embedding layer outputs output_dim features -- confirm these
        dimensions against the training pipeline.
        """
        model = Sequential()
        model.add(Embedding(input_dim, output_dim,))
        model.add(
            LSTM(latent_dim, activation=activation,
                 input_shape=(None, input_dim)))
        model.add(Dense(output_dim, activation='softmax'))
        return model
| true |
5e04a33096bebb931a1fbe7027ac2caef11659d5 | Python | songhappy/pythonlearn | /src/leetCode/binarySearchTree/n86bst.py | UTF-8 | 1,999 | 4.3125 | 4 | [] | no_license | """
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
Example of iterate a tree:
iterator = BSTIterator(root)
while iterator.hasNext():
node = iterator.next()
do something for node
"""
class BSTIterator:
    """In-order iterator over a BST using an explicit ancestor stack.

    Invariant: the stack holds the path from the root down to the next
    node to return; the top of the stack is always that next node.

    NOTE(review): a second class named BSTIterator is defined later in this
    file and shadows this one at import time.
    """
    def __init__(self, root):
        # Push the left spine: the deepest-left node is the smallest.
        self.stack = []
        curr = root
        while curr:
            self.stack.append(curr)
            curr = curr.left
    """
    @return: True if there has next node, or false
    """
    def hasNext(self):
        # Values remain as long as any path node is still stacked.
        return len(self.stack) > 0
    """
    @return: return next node
    """
    def next(self):
        # Peek (do not pop) the next node to hand out.
        node = self.stack[-1]
        if node.right:
            # Successor is the leftmost node of the right subtree; the
            # current node stays on the stack as an ancestor.
            curr = node.right
            while curr:
                self.stack.append(curr)
                curr = curr.left
        else:
            # No right subtree: pop this node, then every ancestor whose
            # right subtree has just been exhausted.
            curr = self.stack.pop()
            while len(self.stack) > 0 and self.stack[-1].right == curr:
                curr = self.stack.pop()
        return node
class BSTIterator:
    """Iterator over a BST that pre-flattens the tree.

    The constructor walks the whole tree once and stores the values in
    sorted (in-order) order; next()/hasNext() then just advance a cursor.
    """

    def __init__(self, root):
        self._values = []   # all node values, in sorted order
        self._cursor = -1   # index of the value returned last
        # Iterative in-order traversal (left, node, right).
        stack, node = [], root
        while stack or node:
            while node:
                stack.append(node)
                node = node.left
            node = stack.pop()
            self._values.append(node.val)
            node = node.right

    def next(self) -> int:
        """Return the next smallest value."""
        self._cursor += 1
        return self._values[self._cursor]

    def hasNext(self) -> bool:
        """Return True while values remain."""
        return self._cursor + 1 < len(self._values)
| true |
82f5c8edfc1ac4746784f8c884c9e892f90ff8dc | Python | patrick333/euler-project-solutions | /euler052/solution.py | UTF-8 | 242 | 3.453125 | 3 | [] | no_license | #!/usr/bin/python
#Permuted multiples
def getDigits(N):
    """Return N's decimal digits as a sorted list of character strings."""
    digits = list(str(N))
    digits.sort()
    return digits
def main():
    """Project Euler 52: smallest x such that 2x..6x use the same digits.

    x and 2x must share a digit sum, so x is a multiple of 9; start at
    9999 and step by 9. Prints the answer (142857).
    """
    n = 9999
    while not getDigits(2*n) == getDigits(3*n) == getDigits(4*n) == getDigits(5*n) == getDigits(6*n):
        n += 9
    # print(...) works under both Python 2 and 3; the original used the
    # Python-2-only statement form `print n`.
    print(n)
# print getDigits(194967)
main() | true |
c893541616ae4d3b6c1a2c6783acc9e432cb19cf | Python | SimonWithWoogi/SchedulingProto | /Environment/Renderer.py | UTF-8 | 2,082 | 2.71875 | 3 | [] | no_license | import numpy as np
import tkinter as tk
import matplotlib.colors as mcolors
import pyscreenshot
from PIL import Image
from PIL import ImageTk
ColorList = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf',
'#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
class GanttChart:
    """Tkinter window that displays a Gantt chart for a set of machines.

    The chart is drawn from a numpy array (self.BackBoard) that callers
    update; Render() converts it to a Tk image and refreshes the window.
    """

    def __init__(self, Size, Number_Of_Machines, Number_Of_Kinds):
        if len(ColorList) < Number_Of_Kinds:
            raise Exception('Over range: Please set parameter(Number_Of_Kinds) less than ' + str(len(ColorList)+1))
        self.M = Number_Of_Machines   # number of machine rows
        self.P = Number_Of_Kinds      # number of product kinds (colors)
        self.BoardSize = Size         # shape of the chart array
        self.BackBoard = np.zeros(self.BoardSize)
        # GUI setup
        self.win = tk.Tk()
        # Keep the PhotoImage referenced on self: Tk only borrows it, so a
        # local reference would be garbage-collected and the image go blank.
        pil_image = Image.fromarray(self.BackBoard)
        self.imgtk = ImageTk.PhotoImage(image=pil_image)
        self.GUIBackBoard = tk.Label(self.win, image=self.imgtk)
        # One labelled button per machine down the left-hand side.
        self.GUIMachine = []
        for i in range(Number_Of_Machines):
            self.GUIMachine.append(tk.Button(self.win, text='M'+str(i+1)))
            self.GUIMachine[i].grid(row=i, column=0, sticky=tk.N+tk.S)
        self.GUIBackBoard.grid(row=0, column=1, rowspan=Number_Of_Machines, sticky=tk.W)

    def Reset(self):
        """Clear the chart back to an all-zero (blank) board."""
        self.BackBoard = np.zeros(self.BoardSize)

    def Render(self):
        """Redraw the current BackBoard contents in the window.

        BUGFIX: the original read ``self.Backboard`` (wrong capitalisation,
        AttributeError at runtime), called ``__exit__``/``__del__`` by hand,
        and let the new PhotoImage be garbage-collected.
        """
        pil_image = Image.fromarray(self.BackBoard)
        self.imgtk = ImageTk.PhotoImage(image=pil_image)  # keep a live reference
        self.GUIBackBoard.config(image=self.imgtk)
        self.win.update()

    def Capture(self, Save, Name):
        """Grab a fixed screen region; optionally save it as a PNG."""
        im = pyscreenshot.grab(bbox=(10, 10, 510, 510))  # X1,Y1,X2,Y2
        if Save:
            im.save('./ScreenShot/'+Name+'.png')
        return im

    def SetTitle(self, Text):
        """Set the window title.

        BUGFIX: the original assigned ``self.win.title = Text``, which
        replaces the method instead of changing the title.
        """
        self.win.title(Text)
def main():
    """Open a demo 17-machine Gantt chart window and run the Tk loop."""
    demo_chart = GanttChart((76, 224), 17, 10)
    demo_chart.win.mainloop()
if __name__ == '__main__':
main()
| true |
602a02519f9fc071fff818289715fbefb0235112 | Python | samaeen/leet_code_solutions | /longestCommonPrefix.py | UTF-8 | 215 | 3.171875 | 3 | [] | no_license | class Solution:
def longestCommonPrefix(self, strs):
for i in range(len(strs)):
print(strs[i][0])
# Ad-hoc manual check for Solution.longestCommonPrefix.
a=["flower","flow","flight"]
#print(a[0][2])
#print(len(a))
print(Solution().longestCommonPrefix(a)) | true |
e4ece13eedb6016d008604443d064190c0fa51cb | Python | roger1993/text_classification | /python3/segment.py | UTF-8 | 754 | 2.984375 | 3 | [
"MIT"
] | permissive | import jieba
def main():
    """Segment wiki_texts.txt with jieba, dropping stop words.

    Reads a stop-word list, then writes the space-separated segmentation
    of every line of wiki_texts.txt to wiki_seg.txt, reporting progress
    every 1000 lines.
    """
    stopwordset = set()
    with open('/Users/roger/Downloads/text_classification/stopwords.txt','r',encoding='utf-8') as sw:
        for line in sw:
            stopwordset.add(line.strip('\n'))
    texts_num = 0
    # Open the output inside `with` (the original leaked the handle on any
    # error) and force utf-8 so Chinese text survives any system locale.
    with open('wiki_seg.txt', 'w', encoding='utf-8') as output, \
         open('wiki_texts.txt', 'r', encoding='utf-8') as content:
        for line in content:
            line = line.strip('\n')
            words = jieba.cut(line, cut_all=False)  # precise-mode segmentation
            for word in words:
                if word not in stopwordset:
                    output.write(word + ' ')
            texts_num += 1
            if texts_num % 1000 == 0:
                print("已完成前 %d 行的斷詞" % texts_num)
if __name__ == '__main__':
main() | true |
c8a6f239830fc1ca85a2bf91b613f3fce9b26ba8 | Python | chijuzipi/ChineseAuthor | /src/NPG/urlGenerator2.py | UTF-8 | 1,276 | 2.8125 | 3 | [] | no_license | from bs4 import BeautifulSoup
class URLGenerator:
    """Extracts (or synthesizes) journal article URLs into text files."""

    def __init__(self):
        # URLs scraped from a saved issue-list HTML file.
        self.generate()
        # Alternative: URLs that can be synthesized directly.
        #self.synthesis()

    def generate(self):
        """Scrape Nature Genetics issue URLs from a saved HTML page.

        Writes every qualifying href (see confirm()) to
        archive/processed/NatureGene.txt, one URL per line.
        """
        # Context managers close both handles deterministically; the
        # original leaked them, so output could stay partially buffered.
        with open('archive/NatureGene/NatureGeneIssueList.html', 'r') as f1, \
             open('archive/processed/NatureGene.txt', 'w') as f2:
            soup = BeautifulSoup(f1.read())
            # find every <a href=...> tag
            for item in soup.find_all(href=True):
                url = item["href"]
                if self.confirm(url):
                    f2.write(url + '\n')

    def synthesis(self):
        """Write synthesized Accounts of Chemical Research issue URLs.

        Volumes 1-47 get 12 issues each; volume 48 gets 2. Each output
        line is "<url> <year>".
        """
        parentURL = "http://pubs.acs.org/toc/achre4/"
        with open('archive/processed/AccountChem.txt', 'w') as f:
            for vol in range(1, 48):
                for issue in range(1, 13):
                    url = parentURL + str(vol) + "/" + str(issue)
                    year = str(vol + 1967)
                    f.write(url + " " + year + '\n')
            for vol in range(48, 49):
                for issue in range(1, 3):
                    url = parentURL + str(vol) + "/" + str(issue)
                    year = str(vol + 1967)
                    f.write(url + " " + year + '\n')

    def confirm(self, url):
        """Return True for Nature Genetics journal URLs.

        Returns an explicit False (the original implicitly returned None)
        so the result is always a bool; truthiness is unchanged.
        """
        return "http://www.nature.com/ng/journal/" in url
def main():
    """Run the URL extraction (the constructor does the work)."""
    generator = URLGenerator()  # noqa: F841 - kept for parity with the original
if __name__ == '__main__':
main()
| true |
9316c7524b8992840e9c9da4812965f1ba08ca2a | Python | janvrany/bricknil | /bricknil/sensor/light.py | UTF-8 | 1,944 | 2.640625 | 3 | [
"Apache-2.0"
] | permissive | # Copyright 2019 Virantha N. Ekanayake
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All LED/light output devices"""
from asyncio import sleep, current_task, create_task as spawn # Needed for motor speed ramp
from enum import Enum
from struct import pack
from ..const import Color
from .peripheral import Peripheral
class LED(Peripheral):
    """Changes the LED color on the Hubs::

        @attach(LED, name='hub_led')

        self.hub_led.set_color(Color.red)
    """
    _sensor_id = 0x0017

    async def set_color(self, color: Color):
        """Send a preset Color enumeration value to the LED."""
        # Only the preset palette (values 0-10) is supported for now.
        assert isinstance(color, Color)
        assert color.value < 11
        await self.set_output(0, color.value)
class Light(Peripheral):
    """Driver for the external light accessory.

    Example::

        @attach(Light, name='light')

    then, inside the run body::

        await self.light.set_brightness(brightness)
    """
    _sensor_id = 0x0008

    async def set_brightness(self, brightness: int):
        """Set the light's brightness.

        Args:
            brightness (int): value in [-100, 100]; 0 is off and both
                extremes are maximum brightness.
        """
        # Encode as one signed byte, then read back the raw unsigned value
        # the protocol expects.
        raw_value, = pack('b', int(brightness))
        await self.set_output(0, raw_value)
| true |
4bdecfa16459c2703cddaeb16c3221c0ecdeffbb | Python | gamecmt/jandan | /img_spider.py | UTF-8 | 7,255 | 2.578125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
import hashlib
import re
import base64
import os
import sqlite3
import time
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
def page_source(url):
    """Fetch a page's HTML with headless Firefox (waits 3s for JS to render)."""
    firefox_options = Options()
    firefox_options.add_argument('--headless')
    browser = webdriver.Firefox(options=firefox_options)
    browser.get(url)
    time.sleep(3)  # give client-side scripts time to finish
    markup = browser.page_source
    browser.quit()
    return markup
def get_page_number(html):
    """Extract the current comment-page number from a jandan page.

    Returns the number as a string, or None when the marker span is absent.
    """
    pattern = re.compile(r'<span class="current-comment-page">\[(.*?)\]</span>')
    matches = re.findall(pattern, html)
    # The original wrapped matches[0] in a bare `except:`, which would also
    # swallow KeyboardInterrupt/SystemExit; only an empty match is expected.
    if not matches:
        return None
    return matches[0]
def get_next_page_url(html):
    """Extract the "Older Comments" link and return it as a full URL.

    Returns None when no previous-page link exists (e.g. on the last page).
    """
    pattern = re.compile(
        r'<a title="Older Comments" href="//(.*?)" class="previous-comment-page">')
    matches = re.findall(pattern, html)
    # Explicit emptiness check instead of the original bare `except:`.
    if not matches:
        return None
    return "http://{}".format(matches[0])
def get_img_comments(html):
    """Split a jandan page into per-image comment chunks.

    The hot board yields 50 chunks; the boring/girl sections yield 25.
    """
    chunk_re = re.compile(
        '<span class="righttext">((?:.|\n)*?)<a href="javascript:;" class="tucao-btn"')
    return chunk_re.findall(html)
def get_img_id(img_comment):
    """Return the image id embedded in a chunk's /t/ link."""
    id_re = re.compile('<a href="/t/(.*?)"')
    return id_re.findall(img_comment)[0]
def get_img_url(img_comment):
    """Return the chunk's image URL(s), comma-separated when there are several."""
    url_re = re.compile('<a href="//(.*?)" target="_blank" class')
    return ",".join(url_re.findall(img_comment))
def get_img_oo(img_comment):
    """Return the chunk's OO (like) count.

    Returns the count as a string when found, or the int 0 when the marker
    is missing (kept for compatibility -- note the str/int type mismatch).
    """
    oo_re = re.compile('OO</a> \[<span>(.*?)</span>')
    matches = oo_re.findall(img_comment)
    # Explicit emptiness check instead of the original bare `except:`.
    return matches[0] if matches else 0
def get_img_xx(img_comment):
    """Return the chunk's XX (dislike) count.

    Returns the count as a string when found, or the int 0 when the marker
    is missing (kept for compatibility -- note the str/int type mismatch).
    """
    xx_re = re.compile('XX</a> \[<span>(.*?)</span>')
    matches = xx_re.findall(img_comment)
    # Explicit emptiness check instead of the original bare `except:`.
    return matches[0] if matches else 0
def get_page_info(html):
    """Parse one jandan page.

    Returns ([img_id, img_url, img_oo, img_xx] per image, the current page
    number, and the URL of the next (older) page).
    """
    img_info = [
        [get_img_id(chunk), get_img_url(chunk),
         get_img_oo(chunk), get_img_xx(chunk)]
        for chunk in get_img_comments(html)
    ]
    return img_info, get_page_number(html), get_next_page_url(html)
def distinct():
    """Remove duplicate image records, keeping the earliest row per img_url."""
    connection = sqlite3.connect('jandan.db')
    # For each table, delete every row whose rowid is not the minimum rowid
    # of its img_url group (i.e. keep only the oldest record).
    connection.execute(
        "delete from wuliao where rowid not in(select min(rowid) from wuliao group by img_url);"
    )
    connection.execute(
        "delete from meizi where rowid not in(select min(rowid) from meizi group by img_url);"
    )
    connection.commit()
    connection.close()
def check_db(db_name):
    """Create the jandan database (meizi + wuliao tables) if it is absent."""
    if os.path.isfile(db_name):
        return  # database already exists; nothing to do
    conn = sqlite3.connect(db_name)
    # Both tables share the same schema; only the name differs.
    schema = ("CREATE TABLE `{}` (`id` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE, "
              "`img_id` INTEGER DEFAULT 0, `img_url` TEXT NOT NULL,"
              "`img_oo` INTEGER NOT NULL DEFAULT 0, `img_xx` INTEGER NOT NULL DEFAULT 0, "
              "`datetime` TEXT DEFAULT CURRENT_TIMESTAMP);")
    conn.execute(schema.format('meizi'))   # girl-image table
    conn.execute(schema.format('wuliao'))  # boring-image table
    conn.commit()
    conn.close()
def img_wuliao_sqlite(page_number, img_info):
    """Upsert boring-section image records into the wuliao table of jandan.db.

    page_number is only used in the progress message; img_info is a list of
    [img_id, img_url, img_oo, img_xx] rows as produced by get_page_info().
    """
    conn = sqlite3.connect('jandan.db')
    cursor = conn.cursor()
    for img_one_info in img_info:
        # Does a row with this img_id already exist?
        cursor.execute("select id from wuliao where img_id=?",
                       (img_one_info[0], ))
        values = cursor.fetchone()
        try:
            value = values[0]
        except Exception:
            # fetchone() returned None -> no existing row.
            value = None
        if value is None:
            conn.execute(
                "INSERT INTO wuliao (img_id,img_url,img_oo,img_xx) VALUES (?,?,?,?)",
                img_one_info)
        else:
            # Refresh the stored vote counts for the existing row.
            cursor.execute(
                'UPDATE wuliao SET img_id=?,img_url=?,img_oo=?,img_xx=? WHERE id = ?',
                (img_one_info[0], img_one_info[1], img_one_info[2],
                 img_one_info[3], value))
    conn.commit()
    conn.close()
    print("wuliao " + page_number + " records successfully.")
def img_meizi_sqlite(page_number, img_info):
    """Upsert girl-section image records into the meizi table of jandan.db.

    Near-duplicate of img_wuliao_sqlite with only the table name changed;
    page_number is used for the progress message only.
    """
    conn = sqlite3.connect('jandan.db')
    cursor = conn.cursor()
    for img_one_info in img_info:
        # Does a row with this img_id already exist?
        cursor.execute("select id from meizi where img_id=?",
                       (img_one_info[0], ))
        values = cursor.fetchone()
        try:
            value = values[0]
        except Exception:
            # fetchone() returned None -> no existing row.
            value = None
        if value is None:
            conn.execute(
                "INSERT INTO meizi (img_id,img_url,img_oo,img_xx) VALUES (?,?,?,?)",
                img_one_info)
        else:
            # Refresh the stored vote counts for the existing row.
            cursor.execute(
                'UPDATE meizi SET img_id=?,img_url=?,img_oo=?,img_xx=? WHERE id = ?',
                (img_one_info[0], img_one_info[1], img_one_info[2],
                 img_one_info[3], value))
    conn.commit()
    conn.close()
    print("meizi " + page_number + " records successfully.")
if __name__ == '__main__':
    check_db('jandan.db')
    # Download the "hot" board (single page, stored in the wuliao table).
    url = "http://jandan.net/top"
    html = page_source(url)
    img_info, page_number, next_page_url = get_page_info(html)
    img_wuliao_sqlite('热榜', img_info)
    # Download the boring-pictures section, following "older" links.
    url = "http://jandan.net/pic"
    html = page_source(url)
    img_info, page_number, next_page_url = get_page_info(html)
    img_wuliao_sqlite(page_number, img_info)
    # NOTE(review): with `or`, the loop keeps going while page_number >= 100
    # even when next_page_url is None, which would crash page_source --
    # presumably `and` was intended; confirm before changing.
    while (next_page_url is not None or int(page_number) >= 100):
        html = page_source(next_page_url)
        img_info, page_number, next_page_url = get_page_info(html)
        img_wuliao_sqlite(page_number, img_info)
    # Download the girls section into the meizi table.
    url = "http://jandan.net/ooxx"
    html = page_source(url)
    img_info, page_number, next_page_url = get_page_info(html)
    img_meizi_sqlite(page_number, img_info)
    while (next_page_url is not None or int(page_number) >= 50):
        html = page_source(next_page_url)
        img_info, page_number, next_page_url = get_page_info(html)
        img_meizi_sqlite(page_number, img_info)
    # Finally de-duplicate rows that share an img_url.
    distinct()
| true |
d54e0cf415e6798e02ce8be6c2effc28e3ec5807 | Python | telegrambotproject/MainRepository | /functions.py | UTF-8 | 8,727 | 2.59375 | 3 | [] | no_license | import requests
import pickle
import datetime
import requests
import json
import urllib
import urllib.request as urlrequest
import ssl
now = datetime.datetime.now()
# functions for requests
with open('keys/imdbapi.txt') as f:
imdb_key = f.read()
with open('keys/apikey.txt') as f:
key = f.read()
def request_proxy(proxy):
    """Install *proxy* as the global urllib proxy (used for Telegram file downloads)."""
    handler = urlrequest.ProxyHandler(proxy)
    urllib.request.install_opener(urlrequest.build_opener(handler))
def fake_ssl():
    """Globally disable HTTPS certificate verification.

    No-op on Python builds that lack the private hook (hence the getattr
    probe, mirroring the original AttributeError guard).
    """
    unverified = getattr(ssl, '_create_unverified_context', None)
    if unverified is not None:
        ssl._create_default_https_context = unverified
def save_obj(obj, name):
    """Pickle *obj* to obj/<name>.pkl (the obj/ directory must already exist)."""
    with open('obj/{}.pkl'.format(name), 'wb') as file:
        pickle.dump(obj, file, pickle.HIGHEST_PROTOCOL)
def load_obj(name):
    """Unpickle and return the object stored at obj/<name>.pkl."""
    with open('obj/{}.pkl'.format(name), 'rb') as file:
        return pickle.load(file)
def search_movies(id):
    """Return today's movie listing (JSON) for cinema *id* from the kinohod API."""
    response = requests.get(
        url='https://api.kinohod.ru/api/rest/site/v1/cinemas/{}/movies'.format(id),
        params={
            'apikey': key,
            'limit': 10,
            'date': now.strftime("%d-%m-%Y"),
        })
    return response.json()
def search_current_movies(movie_number):
    """Return up to *movie_number* movies showing today (kinohod API JSON)."""
    response = requests.get(
        url='https://api.kinohod.ru/api/rest/site/v1/movies',
        params={
            'apikey': key,
            'date': now.strftime("%d-%m-%Y"),
            'limit': movie_number,
        })
    return response.json()
def nearest_cinemas(lat, lon):
    """Return cinemas around (lat, lon), sorted nearest-first."""
    response = requests.get(
        url='https://api.kinohod.ru/api/rest/site/v1/cinemas',
        params={
            'apikey': key,
            'latitude': lat,
            'longitude': lon,
            'sort': 'distance',
        })
    cinemas = response.json()
    # Re-sort client-side by distance as well, mirroring the original.
    return sorted(cinemas, key=lambda cinema: cinema['distance'])
# print(nearest_cinemas(55.730897, 37.629541))
# nearest_cinemas() latitude and longitude from the user
# in the input of the function
def sessions(id, movie_name):
    """Return today's schedule of *movie_name* at cinema *id* (kinohod JSON)."""
    response = requests.get(
        url='https://api.kinohod.ru/api/rest/site/v1/cinemas/{}/schedules'.format(id),
        params={
            'apikey': key,
            'date': now.strftime("%d-%m-%Y"),
            'search': movie_name,
        })
    return response.json()
def search_new_by_ganres(ganre):
    """Print original titles of recommended movies tagged with genre *ganre*."""
    response = requests.get(
        url='https://api.kinohod.ru/api/rest/site/v1/movies/recommend',
        params={'apikey': key})
    for movie in response.json():
        for genre in movie['genres']:
            if genre['name'] == ganre:
                print(movie['originalTitle'])
# search_new_by_genres('экшен') there are around 30 ganres
def get_id_cinema(cinema_name):
    """Print the id of every cinema whose short title equals *cinema_name*."""
    response = requests.get(
        url='https://api.kinohod.ru/api/rest/site/v1/cinemas',
        params={'apikey': key})
    for cinema in response.json():
        if cinema['shortTitle'] == cinema_name:
            print(cinema['id'])
# give this function a name of the cinema and it will give you the ID
# Example: get_id('5 Звезд на Новокузнецкой')
def date_conversion(date):
    """Convert an ISO '%Y-%m-%d' date string to '%b %d %Y' (e.g. 'Jan 05 2020')."""
    parsed = datetime.datetime.strptime(date, '%Y-%m-%d')
    return parsed.strftime('%b %d %Y')
def get_imdb_id(movie_name):
    """Search TMDB for *movie_name*.

    Returns the search-result JSON when the request succeeded and found at
    least one movie; otherwise prints the payload and returns False.
    """
    # First request: find movies with a similar title.
    response = requests.get(
        url='https://api.themoviedb.org/3/search/movie',
        params={'api_key': imdb_key, 'query': movie_name})
    data = response.json()
    if response.status_code == 200 and int(data['total_results']) > 0:
        return data
    print(data)
    return False
def get_future_movies(data):
    """From a TMDB search result, pick the nearest future release.

    Returns (imdb-id-derived value, original_title, formatted_date), or
    (False, None, None) when the details request is skipped.
    """
    film_date = "3000-01-01"
    film_id = "False"
    for m in data['results']:  # track the movie with the earliest future release
        if now.strftime("%Y-%m-%d") < m['release_date'] < film_date:
            film_date = m['release_date']
            film_id = m['id']
    # NOTE(review): film_id is initialised to the *string* "False", so this
    # comparison is always True and the request runs even when no future
    # release was found -- confirm whether `film_id != "False"` was meant.
    if film_id != False:
        URL = 'https://api.themoviedb.org/3/movie/' + str(film_id)
        PARAMS = {
            'api_key': imdb_key,
        }
        r = requests.get(url=URL, params=PARAMS)  # second request: full movie details
        data = r.json()
        film_date = date_conversion(film_date)
        # NOTE(review): when 'imdb_id' is present, `[2]` indexes into the id
        # *string* (returning its third character); only the missing-key
        # fallback [False]*3 yields False -- this looks like a bug; confirm
        # the intended return value with the callers.
        return data.get('imdb_id', [False] * 3)[2], data.get('original_title', False), film_date
    else:
        print(data)
        return False, None, None
def current_movie_list():
    """Print '"title","title"' lines for TMDB's upcoming movies (page 1 only)."""
    for page in range(1, 2):  # only the first page is fetched
        response = requests.get(
            url='https://api.themoviedb.org/3/movie/upcoming',
            params={'api_key': imdb_key, 'page': page})
        results = response.json()['results']
        print(results)
        for movie in results:
            title = str(movie['title'])
            print(f'"{title}","{title}"')
# Поиск мест рядом через google api
# loc = "Широта, Долгота"
# name = "Название места"
def search(loc: str, name, g_key):
    """Google Places nearby search around *loc* ("lat,lng") for *name*.

    Returns the JSON response (1500m radius, ranked by prominence).
    """
    response = requests.get(
        url='https://maps.googleapis.com/maps/api/place/nearbysearch/json',
        params={'key': g_key,
                'name': name,
                'radius': 1500,
                'location': loc,
                'rankby': 'prominence'})
    return response.json()
# loc = "Широта, Долгота" (место начала)
# waypoints = ["Широта, Долгота", ...] (указывать все места по порядку)
# waypoint_id = [id места, ...] (Опционально, но нельзя указывать отдельно от параметра waypoints.)
def route(loc, waypoints, waypoint_id):
    """Build (and print) a Google Maps walking-directions URL.

    loc is the "lat,lng" start point; waypoints/waypoint_id list the stops
    in order, the last one becoming the destination. NOTE: both lists are
    cleared in place before returning.
    """
    query = {'api': '1',
             'origin': loc,
             'waypoints': '|'.join(waypoints[:-1]),
             'waypoint_place_ids': '|'.join(waypoint_id[:-1]),
             'destination': waypoints[-1],
             'destination_place_id': waypoint_id[-1],
             'travelmode': 'walking'}
    prepared = requests.Request(
        'GET', url='https://www.google.com/maps/dir/', params=query).prepare()
    print(prepared.url)
    # The caller's lists are consumed: empty them for the next trip.
    waypoints.clear()
    waypoint_id.clear()
    return prepared.url
def google_speech_request(file):
    """Transcribe a Telegram voice note and run it through Dialogflow.

    *file* is a file-like object holding OGG audio.  The audio is saved to
    disk, converted to WAV, transcribed with Google Cloud Speech, and the
    text is sent to a Dialogflow agent.  Returns a human-readable summary
    string (or an error description).
    """
    from pydub import AudioSegment
    import speech_recognition as sr
    import apiai
    with open('keys/auth.json') as f:  # credentials for Google Speech
        credentials = f.read()
    with open('keys/dialogflow.txt') as f:  # API key for Dialogflow
        dialogflow = f.read()
    r = sr.Recognizer()  # required by the "speech_recognition" library
    with open('audio.ogg', 'wb') as audio:  # save the incoming audio to disk
        audio.write(file.read())
    ogg = AudioSegment.from_ogg("audio.ogg")  # convert the OGG file to WAV
    ogg.export("audio.wav", format="wav")
    with sr.AudioFile('audio.wav') as source:
        audio = r.record(source)
    try:
        text = r.recognize_google_cloud(audio, credentials_json=credentials)  # Google Speech request
        request = apiai.ApiAI(dialogflow).text_request()
        request.lang = 'en'
        request.query = text
        print(text)
        responseJson = json.loads(request.getresponse().read().decode('utf-8'))
        print(responseJson)
        response = responseJson['result']['parameters']  # unpack the JSON and pull out the answer
        # If the bot produced an answer, send it to the user; otherwise
        # Google could not make sense of the audio.
        if response:
            return f'You said: {text}\n Bot responded: {response}'
        else:
            return f'I think you said: "{text}", but I did not understand you'
    except sr.UnknownValueError:
        return 'UnknownValueError'
    except sr.RequestError as e:
        return f"Could not request results from Google Speech Recognition service; {e}"
def google_cinema_handler(response):
    """Extract the 'date' parameter from a Dialogflow response mapping.

    The original body computed ``response.get('date')`` and discarded the
    result; returning it makes the helper usable while remaining backward
    compatible (callers that ignored the implicit None are unaffected).
    Returns None when the key is absent.
    """
    return response.get('date')
def google_notify_handler():
    """Stub for a future notification handler; intentionally does nothing yet."""
    pass
| true |
87fb791555073a1f95760b36b5a65af9ee958741 | Python | hmcRobotLab/robot-reu-2012 | /irobot_nav/src/HandleData.py.save.1 | UTF-8 | 9,691 | 2.515625 | 3 | [] | no_license | #!/usr/bin/env python
import roslib; roslib.load_manifest('irobot_nav')
import rospy
import irobot_mudd
import cv_bridge
import cv
import sensor_msgs.msg as sm
from std_msgs.msg import String
from irobot_mudd.srv import *
from irobot_mudd.msg import *
import TheHive
import ImageProcessing
import RangeProcessing
import StateMachine
#Get data and robot instances
D = TheHive.get_data_instance()
R = TheHive.get_robot_instance()
################## BEGIN DATA HANDLING FUNCTIONS ###################
def handle_sensor_data(data):
    """Handle_sensor_data is called every time the robot gets a new sensorPacket.

    Stores the packet on the shared data object, reports bumps, and shuts
    the node down when the Play button is pressed.
    """
    #print dir( data )
    # Keep the latest packet available to the rest of the program.
    D.data = data
    #Check for a bump
    if data.bumpRight or data.bumpLeft:
        print "Bumped!"
    #Check if play button was pressed
    if data.play:
        print "Play button pressed!"
        # Stop the robot's state machine, then shut down this ROS node.
        StateMachine.state_stop()
        rospy.signal_shutdown("play button pressed")
def handle_image_data(data):
    """Handles data from the Kinect, mouse, and keyboard.

    Converts the incoming RGB image to OpenCV form, reruns the image
    processing pipeline, dispatches pending key presses, and refreshes
    the display windows.
    """
    #Get the incoming RGB image from the Kinect
    D.image = D.bridge.imgmsg_to_cv(data, "bgr8")

    if D.created_images == False:
        #Initialize the additional images we need for processing
        ImageProcessing.initialize(D)
        D.created_images = True

    # Recalculate threshold image
    ImageProcessing.threshold_image(D)

    # Recalculate blob in main image
    ImageProcessing.find_biggest_region(D)

    # Check on the display of dragged section
    ImageProcessing.mouse_section(D)

    #Display target circle
    #ImageProcessing.target_coord(D)

    #Display info box on image
    ImageProcessing.draw_on_image(D)

    #Handle incoming key presses
    key_press = cv.WaitKey(5) & 255
    if key_press != 255:             #Handle only if it's a real key
        check_key_press(D, key_press)    #(255 = "no key pressed")

    #Update the displays:
    #Show main image in the image window
    #cv.ShowImage('Image', D.image)

    #Show threshold image in the threshold window
    # FIX: this assignment had been mangled into the comment line above,
    # which left `currentThreshold` undefined on the ShowImage call below.
    currentThreshold = getattr(D, D.current_threshold)
    cv.ShowImage('Threshold', currentThreshold)
def handle_range_image(data):
    """Callback invoked for every depth (range) image from the Kinect."""
    # Convert the ROS depth message into a 32-bit float OpenCV image.
    D.range = D.bridge.imgmsg_to_cv(data, "32FC1")

    # One-time initialisation of the range-processing state.
    if not D.range_image:
        RangeProcessing.initialize(D)
        D.range_image = True

    # Recompute the horizontal and vertical angles between the marker
    # points and draw the overlay on the image.
    D.xAngle = RangeProcessing.calculate_angles(D, D.p1, D.p2, "horizontal")
    D.yAngle = RangeProcessing.calculate_angles(D, D.p3, D.p4, "vertical")
    RangeProcessing.draw_on_image(D)

    # Dispatch a key press if one is pending (255 means "no key pressed").
    key = cv.WaitKey(5) & 255
    if key != 255:
        check_key_press(D, key)

    # Refresh the Range window with the annotated depth image.
    cv.ShowImage('Range', D.range)
def handle_keyboard_data(data):
    """Handles all input from the keyboard."""
    # Placeholder: key presses are currently dispatched via check_key_press().
    pass
def handle_mouse_data(data):
    """Handles all input from the mouse."""
    # Placeholder: mouse events are currently handled by mouseImage()/mouseRange().
    pass
def mouseImage(event, x, y, flags, param):
    """Handles incoming mouse input to the Image window.

    Left press/release record a drag rectangle (used to set colour
    thresholds depending on D.mode); right click sets the target point;
    mouse movement while pressed updates the drag endpoint.
    """
    if event==cv.CV_EVENT_LBUTTONDOWN: #Clicked the left button
        print "x, y are", x, y
        (b,g,r) = D.image[y,x]
        print "r,g,b is", int(r), int(g), int(b)
        (h,s,v) = D.hsv[y,x]
        print "h,s,v is", int(h), int(s), int(v)
        # Remember where the drag started.
        D.down_coord = (x,y)
        D.mouse_down = True
    elif event==cv.CV_EVENT_LBUTTONUP: #Let go of the left button
        print "x, y are", x, y
        (b,g,r) = D.image[y,x]
        print "r,g,b is", int(r), int(g), int(b)
        (h,s,v) = D.hsv[y,x]
        print "h,s,v is", int(h), int(s), int(v)
        D.up_coord = (x,y)
        D.mouse_down = False
        if D.mode == "clear":
            D.sections = []
        else: #Start, add, or subtract -- put lower coordinates first
            x0, y0, x1, y1 = D.down_coord[0], D.down_coord[1], D.up_coord[0], D.up_coord[1]
            # Normalise the rectangle so (x0, y0) is the top-left corner.
            if x0 > x1:
                x0, x1 = x1, x0
            if y0 > y1:
                y0, y1 = y1, y0
            # "start" wipes any previously recorded sections first.
            if D.mode == "start":
                D.sections = []
            # Tag: 'a' = add this area's colours, 's' = subtract them.
            mode_dict = {"start":'a', "add":'a', "subtract":'s'}
            D.sections.append([mode_dict[D.mode], (x0, y0), (x1, y1)])
            ImageProcessing.process_section(D)
    elif event == cv.CV_EVENT_RBUTTONDOWN: #Right click
        D.target_coord = (x, y)
        ImageProcessing.target_coord(D)
    elif D.mouse_down and event==cv.CV_EVENT_MOUSEMOVE: #Mouse just moved
        # Keep the live end of the drag rectangle up to date.
        D.up_coord = (x,y)
def mouseRange(event, x, y, flags, param):
    """Handles incoming mouse input to the Range window.

    A left click either samples the clicked pixel or, when the mode is one
    of the "set*" states, records the click as one of the four reference
    dots and restores the previous mode.
    """
    #If the left button was clicked
    if event==cv.CV_EVENT_LBUTTONDOWN:
        print "x, y are", x, y
        # NOTE(review): this samples D.image although the window shows the
        # depth image and the message below says "depth value";
        # D.range[y,x] may have been intended -- confirm before changing.
        pixel_val= D.image[y,x]
        print "the pixel's depth value is", pixel_val
        if D.mode == "setLeft":
            D.dot1 = (x,y)
            D.mode = D.lastmode
        elif D.mode == "setRight":
            D.dot2 = (x,y)
            D.mode = D.lastmode
        elif D.mode == "setTop":
            D.dot3 = (x,y)
            D.mode = D.lastmode
        elif D.mode == "setDown":
            D.dot4 = (x,y)
            D.mode = D.lastmode
################### END DATA HANDLING FUNCTIONS ####################
######## TEMPORARY CALLBACK FUNCTIONS UNTIL WE GET TOPIC PUBLISHING/SUBSCRIBING WORKING
def check_key_press(D, key_press):
"""Handles incoming key presses."""
D.last_key_pressed = key_press
if key_press == ord('q') or key_press == 27: #If a 'q' or ESC was pressed
R.move(0,0)
print "quitting"
rospy.signal_shutdown( "Quit requested from keyboard" )
elif key_press == ord('h'):
print " Keyboard Command Menu"
print " =============================="
print " ESC/q: quit"
print " h : help menu"
print " s : save thresholds to file"
print " l : load thresholds from file"
print " c : mousedrags will no longer set thresholds, kept values will be cleared"
print " a : mousedrag will assign thresholds to area within drag, \n" + \
" resets on new click or drag"
print " r : mousedrags will remove the area under consideration, \n" + \
" must have set an area in 'a' mode first"
print " m : mousedrags will add the area under consideration, \n" + \
" must have set an area in 'a' mode first"
print " t : show total threshold image in threshold window"
print " A : activate robot for moving, press A again to deactivate "
print " 1 : begin state machine as leader"
print " 2 : begin state machine as follower"
#Save thresholds to file
elif key_press == ord('s'):
fileName = raw_input('Please enter the name of a color: ')
fileName += "_thresholds.txt"
writeFile = open(fileName, "w") #open file for writing
print >> writeFile, D.thresholds
writeFile.close()
#Load thresholds from file
elif key_press == ord('l'):
whichFile = raw_input('Please enter the name of a color: ')
whichFile += "_thresholds.txt"
readFile = open(whichFile, "r") #open file for reading
data = readFile.read()
D.thresholds = eval(data)
readFile.close()
D.loaded_thresholds = True
#Reset threshold sliders
#for thresh in ['red', 'blue', 'green', 'hue', 'sat', 'val']:
# cv.SetTrackbarPos('low_' + thresh, 'Sliders', D.thresholds['low_'+thresh])
# cv.SetTrackbarPos('high_' + thresh, 'Sliders', D.thresholds['high_'+thresh])
#Start picking up thresholded images
elif key_press == ord('a'):
D.mode = "start"
#Clear all loaded sections
elif key_press == ord('c'):
D.mode = "clear"
D.sections = []
#Remove areas from thresholding
elif key_press == ord('r'):
if len(D.sections) > 0:
D.mode = "subtract"
else:
print "Cannot switch modes, need a starting area first. Press 'i' " + \
"to select a starting area."
# Add areas for thresholding
elif key_press == ord('m'):
if len(D.sections) > 0:
D.mode = "add"
else:
print "Cannot switch modes, need a starting area first. Press 'i' " + \
"to select a starting area."
#Display thresholded image
elif key_press == ord('t'):
D.current_threshold = D.threshed_image
# Activate the robot for moving
elif key_press == ord('A'):
StateMachine.activate()
# Activate robot as leader, following a white line
elif key_press == ord('1'):
print "Setting mode to \"leader\"."
R.curState = "state_lead"
StateMachine.state_start("leader")
# Activate robot as follower, following the defined target
elif key_press == ord('2'):
print "Setting mode to \"follower\"."
R.curState = "state_follow"
StateMachine.state_start("follower")
#Robot keyboard driving controls
elif key_press == 82: #Up arrow: go forward
R.move(250, 250)
elif key_press == 84: #Down arrow: go backwards
R.move(-250, -250)
elif key_press == 81: #Left arrow: turn left
R.move(-250, 250)
elif key_press == 83: #Right arrow: turn right
R.move(250,-250)
elif key_press == 32: #Spacebar: stop
R.move(0,0)
| true |
b29dd12a0a21d6e9b73c8ed5734d3f633cb7b384 | Python | almazakhmetzyanov/semrush_task | /Libs/Utils/headers_extractor.py | UTF-8 | 1,859 | 2.859375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import tests_config
import re
class HeadersExtractor:
    """Extracts HTTP request headers from the hiperfifo debug log.

    hiperfifo's stderr is redirected into the file named by
    tests_config.HIPERFIFO_LOG_FILE_PATH; this class digs the header lines
    of the most recent request for a given host/method out of that log.
    """

    @staticmethod
    def _prepare_url_for_regexp(url):
        """Escape *url* so it can be embedded literally in a regex.

        FIX: the previous manual replacement list only covered '?', '.',
        '-', '+' and missed metacharacters such as '(', ')', '[', ']';
        re.escape handles all of them.
        """
        return re.escape(url)

    def extract_headers_from_log(self, host, api_method):
        """Return the header lines of the last *api_method* request to *host*.

        Raises IndexError if no matching exchange is present in the log.
        """
        # Read the whole captured stderr stream (closed via the with-block).
        with open(tests_config.HIPERFIFO_LOG_FILE_PATH, mode='r') as log_file:
            log = log_file.read()
        # Collapse the log to a single line so the regexes need no multiline handling.
        log_without_line_break = log.replace('\n', '')
        # Pull the last (most recent) "<METHOD> ... Host: <host> ... <" span so
        # we do not pick up headers from an unrelated exchange.
        prepared_url = self._prepare_url_for_regexp(host)
        search_result = re.findall(r'> {}.*Host: {}.*<'.format(api_method, prepared_url), log_without_line_break)[-1]
        # Keep only the header section and split it into individual lines.
        headers_list = re.search(r'<.*<', search_result).group().split('< ')[2:]
        # Drop the stray '<' left on the final header entry.
        headers_list[-1] = headers_list[-1][:-1]
        return headers_list


if __name__ == "__main__":
    print(HeadersExtractor().extract_headers_from_log(host='http://127.0.0.1:5000', api_method='GET'))
| true |
2aa72a9b19bf2db8747eda4ebe316b3128137d2c | Python | continuoustests/OpenIDE.CodeSamples | /.OpenIDE/scripts/read-configuration.py | UTF-8 | 1,064 | 2.875 | 3 | [] | no_license | #!/usr/bin/env python
import sys, subprocess
# Runs a process and returns the lines output by the process
def run_process(exe):
    """Run *exe* (argv list) and return its non-empty output lines.

    stdout and stderr are merged.  FIX: the previous poll()/readline() loop
    broke as soon as poll() reported exit, silently dropping any output
    still buffered after the process terminated; it also compared bytes
    against str under Python 3.  communicate() drains the pipe completely,
    and universal_newlines gives text lines on both Python 2 and 3.
    """
    p = subprocess.Popen(exe, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                         universal_newlines=True)
    output, _ = p.communicate()
    # Keep only non-empty lines, mirroring the original behaviour.
    return [line for line in output.splitlines() if line != ""]
def print_definitions():
    """Print the one-line description OpenIDE shows for this command."""
    print("Reads configuration and prints value")
def run_command(run_location, global_profile, local_profile, args):
    """Read the 'read-configuration.test' config entry via `oi` and print it.

    Prints an OpenIDE-style error line when the entry is missing or ambiguous.
    """
    lines = run_process(["oi","conf","read","read-configuration.test"])
    if len(lines) == 1:
        print("Entry is: "+lines[0])
    else:
        print("error|Could not find a config entry for read-configuration.test")
if __name__ == "__main__":
    args = sys.argv
    # OpenIDE first asks for the command definition, then invokes the
    # command with (run_location, global_profile, local_profile, rest).
    # FIX: the guard used to check `len(args) > 1` while accessing args[2],
    # which raised IndexError when exactly one extra argument was given.
    if len(args) > 2 and args[2] == 'get-command-definitions':
        print_definitions()
    else:
        run_command(args[1], args[2], args[3], args[4:])
| true |
351c1068ee65a7efe96a40d5c45bc09760b19f99 | Python | atmadjahenry/Tugas | /Mengganti Huruf Vokal.py | UTF-8 | 356 | 3.640625 | 4 | [] | no_license | '''[Mengganti Huruf vokal]
Input:
- Masukkan teks: 'Hari ini adalah hari Rabu.'
- Masukkan huruf vokal: 'o'
Output:
Horo ono odoloh horo Robo.
'''
import re
# Character class matching every lower-case vowel.
vokal = '[aeiou]'
# Read the text and the replacement character; both are lower-cased so
# the vowel pattern above matches every occurrence.
teks = input('Masukkan teks = ').lower()
pengganti = input('Masukkan karakter pengganti = ').lower()
# Replace each vowel in the text with the chosen character.
output = re.sub(vokal, pengganti, teks)
print(output)
def func(b):
    """Return *b* unchanged when it consists solely of digits;
    otherwise return *b* with every digit character removed.
    """
    if b.isdigit():
        return b
    # Joining a generator avoids the original quadratic `+=` concatenation,
    # and `not ch.isdigit()` replaces the `== False` anti-pattern.
    return ''.join(ch for ch in b if not ch.isdigit())


print(func('abc3'))
| true |
f394032e975d56ebcf7b39f79b36a2bc26fc8813 | Python | A090MA/Py_challenge | /PyPoll/main.py | UTF-8 | 1,199 | 3.328125 | 3 | [] | no_license |
# coding: utf-8
# In[1]:
import os
from pathlib import Path
import csv
import pandas as pd
import numpy as np
# In[2]:
# Raw election data: one row per ballot (Voter ID, County, Candidate).
file1 = "Resources/election_data.csv"
poll_df = pd.read_csv(file1)
# Bare expression below is a notebook-export artifact; it has no effect
# when the file is run as a plain script.
poll_df.head()
# In[7]:
# The total number of votes cast
# NOTE(review): counted as the number of *unique* Voter IDs; assumes one
# ballot per voter -- confirm against the data.
len(poll_df['Voter ID'].value_counts())
# In[12]:
# A complete list of candidates who received votes
list(poll_df["Candidate"].unique())
# In[17]:
# The percentage of votes each candidate won
percentage = 100*poll_df["Candidate"].value_counts()/len(poll_df['Voter ID'].value_counts())
percentage
# In[64]:
# The total number of votes each candidate won
total = poll_df["Candidate"].value_counts()
total_df = pd.DataFrame(total)
total_df.rename(columns = {"Candidate": "votes"}, inplace=True)
total_df
# In[65]:
# The winner of the election based on popular vote.
# NOTE(review): the winner is hard-coded as "Khan" below instead of being
# computed from `total` (e.g. total.idxmax()); the output is wrong if the
# input data ever changes.
print("""Election Results
------------------
Total Votes:""" + str(len(poll_df['Voter ID'].value_counts())) +
"""\n------------------
The percentage of votes each candidate won:\n""" + str(percentage) +
"""\n------------------
The total number of votes each candidate won:\n""" + str(total_df)+
"""\n------------------
Winner: Khan
------------------"""
)
| true |
91571fccb67f9c96d7d05596c89ed0b60bef1d0f | Python | matthewsklar/OpenAI | /Test/TensorImage.py | UTF-8 | 3,103 | 3.234375 | 3 | [] | no_license | import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('tmp/data/', one_hot=True)
# Nodes per hidden layer
n_nodes_hl1 = 500
n_nodes_hl2 = 500
n_nodes_hl3 = 500
n_classes = 10
batch_size = 100 # Process 100 images at a time
# 784 = 28 X 28
x = tf.placeholder('float', [None, 784]) # Input Data
y = tf.placeholder('float') # Correct Data
def neural_network_model(data):
    """Build the 3-hidden-layer fully connected graph for MNIST digits.

    data -- [batch, 784] float placeholder of flattened 28x28 images.
    Returns the [batch, 10] logits tensor (no softmax applied here; the
    loss function applies it itself).
    """
    def make_layer(n_in, n_out):
        # A layer is a randomly initialised weight matrix plus a bias vector.
        return {'weights': tf.Variable(tf.random_normal([n_in, n_out])),
                'biases': tf.Variable(tf.random_normal([n_out]))}

    # Variables are created in the same order as before: three hidden
    # layers followed by the output layer.
    h1 = make_layer(784, n_nodes_hl1)
    h2 = make_layer(n_nodes_hl1, n_nodes_hl2)
    h3 = make_layer(n_nodes_hl2, n_nodes_hl3)
    out = make_layer(n_nodes_hl3, n_classes)

    # Forward pass: (x @ W + b) with a ReLU on every hidden layer.
    a1 = tf.nn.relu(tf.add(tf.matmul(data, h1['weights']), h1['biases']))
    a2 = tf.nn.relu(tf.add(tf.matmul(a1, h2['weights']), h2['biases']))
    a3 = tf.nn.relu(tf.add(tf.matmul(a2, h3['weights']), h3['biases']))

    # The output layer stays linear.
    return tf.add(tf.matmul(a3, out['weights']), out['biases'])
def train_neural_network(data):
    """Train the network on MNIST and print the final test-set accuracy.

    *data* is the input placeholder fed to the model.  Uses the
    module-level placeholders x/y, the mnist dataset and batch_size.
    """
    prediction = neural_network_model(data)
    # Error between prediction and actual output
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    # Use Adam Optimizer to minimize cost
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    n_epochs = 10 # Cycles of (feed forward + back prop)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(n_epochs):
            epoch_loss = 0
            # One pass over the training set in mini-batches.
            for _ in range(int(mnist.train.num_examples / batch_size)):
                epoch_x, epoch_y = mnist.train.next_batch(batch_size)
                _, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
                epoch_loss += c
            print('Epoch %d completed out of %d loss: %f' % (epoch + 1, n_epochs, epoch_loss))
        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1)) # output = expected output
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy: %f' % (accuracy.eval({x: mnist.test.images, y: mnist.test.labels})))
# Kick off training with the module-level input placeholder.
train_neural_network(x)
| true |
ea990468dc4a5998797258f24fad85b4a84c3890 | Python | DatDLuu/Short_Python_Algorithm | /challenge/rockPaperScissor.py | UTF-8 | 1,980 | 3.59375 | 4 | [] | no_license | # given a string represents rock paper scissor moves
# calculate corresponding moves based on rules
'''the rules you'll be following:
If you win, switch to what your opponent played
If you lose, switch to whatever wasn't played that round
In case of a draw, choose the move you've played least frequently
If there's a tie among these, choose the one you've played most recently
If there's a draw on the first move, choose Rock next
Always start with Scissors (you've heard it's statistically most likely to win)
'''
# return [win,lose,draw] based on calculated moves
def rockPaperScissor(opponentsMoves):
    """Play the fixed strategy against *opponentsMoves* ('R'/'P'/'S' chars).

    Strategy: open with Scissors; after a win switch to the opponent's last
    move; after a loss switch to the move nobody played; after a draw pick
    the move we have played least so far (ties broken by our most recent
    distinct move, then the one before it; Rock on a first-round draw).
    Returns [wins, losses, draws].
    """
    wins = losses = draws = 0
    beats = {"R": "S", "P": "R", "S": "P"}      # key defeats value
    times_played = {"R": 0, "P": 0, "S": 0}
    last_move = ""        # our most recent distinct move
    prior_move = ""       # the distinct move before that
    my_move = "S"
    for turn, theirs in enumerate(opponentsMoves):
        # Bookkeeping happens before the round is resolved, exactly as
        # in the original implementation.
        times_played[my_move] += 1
        if last_move != my_move:
            prior_move, last_move = last_move, my_move
        if beats[my_move] == theirs:            # we won this round
            wins += 1
            my_move = theirs
        elif beats[theirs] == my_move:          # we lost this round
            losses += 1
            my_move = next(m for m in beats if m != my_move and m != theirs)
        else:                                   # draw
            draws += 1
            if turn == 0:
                my_move = "R"
            else:
                fewest = min(times_played.values())
                rare = [m for m in times_played if times_played[m] == fewest]
                if len(rare) == 1:
                    my_move = rare[0]
                elif last_move in rare:
                    my_move = last_move
                elif prior_move in rare:
                    my_move = prior_move
                # otherwise keep the current move (original fall-through)
    return [wins, losses, draws]
| true |
2dddf69977bb7b30724dcde8031873dd2e045c47 | Python | Xavilien/word-game | /test_preprocessing.py | UTF-8 | 434 | 2.671875 | 3 | [] | no_license | from unittest import TestCase
from preprocessing import *
class Test(TestCase):
def test_remove_prefix(self):
self.assertEqual(remove_prefix(['a', 'ab'], ""), ['a'])
self.assertEqual(remove_prefix(['a', 'ab', 'ac'], ""), ['a'])
self.assertEqual(remove_prefix(['a', 'ab', 'ac', 'b', 'ba', 'bc'], ""), ['a', 'b'])
self.assertEqual(remove_prefix(['dog', 'doggy', 'doggie', 'dogbone'], ""), ['dog'])
| true |
22b6cdd6c76de1061bbb30d96c80c15d03c1f910 | Python | Wattyyy/LeetCode | /submissions/valid-number/solution.py | UTF-8 | 351 | 3.078125 | 3 | [
"MIT"
] | permissive | # https://leetcode.com/problems/valid-number
class Solution:
    def isNumber(self, s: str) -> bool:
        """Return True iff *s* is a valid decimal or scientific-notation number.

        FIX: the previous float()-with-blacklist approach wrongly accepted
        "nan", whitespace-padded strings like " 1 ", and underscore literals
        like "1_0", and needed an ever-growing exclusion set for the
        infinity spellings.  A regex states the grammar directly: optional
        sign, digits with an optional dot (or a bare ".digits"), and an
        optional signed integer exponent.
        """
        import re
        return bool(re.fullmatch(r"[+-]?(\d+\.?\d*|\.\d+)([eE][+-]?\d+)?", s))
| true |
3c54026b524d35eae1dea89c25a59f08b94baf5a | Python | litakgit/DSAlgo | /IC_Problems/77_form_bst_from_pre_in_order.py | UTF-8 | 902 | 3.765625 | 4 | [] | no_license |
class BSTNode(object):
    """A binary (search) tree node holding a value and two child links."""

    def __init__(self, data, left=None, right=None):
        self.data = data      # payload stored at this node
        self.left = left      # left subtree (BSTNode or None)
        self.right = right    # right subtree (BSTNode or None)

    def __repr__(self):
        # Recursively renders the whole subtree rooted at this node.
        return "%s left %s right %s" % (self.data, self.left, self.right)
def form_tree(inorder, preorder):
    """Rebuild the binary tree from its inorder and preorder traversals.

    Assumes the node values are unique.  Returns the root BSTNode, or
    None for empty traversals.
    """
    if not inorder or not preorder:
        return None
    # The first preorder value is the root; its position in the inorder
    # sequence separates the left subtree from the right one.
    root_value = preorder[0]
    position = {value: index for index, value in enumerate(inorder)}
    split = position[root_value]
    left = form_tree(inorder[:split], preorder[1:split + 1])
    right = form_tree(inorder[split + 1:], preorder[split + 1:])
    return BSTNode(root_value, left, right)


if __name__ == "__main__":
    in_order = [3, 5, 7, 10, 15, 17, 18, 20, 25, 30, 40]
    pre_order = [17, 10, 5, 3, 7, 15, 30, 20, 18, 25, 40]
    print(form_tree(in_order, pre_order))
| true |
e9bbdcf6040698448a183d5004c24061e8d51dbc | Python | Tishacy/InstagramSpider | /instagram/instagram.py | UTF-8 | 8,032 | 2.59375 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
# Author: Tishacy
# Date: 2021-03-26
import os
import logging
import pandas as pd
from .query import Query
from .parser import PostParser, CommentParser, TagPostParser
from .downloader import Downloader, Resource
from .common import POSTS_QUERY_HASH_PARAM, \
COMMENTS_QUERY_HASH_PARAM, TAG_POSTS_QUERY_HASH_PARAM
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')
logger = logging.getLogger('instagram')
def task_fetch_posts_and_comments(
        author_id,
        count=28,
        posts_out='data/posts_data.xlsx',
        comments_out='data/comments_data.xlsx'):
    """[Task] Fetch up to *count* posts of *author_id* plus every comment on
    those posts, and write both data sets to Excel files.

    :param author_id: author id
    :param count: number of posts to fetch
    :param posts_out: out file of the posts data
    :param comments_out: out file of the comments data
    :return None:
    """
    # One query object per entity type, each with its own parser.
    post_query = Query(PostParser)
    comment_query = Query(CommentParser)

    # Fetch the author's posts, paged 50 at a time.
    post_data = post_query.query_all(POSTS_QUERY_HASH_PARAM, {
        "id": author_id,
        "first": 50,
    }, count)
    logger.info("Count of posts data: %d" % len(post_data))

    # Persist the posts.
    pd.DataFrame(post_data).to_excel(posts_out, encoding='utf-8', index=False)
    logger.info("Save the posts data to %s." % posts_out)

    # Fetch every comment of every post, tagging each comment with the
    # short code of the post it belongs to.
    comment_data = []
    for index, post in enumerate(post_data):
        logger.info("Get comment of %d %s" % (index, post['short_code']))
        post_comments = comment_query.query_all(COMMENTS_QUERY_HASH_PARAM, {
            "shortcode": post['short_code'],
            "first": 50,
        }, None)
        for comment in post_comments:
            comment['post_short_code'] = post['short_code']
        comment_data.extend(post_comments)
    logger.info("Count of comment_data: %d" % len(comment_data))

    # Persist the comments.
    pd.DataFrame(comment_data).to_excel(comments_out, encoding='utf-8', index=False)
    logger.info("Save the comments data to %s." % comments_out)
def task_fetch_tag_posts_and_comments(
        tag_name,
        count=100,
        posts_out='data/tag_posts_data.xlsx',
        comments_out='data/tag_comments_data.xlsx'):
    """[Task] Fetch up to *count* posts carrying *tag_name* plus up to 100
    comments per post, and write both data sets to Excel files.

    :param tag_name: tag name
    :param count: number of posts to fetch
    :param posts_out: out file of the posts data
    :param comments_out: out file of the comments data
    :return None:
    """
    # One query object per entity type, each with its own parser.
    post_query = Query(TagPostParser)
    comment_query = Query(CommentParser)

    # Fetch the tagged posts, paged 50 at a time.
    post_data = post_query.query_all(TAG_POSTS_QUERY_HASH_PARAM, {
        "tag_name": tag_name,
        "first": 50,
    }, count)
    logger.info("Count of posts data: %d" % len(post_data))

    # Persist the posts.
    pd.DataFrame(post_data).to_excel(posts_out, encoding='utf-8', index=False)
    logger.info("Save the posts data to %s." % posts_out)

    # Fetch up to 100 comments per post, tagging each comment with the
    # short code of the post it belongs to.
    comment_data = []
    for index, post in enumerate(post_data):
        logger.info("Get comment of %d %s" % (index, post['short_code']))
        post_comments = comment_query.query_all(COMMENTS_QUERY_HASH_PARAM, {
            "shortcode": post['short_code'],
            "first": 50,
        }, 100)
        for comment in post_comments:
            comment['post_short_code'] = post['short_code']
        comment_data.extend(post_comments)
    logger.info("Count of comment_data: %d" % len(comment_data))

    # Persist the comments.
    pd.DataFrame(comment_data).to_excel(comments_out, encoding='utf-8', index=False)
    logger.info("Save the comments data to %s." % comments_out)
def task_fetch_posts(
        author_id,
        count=28,
        posts_out='data/posts_data.xlsx'):
    """[Task] Fetch a specific number of posts of the given author and save
    them to a file.  (Posts only -- comments are not fetched here.)

    :param author_id: author id
    :param count: number of posts to fetch
    :param posts_out: out file of the posts data
    :return None:
    """
    # Create query instances for posts
    post_query = Query(PostParser)
    # Query posts data
    post_data = post_query.query_all(POSTS_QUERY_HASH_PARAM, {
        "id": author_id,
        "first": 50,
    }, count)
    logger.info("Count of posts data: %d" % len(post_data))
    # Save the posts data
    post_data_df = pd.DataFrame(post_data)
    post_data_df.to_excel(posts_out, encoding='utf-8', index=False)
    logger.info("Save the posts data to %s." % posts_out)
def task_fetch_tag_posts(
        tag_name,
        count=100,
        posts_out='data/tag_posts_data.xlsx'):
    """[Task] Fetch a specific number of posts of the given tag and save
    them to a file.  (Posts only -- comments are not fetched here.)

    :param tag_name: tag name
    :param count: number of posts to fetch
    :param posts_out: out file of the posts data
    :return None:
    """
    # Create query instances for posts
    post_query = Query(TagPostParser)
    # Query posts data
    post_data = post_query.query_all(TAG_POSTS_QUERY_HASH_PARAM, {
        "tag_name": tag_name,
        "first": 50,
    }, count)
    logger.info("Count of posts data: %d" % len(post_data))
    # Save the posts data
    post_data_df = pd.DataFrame(post_data)
    post_data_df.to_excel(posts_out, encoding='utf-8', index=False)
    logger.info("Save the posts data to %s." % posts_out)
def task_download_resources(data_fpath, url_field='display_image_url', out_fields=None, out_dir='pics', overwrite=False):
    """[Task] Download all resources referenced in an Excel data file.

    :param data_fpath: data file path (.xls/.xlsx)
    :param url_field: field of resource urls in the data file.
    :param out_fields: fields of output names in the data file, joined with '-'.
    :param out_dir: output directory of downloaded resources.
    :param overwrite: whether to re-download files that already exist on disk.
    :return None:
    """
    if data_fpath is None or not isinstance(data_fpath, str):
        raise ValueError("data_fpath must be a string.")
    if not os.path.exists(data_fpath):
        raise FileNotFoundError("data_fpath is not found.")
    _, ext = os.path.splitext(data_fpath)
    if ext not in ['.xls', '.xlsx']:
        raise TypeError("data_fpath must be an excel file path with the extension of .xls or .xlsx, but got %s" % ext)
    if out_fields is None:
        out_fields = ['short_code']

    data_df = pd.read_excel(data_fpath)
    resources = []
    for i, item in data_df.iterrows():
        url = item[url_field]
        if not url or pd.isna(url):
            continue
        # Output name: joined key fields plus the URL's file extension.
        _, ext = os.path.splitext(url.split("?")[0])
        out_fname = '-'.join([item[out_field] for out_field in out_fields]) + ext
        out = os.path.join(out_dir, out_fname)
        # FIX: honour the documented `overwrite` flag, which was previously
        # accepted but ignored -- skip files that are already downloaded.
        if not overwrite and os.path.exists(out):
            continue
        resources.append(Resource(url, out))
    downloader = Downloader(max_workers=100)
    downloader.download(resources)


if __name__ == "__main__":
    # Example usage: pull an author's posts, then all tagged posts, and
    # download the associated images/videos.
    author_id = "1596900784"
    task_fetch_posts(author_id, 1000, f'data/{author_id}.xlsx')
    task_download_resources(f'data/{author_id}.xlsx', 'display_image_url', ['short_code'], out_dir=f'pics/{author_id}', overwrite=False)
    task_download_resources(f'data/{author_id}.xlsx', 'video_url', ['short_code'], out_dir=f'videos/{author_id}', overwrite=False)

    tag_name = 'computerscience'
    task_fetch_tag_posts(tag_name, 1000, f'data/{tag_name}.xlsx')
    task_download_resources(f'data/{tag_name}.xlsx', 'display_image_url', ['short_code'], out_dir=f'pics/{tag_name}', overwrite=False)
| true |
my_list_1 = [6, 5, 8, 2, 7, 7, 4]
my_list_2 = [6, 7, 7, 8, 4]

# Values present in the first list but missing from the second.
# The set difference can be printed directly ...
print(set(my_list_1).difference(my_list_2))
# ... or stored in a variable and printed afterwards.
unique_to_first = set(my_list_1).difference(my_list_2)
print(unique_to_first)
# The set can also be converted to a list and printed once more.
unique_to_first = list(unique_to_first)
print(type(unique_to_first))
print(unique_to_first)
def OCR_amazon_text(path):
    """OCR the document at *path* with AWS Textract and print every line."""
    import boto3

    # Load the document bytes from disk.
    with open(path, 'rb') as document:
        image_bytes = bytearray(document.read())

    # Send the bytes to the Textract text-detection endpoint.
    textract = boto3.client('textract')
    response = textract.detect_document_text(Document={'Bytes': image_bytes})

    # Emit the text of every detected LINE block.
    for block in response["Blocks"]:
        if block["BlockType"] == "LINE":
            print(block["Text"])


if __name__ == "__main__":
    import sys
    OCR_amazon_text(' '.join(sys.argv[1:]))
| true |
2b58ecc0d5ef0cf29e318cc3bcc5bdc0f116e518 | Python | NEleanor/compbio-galaxy-wrappers | /vcf_tools/var_select.py | UTF-8 | 1,335 | 2.671875 | 3 | [] | no_license | """
Select variants in a VCF.
Example usage: var_select.py 'input.vcf' 'fc' 'output.vcf'
var_select.py '/Users/onwuzu/Downloads/test_output_var_label.vcf' 'fc' --exclusive '/Users/onwuzu/Downloads/test_output_var_select.vcf'
Details: Select variants in input VCF.
"""
import argparse
import vcf_tools
VERSION = vcf_tools.VERSION+'.0'
def main():
    """CLI entry point: partition VCF variants by FILTER and write two VCFs.

    Selected records go to the requested output file; the remainder is
    written to the fixed side-car file "filtered_out.vcf".
    """
    cli = argparse.ArgumentParser(description="var_select.py 'file.vcf' 'fc' --exclusive 'output.vcf'")
    cli.add_argument('input_vcf', help="Input VCF")
    cli.add_argument('filter', nargs="+", help="Filter(s) of interest")
    cli.add_argument('output_vcf', help="Output VCF.")
    cli.add_argument('--exclusive', action='store_true', help="Select variants with ONLY filter(s) of interest")
    cli.add_argument('--remove', action='store_true', help="Remove variants with selected filter(s) of interest")
    cli.add_argument('-v', '--version', action='version', version="%(prog)s " + VERSION)
    opts = cli.parse_args()

    labeler = vcf_tools.VarLabel(opts.input_vcf, opts.filter)
    kept, dropped = labeler.get_filter_records(opts.exclusive, opts.remove)

    vcf_tools.VarWriter(kept).as_vcf(opts.output_vcf, labeler.reader.header)
    vcf_tools.VarWriter(dropped).as_vcf("filtered_out.vcf", labeler.reader.header)


if __name__ == "__main__":
    main()
| true |
7168299d58881214563a0d59475a6c7eb126bc28 | Python | waynerbarrios/lab16 | /app.py | UTF-8 | 1,697 | 2.6875 | 3 | [] | no_license | from flask import Flask, request, render_template, jsonify, make_response, session
from forms import FormaLogin
import os
import db
app= Flask(__name__)
# Random per-start secret used to sign the session cookie (sessions do not
# survive a server restart).
app.secret_key= os.urandom(32)
@app.route('/')
@app.route('/index')
def index():
    """Show the login form; echo any previous session data to the console."""
    if 'username' in session:
        usu= session['username']
        clave= session['password']
        opcion= session['opcion']
        # NOTE(security): this prints the user's plain-text password to
        # stdout; passwords should never be logged or kept in the session.
        print("Acceso anterior con Username: "+ usu +" ingresado con clave "+clave + " en el programa " + opcion)
    formaL= FormaLogin()
    return render_template('login.html', form=formaL)
@app.route('/login', methods=('GET','POST'))
def login():
    """Validate posted credentials against the DB; set a '_user' cookie on success."""
    formaL= FormaLogin()
    if request.method=='POST':
        usu= formaL.username.data
        pwd= formaL.password.data
        # NOTE(security): the plain-text password is stored in the session
        # below; only a server-side hash comparison would be appropriate.
        session.clear()
        session['username']= usu
        session['password']= pwd
        session['opcion']= 'Menu Login 001'
        resultSet= db.getUser(usu)
        if resultSet==None:
            return jsonify({"mensaje":"Usuario no Existe"})
        else :
            u0= resultSet[0]
            u1= resultSet[1]
            # Plain-text comparison of the stored username and password.
            if (usu==u0 and pwd==u1):
                # Create a cookie carrying the logged-in user's name
                cookieUser= make_response(render_template("ingreso.html", user= usu))
                cookieUser.set_cookie('_user', usu)
                return cookieUser
            else :
                return jsonify({"mensaje":"Usuario con Password Errado"})
    else :
        return render_template("login.html", form=formaL)
@app.route('/cookie')
def obtenerCookie():
    """Read back the '_user' cookie set during login and render it."""
    stored = request.cookies.get('_user')
    return "<h2>La Cookie almacenada Administrador es " + stored + "</h2>"
# Main program
# debug=True enables Flask's reloader and in-browser debugger; disable in production.
if __name__=="__main__":
    app.run(debug=True)
| true |
820ef30b08851275d5a6d592badf50757c217439 | Python | JonasGroeger/ddns-inwx | /vendor/tldextract/tldextract.py | UTF-8 | 14,174 | 2.671875 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""`tldextract` accurately separates the gTLD or ccTLD (generic or country code
top-level domain) from the registered domain and subdomains of a URL.
>>> import tldextract
>>> tldextract.extract('http://forums.news.cnn.com/')
ExtractResult(subdomain='forums.news', domain='cnn', suffix='com')
>>> tldextract.extract('http://forums.bbc.co.uk/') # United Kingdom
ExtractResult(subdomain='forums', domain='bbc', suffix='co.uk')
>>> tldextract.extract('http://www.worldbank.org.kg/') # Kyrgyzstan
ExtractResult(subdomain='www', domain='worldbank', suffix='org.kg')
`ExtractResult` is a namedtuple, so it's simple to access the parts you want.
>>> ext = tldextract.extract('http://forums.bbc.co.uk')
>>> (ext.subdomain, ext.domain, ext.suffix)
('forums', 'bbc', 'co.uk')
>>> # rejoin subdomain and domain
>>> '.'.join(ext[:2])
'forums.bbc'
>>> # a common alias
>>> ext.registered_domain
'bbc.co.uk'
Note subdomain and suffix are _optional_. Not all URL-like inputs have a
subdomain or a valid suffix.
>>> tldextract.extract('google.com')
ExtractResult(subdomain='', domain='google', suffix='com')
>>> tldextract.extract('google.notavalidsuffix')
ExtractResult(subdomain='google', domain='notavalidsuffix', suffix='')
>>> tldextract.extract('http://127.0.0.1:8080/deployed/')
ExtractResult(subdomain='', domain='127.0.0.1', suffix='')
If you want to rejoin the whole namedtuple, regardless of whether a subdomain
or suffix were found:
>>> ext = tldextract.extract('http://127.0.0.1:8080/deployed/')
>>> # this has unwanted dots
>>> '.'.join(ext)
'.127.0.0.1.'
>>> # join part only if truthy
>>> '.'.join(part for part in ext if part)
'127.0.0.1'
"""
import collections
from contextlib import closing
import errno
from functools import wraps
import json
import logging
import os
import re
import idna
try:
    import pkg_resources
except ImportError:
    class pkg_resources(object):  # pylint: disable=invalid-name
        """Fake pkg_resources interface which falls back to getting resources
        inside `tldextract`'s directory.
        """
        @classmethod
        def resource_stream(cls, _, resource_name):
            """Open a resource file sitting next to this module.

            Opened in binary mode to match the real
            pkg_resources.resource_stream: both call sites in this module do
            `.read().decode('utf-8')` on the returned stream, which fails on
            a text-mode file object.
            """
            moddir = os.path.dirname(__file__)
            path = os.path.join(moddir, resource_name)
            return open(path, 'rb')
from .remote import find_first_response
from .remote import looks_like_ip
from .remote import SCHEME_RE
from .remote import IP_RE
# pylint: disable=invalid-name,undefined-variable
# Py2/Py3 shim: `basestring` only exists on Python 2.
try:
    STRING_TYPE = basestring
except NameError:
    STRING_TYPE = str
# pylint: enable=invalid-name,undefined-variable


LOG = logging.getLogger("tldextract")

# Cache location is overridable via the TLDEXTRACT_CACHE environment variable.
CACHE_FILE_DEFAULT = os.path.join(os.path.dirname(__file__), '.tld_set')
CACHE_FILE = os.path.expanduser(os.environ.get("TLDEXTRACT_CACHE", CACHE_FILE_DEFAULT))

# Primary Public Suffix List source plus a mirror, tried in order.
PUBLIC_SUFFIX_LIST_URLS = (
    'https://publicsuffix.org/list/public_suffix_list.dat',
    'https://raw.githubusercontent.com/publicsuffix/list/master/public_suffix_list.dat',
)

# One suffix rule per line; leading '!' (exception) and '*.' (wildcard) are kept.
PUBLIC_SUFFIX_RE = re.compile(r'^(?P<suffix>[.*!]*\w[\S]*)', re.UNICODE | re.MULTILINE)
class ExtractResult(collections.namedtuple('ExtractResult', 'subdomain domain suffix')):
    """Named tuple of the three URL components: subdomain, domain and suffix."""

    # Keeps instances lightweight; also makes __dict__ behave on Python 3+.
    __slots__ = ()

    @property
    def registered_domain(self):
        """The ``domain.suffix`` pair, or '' unless both parts are present.

        E.g. 'bbc.co.uk' for ('forums', 'bbc', 'co.uk'); '' for a bare host.
        """
        if not (self.domain and self.suffix):
            return ''
        return '.'.join((self.domain, self.suffix))

    @property
    def fqdn(self):
        """Fully qualified domain name, or '' without a proper domain/suffix.

        E.g. 'forums.bbc.co.uk'; empty components are skipped when joining.
        """
        if not (self.domain and self.suffix):
            return ''
        # Join only the truthy fields (subdomain may be empty).
        return '.'.join(part for part in (self.subdomain, self.domain, self.suffix) if part)

    @property
    def ipv4(self):
        """The domain field when it is a bare IPv4 address, else ''.

        A bare address has neither subdomain nor suffix and matches IP_RE.
        """
        is_bare_host = not self.suffix and not self.subdomain
        if is_bare_host and IP_RE.match(self.domain):
            return self.domain
        return ''
class TLDExtract(object):
    '''A callable for extracting, subdomain, domain, and suffix components from
    a URL.'''

    # TODO: Agreed with Pylint: too-many-arguments
    def __init__(self, cache_file=CACHE_FILE, suffix_list_urls=PUBLIC_SUFFIX_LIST_URLS, # pylint: disable=too-many-arguments
                 fallback_to_snapshot=True, include_psl_private_domains=False, extra_suffixes=()):
        """
        Constructs a callable for extracting subdomain, domain, and suffix
        components from a URL.

        Upon calling it, it first checks for a JSON `cache_file`.
        By default, the `cache_file` will live in the tldextract directory.
        You can disable the caching functionality of this module by setting `cache_file` to False.

        If the `cache_file` does not exist (such as on the first run), HTTP request the URLs in
        `suffix_list_urls` in order, until one returns public suffix list data. To disable HTTP
        requests, set this to something falsy.

        The default list of URLs point to the latest version of the Mozilla Public Suffix List and
        its mirror, but any similar document could be specified.
        Local files can be specified by using the `file://` protocol. (See `urllib2` documentation.)

        If there is no `cache_file` loaded and no data is found from the `suffix_list_urls`,
        the module will fall back to the included TLD set snapshot. If you do not want
        this behavior, you may set `fallback_to_snapshot` to False, and an exception will be
        raised instead.

        The Public Suffix List includes a list of "private domains" as TLDs,
        such as blogspot.com. These do not fit `tldextract`'s definition of a
        suffix, so these domains are excluded by default. If you'd like them
        included instead, set `include_psl_private_domains` to True.

        You can pass additional suffixes in `extra_suffixes` argument without changing list URL
        """
        suffix_list_urls = suffix_list_urls or ()
        # Keep only non-blank URLs, whitespace-trimmed.
        self.suffix_list_urls = tuple(url.strip() for url in suffix_list_urls if url.strip())

        self.cache_file = os.path.expanduser(cache_file or '')
        self.fallback_to_snapshot = fallback_to_snapshot
        # With no cache, no URLs and no snapshot there is no possible data source.
        if not (self.suffix_list_urls or self.cache_file or self.fallback_to_snapshot):
            raise ValueError("The arguments you have provided disable all ways for tldextract "
                             "to obtain data. Please provide a suffix list data, a cache_file, "
                             "or set `fallback_to_snapshot` to `True`.")

        self.include_psl_private_domains = include_psl_private_domains
        self.extra_suffixes = extra_suffixes
        # Lazily-built _PublicSuffixListTLDExtractor; see _get_tld_extractor().
        self._extractor = None

    def __call__(self, url):
        """
        Takes a string URL and splits it into its subdomain, domain, and
        suffix (effective TLD, gTLD, ccTLD, etc.) component.

        >>> extract = TLDExtract()
        >>> extract('http://forums.news.cnn.com/')
        ExtractResult(subdomain='forums.news', domain='cnn', suffix='com')
        >>> extract('http://forums.bbc.co.uk/')
        ExtractResult(subdomain='forums', domain='bbc', suffix='co.uk')
        """
        # Strip scheme, path, query, fragment, userinfo and port to get the host.
        netloc = SCHEME_RE.sub("", url) \
            .partition("/")[0] \
            .partition("?")[0] \
            .partition("#")[0] \
            .split("@")[-1] \
            .partition(":")[0] \
            .strip() \
            .rstrip(".")

        labels = netloc.split(".")

        def decode_punycode(label):
            # Decode IDNA ("xn--") labels for suffix matching; fall back to
            # the raw label when it is not valid punycode.
            if label.startswith("xn--"):
                try:
                    return idna.decode(label.encode('ascii'))
                except UnicodeError:
                    pass
            return label

        # Suffix lookup is done on decoded, lowercased labels; the result is
        # re-assembled from the original labels.
        translations = [decode_punycode(label).lower() for label in labels]
        suffix_index = self._get_tld_extractor().suffix_index(translations)

        registered_domain = ".".join(labels[:suffix_index])
        suffix = ".".join(labels[suffix_index:])

        # No suffix at all: the host may simply be an IP address.
        if not suffix and netloc and looks_like_ip(netloc):
            return ExtractResult('', netloc, '')

        subdomain, _, domain = registered_domain.rpartition('.')
        return ExtractResult(subdomain, domain, suffix)

    def update(self, fetch_now=False):
        # Drop the on-disk cache and the memoized extractor; optionally refetch.
        if os.path.exists(self.cache_file):
            os.unlink(self.cache_file)
        self._extractor = None
        if fetch_now:
            self._get_tld_extractor()

    @property
    def tlds(self):
        # Full set of known suffix rules (strings, including '!'/'*.' rules).
        return self._get_tld_extractor().tlds

    def _get_tld_extractor(self):
        '''Get or compute this object's TLDExtractor. Looks up the TLDExtractor
        in roughly the following order, based on the settings passed to
        __init__:

        1. Memoized on `self`
        2. Local system cache file
        3. Remote PSL, over HTTP
        4. Bundled PSL snapshot file'''
        if self._extractor:
            return self._extractor

        tlds = self._get_cached_tlds()
        if tlds:
            tlds.extend(self.extra_suffixes)
            self._extractor = _PublicSuffixListTLDExtractor(tlds)
            return self._extractor
        elif self.suffix_list_urls:
            # Cache miss: fetch the suffix list over HTTP.
            raw_suffix_list_data = find_first_response(self.suffix_list_urls)
            tlds = get_tlds_from_raw_suffix_list_data(
                raw_suffix_list_data,
                self.include_psl_private_domains
            )

            # NOTE(review): this snapshot-fallback/raise/cache logic is nested
            # under `elif self.suffix_list_urls`, so with no URLs and no cache
            # the method falls through returning None — confirm the nesting is
            # intended.
            if not tlds and self.fallback_to_snapshot:
                tlds = self._get_snapshot_tld_extractor()
                tlds.extend(self.extra_suffixes)
                self._extractor = _PublicSuffixListTLDExtractor(tlds)
                return self._extractor
            elif not tlds:
                raise Exception("tlds is empty, but fallback_to_snapshot is set"
                                " to false. Cannot proceed without tlds.")
            self._cache_tlds(tlds)

            tlds.extend(self.extra_suffixes)
            self._extractor = _PublicSuffixListTLDExtractor(tlds)
            return self._extractor

    def _get_cached_tlds(self):
        '''Read the local TLD cache file. Returns None on IOError or other
        error, or if this object is not set to use the cache
        file.'''
        if not self.cache_file:
            return

        try:
            with open(self.cache_file) as cache_file:
                try:
                    return json.loads(cache_file.read())
                except (IOError, ValueError) as exc:
                    LOG.error(
                        "error reading TLD cache file %s: %s",
                        self.cache_file,
                        exc
                    )
        except IOError as ioe:
            # A missing cache file is the normal first-run case; only log
            # unexpected I/O errors.
            file_not_found = ioe.errno == errno.ENOENT
            if not file_not_found:
                LOG.error("error reading TLD cache file %s: %s", self.cache_file, ioe)

    @staticmethod
    def _get_snapshot_tld_extractor():
        # Bundled copy of the suffix set, shipped with the package as JSON.
        snapshot_stream = pkg_resources.resource_stream(__name__, '.tld_set_snapshot')
        with closing(snapshot_stream) as snapshot_file:
            return json.loads(snapshot_file.read().decode('utf-8'))

    def _cache_tlds(self, tlds):
        '''Logs a diff of the new TLDs and caches them on disk, according to
        settings passed to __init__.'''
        if LOG.isEnabledFor(logging.DEBUG):
            # Only pay for the snapshot read + diff when debug logging is on.
            import difflib
            snapshot_stream = pkg_resources.resource_stream(__name__, '.tld_set_snapshot')
            with closing(snapshot_stream) as snapshot_file:
                snapshot = sorted(
                    json.loads(snapshot_file.read().decode('utf-8'))
                )
            new = sorted(tlds)
            LOG.debug('computed TLD diff:\n' + '\n'.join(difflib.unified_diff(
                snapshot,
                new,
                fromfile=".tld_set_snapshot",
                tofile=self.cache_file
            )))

        if self.cache_file:
            try:
                with open(self.cache_file, 'w') as cache_file:
                    json.dump(tlds, cache_file)
            except IOError as ioe:
                # Caching is best-effort; extraction still works without it.
                LOG.warning("unable to cache TLDs in file %s: %s", self.cache_file, ioe)
# Module-level convenience instance with default settings, shared by the
# top-level extract()/update() helpers below.
TLD_EXTRACTOR = TLDExtract()


@wraps(TLD_EXTRACTOR.__call__)
def extract(url):
    # Delegate to the shared extractor instance.
    return TLD_EXTRACTOR(url)


@wraps(TLD_EXTRACTOR.update)
def update(*args, **kwargs):
    # Refresh the shared extractor's cached suffix data.
    return TLD_EXTRACTOR.update(*args, **kwargs)
def get_tlds_from_raw_suffix_list_data(suffix_list_source, include_psl_private_domains=False):
    """Parse suffix rules out of raw Public Suffix List text.

    Unless ``include_psl_private_domains`` is set, everything from the
    "BEGIN PRIVATE DOMAINS" marker onward is discarded before matching.
    """
    text = suffix_list_source
    if not include_psl_private_domains:
        text = text.partition('// ===BEGIN PRIVATE DOMAINS===')[0]
    return [match.group('suffix') for match in PUBLIC_SUFFIX_RE.finditer(text)]
class _PublicSuffixListTLDExtractor(object):
def __init__(self, tlds):
self.tlds = frozenset(tlds)
def suffix_index(self, lower_spl):
"""Returns the index of the first suffix label.
Returns len(spl) if no suffix is found
"""
for i in range(len(lower_spl)):
maybe_tld = '.'.join(lower_spl[i:])
exception_tld = '!' + maybe_tld
if exception_tld in self.tlds:
return i + 1
if maybe_tld in self.tlds:
return i
wildcard_tld = '*.' + '.'.join(lower_spl[i + 1:])
if wildcard_tld in self.tlds:
return i
return len(lower_spl)
| true |
d136c178450cf488e6ae75cfd62921a0b884cbad | Python | jeevananthanr/Python | /PyBasics/tuples.py | UTF-8 | 590 | 4.21875 | 4 | [] | no_license | #Tuples
# NOTE: Python 2 code (print statements); will not run under Python 3
# without converting `print` to a function call.
#constant/immutable list
num_tup=(1,5,'hello','Python',1.5)
print num_tup,"->",type(num_tup)
num_tup=tuple(range(5,11))
#reassign
num_tup=tuple(range(1,11))
print num_tup
#num_tup[5]=10 --will throw an error
#count
print num_tup.count(5)
#index
print num_tup.index(7)
print num_tup.index(7,3)
print num_tup.index(7,3,7)
#len
print len(num_tup)
# min/max/sum work because the tuple now holds only numbers (1..10).
print min(num_tup)
print max(num_tup)
print sum(num_tup)
#slicing
print num_tup[1:5]
print num_tup[:]
print num_tup[4:]
print num_tup[:6]
print num_tup[::-1]
print "------------------------------"
| true |
0a1684fbe86523b22da0d94fe846123d7d2f2ebd | Python | Obarads/torchpcp | /torchpcp/modules/XTransformation.py | UTF-8 | 1,106 | 2.59375 | 3 | [
"MIT"
] | permissive | import torch
from torch import nn
from torchpcp.modules.Layer import Conv2D
class XTransform(nn.Module):
    """Produces a learned k x k transformation matrix per point neighborhood.

    NOTE(review): this looks like the X-transformation of PointCNN-style
    point-cloud convolutions — confirm against the torchpcp Conv2D layer,
    whose activation/grouping semantics are defined elsewhere.
    """

    def __init__(self, in_channel, k):
        super().__init__()
        # Three stacked convolutions producing k*k channels; conv2/conv3 use
        # groups=k (depthwise-style per the original comments) and the final
        # layer has no activation (act=None).
        self.conv1 = Conv2D(in_channel, k*k, (1,k)) # [B, k*k, N, 1] # pf.conv2d is not this order
        self.conv2 = Conv2D(k*k, k*k, (1,1), conv_args={"groups":k}) # DepthwiseConv2D(k, k, (1, k)) & convert(x)
        self.conv3 = Conv2D(k*k, k*k, (1,1), act=None, conv_args={"groups":k}) # DepthwiseConv2D(k, k, (1, k), act=None) & convert(x)
        self.k = k

    def forward(self, x):
        """
        Parameter
        ---------
        x: [B, C, N, k]
            Inputs.

        Returns
        -------
        trans: [B, N, k, k]
            X-transformation matrix.
        """
        # B, C, N, k = x.shape
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        trans = self.to_trans(x)
        return trans

    def to_trans(self, x):
        # Reshape conv output [B, k*k, N, 1] into one k x k matrix per point.
        B, kk, N, _ = x.shape
        x = x.permute(0,2,3,1).contiguous()
        x = x.view(B, N, self.k, self.k).contiguous()
        return x
| true |
eada3d7a9daa581feffcfd64209deadca2ae4c28 | Python | billtubbs/game-learner | /test_tictactoe.py | UTF-8 | 6,662 | 3.296875 | 3 | [] | no_license | # !/usr/bin/env python
"""Unit Tests for tictactoe.py. Run this script to check
everything is working.
"""
import unittest
import numpy as np
from tictactoe import TicTacToeGame, GameController, RandomPlayer, \
TicTacToeExpert
from gamelearner import train_computer_players
class TestTicTacToe(unittest.TestCase):
    """Unit tests for TicTacToeGame, GameController and the players."""

    def test_check_game_state(self):
        """Test methods for checking game state (they should
        be identical).
        """
        game = TicTacToeGame()
        for i in range(5):
            if i == 0:
                state = np.zeros((3, 3), dtype=int)
            else:
                state = np.random.randint(0, 3, size=9).reshape((3, 3))
            result1 = game.check_game_state(state)  # Numpy indexing method
            result2 = game.check_game_state(state, calc=True)  # Numpy.sum method
            # Fixed: the failure message was previously outside the call
            # (`assertEqual(a, b), "msg"`), making it a dead tuple expression.
            self.assertEqual(result1, result2,
                             "Error in TicTacToeGame.check_game_state")

    def test_game_execution(self):
        """Steps through one game of Tic-Tac-Toe checking
        various attributes and methods.
        """
        game = TicTacToeGame()
        self.assertEqual(game.roles, [1, 2])
        self.assertEqual(game.size, 3)
        self.assertEqual(game.possible_n_players, [2])
        self.assertEqual(game.marks, ['X', 'O'])
        self.assertIsInstance(game.input_example, tuple)
        self.assertEqual(len(game.input_example), 2)
        self.assertFalse(game.game_over)
        self.assertEqual(game.winner, None)
        self.assertTrue(
            np.array_equal(game.state, np.zeros((game.size, game.size)))
        )

        # Make some moves
        game.make_move((1, (0, 2)))
        # Check rewards
        self.assertEqual(game.get_rewards(), {2: 0.0})
        game.make_move((2, (0, 1)))
        self.assertEqual(game.get_rewards(), {1: 0.0})
        game.make_move((1, (1, 1)))
        game.make_move((2, (2, 2)))
        state = np.array([
            [0, 2, 1],
            [0, 1, 0],
            [0, 0, 2]
        ])
        self.assertTrue(
            np.array_equal(game.state, state)
        )
        self.assertFalse(game.game_over)
        self.assertEqual(
            game.moves, [(1, (0, 2)), (2, (0, 1)),
                         (1, (1, 1)), (2, (2, 2))]
        )
        self.assertEqual(game.turn, 1)
        self.assertEqual(
            game.available_moves(), [(0, 0), (1, 0), (1, 2),
                                     (2, 0), (2, 1)]
        )
        # Winning move for player 1 (anti-diagonal).
        game.make_move((1, (2, 0)))
        self.assertTrue(game.game_over)
        self.assertEqual(game.winner, 1)

        # Check terminal rewards
        rewards = game.get_terminal_rewards()
        self.assertEqual(
            rewards, {1: game.terminal_rewards['win'],
                      2: game.terminal_rewards['lose']}
        )
        # Undoing the winning move must restore the previous state.
        game.reverse_move()
        self.assertTrue(np.array_equal(game.state, state))
        self.assertTrue(game.winner is None)

        with self.assertRaises(Exception) as context:
            game.make_move((2, (1, 2)))
        self.assertTrue("It is not player 2's turn." in str(context.exception))

        # Make some more moves to reach a draw...
        game.make_move((1, (1, 2)))
        game.make_move((2, (2, 0)))
        game.make_move((1, (0, 0)))
        game.make_move((2, (1, 0)))
        self.assertEqual(game.state[2, 1], 0)
        game.make_move((1, (2, 1)))
        self.assertTrue(game.game_over)
        self.assertEqual(game.winner, None)
        rewards = game.get_terminal_rewards()
        self.assertEqual(
            rewards, {1: game.terminal_rewards['draw'],
                      2: game.terminal_rewards['draw']}
        )

    def test_initialize_game(self):
        """Test use of moves argument when initializing a
        new game.
        """
        moves = [(1, (0, 2)), (2, (0, 1)), (1, (1, 1)), (2, (2, 2))]
        game = TicTacToeGame(moves=moves)
        state = np.array([
            [0, 2, 1],
            [0, 1, 0],
            [0, 0, 2]
        ])
        self.assertTrue(
            np.array_equal(game.state, state)
        )

    def test_generate_state_key(self):
        """Test generate_state_key method of TicTacToeGame.
        """
        game = TicTacToeGame()
        game.state[:] = [[1, 0, 0],
                         [2, 0, 0],
                         [0, 0, 1]]
        # Keys are role-relative: 'S' marks the given player's own pieces.
        self.assertEqual(
            game.generate_state_key(game.state, 1), b'S--O----S'
        )
        self.assertEqual(
            game.generate_state_key(game.state, 2), b'O--S----O'
        )

    def test_with_players(self):
        """Two seeded random players must reproduce a known final board."""
        game = TicTacToeGame()
        players = [RandomPlayer(seed=1), RandomPlayer(seed=1)]
        ctrl = GameController(game, players)
        ctrl.play(show=False)
        final_state = np.array([
            [1, 2, 1],
            [2, 1, 1],
            [1, 2, 2]
        ])
        self.assertTrue(np.array_equal(ctrl.game.state, final_state))
        self.assertEqual(game.game_over, 1)
        self.assertEqual(game.winner, 1)

    def test_expert_player(self):
        """Expert players never lose and a random player never beats them;
        results must be repeatable with fixed seeds."""
        results = []
        game = TicTacToeGame()
        expert_player1 = TicTacToeExpert("EXP1", seed=1)
        expert_player2 = TicTacToeExpert("EXP2", seed=1)
        random_player = RandomPlayer(seed=1)
        players = [expert_player1, expert_player2, random_player]
        game_stats = train_computer_players(game, players, iterations=100,
                                            seed=1, show=False)
        self.assertTrue(game_stats[expert_player1]['lost'] == 0)
        self.assertTrue(game_stats[expert_player2]['lost'] == 0)
        self.assertTrue(game_stats[random_player]['won'] == 0)

        # Save results
        results.append({player.name: stat for player, stat in
                        game_stats.items()})

        # Check repeatability with random seed set
        game.reset()
        expert_player1 = TicTacToeExpert("EXP1", seed=1)
        expert_player2 = TicTacToeExpert("EXP2", seed=1)
        random_player = RandomPlayer(seed=1)
        players = [expert_player1, expert_player2, random_player]
        game_stats = train_computer_players(game, players, iterations=100,
                                            seed=1, show=False)
        self.assertTrue(game_stats[expert_player1]['lost'] == 0)
        self.assertTrue(game_stats[expert_player2]['lost'] == 0)
        self.assertTrue(game_stats[random_player]['won'] == 0)

        # Save results
        results.append({player.name: stat for player, stat in
                        game_stats.items()})
        self.assertTrue(results[0] == results[1])
# Allow running the suite directly: python test_tictactoe.py
if __name__ == '__main__':
    unittest.main()
| true |
d686e9c45b277b1d9bdf2748b89cf376b959ea54 | Python | luroto/holbertonschool-higher_level_programming | /0x04-python-more_data_structures/1-search_replace.py | UTF-8 | 350 | 3.328125 | 3 | [] | no_license | #!/usr/bin/python3
def search_replace(my_list, search, replace):
    """Return a new list where every occurrence of search is replaced.

    The input list is left untouched; None input yields None.
    """
    if my_list is None:
        return my_list
    replaced = my_list.copy()
    for idx, item in enumerate(my_list):
        if item == search:
            replaced[idx] = replace
    return replaced
| true |
6706b43700491b765aa2a11a65121f6ca9c582d1 | Python | AdityaMalani/RSA | /rsa.py | UTF-8 | 1,647 | 3.296875 | 3 | [] | no_license | import math
import random
def isPrime(num):
    """Return 1 if num is prime, else 0 (trial division up to num/2)."""
    if num < 2:
        # 0, 1 and negatives are not prime; without this guard the loop
        # below is empty for them and they were wrongly reported as prime.
        return 0
    for i in range(2,int(num/2)+1):
        if(num%i==0):
            return 0
    return 1
def calculateE(p, q, phi):
    """Return every valid public exponent e: 2 <= e < phi with gcd(e, phi) == 1.

    p and q are accepted for interface compatibility but are not needed here.
    """
    return [candidate for candidate in range(2, phi)
            if math.gcd(candidate, phi) == 1]
def calculateD(phi, e):
    """Return the smallest positive modular inverse d of e mod phi.

    Returns None when no inverse exists (e not coprime with phi).
    """
    for d in range(1, phi):
        # `==` instead of the original `is`: identity comparison of ints only
        # works by accident for small values (CPython small-int caching).
        if (d * e) % phi == 1:
            return d
def charalgo(pt, e, n, d):
    """Encrypt then decrypt the string pt one character at a time.

    Uses three-argument pow for modular exponentiation instead of computing
    the full power first, which is infeasible for realistic exponents.
    Returns (enc, decrypted) — the per-character ciphertext integers and the
    round-tripped string — so the result can be inspected programmatically;
    the printed output of the original is preserved.
    Note: every character code must be < n for decryption to recover pt.
    """
    print("Encrypting...")
    enc = [pow(ord(ch), e, n) for ch in pt]
    print("Encrpyted text = ",end='')
    print(enc)
    print("--------------------------------------")
    print("Decrypting...")
    dec = [chr(pow(c, d, n)) for c in enc]
    str1 = ''.join(dec)
    print("Decrpyted text = ",end='')
    print(str1)
    return enc, str1
def algo(data, e, n, d):
    """Encrypt then decrypt the integer data; return the decrypted value.

    Uses three-argument pow (modular exponentiation) instead of computing the
    full power first, which is infeasible for realistic key sizes. The result
    is also returned so callers/tests can inspect it (prints are preserved).
    Note: data must be < n for decryption to recover it.
    """
    print("Encrypting...")
    c = pow(data, e, n)
    print("Encrpyted data : "+str(c))
    print("--------------------------------------")
    print("Decrypting...")
    dec = pow(c, d, n)
    print("Decrypted data :"+str(dec))
    return dec
def driver():
    """Interactive RSA demo: read two primes, build a keypair, then
    encrypt/decrypt one message entered by the user.
    """
    p = int(input("Enter the first prime number:"))
    while isPrime(p)==0:
        p = int(input("Please enter the first PRIME number:"))
    q = int(input("Enter the second prime number:"))
    while isPrime(q)==0:
        q = int(input("Please enter the second PRIME number:"))
    n=p*q
    phi = (p-1)*(q-1)
    list1 = calculateE(p,q,phi)
    print(list1)
    # Pick a random valid public exponent, then derive the private one.
    e = random.choice(list1)
    print("Selected e = "+ str(e))
    d = calculateD(phi,e)
    print("Calculated d = " +str(d))
    print("Public key is (n="+str(n)+",e="+str(e)+")")
    print("Private key is (n="+str(n)+",d="+str(d)+")")
    print("--------------------------------------")
    pt = input('Enter message : ')
    # Numeric messages use the integer path, text goes char-by-char.
    if pt.isdigit():
        pt = int(pt)
        algo(pt,e,n,d)
    else:
        charalgo(pt,e,n,d)


# Runs interactively at import time as well as when executed as a script.
driver()
| true |
580db10085397c9ca3abaea93b075c4ac4056c28 | Python | Smurodkhon12/lesson1 | /text1/10_dars.py | UTF-8 | 1,411 | 3.140625 | 3 | [] | no_license | # import time
# soat = []
# minut = []
# sekund = []
# if input("soat, minut yoki sekund kiriting: ") == soat:
# print(time.strftime("%H"))
# elif input("soat, minut yoki sekund kiriting: ") == minut:
# print(time.strftime("%M"))
# elif input("soat, minut yoki sekund kiriting: ") == sekund:
# print(time.strftime("%S"))
#
# while soat == soat and minut == minut and sekund == sekund:
# print(time.strftime("%H, %M, %S "))
import random
# result = {'0': 0, '1': 0, '2': 0, '3': 0,}
# with open('eslabqol', 'r') as file:
# lines = file.readlines()
# for line in lines:
# print(line, end='-')
import random
# with open('eslabqol', 'a') as file:
# file.write(str)
# for i in range(1, 50):
# if i % 2 == 0:
# with open('data.txt', 'a') as file:
# file.write(str(i)+'\n')
# x = 'i'
# while x:
# x = input("straka kiriting: ")
# with open('data.txt', 'a') as file:
# file.write(x + '\n')
# import time
# t = time.time()
# t = t // 60
# t = t // 60
# t = t // 24
# t = t // 365
# print(t)
# jon = 5
#
# while jon > 0:
# jon - 1
# x = 5
# y = 0.25
# b = True
#
# with open("data.txt", 'w') as file:
# file.write('x' + str(x))
# file.write('y' + str(y))
# file.write('b')
# a = ' sadasdasd sadasdasd '
# b = a.strip()
# print(a)
# print(b)
#
# x = a.replace(' ', ' ')
# Leftover scratch variable from the lesson; never used afterwards.
sart = '1 2 3 '
| true |
bb310197ac8e241b51ab733b11581e3fe5c9c6d7 | Python | MouseLand/kesa-et-al-2019 | /EnsemblePursuitModule/EnsemblePursuitNumpyFast.py | UTF-8 | 13,229 | 2.6875 | 3 | [] | no_license | import numpy as np
import time
from sklearn.cluster import KMeans
def fit_one_ensemble_seed_timecourse(X, C, seed_timecourse = [], lam = 0.005):
    """Greedily grow one neuron ensemble around a seed timecourse.

    Parameters (shapes inferred from the indexing here — confirm at callers):
        X: (NT, NN) array, NN neuron traces of length NT.
        C: (NN, NN) neuron-by-neuron similarity matrix; column C[:, imax] is
           accumulated into the per-neuron scores. Presumably X.T @ X — TODO
           confirm.
        seed_timecourse: length-NT seed trace. NOTE(review): mutable default
           argument ([]); harmless since it is never mutated, but None would
           be the safer convention.
        lam: sparsity penalty; growth stops once the squared-projection gain
           drops below lam * NT.

    Returns (iorder, current_v): indices of the neurons added, in order, and
    the accumulated (unnormalized) ensemble timecourse.
    """
    NT, NN = X.shape
    valid_neurons=np.ones((NN,),dtype=bool)
    bias = seed_timecourse @ X
    current_v = seed_timecourse
    C_summed = bias.flatten()
    iorder = np.zeros(NN, 'int32')
    for n in range(NN):
        # at each iteration, first determine the neuron to be added
        imax = np.argmax(C_summed * valid_neurons)
        vnorm = np.sum(current_v**2)
        cost_delta = np.maximum(0., C_summed[imax])**2 / vnorm
        if cost_delta<lam*X.shape[0]:
            break
        valid_neurons[imax] = False
        C_summed = C_summed + C[:, imax]
        current_v = current_v + X[:, imax]
        iorder[n] = imax
    # NOTE(review): when the loop completes without hitting the break, this
    # truncation drops the neuron added on the final pass — confirm intended.
    iorder = iorder[:n]
    return iorder, current_v
def zscore(self, X):
    """Z-score each row of X (mean 0, sample std 1, 1e-10 guard on the std).

    Written like a method but defined at module level; the first argument is
    unused, so it can be called as zscore(None, X).
    """
    stim_mean = np.mean(X.T, axis=0)
    stim_std = np.std(X.T, axis=0, ddof=1) + 1e-10
    normalized = (X.T - stim_mean) / stim_std
    return normalized.T
class EnsemblePursuitNumpyFast():
def __init__(self,n_ensembles,lambd,options_dict):
self.n_ensembles=n_ensembles
self.lambd=lambd
self.options_dict=options_dict
def calculate_dot_squared(self,C_summed):
'''
The matrix of summed correlation entries, which represent x.T@v have to be
squared for computing the cost.
'''
dot_squared=np.clip(C_summed,a_min=0,a_max=None)**2
return dot_squared
def calculate_cost_delta(self,C_summed,current_v):
'''
Use the similarity matrix to compute the change in cost for adding one more neuron.
'''
cost_delta=np.clip(C_summed,a_min=0,a_max=None)**2/(self.sz[1]*(current_v**2).sum())-self.lambd
return cost_delta
def mask_dot_squared(self,selected_neurons,dot_squared):
'''
Mask out neurons that are already in the ensemble for cost computation.
'''
mask=np.zeros((selected_neurons.shape[0]),dtype=bool)
mask[selected_neurons==0]=1
mask[selected_neurons!=0]=0
masked_dot_squared=mask*dot_squared
return masked_dot_squared
def sum_C(self,C_summed_unnorm,C,max_delta_neuron):
'''
As you accumulate more neurons to the ensemble you sum columns of the
correlation matrix into an accumulator matrix.
'''
C_summed_unnorm=C_summed_unnorm+C[:,max_delta_neuron]
return C_summed_unnorm
def sum_v(self, v, max_delta_neuron, X):
current_v=v+X[max_delta_neuron,:]
return current_v
def update_C(self,X,C,u,v,selected_neurons):
#selected_neurons=np.nonzero(u)[0]
cross_term_init=X@(v.T)
cross_term=np.outer(u[selected_neurons],cross_term_init)
C[selected_neurons,:]=C[selected_neurons,:]-cross_term
ixgrid=np.ix_(~selected_neurons,selected_neurons)
C[ixgrid]=C[ixgrid]-cross_term.T[~selected_neurons,:]
def fit_one_ensemble(self,X,C):
#A parameter to account for how many top neurons we sample from. It starts from 1,
#because we choose the top neuron when possible, e.g. when we can find an ensemble
# that is larger than min ensemble size. If there is no ensemble with the top neuron
# we increase the number of neurons to sample from.
self.n_neurons_for_sampling=1
n=0
min_assembly_size=self.options_dict['min_assembly_size']
#index for switching between top neurons for fitting ensemble when the first neurons
#doesn't give large enough ensemble
index=-1
#A while loop for trying sampling other neurons if the found ensemble size is smaller
#than threshold.
while n<min_assembly_size:
seed=self.repeated_seed(C,index)
n=1
current_v=X[seed,:]
current_v_unnorm=current_v.copy()
selected_neurons=np.zeros((X.shape[0]),dtype=bool)
#Seed current_v
selected_neurons[seed]=1
#Fake cost to initiate while loop
max_cost_delta=1000
C_summed_unnorm=0
max_delta_neuron=seed
while max_cost_delta>0:
#Add the x corresponding to the max delta neuron to C_sum. Saves computational
#time.
#print(n)
C_summed_unnorm=self.sum_C(C_summed_unnorm,C,max_delta_neuron)
C_summed=(1./n)*C_summed_unnorm
dot_squared=self.calculate_dot_squared(C_summed)
#invert the 0's and 1's in the array which stores which neurons have already
#been selected into the assembly to use it as a mask
masked_dot_squared=self.mask_dot_squared(selected_neurons,dot_squared)
max_delta_neuron=np.argmax(masked_dot_squared)
cost_delta=self.calculate_cost_delta(C_summed[max_delta_neuron],current_v)
print(cost_delta)
if cost_delta>0:
selected_neurons[max_delta_neuron]=1
current_v_unnorm= self.sum_v(current_v_unnorm,max_delta_neuron,X)
n+=1
current_v=(1./n)*current_v_unnorm
max_cost_delta=cost_delta
index+=-1
print('nr of neurons in ensemble',n)
current_u=np.zeros((X.shape[0],1))
current_u[selected_neurons,0]=np.clip(C_summed[selected_neurons],a_min=0,a_max=None)/(current_v**2).sum()
self.U=np.concatenate((self.U,current_u),axis=1)
self.V=np.concatenate((self.V,current_v.reshape(1,self.sz[1])),axis=0)
return current_u, current_v, C, selected_neurons
def fit_one_ensemble_suite2p(self,X,C,seed_timecourse):
self.n_neurons_for_sampling=1
min_assembly_size=self.options_dict['min_assembly_size']
min_assembly_size=1
self.sz=X.shape
#index for switching between top neurons for fitting ensemble when the first neurons
#doesn't give large enough ensemble
index=-1
selected_neurons=np.zeros((X.shape[0]),dtype=bool)
#Fake cost to initiate while loop
max_cost_delta=1000
n=1
#while n<=min_assembly_size:
C[:,-1]=(X@(seed_timecourse.T)).flatten()
n=1
current_v=seed_timecourse
print(current_v.shape)
current_v_unnorm=current_v.copy()
selected_neurons=np.zeros((X.shape[0]),dtype=bool)
#Fake cost to initiate while loop
max_cost_delta=1000
C_summed_unnorm=0
max_delta_neuron=-1
while max_cost_delta>0:
#Add the x corresponding to the max delta neuron to C_sum. Saves computational
#time.
C_summed_unnorm=self.sum_C(C_summed_unnorm,C,max_delta_neuron)
C_summed=(1./n)*C_summed_unnorm
dot_squared=self.calculate_dot_squared(C_summed)
#invert the 0's and 1's in the array which stores which neurons have already
#been selected into the assembly to use it as a mask
masked_dot_squared=self.mask_dot_squared(selected_neurons,dot_squared)
max_delta_neuron=np.argmax(masked_dot_squared)
cost_delta=self.calculate_cost_delta(C_summed[max_delta_neuron],current_v)
if cost_delta>0:
selected_neurons[max_delta_neuron]=1
current_v_unnorm= self.sum_v(current_v_unnorm,max_delta_neuron,X)
n+=1
current_v=(1./n)*current_v_unnorm
max_cost_delta=cost_delta
return selected_neurons
def fit_one_ensemble_seed_timecourse(self,X,C,seed_timecourse):
self.n_neurons_for_sampling=1
min_assembly_size=self.options_dict['min_assembly_size']
min_assembly_size=1
self.sz=X.shape
#index for switching between top neurons for fitting ensemble when the first neurons
#doesn't give large enough ensemble
index=-1
selected_neurons=np.zeros((X.shape[0]),dtype=bool)
#Fake cost to initiate while loop
max_cost_delta=1000
n=1
#while n<=min_assembly_size:
#Fill the last column with the similarity of X and seed timecourse
C[:,-1]=(X@(seed_timecourse.T)).flatten()
n=1
current_v=seed_timecourse
current_v_unnorm=current_v.copy()
selected_neurons=np.zeros((X.shape[0]),dtype=bool)
#Fake cost to initiate while loop
max_cost_delta=1000
#Initialize C_sum unnormalized to zero
C_summed_unnorm=0
#max_delta_neuron starts as the input activity trace
max_delta_neuron=-1
while max_cost_delta>0:
#Add the x corresponding to the max delta neuron to C_sum. Saves computational
#time.
#Summing the C at each iteration instead of indexing neurons and take the mean_stimuli
#is three orders of magnitude faster.
C_summed_unnorm=self.sum_C(C_summed_unnorm,C,max_delta_neuron)
C_summed=(1./n)*C_summed_unnorm
dot_squared=self.calculate_dot_squared(C_summed)
#invert the 0's and 1's in the array which stores which neurons have already
#been selected into the assembly to use it as a mask
masked_dot_squared=self.mask_dot_squared(selected_neurons,dot_squared)
max_delta_neuron=np.argmax(masked_dot_squared)
cost_delta=self.calculate_cost_delta(C_summed[max_delta_neuron],current_v)
if cost_delta>0:
selected_neurons[max_delta_neuron]=1
current_v_unnorm= self.sum_v(current_v_unnorm,max_delta_neuron,X)
n+=1
current_v=(1./n)*current_v_unnorm
max_cost_delta=cost_delta
index+=-1
print('nr of neurons in ensemble',n)
current_u=np.zeros((X.shape[0],1))
current_u[selected_neurons,0]=np.clip(C_summed[selected_neurons],a_min=0,a_max=None)/(current_v**2).sum()
self.U=np.concatenate((self.U,current_u),axis=1)
self.V=np.concatenate((self.V,current_v.reshape(1,self.sz[1])),axis=0)
return current_u, current_v, selected_neurons
def repeated_seed(self,C,index):
nr_neurons_to_av=self.options_dict['seed_neuron_av_nr']
sorted_similarities=np.sort(C,axis=1)[:,:-1][:,C.shape[0]-nr_neurons_to_av-1:]
average_similarities=np.mean(sorted_similarities,axis=1)
top_neurons=np.argsort(average_similarities)
seed=top_neurons[-index]
return seed
    def fit_transform(self,X):
        """Greedily extract `n_ensembles` rank-1 components from X.

        X is z-scored first. Each iteration fits one ensemble, subtracts
        its rank-1 reconstruction from X (deflation) and updates the
        neuron similarity matrix C in-place before the next iteration.

        Returns (U, V.T) with the zero-initialisation column/row removed.
        """
        X=self.zscore(X)
        self.sz=X.shape
        # U/V start with a dummy zero column/row that is stripped at the end.
        self.U=np.zeros((X.shape[0],1))
        self.V=np.zeros((1,X.shape[1]))
        start=time.time()
        # Neuron-by-neuron similarity (Gram) matrix; updated incrementally below.
        C=X@X.T
        #end=time.time()
        #print('full',end-start)
        for iteration in range(0,self.n_ensembles):
            start=time.time()
            current_u, current_v, C,selected_neurons=self.fit_one_ensemble(X,C)
            #end=time.time()
            #print(end-start,'loop')
            # Rank-1 reconstruction of the ensemble just fitted.
            U_V=current_u.reshape(self.sz[0],1)@current_v.reshape(1,self.sz[1])
            start=time.time()
            # Update C only for the neurons selected into the previous
            # ensemble (cheaper than recomputing the full Gram matrix).
            self.update_C(X,C,current_u,current_v,selected_neurons)
            end=time.time()
            print('optimized',end-start)
            # Deflate: remove the fitted component from the data.
            X=X-U_V
            #print('ensemble nr', iteration)
            cost=np.mean(X*X)
            #print('cost',cost)
        #After fitting arrays discard the zero initialization rows and columns from U and V.
        self.U=self.U[:,1:]
        self.V=self.V[1:,:]
        return self.U, self.V.T
    def fit_transform_kmeans_init(self,X):
        """Variant of fit_transform that seeds each ensemble from a
        precomputed k-means clustering of the timecourses.

        Cluster labels are loaded from 'kmeans_init.npy' (the KMeans fit
        that produced them is left commented out below); each cluster's
        mean timecourse seeds one ensemble.

        Returns (U, V.T) with the zero-initialisation column/row removed.
        """
        X=self.zscore(X)
        self.sz=X.shape
        self.U=np.zeros((X.shape[0],1))
        self.V=np.zeros((1,X.shape[1]))
        #there's an extra column at the end for holding the input activity trace
        #Make the order 'F' to support fast column operations
        C=np.empty((self.sz[0],self.sz[0]+1),order='F')
        #Fill the columns with X@X.T except for the column with the input activity trace
        C[:,:-1]=X@X.T
        #kmeans = KMeans(n_clusters=self.n_ensembles, random_state=0).fit(X.T).labels_
        #np.save('kmeans_init.npy',kmeans)
        kmeans=np.load('kmeans_init.npy')
        for iteration in range(0,self.n_ensembles):
            start=time.time()
            print('where',np.where(kmeans==iteration)[0].flatten())
            # Seed timecourse = mean activity of this k-means cluster's neurons.
            average_timecourse=np.mean(X[np.where(kmeans==iteration)[0].flatten(),:], axis=0)
            current_u, current_v,selected_neurons=self.fit_one_ensemble_seed_timecourse(X,C,average_timecourse)
            U_V=current_u.reshape(self.sz[0],1)@current_v.reshape(1,self.sz[1])
            start=time.time()
            #Update C in-place. Uses an efficient algorithm to update only the neurons that have been
            #added to the previous ensemble.
            self.update_C(X,C[:,:-1],current_u,current_v,selected_neurons)
            # Deflate the fitted rank-1 component from the data.
            X=X-U_V
            cost=np.mean(X*X)
            print('cost',cost)
        #After fitting arrays discard the zero initialization rows and columns from U and V.
        self.U=self.U[:,1:]
        self.V=self.V[1:,:]
        return self.U, self.V.T
| true |
f4d0fbd414f453917913aed6c37e8f712e58fc49 | Python | lwang-astro/Agama | /pytests/example_self_consistent_model3.py | UTF-8 | 8,282 | 2.90625 | 3 | [] | no_license | #!/usr/bin/python
"""
Example of construction of a three-component disk-bulge-halo equilibrium model of a galaxy.
The approach is explained in example_self_consistent_model.py;
this example differs in that it has a somewhat simpler structure (only a single stellar disk
component, no stellar halo or gas disk).
Another modification is that the halo and the bulge are represented by 'pseudo-isotropic' DF:
it is a spherical isotropic DF that is constructed using the Eddington inversion formula
for the given density profile in the spherically-symmetric approximation of the total potential.
This DF is then expressed in terms of actions and embedded into the 'real', non-spherical
potential, giving rise to a slightly different density profile; however, it is close enough
to the input one so that only one iteration is performed at the first stage.
Then the disk DF is constructed in the current total potential, and a few more iterations
are needed to converge towards a self-consistent model.
"""
import agama, numpy, ConfigParser, os, sys
def writeRotationCurve(filename, potential):
    """Tabulate the circular-velocity curve of `potential` into `filename`.

    Samples 71 log-spaced radii between 0.01 and ~31.6, evaluates the
    radial force in the equatorial plane and writes (radius, v_circ) rows.
    NOTE(review): `potential.export` writes to the same `filename` and is
    then overwritten by `numpy.savetxt` below -- confirm this is intended.
    """
    potential.export(filename)
    radii = numpy.logspace(-2, 1.5, 71)
    points = numpy.vstack((radii, radii*0, radii*0)).T
    # v_circ^2 = -F_R * R  (the radial force is attractive, so -F_R >= 0)
    radial_force = potential.force(points)[:,0]
    vcirc = (-radial_force * radii)**0.5
    table = numpy.vstack((radii, vcirc)).T
    numpy.savetxt(filename, table, fmt="%.6g", header="radius\tv_circ")
def printoutInfo(model, iteration):
    """Print diagnostic masses/densities and dump per-component density
    profiles plus the rotation curve for the given iteration (Python 2).

    Components are assumed ordered halo, bulge, disk (matching how they
    are appended in the __main__ block below).
    """
    densHalo = model.components[0].getDensity()
    densBulge= model.components[1].getDensity()
    densDisk = model.components[2].getDensity()
    # Probe points in the equatorial plane and slightly above it.
    pt0 = (2.0, 0, 0)
    pt1 = (2.0, 0, 0.5)
    print \
        "Disk total mass=%g," % densDisk.totalMass(), \
        "rho(R=2,z=0)=%g, rho(R=2,z=0.5)=%g" % \
        (densDisk.density(pt0), densDisk.density(pt1))
    # NOTE(review): the label says R=0.5 but the density is evaluated at
    # R=0.4 -- one of the two looks like a typo; confirm which.
    print \
        "Bulge total mass=%g," % densBulge.totalMass(), \
        "rho(R=0.5,z=0)=%g" % \
        (densBulge.density(0.4, 0, 0))
    print \
        "Halo total mass=%g," % densHalo.totalMass(), \
        "rho(R=2,z=0)=%g, rho(R=2,z=0.5)=%g" % \
        (densHalo.density(pt0), densHalo.density(pt1))
    print "Potential at origin=-(%g km/s)^2," % (-model.potential.potential(0,0,0))**0.5, \
        "total mass=%g" % model.potential.totalMass()
    # Dump each component's density profile and the rotation curve.
    densDisk. export("dens_disk_iter" +str(iteration));
    densBulge.export("dens_bulge_iter"+str(iteration));
    densHalo. export("dens_halo_iter" +str(iteration));
    writeRotationCurve("rotcurve_iter"+str(iteration), model.potential)
if __name__ == "__main__":
    # --- Stage 0: read all model parameters from the INI file (Python 2) ---
    iniFileName = os.path.dirname(os.path.realpath(sys.argv[0])) + "/../data/SCM3.ini"
    ini = ConfigParser.RawConfigParser()
    ini.optionxform=str # do not convert key to lowercase
    ini.read(iniFileName)
    iniPotenHalo  = dict(ini.items("Potential halo"))
    iniPotenBulge = dict(ini.items("Potential bulge"))
    iniPotenDisk  = dict(ini.items("Potential disk"))
    iniDFDisk     = dict(ini.items("DF disk"))
    iniSCMHalo    = dict(ini.items("SelfConsistentModel halo"))
    iniSCMBulge   = dict(ini.items("SelfConsistentModel bulge"))
    iniSCMDisk    = dict(ini.items("SelfConsistentModel disk"))
    iniSCM        = dict(ini.items("SelfConsistentModel"))
    # initialize the SelfConsistentModel object (only the potential expansion parameters)
    model = agama.SelfConsistentModel(**iniSCM)
    # create initial ('guessed') density profiles of all components
    densityBulge = agama.Density(**iniPotenBulge)
    densityHalo  = agama.Density(**iniPotenHalo)
    densityDisk  = agama.Density(**iniPotenDisk)
    # add components to SCM - at first, all of them are static density profiles
    model.components.append(agama.Component(density=densityHalo,  disklike=False))
    model.components.append(agama.Component(density=densityBulge, disklike=False))
    model.components.append(agama.Component(density=densityDisk,  disklike=True))
    # compute the initial potential
    model.iterate()
    writeRotationCurve("rotcurve_init", model.potential)
    # initialize the DFs of spheroidal components using the Eddington inversion formula
    # for their respective density profiles in the spherically-symmetric initial guess for the potential
    # NOTE(review): 'gridsizer' may be a misspelling of the Agama
    # parameter 'gridSizeR' -- verify against the Agama Python API.
    pot_sph = agama.Potential(type='Multipole', density=model.potential, lmax=0, gridsizer=100, rmin=1e-3, rmax=1e3)
    dfHalo  = agama.DistributionFunction(type='PseudoIsotropic', potential=pot_sph, density=densityHalo)
    dfBulge = agama.DistributionFunction(type='PseudoIsotropic', potential=pot_sph, density=densityBulge)
    printoutInfo(model, 0)
    print "\033[1;33m**** STARTING ONE-COMPONENT MODELLING ****\033[0m\nMasses are: " \
        "Mhalo=%g," % densityHalo.totalMass(), \
        "Mbulge=%g," % densityBulge.totalMass(), \
        "Mdisk=%g" % densityDisk.totalMass()
    # replace the halo and bulge SCM components with the DF-based ones
    model.components[0] = agama.Component(df=dfHalo,  disklike=False, **iniSCMHalo)
    model.components[1] = agama.Component(df=dfBulge, disklike=False, **iniSCMBulge)
    # do one iteration to determine the self-consistent density profile of the halo and the bulge
    print "\033[1;37mStarting iteration #1\033[0m"
    model.iterate()
    printoutInfo(model, 1)
    # now that we have a reasonable guess for the total potential,
    # we may initialize the DF of the stellar disk
    dfDisk = agama.DistributionFunction(potential=model.potential, **iniDFDisk)
    # we can compute the masses even though we don't know the density profile yet
    print "\033[1;33m**** STARTING TWO-COMPONENT MODELLING ****\033[0m\nMasses are: ", \
        "Mhalo=%g," % dfHalo.totalMass(), \
        "Mbulge=%g," % dfBulge.totalMass(), \
        "Mdisk=%g" % dfDisk.totalMass()
    # replace the static disk component them with a DF-based disk one
    model.components[2] = agama.Component(df=dfDisk, disklike=True, **iniSCMDisk)
    # do a few more iterations to obtain the self-consistent density profile for both disks
    for iteration in range(2,5):
        print "\033[1;37mStarting iteration #%d\033[0m" % iteration
        model.iterate()
        printoutInfo(model, iteration)
    # --- Diagnostics: in-plane disk density/velocity profiles + Toomre Q ---
    print "\033[1;33mComputing disk density and velocity profiles\033[0m"
    R   = numpy.linspace(0.2, 10, 50)
    xyz = numpy.column_stack((R, R*0, R*0))
    Sigma,_   = agama.GalaxyModel(potential=model.potential, df=dfDisk, af=model.af).projectedMoments(R)
    rho,sigma = agama.GalaxyModel(potential=model.potential, df=dfDisk, af=model.af).moments(xyz)
    force, deriv = model.potential.forceDeriv(xyz)
    # Epicyclic frequency from the radial force and its derivative.
    kappa = numpy.sqrt(-deriv[:,0] - 3*force[:,0]/R)
    ToomreQ = sigma[:,0]**0.5 * kappa / 3.36 / Sigma
    numpy.savetxt("disk_plane",
        numpy.vstack((R, Sigma, rho, sigma[:,0]**0.5, sigma[:,1]**0.5, ToomreQ)).T,
        header="R Sigma rho(R,z=0) sigma_R sigma_z ToomreQ", fmt="%.6g")
    # export model to an N-body snapshot
    print "\033[1;33mCreating an N-body representation of the model\033[0m"
    format = 'text'  # one could also use 'nemo' or 'gadget' here
    # first create a representation of density profiles without velocities
    # (just for demonstration), by drawing samples from the density distribution
    print "Sampling bulge density"
    agama.writeSnapshot("dens_bulge_final",  model.components[1].getDensity().sample(40000),  format)
    print "Sampling disk density"
    agama.writeSnapshot("dens_disk_final",   model.components[2].getDensity().sample(160000), format)
    print "Sampling halo density"
    agama.writeSnapshot("dens_halo_final",   model.components[0].getDensity().sample(800000), format)
    # now create genuinely self-consistent models of both components,
    # by drawing positions and velocities from the DF in the given (self-consistent) potential
    print "Sampling bulge DF"
    agama.writeSnapshot("model_bulge_final", \
        agama.GalaxyModel(potential=model.potential, df=dfBulge, af=model.af).sample(40000), format)
    print "Sampling disk DF"
    agama.writeSnapshot("model_disk_final", \
        agama.GalaxyModel(potential=model.potential, df=dfDisk, af=model.af).sample(160000), format)
    print "Sampling halo DF"
    agama.writeSnapshot("model_halo_final", \
        agama.GalaxyModel(potential=model.potential, df=dfHalo, af=model.af).sample(800000), format)
4ba29ecb372db94b8082aa66de1036c271904c0a | Python | sormehka/PaymentCode | /src/antifraud.py | UTF-8 | 7,664 | 3.265625 | 3 | [] | no_license | import csv
import pandas as pd
import sys
import numpy as np
def parse_input(batch_file, stream_file, batch_file_fixed, stream_file_fixed):
    """Re-split the raw payment CSVs on the first four commas only.

    The free-text message column may itself contain commas, so each raw
    line is split at most 4 times and rewritten as a clean 5-column CSV.
    (Files are opened in binary mode; this code targets Python 2.)
    """
    def _resplit(src_path, dst_path):
        # Copy src -> dst, limiting every line to five comma-separated fields.
        with open(src_path, 'rb') as src, open(dst_path, 'wb') as dst:
            out = csv.writer(dst, delimiter=',')
            for raw_line in src:
                out.writerow(raw_line.split(',', 4))

    _resplit(batch_file, batch_file_fixed)
    _resplit(stream_file, stream_file_fixed)
# 2. Defining a graph class
class Graph(object):
    """Undirected graph stored as an adjacency dict {vertex: set(neighbours)}."""

    def _init_(self):
        # NOTE(review): single underscores -- this is NOT the __init__
        # constructor; callers invoke graph._init_() explicitly.
        self._graph_dict = {}

    def vertices(self):
        """Return all known vertices as a list."""
        return list(self._graph_dict.keys())

    def add_vertex(self, vertex):
        """Register `vertex` with an empty neighbour set if unseen."""
        if vertex not in self._graph_dict:
            self._graph_dict[vertex] = set()

    def add_edge(self, vertex1, vertex2):
        """Insert an undirected edge, creating the endpoints on demand."""
        self.add_vertex(vertex1)
        self.add_vertex(vertex2)
        self._graph_dict[vertex1].add(vertex2)
        self._graph_dict[vertex2].add(vertex1)

    def BFS(self, start, end, trusted_depth):
        """Breadth-first search: is `end` within `trusted_depth` hops of `start`?

        Returns None when `start` is unknown, True when `end` is reached at
        distance <= trusted_depth, False otherwise.
        """
        adjacency = self._graph_dict
        if start not in adjacency:
            return None
        seen = {vertex: False for vertex in adjacency}
        queue = [start]   # FIFO: enqueue at index 0, dequeue from the end
        depths = [0]
        while queue:
            vertex = queue.pop()
            depth = depths.pop()
            seen[vertex] = True
            if depth > trusted_depth:
                # BFS visits nodes in non-decreasing depth, so every
                # remaining node is also too far away.
                return False
            if vertex == end:
                return True
            for neighbour in adjacency[vertex]:
                if not seen[neighbour]:
                    queue.insert(0, neighbour)
                    depths.insert(0, depth + 1)
        return False
# 3. Initializing the graph class, adding vertices and edges based on the batch_payment file
def main():
    """CLI driver (Python 2): build the trust graph from the batch file,
    then label every streamed payment "trusted"/"unverified" for three
    friendship-degree features (depth 1, 2 and 4), one output file each.

    argv: batch_file stream_file batch_fixed stream_fixed out1 out2 out3

    The `trusted` matrix caches verdicts between pairs of users:
    0 = unknown, 1 = trusted, 2/3/4 = unverified at depth 1/2/4.
    """
    if len(sys.argv) < 7:
        print "Input error: this code requires two input files,two temporary files, and 3 output file paths"
        sys.exit(1)
    batch_file = sys.argv[1]
    stream_file = sys.argv[2]
    batch_file_fixed=sys.argv[3]
    stream_file_fixed=sys.argv[4]
    output1 = sys.argv[5]
    output2 = sys.argv[6]
    output3 = sys.argv[7]
    # 1.1 Parse input files
    parse_input(batch_file, stream_file, batch_file_fixed, stream_file_fixed)
    # 1.2. Reading and saving the two columns id1 and id2
    df=pd.read_csv(batch_file_fixed,usecols=[1,2]).as_matrix()
    df_out=pd.read_csv(stream_file_fixed,usecols=[1,2]).as_matrix()
    # 3. Build the payment graph: an edge per historical transaction pair.
    graph=Graph()
    graph._init_()
    for i in range(0,len(df)):
        graph.add_vertex(df[i][0])
        graph.add_vertex(df[i][1])
        graph.add_edge(df[i][0],df[i][1])
    # 4. Writing the outputs of three features to text files
    # NOTE(review): vertices_index.index(...) is an O(n) list search done
    # repeatedly below -- a dict lookup would be much faster at scale.
    with open(output1,"w") as out_text: #output1-Feature1
        trusted_depth=1
        vertices_index=graph.vertices()
        trusted=np.zeros((len(vertices_index)+1,len(vertices_index)+1))
        for i in range(0, len(df_out)):
            if not df_out[i][0] in vertices_index or not df_out[i][1] in vertices_index:
                out_text.write("unverified\n") #if not in vertices, new costumer, unverified
            elif trusted[vertices_index.index(df_out[i][0])][vertices_index.index(df_out[i][1])]==1:
                out_text.write("trusted\n") #already labeled as trusted
            elif trusted[vertices_index.index(df_out[i][0])][vertices_index.index(df_out[i][1])]==2:
                out_text.write("unverified\n") #already labeled as unverified
            elif not graph.BFS(df_out[i][0],df_out[i][1],trusted_depth):
                out_text.write("unverified\n") #new label as unverified
                trusted[vertices_index.index(df_out[i][0])][vertices_index.index(df_out[i][1])]=2
                trusted[vertices_index.index(df_out[i][1])][vertices_index.index(df_out[i][0])]=2
            else: #new label as trusted
                out_text.write("trusted\n")
                trusted[vertices_index.index(df_out[i][0])][vertices_index.index(df_out[i][1])]=1
                trusted[vertices_index.index(df_out[i][1])][vertices_index.index(df_out[i][0])]=1
    with open(output2,"w") as out_text: #output2-Feature2
        trusted_depth=2
        #Trusted matrix from feature 1. The ones are still trusted.
        for i in range(0, len(df_out)):
            if not df_out[i][0] in vertices_index or not df_out[i][1] in vertices_index:
                out_text.write("unverified\n")
            elif trusted[vertices_index.index(df_out[i][0])][vertices_index.index(df_out[i][1])]==1:
                out_text.write("trusted\n")
            elif trusted[vertices_index.index(df_out[i][0])][vertices_index.index(df_out[i][1])]==3:
                out_text.write("unverified\n")
            elif not graph.BFS(df_out[i][0],df_out[i][1],trusted_depth):
                out_text.write("unverified\n")
                trusted[vertices_index.index(df_out[i][0])][vertices_index.index(df_out[i][1])]=3
                trusted[vertices_index.index(df_out[i][1])][vertices_index.index(df_out[i][0])]=3
            else:
                out_text.write("trusted\n")
                trusted[vertices_index.index(df_out[i][0])][vertices_index.index(df_out[i][1])]=1
                trusted[vertices_index.index(df_out[i][1])][vertices_index.index(df_out[i][0])]=1
    with open(output3,"w") as out_text: #output3-Feature3
        trusted_depth=4
        for i in range(0, len(df_out)):
            if not df_out[i][0] in vertices_index or not df_out[i][1] in vertices_index:
                out_text.write("unverified\n")
            elif trusted[vertices_index.index(df_out[i][0])][vertices_index.index(df_out[i][1])]==1:
                out_text.write("trusted\n")
            elif trusted[vertices_index.index(df_out[i][0])][vertices_index.index(df_out[i][1])]==4:
                out_text.write("unverified\n")
            elif not graph.BFS(df_out[i][0],df_out[i][1],trusted_depth):
                out_text.write("unverified\n")
                trusted[vertices_index.index(df_out[i][0])][vertices_index.index(df_out[i][1])]=4
                trusted[vertices_index.index(df_out[i][1])][vertices_index.index(df_out[i][0])]=4
            else:
                out_text.write("trusted\n")
                trusted[vertices_index.index(df_out[i][0])][vertices_index.index(df_out[i][1])]=1
                trusted[vertices_index.index(df_out[i][1])][vertices_index.index(df_out[i][0])]=1

if __name__ == "__main__":
    main()
| true |
ac4c281432301ceb27ffe71da5326688d7f962dd | Python | junefish/adventofcode | /adventofcode2022/day19/day19problem1.py | UTF-8 | 1,207 | 2.890625 | 3 | [] | no_license | blueprints = []
# Parse each blueprint line into {blueprint_number: [robot_spec, ...]},
# where robot_spec = [resource_type, {cost_type: amount}, ...].
# (`blueprints` is the empty list initialised just above.)
with open('adventofcode2022/day19/day19example.txt') as blueprint_file:
    for line in blueprint_file:
        label = line.strip().split(': ')[0].split(' ')
        number = int(label[-1])
        robot_specs = []
        robots = line.strip().split(': ')[-1].split('. ')
        for bot in robots:
            words = bot.split(' ')
            info = [words[1]]  # resource this robot collects
            if words[-3] == 'and':
                # Two-ingredient cost, e.g. "costs 3 ore and 14 clay."
                info.append({words[5].strip('.'): int(words[4])})
                info.append({words[-1].strip('.'): int(words[-2])})
            else:
                # Single-ingredient cost, e.g. "costs 4 ore."
                info.append({words[5].strip('.'): int(words[4])})
            robot_specs.append(info)
        # BUG FIX: previously only the LAST robot's spec was stored
        # ({number: info}); keep the full robot list per blueprint.
        # Also stopped shadowing the builtins `input`, `list` and `type`.
        blueprints.append({number: robot_specs})
print(blueprints)
5743398f487fcceb3247a0ee2e76cc624f565c9d | Python | anisimovkv/sudoku | /src/test_sudoku.py | UTF-8 | 1,422 | 3.140625 | 3 | [] | no_license | import unittest
from typing import Tuple
import numpy as np
from .sudoku import sudoku_solver
class MyTestCase(unittest.TestCase):
    """Regression test: sudoku_solver must reproduce the known solution."""

    def test_sudoku_solver(self):
        puzzle, expected = self.init_data()
        solved = sudoku_solver(puzzle.copy())
        print(puzzle)
        print(solved)
        print(expected)
        np.testing.assert_array_equal(expected, solved)

    @staticmethod
    def init_data() -> Tuple:
        # Puzzle and solution taken from https://en.wikipedia.org/wiki/Sudoku
        puzzle: np.ndarray = np.array([
            [5, 3, 0, 0, 7, 0, 0, 0, 0],
            [6, 0, 0, 1, 9, 5, 0, 0, 0],
            [0, 9, 8, 0, 0, 0, 0, 6, 0],
            [8, 0, 0, 0, 6, 0, 0, 0, 3],
            [4, 0, 0, 8, 0, 3, 0, 0, 1],
            [7, 0, 0, 0, 2, 0, 0, 0, 6],
            [0, 6, 0, 0, 0, 0, 2, 8, 0],
            [0, 0, 0, 4, 1, 9, 0, 0, 5],
            [0, 0, 0, 0, 8, 0, 0, 7, 9],
        ])
        solution: np.ndarray = np.array([
            [5, 3, 4, 6, 7, 8, 9, 1, 2],
            [6, 7, 2, 1, 9, 5, 3, 4, 8],
            [1, 9, 8, 3, 4, 2, 5, 6, 7],
            [8, 5, 9, 7, 6, 1, 4, 2, 3],
            [4, 2, 6, 8, 5, 3, 7, 9, 1],
            [7, 1, 3, 9, 2, 4, 8, 5, 6],
            [9, 6, 1, 5, 3, 7, 2, 8, 4],
            [2, 8, 7, 4, 1, 9, 6, 3, 5],
            [3, 4, 5, 2, 8, 6, 1, 7, 9],
        ])
        return puzzle, solution


if __name__ == '__main__':
    unittest.main()
| true |
cee6a7df33c05f87ab8353a6c7877fb6542a0402 | Python | yanruibo/machine-learning | /bayes/preprocess_data.py | UTF-8 | 720 | 2.625 | 3 | [] | no_license | #!/usr/bin/python
# encoding: utf-8
'''
Created on Nov 6, 2015
@author: yanruibo
'''
import numpy as np
if __name__ == '__main__':
    # Python 2 script: drop the id column, binarise the class column
    # (2 -> 0, everything else -> 1) and move it to the last position.
    data = np.loadtxt(fname='unformatted-data.txt', dtype=np.int64, delimiter=',')
    # Drop column 0 (record id).
    data = np.delete(data, [0], axis=1)
    for i in range(len(data)):
        if(data[i,0]==2):
            data[i,0]=0
        else:
            data[i,0]=1
    print data
    # Features = everything after the (now binary) class column.
    trainMat = np.delete(data,[0],axis=1)
    classes = data[:,0].reshape((len(data[:,0]),1))
    # Append the class labels as the final column.
    resultMat = np.append(trainMat, classes, axis=1)
    print resultMat
    #np.savetxt("formatted-data.txt", resultMat,fmt='%d')
    # Sanity check: reload the previously saved formatted file.
    loadData = np.loadtxt("formatted-data.txt")
    print "loadData",loadData
| true |
b6733c2077bb1a4a5ab1615f454da4efe08c29fa | Python | deku-M-O/zuoye | /作业4.py | UTF-8 | 193 | 3.265625 | 3 | [] | no_license | print(5*"*")
# Middle and bottom of a hard-coded 5-wide box (its top row is printed
# just above): three hollow rows followed by the closing edge.
for _ in range(3):
    print("*" + " " * 3 + "*")
print("*" * 5)


def fang(a):
    """Print a hollow '*' box of width `a` with `a` hollow middle rows."""
    print("*" * a)
    for _ in range(a):
        print("*" + " " * (a - 2) + "*")
    print("*" * a)


fang(8)
21fae895fd99cb9fc7445a0c41bf6c2d3825fa6e | Python | Jomij/ml2 | /nnet.py | UTF-8 | 483 | 2.90625 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plf
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
data = pd.read_csv("train.csv").as_matrix()
print("Matrix data\n",data)
clf = DecisionTreeClassifier()
X_train = data[0:21000,1:]
Y_train = data[0:21000,0]
X_test = data[21000:,1:]
Y_test = data[21000:,0]
clf.fit(X_train,Y_train)
disp = X_test[0]
disp_shape = (28,28)
plf.imshow(255-disp,cmap="grey")
plf.show()
p=clf.predict([X_test[8]])
print ("Predict",p)
| true |
41ae3bc502fb6639764742da7f442373084c105c | Python | jiangshipan/zy-web | /zy-web/service/user_service.py | UTF-8 | 669 | 2.734375 | 3 | [] | no_license | # coding=utf-8
class UserService(object):
    """In-memory credential store with a trivial login check."""

    def __init__(self):
        # Name-mangled password table; tokens are handed out on login.
        self.__user = {
            'jiangshipan': '123456',
            'zhangzhiyu': '123456',
            'yangboxin': '123456'
        }
        self.login_token = {
            'jiangshipan': '123456_login',
            'zhangzhiyu': 'qqqqq_login',
            'yangboxin': '123qqqe456'
        }

    def login(self, username, password):
        """Return (token, True) on success, or (error message, False)."""
        stored = self.__user.get(username)
        if not stored:
            return "user is not exist", False
        if stored == password:
            return self.login_token[username], True
        return "username or password is error", False
| true |
670c5e0d6984baa4e42794598c82db99333bf2fe | Python | PeterZhangxing/codewars | /no_five.py | UTF-8 | 239 | 3.578125 | 4 | [] | no_license | #!/usr/bin/python3.5
def dont_give_me_five(start,end):
n = 0
for i in range(start,end+1):
if '5' not in list(str(i)):
n += 1
return n
if __name__ == "__main__":
n = dont_give_me_five(4,17)
print(n) | true |
c0870ca1471613a57bcc3a695c670cbcc1c9f510 | Python | XuanC6/Identifying-Duplicate-Questions | /DupQues/src/trainer.py | UTF-8 | 5,516 | 2.609375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import sys
import random
class Trainer:
    """Mini-batch trainer/evaluator for a two-sentence matching model.

    Holds the current data split (two token sequences, their lengths and
    the labels) plus an iteration order, and feeds fixed-size mini-batches
    to a TensorFlow-style `model` through `sess.run`.
    """

    def __init__(self):
        self.data1 = None
        self.data2 = None
        self.length1 = None
        self.length2 = None
        self.labels = None
        self.data_idxs = None

    def _feed_raw_data(self, raw_data, shuffle_flag=True):
        """Unpack (data1, data2, length1, length2, labels) and build the
        iteration order, optionally shuffled."""
        self.data1, self.data2, self.length1, self.length2, self.labels = raw_data
        data_idxs = list(range(len(self.labels)))
        if shuffle_flag:
            random.shuffle(data_idxs)
        self.data_idxs = data_idxs

    def _train_one_minibatch(self, step, model, sess):
        """Run one optimisation step on the mini-batch starting at `step`
        and return its training loss."""
        batch_size = model.batch_size
        batch_idxs = self.data_idxs[step:step + batch_size]
        input1 = [self.data1[ix] for ix in batch_idxs]
        input2 = [self.data2[ix] for ix in batch_idxs]
        length1 = [self.length1[ix] for ix in batch_idxs]
        length2 = [self.length2[ix] for ix in batch_idxs]
        labels = [self.labels[ix] for ix in batch_idxs]
        # Randomly swap the sentence order half the time -- augmentation
        # that assumes the pair relation is symmetric.
        if random.random() < 0.5:
            input1, input2 = input2, input1
            length1, length2 = length2, length1
        loss, _ = sess.run(
            [model.loss, model.train_op],
            feed_dict = {
                model.input1: input1,
                model.input2: input2,
                model.length1: length1,
                model.length2: length2,
                model.labels: labels,
                model.keep_prob: 1-model.config.dropout,
                model.emb_keep_prob: model.config.emb_keep_prob,
                model.training: True
            }
        )
        return loss

    def train_one_epoch(self, raw_data, model, sess, shuffle_flag=True):
        """Train over the whole split once and return the last batch loss.

        The final partial batch (fewer than batch_size samples) is skipped.
        NOTE(review): if the split is smaller than one batch, `loss` is
        never assigned and the final `return loss` raises -- confirm
        callers always pass at least one full batch.
        """
        self._feed_raw_data(raw_data, shuffle_flag=shuffle_flag)
        batch_size = model.batch_size
        output_iter = 0
        for step in range(0, len(self.labels), batch_size):
            if min(step + batch_size, len(self.labels)) < batch_size + step:
                break
            loss = self._train_one_minibatch(step, model, sess)
            percent = (step + batch_size)*100.0 / len(self.labels)
            # Print a persistent progress line every ~20% of the epoch.
            if percent//20 > output_iter:
                output_iter = percent//20
                if output_iter < 5:
                    print('train loss: %.4f at %.2f%% of train set.\r'%(loss, percent))
            sstr = 'train loss: %.4f at %.2f%% of train set.\r'%(loss, percent)
            sys.stdout.write(sstr)
            # for linux
            sys.stdout.flush()
        return loss

    def _evaluate_one_minibatch(self, step, model, sess):
        """Forward-only pass; returns (predicts, scores, labels) for the
        mini-batch starting at `step`."""
        batch_size = model.batch_size
        batch_idxs = self.data_idxs[step:step + batch_size]
        input1 = [self.data1[ix] for ix in batch_idxs]
        input2 = [self.data2[ix] for ix in batch_idxs]
        length1 = [self.length1[ix] for ix in batch_idxs]
        length2 = [self.length2[ix] for ix in batch_idxs]
        labels = [self.labels[ix] for ix in batch_idxs]
        predicts, scores = sess.run(
            [model.predicts, model.scores],
            feed_dict={
                model.input1: input1,
                model.input2: input2,
                model.length1: length1,
                model.length2: length2,
                model.labels: labels,
                model.keep_prob: 1.0,
                model.emb_keep_prob: 1.0
            }
        )
        return (predicts, scores, labels)

    def evaluate(self, raw_data, model, sess):
        """Compute binary-classification metrics over the split.

        Returns a dict with TP/TN/FP/FN counts plus accuracy, precision,
        recall and F1 score. The final partial batch is skipped.
        NOTE(review): batch size is read from model.config here but from
        model.batch_size elsewhere -- confirm both are the same value.
        """
        self._feed_raw_data(raw_data, shuffle_flag=False)
        batch_size = model.config.batch_size
        total_data = 0
        num_correct = 0
        predict_1 = 0
        predict_1_correct = 0
        predict_0_correct = 0
        true_1 = 0
        metrics = {}
        for step in range(0, len(self.labels), batch_size):
            if min(step+batch_size,len(self.labels)) < batch_size + step:
                break
            predicts, scores, labels = \
                self._evaluate_one_minibatch(step, model, sess)
            for pred, label in zip(predicts, labels):
                if pred == label:
                    num_correct += 1
                if pred == label == 1:
                    # TP
                    predict_1_correct += 1
                if pred == label == 0:
                    # TN
                    predict_0_correct += 1
                if pred == 1:
                    # TP + FP
                    predict_1 += 1
                if label == 1:
                    # TP + FN
                    true_1 += 1
            total_data += batch_size
        metrics["TP"] = predict_1_correct
        metrics["TN"] = predict_0_correct
        metrics["FP"] = predict_1 - predict_1_correct
        metrics["FN"] = true_1 - predict_1_correct
        metrics["acc"] = num_correct/total_data
        if not predict_1:
            precision = 0
        else:
            precision = predict_1_correct/predict_1
        # BUG FIX: guard against a split with no positive labels, which
        # previously raised ZeroDivisionError when computing recall.
        if not true_1:
            recall = 0
        else:
            recall = predict_1_correct/true_1
        metrics["precision"] = precision
        metrics["recall"] = recall
        if not precision and not recall:
            metrics["F1 score"] = 0
        else:
            metrics["F1 score"] = 2*precision*recall/(precision + recall)
        return metrics
| true |
96a6b3bb1846472d5a2e6df6f5e2b297f6ff6263 | Python | ayushbansal323/TE | /python/sudoku.py | UTF-8 | 1,317 | 3.140625 | 3 | [] | no_license | import random
def makesudoku():
    """Generate a random 3x3 grid by rejection sampling.

    Randomly places `iCount` values (1..3) so that no value repeats in
    its row or column, then rejects the grid if completing any row or
    column to the magic sum 6 would clash with another row/column, and
    retries until a grid passes.
    NOTE(review): `jCount` is computed but never used, and the inner
    placement loop can spin for a long time when iCount is large and no
    legal placement remains -- confirm this is acceptable.
    """
    a=[[0,0,0],[0,0,0],[0,0,0]]
    iCount=int(random.uniform(1,9))
    jCount=int(random.uniform(1,4))
    i=0;
    do=1
    while(do == 1):
        # Start from a fresh empty grid on every retry.
        a=[[0,0,0],[0,0,0],[0,0,0]]
        i=0;
        while i<iCount:
            # Pick a random row and value; accept only if the value is
            # absent from that row and from column i%3.
            j=int(random.uniform(1,4))-1
            no=int(random.uniform(1,4))
            if no not in a[j]:
                if no != a[0][i%3] and no != a[1][i%3] and no != a[2][i%3]:
                    a[j][i%3]=no
                    i=1+i
        do=0
        # Reject the grid if the value needed to complete any row/column
        # to sum 6 already appears in a conflicting row or column.
        for i1 in range(3):
            for j1 in range(3):
                ans=0
                if(j1 != 0):
                    ans=ans+a[i1][0]
                if(j1 != 1):
                    ans=ans+a[i1][1]
                if(j1 != 2):
                    ans=ans+a[i1][2]
                if(i1 == 0):
                    if (6-ans) == a[1][j1] or (6-ans) == a[2][j1]:
                        do=1
                if(i1 == 1):
                    if (6-ans) == a[0][j1] or (6-ans) == a[2][j1]:
                        do=1
                if(i1 == 2):
                    if (6-ans) == a[0][j1] or (6-ans) == a[1][j1] :
                        do=1
                ans=0
                if(i1 != 0):
                    ans=ans+a[0][j1]
                if(i1 != 1):
                    ans=ans+a[1][j1]
                if(i1 != 2):
                    ans=ans+a[2][j1]
                if(j1 == 0):
                    if (6-ans) == a[i1][1] or (6-ans) == a[i1][2]:
                        do=1
                if(j1 == 1):
                    if (6-ans) == a[i1][0] or (6-ans) == a[i1][2]:
                        do=1
                if(j1 == 2):
                    if (6-ans) == a[i1][0] or (6-ans) == a[i1][1]:
                        do=1
    return a
def main():
    """Generate one random 3x3 grid and print it row by row."""
    print("sudoku")
    grid = makesudoku()
    for row_idx in range(3):
        print(grid[row_idx])


if __name__== "__main__":
    main()
| true |
2f1eebb592dc64b4e6653e4a0522cd02796bb8df | Python | ballaneypranav/rosalind | /archive/scsp.py | UTF-8 | 665 | 3.40625 | 3 | [] | no_license | def main():
a = input()
b = input()
print(interleave(a, b))
# Memoisation cache for interleave(), keyed on the (a, b) string pair.
archive = {}


def interleave(a, b):
    """Return a shortest common supersequence (SCS) of strings `a` and `b`.

    BUG FIX: the old base case returned a + b whenever EITHER string had
    length <= 1, so e.g. interleave("ab", "b") gave "abb" instead of "ab"
    and interleave("a", "a") gave "aa". Only a truly empty string may
    trigger the concatenation base case.
    """
    if (a, b) in archive:
        return archive[(a, b)]
    if not a or not b:
        # SCS with an empty string is just the other string.
        result = a + b
    elif a[0] == b[0]:
        # A shared first character appears only once in the supersequence.
        result = a[0] + interleave(a[1:], b[1:])
    else:
        # Branch on which string contributes its first character; keep the
        # shorter outcome (ties keep the b-first branch, as before).
        with_a = a[0] + interleave(a[1:], b)
        with_b = b[0] + interleave(a, b[1:])
        result = with_a if len(with_a) < len(with_b) else with_b
    archive[(a, b)] = result
    return result
main()
| true |
0776d7c9de1c69ab7c25673481e6e808e0f3c9e1 | Python | noza7/Python_Home | /pro_store/change_pics/pics/new.py | UTF-8 | 1,064 | 2.84375 | 3 | [] | no_license | import cv2
import numpy as np
def cvt_background(path, color):
"""
功能:给证件照更换背景色(常用背景色红、白、蓝)
输入参数:path:照片路径
color:背景色 <格式[B,G,R]>
"""
im = cv2.imread(path)
im_hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
aim = np.uint8([[im[0, 0, :]]])
hsv_aim = cv2.cvtColor(aim, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(im_hsv, np.array([hsv_aim[0,0,0]-5,100,100]), np.array([hsv_aim[0,0,0]+5,255,255]))
mask_inv = cv2.bitwise_not(mask)
img1 = cv2.bitwise_and(im, im, mask=mask_inv)
bg = im.copy()
rows,cols,channels=im.shape
bg[:rows,:cols,:]=color
img2 = cv2.bitwise_and(bg,bg,mask=mask)
img = cv2.add(img1,img2)
image = {'im': im, 'im_hsv': im_hsv, 'mask': mask, 'img': img}
for key in image:
cv2.namedWindow(key)
cv2.imshow(key, image[key])
cv2.waitKey(0)
return img # test
if __name__ == '__main__':
img = cvt_background('pics/20171120125917900.jpg', [0, 0, 180])
| true |
a3e803e8f5497ce9c589ac05c6a53d3beb24b312 | Python | Eric-L-Manibardo/CaseStudy2020 | /MADRID_code/Deep Learning/02-Test/NAIVE_test.py | UTF-8 | 694 | 2.765625 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 25 14:24:26 2020
@author: eric
"""
import numpy as np
import pandas as pd
from sklearn.metrics import r2_score
# Traffic-loop (espira) ids for the Madrid case study.
espiras = ['4458','6980','10124','6132','3642','4192','3697','3910','3500', '5761']
#Loop about 4 studied forecasting horizons t+1,t+2,t+3,t+4
for h in range(4):
    persistencia = list()
    for k in range(len(espiras)):
        # load Dataset
        df_train = pd.read_csv('dataset_TRAIN_MADRID/'+ espiras[k]+'train_MADRID.csv')
        y_train = df_train['target'].values
        # Persistence (naive) baseline: predict y(t) for y(t+1+h);
        # score the lagged series against the original with R^2.
        persistencia.append(r2_score(y_train[1+h:], y_train[:-(1+h)]))
    # One R^2 per loop id for this horizon.
    print(np.array(persistencia))
0c9defff6cd9dbd846376e74522763af9b287199 | Python | alexteachey/MoonPy | /export_conda_env_to_yml.py | UTF-8 | 1,016 | 2.9375 | 3 | [
"MIT"
] | permissive | import os
### identify your working environment
current_environment = os.environ['CONDA_DEFAULT_ENV']

### export this environment to a .yml file
output_name = input("What name do you want to give the output environment file? (Press ENTER to keep it the same as the current environment): ")
if output_name == '':
	output_name = current_environment
# SECURITY NOTE(review): output_name comes straight from user input and is
# interpolated into a shell command -- a name containing shell
# metacharacters would be executed; consider subprocess.run([...]).
export_command = 'conda env export -n '+current_environment+' -f '+output_name+'.yml --no-builds'
os.system(export_command)

#### now make sure you replace the current_environment name with the output name!
exported_filename = output_name+'.yml'
replacement_filename = 'replacement_'+output_name+'.yml'
# Rewrite the exported YAML, substituting the environment name line(s).
orig_file = open(exported_filename, mode='r')
new_file = open(replacement_filename, mode='w')
for nline,line in enumerate(orig_file):
	line = line.replace(current_environment, output_name)
	new_file.write(line)
orig_file.close()
new_file.close()

### replace the original file with the new file
os.system('mv '+replacement_filename+' '+exported_filename)
| true |
425fa42a12f2bcb27aef6dfcdf7de7a4cdd1895d | Python | yassine2403/GOMYCODE | /aaslema5.py | UTF-8 | 110 | 3.75 | 4 | [] | no_license | import math
# Read an integer from the user and print its factorial.
x=input("enter a number ")
print("the factorial of ur number is "+str(math.factorial(int(x))))
| true |
467c56d6f9bfeac4d143155c7f568bd35481df14 | Python | gitshangxy/tutorial | /L44并发和异步IO/3主线程和子线程.py | UTF-8 | 844 | 3.5625 | 4 | [] | no_license | import threading
import time
def run(n):
    """Countdown worker: announce task `n`, then print 2s/1s/0s one second apart."""
    print('task', n)
    time.sleep(1)
    print('2s')
    time.sleep(1)
    print('1s')
    time.sleep(1)
    print('0s')
    time.sleep(1)
for i in range(3):
    # Spawn a daemon worker thread per task.
    t = threading.Thread(target=run, args=('t{}'.format(i),))
    t.setDaemon(True)
    t.start()

# Main thread.
time.sleep(1.5)  # With time.sleep(1) the '2s' lines sometimes print and sometimes don't -- shows that concurrent access to shared stdout is racy.
print(threading.active_count())  # 3 workers + 1 main thread; shows the threads doing independent work
"""
输出结果
task t0
task t1
task t2
2s
2s
2s
4
"""

# t.join()  # would block the main thread until workers finish -- loses the concurrency, but makes thread behaviour easier to study.
# t.setDaemon(True)  # daemon threads die when the main thread exits.
| true |
34b8a5776a0af73faa08fe923a974a24c41a6fed | Python | tlillis/ai3202 | /Assignment7/prior.py | UTF-8 | 1,552 | 2.875 | 3 | [] | no_license | import helpers
from random import random
print("Prior Sampling\n")

# BUG FIX: raw_samples was immediately reset to [] right after fetching,
# so no samples were ever parsed and every probability below divided by
# zero (len(test) == 0).
raw_samples = helpers.getSamples()

# Group the flat sample stream into dicts of the four network variables:
# cloudy, sprinkler, rain, wet-grass (four consecutive values per sample).
samples = []
for i in range(len(raw_samples)):
    if (i) % 4 == 0:
        sample = {
            "c": raw_samples[i],
            "s": raw_samples[i+1],
            "r": raw_samples[i+2],
            "w": raw_samples[i+3]
        }
        samples.append(sample)

test = []
num = 0
for sample in samples:
    num += 1
    test.append(helpers.priorCheck(sample))

#### P(c = true) ####
count = 0.0
for sample in test:
    if sample['c'] == True:
        count += 1
value = count/len(test)
print('P(c = true) = {0}'.format(value))

#### P(c = true | r = true) ####
value = 0.0
count = 0.0
count_total = 0.0
for sample in test:
    if sample['r'] == True:
        if sample['c'] == True:
            count += 1
        count_total += 1
value = count/count_total
print('P(c = true | r = true) = {0}'.format(value))

#### P(s = true | w = true) ####
value = 0.0
count = 0.0
count_total = 0.0
for sample in test:
    if sample['w'] == True:
        if sample['s'] == True:
            count += 1
        count_total += 1
value = count/count_total
print('P(s = true | w = true) = {0}'.format(value))

#### P(s = true | c = true, w = true) ####
value = 0.0
count = 0.0
count_total = 0.0
for sample in test:
    if sample['c'] == True and sample['w'] == True:
        if sample['s'] == True:
            count += 1
        count_total += 1
value = count/count_total
print('P(s = true | c = true, w = true) = {0}'.format(value))
| true |
dfe71e1361aeafda6649116e93dc0d39473e02c0 | Python | nyquist/scorobot | /games/game.py | UTF-8 | 5,680 | 2.78125 | 3 | [] | no_license | import time
import random
from globalcfg import backend
from games.players import Team, SinglePlayer
from games.rules import SoccerChampionship
import pprint
class Game:
    """One recorded match: the two opponents, the final score and duration."""

    def __init__(self, team1, team2, score1, score2, duration='90'):
        # NOTE(review): declaration only -- `backend` is never assigned here.
        global backend
        self.teams = (team1, team2)
        self.score = (score1, score2)
        # Recording timestamp (epoch seconds), not the kickoff time.
        self.date = time.time()
        # Match length marker; per the original note one of 90, 120 or 11.
        self.duration = duration
class Tournament:
    """A named collection of games persisted through the module-global
    `backend`, with ranking and Elo-rating computations on top.
    """

    def __init__(self, name, host_id, rules):
        """Register a new tournament with the backend.

        :param name: display name of the tournament
        :param host_id: identifier of the hosting user/channel
        :param rules: rules object providing getPoints() and rank()
        """
        global backend
        # Local caches; the backend remains the authoritative store.
        self.teams = set()
        self.games = []
        self.id = backend.addTournament(name, host_id)
        self.rules = rules

    def addGame(self, teamA, teamB, scoreA, scoreB):
        """Create a Game from raw team names/scores and persist it."""
        #print("New game:", teamA, teamB, scoreA, scoreB )
        new_game = Game(Team(teamA,1), Team(teamB,1), scoreA, scoreB)
        return self.addGameObj(new_game)

    def addGameObj(self, game):
        """Persist an existing Game object and update the local caches.

        :returns: whatever backend.addGame returns for the stored row
        """
        self.games.append(game)
        self.teams.add(game.teams[0])
        self.teams.add(game.teams[1])
        game_details = (game.date, str(game.teams[0]), str(game.teams[1]), int(game.score[0]), int(game.score[1]), str(game.duration), self.id)
        return backend.addGame(game_details)

    def getGames(self, last_hours = 0):
        """Fetch this tournament's games from the backend (0 = no limit)."""
        return backend.getGames(self.id, last_hours, teams_filter = [])

    def getTeams(self):
        """Return the team names stored in the backend for this tournament."""
        return [row[0] for row in backend.getTeams(self.id)]

    def getRanking(self, last_hours = 0, teams_filter=[]):
        """Build a ranking table from the stored games.

        NOTE(review): teams_filter=[] is a mutable default argument; it is
        never mutated here, but a None default would be safer.

        :returns: list of (team, totals-dict) pairs sorted by rules.rank
        """
        teams = dict()
        # Game rows are indexed positionally: [2]/[3] team names,
        # [4]/[5] goals -- presumably matching the backend row layout.
        for game in backend.getGames(self.id, last_hours, teams_filter):
            teamA = game[2]
            teamB = game[3]
            if teamA in teams:
                old_totals = teams[teamA]
            else:
                old_totals = None
            teams[teamA] = self._teamTotals(old_totals, game[4], game[5])
            if teamB in teams:
                old_totals = teams[teamB]
            else:
                old_totals = None
            teams[teamB] = self._teamTotals(old_totals, game[5], game[4])
        return sorted(teams.items(), key= self.rules.rank, reverse=True)

    def getELOs(self, before_hours=0):
        """Replay all stored games in order and compute Elo ratings.

        :returns: dict team -> {'R': current rating, 'gt': games played,
                  'd': rating change from the team's latest game}
        """
        teams = dict()
        R=dict() #Rating
        gt=dict() #Games Total
        D=dict()
        g=0  # number of games replayed (for the summary print below)
        for game in backend.getGames(self.id, last_hours = -before_hours, teams_filter=[]):
            g +=1
            teamA = game[2]
            teamB = game[3]
            for t in [teamA, teamB]:
                if t in teams:
                    R[t] = teams[t]['R']
                    gt[t] = teams[t]['gt']
                else:
                    # First appearance: start at the baseline rating.
                    R[t] = 1000
                    gt[t] = 0
                    D[t] = 0
                    teams[t] = {'R': R[t], 'gt': gt[t], 'd': D[t]}
            RA, RB = self._getElo(R[teamA], R[teamB],gt[teamA], gt[teamB], game[4], game[5])
            # Record the delta before overwriting the rating.
            teams[teamA]['d'] = RA - teams[teamA]['R']
            teams[teamB]['d'] = RB - teams[teamB]['R']
            teams[teamA]['R'] = RA
            teams[teamB]['R'] = RB
            teams[teamA]['gt'] +=1
            teams[teamB]['gt'] +=1
            #print (teamA, teamB, game[4], game[5])
            #pprint.pprint (teams)
        print ("Games#", g)
        return teams

    def _getElo(self, Ra, Rb, gta, gtb, ga, gb):
        """Return the updated (Ra, Rb) ratings after one game.

        Uses K=20, a 400-point logistic scale, and a goal-difference
        multiplier kg (1 for close games, 3/2 for a two-goal gap,
        (11+diff)/8 for routs).

        NOTE(review): (Rb-Ra)/d relies on true division -- Python 3 only.
        """
        gta +=1
        gtb +=1
        Ka = 20
        Kb = 20
        d = 400
        # Expected scores from the logistic rating difference.
        Ea = 1/(1 + pow(10,(Rb-Ra)/d))
        Eb = 1/(1 + pow(10,(Ra-Rb)/d))
        # Actual scores: win=1, draw=0.5, loss=0.
        wa, wb = (0.5,0.5) if ga==gb else (1, 0) if ga>gb else (0,1)
        kg = 1 if abs(ga-gb) < 2 else 3/2 if abs(ga-gb)==2 else (11+(abs(ga-gb)))/8
        Pa = Ka*(wa-Ea)*kg
        Pb = Kb*(wb-Eb)*kg
        Ra_new = Ra + int(Pa)
        Rb_new = Rb + int(Pb)
        #print (Ra,"->",Ra_new, Rb,"->",Rb_new, gta, gtb, ga, gb, Ka, Kb, Ea, Eb, wa, wb, Pa, Pb, kg)
        return (Ra_new, Rb_new)

    def _teamTotals(self, old_totals, gf, ga):
        """Fold one game (gf goals for, ga goals against) into a totals dict.

        :param old_totals: previous totals dict or None on first game
        :returns: dict with keys w/d/l (wins/draws/losses), gf/ga, p (points)
        """
        if old_totals is None:
            totals = {
                'w':1 if gf>ga else 0,
                'd':1 if gf==ga else 0,
                'l':1 if gf<ga else 0,
                'gf':gf,
                'ga':ga,
                'p':self.rules.getPoints(gf,ga)
            }
        else:
            totals = {
                'gf': old_totals['gf'] + gf,
                'ga': old_totals['ga'] + ga,
                'w': old_totals['w'] + 1 if gf>ga else old_totals['w'],
                'l': old_totals['l'] + 1 if gf<ga else old_totals['l'],
                'd': old_totals['d'] + 1 if gf==ga else old_totals['d'],
                'p': old_totals['p'] + self.rules.getPoints(gf,ga)
            }
        return totals
if __name__ == "__main__":
    # Pick two distinct random team names (kept for the ad-hoc game
    # creation that is commented out below).
    team_names = ["A","B","C","D","E","F","G","H"]
    team1 = random.choice(team_names)
    team_names.remove(team1)
    team2 = random.choice(team_names)
    rules = SoccerChampionship('any')
    new_tournament = Tournament('Champions League 2020','Discord#Fotbal', rules)
    #new_game = Game(Team(team1,1), Team(team2,1), int(random.randint(0,4)), int(random.randint(0,4)))
    #new_tournament.addGameObj(new_game)
    elos = new_tournament.getELOs()
    # Print a ranking table: matches, win/draw/loss, goals for/against,
    # points, current Elo rating and the delta from the latest game.
    for line in new_tournament.getRanking():
        print ("{T:10} {M:2} {W:2} {D:2} {L:2} {GF:3} {GA:3} {P:3} {E:7} {F:7}".format(
            T=line[0],
            M=line[1]['w']+line[1]['d']+line[1]['l'],
            W=line[1]['w'],
            D=line[1]['d'],
            L=line[1]['l'],
            GF=line[1]['gf'],
            GA=line[1]['ga'],
            P=line[1]['p'],
            E=elos[line[0]]['R'],
            # BUG FIX: getELOs stores the rating delta under 'd'; the
            # original read 'D', which raised KeyError at runtime.
            F=elos[line[0]]['d']
            )
        )
| true |
6c619c5b1d6abdc4704c1db1ddb7a6db272a7457 | Python | IRTSA-SoftwareProject/IRTSA-Server | /server/commands/ris_processing/read_ris.py | UTF-8 | 4,329 | 3.40625 | 3 | [] | no_license | """ Created on 11 Apr. 2018
This module provides a method to read *.ris files into a numpy
multidimensional array. Note that *.ris files are 16-bit per pixel.
@author: James Moran [jpmoran.pac@gmail.com]
"""
import re
import numpy
import struct
def _get_metadata(file):
""" Read the metadata of the *.ris file. The metadata always appears first
in the *.ris file and has a very specific format. Thie method extracts
the important parameters, namely the image width and height, and number
of frames.
"""
# Get the file's meta data, assumes only one "description" field
metadata = (file.readline()).decode("utf-8")
temp = '\n'
while '</description>' not in temp:
temp = file.readline().decode("utf-8")
metadata = metadata + str(temp)
if '<metaitem name="imageWidth" value=' in str(temp):
width = re.search('\d+', temp).group(0)
if '<metaitem name="imageHeight" value=' in str(temp):
height = re.search('\d+', temp).group(0)
if '<metaitem name="numberOfFrames" value=' in str(temp):
frames = re.search('\d+', temp).group(0)
# The final "</ris>" is on the same line as the first data, so add it
# manually (it must follow the last "</description>").
metadata = metadata + file.read(6).decode("utf-8")
# Write start of data
datastart = file.tell()
return [int(width), int(height), int(frames), int(datastart)]
def get_thermogram(file, x_start = 0, width = float('inf'),
                   y_start = 0, height = float('inf'),
                   frame_start = 0, frame_count = float('inf')):
    """ Unpacks a .ris file into a u_int16 3D numpy matrix in the format:
    [frame, row, column]. _start arguments sets the pixel/frame to read from,
    width/height/frame_count sets how many bytes of each to read; useful to
    reduce the total data stored in memory by subsectioning the thermogram.

    :param file: open binary file object (it is rewound with seek(0))
    :param x_start, y_start, frame_start: first column/row/frame to read
    :param width, height, frame_count: size of the sub-block to extract
        (defaults of inf mean "as much as the file provides")
    :returns: numpy uint16 array of shape (frame_count, height, width)
    """
    file.seek(0)
    # Get_Metadata returns the frame width and height of the file being examined.
    # The width, height, and frame count specified to be examined must be less
    # than or equal to this value.
    [width_max, height_max, frame_count_max, datastart] = _get_metadata(file)
    # If not specified, width, height, and frame count are set to infinite
    # as the default call, so the value obtained from the metadata will
    # always be smaller than this. The method also ensures the width and
    # height are set to the maximum allowable values if set to greater than
    # what is available.
    width = min(width, width_max)
    height = min(height, height_max)
    frame_count = min(frame_count, frame_count_max)
    # Ensure the start + size does not exceed the maximum. If it does, set it to
    # the largest allowable within maximum.
    x_start = min(x_start+width,width_max)-width
    y_start = min(y_start+height,height_max)-height
    frame_start = min(frame_start+frame_count,frame_count_max)-frame_count
    # Create storage space for the thermogram
    thermogram = numpy.zeros([frame_count, height, width], dtype = numpy.uint16)
    for current_frame in range(frame_start, frame_start + frame_count):
        # Locate the frame in the file
        # The file is stored with each pixel stored as a 16-bit integer read left-to-right
        # and top-to-bottom starting with the top left pixel. This means that the first x
        # rows can be skipped if a size less than the frame height is specified. Same
        # applies to the end of the file.
        for current_row in range(y_start, y_start + height):
            # Seek directly to the requested row slice of this frame.
            file.seek(datastart + (width_max*height_max*current_frame + width_max*current_row + x_start)*2) # Start of row, double because it is u_int16
            bytes_to_read = width*2 # Double because it is u_int16
            read_bytes = file.read(bytes_to_read)
            # 'H' = unsigned 16-bit; one value per pixel in the row slice.
            read_bytes = struct.unpack('H'*int(bytes_to_read/2),read_bytes) # Convert bytes to u_int16
            thermogram[current_frame-frame_start, current_row-y_start, :] = read_bytes
    return thermogram
return thermogram
def read_thermogram(file):
    ''' Encapsulates the reading process and provides a file interface.

    :param file: path to a *.ris file on disk
    :returns: numpy uint16 array of shape (frames, height, width)
    '''
    # BUG FIX: the original used open()/close() without a try/finally, so
    # the handle leaked if get_thermogram raised; "with" always closes it.
    with open(file, 'rb') as f:
        return get_thermogram(f)
f151219fc26686f940d9a9a67ac4c474b22f87f9 | Python | BenPortner/panflute-filters | /filters/tabulate-elements.py | UTF-8 | 1,102 | 2.703125 | 3 | [] | permissive | """
Count frequency of each element
Sample usage:
> pandoc example.md -F tabulate-elements.py --to=markdown
Element Freqency
------------- ----------
MetaBool 2
SoftBreak 1
Str 46
MetaInlines 18
RawInline 7
Doc 1
MetaBlocks 1
Emph 1
MetaMap 2
Space 24
Para 2
MetaList 2
"""
from collections import Counter
import panflute as pf
def prepare(doc):
    """Attach an empty tag-frequency Counter to the document before the walk."""
    doc.counter = Counter()

def action(elem, doc):
    """Tally the tag of every visited element; the tree itself is unchanged."""
    doc.counter[elem.tag] += 1

def finalize(doc):
    """Replace the document body with a two-column tag/frequency table."""
    c1 = pf.TableCell(pf.Plain(pf.Str("Element")))
    c2 = pf.TableCell(pf.Plain(pf.Str("Frequency")))
    header = pf.TableRow(c1, c2)
    rows = []
    for tag in doc.counter:
        c1 = pf.TableCell(pf.Plain(pf.Str(tag)))
        c2 = pf.TableCell(pf.Plain(pf.Str(str(doc.counter[tag]))))
        rows.append(pf.TableRow(c1, c2))
    table = pf.Table(*rows, header=header)
    # bugbug? -- NOTE(review): doc.content is normally a panflute
    # ListContainer; confirm that assigning a plain list is accepted by
    # the panflute API in use.
    doc.content = [table]

def main(doc=None):
    """Entry point: run the filter (stdin/stdout when doc is None)."""
    return pf.run_filter(action, prepare, finalize, doc=doc)

# Allows use both as an executable pandoc filter and as an importable module.
if __name__ == '__main__':
    main()
| true |
9489c50102fc9b6740df2bebef2877640b9bd996 | Python | Gobidev/discord-mute-bot | /config_viewer.py | UTF-8 | 2,676 | 2.859375 | 3 | [] | no_license | import os
import pickle
guilds = []
class Guild:
    """Per-guild bot configuration, initialised with sensible defaults."""

    def __init__(self, guild):
        # Human-readable name and Discord snowflake of the guild.
        self.name = str(guild)
        self.guild_id = guild.id
        # Mute state plus the channel/role names used by the mute commands.
        self.is_muted = False
        self.game_channel_name = "Crew"
        self.dead_channel_name = "Ghosts"
        self.mute_permissions_role = "Mute Master"
        self.block_server_mute = False
        # Active game-code messages and the channel they were posted in.
        self.game_codes = []
        self.game_code_channel_id = None
class GameCode:
    """Tracks the pair of Discord messages that announce one game code."""

    def __init__(self, message1_id, message2_id, channel_id, author_id):
        # The two bot messages displaying the code, the channel they live
        # in, and the user who created them.
        self.message1_id = message1_id
        self.message2_id = message2_id
        self.channel_id = channel_id
        self.author_id = author_id
def load_guilds():
    """Load configuration of guilds from file with the pickle module"""
    global guilds
    # NOTE(review): pickle.load executes arbitrary code from the file; this
    # is only safe because guilds.config is written by this program itself.
    if os.path.isfile("guilds.config"):
        with open("guilds.config", "rb") as config_file:
            guilds = pickle.load(config_file)

def save_guilds():
    """Save configuration of guilds to file with the pickle module"""
    global guilds
    # Overwrites any existing guilds.config in the working directory.
    with open("guilds.config", "wb") as config_file:
        pickle.dump(guilds, config_file)
def generate_csv():
    """Dump every stored guild's attributes to out.csv, one row per guild.

    Commas inside attribute values are replaced with '.' so they cannot
    break the comma-separated layout.
    """
    global guilds
    # BUG FIX: the original never closed the file on an exception; the
    # context manager guarantees it. The manual first-column flag is
    # replaced by an equivalent ", ".join().
    with open("out.csv", "w", encoding='utf8') as output_file:
        for guild in guilds:
            values = dict(guild.__dict__.items()).values()
            row = ", ".join(str(value).replace(",", ".") for value in values)
            output_file.write(row + "\n")
def remove_duplicates():
    """De-duplicate stored guild configs by guild_id, keeping the first.

    Loads the config from disk, drops later entries that reuse an id
    already seen, prints the list length before and after, and saves.

    BUG FIX: the original deleted entries from `guilds` while iterating
    over it (skipping elements) and removed *every* copy of a duplicated
    id instead of keeping one.
    """
    global guilds
    load_guilds()
    print(len(guilds))
    seen_ids = set()
    deduped = []
    for guild in guilds:
        if guild.guild_id not in seen_ids:
            seen_ids.add(guild.guild_id)
            deduped.append(guild)
    guilds = deduped
    print(len(guilds))
    save_guilds()
def get_all_ids_from_csv():
    """Read out.csv and return the guild ids (second column) as ints.

    Also prints the ids, their count, and any duplicated ids.

    :returns: list of ints, or [] when out.csv does not exist yet
    """
    try:
        with open("out.csv", "r", encoding='utf-8') as f:
            all_data = [n.replace("\n", "") for n in f.readlines()]
    except FileNotFoundError:
        print("Please generate csv first")
        # BUG FIX: the original fell through here and crashed with a
        # NameError on all_data; return an empty result instead.
        return []
    all_ids = [int(n.split(",")[1]) for n in all_data]
    print(all_ids)
    print(len(all_ids))
    duplicates = list(set([x for x in all_ids if all_ids.count(x) > 1]))
    print(duplicates)
    print(len(duplicates))
    return all_ids
if __name__ == '__main__':
    # One-off maintenance entry point: de-duplicate the stored guild configs.
    remove_duplicates()
| true |
767e5332a67360f3879e0433c8739a8d9dc97387 | Python | yunjung-lee/class_python_numpy | /DataAnalysis/day1_3/HomeWork.py | UTF-8 | 487 | 2.921875 | 3 | [
"MIT"
] | permissive | import re
# Email strings to validate: the first five are well-formed addresses,
# the last three are malformed.
emails = ['python@mail.example.com', 'python+kr@example.com',
          'python-dojang@example.co.kr', 'python_10@example.info',
          'python.dojang@e-xample.com',
          '@example.com', 'python@example', 'python@example-com']

# local-part "@" domain "." alphabetic TLD
pat = re.compile("(\w+[\w\.]*)@(\w+[\w\.-]*)\.([A-Za-z]+)")

# Print True/False per address. Iterating the list directly replaces the
# original range(len(...)) index loop, and truth-testing the match object
# replaces the non-idiomatic "res == None" comparison.
for email in emails:
    print("True" if pat.search(email) else "False")
0289f1f5d51ccb978c08e7fafaeb9c7a1c665aac | Python | rtealwitter/QuantumQueryOptimizer | /paper/experiments.py | UTF-8 | 12,387 | 2.609375 | 3 | [
"MIT"
] | permissive | import quantum_query_optimizer as qqo
import numpy as np
import matplotlib.pyplot as plt
import random
def get_domain_all(n):
    '''
    Parameters:
        n : size of bit string
    Returns:
        D : list of all 2**n n-bit strings, in increasing numeric order
    '''
    domain = []
    for value in range(2 ** n):
        domain.append(np.binary_repr(value, width=n))
    return domain
def get_domain_some(n, num=32):
    '''
    Parameters:
        n : size of bit string
        num : number of bit strings to return (capped at 2**n)
    Returns:
        D : list of num distinct n-bit strings chosen uniformly at random
    '''
    count = min(num, 2 ** n)
    chosen = random.sample(range(2 ** n), count)
    return [np.binary_repr(value, width=n) for value in chosen]
def get_output_random(D):
    '''
    Parameters:
        D : set of input bitstrings
    Returns:
        E : for each element x in D, randomly '0' or '1'; all-zero
            outputs are rejected so at least one '1' always appears
    '''
    while True:
        E = [str(random.randint(a=0, b=1)) for _ in D]
        if '1' in E:
            return E
def get_output_OR(D):
    '''
    Evaluate the OR function on each input string.

    Parameters:
        D : set of input bitstrings
    Returns:
        E : for each element x in D,
            '1' if any position of x is 1 and '0' otherwise
    '''
    # DOC FIX: the original docstring claimed "1 in every position"
    # (AND semantics); the code implements OR, as the name says.
    return ['1' if '1' in x else '0' for x in D]
def e_i(index, dimension):
    '''
    Parameters:
        index : position of the 1 entry
        dimension : dimension of the vector space
    Returns:
        the standard basis vector with a 1 at `index`, zeros elsewhere
    '''
    basis = np.zeros(dimension)
    basis[index] = 1
    return basis
# this function is from qqo, but I don't think it's exported
def getL(X, run_checks=True, tolerance=.1):
    '''
    Factor X into L with L.H * L = X via an eigendecomposition.

    Parameters:
        X : matrix X (np.matrix -- the vec.H below requires the matrix
            type; presumably positive semidefinite, TODO confirm)
        run_checks : verify the reconstruction L.H * L against X
        tolerance : how close the reconstruction has to be to X
    Returns:
        L : matrix L such that L.H * L = X (the product of the conjugate
        transpose of L and itself is X)
    '''
    vals, vecs = np.linalg.eig(X)
    L = np.zeros(X.shape, dtype = np.complex128)
    for k in range(len(vals)):
        val = vals[k]
        vec = vecs[:,k]
        # Build sqrt(|lambda_k|) * |k><v_k|. Note the absolute value
        # silently drops the sign of any negative eigenvalue.
        ket_k = np.zeros((len(vals),1), dtype = np.complex128)
        ket_k[k,0] = 1
        scalar = np.complex128(np.sqrt(np.absolute(val)))
        L += scalar * ket_k.dot(vec.H)
    if run_checks:
        reconstructed_X = np.matrix(L).H.dot(L)
        if not (np.absolute(reconstructed_X - X) < tolerance).all():
            # NOTE(review): the trailing "yellow" argument looks like a
            # leftover from a colored-print helper; print() just appends it.
            print("Warning: The reconstruction of X from L is not close enough to X.", "yellow")
    return np.matrix(L)
def solveSDP(D, E, n, min_iterations):
    '''
    Solve the query-complexity SDP, retrying with fresh random (D, E)
    until the solution factors cleanly.

    Parameters:
        D : list of strings of length n
        E : list of strings of length 1
        n : the size of each string
        min_iterations : minimum number of iterations to run SDP
    Returns:
        A : query complexity (bottom-right entry of the SDP solution)
        L : matrix L such that L.H * L = X where X is the solution to the SDP
    '''
    # NOTE(review): this function shadows qqo.solveSDP by name.
    failed = True
    while failed:
        constraints, b, C = qqo.getConstraints(D=D, E=E)
        X, num_iteration = qqo.solveSDP(constraints=constraints, b=b, C=C, accuracy=1, min_iterations=min_iterations-1)
        A = X[-1,-1]
        L = getL(X, run_checks=False)
        # Accept only if L reproduces X closely and the complexity is sane.
        reconstructed_X = np.matrix(L).H.dot(L)
        if (np.absolute(reconstructed_X - X) < .01).all() and A >= 1:
            failed = False
        else:
            # Resample the problem instance and try again.
            print('Retrying with different random D,E...')
            D = get_domain_some(n)
            E = get_output_random(D)
    return A, L
def buildPsiVxi(D, E, A, L, n):
    '''
    Parameters:
        D : list of strings of length n
        E : list of strings of length 1
        A : query complexity
        L : matrix L such that L.H * L = X where X is the solution to the SDP
        n : number of qubits
    Returns:
        vectors : normalized vectors built for every input (both outputs)
        all_vxi_s : list of all vectors that form solution to SDP,
                    keyed by bit position i
    '''
    vectors = []
    all_vxi_s = {i: [] for i in range(n)}
    for index, y in enumerate(E):
        vector = np.zeros(n * len(L) * 2, dtype=np.complex128)
        for i in range(n):
            # Row of L.H corresponding to input x = D[index] and bit i,
            # flattened to a plain 1-D array.
            vxi = L.H[n*index + i,:]
            vxi = np.array(np.squeeze(vxi))[0]
            all_vxi_s[i] += [vxi]
            i_vector = e_i(i, n)
            # NOTE(review): eval() on the bit character works for '0'/'1'
            # but int(D[index][i]) would be safer.
            bit_vector = e_i(eval(D[index][i]), 2) if y == '1' else e_i(1 - eval(D[index][i]), 2)
            vector += np.kron(i_vector, np.kron(vxi, bit_vector))
        # Scale by sqrt(2A) (divided for 1-outputs, multiplied and negated
        # for 0-outputs), then prepend a 1 and normalize.
        scale = np.sqrt(2*A)
        vector = np.insert(vector / scale, 0, 1) if y == '1' else np.insert(vector * -1 * scale, 0, 1)
        vectors += [vector / np.linalg.norm(vector)]
    return vectors, all_vxi_s
def computeError(D, E, vectors, all_vxi_s, n):
    '''
    Parameters:
        D : list of input strings of length n
        E : list of output strings of length 1
        vectors : unit vectors from buildPsiVxi, one per input
        all_vxi_s : list of all vectors that form solution to SDP
        n : length of each input string
    Returns:
        max_error : largest |<psi_x, psi_y>| over pairs with f(x) != f(y)
    '''
    inner_errors = [0]
    # NOTE(review): vxi_errors is accumulated below but never returned or
    # read; kept as-is in case it is useful for debugging.
    vxi_errors = [0]
    for index1, y1 in enumerate(E):
        for index2, y2 in enumerate(E):
            if y1 != y2:
                # Check psi_x and phi_x
                inner_errors += [np.abs(np.dot(vectors[index1], vectors[index2]))]
                # Check vxi_s
                total = 0
                for i in range(n):
                    if D[index1][i] != D[index2][i]:
                        total += np.dot(all_vxi_s[i][index1], all_vxi_s[i][index2])
                vxi_errors += [np.abs(total-1)]
    return max(inner_errors)
def printyPrettyVxi(D, n, all_vxi_s):
    '''
    Print the solution vectors with all-zero dimensions stripped.

    NOTE: mutates all_vxi_s in place (each list of vectors is replaced by
    a stacked 2-D array with zero rows removed).

    Parameters:
        D : list of strings of length n
        n : number of qubits
        all_vxi_s : list of all vectors that form solution to SDP
    '''
    # Remove 0 dimensions
    for i in range(n):
        a = np.stack(all_vxi_s[i], axis=1)
        idx = np.argwhere(np.all(a[..., :] == 0, axis=1))
        a = np.delete(a, idx, axis=0)
        all_vxi_s[i] = a
    # Print remaining dimensions
    for index in range(len(D)):
        for i in range(n):
            print(f'x={D[index]}, i={i}')
            vxi = all_vxi_s[i][:,index]
            print('vxi= [', ', '.join([str(x) for x in vxi]), ']')
def fullRun(D, E, n, min_iterations, pretty_print=False, verbose=True):
    '''
    Solve the SDP for (D, E), build the solution vectors, and report
    error and spectrum statistics.

    Parameters:
        D : list of strings of length n
        E : list of strings of length 1
        n : number of qubits
        min_iterations : minimum number of SDP solver iterations
    Returns:
        s : singular values of the matrix of f(x)=1 solution vectors
        A : query complexity
        max_error : maximum error in the solution to the SDP
        num_one_outputs : number of inputs mapped to output '1'
    '''
    # Solve SDP numerically
    A, L = solveSDP(D, E, n, min_iterations)
    # Build vxi vectors
    vectors, all_vxi_s = buildPsiVxi(D, E, A, L, n)
    psi_s = [vector for vector, y in zip(vectors, E) if y == '1']
    num_one_outputs = len(psi_s)
    # Compute error
    max_error = computeError(D, E, vectors, all_vxi_s, n)
    # Print vxi vectors
    if pretty_print:
        printyPrettyVxi(D, n, all_vxi_s)
    # Compute spectrum of psi_s
    psi_s = np.stack(psi_s, axis=1)
    u, s, vh = np.linalg.svd(psi_s, full_matrices=False)
    if verbose:
        print(f'Query Complexity: {A}')
        print(f'Max Error: {max_error}')
        print(f'Input Size: {len(D)}')
        print(f'Number of 1 Outputs: {num_one_outputs}')
    return s, A, max_error, num_one_outputs
# iterations vs error
def write_iterations_vs_errors(ns, num_repeats, iterations, filename):
    """Append "n,num_iter,max_error" rows to `filename`.

    Runs num_repeats random instances for every (n, iteration-count) pair
    and only records runs that succeeded (A >= 1 and error below 0.5).
    """
    for n in ns:
        for num_iter in iterations:
            for _ in range(num_repeats):
                D = get_domain_some(n)
                E = get_output_random(D)
                s, A, max_error, num_one_outputs = fullRun(D, E, n, num_iter, pretty_print=False)
                if A >= 1 and max_error < .5: # Check for failure
                    with open(filename, 'a') as f:
                        f.write(f'{n},{num_iter},{max_error}\n')
                else:
                    print('Failed :(')
    return filename
def read_iterations_vs_errors(filename):
    """Parse an iterations-vs-error CSV into nested dicts.

    Each line has the form "n,num_iter,max_error".

    :param filename: path to the CSV written by write_iterations_vs_errors
    :returns: dict n -> dict num_iter -> list of max_error floats
    """
    errors_by_iteration = {}
    # BUG FIX: the original iterated open(filename) without ever closing
    # the handle; the context manager guarantees it.
    with open(filename, 'r') as f:
        for line in f:
            n_str, iter_str, err_str = line.split(',')
            n = int(n_str)
            num_iter = int(iter_str)
            max_error = float(err_str)
            errors_by_iteration.setdefault(n, {}).setdefault(num_iter, []).append(max_error)
    return errors_by_iteration
def plot_iterations_vs_errors(errors_by_iteration):
    """Plot mean error vs solver iterations, one line per n, with a one
    standard deviation band, and save to iterationsVsError.pdf.

    :param errors_by_iteration: dict n -> dict num_iter -> [errors], as
        produced by read_iterations_vs_errors
    """
    # One distinct linestyle per value of n (zip truncates to 5 lines).
    linestyles = [(5,(10,3)), '-', '--', '-.', ':']
    for n, linestyle in zip(errors_by_iteration, linestyles):
        iterations = []
        averages = []
        stds = []
        for num_iter in errors_by_iteration[n]:
            iterations += [num_iter]
            errors = errors_by_iteration[n][num_iter]
            averages += [np.mean(errors)]
            stds += [np.std(errors)]
        # Shade mean +/- one standard deviation, clipped below at 0.
        upper_confidence = [avg + std for avg, std in zip(averages, stds)]
        lower_confidence = [max(avg - std, 0) for avg, std in zip(averages, stds)]
        plt.plot(iterations, averages, linestyle=linestyle, label=f'n={n}')
        plt.fill_between(iterations, upper_confidence, lower_confidence, alpha=0.2)
    plt.title('Error vs Iterations of SDP Solver')
    plt.ylabel(r'Error ($\epsilon$)')
    plt.xlabel('Iterations of SDP Solver')
    plt.yscale('log')
    plt.legend()
    plt.tight_layout()
    plt.savefig('iterationsVsError.pdf')
    plt.close()
# Error vs required bound
def write_error_vs_bound(ns, num_repeats, iterations, filename):
    """Run random SDP instances and append "n,max_error,error_cutoff"
    rows to `filename`.

    The cutoff is the allowed error derived from the smallest singular
    value: s_min / (2 * 10 * A * sqrt(num_one_outputs)).

    BUG FIX: the original opened `filename` in append mode at the top,
    never wrote through that handle, shadowed it with a `with open(...)`
    inside the loop, and then called close() on the shadowed (already
    closed) handle. Only the per-row `with` open is needed.
    """
    for n in ns:
        for min_iterations in [iterations] * num_repeats:
            D = get_domain_some(n)
            E = get_output_random(D)
            s, A, max_error, num_one_outputs = fullRun(D, E, n, min_iterations, pretty_print=False, verbose=False)
            s_min = s[-1]
            error_cutoff = s_min / (2 * 10 * A * np.sqrt(num_one_outputs))
            # Skip runs that hit the precision floor or failed outright.
            if s_min >= 10e-8 and max_error < .5:
                with open(filename, 'a') as f:
                    f.write(f'{n},{max_error},{error_cutoff}\n')
            else:
                print('Failed :(')
    return filename
def read_error_vs_bound(filename):
    """Parse an error-vs-bound CSV into two parallel dicts keyed by n.

    Each line has the form "n,max_error,error_cutoff".

    :param filename: path to the CSV written by write_error_vs_bound
    :returns: (errors, error_cutoffs), each mapping n -> list of floats
    """
    errors, error_cutoffs = {}, {}
    # BUG FIX: the original iterated open(filename) without ever closing
    # the handle; the context manager guarantees it.
    with open(filename, 'r') as f:
        for line in f:
            n_str, err_str, cutoff_str = line.split(',')
            n = int(n_str)
            errors.setdefault(n, []).append(float(err_str))
            error_cutoffs.setdefault(n, []).append(float(cutoff_str))
    return errors, error_cutoffs
def plot_error_vs_bound(errors, error_cutoffs):
    """Scatter allowed vs actual error per n on log-log axes and save to
    allowed_v_actual.pdf.

    The region above y = x (allowed >= actual) is shaded green, below red.

    :param errors: dict n -> list of actual errors
    :param error_cutoffs: dict n -> list of allowed error bounds
    """
    # One marker shape per value of n (zip truncates to 5 groups).
    markers = ['v', '^', '<', '>', 'o']
    for n, marker in zip(errors, markers):
        plt.scatter(errors[n], error_cutoffs[n], marker=marker, label=f'n={n}')
    # make y scale log
    plt.yscale('log')
    plt.xscale('log')
    ymin, ymax = plt.ylim()
    xmin, xmax = plt.xlim()
    x = np.linspace(0,max(ymax, xmax),100)
    # color region above line y = x green
    plt.fill_between(x, ymax, x, color='green', alpha=.1)
    plt.fill_between(x, x, 0, color='red', alpha=.1)
    plt.plot(x, x, label='x=y')
    # Restore the limits that the fills above may have expanded.
    plt.xlim(xmin, xmax)
    plt.ylim(ymin, ymax)
    plt.title('Allowed vs Actual Error with 100 Iterations of SDP Solver')
    plt.ylabel(r'Allowed Error ($s_{\kappa}/(2 A \sqrt{c N_1})$)')
    plt.xlabel(r'Actual Error ($\epsilon$)')
    plt.legend()
    plt.tight_layout()
    plt.savefig('allowed_v_actual.pdf')
    plt.close()
# Experiment parameters: problem sizes, repetitions, iteration sweep.
ns = [5,10,15,20,25]
num_repeats = 20
iterations = list(range(10,200,20))
# Experiment 1: error as a function of solver iterations. The write_*
# call is commented out; plots are rebuilt from the previously saved CSV.
filename = 'iterations_vs_errors.csv'
#write_iterations_vs_errors(ns=ns, num_repeats=num_repeats, iterations=iterations, filename=filename)
errors_by_iteration = read_iterations_vs_errors(filename)
plot_iterations_vs_errors(errors_by_iteration)
# Experiment 2: actual error vs the allowed error bound.
filename = 'error_vs_bound.csv'
#write_error_vs_bound(ns=ns, num_repeats=num_repeats, iterations=100, filename=filename)
errors, error_cutoffs = read_error_vs_bound(filename)
plot_error_vs_bound(errors, error_cutoffs)
| true |
8d73dd79189933b0270f60d53171c3e7da4f17a0 | Python | kapsitis/ddgatve-stat | /youtube-data/scraper/sentiment_analysis.py | UTF-8 | 4,622 | 2.609375 | 3 | [
"Apache-2.0"
] | permissive | import os
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.ensemble import RandomForestClassifier
import pandas as pd
import numpy as np
import nltk
import time
from selenium import webdriver
from selenium.webdriver import ChromeOptions
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
def get_random():
    """Draw a politeness delay in seconds from N(2.0, 0.5).

    Strictly negative draws are replaced with 0.1 so the caller can
    always sleep for a positive amount of time.
    """
    draw = np.random.normal(2.0, 0.5, 1)[0]
    return 0.1 if draw < 0 else draw
def main():
    """Scrape YouTube channel video links and per-video comments.

    Reads ../youtube-channels.csv (columns include 'Name' and 'Channel'),
    visits each channel with Selenium/Chrome to collect video links, then
    visits each video (first 7 per channel) and writes one CSV of
    title/description/comment rows per channel.
    """
    test = pd.read_csv(os.path.join(os.path.dirname(__file__),'..', 'youtube-channels.csv'),
                       header=0,delimiter=',', quoting=2)
    print('List of channels has %d entries.' % (len(test)))
    # Links to individual videos.
    v_link_lists = list()
    channels = dict()
    driver = webdriver.Chrome()
    for i in range(0,len(test)):
        user = test.at[i,'Name']
        # Only visit each user's channel once.
        if not user in channels:
            channels[user] = i
            channel_url = test.at[i,'Channel']
            print('Channel for user "%s": %s' % (user, channel_url))
            driver.get(channel_url)
            user_data = driver.find_elements_by_xpath('//*[@id="video-title"]')
            print('    In the channel links found: %d' % len(user_data))
            count = 0
            current_list = list()
            # Collect (url, user, index) triples for every video link.
            for link_item in user_data:
                href = link_item.get_attribute('href')
                current_list.append((href,user,count))
                count += 1
            v_link_lists.append(current_list)
            # Randomized delay between channel visits (politeness).
            time.sleep(get_random())
    for i in range(len(v_link_lists)):
        print('List of links %d' % i)
        for j in v_link_lists[i]:
            print('  %s,%s,%d' % (j[0], j[1], j[2]))
    # Now start visiting the video URLs
    time.sleep(3)
    driver.fullscreen_window()
    # Timeout, if no comment content is loaded during the time.
    wait = WebDriverWait(driver, 20)
    for i in range(len(v_link_lists)):
        # One output frame per channel: (channel, video id, row type, text).
        df = pd.DataFrame(columns = ['channel', 'link', 'type', 'content'])
        for video in v_link_lists[i]:
            video_url = video[0]
            channel_id = video[1]
            npk = video[2]
            ## Skip all videos with large numbers
            if npk > 6:
                continue
            print('  Visiting %s' % video_url)
            driver.get(video_url)
            try:
                # NOTE(review): str.strip removes any of these characters
                # from both ends, not the literal prefix -- verify v_id.
                v_id = video_url.strip('https://www.youtube.com/watch?v=')
                v_title = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR,"h1.title yt-formatted-string"))).text
            except TimeoutException:
                df.loc[len(df)] = [channel_id, v_id, 'prop_log', 'TimeoutException on title']
            try:
                v_description = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR,"div#description yt-formatted-string"))).text
                df.loc[len(df)] = [channel_id, v_id, 'prop_title', v_title]
                df.loc[len(df)] = [channel_id, v_id, 'prop_description', v_description]
            except TimeoutException:
                df.loc[len(df)] = [channel_id, v_id, 'prop_log', 'TimeoutException on description']
            try:
                # Wait for at least one comment to render, then grab all.
                wait.until(EC.presence_of_element_located((By.CSS_SELECTOR,'div#content yt-formatted-string#content-text'))).text
                comments = driver.find_elements_by_css_selector('div#content yt-formatted-string#content-text')
                for comment in comments:
                    v_comment = comment.text
                    df.loc[len(df)] = [channel_id, v_id, 'prop_comment', v_comment]
                time.sleep(get_random())
            except TimeoutException:
                df.loc[len(df)] = [channel_id, v_id, 'prop_log', 'TimeoutException on comments']
        # TODO: CHANGE THIS TO YOUR FILE PATH
        df.to_csv (r'/home/kalvis/workspace-osx/ddgatve-stat/youtube-data/out_channel%02d.csv' % i, index = None, header=True)
        print('Saved CSV file for channel%02d' % i)
        print (df)
    driver.quit()
################################################
## MORE ROBUST CODE
################################################
## https://stackoverflow.com/questions/18953499/youtube-api-to-fetch-all-videos-on-a-channel
################################################
# More Sentiment analysis
# https://www.youtube.com/watch?v=AJVP96tAWxw
| true |
c05140ba9fbfdd4d457255e6a9a3cf154a5ebce0 | Python | rackerlabs/openstack-usage-report | /usage/reading.py | UTF-8 | 9,986 | 2.765625 | 3 | [
"Apache-2.0"
] | permissive | import copy
from exc import NoSamplesError
from exc import UnknownCounterTypeError
from log import logging
from conversions import convert
from conversions.time_units import seconds_to_hours
from data import trim
# Meter kinds this module knows how to aggregate (see Reading._calculate).
ALLOWED_METER_TYPES = set(['gauge', 'cumulative', 'delta'])
logger = logging.getLogger('usage.reading')
class Reading:
"""Models a reading of a meter."""
    def __init__(self, samples, start, stop):
        """Init the reading.

        :param samples: List of samples sorted by timestamp.
        :type samples: List
        :param start: Starting datetime.
        :type start: Datetime
        :param stop: Stopping datetime.
        :type stop: Datetime
        :raises NoSamplesError: If no sample falls within [start, stop].
        """
        self.start = start
        self.stop = stop
        # trim() normalizes the sample list in place -- see usage.data.trim
        # for its exact semantics.
        trim(samples)
        self._during_samples = []
        # Bucket by window, then aggregate and capture metadata (order
        # matters: _calculate and _set_metadata read _during_samples).
        self._split_samples(samples)
        self._calculate()
        self._set_metadata()
    def _split_samples(self, samples):
        """Split samples into three buckets.

        One for samples prior to start.
        One for samples between start and stop.
        One for samples after stop.

        :param samples: List of samples sorted by timestamp
        :type samples: List
        :raises NoSamplesError: If no sample falls between start and stop.
        """
        # Potentially a long running loop. Use local variables where possible.
        prior_samples = []
        during_samples = []
        post_samples = []
        start = self.start
        stop = self.stop
        for sample in samples:
            logger.debug("{} - {} - {} - {} - {}".format(
                sample.resource_id,
                sample.message_id,
                sample.timestamp,
                sample.counter_name,
                sample.counter_volume
            ))
            # Check common case first
            if sample.timestamp >= start and sample.timestamp <= stop:
                during_samples.append(sample)
                continue
            # Second most common case
            if sample.timestamp < start:
                prior_samples.append(sample)
                continue
            # Must be a post sample
            post_samples.append(sample)
        self._prior_samples = prior_samples
        self._during_samples = during_samples
        self._post_samples = post_samples
        if not len(self._during_samples):
            raise NoSamplesError()
    def resource_existed_before(self):
        """Determine if resource existed before self.start.

        :returns: The length of prior samples > 0
        :rtype: Bool
        """
        return len(self._prior_samples) > 0

    def resource_existed_after(self):
        """Determine if resource existed after self.stop.

        :returns: The length of post samples > 0
        :rtype: Bool
        """
        return len(self._post_samples) > 0
    @property
    def project_id(self):
        """Get the project id for the reading.

        Taken from the first in-window sample; __init__ guarantees at
        least one exists (NoSamplesError otherwise).

        :returns: Project id
        :rtype: String
        """
        return self._during_samples[0].project_id

    @property
    def resource_id(self):
        """Get the resource id for the reading.

        :returns: Resource id
        :rtype: String
        """
        return self._during_samples[0].resource_id

    @property
    def usage_start(self):
        """Get the usage start time.

        Will be either self.start or the first during sample timestamp.

        :returns: Usage start time
        :rtype: Datetime|None
        """
        if not self._during_samples:
            return None
        # Resource predates the window -> bill from the window start.
        if self.resource_existed_before():
            return self.start
        return self._during_samples[0].timestamp

    @property
    def usage_stop(self):
        """Get the usage stop time.

        Will be either self.stop or the last during sample timestamp.

        :returns: Usage stop time
        :rtype: Datetime|None
        """
        if not self._during_samples:
            return None
        # Resource outlives the window -> bill until the window stop.
        if self.resource_existed_after():
            return self.stop
        return self._during_samples[-1].timestamp

    @property
    def samples(self):
        """Gets during samples.

        :returns: List of samples between start and stop
        :rtype: List
        """
        return self._during_samples

    @property
    def meter_name(self):
        """Get the name of the meter.

        NOTE(review): reads `.meter` from the LAST sample while meter_type
        reads `.counter_type` from the FIRST -- confirm the asymmetry is
        intentional.

        :returns: Name of the meter
        :rtype: String
        """
        if not self._during_samples:
            return None
        return self._during_samples[-1].meter

    @property
    def meter_type(self):
        """Get the type of meter.

        :returns: Type of meter
        :rtype: String
        """
        if not self._during_samples:
            return None
        return self._during_samples[0].counter_type
    def _assume_ends(self):
        """Correct the ends of the sampling.

        We prepend an assumed sample to the end and beginning of the samples
        based on usage start and stop times. This allows us to get a more
        accurate reading where samples do not occur close to the end of the
        billing start and stop times.

        This is only useful for gauge type meters.

        If the first sample is the usage start time and not the billing start
        time, then we are adding another point at the same time and it has no
        effect on the value.

        If there are samples prior to the billing start time, then the
        resource existed prior to the billing start time and it is safe
        to assume a sample at the billing start time.

        Likewise for the end of the samples.
        """
        # Prepend usage start (shallow copy of the first real sample,
        # restamped; _gauge pops both assumed samples off afterwards).
        assumed_start = copy.copy(self._during_samples[0])
        assumed_start.timestamp = self.usage_start
        self._during_samples.insert(0, assumed_start)
        # Append usage stop
        assumed_stop = copy.copy(self._during_samples[-1])
        assumed_stop.timestamp = self.usage_stop
        self._during_samples.append(assumed_stop)
def _gauge(self):
"""Compute gauge reading as guage units of time.
Performs a trapezoidal approximation of the gauge time series data.
"""
value = 0.0
self._assume_ends()
samples = self._during_samples
for i in xrange(1, len(self._during_samples)):
value += (
(
samples[i].timestamp -
samples[i - 1].timestamp
).total_seconds() *
(
samples[i].counter_volume +
samples[i - 1].counter_volume
)
)
value = value / 2
# Value is in unit seconds. convert to unit hours.
self.value = seconds_to_hours(value)
# Remove assumed start and stop
self._during_samples.pop(0)
self._during_samples.pop()
def _cumulative(self):
"""Compute cumulative reading.
Cumulative meters are counters. Just need to subtract the
first value from the last value.
"""
self.value = \
self._during_samples[-1].counter_volume - \
self._during_samples[0].counter_volume
def _delta(self):
"""Compute delta reading.
Delta meters are just changes in value since last point.
Sum the the values.
"""
self.value = 0
for sample in self._during_samples:
self.value += sample.counter_volume
    def _calculate(self):
        """Performs the aggregation according to meter type.

        Sets self.value to None when there are no samples, raises
        UnknownCounterTypeError for an unrecognised meter type, and
        otherwise dispatches to the gauge/cumulative/delta handler,
        each of which stores its result on self.value.
        """
        # Return quick if no samples
        if not self._during_samples:
            self.value = None
            return
        meter_type = self.meter_type
        # ALLOWED_METER_TYPES / UnknownCounterTypeError are module-level.
        if meter_type not in ALLOWED_METER_TYPES:
            raise UnknownCounterTypeError(meter_type)
        if meter_type == 'gauge':
            self._gauge()
        elif meter_type == 'cumulative':
            self._cumulative()
        elif meter_type == 'delta':
            self._delta()
    def _set_metadata(self):
        """Pulls metadata from the sample list.

        Metadata is tricky. Some projects, like cinder, treat the resource
        metadata as deleted when the resource is deleted. If we want to
        represent billing/usage information we need to go an extra couple
        of steps to make that metadata available.

        This function steps backward from the end of the sample list
        looking for the last sample whose status is not a deleted state,
        and falls back to the last sample's metadata when none is found.
        Stores the result (or None when there are no samples) on
        self.metadata.
        """
        delete_status = ['deleting', 'deleted']
        status_keys = ['state', 'status']
        samples = self._during_samples
        # If no samples, return early
        if not samples:
            self.metadata = None
            return
        metadata = None
        # Check if sample even has status keys going by last sample.
        has_keys = [
            key in samples[-1].resource_metadata
            for key in status_keys
        ]
        # If sample has keys, loop back from the end.
        if any(has_keys):
            # Walk indices from the last sample down to 0.
            start = max(len(samples) - 1, 0)
            stop = -1
            step = -1
            for i in xrange(start, stop, step):
                # Loop over status fields
                for key in status_keys:
                    status = samples[i].resource_metadata.get(key)
                    if status is not None and status not in delete_status:
                        # Found a non-deleted sample; keep its metadata.
                        metadata = samples[i].resource_metadata
                        break
                # Check to see if we found some metadata.
                if metadata is not None:
                    break
        # Default to last sample metadata.
        if metadata is None:
            metadata = samples[-1].resource_metadata
        self.metadata = metadata
    def convert(self, conversion):
        """Convert self.value using the named conversion.

        No-op when there is no value to convert or no conversion requested.

        :param conversion: Conversion function name
        :type conversion: String|None
        """
        if self.value is None or conversion is None:
            return
        # Delegates to the module-level convert() helper.
        self.value = convert(conversion, self.value)
| true |
54773670c8c14383fa0dc12930b3148eba788008 | Python | nicholasvoltani/Programas-feitos-durante-a-Graduacao | /Introdução ao Caos/Strogatz-Example/strogatz_example.py | UTF-8 | 1,005 | 3.1875 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
iteration = lambda x,p: np.sin(x)/p
ps = np.linspace(1.1, 0, 5000)
x0 = 0.001
x1 = -0.001
Ttrans = 1000
Tstat = 1000
plt.figure()
## Points which will be plotted
xf = []
pf = []
for p in ps:
xu = x0
xb = x1
## Removing the transient
for i in range(Ttrans):
xu = iteration(xu,p)
xb = iteration(xb,p)
## Plotting fixed points
for i in range(Tstat):
xu = iteration(xu,p)
xf.append(xu)
pf.append(p)
xb = iteration(xb,p)
xf.append(xb)
pf.append(p)
#plt.scatter([p,p],[xu,xb], c='black')
plt.plot(pf,xf,',')
at_0 = []
for n in range(10):
if n==0:
at_0.append(n*np.pi)
else:
at_0.append(n*np.pi)
at_0.append(-n*np.pi)
ps_0 = [0 for k in at_0]
plt.plot(ps_0,at_0,',k')
plt.title("Bifurcation Diagram:\n" + r"$ x_{n+1} = \frac{\sin(x_n)}{p}$")
plt.xlabel('p')
plt.ylabel('x')
plt.ylim(-10*np.pi-5,10*np.pi+5)
plt.show()
| true |
583b8d2572f45275ea8d2bf7e06fd697be1be210 | Python | sask1217/ai3202 | /Assignment5/Sam_Skolnekovich_Assignment5.py | UTF-8 | 7,204 | 3.03125 | 3 | [] | no_license | # Sam Skolnekovich
# HW5
# 10/07/15
'''
' Calculating this with different values for error changes the program a great deal.
' This program is set to break for suboptimal solutions.
' Some of the nodes will act as sinks for the print function and will cause a never ending loop.
' To fix for finding suboptimal solutions you could keep track of nodes visited and unvisited.
' For World1MDP, keep e less then 16, After this it takes a step up rather then right at the startnode.
' It then gets caught in a sink between 3,7 and 2,7
'''
from sys import argv
script, filename, err = argv
txt = open(filename)
rows=0
cols=0
i=0
j=-1
#print(txt.read())
class node():
    """One grid cell of the MDP world used by value iteration.

    NOTE(review): the ``visited`` and ``delta`` constructor arguments are
    accepted but ignored — ``visited`` is always initialised to False and
    ``delta`` to the sentinel 10000. Several other attributes are marked
    unused (A* leftovers) by the original author.
    """
    def __init__(self,node,locationx,locationy,utility,parentnode,value,reward, adj,visited,delta,pastutil):
        self.node=node
        self.locationx=locationx  # grid column (x)
        self.locationy=locationy  # grid row (y)
        self.utility=utility  # current value-iteration utility estimate
        self.parentnode=parentnode #unused*
        self.reward=reward  # immediate reward for entering this cell
        self.value=value  # raw map character ('0'..'4', '2' = wall, '50' = goal)
        self.adj=adj #unused A*
        self.visited=False #unused A*
        self.delta=10000  # smallest utility change observed (sentinel init)
        self.pastutil=pastutil #unused
with open(filename) as txt:
for line in txt:
line=line.strip()
items=line.split(' ')
for value in items:
cols=cols+1
rows=rows+1
rows=rows-1
cols=cols-1
cols=round(cols/rows)
allnodes=[[0 for x in range(rows)] for y in range(cols)]
currentnode=[]
with open(filename) as txt:
for line in txt:
line=line.strip()
items=line.split(' ')
i=0
j=j+1
for value in items:
if(j<=rows-1 and i<=cols-1):
allnodes[i][j]=node(None,i,j,0,None,value,None,None,None,10000,0)
i=i+1
def calcUtility(err):
maxdelta=100000
pastUtil=0
currUtil=0
delta=0
error=float(err)
error=error/9
while maxdelta>error:
maxdelta=0
allnodes[cols-1][0].reward=float(allnodes[cols-1][0].value)
for y in range(rows):
for x in range(cols):
pastUtil=allnodes[x][y].utility
if(allnodes[x][y].value!='2'): # if its a wall, set its utility very low and skip the rest
if (allnodes[x][y].value=='0'):
allnodes[x][y].reward = -1 # my function works optimally when there is a negative living reward
if(allnodes[x][y].value=='1'):
allnodes[x][y].reward=-2
if(allnodes[x][y].value=='3'):
allnodes[x][y].reward=-3
if(allnodes[x][y].value=='4'):
allnodes[x][y].reward=0
if(y+1<rows and allnodes[x][y+1].value!='2'):
if(x-1>=0 and allnodes[x-1][y].value!='2' and x+1>cols):
if(x+1<cols and allnodes[x+1][y].value!='2'):
#DOWN left right
u1=allnodes[x][y].reward+(.8*float(allnodes[x][y+1].utility)+.1*float(allnodes[x-1][y].utility)+.1*float(allnodes[x+1][y].utility))
else:
#u1 now has .1 chance of staying in the same spot
#and get rid of right chance since this is a case where you hit a wall or are out of bounds
u1=allnodes[x][y].reward+(.8*float(allnodes[x][y+1].utility)+.1*float(allnodes[x-1][y].utility))
else:
#u1 equals .9 get rid of left chance
if(x+1>=cols or allnodes[x][y].value=='2'):
u1=allnodes[x][y].reward+.8*allnodes[x][y+1].utility
else:
u1=allnodes[x][y].reward+.8*allnodes[x][y+1].utility+.1*allnodes[x+1][y].utility
else:
u1=-10000000
#u1 is not possible
if(x-1>=0 and allnodes[x-1][y].value!='2'):
if(y+1<rows and allnodes[x][y+1].value!='2'):
if(y-1>=0 and allnodes[x][y-1].value!='2'):
u2=allnodes[x][y].reward+(.8*float(allnodes[x-1][y].utility)+.1*float(allnodes[x][y-1].utility)+.1*float(allnodes[x][y+1].utility))
else:
u2=allnodes[x][y].reward+(.8*float(allnodes[x-1][y].utility)+.1*float(allnodes[x][y+1].utility))
else:
if(y-1<0 or allnodes[x][y-1].value=='2'):
u2=allnodes[x][y].reward+.8*allnodes[x-1][y].utility
else:
u2=allnodes[x][y].reward+(.8*float(allnodes[x-1][y].utility)+.1*float(allnodes[x][y-1].utility))
else:
u2=-10000000
if(x+1<cols and allnodes[x+1][y].value!='2'):
if(y+1<rows and allnodes[x][y+1].value!='2'):
if(y-1>=0 and allnodes[x][y-1].value!='2'):
u3=allnodes[x][y].reward+(.8*float(allnodes[x+1][y].utility)+.1*float(allnodes[x][y-1].utility)+.1*float(allnodes[x][y+1].utility))
else:
u3=allnodes[x][y].reward+(.8*float(allnodes[x+1][y].utility)+.1*float(allnodes[x][y+1].utility))
else:
if(y-1<0 or allnodes[x][y-1].value=='2'):
u3=allnodes[x][y].reward+.8*allnodes[x+1][y].utility
else:
u3=allnodes[x][y].reward+(.8*float(allnodes[x+1][y].utility)+.1*float(allnodes[x][y-1].utility))
else:
u3=-10000000
if(y-1>=0 and allnodes[x][y-1].value!='2'):
if(x-1>=0 and allnodes[x-1][y].value!='2'):
if(x+1<cols and allnodes[x+1][y].value!='2'):
u4=allnodes[x][y].reward+(.8*float(allnodes[x][y-1].utility)+.1*float(allnodes[x-1][y].utility)+.1*float(allnodes[x+1][y].utility))
else:
u4=allnodes[x][y].reward+(.8*float(allnodes[x][y-1].utility)+.1*float(allnodes[x-1][y].utility))
else:
if(x+1>=cols or allnodes[x+1][y].value=='2'):
u4=allnodes[x][y].reward+.8*allnodes[x][y-1].utility
else:
u4=allnodes[x][y].reward+(.8*float(allnodes[x][y-1].utility)+.1*float(allnodes[x+1][y].utility))
else:
u4=-10000000
allnodes[x][y].utility=.9*max(u1,u2,u3,u4)
currUtil=allnodes[x][y].utility
delta=abs(currUtil-pastUtil)
if(allnodes[x][y].value=='50'):
allnodes[x][y].utility=50
allnodes[x][y].delta=0
if(delta<allnodes[x][y].delta):
allnodes[x][y].delta=delta
if(allnodes[x][y].delta>maxdelta):
maxdelta=delta
if(allnodes[x][y].value=='50'):
allnodes[x][y].utility=50
allnodes[x][y].delta=0
else:
allnodes[x][y].utility=-10000000
printTest()
def printTest():
#this lets you see a map of all the utilities
'''for y in range(rows):
for x in range(cols):
print(round(allnodes[x][y].utility,2),end=' ')
if(x==9):
print('\n')'''
currentnode=allnodes[0][rows-1]
print(currentnode.locationx,currentnode.locationy,round(currentnode.utility,2))
#i=0
while currentnode.locationx != allnodes[cols-1][0].locationx or currentnode.locationy != allnodes[cols-1][0].locationy:
x=currentnode.locationx
y=currentnode.locationy
if(x-1>=0):
p1=allnodes[x-1][y].utility
else:
p1=-1000000
if(x+1<cols):
p2=allnodes[x+1][y].utility
else:
p2=-1000000
if(y+1<rows):
p3=allnodes[x][y+1].utility
else:
p3=-1000000
if(y-1>=0):
p4=allnodes[x][y-1].utility
else:
p4=-100000
Max=max(p1,p2,p3,p4) # find value next to current node that has max utility. calc this after utilites have been calculated
if(Max==p1):
currentnode=allnodes[x-1][y]
if(Max==p2):
currentnode=allnodes[x+1][y]
if(Max==p3):
currentnode=allnodes[x][y+1]
if(Max==p4):
currentnode=allnodes[x][y-1]
print(currentnode.locationx,currentnode.locationy,round(currentnode.utility,2))
calcUtility(err)
| true |
10036f99b909b0ae2c5128817c72fd0af417b7c5 | Python | judong-520/QT_trade | /cg/cg_spot/huobi_exchange.py | UTF-8 | 7,609 | 2.796875 | 3 | [] | no_license | import json
import pandas as pd
from urllib.request import urlopen, Request
pd.set_option('expand_frame_repr', False) # 当列太多时不换行
# API 请求地址
BASE_URl = "https://api.huobi.pro"
def get_url_content(url, max_try_number=5, headers=None):
    """Fetch the raw response body of *url*, retrying on failure.

    :param url: address to request
    :param max_try_number: number of failed attempts before giving up
    :param headers: optional dict of extra HTTP headers
    :return: response body as bytes, or None if every attempt failed
    """
    if headers is None:
        # Request() expects a dict; passing None through would crash.
        headers = {}
    try_num = 0
    while True:
        try:
            request = Request(url=url, headers=headers)
            # Use the response as a context manager so the underlying
            # socket is closed instead of leaked on every call.
            with urlopen(request, timeout=15) as response:
                return response.read()
        except Exception as e:
            print(url, "抓取报错", e)
            try_num += 1
            if try_num >= max_try_number:
                print("尝试失败次数过多, 放弃尝试")
                return None
def get_market_trade(symbol='btcusdt'):
    """Fetch the most recent executed trade (price, amount) for *symbol*
    from the Huobi REST API.

    :param symbol: trading pair, e.g. 'btcusdt', 'bchbtc', 'rcneth'
    :return: (price, amount) of the latest trade

    NOTE(review): if get_url_content() exhausts its retries and returns
    None, this loop retries forever — confirm whether that is intended.
    """
    while True:
        url = BASE_URl + '/market/trade?symbol=%s' % symbol
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36',
            "Content - Type": "application / x - www - form - urlencoded"
        }
        content = get_url_content(url, 5, headers=headers)
        if content:
            content = content.decode('utf-8')
            json_dict = json.loads(content)
            print('huobi_trade-->',json_dict)
            # The first element of tick.data is the most recent trade.
            price = json_dict.get('tick').get('data')[0]['price']
            amount = json_dict.get('tick').get('data')[0]['amount']
            # close = tick_dict.get('close', None)
            return price, amount
def get_market_detail_merged(symbol='btcusdt'):
    """Fetch the merged ticker for *symbol* (GET /market/detail/merged).

    :param symbol: trading pair, e.g. 'btcusdt', 'bchbtc', 'rcneth'
    :return: (amount, close) — traded volume and the closing price (the
        latest trade price when the candle is the most recent one)

    NOTE(review): loops forever if get_url_content() keeps returning None.
    """
    while True:
        url = BASE_URl + '/market/detail/merged?symbol=%s' % symbol
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36',
            "Content - Type": "application / x - www - form - urlencoded"
        }
        content = get_url_content(url, 5, headers=headers)
        if content:
            content = content.decode('utf-8')
            json_dict = json.loads(content)
            print(json_dict)
            tick_dict = json_dict.get('tick', None)
            amount = tick_dict.get('amount', None)
            close = tick_dict.get('close', None)
            return amount, close
def get_market_depth(symbol='btcusdt', type='step1'):
    """Fetch the order-book depth for *symbol* (GET /market/depth).

    :param symbol: trading pair, e.g. 'btcusdt', 'bchbtc', 'rcneth'
    :param type: depth merge level, one of step0..step5 (step0 = no merge)
    :return: (bids_list, asks_list) — each a list of [price, amount]
        pairs; bids sorted by price descending, asks ascending

    NOTE(review): keeps retrying until both bids and asks are non-empty.
    """
    while True:
        url = BASE_URl + '/market/depth?symbol=%s&type=%s' % (symbol, type)
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36',
            "Content - Type": "application / x - www - form - urlencoded"
        }
        content = get_url_content(url, 5, headers=headers)
        if content:
            content = content.decode('utf-8')
            json_dict = json.loads(content)
            tick_dict = json_dict.get('tick', None)
            bids_list = tick_dict.get('bids', None)
            asks_list = tick_dict.get('asks', None)
            if bids_list and asks_list:
                return bids_list, asks_list
if __name__ == '__main__':
print(get_market_trade())
# api接口地址
# https://github.com/huobiapi/API_Docs/wiki/REST_api_reference#get-marketdepth-%E8%8E%B7%E5%8F%96-market-depth-%E6%95%B0%E6%8D%AE
"""
GET /market/detail/merged 获取聚合行情(Ticker)
请求参数:
参数名称 是否必须 类型 描述 默认值 取值范围
symbol true string 交易对 btcusdt, bchbtc, rcneth ...
响应数据:
参数名称 是否必须 数据类型 描述 取值范围
status true string 请求处理结果 "ok" , "error"
ts true number 响应生成时间点,单位:毫秒
tick true object K线数据
ch true string 数据所属的 channel,格式: market.$symbol.detail.merged
tick 说明:
"tick": {
"id": K线id,
"amount": 成交量,
"count": 成交笔数,
"open": 开盘价,
"close": 收盘价,当K线为最晚的一根时,是最新成交价
"low": 最低价,
"high": 最高价,
"vol": 成交额, 即 sum(每一笔成交价 * 该笔的成交量)
"bid": [买1价,买1量],
"ask": [卖1价,卖1量]
}
请求响应示例:
/* GET /market/detail/merged?symbol=ethusdt */
{
"status":"ok",
"ch":"market.ethusdt.detail.merged",
"ts":1499225276950,
"tick":{
"id":1499225271,
"ts":1499225271000,
"close":1885.0000,
"open":1960.0000,
"high":1985.0000,
"low":1856.0000,
"amount":81486.2926,
"count":42122,
"vol":157052744.85708200,
"ask":[1885.0000,21.8804],
"bid":[1884.0000,1.6702]
}
}
/* GET /market/detail/merged?symbol=not-exist */
{
"ts": 1490758171271,
"status": "error",
"err-code": "invalid-parameter",
"err-msg": "invalid symbol”
}
"""
"""
GET /market/depth 获取 Market Depth 数据
请求参数:
参数名称 是否必须 类型 描述 默认值 取值范围
symbol true string 交易对 btcusdt, bchbtc, rcneth ...
type true string Depth 类型 step0, step1, step2, step3, step4, step5(合并深度0-5);step0时,不合并深度
用户选择“合并深度”时,一定报价精度内的市场挂单将予以合并显示。合并深度仅改变显示方式,不改变实际成交价格。
响应数据:
参数名称 是否必须 数据类型 描述 取值范围
status true string "ok" 或者 "error"
ts true number 响应生成时间点,单位:毫秒
tick true object Depth 数据
ch true string 数据所属的 channel,格式: market.$symbol.depth.$type
tick 说明:
"tick": {
"id": 消息id,
"ts": 消息生成时间,单位:毫秒,
"bids": 买盘,[price(成交价), amount(成交量)], 按price降序,
"asks": 卖盘,[price(成交价), amount(成交量)], 按price升序
}
请求响应示例:
/* GET /market/depth?symbol=ethusdt&type=step1 */
{
"status": "ok",
"ch": "market.btcusdt.depth.step1",
"ts": 1489472598812,
"tick": {
"id": 1489464585407,
"ts": 1489464585407,
"bids": [
[7964, 0.0678], // [price, amount]
[7963, 0.9162],
[7961, 0.1],
[7960, 12.8898],
[7958, 1.2],
[7955, 2.1009],
[7954, 0.4708],
[7953, 0.0564],
[7951, 2.8031],
[7950, 13.7785],
[7949, 0.125],
[7948, 4],
[7942, 0.4337],
[7940, 6.1612],
[7936, 0.02],
[7935, 1.3575],
[7933, 2.002],
[7932, 1.3449],
[7930, 10.2974],
[7929, 3.2226]
],
"asks": [
[7979, 0.0736],
[7980, 1.0292],
[7981, 5.5652],
[7986, 0.2416],
[7990, 1.9970],
[7995, 0.88],
[7996, 0.0212],
[8000, 9.2609],
[8002, 0.02],
[8008, 1],
[8010, 0.8735],
[8011, 2.36],
[8012, 0.02],
[8014, 0.1067],
[8015, 12.9118],
[8016, 2.5206],
[8017, 0.0166],
[8018, 1.3218],
[8019, 0.01],
[8020, 13.6584]
]
}
}
"""
| true |
db3fa10ac98c7c89a6d0746f54eb6c77db1b499a | Python | Potatology/coding | /balanced_p.py | UTF-8 | 414 | 3.1875 | 3 | [] | no_license | import stack
def balancedParents(parents):
    """Return True if the parenthesis string *parents* is balanced.

    Matches the original stack-based implementation: '(' opens a group and
    every other character is treated as a closing parenthesis. Uses a
    simple depth counter instead of the external stack module — O(1) space
    and no project dependency.
    """
    depth = 0  # number of currently-open parentheses
    for parent in parents:
        if parent == '(':
            depth += 1
        else:
            if depth == 0:
                return False  # closing paren with nothing open
            depth -= 1
    return depth == 0
print(balancedParents('((())()()((())))')) | true |
127feeebca8b7d09f28845de1f71d2a3411c72e2 | Python | DheerajJoshi/Python-tribble | /Dictionary/src/built In Directory function/typedict.py | UTF-8 | 97 | 3.140625 | 3 | [] | no_license | #!/usr/bin/python
dict1 = {'Name': 'Zara', 'Age': 7};
print ("Variable Type : %s" % type (dict1)) | true |
ce138302c95e08689188eb2f8706edfce2d9693d | Python | bolivierjr/advent-of-code | /2018/day05/day05_pt2.py | UTF-8 | 1,188 | 3.265625 | 3 | [] | no_license | import os
from string import ascii_lowercase
directory = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(directory, 'input.txt')
with open(filename, 'r') as fp:
data = fp.read()
def reactors2(polymers: str) -> int:
    """Return the length of the shortest fully-reacted polymer obtainable
    by first removing all units of one type (both polarities).

    Two adjacent units react (annihilate) when they are the same letter
    with opposite case, e.g. 'aA' or 'Bb'. The original implementation ran
    repeated whole-string replace passes until a fixed point; this version
    performs a single O(n) stack-based reaction per candidate letter,
    producing the same minimum length.
    """
    def _react_length(polymer: str) -> int:
        # Stack-based reaction: push each unit; pop when it reacts with
        # the unit on top (same letter, opposite case).
        stack = []
        for unit in polymer:
            if stack and stack[-1] != unit and stack[-1].lower() == unit.lower():
                stack.pop()
            else:
                stack.append(unit)
        return len(stack)

    return min(
        _react_length(polymers.replace(c, '').replace(c.upper(), ''))
        for c in ascii_lowercase
    )
if __name__ == '__main__':
print(f'Polymer count: {reactors2(data)}')
| true |
5ad9a69ad1bb0bb46bdd82454abe84e30dc8bce3 | Python | jessedezwart/De_Wah | /motor.py | UTF-8 | 1,583 | 2.828125 | 3 | [] | no_license | import RPi.GPIO as GPIO
import time
# set gpio mode
GPIO.setmode(GPIO.BCM)
# set pins
left_engine_pins = [17,4,3,2]
right_engine_pins = [27,22,10,9]
for pin in left_engine_pins:
GPIO.setup(pin, GPIO.OUT)
GPIO.output(pin, 0)
for pin in right_engine_pins:
GPIO.setup(pin, GPIO.OUT)
GPIO.output(pin, 0)
# halfstep sequence via datasheet
# https://www.raspberrypi-spy.co.uk/wp-content/uploads/2012/07/Stepper-Motor-28BJY-48-Datasheet.pdf
halfstep_seq = [
[1,0,0,0],
[1,1,0,0],
[0,1,0,0],
[0,1,1,0],
[0,0,1,0],
[0,0,1,1],
[0,0,0,1],
[1,0,0,1]
]
# halfstep sequence for reversal
halfstep_seq_rev = [
[1,0,0,1],
[0,0,0,1],
[0,0,1,1],
[0,0,1,0],
[0,1,1,0],
[0,1,0,0],
[1,1,0,0],
[1,0,0,0]
]
def _step_engine(pins, sequence):
    # Drive one full 8-halfstep cycle on the given engine pins, pausing
    # 1 ms after each halfstep. Shared by the movement functions below,
    # which previously triplicated this loop.
    for halfstep in range(8):
        for pin in range(4):
            GPIO.output(pins[pin], sequence[halfstep][pin])
        time.sleep(0.001)

def left_forward():
    """Advance the left stepper motor one halfstep cycle forwards."""
    _step_engine(left_engine_pins, halfstep_seq)

def right_forward():
    """Advance the right stepper motor one halfstep cycle forwards."""
    _step_engine(right_engine_pins, halfstep_seq)

def left_reverse():
    """Advance the left stepper motor one halfstep cycle in reverse."""
    _step_engine(left_engine_pins, halfstep_seq_rev)
def right_reverse():
for halfstep in range(8):
for pin in range(4):
GPIO.output(right_engine_pins[pin], halfstep_seq_rev[halfstep][pin])
time.sleep(0.001) | true |
a03c0bbcd80f8808b92319d93d6edb141a00bb79 | Python | redpanda-ai/ctci | /solutions/successor.py | UTF-8 | 1,307 | 3.875 | 4 | [] | no_license | class Node:
def __init__(self, value, left=None, right=None, parent=None):
self.value = value
self.left = left
self.right = right
self.parent = parent
def set_left(self, other):
self.left = other
other.parent = self
def set_right(self, other):
self.right = other
other.parent = self
def leftmost_child(node: Node):
    """Follow left pointers from *node* down to its leftmost descendant."""
    current = node
    while current.left:
        current = current.left
    return current
def successor(node: Node):
    """Return the in-order successor of *node*, or None if it has none.

    A node with a right subtree is succeeded by that subtree's leftmost
    node; otherwise the successor is the nearest ancestor whose left
    subtree contains the node.
    """
    if node is None:
        return None
    if node.right:
        return leftmost_child(node.right)
    child = node
    ancestor = child.parent
    while ancestor and ancestor.left is not child:
        child = ancestor
        ancestor = ancestor.parent
    return ancestor
if __name__ == "__main__":
a = Node("a")
b = Node("b")
c = Node("c")
d = Node("d")
e = Node("e")
f = Node("f")
g = Node("g")
a.set_left(b)
a.set_right(c)
b.set_left(d)
b.set_right(e)
c.set_left(f)
c.set_right(g)
"""
a
* *
* *
* *
b c
* * * *
* * * *
d e f g
"""
r = leftmost_child(a)
while r:
print(r.value)
r = successor(r)
| true |
20b1498b757d3ff141ac49b2f207a1a6b416665a | Python | jffcole7/Bioinformatics | /egglib_sliding_windows.py | UTF-8 | 27,195 | 2.796875 | 3 | [] | no_license | #!/usr/bin/env python
# egglib_sliding_windows.py
# calculate ABBA-BABA stats, dxy, pi and S for sliding windows in genomic data
# Written for "Evaluating the use of ABBA-BABA statistics to locate introgressed loci"
# by Simon H. Martin, John W. Davey and Chris D. Jiggins
# Simon Martin: shm45@cam.ac.uk
# John Davey: jd626@cam.ac.uk
# August 2014
import sys
import egglib
def getOptionValue(option): # needs sys
    """Return the command-line argument following *option*.

    Returns None (after printing a warning to stderr) when the option is
    absent from sys.argv. Assumes a value follows the option flag.
    """
    if option in sys.argv:
        optionPos = sys.argv.index(option)
        optionValue = sys.argv[optionPos + 1]
        return optionValue
    else:
        print >> sys.stderr, "\nWarning, option", option, "not_specified.\n"
def get_intv(string,borders = "()",inc = False):
    """Extract the substrings of *string* enclosed by a pair of border
    characters (e.g. "()" or "[]").

    With inc=True the border characters themselves are included in each
    returned substring. Only as many intervals as the smaller of the
    open/close counts are returned; the two branches below differ only in
    which count bounds the loop.
    """
    if len(borders) != 2:
        print "WARNING: borders must contain two characters"
    starts = []
    ends = []
    output = []
    # Record positions of every opening and (one past every) closing border.
    for x in range(len(string)):
        if string[x] == borders[0]:
            starts.append(x)
        if string[x] == borders[1]:
            ends.append(x+1)
    if len(starts) <= len(ends):
        for n in range(len(starts)):
            if inc:
                output.append(string[starts[n]:ends[n]])
            else:
                output.append(string[starts[n]+1:ends[n]-1])
    else:
        for n in range(len(ends)):
            if inc:
                output.append(string[starts[n]:ends[n]])
            else:
                output.append(string[starts[n]+1:ends[n]-1])
    return output
def median(numbers):
    """Return the median of a list of numbers.

    Fixes two defects in the original: it sorted the caller's list in
    place (a surprising side effect), and it used true division for index
    arithmetic, which breaks under Python 3. A sorted copy and floor
    division (valid in both Python 2 and 3) preserve the original results.
    """
    ordered = sorted(numbers)
    n = len(ordered)
    if n % 2 == 1:
        return ordered[(n + 1) // 2 - 1]
    else:
        lower = ordered[n // 2 - 1]
        upper = ordered[n // 2]
        return (float(lower + upper)) / 2
def haplo(calls):
    """Expand a list of diploid base calls into a haploid base list.

    Each unambiguous base (A/C/G/T/N) contributes two copies of itself;
    each IUPAC ambiguity code (K, M, R, S, W, Y) contributes its two
    constituent bases. Any unrecognised character produces a warning and
    two Ns. The returned list is therefore twice the input length.
    """
    output = []
    for call in calls:
        if call in "ACGTN":
            output.append(call)
            output.append(call)
        elif call == "K":
            output.append("G")
            output.append("T")
        elif call == "M":
            output.append("A")
            output.append("C")
        elif call == "R":
            output.append("A")
            output.append("G")
        elif call == "S":
            output.append("C")
            output.append("G")
        elif call == "W":
            output.append("A")
            output.append("T")
        elif call == "Y":
            output.append("C")
            output.append("T")
        else:
            print "WARNING", call, "is not recognised as a valid base or ambiguous base"
            output.append("N")
            output.append("N")
    return output
def mean(numbers):
    """Arithmetic mean of *numbers*, skipping "NA" and None entries.

    Entries are coerced with float(); returns the string "NA" when no
    usable values remain.
    """
    usable = [float(v) for v in numbers if v != "NA" and v is not None]
    if not usable:
        return "NA"
    return sum(usable) / len(usable)
def mid(numbers):
    """Midpoint between the first and last usable values of *numbers*.

    "NA" and None entries are skipped and the rest coerced with float();
    returns None when nothing usable remains.
    """
    usable = [float(v) for v in numbers if v != "NA" and v is not None]
    if not usable:
        return None
    return (usable[0] + usable[-1]) / 2
def AlignByGroupNumber(align,groupNumber):
    """Return a new alignment holding only sequences labelled *groupNumber*.

    Each entry of an egglib alignment is (name, sequence, group); index [2]
    is the group label.
    """
    newAlign = align.slice(0,0)  # empty alignment to accumulate into
    for seqNumber in range(len(align)):
        if align[seqNumber][2] == groupNumber:
            newAlign.addSequences([align[seqNumber]])
    return newAlign
def AlignByGroupNumbers(align,groupNumbers):
    """Return a new alignment holding only sequences whose group label is
    in the collection *groupNumbers* (cf. AlignByGroupNumber for one)."""
    newAlign = align.slice(0,0)  # empty alignment to accumulate into
    for seqNumber in range(len(align)):
        if align[seqNumber][2] in groupNumbers:
            newAlign.addSequences([align[seqNumber]])
    return newAlign
def mostCommon(things):
    """Return the item(s) with the highest count in *things*, sorted.

    Replaces the original O(unique * n) approach (one list.count() call
    per unique value) with a single counting pass. Like the original, it
    raises on an empty input and returns the modal values in sorted order.
    """
    counts = {}
    for thing in things:
        counts[thing] = counts.get(thing, 0) + 1
    maxCount = max(counts.values())
    return sorted(thing for thing, count in counts.items() if count == maxCount)
def unique(things):
    """Return the distinct items of *things*, sorted ascending."""
    return sorted(set(things))
def dxy(align):
    """Return mean pairwise divergence (dxy) between the first two groups.

    "align" is an egglib alignment object (sequences, names, group labels);
    if it contains more than two groups only the first two are considered.
    Sites where either sequence is "N" are excluded from both the
    difference count and the site count.

    Fixes the original implementation, which initialised pairwiseSum and
    totalPairs but never used them, returning the statistic for a single
    sequence pair instead of the average over all between-group pairs —
    despite its own closing comment and the averaging done by the sister
    function px(). The zip result is also materialised so the two passes
    over it behave identically under Python 2 and 3.
    """
    # retrieve group names from the alignment object
    pops = align.groups().keys()
    # positions of the sequences belonging to each of the first two groups
    P1 = [i for i in range(len(align)) if align.group(i)==pops[0]]
    P2 = [i for i in range(len(align)) if align.group(i)==pops[1]]
    pairwiseSum = 0.0 #total of pairwise divergences
    totalPairs = 0 #haplotype pairs considered
    for i in P1: # for each sequence in pop1...
        for j in P2: #for each sequence in pop2...
            seqA = align[i][1]
            seqB = align[j][1]
            zippedSeqs = list(zip(seqA,seqB))
            diffs = sum(sA != sB for sA, sB in zippedSeqs if sA != "N" and sB != "N")
            sites = len([site for site in zippedSeqs if site[0] != "N" and site[1] != "N"])
            pairwiseSum += 1.0*diffs/sites
            totalPairs += 1
    #average over all between-population pairs
    return pairwiseSum/totalPairs
def px(align):
    """Return the mean pairwise diversity (pi) across all sequence pairs.

    For every pair of sequences in the alignment, the proportion of
    differing sites is computed over sites where neither sequence is "N",
    and the average over all pairs is returned.

    Fix: the original bound ``zip(seqA, seqB)`` once and iterated it twice.
    Under Python 3 ``zip`` returns a one-shot iterator, so the second pass
    (the usable-site count) always saw an empty sequence and the function
    crashed with ZeroDivisionError. Materialising the pairs as a list
    keeps the Python 2 behaviour and is correct under Python 3 as well.
    """
    pairwiseSum = 0 #total of pairwise Pis
    totalPairs = 0 #haplotype pairs considered
    for i in range(len(align) - 1): # for each sequence except the last one...
        for j in range(i + 1,len(align)): #for each of the remaining sequences...
            seqA = align[i][1]
            seqB = align[j][1]
            zippedSeqs = list(zip(seqA,seqB))
            diffs = sum(sA != sB for sA, sB in zippedSeqs if sA != "N" and sB != "N")
            sites = len([site for site in zippedSeqs if site[0] != "N" and site[1] != "N"])
            #add this pairwise pi to the total and count the pair
            pairwiseSum += 1.0*diffs/sites
            totalPairs += 1
    #return the average pairwise pi over all pairs
    return pairwiseSum/totalPairs
def colFreqs(align, columnNumber):
    """Relative frequencies of A/C/G/T in one alignment column.

    Ns (and any other characters) are excluded from the denominator. If
    the column contains no called bases at all, every frequency is the
    string "NA".
    """
    column = align.column(columnNumber)
    counts = {base: float(column.count(base)) for base in "ACGT"}
    total = sum(counts.values())
    if total > 0:
        return {base: count / total for base, count in counts.items()}
    return {"A": "NA", "C": "NA", "G": "NA", "T": "NA"}
def colBaseCounts(align, columnNumber):
    """Absolute counts (as floats) of A, C, G and T in one alignment
    column; Ns and other characters are ignored."""
    column = align.column(columnNumber)
    return {base: float(column.count(base)) for base in "ACGT"}
#version using frequencies to calculate fhom and fd
def ABBABABA(align, P1, P2, P3, P4, P3a = None, P3b = None):
    """Compute ABBA-BABA statistics for one alignment window.

    Uses derived-allele frequencies at biallelic sites to compute the D
    statistic plus three admixture-proportion estimators: fG (P3 split
    into two subsets), fhom (P3 used as its own donor) and fd (dynamic
    donor: whichever of P2/P3 has the higher derived frequency per site).

    :param align: egglib alignment with group labels
    :param P1,P2,P3,P4: group numbers of the four populations; P4 is the
        outgroup used to polarise ancestral/derived states
    :param P3a,P3b: optional group numbers for two subsets of P3 used by
        fG; when omitted, P3 is simply split in half
    :return: dict with keys "D", "fG", "fhom", "fd", "ABBA", "BABA";
        statistics with a zero denominator are reported as "NA"
    """
    p1Align = AlignByGroupNumber(align,P1)
    p2Align = AlignByGroupNumber(align,P2)
    p3Align = AlignByGroupNumber(align,P3)
    p4Align = AlignByGroupNumber(align,P4)
    if P3a == None or P3b == None:
        # BUG FIX: this branch referenced the undefined names P3Align,
        # P3aAlign and P3bAlign (wrong capitalisation), raising NameError
        # whenever P3a/P3b were omitted. Split p3Align in half instead.
        p3Half = len(p3Align) // 2
        p3aAlign = p3Align.slice(0, p3Half)
        p3bAlign = p3Align.slice(p3Half, len(p3Align))
    else:
        p3aAlign = AlignByGroupNumber(align,P3a)
        p3bAlign = AlignByGroupNumber(align,P3b)
    ABBAsum = 0.0
    BABAsum = 0.0
    maxABBAsumG = 0.0
    maxBABAsumG = 0.0
    maxABBAsumHom = 0.0
    maxBABAsumHom = 0.0
    maxABBAsumD = 0.0
    maxBABAsumD = 0.0
    #get derived frequencies for all biallelic sites
    for i in align.polymorphism(minimumExploitableData = 0)["siteIndices"]:
        #skip this site if not biallelic
        bases = [base for base in align.column(i) if base != "N"]
        alleles = unique(bases)
        if len(alleles) != 2: continue
        #get derived state: if the outgroup is fixed, its allele is the
        #ancestral state - otherwise the most common allele overall is
        p4Alleles = unique([base for base in p4Align.column(i) if base != "N"])
        if len(p4Alleles) == 1:
            derived = [a for a in alleles if a != p4Alleles[0]][0]
        else:
            derived = [a for a in alleles if a != mostCommon(bases)[0]][0]
        # derived-allele frequency in each population
        p1Freq = colFreqs(p1Align, i)[derived]
        p2Freq = colFreqs(p2Align, i)[derived]
        p3Freq = colFreqs(p3Align, i)[derived]
        p4Freq = colFreqs(p4Align, i)[derived]
        p3aFreq = colFreqs(p3aAlign, i)[derived]
        p3bFreq = colFreqs(p3bAlign, i)[derived]
        # accumulate ABBA/BABA weightings; a population with no genotyped
        # bases here has frequency "NA", which makes the arithmetic raise
        # TypeError - such sites are simply skipped
        try:
            ABBAsum += (1 - p1Freq) * p2Freq * p3Freq * (1 - p4Freq)
            BABAsum += p1Freq * (1 - p2Freq) * p3Freq * (1 - p4Freq)
            maxABBAsumG += (1 - p1Freq) * p3aFreq * p3bFreq * (1 - p4Freq)
            maxBABAsumG += p1Freq * (1 - p3aFreq) * p3bFreq * (1 - p4Freq)
            maxABBAsumHom += (1 - p1Freq) * p3Freq * p3Freq * (1 - p4Freq)
            maxBABAsumHom += p1Freq * (1 - p3Freq) * p3Freq * (1 - p4Freq)
            if p3Freq >= p2Freq:
                maxABBAsumD += (1 - p1Freq) * p3Freq * p3Freq * (1 - p4Freq)
                maxBABAsumD += p1Freq * (1 - p3Freq) * p3Freq * (1 - p4Freq)
            else:
                maxABBAsumD += (1 - p1Freq) * p2Freq * p2Freq * (1 - p4Freq)
                maxBABAsumD += p1Freq * (1 - p2Freq) * p2Freq * (1 - p4Freq)
        except TypeError:
            continue
    #calculate D and the f estimators; zero denominators give "NA"
    output = {}
    try:
        output["D"] = (ABBAsum - BABAsum) / (ABBAsum + BABAsum)
    except ZeroDivisionError:
        output["D"] = "NA"
    try:
        output["fG"] = (ABBAsum - BABAsum) / (maxABBAsumG - maxBABAsumG)
    except ZeroDivisionError:
        output["fG"] = "NA"
    try:
        output["fhom"] = (ABBAsum - BABAsum) / (maxABBAsumHom - maxBABAsumHom)
    except ZeroDivisionError:
        output["fhom"] = "NA"
    try:
        output["fd"] = (ABBAsum - BABAsum) / (maxABBAsumD - maxBABAsumD)
    except ZeroDivisionError:
        output["fd"] = "NA"
    output["ABBA"] = ABBAsum
    output["BABA"] = BABAsum
    return output
#***************************************************************************************************************
if "--stop-at" in sys.argv:
stopAt = True
stopVal = int(getOptionValue("--stop-at"))
else:
stopAt = False
if "--test" in sys.argv:
test = True
else:
test = False
if "--verbose" in sys.argv:
verbose = True
else:
verbose = False
if "--report" in sys.argv:
report = int(getOptionValue("--report"))
else:
report = 100
nextReport = report
if "-i" in sys.argv:
fileName = getOptionValue("-i")
else:
print "\nplease specify input file name using -i <file_name> \n"
sys.exit()
file = open(fileName, "rU")
#define names from header line (file must have a header)
line = file.readline()
names = line.split()
line= file.readline()
if "-p" in sys.argv:
popStrings = getOptionValue("-p")
else:
print "\nplease specify populations using -p\n"
sys.exit()
popNames = []
indNames = []
#for each pattern, store the name, the set of lists, the maximum Ns and the maximum mismatches
for popString in popStrings.strip("\"").split(";"):
currentPop = popString.split("[")[0]
popNames.append(currentPop)
vars()[currentPop] = get_intv(popString,"[]")[0].split(",")
for indName in vars()[currentPop]:
if indName in names:
if indName not in indNames:
indNames.append(indName)
else:
print "individual " + indName + "not found in header line."
sys.exit()
if "-O" in sys.argv:
includeOutGroup = True
outGroupString = getOptionValue("-O").strip("\"")
outGroup = outGroupString.split("[")[0]
vars()[outGroup] = get_intv(outGroupString,"[]")[0].split(",")
for indName in vars()[outGroup]:
if indName in names:
indNames.append(indName)
else:
print "individual " + indName + "not found in header line."
sys.exit()
else:
includeOutGroup = False
if test or verbose:
print "\nPopulations:\n"
for popName in popNames:
print popName
print vars()[popName]
print "\n"
if includeOutGroup:
print "\nOut-Group:\n"
print outGroup
print vars()[outGroup]
print "\n"
#set up a variable that reports the ploidy for each individual
ploidy = {}
if "--ploidy" in sys.argv:
ploidyNumbers = getOptionValue("--ploidy").strip("\"").split(",")
ploidyNumbers = [int(n) for n in ploidyNumbers if n == "1" or n == "2"]
if len(ploidyNumbers) == len(indNames):
print "\nPloidy is as follows:\n"
for x in range(len(indNames)):
ploidy[indNames[x]] = ploidyNumbers[x]
print indNames[x], ploidyNumbers[x]
else:
print "\nSpecify ploidy for each individual as 1 or 2, separated by commas\n"
sys.exit()
else:
#if ploidy is not specified, assume diploid
for indName in indNames:
ploidy[indName] = 2
if "-o" in sys.argv:
outName = getOptionValue("-o")
else:
print "\nplease specify output file name using -o <file_name> \n"
sys.exit()
if "-w" in sys.argv:
windSize = int(getOptionValue("-w"))
else:
print "\nplease specify window size using -w \n"
sys.exit()
if "-s" in sys.argv:
slide = int(getOptionValue("-s"))
else:
print "\nplease specify slide length using -s \n"
sys.exit()
if "-m" in sys.argv:
minSites = int(getOptionValue("-m"))
else:
print "\nplease specify the minimum number of sites per window using -m\n"
sys.exit()
allScafs = True
include = False
exclude = False
if "-S" in sys.argv:
scafsToInclude = getOptionValue("-S").strip("\"").split(",")
if test or verbose:
print "scaffolds to analyse:", scafsToInclude
allScafs = False
include = True
if "--include" in sys.argv:
scafsFileName = getOptionValue("--include")
scafsFile = open(scafsFileName, "rU")
scafsToInclude = [line.rstrip() for line in scafsFile.readlines()]
if test or verbose:
print len(scafsToInclude), "scaffolds will be included"
allScafs = False
include = True
if "--exclude" in sys.argv:
scafsFileName = getOptionValue("--exclude")
scafsFile = open(scafsFileName, "rU")
scafsToExclude = [line.rstrip() for line in scafsFile.readlines()]
if test or verbose:
print len(scafsToExclude), "scaffolds will be excluded."
allScafs = False
exclude = True
if "--chromosome" in sys.argv:
chroms = getOptionValue("--chromosome").split(",")
checkChrom = True
allScafs = False
if test or verbose:
print "\nOnly using scafolds from the following chromosomes:"
print chroms
if "--assignments" not in sys.argv:
print "\nPlease provide a chromosome assignments file using '--assignments'."
sys.exit()
else:
checkChrom = False
if checkChrom and "--assignments" in sys.argv:
chromsFileName = getOptionValue("--assignments")
chromsFile = open(chromsFileName, "rU")
chromsLines = chromsFile.readlines()
chromDict = {}
for chromsLine in chromsLines:
scaf,chrom = chromsLine.rstrip().split()
chromDict[scaf] = chrom
if "--sep" in sys.argv:
if getOptionValue("--sep") == "comma":
sep = ","
elif getOptionValue("--sep") == "white":
sep = None
else:
print "\nThe only options for --sep are [comma] or [white] \n"
sys.exit()
else:
sep = None
# start output file
mainOut = open(outName, "w")
mainOut.write("scaffold,position,start,end,midpoint,sites,sitesOverMinExD")
#check analyses
analyses = []
poly = True
popPoly = False
pairWisePoly = False
if "-a" in sys.argv:
analysesList = getOptionValue("-a").strip("\"").split(",")
if "S" in analysesList:
poly = True
analyses.append("S")
mainOut.write(",S")
if "pi" in analysesList:
popPoly = True
analyses.append("pi")
for popName in popNames:
mainOut.write("," + popName + "_pi")
if "popS" in analysesList:
popPoly = True
analyses.append("popS")
for popName in popNames:
mainOut.write("," + popName + "_S")
if "dxy" in analysesList:
pairWisePoly = True
analyses.append("dxy")
for X in range(len(popNames) - 1):
for Y in range(X + 1,len(popNames)):
mainOut.write("," + popNames[X] + "_" + popNames[Y] + "Dxy")
if "ABBABABA" in analysesList:
if "P1" in popNames and "P2" in popNames and "P3" in popNames and "O" in popNames:
analyses.append("ABBABABA")
mainOut.write(",ABBA,BABA,D,fG,fhom,fd")
else:
print "\nPopulation names P1, P2, P3 and O must be present to do ABBA BABA analyses.\n"
sys.exit()
mainOut.write("\n")
else:
print "\nplease specify analysis to be conducted (-a)\n"
sys.exit()
if analyses == []:
print "\nplease check analysis options\n"
sys.exit()
else:
print >> sys.stderr, "\nAnalyses to be included:\n"
for a in analyses:
print >> sys.stderr, a , "\n"
if "--ignoreFrequency" in sys.argv:
iF = int(getOptionValue("--ignoreFrequency"))
else:
iF = 0
if "--minimumExploitableData" in sys.argv:
minExD = float(getOptionValue("--minimumExploitableData"))
print "minimumExploitableData =", minExD
else:
minExD = 0
# counting stat that will let keep track of how far we are
windowsTested = 0
goodWindows = 0
#create temporary variables for nucleotide data
for name in indNames:
vars()["sub" + name] = []
#For the tempoarary window we need to store the positions each time to keep track of the spread of the sites
subPos = []
#read first line and store variables
line = file.readline().rstrip()
objects = line.split(sep)
if allScafs or (checkChrom and objects[0] in chromDict and chromDict[objects[0]] in chroms) or (include and objects[0] in scafsToInclude) or (exclude and objects[0] not in scafsToExclude):
subSCF = objects[0]
subPos.append(int(objects[1]))
for name in indNames:
vars()["sub" + name].append(objects[names.index(name)])
else:
subSCF = None
#read second line as the first to be evaluated by the loop
line = file.readline()
objects = line.split(sep)
windStart = 1
lastWindNA = False
while True:
#each time we do the loop we will be doing one window.
#if the line in hand is not yet too far away or on another scaffold, add the line and read another
if allScafs or subSCF is not None:
windowsTested += 1
while len(objects) > 1 and objects[0] == subSCF and int(objects[1]) < windStart + windSize:
subPos.append(int(objects[1]))
for indName in indNames:
vars()["sub" + indName].append(objects[names.index(indName)])
line = file.readline()
objects = line.split(sep)
#now the line in hand is incompatible with the current window
#if there are enough sites, we calculate stats and then slide the start along
if len(subPos) >= minSites and subSCF is not None:
if test or verbose:
print "\nGood window found. Length =", len(subPos), len(vars()["sub" + indNames[0]])
# add data to major outputs
Sites = str(len(subPos))
Scaf = (subSCF)
Position = str(windStart + (windSize - 1)/2)
Start = str(windStart)
End = str(windStart + windSize)
if mid(subPos):
Midpoint = str(int(round(mid(subPos))))
else:
Midpoint = "NA"
#if if diplois, split into haplos, if haploid, leave it
for indName in indNames:
if ploidy[indName] == 2:
#its diploid, so split into two haplotypes
vars()["haplo" + indName] = haplo(vars()["sub" + indName])
vars()[indName + "A"] = vars()["haplo" + indName][::2]
vars()[indName + "B"] = vars()["haplo" + indName][1::2]
#if haploid, the haplotype is the same as the calls we've collected
elif ploidy[indName] == 1:
vars()[indName + "A"] = vars()["sub" + indName]
# this section is for working with haplotypes separated by a | which means it must all be diploid
if test or verbose:
print "\nHaplotypes generated. Length = ", len(vars()[indNames[1] + "A"])
#create sequence objects for egglib, for all data types necessary, taking poidy into account
#first step is to create variables for all haps and each pop which will contain a tuple for each haplotype
allHaps = []
for popNumber in range(len(popNames)):
vars()[popNames[popNumber] + "Haps"] = []
for indName in vars()[popNames[popNumber]]:
#first, if its haploid, add only one haplotype, else add 2
if ploidy[indName] == 1:
hapA = (indName + "A", "".join(vars()[indName + "A"]), popNumber)
allHaps.append(hapA)
vars()[popNames[popNumber] + "Haps"].append(hapA)
else:
hapA = (indName + "A", "".join(vars()[indName + "A"]), popNumber)
hapB = (indName + "B", "".join(vars()[indName + "B"]), popNumber)
allHaps.append(hapA)
allHaps.append(hapB)
vars()[popNames[popNumber] + "Haps"].append(hapA)
vars()[popNames[popNumber] + "Haps"].append(hapB)
#now create egglib align objects for all of these sets of tuples
# for whole set, for each pop and for pairs of pops and single inds if necessary
allAlign = egglib.Align.create(allHaps)
for popName in popNames:
vars()[popName + "Align"] = egglib.Align.create(vars()[popName + "Haps"])
if pairWisePoly:
for X in range(len(popNames) - 1):
for Y in range(X + 1,len(popNames)):
vars()[popNames[X] + popNames[Y] + "Haps"] = []
for hap in vars()[popNames[X] + "Haps"]:
vars()[popNames[X] + popNames[Y] + "Haps"].append(hap)
for hap in vars()[popNames[Y] + "Haps"]:
vars()[popNames[X] + popNames[Y] + "Haps"].append(hap)
vars()[popNames[X] + popNames[Y] + "Align"] = egglib.Align.create(vars()[popNames[X] + popNames[Y] + "Haps"])
if test or verbose:
print "\negglib alignments generated:"
print "alignment length:", allAlign.ls(), "number of sequences:", allAlign.ns()
#depending on analyses requested, run analyses...
if poly:
if test or verbose:
print "\nrunning polymorphism analyses"
allPoly = allAlign.polymorphism(minimumExploitableData=minExD,allowMultipleMutations=True,ignoreFrequency=iF)
if popPoly:
if test or verbose:
print "\nrunning population-specific polymorphism analyses"
for popName in popNames:
vars()[popName + "Poly"] = vars()[popName + "Align"].polymorphism(minimumExploitableData=minExD,allowMultipleMutations=True,ignoreFrequency=iF)
if pairWisePoly:
if test or verbose:
print "\nrunning pair-wise polymorphism analyses"
for X in range(len(popNames) - 1):
for Y in range(X + 1,len(popNames)):
vars()[popNames[X] + popNames[Y] + "Poly"] = vars()[popNames[X] + popNames[Y] + "Align"].polymorphism(minimumExploitableData=minExD,allowMultipleMutations=True,ignoreFrequency=iF)
#sites passing minExD threshold
SitesOverMinExD = str(allPoly["lseff"])
#write data to main output
mainOut.write(Scaf + "," + Position + "," + Start + "," + End + "," + Midpoint + "," + Sites + "," + SitesOverMinExD)
if "S" in analyses:
mainOut.write("," + str(allPoly["S"]))
if "pi" in analyses:
for popName in popNames:
if vars()[popName + "Poly"]["lseff"] >= minSites:
try:
mainOut.write("," + str(round(px(vars()[popName + "Align"]),4)))
except:
mainOut.write(",NA")
else:
mainOut.write(",NA")
if "popS" in analyses:
for popName in popNames:
if vars()[popName + "Poly"]["lseff"] >= minSites:
mainOut.write("," + str(float(vars()[popName + "Poly"]["S"])))
else:
mainOut.write(",NA")
if "dxy" in analyses:
for X in range(len(popNames) - 1):
for Y in range(X + 1,len(popNames)):
if vars()[popNames[X] + popNames[Y] + "Poly"]["lseff"] >= minSites:
try:
mainOut.write("," + str(round(dxy(vars()[popNames[X] + popNames[Y] + "Align"]),4)))
except:
mainOut.write(",NA")
else:
mainOut.write(",NA")
if "ABBABABA" in analyses:
try:
if "P3a" in popNames and "P3b" in popNames:
ABstats = ABBABABA(allAlign, popNames.index("P1"), popNames.index("P2"), popNames.index("P3"), popNames.index("O"), popNames.index("P3a"), popNames.index("P3b"))
else:
ABstats = ABBABABA(allAlign, popNames.index("P1"), popNames.index("P2"), popNames.index("P3"), popNames.index("O"))
except:
ABstats = {"ABBA":"NA", "BABA":"NA", "D":"NA", "fG":"NA", "fhom":"NA", "fd":"NA"}
mainOut.write("," + str(ABstats["ABBA"]))
mainOut.write("," + str(ABstats["BABA"]))
mainOut.write("," + str(ABstats["D"]))
mainOut.write("," + str(ABstats["fG"]))
mainOut.write("," + str(ABstats["fhom"]))
mainOut.write("," + str(ABstats["fd"]))
mainOut.write("\n")
goodWindows += 1
if test:
break
if stopAt:
if stopVal == goodWindows:
break
lastWindNA = False
windStart += slide
i = len(subPos)
for x in subPos:
if x >= windStart:
i = subPos.index(x)
break
subPos = subPos[i:]
for name in indNames:
vars()["sub" + name] = vars()["sub" + name][i:]
#otherwise, if the last window as not NA, we will make an NA window and then we'll slide along (or reset if we're onto a new scaf
else:
if subSCF is not None and lastWindNA == False:
Sites = str(len(subPos))
SitesOverMinExD = "NA"
Scaf = (subSCF)
Position = str(windStart + (windSize-1)/2)
Start = str(windStart)
End = str(windStart + windSize)
if mid(subPos):
Midpoint = str(int(round(mid(subPos))))
else:
Midpoint = "NA"
mainOut.write(Scaf + "," + Position + "," + Start + "," + End + "," + Midpoint + "," + Sites + "," + SitesOverMinExD)
#Fill in NAs for all requested data
if "S" in analyses:
mainOut.write(",NA")
if "px" in analyses:
for popName in popNames:
mainOut.write(",NA")
if "popS" in analyses:
for popName in popNames:
mainOut.write(",NA")
if "dxy" in analyses:
for X in range(len(popNames) - 1):
for Y in range(X + 1,len(popNames)):
mainOut.write(",NA")
if "ABBABABA" in analyses:
mainOut.write(",NA,NA,NA,NA,NA,NA")
#and end the line
mainOut.write("\n")
#and record the winow as an NA
lastWindNA = True
#if the line in hand is on the same scaf, we simply slide the start along one slide
if len(objects) > 1 and objects[0] == subSCF:
i = len(subPos)
windStart += slide
for x in subPos:
if x >= windStart:
i = subPos.index(x)
break
subPos = subPos[i:]
for name in indNames:
vars()["sub" + name] = vars()["sub" + name][i:]
#otherwise its a new scaf, so we reset the subwindow and subScaf and read the next line
else:
windStart = 1
if len(objects) > 1:
if allScafs or (checkChrom and objects[0] in chromDict and chromDict[objects[0]] in chroms) or (include and objects[0] in scafsToInclude) or (exclude and objects[0] not in scafsToExclude):
subSCF = objects[0]
subPos = [int(objects[1])]
for name in indNames:
vars()["sub" + name] = [objects[names.index(name)]]
else:
subSCF = None
line = file.readline().rstrip()
objects = line.split(sep)
else:
break
if windowsTested == nextReport:
print windowsTested, "windows done ..."
nextReport += report
file.close()
mainOut.close()
print "\n" + str(windowsTested) + " windows were tested."
print "\n" + str(goodWindows) + " windows were good.\n"
print "\nDone."
| true |
2902b2a26cbcbeff567b94c1aa98a3fb0bd39b01 | Python | hsnsd/ai-explore | /simple_arbitrage.py | UTF-8 | 1,426 | 3.453125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 20 23:31:44 2018
@author: hsnsd
A simple program that reports the arbitrage opportunity for any input coin, using Cryptonator's API.
"""
import pandas as pd
import requests
import json
from matplotlib import pyplot as plt
import numpy as np
def getArbitrage(coin, curr='usd'):
    """Report the arbitrage opportunity for `coin` across markets.

    Fetches full ticker data from the Cryptonator API, prints the cheapest
    and the most expensive market plus the price gap between them, and
    shows a price/volume scatter plot with one annotated point per market.

    Args:
        coin: coin ticker symbol, e.g. 'btc'.
        curr: quote currency the prices are expressed in (default 'usd').

    Returns:
        None -- all results are printed/plotted.
    """
    url = "https://api.cryptonator.com/api/full/{}-{}".format(coin, curr)
    r = requests.get(url)
    # Was `dict = json.loads(...)`: renamed so the builtin `dict` is not shadowed.
    payload = json.loads(r.content)
    data = payload['ticker']['markets']
    # Sort markets by price so the extremes sit at the ends.
    sortedData = sorted(data, key=lambda x: x['price'])
    cheapest = sortedData[0]
    dearest = sortedData[-1]
    print ('Lowest price is: ' + cheapest['price'] + ' at ' + cheapest['market'])
    print ('Highest price is: ' + dearest['price'] + ' at ' + dearest['market'])
    print ('Potential arbitrage profit is ' + str(float(dearest['price']) - float(cheapest['price'])))
    # Scatter plot: price on x, traded volume on y, market name as label.
    df = pd.DataFrame(data)
    plt.figure(num=None, figsize=(8, 6), dpi=100)
    df['price'] = df['price'].astype(float)
    plt.scatter(df['price'], df['volume'])
    for mMkt, mRate, mVol in zip(df['market'], df['price'], df['volume']):
        plt.annotate(mMkt, xy=(mRate, mVol), xytext=(5, -5), textcoords='offset points', fontsize=8)
    plt.title("Cryptocurrency Arbitrage Assignment")
    plt.xlabel("Price")
    plt.ylabel("Volumes")
    plt.show()
if __name__ == '__main__':
    # Was `dict = getArbitrage(...)`: the function returns None and the
    # assignment shadowed the builtin `dict`; the guard also keeps the
    # interactive prompt from running on import.
    getArbitrage(input('Enter coin name '))
| true |
57987f36113a1e5ea5184dd902eb7f7b7d679740 | Python | aligg/PythonPractice | /diceroller.py | UTF-8 | 1,244 | 4.125 | 4 | [] | no_license | from random import randint
name = input("Welcome to the dice game, what's your name?")
answer = input("Hey there %s. In the dice game the rules are as follows: You get three turns total. If you roll a 1 you get 0 points and a 6 gets you 10 points. For all other rolls, the score matches the number on the dice. Are you ready to give the dice a roll? (y)es or (n)o?" % (name))
score = 0
total_score = 0
turns = 0
while answer.lower()[0] == "y" and turns < 3:
output = randint(1, 6)
if output == 6:
score = 10
elif output == 1:
score = 0
else:
score = output
total_score += score
turns = turns + 1
print("You rolled a %d for a score of %d. Your total score is now %d and your total turns are %d" % (output,score, total_score, turns))
print ("Do you want to roll again? y or n?")
answer = input()
if answer.lower()[0] == "n" or turns == 3:
if total_score >= 10:
print("Okay see ya later %s! Your final total score was %d. This means you win the game! Congratulations!" % (name, total_score))
else:
print("Okay bye %s! Your final total score was %d, This means you lost the game. Don't be afraid to play again! Better luck next time." % (name, total_score))
| true |
4c88c526aec8e55a0e580283829d5b0774187428 | Python | eastrd/HighAnonProxyPool-v2 | /scrapers/hidemy.name.py | UTF-8 | 1,386 | 2.671875 | 3 | [] | no_license | from framework import scrape
from db import save_new_proxy_record
from time import sleep
from loglib import log_print
# Seconds to sleep between full scraping rounds (10 minutes).
time_interval = 60 * 10
# Generalized URL template to hold all proxy urls
url_template = "https://hidemy.name/en/proxy-list/?type=hs&anon=4&start={NUM}#list"
# url_pages are a list of strings to be inserted into the url template {NUM} location
# (the site paginates 64 proxies per page, so offsets 0, 64, ..., 704).
url_pages = [64*i for i in range(0, 12)]
# xpath rules to locate data fields in each table row of the listing page
xpath = {
    "ip" : '//*[@id="content-section"]/section[1]/div/table/tbody/tr[*]/td[1]',
    "port" : '//*[@id = "content-section"]/section[1]/div/table/tbody/tr[*]/td[2]',
    "protocol" : '//*[@id="content-section"]/section[1]/div/table/tbody/tr[*]/td[5]',
    "country" : lambda text: text,
} if False else {
    "ip" : '//*[@id="content-section"]/section[1]/div/table/tbody/tr[*]/td[1]',
    "port" : '//*[@id = "content-section"]/section[1]/div/table/tbody/tr[*]/td[2]',
    "protocol" : '//*[@id="content-section"]/section[1]/div/table/tbody/tr[*]/td[5]',
    "country" : '//*[@id="content-section"]/section[1]/div/table/tbody/tr[*]/td[3]/div',
}
# Lambda functions to further extract/clean the raw text of each field
extractor = {
    "ip" : lambda text: text,
    "port" : lambda text: text,
    "protocol" : lambda text: text,
    "country" : lambda text: text.strip(),
}
# Main daemon loop: scrape every listed page, persist each proxy record,
# then sleep before the next round. Runs forever by design.
while True:
    proxy_list_of_dicts = scrape(url_template, url_pages, xpath, extractor, sleep_before_scrape=10)
    for proxy_dict in proxy_list_of_dicts:
        save_new_proxy_record(proxy_dict)
    log_print("Finished one round of scraping, sleep for " +
              str(time_interval) + " seconds")
    sleep(time_interval)
| true |
9fea6bb200237bbe51e1fd2c055cbf0db14319ed | Python | vendyv/A2OJ-Ladders | /A2OJ-11/075_B_Fence.py | UTF-8 | 582 | 3.1875 | 3 | [
"MIT"
] | permissive | """
75 Fence - https://codeforces.com/problemset/problem/363/B
"""
def main():
n, k = map(int, input().split())
l = list(map(int, input().split()))
s = sum(l[:k])
min = s
x=0
for i in range(1,n-k+1):
s = s - l[i-1] + l[k+i-1]
# print(sum(l[x:i]))
if s < min:
min = s
x = i
# temp = sum(l[x:i])
# if temp < s:
# s = temp
# a = x + 2
# # print(a)
# x+=1
# print(s)
# index = s.index(min(s))
return x+1
if __name__ == '__main__':
    # Guard the stdin-driven entry point so importing this module (e.g.
    # for testing) does not block on input().
    print(main())
b5917b741bde0d8847796609d90672b6d15554b3 | Python | mit308/OpenCV | /Working on Images/Adding or merging two images.py | UTF-8 | 324 | 2.703125 | 3 | [] | no_license | import cv2
import numpy as np
# Load the two source images from the working directory.
# NOTE(review): cv2.imread returns None (no exception) for a missing
# file, which would make the resize below fail -- confirm both files exist.
img=cv2.imread("messi5.jpg")
img2=cv2.imread("opencv-logo-white.png")
# Resizing the two images to the same 512x512 shape so they can be added
img=cv2.resize(img, (512, 512))
img2=cv2.resize(img2, (512, 512))
# cv2.add does per-pixel saturated addition (values clipped at 255).
merge=cv2.add(img, img2) # Merging two images
# Display the blended result until any key is pressed.
cv2.imshow('Messi', merge)
cv2.waitKey(0)
cv2.destroyAllWindows()
5dea282a965c085389b936b44c9f05cdcae4aa8c | Python | deedee1886-cmis/deedee1886-cmis-cs2 | /textgame.py | UTF-8 | 1,271 | 3.859375 | 4 | [
"CC0-1.0"
] | permissive |
#Stranded on an island is a program that will ask the user to make many choices, their choices will determine their survival on this uninhibited island.
a = raw_input("type your name here ")
print "Welcome to, Stranded on an Island"
print "Hello " + str(a) + ", your ship have crashed and you have been stranded on island."
print "Here you will have many choices to make, remember your survival depends on those choices."
print "You see that both your knife and canteen have washed up onto shore with you, but you can only take one, which one do you pick?"
raw_input("type your choice here ")
knife = True
canteen = False
if False:
print "Sorry that is the wrong choice, since you will be able to craft a canteen if you chose the knife"
else True:
print "Great choice the knife is needed for any island dweller"
print "You then proceed to walk around the island, you hear a loud roar, do you go over to check, or do you stay?"
raw_input("type your choice here ")
stay = True
check = False
if False:
print "You go into the bushes to check for the source of the sound, you then find a big hungry lion, you then try to run away, but the lion catches you and proceeds to eat you alive"
else True:
print "You choose to stay, and get to live for another day"
| true |
6b21f583f8660ff2c3dfbfa17a69f3a738c3f0b8 | Python | UOSAN/DEV_scripts | /fMRI/fx/models/SST/direct_regression/experiment_with_whole_brain_task_alignment.py | UTF-8 | 525 | 2.734375 | 3 | [] | no_license |
#what would it look like to take each whole-brain series, and align it to the moment of the tone?
#then we would have to have a series of images associated with each trial, and a set of metadata capturing the trials those images related to
#then we could produce maps of the max and min activity; but we still wouldn't be able to threshold it like an SPM
#another option:
#create a new event-based design matrix, where each event is a tone, and we capture events precisely aligned with tones
#nothing we have now does that.
38f276c2920a0baf18754d38e09b714d80f8d618 | Python | albertosanfer/MOOC_python_UPV | /Módulo 2/Práctica2_4.py | UTF-8 | 321 | 3.96875 | 4 | [
"Apache-2.0"
] | permissive | # El código a continuación tiene un **input** que solicitará tu nombre y un
# **print** que tratará de darte la bienvenida. Modifica la variable bienvenida
# de modo que concatene tu nombre dentro del mensaje de bienvenida.
# Ask for the user's name and build the greeting by concatenating the
# name into the welcome message (as the exercise statement requires).
nombre = input('¿Como te llamas?')
bienvenida = 'Bienvenid@ : ' + nombre
print(bienvenida)
| true |
a9d7432cdf89eabc454e1b65cd8329c5b01cd788 | Python | geoolekom/vkart | /vkapi/group_classification/feature.py | UTF-8 | 784 | 2.609375 | 3 | [] | no_license | from .. import api as vkapi
from pprint import pprint
import re
from .parameters import max_posts
def text_process(s):
    """Normalise raw text: lowercase it, keep only Latin/Cyrillic letters
    and spaces, and collapse whitespace runs into single spaces."""
    lowered = s.replace('\n', ' ').lower()
    letters_only = re.sub('[^a-zA-Zа-яА-Я ]', '', lowered)
    words = [word for word in letters_only.split(' ') if word]
    return ' '.join(words)
def extract_text(api, group_id, **kwargs):
    """Fetch a VK community's texts and return them as one normalised
    string: wall posts, then goods, then album titles/descriptions."""
    texts = vkapi.get_group_texts(api, group_id, **kwargs)

    def titled(items):
        # Concatenate each item's title and description into one string.
        return ' '.join(item['title'] + ' ' + item['description'] for item in items)

    parts = [
        text_process(' '.join(texts['posts'])),
        text_process(titled(texts['goods'])),
        text_process(titled(texts['albums'])),
    ]
    return ' '.join(parts)
if __name__ == '__main__':
    # Manual smoke test: print the extracted text of one hard-coded VK group.
    print(extract_text(vkapi.get_api(), '126622648', max_posts=max_posts))
| true |
17ddaf3109e64561b68be5dd6e20f2127db60a74 | Python | sn8ke01/movieranker | /test.py | UTF-8 | 2,143 | 3.1875 | 3 | [] | no_license | import bs4
import collections
import re
import csv
# Scan movie_ratings.csv and, for every row, print the regex match object
# (or None) for the title "TITANIC" against the row's string representation.
# NOTE(review): re.search on str(row) matches the title anywhere inside
# the stringified row and is case-sensitive -- confirm intended lookup.
with open('movie_ratings.csv') as csvfile:
    read_csv = csv.reader(csvfile, delimiter=',')
    x = "TITANIC"
    for row in read_csv:
        #print(row)
        print(re.search(x, str(row)))
# reMovieList = collections.namedtuple('MovieList', 'rank, title, year')
#
# html = '''<h3 class="lister-item-header">
# <span class="lister-item-index unbold text-primary">1.</span>
# <a href="/title/tt0081398/?ref_=ttls_li_tt">Raging Bull</a>
# <span class="lister-item-year text-muted unbold">(1980)</span>
# </h3>
# <h3 class="lister-item-header">
# <span class="lister-item-index unbold text-primary">2.</span>
# <a href="/title/tt0081398/?ref_=ttls_li_tt">Godfather</a>
# <span class="lister-item-year text-muted unbold">(1977)</span>
# </h3>'''
#
# # print(html)
#
# soup = bs4.BeautifulSoup(html, 'html.parser')
# tags = soup.find_all('a')
# print(tags)
# t = tags[0]
# print(t)
# print(t.text)
# print('---')
# # movie_rank = [a for a in (h3.find(class_="lister-item-index unbold text-primary") for h3 in soup.find_all('h3')) if a]
# movie_title = [a for a in (h3.find('a') for h3 in soup.findAll('h3')) if a]
# m = movie_title[0]
# print(m)
# print(m.text)
# print('---')
# movie_headers = soup.find_all(class_='lister-item-header')
# print(type(movie_headers))
#
# x = 0
# while x < 2:
# m = movie_headers[x]
# #print(type(m))
# print(m.text.strip())
# print(type(m.text))
# x = x + 1
# #data = MovieList(rank=movie_rank, title=movie_title, year=movie_year)
#print(data)
# movie_year = [a for a in (h3.find(class_="lister-item-year text-muted unbold") for h3 in soup.findAll('h3')) if a]
#
# print(type(movie_title))
#
# movie_rank = re.search("\d{1,3}(?=\.</span)", str(movie_rank)).group()
# yr = re.search("\(\d\d\d\d\)", str(movie_year))
# movie_year = yr.group().replace('(', '').replace(')', '')
#
# print(type(movie_title))
#
# data = MovieList(rank=movie_rank, title=movie_title, year=movie_year)
#
# print('rank:{}, title:{}, year:{}'.format(data.rank, data.title, data.year))
| true |
248969e3bb197f5d7e80c5949d8932d3592bf64d | Python | CaimeiWang/python100 | /080.py | UTF-8 | 874 | 4 | 4 | [] | no_license | '''
海滩上有一堆桃子,五只猴子来分。第一只猴子把这堆桃子平均分为五份,多了一个,这只猴子把多的一个扔入海中,拿走了一份。
第二只猴子把剩下的桃子又平均分成五份,又多了一个,它同样把多的一个扔入海中,拿走了一份,第三、第四、第五只猴子都是这样做的,问海滩上原来最少有多少个桃子?
'''
def min_peaches(monkeys=5):
    """Return the smallest pile size such that each of `monkeys` monkeys in
    turn finds a remainder of 1 when splitting the pile into `monkeys`
    shares, throws the extra peach into the sea, and carries off one share.

    Fixes both original methods: method1 forgot to remove the discarded
    peach (`m = m - a` instead of `m = m - a - 1`), so it printed every
    n ≡ 1 (mod 25) rather than the true minimum; method2 used float
    division (`x/4`) and printed a float. The classic five-monkey answer
    is 3121.
    """
    n = monkeys + 1
    while True:
        m = n
        for _ in range(monkeys):
            if m % monkeys != 1:
                break
            # Throw one peach away, then remove this monkey's share:
            # remaining = (m - 1) * (monkeys - 1) / monkeys, exactly.
            m = (m - 1) * (monkeys - 1) // monkeys
        else:
            # All monkeys succeeded -> n is the (smallest so far) answer.
            return n
        n += 1


if __name__ == '__main__':
    print(min_peaches())
40e118395751605624c927214ce0c2fed784e96a | Python | turovod/Otus | /4_Data_Driven_Testing/Iterators/example4-generator-function.py | UTF-8 | 492 | 2.609375 | 3 | [
"MIT"
] | permissive | import gzip, bz2
from pathlib import Path
def gen_open(paths):
    """Yield text-mode file objects for *paths*, transparently
    decompressing files whose suffix is .gz or .bz2."""
    openers = {'.gz': gzip.open, '.bz2': bz2.open}
    for path in paths:
        # Fall back to the builtin open() for uncompressed files.
        opener = openers.get(path.suffix, open)
        yield opener(path, 'rt')
def gen_cat(sources):
    """Lazily concatenate the given iterables into one flat item stream."""
    for source in sources:
        yield from source
# Demo pipeline: lazily stream every line of every access-log* file under
# /usr/www. Nothing is opened or read until `loglines` is iterated.
lognames = Path('/usr/www').rglob("access-log*")
logfiles = gen_open(lognames)
loglines = gen_cat(logfiles)
| true |
4e4472c3c4cd0822b0c3d4251d4447e25c082f38 | Python | Linuxoid-Rostyan/Text-Generator | /text_generator.py | UTF-8 | 2,388 | 3.015625 | 3 | [] | no_license | from nltk.tokenize import regexp_tokenize
from nltk import bigrams
from collections import Counter
from string import ascii_uppercase
import random
file = open(input(), "r", encoding="utf-8")
bigram_list = list(bigrams(regexp_tokenize(file.read(), r'\S+')))
file_list = [str(bigram[0]) for bigram in bigram_list]
trigram_list = []
for i in range(len(file_list) - 2):
trigram_list.append([file_list[i] + " " + file_list[i + 1], file_list[i + 2]])
head_list = [head[0] for head in trigram_list]
def word_generator(tail_count_list):
    """Pick one continuation word at random from a Counter of tail words,
    weighted by observed frequency, among at most the five most common
    candidates.

    The original duplicated the whole expression in an if/else on
    ``len(tail_count_list) >= 5`` -- but ``Counter.most_common(5)``
    already returns *all* entries when fewer than five exist, so both
    branches were identical and are collapsed into one.
    """
    top = tail_count_list.most_common(5)
    words = [tail[0] for tail in top]
    counts = [tail[1] for tail in top]
    return random.choices(words, weights=tuple(counts))[0]
# Generate ten pseudo-sentences from the trigram model.
for _ in range(10):
    random_sentence = random.choices(head_list)[0].split()
    # Re-draw heads until one starts with an ASCII capital letter and
    # neither of its two words ends in sentence-final punctuation.
    while True:
        if (random_sentence[0][-1] != '.' and random_sentence[0][-1] != '!' and random_sentence[0][-1] != '?' and random_sentence[-1][-1] != '.' and random_sentence[-1][-1] != '!' and random_sentence[-1][-1] != '?') and random_sentence[0][0] in ascii_uppercase:
            break
        else:
            random_sentence = random.choices(head_list)[0].split()
            continue
    # NOTE(review): this or-chain is always True (no single character can
    # equal '.', '!' and '?' simultaneously), so the loop only terminates
    # via the `break` below once a terminal word is appended -- confirm
    # this is the intended control flow.
    while random_sentence[-1][-1] != '.' or random_sentence[-1][-1] != '!' or random_sentence[-1][-1] != '?' or random_sentence[0][-1] != '.' or random_sentence[0][-1] != '!' or random_sentence[0][-1] != '?':
        # Frequencies of every word seen after the current two-word tail.
        tail_counter = Counter([trigram[1] for trigram in trigram_list if trigram[0] == random_sentence[-2] + ' ' + random_sentence[-1]])
        word = word_generator(tail_counter)
        if len(random_sentence) < 4:
            # Sentence still short: accept any continuation word.
            # NOTE(review): word_generator is called a second time here,
            # discarding the draw above -- confirm intended.
            word = word_generator(tail_counter)
            random_sentence.append(word)
        else:
            # Long enough: draw again and stop once the drawn word ends
            # the sentence with terminal punctuation.
            word = random.choices([tail[0] for tail in tail_counter.most_common(5)], weights=tuple([tail[1] for tail in tail_counter.most_common(5)]))[0]
            if word[-1][-1] == '.' or word[-1][-1] == '!' or word[-1][-1] == '?':
                random_sentence.append(word)
                break
            else:
                random_sentence.append(word)
    generated_sentence = ' '.join(random_sentence)
    print(generated_sentence)
| true |
75b5c57c94d275053e0bc2566bf8e34f41e8631e | Python | SJ12896/TIL | /startcamp/day1/lunch.py | UTF-8 | 342 | 3.171875 | 3 | [] | no_license | menu = ['예향정', '장가계', '첨단공원국밥']
# print(menu)
# print(menu[0], menu[-1])
phone_book = {'예향정' : '123-123', '첨단공원국밥' : '456-456', '장가계' : '789-789'}
# print(phone_book)
# print(phone_book['첨단공원국밥'])
import random
print(f'{phone_book[random.choice(menu)]} 의 전화번호는 {123}') | true |
e4d0990f78266c3ce0c1b245f6fdf5e8c9774c28 | Python | yuwinzer/GB_Python_basics | /hw_to_lesson_01/6_sport.py | UTF-8 | 719 | 3.609375 | 4 | [] | no_license | num_1day = int(input("Введите количество километров за первый день пробежки: "))
min_dist = int(input("Введите минимальное расстояние, которое должен пробежать спортсмен: "))
print(f"1-й день: {num_1day} км")
next_day_dist = num_1day
a = True
i = 2 # начинаем цикл со второго дня
while a:
next_day_dist += next_day_dist * 0.1
print(f"{i}-й день: {round(next_day_dist, 2)} км")
if next_day_dist >= min_dist:
print(f"На {i}-й день спортсмен достиг результата — не менее {min_dist} км.")
break
i += 1 | true |
7a94a72669119f42c7f03a87bad0a9405d4f0b06 | Python | krishnakrib/wer | /char1.py | UTF-8 | 140 | 3.109375 | 3 | [] | no_license | test_str="geeksforgeeks"
count=0
for i in test_str:
if i=='e':
count=count+1
print("count of e in geeksforgeeks=" + str(count))
| true |
df6aff3a98bedb5f065b579c5bd2fb065922df5d | Python | Aasthaengg/IBMdataset | /Python_codes/p03626/s403375817.py | UTF-8 | 603 | 3 | 3 | [] | no_license | n = int(input())
s = [list(input()), list(input())]
MOD = 1000000007
dp = [0]*n
if s[0][0] == s[1][0]: # tate
dp[0] = 3
else: # yoko
dp[0] = 6
for i in range(1,n):
if s[0][i] == s[0][i-1]:
dp[i] = dp[i-1]
elif s[0][i-1] == s[1][i-1] and s[0][i] == s[1][i]: # tate & tate
dp[i] = dp[i-1] * 2 % MOD
elif s[0][i-1] == s[1][i-1] and s[0][i] != s[1][i]: # tate & yoko
dp[i] = dp[i-1] * 2 % MOD
elif s[0][i-1] != s[1][i-1] and s[0][i] == s[1][i]: # yoko & tate
dp[i] = dp[i-1]
else: # yoko & yoko
dp[i] = dp[i-1] * 3 % MOD
print(dp[n-1]) | true |
6d76d1da4fd44827f742ae24b113581a2b19ebcc | Python | osamhack2021/AI_MaskDetector_Kitty | /tests/test_mask_detector.py | UTF-8 | 1,265 | 2.8125 | 3 | [
"MIT"
] | permissive | import pytest
import cv2
import numpy as np
import tensorflow as tf
from mask_detector import MaskDetector, FacenetDetector
test_image_filename = "resource/sample/image/pexels-gustavo-fring-4127449.jpg"
@pytest.fixture
def faces():
facenet_detector = FacenetDetector()
faces, _, _ = facenet_detector.detect_faces_from_file(test_image_filename)
return faces
@pytest.fixture
def mask_detector():
return MaskDetector()
def test_predict(mask_detector, faces):
"""
얼굴 사진들을 잘 예측했는지 확인한다
"""
preds = mask_detector.predict(faces)
assert preds.shape == (4,)
assert all(isinstance(p, np.float32) for p in preds)
# 4개의 얼굴 모두 마스크를 썼다고 인식해야함
assert all(p > 0.5 for p in preds)
def test_predict_one(mask_detector, faces):
"""
얼굴 사진 1장을 잘 예측했는지 확인한다
"""
p = mask_detector.predict_one(faces[0])
assert isinstance(p, np.float32)
assert p > 0.5
def test_invalid_format(mask_detector):
"""
64x64x3 이 아닌 이미지가 입력되면 ValueError 발생
"""
image = np.zeros((200, 200, 3), dtype=np.int8)
with pytest.raises(ValueError):
mask_detector.predict_one(image)
| true |
4ff7788fe49350a7f539c567dee8c68c23c170f3 | Python | ChengHsinHan/myOwnPrograms | /CodeWars/Python/8 kyu/#238 String Templates - Bug Fixing #5.py | UTF-8 | 255 | 3.15625 | 3 | [] | no_license | # Oh no! Timmy hasn't followed instructions very carefully and forgot how to use
# the new String Template feature, Help Timmy with his string template so it
# works as he expects!
def build_string(*args):
    """Join all positional arguments with ", " and embed them in the
    sentence "I like ...!"."""
    joined = ", ".join(args)
    return "I like {}!".format(joined)
| true |
a7fc25b61458359ac469cd50a5d8e0acd0260b8f | Python | badHax/Simplified-RSA | /Scripts/client.py | UTF-8 | 3,846 | 3.484375 | 3 | [] | no_license | # Client to implement simplified RSA algorithm.
# The client says hello to the server, and the server responds with a Hello
# and its public key. The client then sends a session key encrypted with the
# server's public key. The server responds to this message with a nonce
# encrypted with the server's public key. The client decrypts the nonce
# and sends it back to the server encrypted with the session key. Finally,
# the server sends the client a message with a status code.
# Author: Odain Chevannes 2015-11-13
#!/usr/bin/python3
import socket
import math
import random
import simplified_AES
def expMod(base, power, n):
    """Return base**power mod n.

    The original implementation multiplied the full, unreduced power out in
    a Python loop — O(power) multiplications on ever-growing integers.  The
    built-in three-argument pow() performs square-and-multiply with a
    reduction at every step, giving the same result in O(log power) time.
    """
    return pow(base, power, n)
def RSAencrypt(m, e, n):
    """Encryption side of RSA: return m**e mod n.

    Uses the built-in three-argument pow() (fast modular exponentiation)
    instead of the naive repeated-multiplication helper.
    """
    return pow(m, e, n)
def RSAdecrypt(c, d, n):
    """Decryption side of RSA: return c**d mod n.

    Uses the built-in three-argument pow() (fast modular exponentiation)
    instead of the naive repeated-multiplication helper.
    """
    return pow(c, d, n)
def serverHello():
    """Build the initial hello message sent to the server."""
    return "100 Hello"
def sendSessionKey(s):
    """Build the message carrying the RSA-encrypted session key."""
    return f"112 SessionKey {s}"
def sendTransformedNonce(xform):
    """Build the message carrying the nonce encrypted with the session key."""
    return f"130 {xform}"
def computeSessionKey():
    """Pick this node's random session key, uniform in [1, 32768]."""
    return random.randint(1, 32768)
def main():
    """Driver for the client side of the simplified-RSA handshake.

    Protocol (see file header): hello -> receive public key -> send an
    RSA-encrypted session key -> receive an encrypted nonce -> return the
    nonce encrypted with the session key -> receive a status message.
    """
    HOST = 'localhost' # The remote host
    PORT = 13000 # The same port as used by the server
    c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    c.connect((HOST,PORT))
    msg = serverHello()
    c.sendall(bytes(msg,'utf-8')) # Sending bytes encoded in utf-8 format.
    data = c.recv(1024).decode('utf-8')
    strStatus = "105 Hello"
    if data and data.find(strStatus) < 0:
        print("Invalid data received. Closing")
    else:
        # Parse the received string and extract the modulus and exponent
        # for public-key encryption ("105 Hello <n> <e>").
        splitd = data.split(" ")
        n = int(splitd[2])# Modulus for public key encryption
        e = int(splitd[3]) # Exponent for public key encryption
        print("Server's public key: ("+ str(n)+","+str(e)+")")
        symmetricKey = computeSessionKey()
        print("the generated symm key is ",symmetricKey)
        simplified_AES.keyExp(symmetricKey)
        encSymmKey = RSAencrypt(symmetricKey, e, n)
        print("encrypted symm key ",encSymmKey)
        msg = sendSessionKey(encSymmKey)
        c.sendall(bytes(msg,'utf-8'))
        data = c.recv(1024).decode('utf-8')
        strStatus = "113 Nonce"
        if data and data.find(strStatus) < 0:
            print("Invalid data received. Closing")
        else:
            # Extract the encrypted nonce from "113 Nonce <enc>".
            splitd = data.split(" ")
            encNonce = int(splitd[2])
            # from the server. The nonce has been encrypted with the server's
            # private key.
            print("Encrypted nonce: "+ str(encNonce))
            # NOTE(review): "decrypting" by raising to the PUBLIC exponent
            # only works because the server encrypted with its private key.
            temp = RSAencrypt(encNonce, e, n)
            plaintext = temp
            print("Decrypted nonce: "+ str(temp))
            """Setting up for Simplified AES encryption"""
            simplified_AES.keyExp(symmetricKey) # Generating round keys for AES.
            ciphertext = simplified_AES.encrypt(plaintext) # Running simplified AES.
            msg = sendTransformedNonce(ciphertext)
            c.sendall(bytes(msg,'utf-8'))
            data = c.recv(1024).decode('utf-8')
            if data:
                print(data)
    c.close()

if __name__ == "__main__":
    main()
| true |
bc8d26969e88bf3739dd3610d95b316c9ac00e11 | Python | elisa-lj11/tier2019 | /scrapers/blog_scraper.py | UTF-8 | 1,883 | 2.75 | 3 | [] | no_license | # Created by Elisa Lupin-Jimenez
# Program to scrape HTML formatted blog code for text comments and posts
# outputs a new text file with just the comments and posts
import os
#from html.parser import HTMLParser
from bs4 import BeautifulSoup
#from selectolax.parser import HTMLParser
# Directory that holds the raw, HTML-formatted blog files to clean.
source_dir = r'F:\Elisa\text_files\avforums_text_files'
# Directory that receives the extracted plain-text output files.
result_dir = r'F:\Elisa\text_files\avforums_text_files_cleaned'
def get_text_bs(html):
    """Return the visible text of an HTML document's body.

    <script> and <style> elements are removed before extraction so their
    contents do not leak into the text.  Returns None when the parsed
    document has no <body>.
    """
    soup = BeautifulSoup(html, 'lxml')
    body = soup.body
    if body is None:
        return None
    for unwanted in body.select('script') + body.select('style'):
        unwanted.decompose()
    return body.get_text(separator='\n')
# Walk the source tree and write a cleaned plain-text copy of every HTML
# file into result_dir.  NOTE(review): the bare except hides all failures,
# new_file is never explicitly closed, and the empty-content check below
# runs only AFTER the file has already been processed and written.
for path,dirs,files in os.walk(source_dir):
    for file in files:
        if file.endswith('div.html'):
            continue
        try:
            with open(os.path.join(path,file),'r',encoding='utf-8') as f:
                new_file_name = result_dir + '\\' + file
                print(new_file_name)
                new_file = open(new_file_name, "w+", encoding="utf-8")
                html_contents = f.read()
                #text = get_text_selectolax(html_contents)
                text = get_text_bs(html_contents)
                new_file.write(text)
                #soup = BeautifulSoup(html_contents, features='lxml')
                #print(soup.prettify())
                #print(html_contents)
                #for tag in soup.find_all('blockquote', 'messageText'):
                #new_file.write("{}\n\n".format(tag))
        except:
            print("Unable to write to file")
            continue
        if html_contents == None or html_contents == '':
            print("Empty HTML file")
            continue
        #text_list.append(text)
| true |
fe7137a1bd8a6b68c40ba34ad33c0b2f964875e5 | Python | code-moe/copycat | /#14 Phyton List Set and Timer.py | UTF-8 | 922 | 3.578125 | 4 | [] | no_license | #name : Python List Set and Timer
#author : CodeMoe
#date : 23 August, 2019
#true-a : Code taken from Shirayuki-sama from Python Discord
#import modules Timer and ascii_letters
from timeit import Timer
from string import ascii_letters
# Fill list_a and list_b with the 52 ASCII letters (identical contents),
# so the two benchmark functions below intersect equal lists.
list_a = list(ascii_letters)
list_b = list(ascii_letters)
#return value
def test1():
    """List comp"""
    # O(n*m): every element of list_a triggers a linear scan of list_b.
    return [letter for letter in list_a if letter in list_b]
#Set function to join same value that exists in both lists
def test2():
    """Set"""
    # Set intersection is O(n + m); element order of the result is unspecified.
    common = set(list_a).intersection(list_b)
    return list(common)
# Show both results once, then time 10000 runs of each implementation.
print(test1(), test2())
# sorted() could be applied to test2()'s result to get a stable order,
# e.g. sorted(test2()).
for test in (test1, test2):
    # Each function's docstring labels its row in the timing report.
    print(f"{test.__doc__:<9} -> {Timer(test).timeit(10000):.2f}s")
c9d8c1afa3f1b4d2a7abfd78e68fad3af58628ed | Python | zhangxi0927/mycube | /mycube_code/mycube_3d.py | UTF-8 | 20,718 | 2.578125 | 3 | [] | no_license | from vpython import *
import sys
import serial
import glob
import random
import kociemba
import numpy as np
import cv2
fps=12  # animation frames per quarter-turn
turnNumber=0  # turn counter (currently unused)
scene.title = 'cube' # window title
# Face letter -> (sticker colour, outward unit normal of that face).
faces={'F': (color.green, vector(0, 0, 1)),
       'B': (color.blue, vector(0, 0, -1)),
       'U': (color.yellow, vector(0, 1, 0)),
       'L': (color.red, vector(-1, 0, 0)),
       'D': (color.white, vector(0, -1, 0)),
       'R': (color.orange, vector(1, 0, 0))}
# Build the 54 sticker boxes: 9 per face, created in the front plane and
# then rotated into place.
stickers = []
for face_color,face_axis in faces.values():
    for x in (-1,0,1):
        for y in (-1,0,1):
            # Every sticker starts in the same front plane at z = 1.5.
            sticker=box(color=face_color,pos=vector(x,y,1.5),length=0.98,height=0.98,width=0.05)
            # Rotation angle: every face except front/back needs a 90-degree turn.
            cos_angle=dot(vector(0,0,1),face_axis)
            # Rotation axis (cross product for side faces, x-axis otherwise).
            pivot=(cross(vector(0,0,1),face_axis) if cos_angle==0 else vector(1,0,0))
            # Rotate the sticker about the origin from the front plane onto its face.
            sticker.rotate(angle=acos(cos_angle),axis=pivot,origin=vector(0,0,0))
            stickers.append(sticker)
            #print(sticker.pos,sticker.color)
# Rotate parts of the cube in 3 dimensions
def rotate3D(key):
    """Animate one quarter-turn.

    `key` is a face letter (F/B/U/D/R/L), optionally followed by "'" to
    reverse the direction, or 'E' to rotate the whole cube about the
    vertical axis.
    """
    # The first character selects the face to turn.
    if key[0] in faces:
        # Look up the face's colour and outward axis.
        face_color, axis = faces[key[0]]
        # A trailing "'" turns +90 degrees, a bare letter turns -90 degrees.
        angle = ((pi / 2) if len(key)>1 else -pi / 2)
        for r in arange(0, angle, angle / fps):
            rate(fps)
            for sticker in stickers:
                # Only stickers on the chosen face's side of the cube
                # (positive projection onto the face axis) rotate.
                if dot(sticker.pos, axis) > 0.5:
                    sticker.rotate(angle=angle / fps, axis=axis,
                                   origin=vector(0, 0, 0))
    elif key[0] == 'E': #from right to left
        axis = vector(0, 0.5, 0)
        angle = ((pi / 2) if len(key)>1 else -pi / 2)
        for r in arange(0, angle, angle / fps):
            rate(fps)
            for sticker in stickers:
                sticker.rotate(angle=angle / fps, axis=axis,origin=vector(0, 0, 0))
#六个中心块颜色
# Colours of the six centre pieces (w=white, y=yellow, g=green, b=blue,
# o=orange, r=red).
d = 'w'
u = 'y'
f = 'g'
b = 'b'
r = 'o'
l = 'r'
# The 12 edge pieces: each dict maps a face slot to the sticker colour
# currently in it ('' means the piece has no sticker on that face).
uf = {'u': 'y', 'd': '', 'f': 'g', 'b': '', 'r': '', 'l': ''}
ur = {'u': 'y', 'd': '', 'f': '', 'b': '', 'r': 'o', 'l': ''}
ub = {'u': 'y', 'd': '', 'f': '', 'b': 'b', 'r': '', 'l': ''}
ul = {'u': 'y', 'd': '', 'f': '', 'b': '', 'r': '', 'l': 'r'}
df = {'u': '', 'd': 'w', 'f': 'g', 'b': '', 'r': '', 'l': ''}
dr = {'u': '', 'd': 'w', 'f': '', 'b': '', 'r': 'o', 'l': ''}
db = {'u': '', 'd': 'w', 'f': '', 'b': 'b', 'r': '', 'l': ''}
dl = {'u': '', 'd': 'w', 'f': '', 'b': '', 'r': '', 'l': 'r'}
fr = {'u': '', 'd': '', 'f': 'g', 'b': '', 'r': 'o', 'l': ''}
fl = {'u': '', 'd': '', 'f': 'g', 'b': '', 'r': '', 'l': 'r'}
br = {'u': '', 'd': '', 'f': '', 'b': 'b', 'r': 'o', 'l': ''}
bl = {'u': '', 'd': '', 'f': '', 'b': 'b', 'r': '', 'l': 'r'}
# The 8 corner pieces, same slot -> colour convention as the edges.
ufr = {'u': 'y', 'd': '', 'f': 'g', 'b': '', 'r': 'o', 'l': ''}
ufl = {'u': 'y', 'd': '', 'f': 'g', 'b': '', 'r': '', 'l': 'r'}
ubr = {'u': 'y', 'd': '', 'f': '', 'b': 'b', 'r': 'o', 'l': ''}
ubl = {'u': 'y', 'd': '', 'f': '', 'b': 'b', 'r': '', 'l': 'r'}
dfr = {'u': '', 'd': 'w', 'f': 'g', 'b': '', 'r': 'o', 'l': ''}
dfl = {'u': '', 'd': 'w', 'f': 'g', 'b': '', 'r': '', 'l': 'r'}
dbr = {'u': '', 'd': 'w', 'f': '', 'b': 'b', 'r': 'o', 'l': ''}
dbl = {'u': '', 'd': 'w', 'f': '', 'b': 'b', 'r': '', 'l': 'r'}
#记录信息
def turn(face,show=1):
    """Apply one quarter-turn to the logical cube state and animate it.

    `face` is one of "R","R'","U","U'","D","D'","L","L'","F","F'","B","B'".
    Each branch permutes the affected corner and edge sticker slots with a
    single parallel tuple assignment, then calls rotate3D for the visual.
    NOTE(review): `show` is currently unused (the echo code below is
    commented out and contains a `sys.stdou` typo).
    """
    global d, u, f, b, r, l, uf, ur, ub, ul, df, dr, db, dl, fr, fl, br, bl, ufr, ufl, ubr, ubl, dfr, dfl, dbr, dbl
    # global turnNbr
    # turnNbr+=1
    # if show ==1:
    #     sys.stdou.write(face+", ")
    # R face quarter-turn.
    if face=="R":
        ufr['u'], ufr['f'], ufr['r'], ubr['u'], ubr['b'], ubr['r'], dbr['d'], dbr['b'], dbr['r'], \
        dfr['d'], dfr['f'], dfr['r'] = dfr['f'], dfr['d'], dfr['r'], ufr['f'], ufr['u'], ufr['r'], \
        ubr['b'], ubr['u'], ubr['r'], dbr['b'], dbr['d'], dbr['r']
        ur['u'], ur['r'], br['b'], br['r'], dr['d'], dr['r'], fr['f'], fr['r'], \
        = fr['f'], fr['r'], ur['u'], ur['r'], br['b'], br['r'], dr['d'], dr['r']
        rotate3D("R")
    # R face quarter-turn, opposite direction.
    if face=="R'":
        dfr['f'], dfr['d'], dfr['r'], ufr['f'], ufr['u'], ufr['r'], ubr['b'], ubr['u'], ubr['r'], \
        dbr['b'], dbr['d'],dbr['r']=ufr['u'], ufr['f'], ufr['r'], ubr['u'], ubr['b'], ubr['r'], \
        dbr['d'], dbr['b'], dbr['r'], dfr['d'], dfr['f'], dfr['r']
        ur['u'], ur['r'], fr['f'], fr['r'], dr['d'], dr['r'], br['b'], br['r']\
        =br['b'], br['r'], ur['u'], ur['r'], fr['f'], fr['r'], dr['d'], dr['r']
        rotate3D("R'")
    # U face quarter-turn.
    if face=="U":
        ufr['u'], ufr['f'], ufr['r'], ubr['u'], ubr['b'], ubr['r'], ubl['u'], ubl['b'], ubl['l'], \
        ufl['u'], ufl['f'],ufl['l']=ubr['u'], ubr['r'], ubr['b'], ubl['u'], ubl['l'], ubl['b'], \
        ufl['u'], ufl['l'], ufl['f'], ufr['u'], ufr['r'], ufr['f']
        ur['u'], ur['r'], uf['u'], uf['f'], ul['u'], ul['l'], ub['u'], ub['b']\
        =ub['u'], ub['b'], ur['u'], ur['r'], uf['u'], uf['f'], ul['u'], ul['l']
        rotate3D("U")
    # U face quarter-turn, opposite direction.
    if face=="U'":
        ubr['u'], ubr['r'], ubr['b'], ubl['u'], ubl['l'], ubl['b'], ufl['u'], ufl['l'], ufl['f'], \
        ufr['u'], ufr['r'], ufr['f']=ufr['u'], ufr['f'], ufr['r'], ubr['u'], ubr['b'], ubr['r'], \
        ubl['u'], ubl['b'], ubl['l'], ufl['u'], ufl['f'], ufl['l']
        ur['u'], ur['r'], uf['u'], uf['f'], ul['u'], ul['l'], ub['u'], ub['b']\
        =uf['u'], uf['f'], ul['u'], ul['l'], ub['u'], ub['b'], ur['u'], ur['r']
        rotate3D("U'")
    # D face quarter-turn.
    if face=="D":
        dbr['d'], dbr['r'], dbr['b'], dbl['d'], dbl['l'], dbl['b'], dfl['d'], dfl['l'], dfl['f'], \
        dfr['d'], dfr['r'], dfr['f']=dfr['d'], dfr['f'], dfr['r'], dbr['d'], dbr['b'], dbr['r'], \
        dbl['d'], dbl['b'], dbl['l'], dfl['d'], dfl['f'], dfl['l']
        dr['d'], dr['r'], df['d'], df['f'], dl['d'], dl['l'], db['d'], db['b']\
        =df['d'], df['f'], dl['d'], dl['l'], db['d'], db['b'], dr['d'], dr['r']
        rotate3D("D")
    # D face quarter-turn, opposite direction.
    if face=="D'":
        dfr['d'], dfr['f'], dfr['r'], dbr['d'], dbr['b'], dbr['r'], dbl['d'], dbl['b'], dbl['l'], \
        dfl['d'], dfl['f'], dfl['l']=dbr['d'], dbr['r'], dbr['b'], dbl['d'], dbl['l'], dbl['b'], \
        dfl['d'], dfl['l'], dfl['f'], dfr['d'], dfr['r'], dfr['f']
        df['d'], df['f'], dr['d'], dr['r'], db['d'], db['b'], dl['d'], dl['l']\
        =dr['d'], dr['r'], db['d'], db['b'], dl['d'], dl['l'], df['d'], df['f']
        rotate3D("D'")
    # L face quarter-turn, opposite direction.
    if face=="L'":
        ufl['u'], ufl['f'], ufl['l'], ubl['u'], ubl['b'], ubl['l'], dbl['d'], dbl['b'], dbl['l'], \
        dfl['d'], dfl['f'], dfl['l']=dfl['f'], dfl['d'], dfl['l'], ufl['f'], ufl['u'], ufl['l'], \
        ubl['b'], ubl['u'], ubl['l'], dbl['b'], dbl['d'], dbl['l']
        ul['u'], ul['l'], bl['b'], bl['l'], dl['d'], dl['l'], fl['f'], fl['l']\
        =fl['f'], fl['l'], ul['u'], ul['l'], bl['b'], bl['l'], dl['d'], dl['l']
        rotate3D("L'")
    # L face quarter-turn.
    if face=="L":
        dfl['f'], dfl['d'], dfl['l'], ufl['f'], ufl['u'], ufl['l'], ubl['b'], ubl['u'], ubl['l'], \
        dbl['b'], dbl['d'], dbl['l']=ufl['u'], ufl['f'], ufl['l'], ubl['u'], ubl['b'], ubl['l'],\
        dbl['d'], dbl['b'], dbl['l'], dfl['d'], dfl['f'], dfl['l']
        ul['u'], ul['l'], fl['f'], fl['l'], dl['d'], dl['l'], bl['b'], bl['l']\
        =bl['b'], bl['l'], ul['u'], ul['l'], fl['f'], fl['l'], dl['d'], dl['l']
        rotate3D("L")
    # F face quarter-turn.
    if face=="F":
        ufr['u'], ufr['f'], ufr['r'], dfr['d'], dfr['f'], dfr['r'], dfl['d'], dfl['f'], dfl['l'], \
        ufl['u'], ufl['f'], ufl['l']=ufl['l'], ufl['f'], ufl['u'], ufr['r'], ufr['f'], ufr['u'], \
        dfr['r'], dfr['f'], dfr['d'], dfl['l'], dfl['f'], dfl['d']
        uf['u'], uf['f'], fl['l'], fl['f'], df['d'], df['f'], fr['r'], fr['f']\
        =fl['l'], fl['f'], df['d'], df['f'], fr['r'], fr['f'], uf['u'], uf['f']
        rotate3D("F")
    # F face quarter-turn, opposite direction.
    if face=="F'":
        ufl['l'], ufl['f'], ufl['u'], ufr['r'], ufr['f'], ufr['u'], dfr['r'], dfr['f'], dfr['d'], \
        dfl['l'], dfl['f'], dfl['d']=ufr['u'], ufr['f'], ufr['r'], dfr['d'], dfr['f'], dfr['r'], \
        dfl['d'], dfl['f'], dfl['l'], ufl['u'], ufl['f'], ufl['l']
        fl['l'], fl['f'], df['d'], df['f'], fr['r'], fr['f'], uf['u'], uf['f']\
        =uf['u'], uf['f'], fl['l'], fl['f'], df['d'], df['f'], fr['r'], fr['f']
        rotate3D("F'")
    # B face quarter-turn, opposite direction.
    if face=="B'":
        ubr['u'], ubr['b'], ubr['r'], dbr['d'], dbr['b'], dbr['r'], dbl['d'], dbl['b'], dbl['l'], \
        ubl['u'], ubl['b'], ubl['l']=ubl['l'], ubl['b'], ubl['u'], ubr['r'], ubr['b'], ubr['u'], \
        dbr['r'], dbr['b'], dbr['d'], dbl['l'], dbl['b'], dbl['d']
        ub['u'], ub['b'], bl['l'], bl['b'], db['d'], db['b'], br['r'], br['b']\
        =bl['l'], bl['b'], db['d'], db['b'], br['r'], br['b'], ub['u'], ub['b']
        rotate3D("B'")
    # B face quarter-turn.
    if face=="B":
        ubl['l'], ubl['b'], ubl['u'], ubr['r'], ubr['b'], ubr['u'], dbr['r'], dbr['b'], dbr['d'], \
        dbl['l'], dbl['b'], dbl['d']=ubr['u'], ubr['b'], ubr['r'], dbr['d'], dbr['b'], dbr['r'], \
        dbl['d'], dbl['b'], dbl['l'], ubl['u'], ubl['b'], ubl['l']
        bl['l'], bl['b'], db['d'], db['b'], br['r'], br['b'], ub['u'], ub['b']\
        =ub['u'], ub['b'], bl['l'], bl['b'], db['d'], db['b'], br['r'], br['b']
        rotate3D("B")
def turnCube(show=1):#from right to left
    """Rotate the WHOLE cube about the vertical axis (right face to front).

    Permutes the centre letters and every edge/corner slot accordingly,
    then animates via rotate3D("E").
    """
    global d, u, f, b, r, l, uf, ur, ub, ul, df, dr, db, dl, fr, fl, br, bl, ufr, ufl, ubr, ubl, dfr, dfl, dbr, dbl
    # Cycle the four side-face centre colours.
    f,r,b,l=r,b,l,f
    ufr['u'], ufr['f'], ufr['r'], ubr['u'], ubr['b'], ubr['r'], \
    ubl['u'], ubl['b'], ubl['l'], ufl['u'], ufl['f'], ufl['l']\
    =ubr['u'], ubr['r'], ubr['b'], ubl['u'], ubl['l'], ubl['b'], \
    ufl['u'], ufl['l'], ufl['f'], ufr['u'], ufr['r'], ufr['f']
    dfr['d'], dfr['f'], dfr['r'], dbr['d'], dbr['b'], dbr['r'], \
    dbl['d'], dbl['b'], dbl['l'], dfl['d'], dfl['f'], dfl['l']\
    =dbr['d'], dbr['r'], dbr['b'], dbl['d'], dbl['l'], dbl['b'], \
    dfl['d'], dfl['l'], dfl['f'], dfr['d'], dfr['r'], dfr['f']
    ur['u'], ur['r'], uf['u'], uf['f'], ul['u'], ul['l'], ub['u'], ub['b']\
    =ub['u'], ub['b'], ur['u'], ur['r'], uf['u'], uf['f'], ul['u'], ul['l']
    df['d'], df['f'], dr['d'], dr['r'], db['d'], db['b'], dl['d'], dl['l']\
    =dr['d'], dr['r'], db['d'], db['b'], dl['d'], dl['l'], df['d'], df['f']
    fl['f'], fl['l'], fr['f'], fr['r'], br['b'], br['r'], bl['b'], bl['l']\
    =fr['r'], fr['f'], br['r'], br['b'], bl['l'], bl['b'], fl['l'], fl['f']
    rotate3D("E")
    if show==1:
        print("turn the cube")
def resetCube():
    """Restore the logical cube state to the solved configuration and print it.

    NOTE(review): only the bookkeeping state is reset — the on-screen
    stickers keep whatever orientation they currently have.
    """
    global d, u, f, b, r, l, uf, ur, ub, ul, df, dr, db, dl, fr, fl, br, bl, ufr, ufl, ubr, ubl, dfr, dfl, dbr, dbl
    d = 'w'
    u = 'y'
    f = 'g'
    b = 'b'
    r = 'o'
    l = 'r'
    uf = {'u': 'y', 'd': '', 'f': 'g', 'b': '', 'r': '', 'l': ''}
    ur = {'u': 'y', 'd': '', 'f': '', 'b': '', 'r': 'o', 'l': ''}
    ub = {'u': 'y', 'd': '', 'f': '', 'b': 'b', 'r': '', 'l': ''}
    ul = {'u': 'y', 'd': '', 'f': '', 'b': '', 'r': '', 'l': 'r'}
    df = {'u': '', 'd': 'w', 'f': 'g', 'b': '', 'r': '', 'l': ''}
    dr = {'u': '', 'd': 'w', 'f': '', 'b': '', 'r': 'o', 'l': ''}
    db = {'u': '', 'd': 'w', 'f': '', 'b': 'b', 'r': '', 'l': ''}
    dl = {'u': '', 'd': 'w', 'f': '', 'b': '', 'r': '', 'l': 'r'}
    fr = {'u': '', 'd': '', 'f': 'g', 'b': '', 'r': 'o', 'l': ''}
    fl = {'u': '', 'd': '', 'f': 'g', 'b': '', 'r': '', 'l': 'r'}
    br = {'u': '', 'd': '', 'f': '', 'b': 'b', 'r': 'o', 'l': ''}
    bl = {'u': '', 'd': '', 'f': '', 'b': 'b', 'r': '', 'l': 'r'}
    ufr = {'u': 'y', 'd': '', 'f': 'g', 'b': '', 'r': 'o', 'l': ''}
    ufl = {'u': 'y', 'd': '', 'f': 'g', 'b': '', 'r': '', 'l': 'r'}
    ubr = {'u': 'y', 'd': '', 'f': '', 'b': 'b', 'r': 'o', 'l': ''}
    ubl = {'u': 'y', 'd': '', 'f': '', 'b': 'b', 'r': '', 'l': 'r'}
    dfr = {'u': '', 'd': 'w', 'f': 'g', 'b': '', 'r': 'o', 'l': ''}
    dfl = {'u': '', 'd': 'w', 'f': 'g', 'b': '', 'r': '', 'l': 'r'}
    dbr = {'u': '', 'd': 'w', 'f': '', 'b': 'b', 'r': 'o', 'l': ''}
    dbl = {'u': '', 'd': 'w', 'f': '', 'b': 'b', 'r': '', 'l': 'r'}
    printCube()
def isFinished():
    """Return 1 when every piece matches the solved configuration, else 0."""
    global d, u, f, b, r, l, uf, ur, ub, ul, df, dr, db, dl, fr, fl, br, bl, ufr, ufl, ubr, ubl, dfr, dfl, dbr, dbl
    # Compare every centre, edge and corner against the solved-state values
    # used in resetCube().
    if d == 'w' and u == 'y' and f == 'g' and b == 'b' and r == 'o' and l == 'r' and \
        uf == {'u': 'y', 'd': '', 'f': 'g', 'b': '', 'r': '', 'l': ''} and \
        ur == {'u': 'y', 'd': '', 'f': '', 'b': '', 'r': 'o', 'l': ''} and \
        ub == {'u': 'y', 'd': '', 'f': '', 'b': 'b', 'r': '', 'l': ''} and \
        ul == {'u': 'y', 'd': '', 'f': '', 'b': '', 'r': '', 'l': 'r'} and \
        df == {'u': '', 'd': 'w', 'f': 'g', 'b': '', 'r': '', 'l': ''} and \
        dr == {'u': '', 'd': 'w', 'f': '', 'b': '', 'r': 'o', 'l': ''} and \
        db == {'u': '', 'd': 'w', 'f': '', 'b': 'b', 'r': '', 'l': ''} and \
        dl == {'u': '', 'd': 'w', 'f': '', 'b': '', 'r': '', 'l': 'r'} and \
        fr == {'u': '', 'd': '', 'f': 'g', 'b': '', 'r': 'o', 'l': ''} and \
        fl == {'u': '', 'd': '', 'f': 'g', 'b': '', 'r': '', 'l': 'r'} and \
        br == {'u': '', 'd': '', 'f': '', 'b': 'b', 'r': 'o', 'l': ''} and \
        bl == {'u': '', 'd': '', 'f': '', 'b': 'b', 'r': '', 'l': 'r'} and \
        ufr == {'u': 'y', 'd': '', 'f': 'g', 'b': '', 'r': 'o', 'l': ''} and \
        ufl == {'u': 'y', 'd': '', 'f': 'g', 'b': '', 'r': '', 'l': 'r'} and \
        ubr == {'u': 'y', 'd': '', 'f': '', 'b': 'b', 'r': 'o', 'l': ''} and \
        ubl == {'u': 'y', 'd': '', 'f': '', 'b': 'b', 'r': '', 'l': 'r'} and \
        dfr == {'u': '', 'd': 'w', 'f': 'g', 'b': '', 'r': 'o', 'l': ''} and \
        dfl == {'u': '', 'd': 'w', 'f': 'g', 'b': '', 'r': '', 'l': 'r'} and \
        dbr == {'u': '', 'd': 'w', 'f': '', 'b': 'b', 'r': 'o', 'l': ''} and \
        dbl == {'u': '', 'd': 'w', 'f': '', 'b': 'b', 'r': '', 'l': 'r'}:
        return 1
    else:
        return 0
def scramble(scranbleNum=10,show=1):
    """Apply `scranbleNum` random quarter-turns to the cube.

    Each move is drawn uniformly from the 12 face turns; the moves are
    echoed to stdout and the resulting cube state is printed.  `show` is
    kept for interface compatibility (turn() no longer uses it either).
    """
    allMoves=["R","R'","L","L'","U","U'","D","D'","F","F'","B","B'"]
    firstScramble = []
    for i in range(scranbleNum):
        # random.choice replaces the manual randint-into-index lookup and
        # the dead "i=0" initialisation that preceded the loop.
        m = random.choice(allMoves)
        firstScramble.append(m)
        turn(m)
        print(i," ",m)
    printCube()
    print(firstScramble)
# Map this program's colour letters to the face letters kociemba expects
# (green=F, yellow=U, blue=B, orange=R, red=L, white=D).
sign_conv={
    'g' : 'F',
    'y' : 'U',
    'b' : 'B',
    'o' : 'R',
    'r' : 'L',
    'w' : 'D'
}
def solveByKociemba(state):
    """Convert `state` into kociemba's 54-character facelet string and solve.

    `state` maps face names to the nine colour letters of that face, in the
    facelet order the solver expects (up, right, front, down, left, back).
    Returns the solution as a space-separated move string.
    """
    raw = ''
    for i in state:
        for j in state[i]:
            raw += sign_conv[j]
    print(raw)
    # Solve once and reuse the result — the original called kociemba.solve()
    # twice (once for the print, once for the return).
    solution = kociemba.solve(raw)
    print("answer:", solution)
    return solution
# Snapshot of the current cube state in the face order kociemba expects
# (up, right, front, down, left, back), nine facelets per face.
stateForKociemba= {
    'up' : [ubl['u'],ub['u'],ubr['u'],ul['u'],u,ur['u'],ufl['u'],uf['u'],ufr['u']],
    'right': [ufr['r'], ur['r'], ubr['r'], fr['r'], r, br['r'], dfr['r'], dr['r'], dbr['r']],
    'front':[ufl['f'],uf['f'],ufr['f'],fl['f'],f,fr['f'],dfl['f'],df['f'],dfr['f']],
    'down':[dfl['d'],df['d'],dfr['d'],dl['d'],d,dr['d'],dbl['d'],db['d'],dbr['d']],
    'left':[ubl['l'],ul['l'],ufl['l'],bl['l'],l,fl['l'],dbl['l'],dl['l'],dfl['l']],
    'back':[ubr['b'],ub['b'],ubl['b'],br['b'],b,bl['b'],dbr['b'],db['b'],dbl['b']]
    # Solved-state reference values, kept for debugging:
    # 'up':['y','y','y','y','y','y','y','y','y',],
    # 'right':['o','o','o','o','o','o','o','o','o',],
    # 'front':['g','g','g','g','g','g','g','g','g',],
    # 'down':['w','w','w','w','w','w','w','w','w',],
    # 'left':['r','r','r','r','r','r','r','r','r',],
    # 'back':['b','b','b','b','b','b','b','b','b',]
}
def solveMove(answer):
    """Execute a solution sequence, expanding double turns into two turns."""
    for index, move in enumerate(answer):
        print(move)
        print(len(move))
        if len(move)>1 and move[1]=='2':
            # A double turn such as "R2" is performed as two quarter-turns.
            face = move[0]
            turn(face)
            turn(face)
        else:
            turn(move)
        print(index," ",move)
        printCube()
def printCube():
    """Print the cube state as an unfolded net (U on top, D on the bottom,
    L/F/R/B side by side in the middle band)."""
    print('\n\t'+ubl['u']+ub['u']+ubr['u']+'\n\t'+ul['u'] + u + ur['u']+'\n\t'+ufl['u'] + uf['u'] + ufr['u']+'\n')
    print(ubl['l'] + ul['l'] + ufl['l']+"   "+ufl['f'] + uf['f'] + ufr['f']+"   "+ufr['r'] + ur['r'] + ubr['r']+"   "+ubr['b'] + ub['b'] + ubl['b'] + "\n" )
    print(bl['l'] + l + fl['l'] + "   "+fl['f'] + f + fr['f'] + "   "+fr['r'] + r + br['r'] + "   "+br['b'] + b + bl['b'] + "\n")
    print(dbl['l'] + dl['l'] + dfl['l'] + "   "+dfl['f'] + df['f'] + dfr['f'] + "   "+dfr['r'] + dr['r'] + dbr['r'] + "   "+dbr['b'] + db['b'] + dbl['b']+"\n")
    print("\t"+dfl['d'] + df['d'] + dfr['d']+ "\n\t" + dl['d'] + d + dr['d'] + "\n\t" + dbl['d'] + db['d'] + dbr['d'] + "\n")
    print("********************************************")
# from PIL import ImageGrab
# import numpy as np
# import cv2 as cv
# import imageio
# import time
# cv.namedWindow("grab", cv.WINDOW_NORMAL)
# buff = []
# size = (0, 0, 1200, 1200)
# p = ImageGrab.grab(size)
# x, y = p.size
# while True:
# im = ImageGrab.grab(size)
# img = cv.cvtColor(np.array(im), cv.COLOR_RGB2BGR)
# # video.write(img)
# cv.imshow("grab", img)
# buff.append(img)
# if cv.waitKey(1) & 0xFF == ord('q'):
# break
# Scramble the cube, then let the user turn faces from the keyboard:
# upper-case letters turn a face one way, lower-case the other, 'E'
# rotates the whole cube, 'q' quits the loop.
scramble()
while True:
    evt=scene.waitfor('keydown')
    command=evt.key
    if command=="F":
        turn("F")
    elif command=="B":
        turn("B")
    elif command=="U":
        turn("U")
    elif command=="D":
        turn("D")
    elif command=="R":
        turn("R")
    elif command=="L":
        turn("L")
    elif command=="f":
        turn("F'")
    elif command=="b":
        turn("B'")
    elif command=="u":
        turn("U'")
    elif command=="d":
        turn("D'")
    elif command=="r":
        turn("R'")
    elif command=="l":
        turn("L'")
    elif command=="E":
        turnCube()
    elif command=="q":
        break
    else:
        continue
# command=input("enter your command: ")
# if command=="F":
# turn("F")
# elif command=="B":
# turn("B")
# elif command=="U":
# turn("U")
# elif command=="D":
# turn("D")
# elif command=="R":
# turn("R")
# elif command=="L":
# turn("L")
# elif command=="F'":
# turn("F'")
# elif command=="B'":
# turn("B'")
# elif command=="U'":
# turn("U'")
# elif command=="D'":
# turn("D'")
# elif command=="R'":
# turn("R'")
# elif command=="L'":
# turn("L'")
# elif command=="E":
# turnCube()
# elif command=="E'":
# turnCube()
# elif command=="quit":
# break
# Re-snapshot the cube state after the interactive session so the solver
# sees the final configuration.
printCube()
stateForKociemba= {
    'up' : [ubl['u'],ub['u'],ubr['u'],ul['u'],u,ur['u'],ufl['u'],uf['u'],ufr['u']],
    'right': [ufr['r'], ur['r'], ubr['r'], fr['r'], r, br['r'], dfr['r'], dr['r'], dbr['r']],
    'front':[ufl['f'],uf['f'],ufr['f'],fl['f'],f,fr['f'],dfl['f'],df['f'],dfr['f']],
    'down':[dfl['d'],df['d'],dfr['d'],dl['d'],d,dr['d'],dbl['d'],db['d'],dbr['d']],
    'left':[ubl['l'],ul['l'],ufl['l'],bl['l'],l,fl['l'],dbl['l'],dl['l'],dfl['l']],
    'back':[ubr['b'],ub['b'],ubl['b'],br['b'],b,bl['b'],dbr['b'],db['b'],dbl['b']]
    # Solved-state reference values, kept for debugging:
    # 'up':['y','y','y','y','y','y','y','y','y',],
    # 'right':['o','o','o','o','o','o','o','o','o',],
    # 'front':['g','g','g','g','g','g','g','g','g',],
    # 'down':['w','w','w','w','w','w','w','w','w',],
    # 'left':['r','r','r','r','r','r','r','r','r',],
    # 'back':['b','b','b','b','b','b','b','b','b',]
}
print(stateForKociemba)
# while True:
# command=input("enter your command: ")
# print(command)
# print(type(command))
# print(command[0])
# if command=="F":
# rotate3D("F")
# elif command=="B":
# rotate3D("B")
# elif command=="U":
# rotate3D("U")
# elif command=="D":
# rotate3D("D")
# elif command=="R":
# rotate3D("R")
# elif command=="L":
# rotate3D("L")
# elif command=="F'":
# rotate3D("F'")
# elif command=="B'":
# rotate3D("B'")
# elif command=="U'":
# rotate3D("U'")
# elif command=="D'":
# rotate3D("D'")
# elif command=="R'":
# rotate3D("R'")
# elif command=="L'":
# rotate3D("L'")
# elif command=="E":
# rotate3D("E'")
# elif command=="E'":
# rotate3D("E'")
# elif command=="quit":
# break
def search_serial_ports():
    """Return the serial port names that can currently be opened.

    Candidate device names are chosen per platform, then each candidate is
    probed by opening and immediately closing it; ports that raise are
    skipped.
    """
    platform = sys.platform
    if platform.startswith('win'):
        ports = ['COM%s' % (i + 1) for i in range(256)]
    elif platform.startswith('linux') or platform.startswith('cygwin'):
        ports = glob.glob('/dev/tty[A-Za-z]*')
    elif platform.startswith('darwin'):
        ports = glob.glob('/dev/tty.*')
    else:
        raise EnvironmentError("unsupported platform!(windows,mac,linux,cygwin are supported!)")
    available = []
    for port in ports:
        try:
            probe = serial.Serial(port)
            probe.close()
        except (OSError, serial.SerialException):
            continue
        available.append(port)
    return available
# Solve the current cube state and animate the solution move by move.
answer=solveByKociemba(stateForKociemba)
answer=answer.split()
print(answer)
solveMove(answer)
# # 连接串口
# com=search_serial_ports()
# #115200是波特率
# serial = serial.Serial(com[0], 115200, timeout=2) # 连接COM14,波特率位115200
# if serial.isOpen():
# print('串口已打开')
# # 说白了Python3的字符串的编码语言用的是unicode编码,由于Python的字符串类型是str,
# # 在内存中以Unicode表示,一个字符对应若干字节,如果要在网络上传输,
# # 或保存在磁盘上就需要把str变成以字节为单位的bytes
# # python对bytes类型的数据用带b前缀的单引号或双引号表示:
# data = bytes(answer, encoding='utf-8') # 发送的数据
# serial.write(data) # 串口写数据
# serial.write(b'\r\n') # 为了配合下位机,用0X0D 0X0A做结束标志
# print('You Send Data:', data)
# while True:
# data = serial.read(20) # 串口读20位数据
# if data != b'':
# break
# print('receive data is :', data)
# else:
# print('串口未打开')
# serial.close()
# if serial.isOpen():
# print('串口未关闭')
# else:
# print('串口已关闭')
| true |
7eba93896858bb2ff8a3b18a03c5c523bc5b71d5 | Python | duguxy/pycoldatom | /pycoldatom/functions/centerofmass.py | UTF-8 | 1,545 | 3.546875 | 4 | [] | no_license | #!/usr/bin/env python
"""Center of mass algorithm based on Fourier transform and filtering"""
import numpy as np
def center_of_mass(img):
    """Find the center of mass of a focused spot on a noisy background.

    This is done by the Fourier transform method as discussed by Weisshaar et al.
    (http://www.mnd-umwelttechnik.fh-wiesbaden.de/pig/weisshaar_u5.pdf), which
    is insensitive to noise. This usually skews the result towards the center of
    the image for the classical CoM algorithm. Each axis is reduced to the phase
    of its first Fourier component, which is mapped back to a coordinate.
    A tuple with the CoM coordinates is returned.

    **Inputs**

    * img: 2D array-like, containing image data

    **Outputs**

    * com: tuple, containing the (row, column) coordinates of the center of mass
    """
    # np.matrix is deprecated (and overloads * as matrix multiplication);
    # plain ndarrays with explicit dot products compute the same sums.
    img = np.asarray(img, dtype=float)
    rbnd, cbnd = img.shape
    rows = np.arange(rbnd)
    sin_a = np.sin((rows - 1) * 2 * np.pi / (rbnd - 1))
    cos_a = np.cos((rows - 1) * 2 * np.pi / (rbnd - 1))
    cols = np.arange(cbnd)
    sin_b = np.sin((cols - 1) * 2 * np.pi / (cbnd - 1))
    cos_b = np.cos((cols - 1) * 2 * np.pi / (cbnd - 1))
    # First-harmonic components of the mass distribution along each axis.
    row_mass = img.sum(axis=1)
    col_mass = img.sum(axis=0)
    a = np.dot(cos_a, row_mass)
    b = np.dot(sin_a, row_mass)
    c = np.dot(cos_b, col_mass)
    d = np.dot(sin_b, col_mass)
    # Quadrant corrections so arctan recovers the full 0..2*pi phase.
    if a > 0:
        rphi = 0 if b > 0 else 2 * np.pi
    else:
        rphi = np.pi
    if c > 0:
        cphi = 0 if d > 0 else 2 * np.pi
    else:
        cphi = np.pi
    # Map the phases back to pixel coordinates.
    x = (np.arctan(b / a) + rphi) * (rbnd - 1) / (2 * np.pi) + 1
    y = (np.arctan(d / c) + cphi) * (cbnd - 1) / (2 * np.pi) + 1
    com = (x, y)
    return com
b6e2874102f3b69a783f39030d2ea5c5d68e408c | Python | MJDeeks/AFPwork | /splicing_introns.py | UTF-8 | 612 | 3.78125 | 4 | [] | no_license | #part 1
my_dna = 'ATCGATCGATCGATCGACTGACTAGTCATAGCTATGCATGTAGCTACTCGATCGATCGATCGATCGATCGATCGATCGATCGATCATGCTATCATCGATCGATATCGATGCATCGACTACTAT'
ex1 = my_dna [0:63]
ex2 = my_dna [90:]
print('Original seqence {0} \n'.format(my_dna))
print('Coding ex 1: {0} \nCoding ex 2: {1}'.format(ex1, ex2))
#used [] instead of {}
#part2
coding_length = len(ex1+ex2)
total_length = len(my_dna)
print('{0:.2f}%'.format(coding_length/total_length*100))
#part 3
intron = my_dna[63:90]
print('Original sequence: {0} \n'.format(my_dna))
print("Coding DNA: {0}\nNon Coding DNA: {1}\nCoding DNA: {2}".format(ex1, intron.lower(),ex2)) | true |
750301025bad947c008f01df55e5a3749db2a970 | Python | nbudin/solidfuel | /solidfuel/Controllers/curves.py | UTF-8 | 7,245 | 3.40625 | 3 | [] | no_license | # -*- tab-width: 4 -*-
import math, random
class Curve:
    """Base class for time-parameterized value curves.

    A curve starts at time `start` and, when `length` is given, ends at
    `start + length`; otherwise it is open-ended and end() is None.
    Subclasses must override value().
    """

    def __init__(self, start, length=None):
        self._start = start
        self._length = length
        if self._length is not None:
            self._end = start + length
        else:
            self._end = None

    def start(self):
        """Return the curve's start time."""
        return self._start

    def length(self):
        """Return the curve's duration, or None when open-ended."""
        return self._length

    def end(self):
        """Return the curve's end time, or None when open-ended."""
        return self._end

    def value(self, time):
        """Return the curve's value at `time` (abstract).

        Raising a plain string is a TypeError in Python 3; raise the
        conventional exception for abstract methods instead.
        """
        raise NotImplementedError("Curve.value() must be overridden")
class InstantCurve(Curve):
    """A zero-length curve reporting one fixed value."""

    def __init__(self, time, value):
        super().__init__(time, 0.0)
        self._value = value

    def value(self, time):
        # The stored value is reported regardless of the queried time.
        return self._value
class ConstantCurve(Curve):
    """A curve holding one value over [start, end] and 0.0 outside it."""

    def __init__(self, start, value, length=None):
        super().__init__(start, length)
        self._value = value

    def value(self, time):
        # Open-ended curves (no length) never expire on the right.
        inside = self.start() <= time and (self.end() is None or time <= self.end())
        return self._value if inside else 0.0
class LinearCurve(Curve):
    """A curve that ramps linearly by `amount` over `length` seconds.

    Before `start` it reports the start value; after the end it clamps to
    the final value.
    """

    def __init__(self, start, length, startvalue, amount):
        super().__init__(start, length)
        self._startvalue = startvalue
        self._amount = amount
        self._endvalue = startvalue + amount
        self._amountpersecond = self._amount / self._length

    def value(self, time):
        if time < self._start:
            return self._startvalue
        if time < self._end:
            elapsed = time - self._start
            return self._startvalue + elapsed * self._amountpersecond
        return self._endvalue
class ParabolicCurve(Curve):
    # y = coefficient * x^2 + startvalue
    def __init__(self, start, startvalue, coefficient=None, length=None, until=None, decelerate=False):
        """Quadratic curve y = coefficient * x**2 + startvalue.

        Two of (coefficient, length, until) must be supplied; the third is
        derived from the other two.  With decelerate=True the curve is
        mirrored so it eases out instead of accelerating.
        """
        if coefficient is not None and until is not None:
            # until = coefficient * length^2 + startvalue
            # until - startvalue = coefficient * length^2
            # (until - startvalue) / coefficient = length^2
            # sqrt((until - startvalue) / coefficient) = length
            length = math.sqrt((until - startvalue) / coefficient)
        elif coefficient is not None and length is not None:
            until = coefficient * (length**2) + startvalue
        else:
            # until = coefficient * length^2 + startvalue
            # until - startvalue = coefficient * length^2
            # (until - startvalue) / (length^2) = coefficient
            coefficient = (until - startvalue) / (length**2)
        Curve.__init__(self, start, length)
        self._startvalue = startvalue
        self._endvalue = until
        self._coefficient = coefficient
        self._decelerate = decelerate
    def value(self, time):
        """Return the parabola's value at `time`."""
        x = (time - self._start)
        if self._decelerate:
            # Measure x backwards from the end so the slope tapers to zero.
            x -= self._length
        y = self._coefficient * (x ** 2) + self._startvalue
        if self._decelerate:
            # Mirror about the end value.  NOTE(review): this appears to
            # assume the curve rises from startvalue to endvalue — confirm
            # the intended behaviour for negative coefficients.
            y -= self._endvalue
            y *= -1
        return y
class SineWave(Curve):
    """An open-ended sine oscillation between `min` and `max`.

    `period` is the oscillation period, in the same time units as `start`.
    """

    def __init__(self, start, period, max=1.0, min=0.0):
        Curve.__init__(self, start)
        # Use math.pi instead of the truncated literal 3.14159 so the
        # frequency (and hence the actual period) is exact.
        self._freq = (2 * math.pi) / period
        self._amplitude = (max - min) / 2.0
        self._y = min + self._amplitude

    def value(self, time):
        """Return the oscillator's value at `time`."""
        return self._amplitude * math.sin(self._freq * (time - self._start)) + self._y
class CatmullRomSpline(Curve):
    """A Catmull-Rom spline through an ordered set of (time, value) points.

    The tangent at each control point is the average of the slopes of the
    neighbouring segments; the two endpoints get flat virtual neighbours.
    Evaluation clamps to the first/last control value outside the range.
    """

    def __init__(self, start, startvalue):
        self._points = [(start, startvalue)]

    def _findIndex(self, time):
        # Index of the first control point strictly after `time`.
        for i in range(len(self._points)):
            if time < self._points[i][0]:
                return i
        return len(self._points)

    def addPoint(self, time, value):
        """Insert a control point, keeping the list sorted by time."""
        self._points.insert(self._findIndex(time), (time, value))

    def start(self):
        return self._points[0][0]

    def end(self):
        return self._points[-1][0]

    def length(self):
        return self.end() - self.start()

    def value(self, time):
        """Evaluate the spline at `time`."""
        if time <= self._points[0][0]:
            return self._points[0][1]
        if time >= self._points[-1][0]:
            return self._points[-1][1]
        timeIndex = self._findIndex(time)
        startpoint = self._points[timeIndex - 1]
        endpoint = self._points[timeIndex]
        tdelta = endpoint[0] - startpoint[0]
        # "Smooth" virtual start and end points for the boundary tangents.
        if timeIndex > 1:
            prevpoint = self._points[timeIndex - 2]
        else:
            prevpoint = (startpoint[0] - tdelta, startpoint[1])
        if timeIndex < len(self._points) - 1:
            nextpoint = self._points[timeIndex + 1]
        else:
            nextpoint = (endpoint[0] + tdelta, endpoint[1])
        # s ranges linearly from 0.0 - 1.0 within each pair of points
        s = (time - startpoint[0]) / (endpoint[0] - startpoint[0])
        # hermite basis functions
        h1 = 2*(s**3) - 3*(s**2) + 1
        h2 = -2*(s**3) + 3*(s**2)
        h3 = s**3 - 2*(s**2) + s
        h4 = s**3 - s**2
        if startpoint[0] == prevpoint[0]:
            t1 = 0.5 * (startpoint[1] - prevpoint[1])
        else:
            t1 = 0.5 * (startpoint[1] - prevpoint[1]) / (startpoint[0] - prevpoint[0])
        if endpoint[0] == startpoint[0]:
            t1 += 0.5 * (endpoint[1] - startpoint[1])
        else:
            t1 += 0.5 * (endpoint[1] - startpoint[1]) / (endpoint[0] - startpoint[0])
        if endpoint[0] == startpoint[0]:
            t2 = 0.5 * (endpoint[1] - startpoint[1])
        else:
            t2 = 0.5 * (endpoint[1] - startpoint[1]) / (endpoint[0] - startpoint[0])
        if nextpoint[0] == endpoint[0]:
            t2 += 0.5 * (nextpoint[1] - endpoint[1])
        else:
            t2 += 0.5 * (nextpoint[1] - endpoint[1]) / (nextpoint[0] - endpoint[0])
        # BUG FIX: the cubic Hermite form is h1*p1 + h2*p2 + h3*t1 + h4*t2;
        # the original multiplied h4 by the basis function h2 instead of
        # the end tangent t2.
        value = h1*startpoint[1] + h2*endpoint[1] + h3*t1 + h4*t2
        return value
class Tremor(Curve):
    """Random jitter whose magnitude decays linearly to zero over `length`."""

    def __init__(self, start, length, magnitude=1.0):
        Curve.__init__(self, start, length)
        self._magnitude = magnitude

    def value(self, time):
        if time < self._start or time > self._end:
            return 0.0
        # The remaining fraction of the tremor scales the current magnitude.
        remaining = 1.0 - ((time - self._start) / self._length)
        cmag = self._magnitude * remaining
        # Uniform sample in [-cmag, +cmag].
        return (random.random() * (2 * cmag)) - cmag
class Motion(Curve):
    """Open-ended motion with smoothly interpolated speed changes.

    Position integrates the current speed from the last update point;
    changeSpeed() ramps towards a new speed over diff/accelspeed seconds
    using a CatmullRomSpline.
    """
    def __init__(self, start, startvalue, speed, accelspeed):
        Curve.__init__(self, start)
        self._lastUpdate = self._start
        self._lastValue = startvalue
        self._speed = speed
        self._speedCurve = None
        self._accelSpeed = accelspeed
    def changeSpeed(self, time, newSpeed):
        """Begin ramping from the current speed at `time` towards `newSpeed`."""
        curSpeed = self.speed(time)
        # Re-anchor position/time so integration restarts from this moment.
        self._lastValue = self.value(time)
        self._lastUpdate = time
        diff = abs(newSpeed - curSpeed)
        targetTime = time + (diff / self._accelSpeed)
        self._speedCurve = CatmullRomSpline(time, curSpeed)
        self._speedCurve.addPoint(targetTime, newSpeed)
    def speed(self, time):
        """Return the (possibly ramping) speed at `time`."""
        if self._speedCurve is None:
            return self._speed
        else:
            if time > self._speedCurve.end():
                # The ramp has finished; latch its final speed.
                self._speed = self._speedCurve.value(time)
                self._speedCurve = None
                return self._speed
            else:
                return self._speedCurve.value(time)
    def value(self, time):
        """Return the position at `time`.

        NOTE(review): this multiplies the INSTANTANEOUS speed by the whole
        elapsed interval, so position is approximate while a ramp is in
        progress — confirm that is acceptable for callers.
        """
        return self._lastValue + ((time - self._lastUpdate) * self.speed(time))
| true |