blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
2dc3168176ead09c643b6d8b67b66316f48bcabc | Python | yirano/project_data-structures | /src/binary_search_tree/binary_search_tree.py | UTF-8 | 4,541 | 4.5625 | 5 | [] | no_license | """
Binary search trees are a data structure that enforce an ordering over
the data they store. That ordering in turn makes it a lot more efficient
at searching for a particular piece of data in the tree.
This part of the project comprises two days:
1. Implement the methods `insert`, `contains`, `get_max`, and `for_each`
on the BSTNode class.
2. Implement the `in_order_print`, `bft_print`, and `dft_print` methods
on the BSTNode class.
"""
class BSTNode:
    """A node in a binary search tree.

    Ordering invariant: values < self.value live in the left subtree,
    values >= self.value (duplicates included) live in the right subtree.
    """

    def __init__(self, value):
        self.value = value
        self.left = None   # subtree of values < self.value
        self.right = None  # subtree of values >= self.value

    def insert(self, value):
        """Insert `value` into the subtree rooted at this node."""
        if value < self.value:
            if self.left is not None:
                self.left.insert(value)
            else:
                self.left = BSTNode(value)
        else:
            # Duplicates (value == self.value) go to the right.
            if self.right is not None:
                self.right.insert(value)
            else:
                self.right = BSTNode(value)

    def contains(self, target):
        """Return True if `target` exists in the subtree rooted at this node."""
        if self.value == target:
            return True
        if target < self.value:
            return self.left.contains(target) if self.left is not None else False
        return self.right.contains(target) if self.right is not None else False

    def get_max(self):
        """Return the largest value in the tree (the right-most node)."""
        node = self
        while node.right is not None:
            node = node.right
        return node.value

    def for_each(self, fn):
        """Call `fn` on the value of each node (pre-order traversal)."""
        # Preserve the original behavior of skipping a None root value.
        if self.value is not None:
            fn(self.value)
        if self.left is not None:
            self.left.for_each(fn)
        if self.right is not None:
            self.right.for_each(fn)

    def in_order_print(self):
        """Print every value in ascending order, one per line."""
        if self.left:
            self.left.in_order_print()
        print(self.value)
        if self.right:
            self.right.in_order_print()

    def bft_print(self):
        """Breadth-first print: one value per line, level by level.

        Bug fix: the original enqueued the root *value* but child *nodes*,
        so every line after the first printed a node object instead of its
        value.  Now the queue holds nodes only and values are printed.
        """
        queue = [self]
        while queue:
            node = queue.pop(0)
            print(node.value)
            if node.left:
                queue.append(node.left)
            if node.right:
                queue.append(node.right)

    def dft_print(self):
        """Iterative depth-first print (right subtree is visited first,
        matching the original stack order).

        Bug fix: the original printed the node object; print its value.
        """
        stack = [self]
        while stack:
            node = stack.pop()
            print(node.value)
            if node.left:
                stack.append(node.left)
            if node.right:
                stack.append(node.right)

    # Stretch Goals -------------------------
    def pre_order_dft(self):
        """Print values in pre-order (node, left, right). Was an empty stub."""
        print(self.value)
        if self.left:
            self.left.pre_order_dft()
        if self.right:
            self.right.pre_order_dft()

    def post_order_dft(self):
        """Print values in post-order (left, right, node). Was an empty stub."""
        if self.left:
            self.left.post_order_dft()
        if self.right:
            self.right.post_order_dft()
        print(self.value)
"""
This code is necessary for testing the `print` methods
"""
bst = BSTNode(1)
bst.insert(8)
bst.insert(5)
bst.insert(7)
bst.insert(6)
bst.insert(3)
bst.insert(4)
bst.insert(2)
print("bft_print")
bst.bft_print()
bst.dft_print()
print("elegant methods")
print("pre order")
bst.pre_order_dft()
print("in order")
bst.in_order_print()
print("post order")
bst.post_order_dft()
| true |
16bfaf1ba05d2424825e5bfa79cd9883adc3a970 | Python | abjklk/aps-2020 | /bitwise_substrings.py | UTF-8 | 186 | 4.0625 | 4 | [] | no_license | # Handout 4
# Program to obtain substrings of string using bitwise shift op
# NOTE(review): this actually enumerates *subsequences* (not contiguous
# substrings): each i in [0, 2**n) is a bitmask selecting which characters
# of `a` to keep, so all 2**n subsets are printed (including the empty one).
a = "ABCD"
n = len(a)
for i in range(1<<n):
    for j in range(n):
        # Keep character j when bit j of the mask is set.
        if i&(1<<j):
            print(a[j],end="")
    print()
535ed274f70398730b49d117e2c0d3346cf61f2d | Python | zorzonp/Mini_Project_2 | /main.py | UTF-8 | 3,637 | 3.375 | 3 | [] | no_license | ####################################################################
##
## Authors: Peter Zorzonello
## Last Update: 10/20/2018
## Class: EC601 - A1
## File_Name: Main.py
##
## Description:
## This is a test file to test all of the API calls in helper.
## This file will show how the model performed.
##
####################################################################
#import my API
import helper
#Import other libraries
import numpy as np
import pandas as pd
import os, os.path
import matplotlib.pyplot as plt
#global Variables
# NOTE(review): batch_size is assigned 0 and then immediately overwritten
# with 9000 — the first assignment is dead.
batch_size = 0
num_files_train = 0
batch_size = 9000
#the data directory is where all the images are
path = 'data'
#this can be any size but the bigger it is the slower it runs
image_size = 28
color_mode = 'rgb'
mode = 'binary'
classes = ['dog', 'cat']
# Count the training files in each class directory (the comments below say
# "batch size" but the loops actually count files).
for name in os.listdir(path+'/train/class_a/'):
    num_files_train = num_files_train + 1
#get batch size from number of files in directory
for name in os.listdir(path+'/train/class_b/'):
    num_files_train = num_files_train + 1
# Number of batches needed to cover the training set (float division).
num_batch = num_files_train/batch_size
print("num_files_train: ", num_files_train)
print("num_batchs: ", num_batch)
#get the test and train data from the images in 'data/'
train_data = helper.getTrainData(path, image_size, color_mode, batch_size, mode)
test_data, test_files_names = helper.getTestData(path, image_size, color_mode, batch_size, mode)
#get the user to choose which modle number to use
model_num = helper.getModelNumFromUser()
#Use the chosen model
if model_num == '3':
    model = helper.getModelThree(image_size)
elif model_num == '2':
    model = helper.getModelTwo(image_size)
else:
    model = helper.getModelOne(image_size)
opt = helper.getOptimizer()
#get the number of epochs the model should use
num_epoch = helper.getEpoch()
i = 0
print("Fit Model")
# Train batch by batch; note getTestSet() is also used here to pull batches
# out of the *training* generator — presumably it just extracts a
# (images, labels) pair; verify against helper.
while i < num_batch:
    train_set_images, train_set_labels = helper.getTestSet(train_data)
    model = helper.compile(model, optimizer = opt)
    #fit the data in the modle
    model = helper.fit(model, train_set_images, train_set_labels, num_epoch)
    i = i+1
print("Evaluate Model")
test_set_images, test_set_labels = helper.getTestSet(test_data)
#evaluate the model
loss, accuracy = helper.evalModel(model, test_set_images, test_set_labels)
#print data on how well the model did
print('Test accuracy: ', accuracy)
print('Test loss: ', loss)
helper.printSummary(model_num, opt, num_epoch)
#Do a prediction
print("Predict")
predictions = model.predict_classes(test_set_images)
print(predictions)
print(test_set_labels)
try:
    #get the first occurance of a CAT
    lbl_index = 0
    for label in test_set_labels:
        if label == 1:
            break
        lbl_index = lbl_index + 1
    #print the predicted class as text
    tmp_index = int(predictions[lbl_index])
    print("Class predict: ", classes[tmp_index])
    #if the class predicted matched the label then print pass, else print fail
    if(predictions[lbl_index] == test_set_labels[lbl_index]):
        print("PASS!")
    else:
        print("FAIL comparison!!")
except Exception as e:
    print("There was no cat picture in the testing set.")
try:
    #get the first occurance of a DOG
    lbl_index = 0
    for label in test_set_labels:
        if label == 0:
            break
        lbl_index = lbl_index + 1
    #print the predicted class as text
    tmp_index = int(predictions[lbl_index])
    print("Class predict:", classes[tmp_index])
    #if the class predicted matched the label then print pass, else print fail
    if(predictions[lbl_index] == test_set_labels[lbl_index]):
        print("PASS!")
    else:
        print("FAIL comparison!!")
except Exception as e:
    print("There was no dog picture in the testing set.")
| true |
dc6dfb1150e7553d63a14a5119cc64174a6632e4 | Python | bgbutler/TimeSeriesBook | /chapter_11/random_walk_stationarity.py | UTF-8 | 592 | 3.390625 | 3 | [] | no_license | # calculate the stationarity of a random walk
from random import seed
from random import random
from statsmodels.tsa.stattools import adfuller
# generate random walk
# Seeded so the walk (and therefore the ADF result) is reproducible.
seed(1)
random_walk = list()
# First step: -1 or +1 with equal probability.
random_walk.append(-1 if random() < 0.5 else 1)
for i in range(1, 1000):
    # Each subsequent value is the previous value plus a random +/-1 step.
    movement = -1 if random() < 0.5 else 1
    value = random_walk[i-1] + movement
    random_walk.append(value)
# statistical test
# Augmented Dickey-Fuller: a random walk has a unit root, so we expect a
# high p-value (fail to reject non-stationarity).
result = adfuller(random_walk)
print('ADF Statistic: %f' % result[0])
print('p-value: %f' % result[1])
print('Critical Values:')
# result[4] maps confidence levels ('1%', '5%', '10%') to critical values.
for key, value in result[4].items():
    print('\t%s: %.3f' % (key, value))
31cc65040b083727ce03f1140a71948f0158b5ed | Python | tojov/kat_ran_thru_my_keebord | /kat_ran/kat.py | UTF-8 | 462 | 3.453125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 4 23:12:15 2019
@author: abhijithneilabraham

Prints `c` random lowercase letters with small random delays, simulating a
cat running across the keyboard.
"""
import random
import time
# How many keystrokes the "cat" makes.
c=int(input('write in number how much you love a cat \n'))
def catran():
    # 97..122 are the ASCII codes for 'a'..'z'; print one random letter
    # without a trailing newline.
    a=random.randint(97,122)
    print(chr(a),end="")
r=0.05
for i in range(c):
    # Re-roll the delay every 5th keystroke for an uneven typing rhythm.
    if i%5==0:
        r=random.uniform(0.01,0.2)
    time.sleep(r)
    catran()
print("\n Oops,cat ran"+str(c)+"times through ya keyboard!")
6e31925f6c79949b0d2cb4fb124c648b24303af9 | Python | 1nkribbon/stepik_selenium | /alert_task.py | UTF-8 | 656 | 3.046875 | 3 | [] | no_license | import time
import math
from selenium import webdriver
def calc(x):
    """Return log(|12 * sin(x)|) as a string; x is coerced to int first."""
    magnitude = abs(12 * math.sin(int(x)))
    return str(math.log(magnitude))
link = "http://suninjuly.github.io/alert_accept.html"
browser = webdriver.Chrome()
try:
    browser.get(link)
    # First button triggers a JS alert that must be accepted before the
    # quiz input appears.
    first_button = browser.find_element_by_css_selector(".btn-primary")
    first_button.click()
    confirm1 = browser.switch_to.alert
    confirm1.accept()
    # Read the challenge number, compute the answer, and submit it.
    x = browser.find_element_by_id("input_value")
    answer = calc(int(x.text))
    answer_input = browser.find_element_by_id("answer")
    answer_input.send_keys(answer)
    button = browser.find_element_by_css_selector(".btn-primary")
    button.click()
finally:
    # Leave the result visible briefly, then always close the browser.
    time.sleep(10)
    browser.quit()
| true |
9cfc814633b7fcb02e3ab03868aeb7614ebb5845 | Python | mohammedkaifs/python-programs | /16_sets_in_python.py | UTF-8 | 518 | 4.34375 | 4 | [] | no_license | a = {1,3,4,5}
print(type(a))
print(a)
#Important : this syntax will create an empty dictionary and an empty set
a={}
print(type(a))  # <class 'dict'> — {} is a dict literal, not a set
# An empty set can be created using the below syntax:
b=set()
print(type(b))
# adding values to an empty set
b.add(4)
b.add(5)
b.add((7,8))  # tuples are hashable, so they can be set elements
# b.add({4:5}) # cannot add lists or dictionaries in sets (unhashable)
print(b)
print(len(b)) # prints the length of this set
b.remove(5) # removes 5 from set b
# b.remove(51) #cannot remove 51 because it is not present in set (KeyError)
print(b)
print(b.pop())  # removes and returns an arbitrary element
print(b)
| true |
5a20f241d1cecc6b034657094aa409d883fe921e | Python | ilailabs/python | /tutorials/trash/magic_method_operator_overloading.py | UTF-8 | 617 | 4.09375 | 4 | [] | no_license | # Python also provides magic methods for comparisons.
# __lt__ for <
# __le__ for <=
# __eq__ for ==
# __ne__ for !=
# __gt__ for >
# __ge__ for >=
#
# If __ne__ is not implemented, it returns the opposite of __eq__.
# There are no other relationships between the other operators.
# Example:
class SpecialString:
    """Demonstrates operator overloading: `a > b` prints every way of
    splicing a's content into b's content."""

    def __init__(self, cont):
        # The wrapped string content.
        self.cont = cont

    def __gt__(self, other):
        # For each split point of other.cont (including both ends), print
        # "<left>><self.cont>><right>".  Returns None, like the original.
        for cut in range(len(other.cont) + 1):
            left, right = other.cont[:cut], other.cont[cut:]
            print(left + ">" + self.cont + ">" + right)
# Demo: triggers SpecialString.__gt__, printing all splice positions.
spam = SpecialString("spam")
eggs = SpecialString("eggs")
spam > eggs
| true |
3e74cba96ddbd8e7e58aaf305d67bc50177de202 | Python | jboegeholz/flask_ajax | /ajax_lists.py | UTF-8 | 1,614 | 2.875 | 3 | [] | no_license | from time import sleep
from flask import Flask, jsonify
from flask import render_template
from flask import request
app = Flask(__name__)
@app.route('/')
def hello_world():
    """Render the index page with a static greeting."""
    hello_string = "Hello World"
    return render_template("index.html",
                           hello_message=hello_string)
@app.route('/static_item_list')
def static_item_list():
    """Render a fixed, unfiltered list of fruits."""
    fruits = ["Apple", "Banana", "Lemon"]
    return render_template("static_item_list.html", fruits=fruits)
@app.route('/item_list_with_filter', methods=["GET", "POST"])
def item_list_with_filter():
    """Render the fruit list; on POST, filter it by the submitted
    case-insensitive substring from the form field 'filter'."""
    fruits = ["Apple", "Banana", "Lemon"]
    if request.method == "POST":
        # Missing form field is treated as an empty filter (matches all).
        if "filter" in request.form:
            fruit_filter_value = request.form["filter"]
        else:
            fruit_filter_value = ""
        filtered_items = []
        for fruit in fruits:
            # Case-insensitive substring match.
            if fruit_filter_value.lower() in fruit.lower():
                filtered_items.append(fruit)
        fruits = filtered_items
    return render_template("item_list_with_filter.html",
                           fruits=fruits)
@app.route('/dynamic_item_list')
def dynamic_item_list():
    """Render the page whose list is populated client-side via AJAX
    (see the /_items endpoint)."""
    return render_template("dynamic_item_list.html")
@app.route('/_items')
def items():
    """JSON endpoint backing the dynamic list: returns fruits matching the
    case-insensitive 'filter' query parameter."""
    filter_value = request.args.get('filter', "", type=str)
    fruits = ["Apple", "Banana", "Lemon"]
    filtered_items = []
    for fruit in fruits:
        if filter_value.lower() in fruit.lower():
            filtered_items.append(fruit)
    sleep(1) # to simulate latency on the server side
    return jsonify(fruits=filtered_items)
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run()
| true |
141790bb72be7a39261013d2f8ebeb8ad5d140bf | Python | Rivarrl/leetcode_python | /leetcode/601-900/719.py | UTF-8 | 967 | 3.3125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# ======================================
# @File : 719.py
# @Time : 2020/12/25 10:08 上午
# @Author : Rivarrl
# ======================================
from algorithm_utils import *
class Solution:
    """
    [719. 找出第 k 小的距离对](https://leetcode-cn.com/problems/find-k-th-smallest-pair-distance/)

    Binary search on the answer (the distance value), not on the array:
    for a candidate distance x, count how many pairs have distance <= x,
    then narrow the range until the smallest x with count >= k is found.
    """
    @timeit
    def smallestDistancePair(self, nums: List[int], k: int) -> int:
        nums.sort()
        n = len(nums)
        def f(x):
            # Count pairs (i, j), i < j, with nums[j] - nums[i] <= x.
            # Two-pointer sweep: i only moves forward, so this is O(n).
            res = i = 0
            for j in range(1, n):
                while nums[j] - nums[i] > x:
                    i += 1
                res += j - i
            return res
        # Answer lies between 0 and the max possible distance.
        lo, hi = 0, nums[-1] - nums[0]
        while lo < hi:
            mi = lo + hi >> 1
            if f(mi) < k:
                # Fewer than k pairs within distance mi: answer is larger.
                lo = mi + 1
            else:
                hi = mi
        return lo
if __name__ == '__main__':
    a = Solution()
    # Sorted [1, 1, 3] → pair distances are 0, 2, 2; the 1st smallest is 0.
    a.smallestDistancePair([1,3,1], 1)
62a930374cc4a780ab3163e653b615ab9cb2278c | Python | shohei/chip-convex-hull | /chipdetect.py | UTF-8 | 1,214 | 2.625 | 3 | [] | no_license | import cv2, matplotlib
import numpy as np
import matplotlib.pyplot as plt
chips = cv2.imread('chip.png')
# Grayscale + blur before thresholding to reduce noise.
chips_gray = cv2.cvtColor(chips, cv2.COLOR_BGR2GRAY)
chips_preprocessed = cv2.GaussianBlur(chips_gray, (5, 5), 0)
# Binarize (bright background -> white), then invert so chips are white.
_, chips_binary = cv2.threshold(chips_preprocessed, 230, 255, cv2.THRESH_BINARY)
chips_binary = cv2.bitwise_not(chips_binary)
# NOTE(review): the 3-value unpack matches OpenCV 3.x findContours; OpenCV
# 4.x returns only (contours, hierarchy) — confirm the pinned version.
_, chips_contours, _ = cv2.findContours(chips_binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
chips_and_contours = np.copy(chips)
# Discard tiny contours (noise) below this area, in pixels.
min_chip_area = 60
large_contours = [cnt for cnt in chips_contours if cv2.contourArea(cnt) > min_chip_area]
bounding_img = np.copy(chips)
for contour in large_contours:
    # Min-area (rotated) rectangle: ((cx, cy), (w, h), angle).
    rect = cv2.minAreaRect(contour)
    box = cv2.boxPoints(rect)
    box = np.int0(box)
    cgx = int(rect[0][0])
    cgy = int(rect[0][1])
    # Top-left corner of the unrotated bounding box, used as text anchor.
    leftx = int(cgx - (rect[1][0]/2.0))
    lefty = int(cgy - (rect[1][1]/2.0))
    angle = round(rect[2],1)
    # Draw the rotated box, mark the centroid, and label the rotation.
    cv2.drawContours(bounding_img,[box],0,(0,0,255),2)
    cv2.circle(bounding_img,(cgx,cgy), 10, (255,0,0), -1)
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(bounding_img,'Rot: '+str(angle)+'[deg]',(leftx,lefty), font, 0.7, (0,0,0),2,cv2.LINE_AA)
plt.imshow(bounding_img)
plt.axis("off")
cv2.imwrite("result.png" , bounding_img)
plt.show()
a702924713f5d366d56f175082e03b5e87e19a39 | Python | rentainhe/interview-algorithm-collection | /剑指offer/offer-64.py | UTF-8 | 217 | 2.96875 | 3 | [] | no_license | # encoding: utf-8
class Solution:
    def sumNums(self, n: int) -> int:
        """Return 1 + 2 + ... + n by pairing terms instead of looping.

        Even n: n//2 pairs each summing to n + 1.
        Odd n: the same pairing leaves the middle term, folded in as + n.
        """
        half = n // 2
        return n * half + (half if n % 2 == 0 else n)
| true |
12b79fbcb1436e250e17b5ae6ff7a3755f9cdbb5 | Python | alexanu/Python_Trading_Snippets | /data/Netfonds_tick_and_processing/Netfonds_another.py | UTF-8 | 6,715 | 3.28125 | 3 | [] | no_license | import datetime
from datetime import timedelta
from pandas import DataFrame, concat, date_range, read_csv
class Lime:
    '''
    A simple API for extracting stock tick data from Netfonds.

    Parameters
    * start_date -- datetime, date beginning the retrieval window
    * end_date -- datetime ( optional ), date ending the retrieval window
    * exchange -- string ( optional ), ticker's exchange: ['Nasdaq', 'Nyse', 'Amex']
    * ticker -- string ( optional ), stock ticker symbol, with or without
      a Netfonds exchange extension.
    '''
    def __init__(self, start_date, end_date=None, exchange=None, ticker=None):
        self.start_date = self.initialize_date(start_date)
        self.end_date = self.initialize_date(end_date)
        # Bug fix: the original discarded the `ticker` argument entirely
        # (it always assigned None).
        self.ticker = ticker
        self._exchange = exchange
        self._file_format = 'csv'
        self._df = None
        self._exchanges = {
            'Nasdaq': '.O',
            'Nyse': '.N',
            'Amex': '.A'
        }
        self.exchange_extensions = ['O', 'N', 'A']
        self._url = 'http://www.netfonds.no/quotes/tradedump.php'
        self.uri = None

    def get_exchange(self):
        ''' Returns the exchange chosen '''
        return self._exchange

    def get_df(self):
        ''' Gets the stored tick data (None until data has been set). '''
        return self._df

    def set_df(self, dataframe):
        '''
        Appends `dataframe` to the stored tick data and re-processes it.

        Bug fix: the original ternary was inverted — it tried to concat
        when no data existed yet (concat with None) and *replaced* the
        stored data when appending was intended.

        Parameters
        * dataframe -- pandas.DataFrame()
        '''
        self._df = dataframe if self._df is None else concat([self._df, dataframe])
        self.process_data()

    def initialize_date(self, date):
        '''
        Returns today's date (parsed) when `date` is falsy, otherwise the
        parsed supplied date.

        Parameters
        * date -- datetime, date to be parsed
        '''
        if not date:
            date = datetime.date.today()
        return self.date_parse(date)

    def date_parse(self, date):
        '''
        Parses date to the YYYYMMDD string Netfonds expects.

        Parameters
        * date -- datetime, date to be parsed
        '''
        return date.strftime('%Y%m%d')

    def check_date(self, start, end):
        '''
        Checks whether the supplied retrieval window is acceptable:
        non-negative and at most 21 days (Netfonds keeps 21 days of ticks).

        Bug fix: the original chained comparison
        `timedelta(0) > (end - start) > timedelta(21)` could never be true
        (a value cannot be both below 0 and above 21 days), so invalid
        windows were silently accepted.

        Parameters
        * start -- datetime, date beginning the retrieval window
        * end -- datetime, date ending the retrieval window
        '''
        window = end - start
        if window < timedelta(0) or window > timedelta(days=21):
            raise LimeInvalidDate(start, end)
        return True

    def format_ticker_with_exchange_extenstion(self):
        '''Appends the Netfonds extension for self._exchange to the ticker.'''
        self.ticker = "{}{}".format(self.ticker,
                                    self._exchanges[self._exchange.title()])
        return self.ticker

    def validate_ticker_exchange_extenstion(self):
        '''Checks if the ticker has a valid exchange extension.

        Raises IndexError when the ticker has no '.' extension at all.
        '''
        extension = self.ticker.split('.')[1]
        return extension in self.exchange_extensions

    def check_ticker_exchange_extenstion(self):
        '''
        Ensures the ticker carries a Netfonds extension ('.N', '.O', '.A').
        If it is missing, derives it from the known exchange, or probes
        Netfonds when the exchange is unknown.

        Bug fix: the original's try/except/else raised LimeInvalidTicker in
        the *else* branch, i.e. whenever the ticker already had an
        extension and validation succeeded.  It also re-appended the
        extension after get_exchange_extension_from_ticker() had already
        added one.
        '''
        try:
            valid = self.validate_ticker_exchange_extenstion()
        except IndexError:
            # No extension present on the ticker.
            if self._exchange:
                self.format_ticker_with_exchange_extenstion()
            else:
                # Probing also sets self._exchange and the extended ticker.
                self.get_exchange_extension_from_ticker()
        else:
            if not valid:
                raise LimeInvalidTicker()
        return self.ticker

    def get_exchange_extension_from_ticker(self):
        '''
        Tries each of the three supported exchanges (Nasdaq, NYSE, Amex)
        against Netfonds and keeps the first extension that returns data.

        Bug fix: the original appended each candidate extension to
        self.ticker cumulatively ('AAPL.O', 'AAPL.O.N', ...) instead of
        trying each extension against the base ticker.
        '''
        base = self.ticker
        for key in self._exchanges.keys():
            self.ticker = "{}{}".format(base, self._exchanges[key])
            self._get_tick_data()
            if self._df is not None and (len(self._df.columns) > 1):
                self._exchange = key
                return self._exchange
        self.ticker = base
        raise LimeInvalidTicker()

    def set_start_end_dates(self, start, end=None):
        '''
        Validates and stores the parsed start/end dates.

        Bug fix: the original called self.get_date_today(), a method that
        does not exist, and passed a possibly-None `end` to check_date.

        Parameters
        * start -- datetime
        * end -- ( optional ) datetime, defaults to today's date
        '''
        if end is None:
            end = datetime.date.today()
        self.check_date(start, end)
        self.start_date = self.date_parse(start)
        self.end_date = self.date_parse(end)

    def process_data(self):
        '''
        Converts the 'time' column to datetimes and indexes the stored
        DataFrame by it.

        Bug fixes: the original dropped the set_index() result on the
        floor (the re-indexed frame was never stored), and the strptime
        conversion crashed on rows already converted by a previous pass —
        the conversion is now idempotent.
        '''
        df = self.get_df()
        try:
            df['time'] = df['time'].apply(
                lambda v: datetime.datetime.strptime(v, '%Y%m%dT%H%M%S')
                if isinstance(v, str) else v)
            self._df = df.set_index(df.time)
        except AttributeError:
            raise LimeInvalidQuery(self.uri)

    def _get_tick_data(self):
        '''
        Retrieves tick data from Netfonds for the current ticker and
        start_date, storing it via set_df().
        '''
        self.uri = '{}?date={}&paper={}&csv_format={}'.format(self._url,
                                                              self.start_date,
                                                              self.ticker,
                                                              self._file_format)
        self.set_df(read_csv(self.uri))

    def get_trades(self, ticker, exchange=None):
        '''
        Gets the trades made for a ticker on the current start_date.

        Bug fix: the original assigned `self.exchange` (an attribute used
        nowhere else) instead of `self._exchange`.

        Parameters
        * ticker -- string, stock ticker symbol
        * exchange -- string ( optional ), overrides the stored exchange
        '''
        if exchange:
            self._exchange = exchange
        self.ticker = ticker
        self.check_ticker_exchange_extenstion()
        self._get_tick_data()
        return self.get_df()

    def get_trade_history(self, ticker, start_date, end_date=None):
        '''
        Retrieves the trades made for a ticker over a range of business
        days (at most 21 days; Netfonds keeps no more).

        Bug fix: the original wrapped get_trades() — which already stores
        its result via set_df() — in another set_df() call, duplicating
        every day's rows.

        Parameters
        * ticker -- string, stock ticker symbol
        * start_date -- datetime, starting date of retrieval window
        * end_date -- datetime (optional), ending date, defaults to today
        '''
        self.ticker = ticker
        self.set_start_end_dates(start_date, end_date)
        for day in date_range(start=start_date, end=self.end_date, freq='B'):
            self.start_date = self.date_parse(day)
            self.get_trades(self.ticker)
        return self.get_df()
6001df227b41931e5959af7082509afa03305d6e | Python | strike1989/Text_Classification | /GRU.py | UTF-8 | 4,767 | 2.6875 | 3 | [] | no_license | #coding:utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import jieba
import pandas as pd
# Load the five news categories; drop rows with missing content.
df_technology = pd.read_csv("./data/technology_news.csv", encoding='utf-8')
df_technology = df_technology.dropna()
df_car = pd.read_csv("./data/car_news.csv", encoding='utf-8')
df_car = df_car.dropna()
df_entertainment = pd.read_csv("./data/entertainment_news.csv", encoding='utf-8')
df_entertainment = df_entertainment.dropna()
df_military = pd.read_csv("./data/military_news.csv", encoding='utf-8')
df_military = df_military.dropna()
df_sports = pd.read_csv("./data/sports_news.csv", encoding='utf-8')
df_sports = df_sports.dropna()
# Take roughly 20k articles per category (technology/car skip the first 1000).
technology = df_technology.content.values.tolist()[1000:21000]
car = df_car.content.values.tolist()[1000:21000]
entertainment = df_entertainment.content.values.tolist()[:20000]
military = df_military.content.values.tolist()[:20000]
sports = df_sports.content.values.tolist()[:20000]
# Stopword list, one word per line.
stopwords=pd.read_csv("data/stopwords.txt",index_col=False,quoting=3,sep="\t",names=['stopword'], encoding='utf-8')
stopwords=stopwords['stopword'].values
def preprocess_text(content_lines, sentences, category):
    """Tokenize each line with jieba, drop single-character tokens and
    stopwords, and append (" "-joined tokens, category) tuples to
    `sentences` (mutated in place).

    Lines that fail segmentation are silently skipped, preserving the
    original best-effort behavior.

    Bug fix: `except Exception,e` is Python-2-only syntax and is a
    SyntaxError under Python 3; the bound exception was unused anyway.
    """
    for line in content_lines:
        try:
            segs=jieba.lcut(line)
            segs = filter(lambda x:len(x)>1, segs)
            segs = filter(lambda x:x not in stopwords, segs)
            sentences.append((" ".join(segs), category))
        except Exception:
            continue
# Generate the training data: (tokenized text, category) pairs.
sentences = []
preprocess_text(technology, sentences, 'technology')
preprocess_text(car, sentences, 'car')
preprocess_text(entertainment, sentences, 'entertainment')
preprocess_text(military, sentences, 'military')
preprocess_text(sports, sentences, 'sports')
from sklearn.model_selection import train_test_split
x, y = zip(*sentences)
train_data, test_data, train_target, test_target = train_test_split(x, y, random_state=1234)
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib.layers.python.layers import encoders
learn = tf.contrib.learn
FLAGS = None
# Documents are truncated/padded to this many tokens.
MAX_DOCUMENT_LENGTH = 15
MIN_WORD_FREQUENCE = 1
EMBEDDING_SIZE = 50
global n_words
# Build the vocabulary and map each document to a fixed-length id sequence.
vocab_processor = learn.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH, min_frequency=MIN_WORD_FREQUENCE)
x_train = np.array(list(vocab_processor.fit_transform(train_data)))
x_test = np.array(list(vocab_processor.transform(test_data)))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
# Map category names to integer labels 1..5.
cate_dic = {'technology':1, 'car':2, 'entertainment':3, 'military':4, 'sports':5}
# NOTE(review): under Python 3, map() returns an iterator; pandas.Series
# consumes it, but this file's except-clause syntax suggests Python 2.
train_target = map(lambda x:cate_dic[x], train_target)
test_target = map(lambda x:cate_dic[x], test_target)
y_train = pandas.Series(train_target)
y_test = pandas.Series(test_target)
def rnn_model(features, target):
    """Text classification with an RNN (a GRU cell) — TF1 contrib Estimator
    model_fn returning (predictions, loss, train_op)."""
    # Convert indexes of words into embeddings.
    # This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
    # maps word indexes of the sequence into [batch_size, sequence_length,
    # EMBEDDING_SIZE].
    word_vectors = tf.contrib.layers.embed_sequence(
        features, vocab_size=n_words, embed_dim=EMBEDDING_SIZE, scope='words')
    # Split into list of embedding per word, while removing doc length dim.
    # word_list results to be a list of tensors [batch_size, EMBEDDING_SIZE].
    word_list = tf.unstack(word_vectors, axis=1)
    # Create a Gated Recurrent Unit cell with hidden size of EMBEDDING_SIZE.
    cell = tf.contrib.rnn.GRUCell(EMBEDDING_SIZE)
    # Create an unrolled Recurrent Neural Networks to length of
    # MAX_DOCUMENT_LENGTH and passes word_list as inputs for each unit.
    _, encoding = tf.contrib.rnn.static_rnn(cell, word_list, dtype=tf.float32)
    # Given encoding of RNN, take encoding of last step (e.g hidden size of the
    # neural network of last step) and pass it as features for logistic
    # regression over output classes.
    # NOTE(review): one-hot depth / logits width is 15, but cate_dic only
    # produces labels 1-5 — this works but wastes 9 unused classes; confirm.
    target = tf.one_hot(target, 15, 1, 0)
    logits = tf.contrib.layers.fully_connected(encoding, 15, activation_fn=None)
    loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
    # Create a training op.
    train_op = tf.contrib.layers.optimize_loss(
        loss,
        tf.contrib.framework.get_global_step(),
        optimizer='Adam',
        learning_rate=0.01)
    return ({
        'class': tf.argmax(logits, 1),
        'prob': tf.nn.softmax(logits)
    }, loss, train_op)
# Wrap the TF1 Estimator in a scikit-learn-compatible interface.
from tensorflow.contrib.learn.python import SKCompat
model_fn = rnn_model
classifier = SKCompat(learn.Estimator(model_fn=model_fn))
# Train and predict
classifier.fit(x_train, y_train, steps=1000)
y_predicted = classifier.predict(x_test)['class']
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
148f60527e05e59c46e4e51c9d52c858bc01bee5 | Python | Louka98/EasyML | /elbow.py | UTF-8 | 531 | 2.8125 | 3 | [] | no_license | from sklearn.clusters import KMeans
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def elbowvis(data, max_k=30):
    """Plot the elbow-method curve (WCSS vs. number of clusters) to help
    choose k for KMeans.

    Fixes vs. the original: missing ':' after the def (SyntaxError), the
    misspelled `matplotlib.pylot` import, the misspelled `sklearn.clusters`
    module, and the undefined global `data` (now a parameter).

    Parameters
    * data -- array-like of shape (n_samples, n_features) to cluster
    * max_k -- exclusive upper bound on cluster counts tried (default 30)
    """
    from sklearn.cluster import KMeans  # correct module name (was sklearn.clusters)
    wcss = []
    for k in range(1, max_k):
        kmeans = KMeans(n_clusters=k, init='k-means++', max_iter=300, n_init=10, random_state=0)
        kmeans.fit(data)
        # inertia_ is the within-cluster sum of squares for this k.
        wcss.append(kmeans.inertia_)
    plt.figure(figsize=(8, 8))
    plt.plot(range(1, max_k), wcss)
    plt.title('The Elbow Method Graph')
    plt.xlabel('Number of clusters')
    plt.ylabel('WCSS')
    plt.show()
| true |
0241936e6cd0073b71f7cfac850dc8c49f0cff84 | Python | GSSJacky/neural-painters-pytorch | /neural_painters/transforms.py | UTF-8 | 2,858 | 3.1875 | 3 | [
"MIT"
] | permissive | """
Contains various differentiable image transforms.
Loosely based on Lucid's transforms.py https://github.com/tensorflow/lucid/
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import random
import kornia
class RandomScale(nn.Module):
    """Module for randomly scaling a batch of images."""

    def __init__(self, scales):
        """
        :param scales: list of scale factors to randomly choose from, e.g.
            [0.8, 1.0, 1.2] rescales the image by 0.8, 1.0, or 1.2.
        """
        super(RandomScale, self).__init__()
        self.scales = scales

    def forward(self, x: torch.Tensor):
        # random.choice is the idiomatic way to draw one element
        # (replaces the manual randint-into-index of the original).
        scale = random.choice(self.scales)
        return F.interpolate(x, scale_factor=scale, mode='bilinear')
class RandomCrop(nn.Module):
    """Module that crops a random window out of a batch of images."""

    def __init__(self, size: int):
        """
        :param size: number of pixels removed along each spatial axis,
            e.g. 8 shrinks both height and width by 8.
        """
        super(RandomCrop, self).__init__()
        self.size = size

    def forward(self, x: torch.Tensor):
        _, _, height, width = x.shape
        # Random offsets in [0, size]; the window keeps (dim - size) pixels.
        top = random.randint(0, self.size)
        left = random.randint(0, self.size)
        bottom = height - self.size + top
        right = width - self.size + left
        return x[:, :, top:bottom, left:right]
class RandomRotate(nn.Module):
    """Module for randomly rotating a batch of images (via kornia)."""
    def __init__(self, angle=10, same_throughout_batch=False):
        """
        :param angle: Angle scale in degrees; actual angles are drawn from a
            normal distribution scaled by this value.
        :param same_throughout_batch: Degree of rotation, although random, is kept the same throughout a single batch.
        """
        super(RandomRotate, self).__init__()
        self.angle=angle
        self.same_throughout_batch = same_throughout_batch

    def forward(self, img: torch.Tensor):
        b, _, h, w = img.shape
        # create transformation (rotation)
        if not self.same_throughout_batch:
            # One independent random angle per image in the batch.
            angle = torch.randn(b, device=img.device) * self.angle
        else:
            # Single random angle, replicated across the batch.
            angle = torch.randn(1, device=img.device) * self.angle
            angle = angle.repeat(b)
        # Rotation center: the middle of each image, as (x, y) per sample.
        center = torch.ones(b, 2, device=img.device)
        center[..., 0] = img.shape[3] / 2  # x
        center[..., 1] = img.shape[2] / 2  # y
        # define the scale factor (1.0 — rotation only, no zoom)
        scale = torch.ones(b, device=img.device)
        # NOTE(review): kornia's get_rotation_matrix2d/warp_affine signatures
        # changed across versions (scale became 2D in newer releases) —
        # confirm against the pinned kornia version.
        M = kornia.get_rotation_matrix2d(center, angle, scale)
        img_warped = kornia.warp_affine(img, M, dsize=(h, w))
        return img_warped
class Normalization(nn.Module):
    """Channel-wise (x - mean) / std normalization for image batches."""

    def __init__(self, mean, std):
        super(Normalization, self).__init__()
        # Reshape to [C, 1, 1] so the statistics broadcast directly over
        # image tensors of shape [B, C, H, W].
        self.mean = torch.tensor(mean).view(-1, 1, 1)
        self.std = torch.tensor(std).view(-1, 1, 1)

    def forward(self, img):
        # Broadcasted channel-wise standardization (non-in-place).
        return img.sub(self.mean).div(self.std)
| true |
dab89966eed981400b5add8d42eebf3546520e4b | Python | AatifTripleA/tictactoe_player_vs_player | /tictactoe_ply_vs_ply.py | UTF-8 | 6,097 | 4.09375 | 4 | [] | no_license | # Tic Tac Toe
import random
class TicTacToe:
    """Holds a tic-tac-toe board and the rules for playing on it.

    The board is a list of 10 one-character strings laid out like a phone
    keypad (index 0 is unused): 7-8-9 on top, 4-5-6 in the middle, 1-2-3
    on the bottom.  Empty squares hold ' '.
    """

    def __init__(self, board):
        self.board = board

    def __repr__(self):
        return "<{} board='{}'>".format(self.__class__.__name__, self.board)

    def drawBoard(self):
        """Print the current board, top row first."""
        cells = self.board
        for row_start in (7, 4, 1):
            print(' | |')
            print(' ' + cells[row_start] + ' | ' + cells[row_start + 1] + ' | ' + cells[row_start + 2])
            print(' | |')
            if row_start != 1:
                print('-----------')

    def makeMove(self, letter, move):
        """Claim square `move` for `letter` (no validation here)."""
        self.board[move] = letter

    def isWinner(self, le):
        """Return True if `le` occupies any of the eight winning lines."""
        winning_lines = (
            (7, 8, 9), (4, 5, 6), (1, 2, 3),   # rows
            (7, 4, 1), (8, 5, 2), (9, 6, 3),   # columns
            (7, 5, 3), (9, 5, 1),              # diagonals
        )
        return any(all(self.board[i] == le for i in line)
                   for line in winning_lines)

    def getBoardCopy(self):
        """Return a duplicate of the board list (cells are strings, so a
        shallow copy is enough)."""
        return list(self.board)

    def isSpaceFree(self, move):
        """Return True if square `move` has not been claimed yet."""
        return self.board[move] == ' '

    def getPlayerMove(self, playerNum):
        """Prompt player `playerNum` until they type a free square 1-9."""
        valid_choices = set('123456789')
        while True:
            print('Player' + str(playerNum) + ': What is your next move? (1-9)')
            choice = input()
            if choice in valid_choices and self.isSpaceFree(int(choice)):
                return int(choice)

    def isBoardFull(self):
        """Return True when every square 1-9 has been taken."""
        return all(not self.isSpaceFree(i) for i in range(1, 10))
def inputPlayerLetter():
    """Player one always plays 'X', player two 'O'."""
    return list('XO')
def whoGoesFirst():
    """Coin-flip which player's name goes first (uses globals p1Name/p2Name)."""
    return p2Name if random.randint(0, 1) == 0 else p1Name
    def playAgain():
        # This function returns True if the player wants to play again, otherwise it returns False.
        # Any answer beginning with 'y' (case-insensitive) counts as yes.
        print('Do you want to play again? (yes or no)')
        return input().lower().startswith('y')
print('Welcome to Tic Tac Toe! This is a two player game!')
instructions = input('Would you like to view the game\'s instructions?')
if instructions.startswith('y'):
    print('''The goal of Tic Tac Toe is to make an uninterrupted line from your chosen letter (x or o).
This line can be vertical, horizontal, or diagonal.\nDoing that wins you the round.
If you win 5 rounds before your partner, you win the game.\nA tie is possible in a round, but not in the overall game.\n
The board is set up like a keypad. The bottom row counts 1-3 from left to right, middle row 4-6, and top row 7-9.''')
# Match scores: the first player to win 5 rounds takes the overall game.
player_one_wins = 0
player_two_wins = 0
p1Name = input('Who will play x?')
p2Name = input('Who will play o?')
while player_one_wins < 5 and player_two_wins < 5:
    # Reset the board
    theBoard = [' '] * 10
    tictactoe = TicTacToe(theBoard)
    print(p1Name + ' has won ' + str(player_one_wins) + ' rounds.')
    print(p2Name + ' has won ' + str(player_two_wins) + ' rounds.')
    player1Letter, player2Letter = inputPlayerLetter()
    turn = whoGoesFirst()
    print(str(turn) + ' will go first.')
    gameIsPlaying = True
    # Inner loop: one round; alternates turns until a win, a tie, or break.
    while gameIsPlaying:
        if turn == p1Name:
            # Player's turn.
            tictactoe.drawBoard()
            move = tictactoe.getPlayerMove(1)
            tictactoe.makeMove(player1Letter, move)
            if tictactoe.isWinner(player1Letter):
                tictactoe.drawBoard()
                # NOTE(review): missing space before 'has' in this message.
                print('Hooray! ' + p1Name +'has won the game!')
                player_one_wins += 1
                gameIsPlaying = False
            else:
                if tictactoe.isBoardFull():
                    tictactoe.drawBoard()
                    print('The game is a tie!')
                    break
                else:
                    turn = p2Name
        else:
            # Player2's turn.
            tictactoe.drawBoard()
            move = tictactoe.getPlayerMove(2)
            tictactoe.makeMove(player2Letter, move)
            if tictactoe.isWinner(player2Letter):
                tictactoe.drawBoard()
                print('Hooray! ' + p2Name +' has won the game!')
                player_two_wins += 1
                gameIsPlaying = False
            else:
                if tictactoe.isBoardFull():
                    tictactoe.drawBoard()
                    print('The game is a tie!')
                    break
                else:
                    turn = p1Name
    if not playAgain():
        break
# Announce the match winner (only when someone actually reached 5 wins).
if player_one_wins > player_two_wins and player_one_wins >= 5:
    print(p1Name + ' has won!')
elif player_one_wins < player_two_wins and player_two_wins >= 5:
    print(p2Name + ' has won!')
| true |
d3ea0cd341a60bdcc2fe4004d97813f22ba36587 | Python | stefoxp/codewars | /PlayingWithPassphrases/code/play.py | UTF-8 | 588 | 3.515625 | 4 | [] | no_license | import string
def play_pass(s, n):
    """Encrypt passphrase `s` (the "Playing with passphrases" kata rules).

    Each digit d becomes 9 - d; each letter is shifted n places forward in
    the alphabet (wrapping past 'Z'), upper-cased on even positions and
    lower-cased on odd positions; every other character passes through
    unchanged; finally the whole result is reversed.

    Generalization: the shift now wraps with modulo 26, so any non-negative
    n is accepted (the original single-subtraction only supported n <= 26).
    """
    chars = []
    for i, ch in enumerate(s):
        if ch.isdigit():
            chars.append(str(9 - int(ch)))
        elif ch.isalpha():
            # Shift within A-Z with wraparound.
            shifted = string.ascii_uppercase[(string.ascii_uppercase.index(ch.upper()) + n) % 26]
            chars.append(shifted.lower() if i % 2 else shifted)
        else:
            chars.append(ch)
    return ''.join(reversed(chars))
| true |
01d0da6a20a993b760f911c1496369ad548a7670 | Python | jjiayying/cp2019 | /Practical 1/q3_miles_to_kilometre.py | UTF-8 | 162 | 3.328125 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[9]:
# Convert a distance entered in miles to kilometres (1 mile = 1.60934 km).
miles = float(input("miles"))
kilometres = 1.60934 * miles
# Show the result rounded to two decimal places.
print("{0:.2f}".format(kilometres))
# In[ ]:
| true |
097461f5ac536c60af4b1d29b5bd708404698433 | Python | christofoo/hard-way | /ex8.py | UTF-8 | 814 | 3.828125 | 4 | [] | no_license | # this names the string of r conversions formatter
# Format string with four %r (repr) conversion slots.
formatter = "%r %r %r %r"
# Fill the four slots with integers.
print formatter % (1, 2, 3, 4)
# Fill the slots with strings.
print formatter % ("one", "two", "three", "four")
# Fill the slots with booleans.
print formatter % (True, False, False, True)
# Fill the slots with the format string itself, so the %r pattern prints four times.
print formatter % (formatter, formatter, formatter, formatter)
# Fill the slots with full sentences.
print formatter % (
    "I had this thing.",
    "That you could type up right.",
    "But it didn't sing.",
    "So I said goodnight."
)
# %r shows each value's repr(); Python quotes strings with single quotes
# unless the string itself contains one (as in "didn't"), which is why the
# quoting style differs between the printed lines.
#no idea why this^ shows up as single quotes for the first two and last one. | true |
75d879a1d915d43488bcd12ba643d4fae23eaf61 | Python | BE-PROJECTS2018/GroupNo29-Aspect-and-Review-Based-Recommendation-System | /arbrsenv/arbrs/preprocessed/asp_sent_extraction.py | UTF-8 | 3,185 | 2.625 | 3 | [] | no_license | import json
import nltk
import math
import re
import string
from pycorenlp import StanfordCoreNLP
from textblob import TextBlob
from nltk.corpus import wordnet
import asp_sent_rules as rules
import unicodedata
# Connection to a locally running Stanford CoreNLP server.
nlp = StanfordCoreNLP('http://localhost:9000')
#f = open("sample_sentences.txt","r")
# Single demo review sentence to analyse (file input above is disabled).
line = "Camera is very good"
# aspect -> list of sentiment words found for it
asp_sent = {}
# aspect -> averaged polarity score
asp_rating = {}
def corefResolver(line):
    # Resolve pronoun coreferences: rewrite each PRONOMINAL mention with the
    # first NOMINAL mention of its coreference chain, so later per-sentence
    # analysis sees the concrete noun instead of "it"/"they".
    # Returns `line` split into individual sentence strings.
    ind_sent = []
    complete_coref_output = nlp.annotate(line,properties={'annotators':'dcoref','outputFormat':'json'})
    coref_output = complete_coref_output['corefs']
    raw_sent = TextBlob(line)
    sent_array = raw_sent.sentences
    for j in sent_array:
        ind_sent.append(str(j))
    for k in coref_output:
        prop_noun = ""
        for m in coref_output[k]:
            # First NOMINAL mention in the chain becomes the replacement text.
            if m['type'] == 'NOMINAL' and prop_noun == "":
                prop_noun = m['text']
            elif m['type'] == 'PRONOMINAL' and prop_noun != "":
                # sentNum is 1-based in CoreNLP output, hence the -1 below.
                sent_num = int(m['sentNum'])
                ind_sent[sent_num-1] = ind_sent[sent_num-1].replace(m['text'],prop_noun)
    return ind_sent
#insert aspect-sentiment pair in asp_sent dictionary
def insert_asp_sent(asp,sent):
    # Append `sent` to the module-level asp_sent mapping, creating the
    # aspect's list on first use (setdefault replaces the manual
    # membership check of the original).
    asp_sent.setdefault(asp, []).append(sent)
#get negative relations for further reference
def getNegRelations(dep_output,negatives):
    """Record the governors of every 'neg' dependency of the first sentence.

    Mutates and returns `negatives`, adding each negated governor word as a
    key (the empty-string value is just a set-membership marker).
    """
    dependencies = dep_output['sentences'][0]['basicDependencies']
    negated_governors = (rel['governorGloss'] for rel in dependencies if rel['dep'] == 'neg')
    for word in negated_governors:
        negatives[word] = ''
    return negatives
#wrap the sentences in TextBlob and Sentence Tokenize
#for line in f:
    #sent_array = corefResolver(line)
sent_array = corefResolver(line)
for ind in sent_array:
    text = str(ind)
    negatives = {}
    d = {}
    rel_dictionary = {}
    # Two CoreNLP passes per sentence: POS tags and dependency parse.
    pos_output = nlp.annotate(text, properties={
        'annotators': 'pos',
        'outputFormat': 'json'
    })
    dep_output = nlp.annotate(text, properties={
        'annotators': 'depparse',
        'outputFormat': 'json'
    })
    negatives = getNegRelations(dep_output,negatives)
    #making POS tags dictionary
    for i in pos_output['sentences'][0]['tokens']:
        d[i['word']] = i['pos']
    # Group the dependencies by relation name: dep name -> [{gov, dep}, ...]
    for j in dep_output['sentences'][0]['basicDependencies']:
        dep_name = j['dep']
        gov = j['governorGloss']
        dep = j['dependentGloss']
        if dep_name not in rel_dictionary:
            rel_dictionary[dep_name] = []
        rel_dictionary[dep_name].append({'gov':gov,'dep':dep})
    #print(rel_dictionary)
    #passing through each dependency: amod/nsubj relations yield
    #aspect-sentiment candidates via the rule module.
    for j in dep_output['sentences'][0]['basicDependencies']:
        gov = j['governorGloss']
        dep = j['dependentGloss']
        if j['dep'] == 'amod':
            asp_sent = rules.amodRules(gov,dep,d,rel_dictionary,negatives,asp_sent)
        elif j['dep'] == 'nsubj':
            asp_sent = rules.nsubjRules(gov,dep,d,rel_dictionary,negatives,asp_sent)
print(asp_sent)
# Average the TextBlob polarity of each aspect's sentiment words.
# NOTE(review): `sum` shadows the built-in inside this loop.
for asp in asp_sent:
    length = len(asp_sent[asp])
    avg = 0
    sum = 0
    for word in asp_sent[asp]:
        blob_word = TextBlob(word)
        sum = sum + blob_word.sentiment.polarity
    avg = sum / length
    asp_rating[asp] = avg
print(asp_rating)
# print()
c0a204b98ac8342f61d20be4739a578233cf4e9e | Python | Chacon-Miguel/CodeForces-Solutions | /choosingTeams.py | UTF-8 | 871 | 3.8125 | 4 | [] | no_license | # n is the number of students
# k is the number of times players r needed to play
n, k = [int(a) for a in input().split()]
# List that holds how many times each player has played
PlayedGames = [int(a) for a in input().split()]
# assume all players are eligible
eligiblePlayers = n
# iterate through the PlayedGames list
for index in range(n):
    # If the number of times the player has played is greater
    # than the difference between the max number of times allowed
    # to play (5) and k, then remove one from valid players
    if PlayedGames[index] > (5-k):
        eligiblePlayers -= 1
# if there are less than three eligible players, no team can be formed
# (eligiblePlayers // 3 would already be 0 here, so this branch just
# makes the zero case explicit)
if eligiblePlayers < 3:
    print(0)
# since every player can only play once, use integer division to find the
# total number of teams that can be formed
else:
    print(eligiblePlayers//3)
9b567ec5eb8972c1d7a2898b5467316b38a48e0e | Python | atdog/adbg | /adbg/commands/disasm.py | UTF-8 | 1,504 | 2.609375 | 3 | [] | no_license | from adbg.commands import GDBCommand
import adbg.modules.memory as memory
import adbg.modules.color as color
import adbg.modules.arch as arch
from capstone import *
class CSArch():
    """Read-only pairing of a Capstone architecture constant with its mode."""
    def __init__(self, cs_arch, cs_mode):
        # Kept in private attributes, exposed via the read-only properties.
        self._architecture = cs_arch
        self._disasm_mode = cs_mode
    @property
    def arch(self):
        """The CS_ARCH_* constant to pass to capstone.Cs()."""
        return self._architecture
    @property
    def mode(self):
        """The CS_MODE_* constant to pass to capstone.Cs()."""
        return self._disasm_mode
# Map adbg architecture names to the Capstone (arch, mode) pair needed to
# build a disassembler for that target.
arch_constant = {}
arch_constant['x86-64'] = CSArch(CS_ARCH_X86, CS_MODE_64)
arch_constant['i386'] = CSArch(CS_ARCH_X86, CS_MODE_32)
def disasm_pc(pc=None, line=10):
    """Disassemble up to `line` instructions starting at address `pc`.

    `pc` may be an int or a hex string.  Returns a list of colourised
    "address:<tab>mnemonic<tab>operands" strings, highlighting the
    instruction at `pc` itself with the PC colour.
    Raises ValueError when no address is given (the original's bare
    `raise` outside an except block produced an opaque RuntimeError).
    """
    if not pc:
        raise ValueError("disasm_pc requires a PC address")
    if type(pc) is str:
        pc = int(pc, 16)
    # 8 bytes per requested instruction is a generous upper bound for x86.
    code = memory.read(pc, 8 * line).tobytes()
    csv = arch_constant[arch.current]
    md = Cs(csv.arch, csv.mode)
    result = []
    for n, i in enumerate(md.disasm(code, pc), start=1):
        ins = "%s\t%s" % (i.mnemonic, i.op_str)
        if i.address == pc:
            text = "%s:\t%s" % (color.code_adr(hex(i.address)), color.code_val_pc(ins))
        else:
            text = "%s:\t%s" % (color.code_adr(hex(i.address)), ins)
        result.append(text)
        # BUG FIX: the original rebound `line` to the formatted string, so
        # its limit check (`n == line`, int vs str) could never be true and
        # the requested instruction count was ignored.
        if n == line:
            break
    return result
@GDBCommand
def disasm(pc=None):
    """GDB command: disassemble 10 instructions starting at `pc` (hex string)."""
    if not pc:
        print("please specify the PC address to disassemble")
        return
    result = disasm_pc(pc)
    # Pad short disassemblies so the command always prints 10 rows.
    while len(result) < 10:
        result.append('(bad)')
    # The original counted printed lines and compared the counter against
    # the string being printed (int vs str, never equal) -- dead code removed.
    for text in result:
        print(text)
| true |
e0f2f72f2b397e9d050593a9e1ebc5cb2ef2beee | Python | Rovbau/Robina | /VisualKarte.pyw | UTF-8 | 1,931 | 3.4375 | 3 | [] | no_license | #!/usr/bin/env python3
from math import cos,sin,radians,asin,degrees
from tkinter import *
import pickle
import time
# Map origin (in canvas pixel coordinates) of the robot's world frame.
Nullx=200
Nully=380
# Tkinter main window and drawing canvas.
root=Tk()
root.title ("Hinderniss-Daten") # window title
root.geometry("700x700+0+0")
can=Canvas(master=root, width=600, height=600, bg="grey")
# Map origin / drawing helpers below.
def printObstacles():
    # Periodically redraw obstacle, path and solved-path markers from the
    # pickle files that the robot process keeps rewriting, then re-arm
    # itself via root.after().
    try:
        can.delete("Point")
        obstacles_in_grid = pickle.load( open("RoboObstacles.p" , "rb" ))
        for pos in obstacles_in_grid:
            # Grid coordinates are scaled by 10 px per cell on the canvas.
            X=pos[0]*10
            Y=pos[1]*10
            # Draw the obstacle as a red square; canvas y grows downward,
            # hence Nully - Y to flip into world orientation.
            can.create_rectangle(Nullx+X-5,Nully-Y+5,Nullx+X+5,Nully-Y-5, width=1, fill="red",tag="Point")
        position_in_grid = pickle.load( open("RoboPath.p" , "rb" ))
        for pos in position_in_grid:
            X=pos[0]
            Y=pos[1]
            # Path points are drawn unscaled -- presumably already in pixel
            # units; TODO confirm against the writer of RoboPath.p.
            can.create_oval(Nullx+X-15,Nully-Y+15,Nullx+X+15,Nully-Y-15, width=1, fill=None,tag="Point")
        position_solved_path = pickle.load( open("RoboSolved.p" , "rb" ))
        for pos in position_solved_path:
            X=pos[0]*10
            Y=pos[1]*10
            # Solved-path cells as small green dots.
            can.create_oval(Nullx+X-3,Nully-Y+3,Nullx+X+3,Nully-Y-3, width=1, fill="green",tag="Point")
        print(time.time())
        root.after(1500,printObstacles)
    except:
        # NOTE(review): bare except swallows every error (presumably to ride
        # out half-written pickle files); the recursive retry below deepens
        # the call stack each time -- confirm it cannot recurse unbounded.
        print("ERROR")
        time.sleep(0.5)
        printObstacles()
###MAIN###
printObstacles()
# Origin marker plus 50/100/150 px range rings centred on the map origin.
can.create_oval(Nullx-2,Nully+2,Nullx+2,Nully-2, width=1, fill="black")
can.create_oval(Nullx-50,Nully+50,Nullx+50,Nully-50, width=1, fill=None)
can.create_oval(Nullx-100,Nully+100,Nullx+100,Nully-100, width=1, fill=None)
can.create_oval(Nullx-150,Nully+150,Nullx+150,Nully-150, width=1, fill=None)
can.pack()
root.mainloop()
| true |
864ddc27c519aba1ae00db99e557cfa77a6a3738 | Python | chinmay0301/GlowHockey | /main.py | UTF-8 | 1,701 | 2.875 | 3 | [] | no_license | #!/usr/bin/env python
import cv2
import numpy as np
# init hsv detect function
# Hue value sampled from the last left-click; 0 until the user clicks.
h = 0
cv2.namedWindow('Original')
def rethsv(event,x,y,flags,param):
    # Mouse callback: remember the hue of the clicked pixel (from the
    # module-level `hsv` frame set in the main loop) so the loop can
    # threshold around it.
    global h
    if event == cv2.EVENT_LBUTTONDOWN:
        #print hsv[y,x]
        h=hsv[y,x,0]
cv2.setMouseCallback('Original', rethsv)
# choose default camera
cap = cv2.VideoCapture(0)
while(1):
    # Take each frame
    ret, frame = cap.read()
    frame = cv2.flip(frame,1)
    cv2.imshow('Original',frame)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # define hue limits and create mask:
    # build a window of roughly +/-20 around the clicked hue h, clamped
    # at the low and high ends of the hue range.
    if h<20:
        l=0
        m=h+20
    elif h>245:
        m=255
        l=h-10
    else:
        l=h-20
        m=h+20
    lower = np.array([l,100,100])
    upper = np.array([m,255,255])
    mask = cv2.inRange(hsv, lower, upper)
    # generate result image (keep only pixels inside the hue window)
    mask_res = cv2.bitwise_and(frame,frame, mask=mask)
    # convert to grayscale and invert (blob detector looks for dark blobs)
    img = cv2.cvtColor(mask_res, cv2.COLOR_BGR2GRAY)
    img = 255-img
    # Blob Detector Parameters: keep only blobs of at least 100 px area.
    params = cv2.SimpleBlobDetector_Params()
    params.filterByArea = True
    params.minArea = 100
    detector = cv2.SimpleBlobDetector(params)
    keypoints = detector.detect(img)
    im_with_keypoints = cv2.drawKeypoints(img, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    # Detected blobs drawn as rich (size-scaled) red keypoints.
    cv2.imshow("Keypoints", im_with_keypoints)
    # Print the first keypoint's position, if any were found.
    if len(keypoints)>0:
        print "x:", keypoints[0].pt[0]
        print "y:", keypoints[0].pt[1]
    # ESC (27) exits the loop.
    if (cv2.waitKey(5) & 0xFF) == 27:
        break
# clean up
cv2.destroyAllWindows()
| true |
391f702ceaa13e85abc2d6722bf9feab86ba6dc2 | Python | pradeepraja2097/Python | /opencv/venv/image_contour.py | UTF-8 | 798 | 2.921875 | 3 | [] | no_license | # contour is nothing but connecting outer boundaries with same colour and same intensity
# it is used for object detection
import cv2
import numpy as np
img=cv2.imread('opencv-logo.png')
imgray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) # convert image to grayscale
ret,thresh=cv2.threshold(imgray,127,255,0) # binarise: pixels above 127 become 255
contours,hierarchy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE) # CHAIN_APPROX_NONE stores every boundary point (no compression)
# each contour is an array of the (x, y) boundary points of one object
print("number of contours =",str(len(contours)))
print(contours[0])
cv2.drawContours(img,contours,-1,(0,0,255),3) # -1 = draw all contours, red, 3 px
cv2.imshow('image',img)
cv2.imshow('image gray',imgray)
cv2.waitKey(0)
cv2.destroyAllWindows()
9ad57f016c70d121b70687087844257118e4b589 | Python | amazingyyc/Deep8CV | /MNIST/cnn_minist.py | UTF-8 | 3,056 | 2.71875 | 3 | [] | no_license | # coding=utf-8
import cPickle, gzip, os, sys
import numpy as np
from deep8 import *
def loadData(dataPath):
    # Load the pickled MNIST dataset (gzip-compressed train/valid/test
    # splits) and return the six arrays:
    # (trainX, trainY, validX, validY, testX, testY).
    f = gzip.open(dataPath, 'rb')
    trainSet, validSet, testSet = cPickle.load(f)
    f.close()
    return (trainSet[0], trainSet[1], validSet[0], validSet[1], testSet[0], testSet[1])
# load data
trainX, trainY, validX, validY, testX, testY = loadData(os.getcwd() + "/data/mnist.pkl.gz")
'''
trainX [50000, 784]
trainY [50000, ]
validX [10000, 784]
validY [10000, ]
testX [10000, 784]
testY [10000, ]
'''
# One pass over the training set; Adam with a linearly decaying rate.
epoch = 1
executor = EagerExecutor()
learningRate = LinearDecayLearningRateIterator(totalStep = epoch * len(trainX), start=1e-3, end=0.0)
trainer = AdamTrainer(learningRate = learningRate)
# x/y are input placeholders (the trailing False presumably marks them
# non-trainable -- confirm against the deep8 parameter() signature).
x = parameter(executor, [28, 28, 1], False)
y = parameter(executor, [10], False)
# first convolution
w_conv1 = parameter(executor, [32, 5, 5, 1])
b_conv1 = parameter(executor, [32])
# second convolution
w_conv2 = parameter(executor, [64, 5, 5, 32])
b_conv2 = parameter(executor, [64])
# full connected layer
w_fc1 = parameter(executor, [1024, 4 * 4 * 64])
b_fc1 = parameter(executor, [1024])
# full connected layer
w_fc2 = parameter(executor, [10, 1024])
b_fc2 = parameter(executor, [10])
# Gaussian-initialise every trainable tensor.
w_conv1.gaussian()
b_conv1.gaussian()
w_conv2.gaussian()
b_conv2.gaussian()
w_fc1.gaussian()
b_fc1.gaussian()
w_fc2.gaussian()
b_fc2.gaussian()
# Training: one image at a time; forward pass is conv -> relu -> 2x2
# max-pool (twice), then two dense layers and a softmax cross-entropy loss.
for e in range(epoch):
    for i in range(len(trainX)):
        # One-hot encode the label for the cross-entropy loss.
        one_hot_y = np.zeros([10], dtype=np.float32)
        one_hot_y[trainY[i]] = 1.0
        x.feed(trainX[i])
        y.feed(one_hot_y)
        layer1 = (x.conv2d(w_conv1, covered=False) + b_conv1).relu().maxPooling2d(covered = False, filterHeight=2, filterWidth=2, strideY=2, strideX=2)
        layer2 = (layer1.conv2d(w_conv2, covered=False) + b_conv2).relu().maxPooling2d(covered = False, filterHeight=2, filterWidth=2, strideY=2, strideX=2)
        layer3 = (w_fc1 * layer2.reShape([4 * 4 * 64]) + b_fc1).relu()
        layer4 = w_fc2 * layer3 + b_fc2
        loss = layer4.softmaxCrossEntropyLoss(y)
        print "epoch:", e, ", step:", i, ", loss => ", loss.valueStr()
        backward(loss)
        trainer.train(executor)
# Evaluation: same forward pass (minus the loss), argmax of the softmax
# gives the predicted digit.
pred = np.zeros([10], dtype=np.float32)
correct = 0
wrong = 0
for i in range(len(testX)):
    x.feed(testX[i])
    layer1 = (x.conv2d(w_conv1, covered=False) + b_conv1).relu().maxPooling2d(filterHeight=2, filterWidth=2, strideY=2, strideX=2)
    layer2 = (layer1.conv2d(w_conv2, covered=False) + b_conv2).relu().maxPooling2d(filterHeight=2, filterWidth=2, strideY=2, strideX=2)
    layer3 = (w_fc1 * layer2.reShape([4 * 4 * 64]) + b_fc1).relu()
    layer4 = w_fc2 * layer3 + b_fc2
    ret = layer4.softmax()
    ret.fetch(pred)
    executor.clearInterimNodes()
    if np.argmax(pred) == testY[i]:
        correct += 1
        print "test ", i, " => right"
    else:
        wrong += 1
        print "test ", i, " => wrong"
print "Total:", correct + wrong, ", Correct:", correct, ", Wrong:", wrong, "Accuracy:", (1.0 * correct) / (correct + wrong)
| true |
7dd55bcf2fad239690cee795a568cede4f245a54 | Python | letruongthanh24103698/BLE_matlab | /code/server.py | UTF-8 | 2,836 | 2.625 | 3 | [] | no_license | ####****************************Request library****************************####
from estimate_dis import estimate_dis
####***********************************************************************####
#import lib
from scipy.io import loadmat
import requests
import matplotlib.pyplot as plt
import math
####**********************get data from json/mat file**********************####
def processdata(mat):
    # Pull the 'gateway', 'tag' and 'pathloss' RSSI series out of the nested
    # cell structure that scipy.io.loadmat produces for the capture file.
    # Returns three plain Python lists of scalar values.
    # NOTE(review): both loops stop one entry short (len-1) -- presumably
    # skipping a trailing record; verify against the capture format.
    gateway = []
    tag = []
    pathloss = []
    data=mat['data']['data']
    for i in range(0,len(data)-1,1):
        cell=data[i,0]
        if cell['name'][0,0][0] == 'gateway':
            for j in range(0,len(cell['data1'][0,0]['value'][0,0])-1,1):
                gateway.append(cell['data1'][0,0]['value'][0,0][j][0])
        elif cell['name'][0,0][0] == 'tag':
            for j in range(0,len(cell['data1'][0,0]['value'][0,0])-1,1):
                tag.append(cell['data1'][0,0]['value'][0,0][j][0])
        elif cell['name'][0,0][0] == 'pathloss':
            for j in range(0,len(cell['data1'][0,0]['value'][0,0])-1,1):
                pathloss.append(cell['data1'][0,0]['value'][0,0][j][0])
    return gateway, tag, pathloss
####************************************************************************####
####**********************************main**********************************####
#get json data (disabled: offline .mat capture is used instead)
#URL = "http://68.183.235.97:8080/rtlsbletest/getall"
#r = requests.get(url=URL)
#data = r.json()
#load .mat
data=loadmat('23d_12m_11h_49m.mat')
#init variable
cnt=0
R_sum=0
R_mean=[]
R_tag_kal=[]
R_path_kal=[]
distance=[]
x=[]
last_est_tag=0
last_est_path=0
# Reference distance (metres) for the path-loss model -- TODO confirm units.
dis_pathloss=9
#get data
gateway, tag, pathloss = processdata(data)
#***init***#
est=estimate_dis(0.00075,0,0.00075,20)
#***end - init***#
#calculate: Kalman-filter the tag and pathloss RSSI against the running
#mean of the gateway RSSI, then convert the estimates to a distance.
for i in range(0,len(gateway)-1,1):
    R_sum=R_sum+gateway[i]
    R_mean.append(R_sum/(cnt+1))
    # First sample initialises the filter state.
    if i==0:
        initiate=1
    else:
        initiate=0
    #***get estimate tag***#
    last_est_tag= est.kalman(R_mean[cnt], gateway[i], tag[i], last_est_tag, initiate)
    #***end - get estimate tag***#
    #***get estimate pathloss***#
    last_est_path= est.kalman(R_mean[cnt], gateway[i], pathloss[i], last_est_path, initiate)
    #***end - get estimate pathloss***#
    #***calculate distance***#
    dis=est.calculate(last_est_tag, last_est_path, dis_pathloss)
    #***end - calculate distance***#
    #append to array to plot figure
    R_tag_kal.append(last_est_tag)
    R_path_kal.append(last_est_path)
    cnt=cnt+1;
    x.append(cnt)
    distance.append(dis)
#plot figure: filtered RSSI series in figure 1, distance estimate in figure 2
plt.figure(1)
plt.plot(x,R_tag_kal,label='TAG')
plt.plot(x,R_path_kal,label='PATHLOSS')
plt.figure(2)
plt.plot(x,distance,label='DISTANCE')
plt.legend()
plt.grid()
plt.show()
| true |
f5fc4f62c3a078b39c1ded2344607edecbe19e78 | Python | hmaynard8877/dog-shelter-project | /food_calculator.py | UTF-8 | 1,331 | 3.96875 | 4 | [] | no_license | MAX_CAPACITY = 30
def calculate_food(num_small, num_medium, num_large, lbs_surplus):
#Check that number of dog values are integers
if (type(num_small) != int or type(num_medium) != int or type(num_large) != int):
raise TypeError("Error: Number of dogs must be an integer.")
#Check that amount of excess food is a float or integer
if (not (type(lbs_surplus) == float or type(lbs_surplus) == int)):
raise TypeError("Error: Amount of leftover food must be an integer or float.")
#Check that all values are positive
if (num_small < 0 or num_medium < 0 or num_large < 0 or lbs_surplus < 0):
raise Exception("Error: Values entered must be positive.")
#Check that values are not null
if (num_small is None or num_medium is None or num_large is None or lbs_surplus is None):
raise Exception("Error: At least one variable is set to None.")
#Check number of dogs against maximum capacity of shelter
if (num_small + num_medium + num_large > MAX_CAPACITY):
raise Exception("Error: Number of dogs exceeds capacity.")
#Calculate amount of dog food to order for next month
food_amount = (((num_small * 10) + (num_medium * 20) + (num_large * 30)) - lbs_surplus) * 1.2
if (food_amount < 0):
return 0
else:
return round(food_amount, 2) | true |
3395d43fac4d27d82877b11b5f2752ea02a5a17e | Python | michal037/workspace | /plot1.py | UTF-8 | 186 | 3.125 | 3 | [
"MIT"
] | permissive | import numpy as np
import matplotlib.pyplot as plot
def fun(x):
    """Evaluate (cos(3x) / x)^2, the curve plotted by the script below."""
    ratio = np.cos(3 * x) / x
    return ratio ** 2
# Sample the curve on [0.3, pi] and draw it.
X = np.linspace(0.3, np.pi, 500)
# numpy broadcasts fun over the whole array at once, replacing the
# original per-point Python list comprehension.
Y = fun(X)
plot.plot(X, Y)
plot.show()
| true |
bda9079bdbbe9a9c183afc95a84b790e34232f84 | Python | sichen/hrmmdiscuz | /scripts/dz_multiuser.py | UTF-8 | 3,585 | 2.75 | 3 | [] | no_license | #!/usr/bin/env python
'''
The python script helps me create discuz users in batch
Created on Nov 14, 2011
@author: sichen
'''
from optparse import OptionParser
import datetime
import time
import sys
import md5
import random
import re
# Globals
# the global salt value appended before the second md5 pass
SALT = 'ab12cd'
# the global cleartext password shared by every generated account
PW = 'rzxlszy'
# Discuz/UCenter password scheme:
# md5(md5($password).$salt);
M = md5.new(PW).hexdigest()
PASSWORD = md5.new(M+SALT).hexdigest()
# Registration timestamps are randomised between this base epoch and now.
TIMEBASE = 1319783169
TIMENOW = int(time.time())
def add_user(uid, username, password, salt, email, timestamp='1318315182', ip = '71.198.27.101', timeoffset = 9999, credit = 2):
    # Print the SQL INSERT statements that create a Discuz/UCenter user and
    # activate it.  Nothing is executed here -- the printed statements are
    # meant to be fed to the database separately.
    # NOTE(review): the SQL is assembled by string concatenation with no
    # escaping, so usernames/emails containing quotes would break (or
    # inject into) the statements; validate_user() only partially helps.
    insert_user = "INSERT IGNORE INTO pre_ucenter_members(username,password,email,regip,regdate,salt) VALUES(" + " '" + username + "','" + password + "','" + email + "','" + ip + "'," + timestamp + ",'" + salt + "');"
    insert_memberfield = "INSERT IGNORE INTO pre_ucenter_memberfields(uid) VALUES (" + str(uid) + ");"
    print insert_user
    print insert_memberfield
    activate_user = "INSERT IGNORE INTO pre_common_member(email,username,password,emailstatus, regdate,credits,timeoffset ) VALUES( " + "'" + email + "','" + username + "','" + password + "', 1, " + timestamp + ", " + str(credit) + ", " + str(timeoffset) + ");"
    activate_user_membercount = "INSERT IGNORE INTO pre_common_member_count(uid,extcredits2) VALUES (" + str(uid) + ", " + str(credit) + ");"
    print activate_user
    print activate_user_membercount
def validate_user(username):
    """Return `username` unchanged if it contains at least one run of
    [A-Za-z0-9_.+-] characters anywhere; otherwise return ''."""
    if re.search('[\w\d.+-]+', username):
        return username
    return ''
def process_user(uid, username):
    # Emit the SQL for one user: derive the e-mail from the username and
    # randomise the registration timestamp and IP so accounts look organic.
    if username == '':
        return
    email = username + '@telekbird.com.cn'
    rtime = random.randint(TIMEBASE, TIMENOW)
    timestamp = str(rtime)
    # Random 71.x.y.z address (first octet fixed).
    ip0 = random.randint(1, 255)
    ip1 = random.randint(1, 255)
    ip2 = random.randint(1, 255)
    ip = '71.%d.%d.%d' % (ip0, ip1, ip2)
    add_user(uid, username, PASSWORD, SALT, email, timestamp, ip)
def process_file(startuid, filename):
    # Read one username per line from `filename` and emit the SQL for each,
    # assigning sequential uids starting at `startuid`.
    lines = []
    uid = startuid
    number_processed = 0
    try:
        f = open(filename)
        lines = f.readlines()
        f.close()
    except IOError, e:
        print "IOError: %s" % (str(e))
        sys.exit(1)
    print "========================="
    # each line contains a username
    for line in lines:
        uname = validate_user(line.strip())
        if uname == '':
            # NOTE(review): one invalid line aborts the remainder of the
            # batch (break, not continue) -- confirm this is intended.
            break
        process_user(uid, uname)
        uid += 1
        number_processed += 1
    # print out summary
    print "========================="
    print "processed: " + str(number_processed)
    print "========================="
def options_parser(scriptname):
    """Build the optparse command-line parser for this script."""
    parser = OptionParser("Usage: " + scriptname + "[--start-uid UID] [--user-file FILE] ")
    parser.add_option("", "--start-uid", type="int", dest="uid", action="store",
                      help="The uid to start with, must not be in the database already.")
    parser.add_option("", "--user-file", type="string", dest="userfile", action="store",
                      help="The file that contains a list of usernames to be created.")
    return parser
def main():
    # Parse the command line; both --start-uid and --user-file are
    # mandatory, so bail out with the usage text when either is missing.
    # NOTE(review): print_help() prints itself and returns None, so these
    # `print parser.print_help()` lines also emit a trailing "None".
    parser = options_parser(sys.argv[0])
    (options, args) = parser.parse_args(sys.argv)
    if not options.uid:
        print parser.print_help()
        sys.exit(1)
    if not options.userfile:
        print parser.print_help()
        sys.exit(1)
    process_file(options.uid, options.userfile)
if __name__ == "__main__":
    main()
500d3964d53a1e9784b28170b29c6456b1b47ecc | Python | StarSTRUQ/ND-Tile | /ndtile.py | UTF-8 | 5,614 | 2.734375 | 3 | [
"BSD-3-Clause"
] | permissive | """
Do Tiling for an N-dimensional data set given an input CSV file
containing one point per row. Each point is specified by a set
of independent parameter values followed by the dependent scalar value.
Copyright (c) 2016, Donald E. Willcox
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import numpy as np
import argparse
from Tiling import Point, Domain
# Command-line interface: tiling constraint thresholds, plotting switches,
# labelling options and output files.
parser = argparse.ArgumentParser()
parser.add_argument('infile', type=str,
                    help='Name of the input csv file containing (x1, x2, ..., y) data series for scalar data of the form y=f(x1, x2, ...). One line of header will be skipped.')
parser.add_argument('-L2rt', '--L2resthresh', type=float,
                    help='Upper threshold for tiling constraint: L-2 norm of normalized residuals.')
parser.add_argument('-cfdt', '--cdetthresh', type=float,
                    help='Lower threshold for tiling constraint: coefficient of determination.')
parser.add_argument('-tsym', '--tilesymmetry', type=float,
                    help='Threshold on normalized residual symmetry across a tile.')
parser.add_argument('-fsym', '--factortilesymmetry', type=float,
                    help='Threshold on growth factor for normalized residual symmetry across a tile.')
parser.add_argument('-ptsf', '--plotsurfaces', action='store_true',
                    help='If supplied, plot tile surfaces when searching for empty space to create virtual tiles.')
parser.add_argument('-pint', '--plotintermediate', action='store_true',
                    help='If supplied, plot the domain at intermediate steps whenever a new point is added to a tile.')
parser.add_argument('-ptil', '--plottiling', action='store_true',
                    help='If supplied, plot the domain whenever a new tile is added to the domain.')
parser.add_argument('-pfin', '--plotfinal', action='store_true',
                    help='If supplied, plot the domain when tiling is complete.')
parser.add_argument('-dlab', '--dimlabels', type=str, nargs='*', help='If supplied, will collect a series of strings specifying, in order, the label for each dimension. If the number of dimension labels is not exactly equal to the dimensionality of the dataset, then the supplied labels will be ignored.')
parser.add_argument('-ilab', '--independentlabel', type=str, help='Takes a string argument to set the label for the independent scalar value corresponding to this dataset.')
parser.add_argument('-noshrink', '--noshrink', action='store_true',
                    help='If supplied, virtual tiles containing empty space will not be shrunk after point tiling.')
parser.add_argument('-log', '--logfile', type=str,
                    help='Name of the log file in which to write the status of intermediate steps. If --logfile is not supplied, no intermediate printing will be done.')
parser.add_argument('-o', '--outfile', type=str,
                    help='Name of the summary file in which to print the final tiling result.')
args = parser.parse_args()
# Read Data: one point per row, independent values followed by the scalar.
raw_data = np.genfromtxt(args.infile, delimiter=',', skip_header=1)
# Each element of data is a row from the csv file, so convert to columns
data = np.transpose(raw_data)
# data[0:-1] = Independent Parameter values
pt_ivals = np.transpose(data[0:-1])
# data[-1] = Scalar Dependent Parameter values
pt_dvals = data[-1]
# Create list of Points
pointlist = []
for r, v in zip(pt_ivals, pt_dvals):
    p = Point(r, v)
    pointlist.append(p)
# Get bounds on the independent parameter domain
lo = np.amin(pt_ivals, axis=0)
hi = np.amax(pt_ivals, axis=0)
# Form Domain
dom = Domain(points=pointlist, lo=lo, hi=hi,
             dlabels=args.dimlabels, ilabel=args.independentlabel,
             logfile=args.logfile, summaryfile=args.outfile)
# Tile Domain
dom.do_domain_tiling(L2r_thresh=args.L2resthresh, coeff_det_thresh=args.cdetthresh, tilde_resd_thresh=args.tilesymmetry,
                     tilde_resd_factor=args.factortilesymmetry, attempt_virtual_shrink=(not args.noshrink),
                     plot_tile_surfaces=args.plotsurfaces, plot_intermediate=args.plotintermediate,
                     plot_tiling=args.plottiling, plot_final=args.plotfinal)
# Cleanup, closing open file handles
dom.close()
| true |
bebec923cde95cab21569885180b4777fa46e9b9 | Python | Flaagrah/Deep_Learning_Portfolio | /YOLO/src/yolo_model/normalization.py | UTF-8 | 1,762 | 2.546875 | 3 | [] | no_license | import tensorflow as tf
import os
import numpy as np
import pandas
from yolo_model import B_BOX_SIDE as B_BOX_SIDE
from yolo_model import IMAGE_HEIGHT as IMAGE_HEIGHT
from yolo_model import IMAGE_WIDTH as IMAGE_WIDTH
from yolo_model import CLASSES as CLASSES
num_classes = len(CLASSES)
#Normalize the width and height by square rooting. The purpose is to make smaller values more visible.
def NormalizeWidthHeight(labels):
    """Square-root the w/h channels of one label grid and return it flat.

    `labels` is a flat array that reshapes to
    (batch, H/side, W/side, num_classes + 4), the last four channels being
    (x, y, w, h); only w and h are square-rooted so small boxes weigh more.
    """
    grid = np.reshape(labels, (-1, int(IMAGE_HEIGHT / B_BOX_SIDE), int(IMAGE_WIDTH / B_BOX_SIDE), num_classes + 4))
    head = grid[:, :, :, 0:num_classes + 2]        # class scores + x, y
    wh = np.sqrt(grid[:, :, :, num_classes + 2:])  # compressed w, h
    # np.concatenate already returns an ndarray, so the original's extra
    # np.asarray(...) round-trip after flatten() was redundant.
    return np.concatenate([head, wh], axis=-1).flatten()
def NormalizeWidthHeightForAll(allLabels):
    """Apply NormalizeWidthHeight to every label grid and stack the results
    into a single float32 array."""
    # Comprehension replaces the manual accumulator loop (the original also
    # carried a dead `normalized = None` initialiser).
    return np.asarray([NormalizeWidthHeight(labels) for labels in allLabels]).astype(np.float32)
#Undo normalization.
def unNormalize(labels):
    """Invert NormalizeWidthHeight for one grid already shaped
    (cells_y, cells_x, num_classes + 4): square the w/h channels back and
    return the grid flattened."""
    head = labels[:, :, 0:num_classes + 2]
    wh = labels[:, :, num_classes + 2:]
    # Squaring undoes the sqrt applied during normalization; the original's
    # trailing np.asarray(...) on an ndarray was a no-op and is gone.
    return np.concatenate([head, np.square(wh)], axis=-1).flatten()
def unNormalizeAll(labels):
    """Apply unNormalize to every grid in `labels`.

    Returns a plain Python list (matching the original behaviour)."""
    return [unNormalize(grid) for grid in labels]
| true |
2044742f6fe96a08e34dbf233e5610956e69f468 | Python | Aakritisingla1895/Recsys | /regression_collabfilter.py | UTF-8 | 902 | 2.734375 | 3 | [] | no_license | import pandas as pd
import numpy as np
from scipy.optimize import fmin_cg
from scipy.stats import pearsonr
from sklearn.metrics import mean_squared_error
args = None  # populated by the CLI argument parser elsewhere in the module

def load_data(filename, exclusion = False):
    """Parse a MovieLens-style ratings CSV into {user_id: {movie_id: rating}}.

    The first line (header) is skipped and blank lines are ignored.  When
    `exclusion` is True at most 10 ratings are kept per user; extra rows
    for that user are dropped.
    """
    users = {}
    # `with` guarantees the file is closed even on a parse error (the
    # original opened the handle without ever closing it).
    with open(filename) as reader:
        # skip the header line
        next(reader)
        for line in reader:
            if not line.strip():
                continue
            # Each row is: user_id,movie_id,rating[,...]
            split_line = line.split(",")
            ratings = users.setdefault(int(split_line[0]), {})
            if exclusion and len(ratings) >= 10:
                continue
            ratings[int(split_line[1])] = float(split_line[2])
    return users
8854f633824c8bb87b7c1df9c8dc7c6070e2f1d8 | Python | amol9/vote | /vote/polls.py | UTF-8 | 1,893 | 2.8125 | 3 | [
"MIT"
] | permissive | import re
from .reddit_post import RedditPost
from .image_poll import ImagePoll, Image
from .straw_poll import StrawPollError, VotePass
class PollError(Exception):
    """Raised by Polls when a poll cannot be run (e.g. a required parameter is missing)."""
    pass
class Polls:
    """Registry of runnable polls, keyed by a public poll name."""

    def __init__(self):
        # Maps the public poll name to the bound method implementing it.
        self._map = {
            'reddit_image_poll' : self._reddit_image_poll,
            'reddit_image_poll2' : self._reddit_image_poll2
        }

    def run(self, poll_name, params=None):
        """Run the poll registered under `poll_name`, passing `params` through.

        Unknown names print the list of available polls and return; a
        PollError raised by the poll is printed instead of propagating.
        """
        if poll_name not in self._map:
            print('no such poll')
            print('available polls:\n' + '\n'.join(self._map.keys()))
            return
        method = self._map[poll_name]
        try:
            method(params)
        except PollError as e:
            print(e)

    def _reddit_image_poll(self, params):
        """Parse 'N. [title](image_url) ... (poll_url)' list items and vote on each."""
        j = self._get_reddit_post_content(params)
        # Raw string: the pattern is full of backslash escapes.
        list_item_regex = re.compile(r"\d+\.\s+\[(.*?)\]\((.*?)\).*\((.*?)\)")
        matches = list_item_regex.findall(j)
        for m in matches:
            # NOTE(review): `images` receives a single Image here while
            # _reddit_image_poll2 passes a list — confirm ImagePoll accepts both.
            ip = ImagePoll(title=m[0].strip(), images=Image(None, m[1].strip()), poll_url=m[2].strip(), cache_images=True)
            # Bug fix: the poll object was built but never passed to _ip_vote(),
            # which always failed with a missing-argument TypeError.
            self._ip_vote(ip)

    def _get_reddit_post_content(self, params):
        """Fetch and return the reddit post body; raises PollError without a post_id."""
        if 'post_id' not in params.keys():
            raise PollError('please provide a reddit post id')
        rp = RedditPost(post_id=params['post_id'], cache=True)
        return rp.content

    def _ip_vote(self, ip):
        """Cast a vote on an ImagePoll, reporting failures without raising."""
        try:
            success = ip.vote()
            if not success:
                print('failure in casting vote')
        except VotePass:
            # A deliberately skipped vote is not an error.
            pass
        except StrawPollError as e:
            print(e)

    def _reddit_image_poll2(self, params):
        """Variant supporting several images per list item; the last link is the poll URL."""
        j = self._get_reddit_post_content(params)
        list_item_regex = re.compile(r"^\d+\..*$", re.M)
        matches = list_item_regex.findall(j)
        link_regex = re.compile(r"\[(.*?)\]\((.*?)\)")
        for m in matches:
            links = link_regex.findall(m)
            # All links but the last are images; the last one is the poll URL.
            images = []
            for l in links[:-1]:
                img = Image(l[0].strip(), l[1].strip())
                images.append(img)
            ip = ImagePoll(title=links[0][0].strip(), images=images, poll_url=links[-1][1].strip(), cache_images=True)
            self._ip_vote(ip)
4c949343d08f24e29d85ccd4f25e08efc76de85b | Python | llDataSciencell/CriptoAutoTrade | /TrainModel/XGBoost2/trade_class.py | UTF-8 | 4,462 | 2.796875 | 3 | [] | no_license | #coding: utf-8
'''
default:2239.65016075
after:2436.87876149
##0.4 40%
635.385700015
711.099316173
'''
import numpy as np
import poloniex
import datetime
import time
class TradeClass(object):
    """Fetches ETH/USDT candle data from Poloniex, prepares percentage-based
    features/labels, and runs a simple buy/sell trading simulation."""
    def __init__(self):
        # No instance state is kept; all methods are self-contained.
        pass
    def getDataPoloniex(self):
        """Download ~500 days of 5-minute USDT_ETH candles from Poloniex.

        Returns:
            (date, data): candle timestamps as datetime objects and the
            corresponding opening prices as floats.
        """
        polo = poloniex.Poloniex()
        polo.timeout = 10
        chartUSDT_BTC = polo.returnChartData('USDT_ETH', period=300, start=time.time() - 1440*60 * 500, end=time.time())#1440(min)*60(sec)=DAY
        tmpDate = [chartUSDT_BTC[i]['date'] for i in range(len(chartUSDT_BTC))]
        date = [datetime.datetime.fromtimestamp(tmpDate[i]) for i in range(len(tmpDate))]
        data = [float(chartUSDT_BTC[i]['open']) for i in range(len(chartUSDT_BTC))]
        return date ,data
    def PercentageLabel(self,Xtrain,yTrain):
        """Normalize each feature window by its last element and convert each
        target into a scaled percentage change relative to that element.

        Returns:
            (X, Y): normalized feature rows, and targets scaled by 100*100
            (per the author's original note: % * 100).
        """
        X=[]
        Y=[]
        for i in range(0,len(yTrain)):
            original=Xtrain[i][-1]
            X.append([float(val/original) for val in Xtrain[i]])
            Y.append(float(float(yTrain[i]/Xtrain[i][-1])-1)*100*100)#%*100
        return X,Y
    def TestPercentageLabel(self,Xtrain):
        """Normalize a single test window relative to its last element.

        NOTE(review): unlike PercentageLabel, `original` is Xtrain[-1] (the
        last scalar of the window) and the same normalized row is appended
        once per element, yielding len(Xtrain) identical rows — this looks
        unintentional; confirm the intended behavior.
        """
        X=[]
        for i in range(0,len(Xtrain)):
            original = Xtrain[-1]
            X.append([float(val/original) for val in Xtrain])
        return X
    # +$30 (author's recorded result for this strategy)
    def buy(self,pred,money, ethereum, total_money, current_price):
        """Buy ethereum with 5% of available cash at current_price.

        Returns the updated (money, ethereum, total_money), or the original
        values unchanged if the purchase would drive cash negative.
        """
        first_money,first_ethereum,first_total_money = money,ethereum,total_money
        # NOTE(review): abs(pred) < 0.0 can never be True, so this guard is
        # effectively disabled — confirm the intended threshold.
        if abs(pred) < 0.0:
            return first_money, first_ethereum, first_total_money
        spend = abs(money * 0.05)
        # Factor 1.0000 stands in for a 1.0015 trading fee (see trailing note).
        money -= spend * 1.0000#1.0015
        if money < 0:
            return first_money,first_ethereum,first_total_money
        ethereum += float(spend / current_price)
        total_money = money + ethereum * current_price
        return money, ethereum, total_money
    def sell(self,pred,money, ethereum, total_money, current_price):
        """Sell 5% of the held ethereum at current_price.

        Returns the updated (money, ethereum, total_money), or the original
        values unchanged if the sale would drive the ethereum balance negative.
        """
        first_money, first_ethereum, first_total_money = money, ethereum, total_money
        # NOTE(review): abs(pred) < 0.0 is never True here either.
        if abs(pred) <0.0:
            return first_money, first_ethereum, first_total_money
        spend = abs(ethereum * 0.05)
        ethereum -= spend * 1.0000#1.0015
        if ethereum < 0.0:
            return first_money,first_ethereum,first_total_money
        money += float(spend * current_price)
        total_money = money + float(ethereum * current_price)
        return money, ethereum, total_money
    # (translated from Japanese) Make sure to use abs(pred)!
    def buy_simple(self,pred,money, ethereum, total_money, current_price):
        """Buy ethereum with a cash fraction proportional to |pred|
        (money * 0.5 * |pred| * 0.1); no trade when |pred| < 0.5 or when
        the resulting cash balance would be negative.
        """
        first_money, first_ethereum, first_total_money = money, ethereum, total_money
        spend = money * 0.5 * (abs(pred)*0.1)
        money -= spend * 1.0000
        if money < 0.0 or abs(pred) < 0.5:
            return first_money,first_ethereum,first_total_money
        ethereum += float(spend / current_price)
        total_money = money + ethereum * current_price
        return money, ethereum, total_money
    def sell_simple(self,pred,money, ethereum, total_money, current_price):
        """Sell an ethereum fraction proportional to |pred|; no trade when
        |pred| < 0.2 or the ethereum balance would go negative.

        NOTE(review): the sell threshold (0.2) differs from the buy
        threshold (0.5) — confirm the asymmetry is intended.
        """
        first_money, first_ethereum, first_total_money = money, ethereum, total_money
        spend = ethereum * 0.5 * (abs(pred)*0.1)
        ethereum -= spend * 1.0000
        if ethereum < 0.0 or abs(pred) < 0.2:
            return first_money,first_ethereum,first_total_money
        money += float(spend * current_price)
        total_money = money + float(ethereum * current_price)
        return money, ethereum, total_money
    # (translated author notes) The array lengths here might be buggy.
    # With only 0.0001: about +$30; with 0.001*pred: about +$200.
    def simulate_trade(self,price, X_test, model):
        """Replay the price series, asking `model` for a direction each step
        and trading via buy_simple/sell_simple. Starts with $300 and 0.01 ETH.

        Returns the final portfolio value (cash + ETH at the last traded price).
        """
        money = 300
        ethereum = 0.01
        total_money = money + np.float64(price[0] * ethereum)
        first_total_money = total_money
        for i in range(0, len(price)):
            print(i)
            current_price = price[i]
            prediction = model.predict(X_test[i])
            pred = prediction[0]
            # Positive prediction -> buy, otherwise sell.
            if pred > 0:
                print("buy")
                money, ethereum, total_money = self.buy_simple(pred,money, ethereum, total_money, current_price)
                print("money"+str(money))
            elif pred <= 0:
                print("sell")
                money, ethereum, total_money = self.sell_simple(pred,money, ethereum, total_money, current_price)
                print("money"+str(money))
        print("FIRST"+str(first_total_money))
        print("FINAL" + str(total_money))
        return total_money
| true |
e2c75d047171757b12eee13ce15675c153843030 | Python | venkat-oss/SPOJ | /CHOTU.py | UTF-8 | 139 | 3.0625 | 3 | [] | no_license | import math
# For each of T test cases read integers a and b and print
# 2 * sqrt(a^2 - b^2) with three decimal places.
for _ in range(int(input())):
    a, b = (int(tok) for tok in input().split(' '))
    print("%.3f" % (2 * math.sqrt(a * a - b * b)))
| true |
439040318de62e45daf5e31c6ef988310ff33ecf | Python | Damiao-NT/Listas_pythonBrasil | /Q12.py | UTF-8 | 712 | 4.03125 | 4 | [] | no_license | # Foram anotadas as idades e alturas de 30 alunos. Faça um Programa que determine quantos alunos com mais de 13 anos possuem altura inferior à média de altura desses alunos
# Read age and height for 30 students, then count how many students older
# than 13 are shorter than the average height of the whole group.
idades = []
alturas = []
soma_alturas = 0
contador = 0
for aluno in range(30):
    idades.append(int(input("Digite a idade do aluno %d:" % (aluno + 1))))
    alturas.append(float(input("Tambem digite a altura do aluno %d:" % (aluno + 1))))
    soma_alturas += alturas[aluno]
print(soma_alturas)
media = soma_alturas / 30
for aluno in range(30):
    if idades[aluno] > 13 and alturas[aluno] < media:
        contador += 1
print("A média da altura dos aluno é:", media, "E somente ", contador, "alunos com mais de 13 anos possivel altura menor que a média.")
| true |
8a66380a6843f9ce15e430554e771187e616457d | Python | 7Cx0/udacity101 | /lesson1/26.py | UTF-8 | 285 | 2.84375 | 3 | [] | no_license | speed_of_light = 299792458 #meters per second
# Distance light travels during one clock cycle of a 2.7 GHz processor,
# printed in centimeters. (Converted from Python 2 `print` statements,
# which are a SyntaxError under Python 3.)
cycles_per_second = 2700000000.  # 2.7 GHz
cycle_distance = speed_of_light / cycles_per_second
print(cycle_distance * 100)

# Same calculation for a 2.8 GHz processor.
# NOTE(review): unlike above, this value is printed in meters (no *100) —
# confirm the unit difference is intentional.
cycles_per_second = 2800000000.  # 2.8 GHz
cycle_distance = speed_of_light / cycles_per_second
print(cycle_distance)
| true |
3f4c0cfe353a1cb7a7045e8587499ac5377f2436 | Python | pollyanarocha416/desafio-gitHub | /logica-prog-ecencial/Concatenação.py | UTF-8 | 155 | 3.75 | 4 | [] | no_license | text1 = input('digite seu nome: ')
text2 = input('digite seu sobre nome: ')
# Join with a space so first and last name do not run together
# (the plain concatenation produced e.g. "JoaoSilva").
phrase = text1 + ' ' + text2
print('seu nome e sobre nome e: ')
print(phrase)
| true |
3d1c2c2c2976bfff1ea7bc6d1e65d7ceba3c453f | Python | ItsNewe/py-sheet | /sheet.py | UTF-8 | 23,426 | 3.984375 | 4 | [] | no_license | # -*- coding:utf-8 -*-
#################################
# FEUILLE DE REVISION DE PYTHON #
# PAR NEWE #
# https://github.com/itsnewe #
#################################
# Basé sur plusieurs tutoriels, mais notamment #
# https://openclassrooms.com/courses/apprenez-a-programmer-en-python/ #
# # # /!\ CETTE FICHE N'EST PAS UN TUTO, SIMPLEMENT UN MEMO /!\ # # #
***********************************************************
##### #
# Pour trouver des informations sur un certain élément #
# Utiliser CTRL+F et chercher le nom de cet élément #
# (par exemple "listes" ou encore "fonctions") #
# #####
'''
Ce code n'est pas à exécuter, juste à lire
">>>" en début de ligne montre ce que le code en question afficherait dans la console
'''
248 = WIP
############################################################################
## MODULES
import random #Importe le module entier
from math import pi, sqrt #Préférable si on a besoin que de certaines fonctions d'un module
from math import sqrt as square_root #Importe sqrt() sous un autre nom
## BASES
print("meme")
5+4-3
2*(3+4)
str1="Ceci est une string" #On peut utiliser aussi bien "" que ''
str2='Il faut faire attention aux apostrophes avec ces délimitations, il faut les échapper comme c\'est montré ici'
str3="Première ligne\nDeuxième ligne" #\n signifie "newline", cela va aller à la ligne
#Ceci est un commentaire, c'est un bout de texte qui ne sera pas interprété par Python
"""Ceci est un commentaire sur plusieurs lignes,
quand on les utilise au début d'une fonction pour expliquer son fonctionnement, on appelle ça une docstring
On peut utiliser les deux types d'apostrophes comme pour une string"""
input("Entrer une valeur") #Print le texte entre parenthèses dans la console et enregistre la valeur
#qu'on tape comme valeur de la variable
#A noter que la fonction input() prend une string, pour obtenir un autre type, il faut convertir.
print(int("2")+3) #Convertit 2 en int et effectue l'opération (sans la conversion, une erreur surviendrait)
value = float(input("Entrez un chiffre à virgule: ")) #Définit la valeur donnée comme la valeur de la variable, de type float
#Les variables, comme tout autre objet, doivent être créées avant de pouvoir être appellées
str1 = "Bonjour"
del str1 #Supprime la var str1
print(str1) #Erreur, vu que str1 à été supprimée
##LES BOOLS (LOGIQUE BOOLEENNE)
"hello" == "hello" # == est un bool, qui renvoie True ou False (attention aux maj)
#/!\ "=" est un assignement tandis que "==" est un bool
var1 != var2 #Un autre type de bool, qui renvoie True si var1 n'est pas égal à var2 et inversement
'''
Il existe d'autres type de comparateurs (bool):
> : Renvoie True si var1 est plus grand que var2
< : Renvoie True si var1 est plus petit que var2
>= : Renvoie True si var1 est supérieur ou égal à var2
<= : Renvoie True si var1 est inférieur ou égal à var2
'''
##BOUCLES (LOOPS)
##LA BOUCLE IF
'''Les boucles en python n'utilisent pas {}, py utilise l'identation (tabs) et les ":" '''
if 10 > 5: #En py, les parenthèses pour définir les variables d'une boucle sont optionelles
print("10 est plus grand que 5") #(On peut écrire if(var1 == var2): comme on peut écrire if var1==var2:)
else:
if 5>10: #Les boucles if/else peuvent être nestés indéfiniment (nesté = boucle dans une boucle)
print("Uhm..")
else:
print("J'ai beugé")
num = 7
if num == 5:
print("Le nombre est 5")
elif num == 11: #Utiliser "elif" au lieu de succéder les if() est plus pratique
print("Le nombre est 11")
elif num == 7:
print("Le nombre est 7")
else:
    print("Le nombre n'est ni 5, ni 11, ni 7")
#On peut nester les boucles if comme dans n'importe quel langage
num = 12
if num > 5:
print("Plus grand que 5")
if num <=47:
print("entre 5 et 47")
'''
py utilise le même ordre de priorité qu'en maths
(Les parenthèses en priorités, puis "*" & "/", et enfin "+" & "-")
'''
'''
py utilise des mots pour la logique booléenne là ou d'autres langages
utilisent "&&", "||", etc...
Ces mots sont:
and = (Si les deux sont True; renvoie True)
or = (Si au moins 1 arg est True; renvoie True)
not = (Prend seulement 1 arg. Si la var est True; renvoie False et inversement)
'''
>>> False == False or True # "==" passe avant le "or"
True
>>> False == (False or True) #Comme en maths, les parenthèses sont prioritaires
False
>>> (False == False) or True
True
##LA BOUCLE WHILE
'''Une boucle while effectue l'action définie tant que la valeur renvoie True'''
i = 1
while i <=5:
print(i) #Celle ci va compter jusqu'à 5 puis s'arrêter
i += 1 #Ne pas oublier d'incrémenter 1 pour éviter une boucle infinie
while 1==1:
print("Vers l'infini et au delà") #Cette boucle est une boucle infinie, car sa valeur restera toujours True
i = 0
while 1==1:
print(i)
i +=1
if i >= 5:
print("On sort de la loop")
break #Pour sortir d'une boucle, on utilise "break"
i = 0
while True:
i += 1
if i == 2:
print("On passe 2")
continue #"continue" laisse la boucle s'éxecuter
if i == 5:
print("Sortie de la boucle")
break
print(i)
print("Terminé")
## LA BOUCLE FOR_IN
words = ["hello", "world", "spam", "eggs"]
for word in words: #La boucle for analyse tous les items que contient un élément
print(word + "!")
>>>hello! world! spam! eggs!
for i in range(5): #Un range peut être utilisé pour effectuer une action x fois (comme un while)
print("hello!") #La valeur s'incrémente automatiquement, pas besoin donc d'ajouter "i+=1" à la fin
## LA BOUCLE IF_IN
car = "e" # Cette boucle vérifie la présence d'un élément dans une séquence
voyelles = "aeiouyAEIOUYàâéèêëùîï" # Cela fontionne aussi avec les listes
if car in voyelles:
print(car, "est une voyelle")
##LISTES
'''Une liste est une sorte de "tiroir" qui permet de ranger différents éléments'''
words = ["Hello", "world", "!"]
#Pour naviguer dans une liste, on utilise l'indexation, qui est la position de l'élément recherché dans la liste
#/!\ LE PREMIER INDEX D'UNE LISTE, COMME TOUT AUTRE OBJET, EST 0 ET NON 1
print(words[0]) #Pour accéder au premier élément d'une liste, on utilise donc [0]
print(words[1]) #Le chiffre entre crochet détermine la position de l'élément dans l'array
print(words[2]) #Ici, "Hello"=0, "world"=1, "!"=2
number = 3
things = ["string", 0, [1, 2, number], 4.56] #Une liste peut contenir des éléments de tous types
print(things[1])
print(things[2])
print(things[2][2]) #Les listes peuvent être nestés (ceci va print 3)
nums = [1, 2, 3, 4, 5]
nums[2] = 5 #Remplace "3" par "5"
print(nums)
>>>[7, 7, 5, 7, 7]
nums = [1, 2, 3] #Les listes peuvent être concaténées, tout comme les str
print(nums + [4, 5, 6])
print(nums * 3)
words = ["spam", "egg", "spam", "sausage"]
print("spam" in words) #Cela retourne True si "spam" est trouvé dans la liste
nums = [1, 2, 3]
nums.append(4) #La méthode append() va rajouter l'argument donné à la fin de la liste
print(nums)
nums = [1, 3, 5, 2, 4]
print(len(nums)) #len() print la longueur de l'élément, en l'occurence celle de la liste
words = ["Python", "fun"]
words.insert(1, "is") #Insère l'argument à l'index choisi, en l'occurence words[1]
print(words)
>>>["Python", "is", "fun"]
'''
Il existe un tas de fonctions pour les listes, en voici quelques unes:
max(list): Renvoie l'élément de la liste ayant la plus grande valeur
min(list): Renvoie l'élément de la liste ayant la plus petite valeur
list.count(obj): Renvoie un int équivalent au nombre de fois qu'un item apparait dans la liste
list.remove(obj): Supprime un objet de la liste (Mettre en arg l'objet lui même, pas l'index)
list.reverse(): Mets la liste à l'envers
'''
##LISTES ET STRINGS
#Pour convertir une string en list, on utilise la fonction split()
str = "Hello world!"
str.split(" ")
>>>['Hello', 'world!']
'''
split() utilise le caractère donné pour couper la chaine
"Hello*world!".split("*") donnera donc le même résultat
split() possède un paramètre par défaut qui coupe aux espaces, ce qui revient
donc à ce que l'on vient de faire
'''
#Pour faire l'inverse, on utilise la fonction .join()
liste= ['Hello', 'world!']
" ".join(liste) #On "soude" tous les items de la liste avec le caractère
>>>Hello world! #donné entre eux, ici c'est un espace
#FONCTIONS UTILES POUR LES STRINGS
find(stri) #Cherche la position d'une string dans une autre
count(stri) #Compte le nombre d'occurences de stri dans la chaine
lower() #Convertit une chaine en minuscules
upper() #Convertit une chaine en majuscules
title() #Convertit en majuscule l'initiale de chaque mot
capitalize() #Convertit en majuscule la première lettre de la chaine
swapcase() #Convertit toutes les majuscules en minuscules et inversmeent
strip() #Enlève les espaces éventuels au début et à la fin de la chaine
replace(ch1, ch2) #remplace tous les cars ch1 par ch2 dans la chaine
index(ch) #trouve l'index de la première occurence de ch dans la chaine
##LES RANGES
numbers = list(range(10)) #range crée une liste séquentielle de chiffres
print(numbers)
>>>[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
numbers = list(range(3, 8)) #range les chiffres entre 3 et 8 (2 args)
print(numbers)
range(20) == range(0, 20)
numbers = list(range(5, 20, 2)) #range peut avoir un 3eme arg, qui détermine l'intervalle de la séquence
print(numbers)
>>>
##COMPREHENSIONS DE LISTES
'''
Les compréhensions de liste sont un moyen de filtrer ou modifier une liste très simplement.
'''
liste_origine = [0, 1, 2, 3, 4, 5]
[nb**2 for nb in liste_origine if nb %2==0] #Met au carré chaque élément "nb" trouvé dans "liste_origine"
>>>[0, 1, 4, 9, 16, 25] ----------- #On peut ajouter une condition comme vu en fin de ligne avec le "if"
#FRAGMENTS DE CHAINES
ch = "test"
print(ch[n,m)] #Le n^è caractère est inclus mais pas le m^è
print(ch[0:3)]
>>>Jul
print(ch[:3])
>>>Jul
print(ch[3:])
>>>iette
##LES DICTIONNAIRES
monDict = dict() #Deux façons de créer un dictionnaire
monDict = {}
monDict["id"] = "testest" #On insère un élément similairement à comme pour une liste
monDict["mdp"] = "*"
monDict
>>> {'mdp': '*', 'id': 'testest'}
monDict["id"]="abcde" #Comme pour les vars, la valeur a été remplacée
print(monDict["id"]) #Accès a la valeur de la clé
placard = {"chemise":3, "pantalon":6, "tee-shirt":7}#On peut créér des disctionnaires pré remplis
del placard["chemise"] #On paut supprimer une clé et sa valeur de 2 façons
placard.pop("chemise") #La différence est qu'avec pop, la valeur supprimée est retournée
>>> 3
#PARCOURS DU DICTIONNAIRE
fruits = {"pommes":21, "melons":3, "poires":31}
for cle in fruits.keys(): #Afficher les clés
print(cle)
>>>pommes
>>>melons
>>>poires
for valeur in fruits.values(): #Afficher les valeurs
print(valeur)
>>> >>> 3
>>> 31
for cle, valeur in fruits.items(): #Afficher la clé et sa valeur
print("La clé {} contient la valeur {}.".format(cle, valeur))
>>> La clé melons contient la valeur 3.
>>> La clé poires contient la valeur 31.
>>> La clé pommes contient la valeur 21.
#RECUPERER DES PARAMETRES NOMMES DANS UN DICT
def fonction_inconnue(*en_liste, **en_dictionnaire): #Cette fonction permettra de capturer tous types de paramètres, nommés ou non
#On peut aussi utiliser un dictionnaire en tant que conteneurde paramètres nommés
parametres = {"sep":" >> ", "end":" -\n"}
print("Voici", "un", "exemple", "d'appel", **parametres)
>>> Voici >> un >> exemple >> d'appel
#248
#FONCTIONS
def ma_fonc(): #def créé une fonction
print("spam")
print("spam")
print("spam")
#Les fonctions doivent être créés avant de pouvoir être appelées (comme les variables)
ma_fonc() #Appelle la fonction
def print_avec_exclamation(word): #Les fonctions peuvent prendre des arguments
print(word + "!")
#Des fonctions avec un nom court sont préférables, ce nom est un très mauvais exemple
print_avec_exclamation("spam") #La valeur donnée prend la place de la variable word de la fonction
print_avec_exclamation("eggs")
print_avec_exclamation("python")
excla = print_avec_exclamation #Les foncs peuvent être assignées à une variable comme tout autre objet
print(excla("meme"))
def somme_deux_fois(x, y): #Elles peuvent prendre plusieurs arguments
print(x + y)
print(x + y)
somme_deux_fois(5, 8) #Ici, 5=x et 8=y, à cause de leur position
def max(x, y):
if x >=y:
return x #Si la valeur du if est True, max renvoie la valeur x
else:
return y #return met fin à la fonction, tout code présent aprèssera ignoré
print("Je ne serais jamais éxecuté")
print(max(4, 7))
z = max(8, 5)
print(z)
def add(x, y=0): #On peut assigner une valeur par défaut à une variable, qui sera utilisée si
return x + y #aucune valeur n'est donnée lors de l'appel, dans le cas contraire on utilise celle de l'appel
print(add(1)) #1+0
>>>1
def deux_fois(func, x, y): #Une fonc peut être utilisée comme arg
return func(func(x, y), func(x, y))
a = 5
b = 10
print(deux_fois(add, a, b)) #Les foncs peuvent aussi être utilisées en tant qu'arguments
value = random.randint(1, 6) #Ceci est un appel à la fonction randint du module random, importé tout en haut
def lister(*args): #Un arg précédé indique un nombre indéfini d'arguments, ils peuvent donc être infinis
print("J'ai reçu {0}".format("args"))
#On peut utiliser une liste en arguments d'une fonction
liste_test = [1, 2, 3, 4]
print(lister(*liste_test))
>>>"J'ai reçu 1 2 3 4"
#EXCEPTIONS
try: #Tente d'éxécuter le code
num1 = 7
num2 = 0
print (num1 / num2)
print("Calcul terminé")
#Si une erreur survient lors de l'éxecution, les blocks except vont s'éxecuter
except ZeroDivisionError: #Si l'erreur est une erreur de type ZeroDivisionError (div par 0), se block s'éxécute
print("Une erreur est survenue\nCause: Division par zéro")
except (ValueError, TypeError): #On peut utiliser un même block pour plusieurstypes d'erreurs
print("Une erreur de valeur ou de type est survenue") #And have mutliple errors to handle
except: #Un block except sans arg s'occupe de toutes les erreurs (ou celles qui ne sont pas égales aux blocks précédents)
print("Une erreur est survenue")
finally: #Un block qui s'éxecute peu importe si il y a eu une erreur ou non
print("Ce code va s'éxecuter peu importe ce qui se passe avant")
raise ValueError("ceci est un test") #Ceci déclenche une erreur de type ValueError
#On peut donner des infos sur l'exception en les mettant en arguments
try:
num = 5 / 0
except:
print("Une erreur est survenue")
raise #Raise sans arg va re-déclencher la dernière erreur qui s'est produite
>>>Une erreur est survenue
ZeroDivisionError: division by zero
##FICHIERS
fichier = open("filename.txt") #On ouvre un fichier en vue de le lire ou l'éditer
'''
On peut spécifier le mode d'ouverture d'un fichier en ajoutant un second argument à la fonction open()
"r" = read mode; mode lecture. C'est le mode par défaut.
"w" = write mode; mode écriture. Supprime tout le contenu d'un fichier pour réecrire dessus.
"a" = append mode; mode ajout. Ajoute le texte donné après les données existantes.
Ajouter "b" à un mode (rb, wb) ouvre le fichier en mode binaire,
utile pour les fichier non texte (comme les images and fichiers son).
'''
fichier2 = open("afile.txt", "w")
# Manipulations avec le fichier
fichier2.close() #Lorsqu'on en a fini avec le fichier, on doit le fermer
fichier = open("filename.txt", "r") #Lire des fichiers
cont = file.read() #Cont == le contenu entier du fichier
print(cont)
fichier.close()
fichier = open("filename.txt", "r")
print(fichier.read(16)) #WOn peut passer le nombre d'octets du fichier qu'on souhaite lire
print(fichier.read(4)) #+ d'appels = + du fichier lu tranche d'octets par tranche d'octets
print(fichier.read(4))
print(fichier.read()) #Print le reste du fichier
fichier.close() #Si on tente de lire le fichier après avoir atteint la fin, on a une string vide
autrefichier = open("newfile.txt", "w") #Le mode "w" crée un nouveau fichier si il n'existe pas
autrefichier.write("This has been written to a file") #On écrit dans le fichier
autrefichier.close()
#Quand on ouvre un fichier en mode write, tout le contenu existant précédemment est supprimé
'''
Il est de bonne mesure de fermer le fichier après qu'n ai fini de l'utiliser.
Une bonne façon de faire cela est d'utiliser try et finally.
Cela nous assure que le fichier sera fermé, même si une erreur survient.
'''
try:
f = open("filename.txt")
print(f.read())
finally:
f.close()
'''
Une autre façon de le faire est d'utiliser des boucles with
Cela crééra une variable temporaire qui est accessible
seulement a l'intérieur de la boucle.
Le fichier est automatiquement fermé à la fin de la boucle,
même si des exceptions surviennent.
'''
with open("filename.txt") as f:
print(f.read())
#ECRITURE/LECTURE EN OCTETS (BYTES)
chaine = "Amélie et Eugène\n"
of =open("test.txt", "rb")
octets =of.read()
of.close()
type(octets)
>>> <class 'bytes'>
print(octets)
>>> b'Am\xc3\xa9lie et Eug\xc3\xa8ne\n'
#ENREGISTRER DES OBJETS DANS UN FICHIER AVEC PICKLE
import pickle #Ce module permet d'enregistrer et de restituer des objets dans et depuis un fichier texte
score = {
"joueur 1": 5,
"joueur 2": 35,
"joueur 3": 20,
"joueur 4": 2,
}
with open('donnees', 'wb') as fichier: #Sauvegarde d'un objet dans un fichier
monPickler = pickle.Pickler(fichier)
monPickler.dump(score)
with open('donnees', 'rb') as fichier: #Lecture de l'objet contenu dans le fichier
monDepickler = pickle.Unpickler(fichier)
scoreRecup = mon_depickler.load()
#ENCODAGE & DECODAGE
#On reprend le résultat en octets de la partie "ECRITURE/LECTURE EN OCTETS" = ch_car
ch_car = octets.decode("utf8")
ch_car
>>> 'Amélie et Eugène\n'
type(ch_car)
>>> <class 'str'>
#Pour encoder une string dans un certain codec, on utilise la fonction .encode()
chaine = "Bonne fête de Noël"
octets_u = chaine.encode("Utf-8")
octets_u
>>>b'Bonne f\xc3\xaate de No\xc3\xabl'
#Lors de l'ouverture d'un fichier, Python utilise automatiquement le codec par défaut du système
#Des exceptions peuvent survenir au cas ou on tente d'ouvrir un fichier qui n'est pas encodé dans le codec standard
#Dans ce cas, on obtient une exception dans la csl
#On peut spécifier un codec a utiliser dans la fonction open()
fichier =open("test.txt", "r", encoding ="Latin-1")
#ACCES A N'IMPORTE QUEL CARACTERE UNICODE
ord(ch) #Renvoie l'identifiant unicode du caractère ch
chr(num) #Renvoie le caractère pour l'identifiant num Unicode spécifié
#TUPLES
#Les tuples sont des objets immutables; c'est à dire que une fois qu'ils sont créés, on ne peut plus les modifier
#On les utilise rarement mais Python les utilise en fond pour effectuer différentes actions, e.g l'échange de valeurs entre 2 variables
tuple_vide = ()
tuple_non_vide = (1,) #Est équivalent à ci dessous
tuple_non_vide = 1, #Attention à la virgule, sans elle ce serait un int
tuple_avec_plusieurs_valeurs = (1, 2, 5)
#CLASSES
class personne(): #Création d'une classe "personne"
def __init__(self, nom, age): #constructeur de la classe
self.prenom=nom #self fait référence à l'objet qu'on est en train de créer
self.age=age
self.sexe="M" #On peut hardcoder une valeur
self._lieu_residence="Paris" #Par convention, on n'accède pas à un attribut commencant par "_" en dehors de la classe
##PROPRIETES DE CLASSE
def _get_lieu_residence(self): #Méthode qui sera appelée quand on souhaitera accéder en lecture à l'attribut 'lieu_residence'
#Même règle que pour les attributs, on n'accède pas à une méthode commencant par "_" en dehors de la classe
print("On accède à l'attribut lieu_residence !")
return self._lieu_residence
def _set_lieu_residence(self, nouvelle_residence): #Méthode appelée quand on souhaite modifier le lieu de résidence
print("Attention, il semble que {} déménage à {}.".format( \
self.prenom, nouvelle_residence))
self._lieu_residence = nouvelle_residence
# On va dire à Python que notre attribut lieu_residence pointe vers une
# propriété
lieu_residence = property(_get_lieu_residence, _set_lieu_residence) #nom_propriete = property(methode_accesseur, methode_mutateur, methode_suppression, methode_aide)
#METHODES SPECIALES
def __del__(self): #Méthode appelée quand l'objet est supprimé
print("C'est la fin ! On me supprime !")
def __repr__(self): #Méthode appellée lorsqu'on référence directement un objet, remplace "<__main__.XXX object at 0x00B46A70>"
return "Personne: nom({}), prénom({}), âge({})".format(
self.nom, self.prenom, self.age)
def __str__(self): #Méthode appellée quand on appelle notre objet dans un print()
return "{} {}, âgé de {} ans".format(
self.prenom, self.nom, self.age)
def __getattr__(self, nom):
"""Si Python ne trouve pas l'attribut nommé nom, il appelle
cette méthode. On affiche une alerte"""
print("Alerte ! Il n'y a pas d'attribut {} ici !".format(nom))
def __setattr__(self, nom_attr, val_attr):
"""Méthode appelée quand on fait objet.nom_attr = val_attr.
On se charge d'enregistrer l'objet"""
object.__setattr__(self, nom_attr, val_attr)
        self.enregistrer()
def __delattr__(self, nom_attr):
"""On ne peut supprimer d'attribut, on lève l'exception
AttributeError"""
raise AttributeError("Vous ne pouvez supprimer aucun attribut de cette classe")
jean = personne("jean", 69)
jean.age()
>>>69
jean.age = 420 #Redéfinition
jean.age
>>>420
class Compteur:
"""Cette classe possède un attribut de classe qui s'incrémente à chaque
fois que l'on crée un objet de ce type"""
objets_crees = 0 # Le compteur vaut 0 au départ
def __init__(self):
self.compte=0
        Compteur.objets_crees += 1 # À chaque fois qu'on crée un objet, on incrémente le compteur
def reinit(self): #Méthode d'objet
self.compte=0
def combien(cls): #Méthode de classe affichant combien d'objets ont été créés
print("Jusqu'à présent, {} objets ont été créés.".format(
cls.objets_crees))
Compteur.objets_crees
>>>0
a = Compteur() # On crée un premier objet
Compteur.objets_crees
>>>1
b = Compteur()
Compteur.objets_crees
>>>2
class Test:
def afficher(): #Fonction statique: ne prend aucun premier argument
print("On affiche la même chose.")
print("peu importe les données de l'objet ou de la classe.")
afficher = staticmethod(afficher)
dir(Test) #Renvoie une liste de toutes les méthodes et attributs liés à l'objet
#PROPRIETES DE CLASSES
| true |
8b324d04a47cc0f4a40cc322efd9913b40e83ffc | Python | JagritiG/interview-questions-answers-python | /code/set_2_linkedlist/2_add_two_numbers.py | UTF-8 | 2,749 | 4.4375 | 4 | [] | no_license | # You are given two non-empty linked lists representing two non-negative integers.
# The digits are stored in reverse order and each of their nodes contain a single digit.
# Add the two numbers and return it as a linked list.
# Explanation: 342 + 465 = 807
# Input: (2 -> 4 -> 3) + (5 -> 6 -> 4)
# Output: 7 -> 0 -> 8
# ======================================================================================
# Algorithm:
# linked list
# TC:
# SC:
# ========================================================================================
class SllNode:
    """A singly linked list node holding one digit and a pointer to the next node."""

    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

    def __repr__(self):
        """Printable representation: just the stored value."""
        return str(self.val)
# method-1
# method-1
def add_two_numbers(l1, l2):
    """Add two non-negative integers stored as reversed-digit linked lists.

    Example: (2 -> 4 -> 3) + (5 -> 6 -> 4) represents 342 + 465 = 807,
    returned as (7 -> 0 -> 8).

    Returns the head node of a new list holding the sum, least significant
    digit first.
    """
    dummy = SllNode()  # placeholder head; the result starts at dummy.next
    curr = dummy
    carry = 0
    # Walk both lists in lockstep; `carry` doubles as the running digit sum.
    while l1 or l2 or carry:
        if l1:
            carry += l1.val
            l1 = l1.next
        if l2:
            carry += l2.val
            l2 = l2.next
        curr.next = SllNode(carry % 10)
        curr = curr.next
        carry = carry // 10
    # Bug fix: the original printed res.next.next.next unconditionally, which
    # raised AttributeError whenever the sum had fewer than three digits;
    # those hard-coded debug prints were removed.
    return dummy.next
# Method-2
def add_two_numbers_2(l1, l2):
res = SllNode(0)
curr = res
carry = 0
while l1 or l2:
if not l1:
i = 0
else:
i = l1.val
if not l2:
j = 0
else:
j = l2.val
lists_sum = i + j + carry
if lists_sum >= 10:
remainder = lists_sum % 10
curr.next = SllNode(remainder)
carry = 1
else:
curr.next = SllNode(lists_sum)
carry = 0
curr = curr.next
if l1:
l1 = l1.next
if l2:
l2 = l2.next
if carry > 0:
curr.next = SllNode(carry)
print(str(res.next) + " -> " + str(res.next.next) + " -> " + str(res.next.next.next))
print([res.next, res.next.next, res.next.next.next])
return res.next
if __name__ == "__main__":
    # Build 342 as the reversed-digit list 3 -> 4 -> 2.
    node1 = SllNode(3)
    node1.next = SllNode(4)
    node1.next.next = SllNode(2)
    # Build 465 as the reversed-digit list 4 -> 6 -> 5.
    node2 = SllNode(4)
    node2.next = SllNode(6)
    node2.next.next = SllNode(5)
    print("Input Lists:")
    print([node1, node1.next, node1.next.next])
    print([node2, node2.next, node2.next.next])
    print("\n")
    print("Output:")
    # Both implementations should agree: 342 + 465 = 807 -> (7 -> 0 -> 8).
    print(add_two_numbers(node1, node2))
    print(add_two_numbers_2(node1, node2))
| true |
2a38c501ef575909c9bbc7afbd1a3d1c01c65a48 | Python | cn-uofbasel/BACnet | /21-fs-ias-lec/14-BAC-News/dependencies/07-14-logCtrl/src/logStore/appconn/chat_connection.py | UTF-8 | 1,328 | 2.890625 | 3 | [
"MIT"
] | permissive | from .connection import Function
class ChatFunction(Function):
    """Connection to the group chat to insert and output the chat elements."""

    def __init__(self):
        super(ChatFunction, self).__init__()

    def insert_chat_msg(self, cbor):
        """Add a new chat element as cbor.

        @:parameter cbor: The new cbor event to be added
        @:returns the status reported by insert_event
                  (per the base API, presumably 1 on success / -1 on error
                  -- confirm against Function.insert_event)
        """
        # Bug fix: the docstring promises a status code, but the original
        # discarded insert_event's return value. Propagate it to the caller.
        return self.insert_event(cbor)

    def get_chat_since(self, timestamp, chat_id):
        """Return all chat elements newer than *timestamp* for *chat_id*.

        @:parameter timestamp: Everything from that time on will be returned
        @:parameter chat_id: Only events with this chat id are returned
        @:returns a list of (message, timestamp) pairs if successful,
                  None if any error occurred
        """
        return self._handler.get_event_since('chat', timestamp, chat_id)

    def get_full_chat(self, chat_id):
        """Return every chat element stored for *chat_id*.

        @:parameter chat_id: Only events with this chat id are returned
        @:returns a list of all messages with the corresponding chat_id if
                  successful, None if any error occurred
        """
        return self._handler.get_all_chat_msgs('chat', chat_id)
| true |
d50a0dd67bbd6f2ab5212be7aecf50c42fffc1d0 | Python | DukeFerdinand/developer-portfolio | /api/scripts/seed_db.py | UTF-8 | 1,139 | 2.53125 | 3 | [] | no_license | from os import environ, path
from sys import argv
from json import load
from db.config import connect_db
from db.models.models import Page, PageData
# Mongo connection settings, all taken from the environment (raises
# KeyError early if any variable is missing).
c = {
    "MONGO_DB": environ["MONGO_DB"],
    "MONGO_HOST": environ["MONGO_HOST"],
    "MONGO_USR": environ["MONGO_USR"],
    "MONGO_PWD": environ["MONGO_PWD"],
    "MONGO_PORT": environ["MONGO_PORT"]
}
# Seed data loaded from a JSON file that lives next to this script.
collections = {}
with open(path.join(path.dirname(__file__), 'dev-portfolio.json')) as file:
    collections = load(file)
db = connect_db(c)
def confirm_choice(choice):
    # Interactive safety check: only an explicit 'y'/'Y' proceeds.
    return input(f'Warning! Running "{choice}" on the database will DESTROY everything. Proceed? [y/N] ').lower() == 'y'
# TODO: Dump the data into the seed when you're done with dev
# NOTE(review): argv[1] raises IndexError when the script is run without
# an argument -- consider printing usage instead.
if argv[1] == "up":
    if confirm_choice('up'):
        # Drop everything, then re-create the single front page document.
        db.drop_database(c['MONGO_DB'])
        page = Page(
            page_type="front_page",
            page_data=PageData(page_title="Doug Flynn")
        )
        page.save()
    else:
        print('Aborting')
elif argv[1] == "down":
    if confirm_choice('down'):
        # Tear-down path: drop the whole database, nothing is re-created.
        print(c['MONGO_DB'])
        db.drop_database(c['MONGO_DB'])
    else:
        print('Aborting')
c27a83f508ad572ed0018dd47595ee75b810b6ba | Python | JuncheolH01469/Nomadcoders | /파이썬으로 웹 스크래퍼 만들기/#1 Theory/1_8 - Code Challenge!/main.py | UTF-8 | 483 | 4 | 4 | [] | no_license | def plus(a, b):
return float(a) + float(b)
def minus(a, b):
return float(a) - float(b)
def times(a, b):
return float(a) * float(b)
def division(a, b):
return float(a) / float(b)
def remainder(a, b):
return float(a) % float(b)
def negation(a):
return -float(a)
def power(a, b):
return float(a) ** float(b)
print(plus(18, 7))
print(minus(18, 7))
print(times(9, 7))
print(division(18, 6))
print(remainder(18, 7))
print(negation(18))
print(power(2, 7)) | true |
29e362b9d0f4caa7d1e8d9e4f0493b7a94fafe68 | Python | ggoofie/stepic-python-trainer | /duplicates_in_list.py | UTF-8 | 927 | 3.984375 | 4 | [] | no_license | """
Напишите программу, которая принимает на вход список целых чисел и выводит на экран значения, которые повторяются в нём более одного раза.
Для решения задачи может пригодиться метод sort списка.
Формат ввода:
Одна строка с целыми числами, разделёнными пробелом.
Формат вывода:
Строка, содержащая числа, разделённые пробелом. Числа не должны повторяться, порядок вывода может быть произвольным.
Sample Input:
4 8 0 3 4 2 0 3
Sample Output:
0 3 4
"""
# Read the integers, then split them into first-time values and repeats.
values = list(map(int, input().split()))
seen, repeated = set(), set()
for value in values:
    if value in seen:
        repeated.add(value)
    else:
        seen.add(value)
# Print every value that occurred more than once (order is arbitrary).
print(*repeated)
| true |
6f84da2181eba387fe5c9e69c1c698f5622d11aa | Python | BingzhaoZhu/Hardware-fridendly-DT | /BIOCAS2019_reconstructed/model_cost.py | UTF-8 | 5,550 | 2.65625 | 3 | [] | no_license | import numpy as np
import lightgbm as lgb
def ReadTree(name, num_tree):
Trees=[]
with open(name,'r') as file:
l=file.readline().rstrip('\n')
for i in range(num_tree):
tree = {}
while not ('Tree='+str(i))==l:
if 'end of trees' in l:
return Trees
l = file.readline().rstrip('\n')
while not 'split_feature' in l:
l = file.readline().rstrip('\n')
temp=l.split('=')
split_feature=temp[1].split(' ')
tree['split_feature']=list(map(int, split_feature))
while not 'threshold' in l:
l = file.readline().rstrip('\n')
temp=l.split('=')
threshold=temp[1].split(' ')
tree['threshold']=list(map(float, threshold))
while not 'left_child' in l:
l = file.readline().rstrip('\n')
temp=l.split('=')
left_child=temp[1].split(' ')
tree['left_child']=list(map(int, left_child))
while not 'right_child' in l:
l = file.readline().rstrip('\n')
temp=l.split('=')
right_child=temp[1].split(' ')
tree['right_child']=list(map(int, right_child))
while not 'leaf_value' in l:
l = file.readline().rstrip('\n')
temp=l.split('=')
leaf_value=temp[1].split(' ')
tree['leaf_value']=list(map(float, leaf_value))
Trees.append(tree)
return Trees
def one_split(tr,teX,ind, node, cost, mask):
penalty=0
feature_idx=tr['split_feature'][node]
N=np.sum(ind,dtype=int) #np.sum(mask[ind,feature_idx])
penalty += N * cost[feature_idx]
mask[ind,feature_idx]=False
threshold=tr['threshold'][node]
left_inx = teX[:, feature_idx] <= threshold
right_inx = teX[:, feature_idx] > threshold
left_inx = left_inx * ind
right_inx = right_inx * ind
if tr['left_child'][node]>=0:
p_left=one_split(tr, teX, left_inx, tr['left_child'][node], cost, mask)
penalty+=p_left
if tr['right_child'][node]>=0:
p_right=one_split(tr, teX, right_inx, tr['right_child'][node], cost, mask)
penalty += p_right
return penalty
def cost(teX, cost, name, num_tree):
    """Total feature-extraction penalty of a saved model over samples teX.

    Note the parameter 'cost' (per-feature cost array) shadows this
    function's own name inside the body.
    Returns (total_penalty, per-feature count of samples that used the
    feature at least once, derived from the shared extraction mask).
    """
    mask=np.ones_like(teX).astype(bool)
    Tree=ReadTree(name,num_tree)
    Total_penalty=0
    for tr in Tree:
        # The mask is shared across trees, so a feature extracted once per
        # sample is not re-flagged by later trees.
        #mask = np.ones_like(teX).astype(bool)
        penalty=one_split(tr,teX, teX[:,0]>-float('inf'), 0, cost, mask)
        Total_penalty+=penalty
    return Total_penalty,len(mask[:,0])-np.count_nonzero(mask,0)
def size(name, num_tree):
    """Estimate the stored model size in kilobytes.

    Each internal node contributes two 4-byte fields plus one extra slot
    per tree (the original 2 * n_internal + 1 accounting).
    """
    trees = ReadTree(name, num_tree)
    node_count = sum(2 * len(tree['threshold']) + 1 for tree in trees)
    return node_count * 4 / 1000
def quan(line, num_bits, max_r, min_r):
    """Quantize the weights on a 'leaf_value=' model line.

    The step spans the symmetric range +/- max(|max_r|, |min_r|) with
    2**num_bits levels; each weight is snapped to the nearest multiple of
    the step and re-serialized in place.
    """
    raw_weights = [float(w) for w in line.split('=')[1].split(' ')]
    step = 2.0 * max(abs(max_r), abs(min_r)) / (2 ** num_bits)
    snapped = [str(np.round(w / step) * step) for w in raw_weights]
    return 'leaf_value=' + ' '.join(snapped) + '\n'
def change_size(line, model_size):
    """Rewrite a 'tree_sizes=' model line, adding per-tree size deltas."""
    sizes = [int(s) for s in line.split('=')[1].split(' ')]
    adjusted = [str(np.round(sizes[i] + model_size[i])) for i in range(len(sizes))]
    return 'tree_sizes=' + ' '.join(adjusted) + '\n'
def quantization(num_bits,name='model.txt'):
    """Quantize every leaf weight of 'model.txt' into 'quan_model.txt'.

    Pass 1 rewrites each 'leaf_value=' line via quan() and records how much
    each tree's byte size changed; pass 2 patches the 'tree_sizes=' header
    accordingly.  Returns the quantization step (0 for num_bits == 0, which
    just copies the file).

    NOTE(review): the 'name' parameter is never used -- 'model.txt' is
    hardcoded below.  Also, the step computed here,
    (max_r - min_r) / (2**num_bits - 1), differs from the symmetric step
    quan() recomputes internally -- confirm which is intended.
    """
    from tempfile import mkstemp
    from shutil import move
    from os import fdopen, remove
    if num_bits==0:
        # No quantization requested: emit an unmodified copy.
        from shutil import copyfile
        copyfile('model.txt', 'quan_model.txt')
        return 0
    Tree = ReadTree('model.txt', 100)
    # Global leaf-value range across all trees.
    max_r = float('-inf')
    min_r = float('inf')
    for t in Tree:
        max_r=max(max_r,max(t['leaf_value']))
        min_r = min(min_r, min(t['leaf_value']))
    step = (max_r - min_r) / (2 ** num_bits - 1)
    #print('quantization step set to', step)
    # Per-tree byte-size delta; starts at 16 (header allowance, presumably
    # matching LightGBM's size accounting -- confirm).
    model_size = np.ones(len(Tree),dtype='int')*16
    tree_ind=0
    # Pass 1: quantize leaf_value lines into a temp file.
    fh_t, abs_path_t = mkstemp()
    with fdopen(fh_t, 'w') as new_file:
        with open('model.txt') as old_file:
            for line in old_file:
                if not 'leaf_value' in line:
                    new_file.write(line)
                else:
                    l=quan(line,num_bits,max_r,min_r)
                    new_file.write(l)
                    model_size[tree_ind] += len(l)-len(line)
                    tree_ind+=1
    move(abs_path_t, 'quan_model.txt')
    # Pass 2: fix the tree_sizes header to reflect the new line lengths.
    fh, abs_path = mkstemp()
    with fdopen(fh, 'w') as new_file:
        with open('quan_model.txt') as old_file:
            for line in old_file:
                if not 'tree_sizes=' in line:
                    new_file.write(line)
                else:
                    l=change_size(line,model_size)
                    new_file.write(l)
    move(abs_path, 'quan_model.txt')
    return step
def get_leaf_weights(name):
    """Collect the leaf weights of every tree stored in a model file."""
    weights = []
    for tree in ReadTree(name, 100):
        weights.extend(tree['leaf_value'])
    return weights
| true |
3e05a62ffa255aa0677c3750f298e3f8ba005db2 | Python | jeb2162/datadog-metric-explorer | /dd_metric_explorer.py | UTF-8 | 3,659 | 2.890625 | 3 | [] | no_license | # Main file for the Datadog Metric Explore Script.
import sys
from custom_metric_data import custom_metric_usage
from datadog_account_object import datadog_account
from metric_analysis_and_export import analyze_metrics
def main(run_time_parameters):
    """Drive the metric exploration: load custom-metric usage either from a
    CSV file or from the Datadog API, then analyze/export it."""
    dd_account_object = datadog_account(run_time_parameters)
    # If file_path is not USE_API, then load the csv file
    if run_time_parameters['file_path']['value'] != 'USE_API':
        custom_metric_object = custom_metric_usage(run_time_parameters)
        custom_metric_pd = custom_metric_object.custom_metric_pd
    # If file_path is USE_API then use the api to get metric usage
    else:
        custom_metric_pd = dd_account_object.get_custom_metrics_usage()
    # NOTE(review): custom_metric_list is computed but never used.
    custom_metric_list = custom_metric_pd['metric_name'].tolist()
    analyze_metrics(custom_metric_pd, dd_account_object)
# Function to get input paramters from command line
def get_run_time_parameters():
    """Parse 'key:value' style arguments from sys.argv.

    Supported keys are api_key, app_key and file_path; api_key and app_key
    are required.  Invoking the script with 'help' anywhere in the argument
    list prints usage and exits (see help()).
    """
    # Default run_time_parameters to use if no user inputs
    run_time_parameters = {'api_key':{'value':None,'discription':'Api key for Datadog Org you whish to explore metrics for. Here is infor on DD api keys: https://docs.datadoghq.com/account_management/api-app-keys/'},
                           'app_key':{'value':None,'discription':'App key for Datadog Org you whish to explore metrics for. Here is infor on DD app keys: https://docs.datadoghq.com/account_management/api-app-keys/#application-keys'},
                           'file_path':{'value':'USE_API','discription':'File path to custom metric csv file. If left blank, the custom metrics will be pulled from the api'}}
    # Check to see if user asked for help
    help(run_time_parameters)
    # Loop through each key in the run_time_parameters to get the command line input for it
    for key in run_time_parameters:
        # Look at all items in sys.argv, all of the variables passed into script when run using: python3 load_test.py <number_of_logs_per_second> <length_of_log>
        for item in sys.argv:
            # NOTE(review): substring match -- an argument merely containing
            # the key name would also be picked up.
            index_of_key = item.find(key)
            # If key is in item, attempt to get inputed value and update the run_time_parameters
            if index_of_key != -1:
                try:
                    # find() returning -1 (no colon) becomes index 0, i.e.
                    # the whole item is taken as the value -- confirm intended.
                    index_of_colon = item.find(':') + 1
                    run_time_parameters[key]['value'] = item[index_of_colon:]
                except:
                    raise Exception('\n\nInvalid Entry for {} input\n\n'.format(key))
            # Check if the key's value is None, if so raise exception as it is a required item
            if run_time_parameters[key]['value'] == None:
                raise Exception('\n\n{} is a required input\n\n'.format(key))
    # Return the run_time_parameters
    return run_time_parameters
# Help function
# NOTE(review): this shadows the builtin help() within this module.
def help(run_time_parameters):
    """Print usage information and exit if 'help' appears in sys.argv."""
    for item in sys.argv:
        # Check if help was submitted in command line
        index_of_key = item.find('help')
        if index_of_key != -1:
            # Help was submited. So now print out helpful information on running this script
            print('\n\n\n*** Help for load_test.py ***\n')
            print('This script requires runs on python 3.7 or later\n')
            run_script_text = "Run script via: python3 dd_metric_explorer.py "
            for key in run_time_parameters:
                run_script_text = run_script_text + key + ':<' + key + '> '
            print(run_script_text + '\n')
            # Loop through each key in run_time_parameters and print out its description
            for key in run_time_parameters:
                print('\n' + key + ': ' + run_time_parameters[key]['discription'])
                print('Default Value: ' + str(run_time_parameters[key]['value']))
            print('\n\n\n')
            # After printing out help info, exit the script and return to command line
            sys.exit(0)
# Initialization function
if __name__ == '__main__':
    # Get runtime parameters
    run_time_parameters = get_run_time_parameters()
    # Call into the main function
    main(run_time_parameters)
726c7e5044e26ee8125cdedddb2012a22c0a63de | Python | orcilano/Mlib | /skiharris.py | UTF-8 | 967 | 2.671875 | 3 | [] | no_license | import numpy as np
from matplotlib import pyplot as plt
from skimage import data, img_as_float
from skimage.feature import corner_harris, corner_peaks
from imageio import imread
def harris(image, **kwargs):
    """Detect Harris corners in *image* and return their peak coordinates.

    Extra keyword arguments are forwarded to skimage's corner_peaks.
    """
    response = corner_harris(image)
    return corner_peaks(response, **kwargs)
def plot_harris_points(image, filtered_coords):
    """Overlay the detected corner points on *image* as blue dots."""
    plt.imshow(image)
    # Coordinates arrive as (row, col) pairs; matplotlib plots (x, y).
    rows, cols = np.transpose(filtered_coords)
    plt.plot(cols, rows, 'b.')
    plt.axis('off')
# display results
plt.figure(figsize=(8, 3))
# im_lena = img_as_float(data.lena())
# NOTE(review): hardcoded absolute Windows path -- this only runs on the
# author's machine; parameterize before reuse.
im_lena = np.mean(img_as_float(imread('G:/sfm_source_1024/1-video/pictures/video-010.png')), axis=2)
im_text = img_as_float(data.text())
# Left panel: corners on the video frame.
filtered_coords = harris(im_lena, min_distance=5, threshold_rel=0.02)
plt.axes([0, 0, 0.3, 0.95])
plot_harris_points(im_lena, filtered_coords)
# Right panel: corners on skimage's sample text image.
filtered_coords = harris(im_text, min_distance=4)
plt.axes([0.2, 0, 0.77, 1])
plot_harris_points(im_text, filtered_coords)
plt.show()
0cd9ca1280030db9e256647eb0376f448e650580 | Python | EricMFischer/two-sum-hash-table | /two_sum_hash_table.py | UTF-8 | 1,681 | 3.71875 | 4 | [] | no_license | '''
The goal of this problem is to implement a variant of the 2-SUM algorithm. The file contains 1
million integers, both positive and negative (there might be some repetitions). The ith row of
the file specifies the ith entry of the array.
The task is to compute the number of target values t in the interval [-10000,10000] (inclusive)
such that there are distinct numbers x and y in the input file that satisfy x + y = t. (NOTE:
ensuring distinctness requires a one-line addition to the algorithm from lecture.)
Write your numeric answer (an integer between 0 and 20001) in the space provided.
OPTIONAL CHALLENGE: If this problem is too easy for you, try implementing your own hash table for
it. For example, you could compare performance under the chaining and open addressing approaches
to resolving collisions.
'''
from multiprocessing import Pool
import time
# Global variables
H = {}
# input: target value
def find_two_sum(t_val):
    """Return 1 if two DISTINCT numbers x, y in the global table H satisfy
    x + y == t_val, else 0.

    H maps every input number to a truthy marker, so membership is O(1).
    """
    # Removed the dead 'global T_VALS' declaration (T_VALS is never defined).
    for num in H:
        complement = t_val - num
        # The module docstring requires x != y; the original missed this
        # one-line distinctness check.
        if complement != num and complement in H:
            return 1
    return 0
# input: filename, interval
# output: number of target values t in interval such that x + y = t, where x and y are distinct
# numbers in input file
def two_sum(filename, i):
    """Count target values t in the inclusive interval i = [lo, hi] for which
    some pair in *filename* sums to t.

    Loads every integer (one per line) into the shared global dict H, then
    fans the per-target checks out over a multiprocessing Pool.
    """
    global H
    with open(filename) as f_handle:
        for line in f_handle:
            # Duplicates collapse: H keys are the distinct input numbers.
            H[int(line)] = 1
    pool = Pool()
    result = pool.map(find_two_sum, list(range(i[0], i[1] + 1)))
    return sum(result)
def main():
    """Run the 2-SUM count over [-10000, 10000] and report timing."""
    start = time.time()
    interval = [-10000, 10000]
    # interval = [3, 1000000]
    result = two_sum('two_sum_hash_table.txt', interval)
    print('result: ', result)
    print('elapsed time: ', time.time() - start)
# NOTE(review): main() runs at import time without an
# `if __name__ == '__main__'` guard; with multiprocessing this breaks on
# platforms that use the spawn start method (e.g. Windows).
main()
c8236be47591a4d00fb9362dd659fc3d9b0eef5b | Python | wuwei23/SpiderLearn | /Spiderlearn/回顾python编程/进程间通信Pipe.py | UTF-8 | 1,156 | 3.140625 | 3 | [] | no_license | import multiprocessing
import random
import os,time
# Pipe() returns (conn1, conn2), the two ends of a pipe. Pipe takes a
# duplex parameter: if duplex is True (the default) the pipe is full
# duplex; if duplex is False, conn1 can only receive messages and conn2
# can only send them. send and recv are the methods for sending and
# receiving messages; recv blocks until a message is available, and
# raises EOFError once the pipe has been closed.
def proc_send(pipe,urls):
    """Send each url through the pipe end, pausing randomly between sends."""
    for url in urls:
        print('Process(%s) send: %s' % (os.getpid(),url))
        pipe.send(url)
        time.sleep(random.random())  # random float in [0, 1)
def proc_recv(pipe):
    """Print messages from the pipe forever (recv blocks when idle).

    Note: there is no exit condition, so this process never terminates
    on its own.
    """
    while True:
        print('Process(%s) rev:%s' % (os.getpid(),pipe.recv()))
        time.sleep(random.random())
if __name__ == "__main__":
    pipe = multiprocessing.Pipe()  # create a pipe (two connection ends)
    # p1 sends ten demo urls through one end ...
    p1 = multiprocessing.Process(target=proc_send,args=(pipe[0],
                                        ['url_'+str(i) for i in range(10)]))
    # ... while p2 receives on the other end.
    p2 = multiprocessing.Process(target=proc_recv,args=(pipe[1],))
    p1.start()
    p2.start()
    p1.join()
    # NOTE(review): proc_recv loops forever, so this join blocks
    # indefinitely; the script must be interrupted manually.
    p2.join()
d3b658d4ba57440deb49dc8dbbfcf60f3f371e5e | Python | jonathanthen/INFO1110-and-DATA1002-CodeDump | /wk5stringsearch2.py | UTF-8 | 725 | 4.03125 | 4 | [] | no_license | def starts_with(word, chs):
if word == "":
return False
elif len(chs) == 0:
return False
else:
i = 0
while i < len(chs):
if word.startswith(chs[i]) == True:
return True
i += 1
return False
# You can put the function you made in part 1 here;
# It might be helpful when making your search() function!
def search(words, start_chs):
    """Return the words that begin with any of the given prefixes.

    Either input being empty yields an empty list.
    """
    if not words or not start_chs:
        return []
    return [word for word in words if starts_with(word, start_chs)]
ba979d194eaef61c2821a6a30843c0d6242298f6 | Python | jaymgonzalez/python-crash-course-exercises | /file_system_exceptions.py | UTF-8 | 4,551 | 3.796875 | 4 | [] | no_license | # read from file
import json
filename = 'pi.txt'
# # with info outside the block
# with open(filename) as file_object:
# lines = file_object.readlines()
# for line in lines:
# print(line.rstrip() * 3)
# with infor within the block
with open(filename) as file_object:
for line in file_object:
print(line.rstrip() * 3)
# Writting to empty file
filename = 'programming.txt'
# with open(filename, 'w') as file_object:
# file_object.write('programming is coooool!\n')
# int(file_object.write('2345678\n'))
# Wrtting to an existing file
with open(filename, 'a') as file_object:
file_object.write('I love creating apps that run in the browser!\n')
file_object.write('And make sense of large data sets\n')
# Program that writes a log of the user
filename = 'guest.txt'
name = input('Please enter your name: ')
with open(filename, 'w') as file_object:
file_object.write(name)
# Guest Book
filename = 'guest_book.txt'
print('input "q" to exit at any time')
while True:
name = input('Please enter your name. ')
if name == 'q':
break
print(f'Welcome {name}, you\'ll be added to our guest book')
with open(filename, 'a') as file_object:
file_object.write(f'{name}\n')
# programming reasons
filename = 'reasons.txt'
print('input "q" to exit at any time')
while True:
reason = input('Please enter the reason you like programming. ')
if reason == 'q':
break
with open(filename, 'a') as file_object:
file_object.write(f'{reason}\n')
# Handling exceptions
# ZeroDivisionError
try:
print(5/0)
except ZeroDivisionError:
print('You cannot divide by zero!')
# FileNotFoundError
filename = 'sir_edwin.txt'
def count_words(filename):
    '''Print an approximate word count for *filename*.

    Missing files are skipped silently so a batch run over several books
    is not interrupted by one absent file.
    '''
    try:
        with open(filename) as file_object:
            content = file_object.read()
    except FileNotFoundError:
        # Deliberate: fail silently on absent files.
        pass
    else:
        words = content.split()
        num_words = len(words)
        # Bug fix: the original printed the literal "(unknown)" instead of
        # interpolating the filename into the message.
        print(f'The file {filename} has approximately {num_words} words.')
filenames = ['sir_edwin.txt', 'siddartha.txt', 'letters.txt', 'studies.txt']
for book in filenames:
count_words(book)
# ValueError exception
num_1 = input('Please enter first number to add. ')
num_2 = input('Please enter second number to add. ')
try:
int(num_1)
int(num_2)
except ValueError:
print('Please provide a valid number')
else:
addition = int(num_1) + int(num_2)
print(addition)
# Calculator
print('This is an adding calculator, keep inputing numbers to add...')
print('to stop add the letter "q"')
temp = 0
while True:
num = input('Please enter a number to add. ')
if num == 'q':
break
try:
num = int(num)
except ValueError:
print('Please provide a valid number')
else:
result = temp + num
print(f'The sum is {result}')
temp = result
# Count occurences
def count_occurences(filename, word):
    '''Print how many times *word* occurs in *filename*, case-insensitively.

    Note: str.count matches substrings, so 'and' also matches inside
    'band'.  Missing files are skipped silently.
    '''
    try:
        with open(filename) as file_object:
            content = file_object.read()
    except FileNotFoundError:
        # Deliberate: fail silently on absent files.
        pass
    else:
        count = content.lower().count(word)
        # Bug fix: the original printed the literal "(unknown)" instead of
        # interpolating the filename into the message.
        print(
            f'The file \'{filename}\' repeats the word {word} {count} times.')
count_occurences('letters.txt', 'and')
# Storing data
numbers = [2, 3, 5, 7, 11, 13]
filename = 'numbers.json'
# # Create file and add info to it
# with open(filename, 'w') as file_object:
# json.dump(numbers, file_object)
# Retrieve info from file
with open(filename) as file_object:
numbers = json.load(file_object)
print(numbers)
# Favourite Number
def find_fav_num():
    """Prompt for a favourite number and persist it to fav_num.json.

    Note the value is stored exactly as typed (a string, not an int).
    """
    fav_num = input('Please tell me your favourite number. ')
    filename = 'fav_num.json'
    with open(filename, 'w') as file_object:
        json.dump(fav_num, file_object)
    print(
        f'Your fav num {fav_num} has been stored, I\'ll remember it next time you come')
def print_fav_num():
    """Load the stored favourite number from fav_num.json.

    Returns the stored value, or None when no file has been written yet.
    """
    try:
        with open('fav_num.json') as stored:
            return json.load(stored)
    except FileNotFoundError:
        return None
def fav_num():
    """Report the remembered favourite number, or prompt for one if absent."""
    # Local name shadows the function name; it holds the loaded value.
    fav_num = print_fav_num()
    # NOTE(review): truthiness check means a stored falsy value such as ''
    # would re-prompt instead of being reported.
    if fav_num:
        print(f'I know your favourite number, it\'s {fav_num}')
    else:
        find_fav_num()
fav_num()
| true |
0a17c3b60349322f1ae9fc4aecfb56b3d816400a | Python | camilok14/mummy_simulation | /util.py | UTF-8 | 981 | 3.125 | 3 | [] | no_license | from numpy.random import normal, uniform
from random import sample, random
def get_random_attributes(dist, size) -> list:
    """Draw a (3, size) array of random attribute values.

    Parameters
    ----------
    dist : string
        'normal' draws from N(0.5, 0.1); any other value draws uniformly
        from [0, 1).
    size : int
        Number of samples per attribute row.
    """
    shape = (3, size)
    if dist == 'normal':
        return normal(0.5, 0.1, shape)
    return uniform(0.0, 1.0, shape)
def get_random_ids(size) -> list:
    """Return *size* unique ids shuffled from 1..size.

    0 is excluded because it is reserved for the mummy member id.
    """
    population = range(1, size + 1)
    return sample(population, size)
def get_random_number() -> float:
    """Return a uniformly distributed random float in [0, 1)."""
    return random()
76ac87c47c0614b1aeaff5eb004fabd60c1fb7a6 | Python | w893058897/pythonhomework | /hero_factory.py | UTF-8 | 521 | 3 | 3 | [] | no_license | from pythonhomework.Hero import Hero
from pythonhomework.Police import Police
from pythonhomework.Timo import Timo
class HeroFactory(Hero):
    """Simple factory that instantiates concrete heroes by name."""

    def add_hero(self, name):
        """Create and return the hero registered under *name*.

        Raises Exception for names the factory does not know.
        """
        if name == "Timo":
            return Timo()
        if name == "Police":
            return Police()
        raise Exception("该英雄不在工厂中!")
# Demo: build both heroes via the factory (each variable is rebound from
# the factory instance to the produced hero).
timo = HeroFactory()
timo = timo.add_hero("Timo")
police = HeroFactory()
police = police.add_hero("Police")
# Police fights using Timo's stats, then delivers its lines.
police.fight(timo.hp,timo.power)
police.speak_lines()
021cd556e47e83f14e8886bb85a288d6cd355955 | Python | kenny-kim2/algorithm_study | /programmers/2019_2_17/test4.py | UTF-8 | 1,263 | 4.0625 | 4 | [] | no_license | # 문제 설명
# 124 나라가 있습니다. 124 나라에서는 10진법이 아닌 다음과 같은 자신들만의 규칙으로 수를 표현합니다.
#
# 124 나라에는 자연수만 존재합니다.
# 124 나라에는 모든 수를 표현할 때 1, 2, 4만 사용합니다.
# 예를 들어서 124 나라에서 사용하는 숫자는 다음과 같이 변환됩니다.
#
# 10진법 124 나라 10진법 124 나라
# 1 1 6 14
# 2 2 7 21
# 3 4 8 22
# 4 11 9 24
# 5 12 10 41
# 자연수 n이 매개변수로 주어질 때,
# n을 124 나라에서 사용하는 숫자로 바꾼 값을 return 하도록 solution 함수를 완성해 주세요.
#
# 제한사항
# n은 500,000,000이하의 자연수 입니다.
# 입출력 예
# n result
# 1 1
# 2 2
# 3 4
# 4 11
def solution(n):
    """Convert a natural number n (n >= 1) to its '124-country' notation.

    The system is base 3 with digits {1, 2, 4} instead of {0, 1, 2}:
    n % 3 of 1, 2, 0 maps to '1', '2', '4' respectively, and the quotient
    step uses (n - 1) // 3 because there is no zero digit.
    """
    digits = ('1', '2', '4')
    parts = []
    while n > 0:
        parts.append(digits[n % 3 - 1])
        n = (n - 1) // 3
    return ''.join(reversed(parts))
# Ad-hoc test harness: inputs and their expected 124-country encodings.
arr1 = [1,2,3,4,5, 10, 13]
return_list = ['1','2','4','11', '12', '41', '111']
for i in range(len(arr1)):
    if solution(arr1[i]) == return_list[i]:
        print('case {} pass --------------'.format(str(i + 1)))
    else:
        print('case {} fail --------------'.format(str(i + 1)))
# 26 min
1912af9a32beb01d48a39f0a154985e4fa9ce58d | Python | m4Rn1tSCH/flask_api_env | /ml_code/model_data/yodlee_encoder_random_test.py | UTF-8 | 12,976 | 2.53125 | 3 | [] | no_license | '''
Yodlee dataframes encoder
FIRST STAGE: retrieve the user ID dataframe with all user IDs with given filter
dataframe called bank_df is being generated in the current work directory as CSV
SECOND STAGE: randomly pick a user ID; encode thoroughly and yield the df
THIRD STAGE: encode all columns to numerical values and store corresponding dictionaries
'''
from sklearn.preprocessing import LabelEncoder
from psycopg2 import OperationalError
import pandas as pd
pd.set_option('display.width', 1000)
import numpy as np
from datetime import datetime as dt
import matplotlib.pyplot as plt
from collections import Counter
import seaborn as sns
# FILE IMPORTS FOR FLASK
from ml_code.model_data.SQL_connection import execute_read_query, create_connection
import ml_code.model_data.PostgreSQL_credentials as acc
from ml_code.model_data.spending_report_csv_function import spending_report as create_spending_report
# FILE IMPORTS FOR NOTEBOOKS
# from SQL_connection import execute_read_query, create_connection
# import PostgreSQL_credentials as acc
# from spending_report_csv_function import spending_report as create_spending_report
def df_encoder(rng=4, spending_report=False, plots=False, include_lag_features=True):
'''
Parameters
----------
rng : int, Random Seed for user picker. The default is 4.
spending_report : bool, Save a spending report in directory if True. Default is False.
plots : bool, Plots various graphs if True. Default is False.
include_lag_features : include lag feature 'amount' to database with 3, 7, and 30 day rolls. Default is True
Returns
-------
bank_df.
'''
connection = create_connection(db_name=acc.YDB_name,
db_user=acc.YDB_user,
db_password=acc.YDB_password,
db_host=acc.YDB_host,
db_port=acc.YDB_port)
# establish connection to get user IDs for all users in MA
filter_query = f"SELECT unique_mem_id, state, city, income_class FROM user_demographic WHERE state = 'MA'"
transaction_query = execute_read_query(connection, filter_query)
query_df = pd.DataFrame(transaction_query,
columns=['unique_mem_id', 'state', 'city', 'income_class'])
# dateframe to gather bank data from one randomly chosen user
# test user 1= 4
# test user 2= 8
try:
for i in pd.Series(query_df['unique_mem_id'].unique()).sample(n=1, random_state=rng):
print(i)
filter_query = f"SELECT * FROM bank_record WHERE unique_mem_id = '{i}'"
transaction_query = execute_read_query(connection, filter_query)
bank_df = pd.DataFrame(transaction_query,
columns=['unique_mem_id', 'unique_bank_account_id', 'unique_bank_transaction_id',
'amount', 'currency', 'description', 'transaction_date', 'post_date', 'transaction_base_type',
'transaction_category_name', 'primary_merchant_name', 'secondary_merchant_name', 'city',
'state', 'zip_code', 'transaction_origin', 'factual_category', 'factual_id', 'file_created_date',
'optimized_transaction_date', 'yodlee_transaction_status', 'mcc_raw', 'mcc_inferred',
'swipe_date', 'panel_file_created_date', 'update_type', 'is_outlier', 'change_source',
'account_type', 'account_source_type', 'account_score', 'user_score', 'lag', 'is_duplicate'])
print(f"User {i} has {len(bank_df)} transactions on record.")
#all these columns are empty or almost empty and contain no viable information
bank_df = bank_df.drop(columns=['secondary_merchant_name', 'swipe_date',
'update_type', 'is_outlier', 'is_duplicate',
'change_source', 'lag', 'mcc_inferred',
'mcc_raw', 'factual_id', 'factual_category',
'zip_code', 'yodlee_transaction_status',
'file_created_date', 'panel_file_created_date',
'account_type', 'account_source_type',
'account_score'], axis=1)
except OperationalError as e:
print(f"The error '{e}' occurred")
connection.rollback
'''
Plotting of various relations
The Counter object keeps track of permutations in a dictionary which can then be read and
used as labels
'''
if plots:
# Pie chart States
state_ct = Counter(list(bank_df['state']))
# The * operator can be used in conjunction with zip() to unzip the list.
labels, values = zip(*state_ct.items())
# Pie chart, where the slices will be ordered and plotted counter-clockwise:
fig1, ax = plt.subplots(figsize=(20, 12))
ax.pie(values, labels=labels, autopct='%1.1f%%',
shadow=True, startangle=90)
# Equal aspect ratio ensures that pie is drawn as a circle.
ax.axis('equal')
#ax.title('Transaction locations of user {bank_df[unique_mem_id][0]}')
ax.legend(loc='center right')
plt.show()
# Pie chart transaction type
trans_ct = Counter(list(bank_df['transaction_category_name']))
# The * operator can be used in conjunction with zip() to unzip the list.
labels_2, values_2 = zip(*trans_ct.items())
#Pie chart, where the slices will be ordered and plotted counter-clockwise:
fig1, ax = plt.subplots(figsize=(20, 12))
ax.pie(values_2, labels=labels_2, autopct='%1.1f%%',
shadow=True, startangle=90)
# Equal aspect ratio ensures that pie is drawn as a circle.
ax.axis('equal')
#ax.title('Transaction categories of user {bank_df[unique_mem_id][0]}')
ax.legend(loc='center right')
plt.show()
'''
Generate a spending report of the unaltered dataframe
Use the datetime columns just defined
This report measures either the sum or mean of transactions happening
on various days of the week/or wihtin a week or a month over the course of the year
'''
# convert all date col from date to datetime objects
# date objects will block Select K Best if not converted
# first conversion from date to datetime objects; then conversion to unix
bank_df['post_date'] = pd.to_datetime(bank_df['post_date'])
bank_df['transaction_date'] = pd.to_datetime(bank_df['transaction_date'])
bank_df['optimized_transaction_date'] = pd.to_datetime(
bank_df['optimized_transaction_date'])
bank_df['file_created_date'] = pd.to_datetime(bank_df['file_created_date'])
bank_df['panel_file_created_date'] = pd.to_datetime(
bank_df['panel_file_created_date'])
# set optimized transaction_date as index for later
bank_df.set_index('optimized_transaction_date', drop=False, inplace=True)
# generate the spending report with the above randomly picked user ID
if spending_report:
create_spending_report(df=bank_df.copy())
'''
After successfully loading the data, columns that are of no importance have been removed and missing values replaced
Then the dataframe is ready to be encoded to get rid of all non-numerical data
'''
try:
# Include below if need unique ID's later:
# bank_df['unique_mem_id'] = bank_df['unique_mem_id'].astype(
# 'str', errors='ignore')
# bank_df['unique_bank_account_id'] = bank_df['unique_bank_account_id'].astype(
# 'str', errors='ignore')
# bank_df['unique_bank_transaction_id'] = bank_df['unique_bank_transaction_id'].astype(
# 'str', errors='ignore')
bank_df['amount'] = bank_df['amount'].astype('float64')
bank_df['transaction_base_type'] = bank_df['transaction_base_type'].replace(
to_replace=["debit", "credit"], value=[1, 0])
except (TypeError, OSError, ValueError) as e:
print(f"Problem with conversion: {e}")
# attempt to convert date objects to unix timestamps as numeric value (fl64) if they have no missing values; otherwise they are being dropped
date_features = ['post_date', 'transaction_date',
'optimized_transaction_date', 'file_created_date', 'panel_file_created_date']
try:
for feature in date_features:
if bank_df[feature].isnull().sum() == 0:
bank_df[feature] = bank_df[feature].apply(lambda x: dt.timestamp(x))
else:
bank_df = bank_df.drop(columns=feature, axis=1)
print(f"Column {feature} dropped")
except (TypeError, OSError, ValueError) as e:
print(f"Problem with conversion: {e}")
'''
The columns PRIMARY_MERCHANT_NAME; CITY, STATE, DESCRIPTION, TRANSACTION_CATEGORY_NAME, CURRENCY
are encoded manually and cleared of empty values
'''
encoding_features = ['primary_merchant_name', 'city', 'state', 'description', 'transaction_category_name', 'transaction_origin', 'currency']
UNKNOWN_TOKEN = '<unknown>'
embedding_maps = {}
for feature in encoding_features:
unique_list = bank_df[feature].unique().astype('str').tolist()
unique_list.append(UNKNOWN_TOKEN)
le = LabelEncoder()
le.fit_transform(unique_list)
embedding_maps[feature] = dict(zip(le.classes_, le.transform(le.classes_)))
# APPLICATION TO OUR DATASET
bank_df[feature] = bank_df[feature].apply(lambda x: x if x in embedding_maps[feature] else UNKNOWN_TOKEN)
bank_df[feature] = bank_df[feature].map(lambda x: le.transform([x])[0] if type(x) == str else x)
# dropping currency if there is only one
if len(bank_df['currency'].value_counts()) == 1:
bank_df = bank_df.drop(columns=['currency'], axis=1)
'''
IMPORTANT
The lagging features produce NaN for the first two rows due to unavailability
of values
NaNs need to be dropped to make scaling and selection of features working
'''
if include_lag_features:
#FEATURE ENGINEERING
#typical engineered features based on lagging metrics
#mean + stdev of past 3d/7d/30d/ + rolling volume
date_index = bank_df.index.values
bank_df.reset_index(drop=True, inplace=True)
#pick lag features to iterate through and calculate features
lag_features = ["amount"]
#set up time frames; how many days/months back/forth
t1 = 3
t2 = 7
t3 = 30
#rolling values for all columns ready to be processed
bank_df_rolled_3d = bank_df[lag_features].rolling(window=t1, min_periods=0)
bank_df_rolled_7d = bank_df[lag_features].rolling(window=t2, min_periods=0)
bank_df_rolled_30d = bank_df[lag_features].rolling(window=t3, min_periods=0)
#calculate the mean with a shifting time window
bank_df_mean_3d = bank_df_rolled_3d.mean().shift(periods=1).reset_index().astype(np.float32)
bank_df_mean_7d = bank_df_rolled_7d.mean().shift(periods=1).reset_index().astype(np.float32)
bank_df_mean_30d = bank_df_rolled_30d.mean().shift(periods=1).reset_index().astype(np.float32)
#calculate the std dev with a shifting time window
bank_df_std_3d = bank_df_rolled_3d.std().shift(periods=1).reset_index().astype(np.float32)
bank_df_std_7d = bank_df_rolled_7d.std().shift(periods=1).reset_index().astype(np.float32)
bank_df_std_30d = bank_df_rolled_30d.std().shift(periods=1).reset_index().astype(np.float32)
for feature in lag_features:
bank_df[f"{feature}_mean_lag{t1}"] = bank_df_mean_3d[feature]
bank_df[f"{feature}_mean_lag{t2}"] = bank_df_mean_7d[feature]
bank_df[f"{feature}_mean_lag{t3}"] = bank_df_mean_30d[feature]
bank_df[f"{feature}_std_lag{t1}"] = bank_df_std_3d[feature]
bank_df[f"{feature}_std_lag{t2}"] = bank_df_std_7d[feature]
bank_df[f"{feature}_std_lag{t3}"] = bank_df_std_30d[feature]
bank_df.set_index(date_index, drop=False, inplace=True)
#drop all features left with empty (NaN) values
bank_df = bank_df.dropna()
#drop user IDs to avoid overfitting with useless information
bank_df = bank_df.drop(['unique_mem_id',
'unique_bank_account_id',
'unique_bank_transaction_id'], axis=1)
if plots:
# seaborn plots
ax_desc = bank_df['description'].astype('int64', errors='ignore')
ax_amount = bank_df['amount'].astype('int64',errors='ignore')
sns.pairplot(bank_df)
sns.boxplot(x=ax_desc, y=ax_amount)
sns.heatmap(bank_df)
return bank_df
| true |
53bab15323255537b7683e52a0db417238132f9e | Python | zazuPhil/prog-1-ovn | /Uppgitf2.6-01.py | UTF-8 | 197 | 3.796875 | 4 | [] | no_license | inmatning = float(input('skriv in ett heltal: '))
# Parity check: a remainder of 1 after division by two marks an odd number.
rest = inmatning % 2
if rest != 1:
    print(f'Talet {inmatning} är jämnt.')
else:
    print(f'Talet {inmatning} är ojämnt.')
| true |
7cedf1752cf85acf4ea947268a4c80b5958f960f | Python | RodrigoZea/Miniproyecto5 | /fuzzy_logic.py | UTF-8 | 4,485 | 2.96875 | 3 | [] | no_license | from constants import *
# Membership functions for distance
def d_close(x):
    """Fuzzy membership: distance is 'close' (1.0 up to 2 units, then a linear ramp down)."""
    if x <= 2:
        return 1.0
    if x > HALF_MAX_DIST:
        return 0.0
    return 1.39439486024618 - 0.197197430123091 * x
def d_medium(x):
    """Fuzzy membership: distance is 'medium' (triangular, peaking at HALF_MAX_DIST)."""
    if x <= HALF_MAX_DIST:
        return 0.141421512474792 * x
    return 1.99999858578688 - 0.141421312474633 * x
def d_far(x):
    """Fuzzy membership: distance is 'far' (0 below half max, ramping to 1 at SCREEN_DIM)."""
    if x <= HALF_MAX_DIST:
        return 0.0
    if x > SCREEN_DIM:
        return 1.0
    return 0.341420445621966 * x - 2.41420445621966
def a_close(x):
    """Fuzzy membership: heading error is 'close' (1 below PI_6, ramping to 0 at PI_2)."""
    if x <= PI_6:
        return 1.0
    if x >= PI_2:
        return 0.0
    return 1.49999999990451 - 0.954929658365891 * x
def a_medium(x):
    """Fuzzy membership: heading error is 'medium' (triangular, peaking at PI_2)."""
    if x <= PI_2:
        return 0.636619772284456 * x
    return 2 - 0.636619772284456 * x
def a_far(x):
    """Fuzzy membership: heading error is 'far off' (0 below PI_2, ramping to 1 at PI3_2)."""
    if x <= PI_2:
        return 0.0
    if x >= PI3_2:
        return 1.0
    return 0.318309886243549 * x - 0.500000000159155
def fuzzy_loop(ball, robot):
    """One fuzzy-control step: steer `robot` toward `ball`.

    Fuzzifies the robot's distance and heading error, evaluates the 3x3
    rule table (see table above in this module), picks the strongest rule
    and applies its (speed, rotation) command. Returns (distance, angle).
    """
    dist = robot.pos.distance(ball.pos)
    to_ball = robot.pos.dir_to(ball.pos)
    angle = robot.dir.angle(to_ball)

    # fuzzification: membership degrees for distance and heading error
    dist_f = [d_close(dist), d_medium(dist), d_far(dist)]
    rot_f = [a_close(angle), a_medium(angle), a_far(angle)]

    # inference: rule (i, j) fires with strength min(dist_f[i], rot_f[j]);
    # the flattened order matches the `actions` table index-for-index
    rules = [min(d, r) for d in dist_f for r in rot_f]

    # defuzzification: the first rule with maximal firing strength wins
    best = max(range(len(rules)), key=rules.__getitem__)

    speed, rotation = actions[best]
    robot.rotate(rad=rotation, clockwise=(robot.pos.y >= ball.pos.y))
    robot.move(speed=speed)
    return dist, angle
"""
Inference Rules
Distance/
Direction | close | medium | far |
-----------------------------------------------------------------------------------|
close | slow forward | rotate and move slowly | rotate hard and move slowly |
-----------------------------------------------------------------------------------|
medium | normal forward | rotate and move normal | rotate hard and move normal |
-----------------------------------------------------------------------------------|
far | fast forward | rotate and move fast | rotate hard and move fast |
-----------------------------------------------------------------------------------|
Movement and rotation speeds
-------------------------------------------------------
| Move Slow | 0.5 u/s | No Rotate | 0 degrees |
|-----------------------|---------------|-------------|
| Move Normal | 1 u/s | Rotate Normal | 20 degrees |
------------------------|---------------|-------------|
| Move Fast | 3 u/s | Rotate Fast | 60 degrees |
|-----------------------|---------------|-------------|
"""
# (speed, rotation) command pairs, indexed by winning rule number
# (row-major over the distance x direction rule table above)
actions = [
    [MOVE_SLOW, ROT_SLOW], [MOVE_SLOW, ROT_NORMAL], [MOVE_SLOW, ROT_FAST],
    [MOVE_NORMAL, ROT_SLOW], [MOVE_NORMAL, ROT_NORMAL], [MOVE_NORMAL, ROT_FAST],
    [MOVE_FAST, ROT_SLOW], [MOVE_FAST, ROT_NORMAL], [MOVE_FAST, ROT_FAST]
]

# human-readable labels matching `actions` index-for-index (for debug output)
debug_actions = [
    'slow forward', 'rotate normal and move slowly', 'rotate hard and move slowly',
    'normal forward', 'rotate normal and move normal', 'rotate hard and move normal',
    'fast forward', 'rotate normal and move fast', 'rotate hard and move fast'
]

# per-action usage counters, all starting at zero
action_history = {label: 0 for label in debug_actions}
1d14c7be377de1a20203dc3a9e4a598d53345de9 | Python | hiddenSymmetries/simsgeo | /simsgeo/objectives.py | UTF-8 | 5,685 | 2.71875 | 3 | [] | no_license | from jax import grad, vjp
import jax.numpy as jnp
import numpy as np
from .jit import jit
@jit
def curve_length_pure(l):
    """Mean incremental arc length of the curve (the length objective value)."""
    return jnp.mean(l)
class CurveLength():
    """Objective measuring a curve's length via its mean incremental arc length."""

    def __init__(self, curve):
        self.curve = curve
        # jitted gradient of the pure objective w.r.t. the incremental arc length
        self.thisgrad = jit(lambda arclen: grad(curve_length_pure)(arclen))

    def J(self):
        """Objective value for the current curve."""
        return curve_length_pure(self.curve.incremental_arclength())

    def dJ(self):
        """Derivative w.r.t. the curve coefficients (vector-Jacobian product)."""
        return self.curve.dincremental_arclength_by_dcoeff_vjp(self.thisgrad(self.curve.incremental_arclength()))
@jit
def Lp_curvature_pure(kappa, gammadash, p, desired_kappa):
    """(1/p) * arc-length-weighted mean of max(kappa - desired_kappa, 0)**p."""
    incremental = jnp.linalg.norm(gammadash, axis=1)
    excess = jnp.maximum(kappa - desired_kappa, 0)
    return (1. / p) * jnp.mean(excess ** p * incremental)
class LpCurveCurvature():
    """Objective penalising curvature above a threshold in the L^p sense.

    If ``desired_length`` is given, the threshold curvature is that of a
    circle with this circumference; otherwise any positive curvature is
    penalised.
    """

    def __init__(self, curve, p, desired_length=None):
        self.curve = curve
        if desired_length is None:
            self.desired_kappa = 0
        else:
            # curvature of a circle whose circumference is desired_length.
            # BUG FIX: bare `pi` was undefined in this module (NameError
            # whenever desired_length was passed) -- use np.pi.
            radius = desired_length / (2 * np.pi)
            self.desired_kappa = 1 / radius
        # jitted objective and its gradients w.r.t. kappa and gammadash
        self.J_jax = jit(lambda kappa, gammadash: Lp_curvature_pure(kappa, gammadash, p, self.desired_kappa))
        self.thisgrad0 = jit(lambda kappa, gammadash: grad(self.J_jax, argnums=0)(kappa, gammadash))
        self.thisgrad1 = jit(lambda kappa, gammadash: grad(self.J_jax, argnums=1)(kappa, gammadash))

    def J(self):
        """Objective value for the current curve."""
        return self.J_jax(self.curve.kappa(), self.curve.gammadash())

    def dJ(self):
        """Derivative w.r.t. the curve coefficients, assembled from two VJPs."""
        grad0 = self.thisgrad0(self.curve.kappa(), self.curve.gammadash())
        grad1 = self.thisgrad1(self.curve.kappa(), self.curve.gammadash())
        return self.curve.dkappa_by_dcoeff_vjp(grad0) + self.curve.dgammadash_by_dcoeff_vjp(grad1)
@jit
def Lp_torsion_pure(torsion, gammadash, p):
    """(1/p) * arc-length-weighted mean of |torsion|**p."""
    incremental = jnp.linalg.norm(gammadash, axis=1)
    return (1. / p) * jnp.mean(jnp.abs(torsion) ** p * incremental)
class LpCurveTorsion():
    """Objective penalising the curve torsion in the L^p sense."""

    def __init__(self, curve, p):
        self.curve = curve
        # jitted objective and its gradients w.r.t. torsion and gammadash
        self.J_jax = jit(lambda tau, dash: Lp_torsion_pure(tau, dash, p))
        self.thisgrad0 = jit(lambda tau, dash: grad(self.J_jax, argnums=0)(tau, dash))
        self.thisgrad1 = jit(lambda tau, dash: grad(self.J_jax, argnums=1)(tau, dash))

    def J(self):
        """Objective value for the current curve."""
        return self.J_jax(self.curve.torsion(), self.curve.gammadash())

    def dJ(self):
        """Derivative w.r.t. the curve coefficients, assembled from two VJPs."""
        grad0 = self.thisgrad0(self.curve.torsion(), self.curve.gammadash())
        grad1 = self.thisgrad1(self.curve.torsion(), self.curve.gammadash())
        return self.curve.dtorsion_by_dcoeff_vjp(grad0) + self.curve.dgammadash_by_dcoeff_vjp(grad1)
def distance_pure(gamma1, l1, gamma2, l2, minimum_distance):
    """Quadratic penalty for two curves approaching closer than minimum_distance.

    Pairwise point distances below the threshold are penalised, weighted by
    the product of the quadrature weights (tangent norms) and normalised by
    the number of point pairs.
    """
    pairwise = jnp.sqrt(jnp.sum((gamma1[:, None, :] - gamma2[None, :, :])**2, axis=2))
    weights = jnp.linalg.norm(l1, axis=1) * jnp.linalg.norm(l2, axis=1)
    shortfall = jnp.maximum(minimum_distance - pairwise, 0)
    return jnp.sum(weights * shortfall**2) / (gamma1.shape[0] * gamma2.shape[0])
class MinimumDistance():
    """Penalty objective over all unordered pairs of curves that come closer
    than ``minimum_distance`` (sums ``distance_pure`` over every pair i > j)."""

    def __init__(self, curves, minimum_distance):
        self.curves = curves
        self.minimum_distance = minimum_distance
        # jitted pairwise objective and its gradients w.r.t. each of the
        # four inputs (gamma1, l1, gamma2, l2)
        self.J_jax = jit(lambda gamma1, l1, gamma2, l2: distance_pure(gamma1, l1, gamma2, l2, minimum_distance))
        self.thisgrad0 = jit(lambda gamma1, l1, gamma2, l2: grad(self.J_jax, argnums=0)(gamma1, l1, gamma2, l2))
        self.thisgrad1 = jit(lambda gamma1, l1, gamma2, l2: grad(self.J_jax, argnums=1)(gamma1, l1, gamma2, l2))
        self.thisgrad2 = jit(lambda gamma1, l1, gamma2, l2: grad(self.J_jax, argnums=2)(gamma1, l1, gamma2, l2))
        self.thisgrad3 = jit(lambda gamma1, l1, gamma2, l2: grad(self.J_jax, argnums=3)(gamma1, l1, gamma2, l2))

    def J(self):
        """Total penalty, summed over all unordered curve pairs (j < i)."""
        res = 0
        for i in range(len(self.curves)):
            gamma1 = self.curves[i].gamma()
            l1 = self.curves[i].gammadash()
            for j in range(i):
                gamma2 = self.curves[j].gamma()
                l2 = self.curves[j].gammadash()
                res += self.J_jax(gamma1, l1, gamma2, l2)
        return res

    def dJ(self):
        """Per-curve derivatives w.r.t. the curve coefficients.

        For each pair (i, j), the gradients w.r.t. the pair's four inputs are
        accumulated into per-curve vectors, which are then pushed through the
        curves' VJP maps. Returns a list with one gradient per curve.
        """
        # accumulators for the cotangent vectors, one slot per curve
        dgamma_by_dcoeff_vjp_vecs = [None for c in self.curves]
        dgammadash_by_dcoeff_vjp_vecs = [None for c in self.curves]
        for i in range(len(self.curves)):
            gamma1 = self.curves[i].gamma()
            l1 = self.curves[i].gammadash()
            for j in range(i):
                gamma2 = self.curves[j].gamma()
                l2 = self.curves[j].gammadash()
                # gradients w.r.t. curve i's position and tangent
                temp = self.thisgrad0(gamma1, l1, gamma2, l2)
                if dgamma_by_dcoeff_vjp_vecs[i] is None:
                    dgamma_by_dcoeff_vjp_vecs[i] = temp
                else:
                    dgamma_by_dcoeff_vjp_vecs[i] += temp
                temp = self.thisgrad1(gamma1, l1, gamma2, l2)
                if dgammadash_by_dcoeff_vjp_vecs[i] is None:
                    dgammadash_by_dcoeff_vjp_vecs[i] = temp
                else:
                    dgammadash_by_dcoeff_vjp_vecs[i] += temp
                # gradients w.r.t. curve j's position and tangent
                temp = self.thisgrad2(gamma1, l1, gamma2, l2)
                if dgamma_by_dcoeff_vjp_vecs[j] is None:
                    dgamma_by_dcoeff_vjp_vecs[j] = temp
                else:
                    dgamma_by_dcoeff_vjp_vecs[j] += temp
                temp = self.thisgrad3(gamma1, l1, gamma2, l2)
                if dgammadash_by_dcoeff_vjp_vecs[j] is None:
                    dgammadash_by_dcoeff_vjp_vecs[j] = temp
                else:
                    dgammadash_by_dcoeff_vjp_vecs[j] += temp

        # map the accumulated cotangents through each curve's VJPs
        res = [self.curves[i].dgamma_by_dcoeff_vjp(dgamma_by_dcoeff_vjp_vecs[i]) + self.curves[i].dgammadash_by_dcoeff_vjp(dgammadash_by_dcoeff_vjp_vecs[i]) for i in range(len(self.curves))]
        return res
| true |
4aa065eb6431511fa38f838f3fe34bdd3bc5e32b | Python | APochiero/Aeronautical-Communication-Simulation | /scripts/plotResult.py | UTF-8 | 3,239 | 2.890625 | 3 | [] | no_license | import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import math
import argparse
import seaborn as sns
matplotlib.rcParams['font.family'] = "serif"
# t values identify the result files (ResultT<t><distr>.csv) -- presumably a
# simulation scenario parameter; one curve/bar series per t value
t = [4.24, 7.42, 10.59, 13.77, 16.95]
# x-axis values (interarrival times, in seconds) present in each result file
k = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2]
# one bar colour per scenario in t
colors = ['#944654', '#08b2e3', '#9d8df1', '#57a773', '#484d6d']
plt.style.use('ggplot')
# CLI: directory of result CSVs and the interarrival-time distribution tag
parser = argparse.ArgumentParser(description='Split Data by K')
parser.add_argument(
    'dirPath', help='path of directory containing Results files')
parser.add_argument(
    'distr', help='Distribution of Interarrival time [ exp | const ]')
args = parser.parse_args()
distribution = str(args.distr)
path = str(args.dirPath)
# shared styling for the bar chart (figure 3)
barWidth = 0.035
errorBarStyle = dict(lw=0.5, capsize=2, capthick=0.5)
# One pass per scenario t[i]: read its CSV and add a series to each of the
# three shared figures (delay, queue length, waiting vs response time).
for i in range(len(t)):
    with open(path + '/ResultT' + str(t[i]) + distribution + '.csv') as file:
        df = pd.read_csv(file, header=None)
        df = df.drop(columns=[0])  # first column is dropped (not numeric data)
        nameDistr = 'Case Exponential interarrival time distribution' if distribution == 'exp' else 'Case Constant interarrival time distribution'
        print('Plotting T' + str(t[i]) + ' ' + distribution)
        # fixed row layout: each metric row is followed by its confidence interval
        responseTime = pd.to_numeric(df.iloc[5], errors='coerce')
        responseTimeCI = pd.to_numeric(df.iloc[6], errors='coerce')
        queueLength = pd.to_numeric(df.iloc[3], errors='coerce')
        queueLengthCI = pd.to_numeric(df.iloc[4], errors='coerce')
        waitingTime = pd.to_numeric(df.iloc[7], errors='coerce')
        waitingTimeCI = pd.to_numeric(df.iloc[8], errors='coerce')
        # Figure 1: end-to-end delay vs interarrival time, with CI error bars
        plt.figure(1)
        plt.errorbar(k, responseTime, yerr=responseTimeCI, fmt="--x",
                     markeredgecolor='red', linewidth=0.8, capsize=4, label='t= ' + str(t[i]))
        plt.xlabel('Interarrival Time [s]')
        plt.ylabel('Delay [s]')
        plt.ticklabel_format(axis='x', style='sci')
        plt.xticks(np.arange(0.5, 2.25, step=0.25))
        plt.title(nameDistr + '\nEnd-to-End Delay')
        plt.legend()
        plt.grid(linestyle='--')
        # Figure 2: queue length vs interarrival time, with CI error bars
        plt.figure(2)
        plt.errorbar(k, queueLength, yerr=queueLengthCI, fmt="--x",
                     markeredgecolor='red', linewidth=0.8, capsize=4, label='t= ' + str(t[i]))
        plt.xlabel('Interarrival Time [s]')
        plt.ylabel('Queue Length')
        plt.ticklabel_format(axis='x', style='sci')
        plt.xticks(np.arange(0.5, 2.25, step=0.25))
        plt.title(nameDistr + '\nQueue Length Analysis')
        plt.legend()
        plt.grid(linestyle='--')
        # Figure 3: stacked-looking bars -- response time (red) behind waiting
        # time (scenario colour); bars offset per scenario so they sit side by side
        plt.figure(3)
        x = [x - 0.04*(2-i) for x in k]
        plt.bar(x, responseTime, yerr=responseTimeCI,
                width=barWidth, error_kw=errorBarStyle, color='red')
        plt.bar(x, waitingTime, yerr=waitingTimeCI, width=barWidth,
                error_kw=errorBarStyle, label='t=' + str(t[i]), color=colors[i])
        plt.xlabel('Interarrival Time [s]')
        plt.ylabel('Time [s]')
        plt.xticks(np.arange(0.5, 2.25, step=0.25))
        plt.ticklabel_format(axis='x', style='sci')
        plt.title(nameDistr + '\nWaiting Time over Response Time')
        plt.legend()
        plt.grid(linestyle='--')
plt.show()
| true |
51d3e34eb01b9c87086a59b28d338294d9d89eed | Python | orenltr/Photo2 | /SingleImage.py | UTF-8 | 33,880 | 3.28125 | 3 | [] | no_license | import numpy as np
import math
from Camera import Camera
from MatrixMethods import *
import PhotoViewer as pv
import matplotlib as plt
from scipy.linalg import rq,inv
# from scipy.spatial.transform import Rotation as R
class SingleImage(object):
def __init__(self, camera, type='real'):
"""
Initialize the SingleImage object
:param camera: instance of the Camera class
:param type: real image or synthetic
:param points: points in image space
:type camera: Camera
:type type: string 'real' or 'synthetic'
:type points: np.array
"""
self.__type = type
self.__camera = camera
self.__innerOrientationParameters = None
self.__isSolved = False
self.__exteriorOrientationParameters = np.array([[0, 0, 0, 0, 0, 0]], 'f').T
self.__rotationMatrix = None
@property
def innerOrientationParameters(self):
"""
Inner orientation parameters
.. warning::
Can be held either as dictionary or array. For your implementation and decision.
.. note::
Do not forget to decide how it is held and document your decision
:return: inner orinetation parameters
:rtype: dictionary
"""
return self.__innerOrientationParameters
@innerOrientationParameters.setter
def innerOrientationParameters(self, parametersArray):
r"""
:param parametersArray: the parameters to update the ``self.__innerOrientationParameters``
**Usage example**
.. code-block:: py
self.innerOrintationParameters = parametersArray
"""
self.__innerOrientationParameters = {'a0': parametersArray[0], 'a1': parametersArray[1],
'a2': parametersArray[2],
'b0': parametersArray[3], 'b1': parametersArray[4],
'b2': parametersArray[5]}
@property
def camera(self):
"""
The camera that took the image
:rtype: Camera
"""
return self.__camera
@property
def type(self):
"""
real image or synthetic
:rtype: string
"""
return self.__type
@property
def exteriorOrientationParameters(self):
r"""
Property for the exterior orientation parameters
:return: exterior orientation parameters in the following order, **however you can decide how to hold them (dictionary or array)**
.. math::
exteriorOrientationParameters = \begin{bmatrix} X_0 \\ Y_0 \\ Z_0 \\ \omega \\ \varphi \\ \kappa \end{bmatrix}
:rtype: np.ndarray or dict
"""
return self.__exteriorOrientationParameters
@exteriorOrientationParameters.setter
def exteriorOrientationParameters(self, parametersArray):
r"""
:param parametersArray: the parameters to update the ``self.__exteriorOrientationParameters``
**Usage example**
.. code-block:: py
self.exteriorOrintationParameters = parametersArray
"""
self.__exteriorOrientationParameters = parametersArray.T
@property
def RotationMatrix(self):
"""
The rotation matrix of the image
Relates to the exterior orientation
:return: rotation matrix
:rtype: np.ndarray (3x3)
"""
if self.__rotationMatrix is not None:
return self.__rotationMatrix
if self.type == 'real':
R = Compute3DRotationMatrix(self.exteriorOrientationParameters[3], self.exteriorOrientationParameters[4],
self.exteriorOrientationParameters[5])
else:
R = Compute3DRotationMatrix_RzRyRz(self.exteriorOrientationParameters[3],
self.exteriorOrientationParameters[4],
self.exteriorOrientationParameters[5])
return R
    @RotationMatrix.setter
    def RotationMatrix(self,val):
        # Cache an externally computed rotation matrix (e.g. from DLT);
        # once set, it overrides the matrix derived from the angles.
        self.__rotationMatrix = val
    @property
    def PerspectiveMatrix(self):
        """3x4 projection matrix P = K * R^T * [I | -X0].

        Combines the camera calibration matrix, the rotation and the
        perspective center. NOTE(review): the hstack assumes
        PerspectiveCenter is a (3, 1) column vector -- confirm storage shape.
        """
        ic = np.hstack((np.eye(3), -self.PerspectiveCenter))
        return np.dot(np.dot(self.camera.CalibrationMatrix,self.RotationMatrix.T),ic)
@property
def isSolved(self):
"""
True if the exterior orientation is solved
:return True or False
:rtype: boolean
"""
return self.__isSolved
@property
def PerspectiveCenter(self):
"""
return the perspective center of the first image
:return: perspective center
:rtype: np.array (3, )
"""
return self.exteriorOrientationParameters[0:3]
    @PerspectiveCenter.setter
    def PerspectiveCenter(self,val):
        # In-place update of the first three exterior orientation entries;
        # `val` is expected as a flat (3,) vector, promoted here to a column.
        self.exteriorOrientationParameters[0:3] = val[:,np.newaxis]
    def ComputeInnerOrientation(self, imagePoints):
        """
        Compute the algebraic inner-orientation parameters (the affine
        transformation camera system -> image system).

        For a digital camera (``fiducialMarks == 'no fiducials'``) the
        parameters follow directly from the pixel size and principal point;
        otherwise they are estimated by least squares from the measured
        fiducial marks against the camera's calibrated fiducial coordinates.

        :param imagePoints: measured fiducial-mark coordinates in image space
        :type imagePoints: np.array nx2
        :return: dict with the parameters 'a0'..'b2', the residuals 'V', the
            a-posteriori variance factor 'sigma0' and covariance 'sigmaX'
            (all statistics are 0 in the digital-camera case)
        :rtype: dict
        """
        if self.camera.fiducialMarks == 'no fiducials': # case of digital camera
            # direct computation from the sensor geometry -- no adjustment
            pixel_size = 0.0024 # [mm] -- NOTE(review): hard-coded pixel pitch
            a1 = 1 / pixel_size
            b2 = -1 / pixel_size  # negative: flips the y axis between the two systems
            a2 = 0
            b1 = 0
            a0 = self.camera.principalPoint[0] / pixel_size
            b0 = self.camera.principalPoint[1] / pixel_size
            self.__innerOrientationParameters = {'a0': a0, 'a1': a1, 'a2': a2, 'b0': b0, 'b1': b1, 'b2': b2,
                                                 'V': 0, 'sigma0': 0, 'sigmaX': 0}
            return {'a0': a0, 'a1': a1, 'a2': a2, 'b0': b0, 'b1': b1, 'b2': b2,
                    'V': 0, 'sigma0': 0, 'sigmaX': 0}
        else:
            # observation vector: all x coordinates stacked above all y ('F' order)
            l = np.matrix(imagePoints).flatten('F').T
            # fiducial marks - camera system
            fc = self.camera.fiducialMarks
            # design matrix (2n x 6): x-rows carry a0,a1,a2; y-rows carry b0,b1,b2
            j = len(imagePoints[:, 0])
            A = np.zeros((len(l), 6))
            for i in range(j):
                A[i, 0:3] = np.array([1, fc[i, 0], fc[i, 1]])
                A[i + j, 3:] = np.array([1, fc[i, 0], fc[i, 1]])
            # normal-equations matrix
            N = (A.T).dot(A)
            # right-hand-side vector
            U = (A.T).dot(l)
            # adjusted parameters (a0, a1, a2, b0, b1, b2)
            X = (np.linalg.inv(N)).dot(U)
            # residuals vector
            v = A.dot(X) - l
            # a-posteriori variance factor: u unknowns, r redundancy
            u = 6
            r = len(l) - u
            sigma0 = ((v.T).dot(v)) / r
            sigmaX = sigma0[0, 0] * (np.linalg.inv(N))
            # update field
            self.__innerOrientationParameters = {'a0': X[0, 0], 'a1': X[1, 0], 'a2': X[2, 0], 'b0': X[3, 0],
                                                 'b1': X[4, 0],
                                                 'b2': X[5, 0],
                                                 'V': v, 'sigma0': sigma0[0, 0], 'sigmaX': sigmaX}
            return {'a0': X[0, 0], 'a1': X[1, 0], 'a2': X[2, 0], 'b0': X[3, 0], 'b1': X[4, 0], 'b2': X[5, 0],
                    'V': v, 'sigma0': sigma0[0, 0], 'sigmaX': sigmaX}
def ComputeGeometricParameters(self):
"""
Computes the geometric inner orientation parameters
:return: geometric inner orientation parameters
:rtype: dict
.. warning::
This function is empty, need implementation
.. note::
The algebraic inner orinetation paramters are held in ``self.innerOrientatioParameters`` and their type
is according to what you decided when initialized them
"""
# algebraic inner orinetation paramters
x = self.__innerOrientationParameters
tx = x['a0']
ty = x['b0']
tetha = np.arctan((x['b1'] / x['b2']))
gamma = np.arctan((x['a1'] * np.sin(tetha) + x['a2'] * np.cos(tetha))
/ (x['b1'] * np.sin(tetha) + x['b2'] * np.cos(tetha)))
sx = x['a1'] * np.cos(tetha) - x['a2'] * np.sin(tetha)
sy = (x['a1'] * np.sin(tetha) + x['a2'] * np.cos(tetha)) / (np.sin(gamma))
return {'translationX': tx, 'translationY': ty, 'rotationAngle': tetha,
'scaleFactorX': sx, 'scaleFactorY': sy, 'shearAngle': gamma}
def ComputeInverseInnerOrientation(self):
"""
Computes the parameters of the inverse inner orientation transformation
:return: parameters of the inverse transformation
:rtype: dict
.. warning::
This function is empty, need implementation
.. note::
The inner orientation algebraic parameters are held in ``self.innerOrientationParameters``
their type is as you decided when implementing
"""
inner = self.__innerOrientationParameters
matrix = np.array([[inner['a1'], inner['a2']], [inner['b1'], inner['b2']]])
# inverse matrix
inv_matrix = np.linalg.inv(matrix)
return {'a0*': -inner['a0'], 'a1*': inv_matrix[0, 0], 'a2*': inv_matrix[0, 1],
'b0*': -inner['b0'], 'b1*': inv_matrix[1, 0], 'b2*': inv_matrix[1, 1]}
def CameraToImage(self, cameraPoints):
"""
Transforms camera points to image points
:param cameraPoints: camera points
:type cameraPoints: np.array nx2
:return: corresponding Image points
:rtype: np.array nx2
.. warning::
This function is empty, need implementation
.. note::
The inner orientation parameters required for this function are held in ``self.innerOrientationParameters``
**Usage example**
.. code-block:: py
fMarks = np.array([[113.010, 113.011],
[-112.984, -113.004],
[-112.984, 113.004],
[113.024, -112.999]])
img_fmarks = np.array([[-7208.01, 7379.35],
[7290.91, -7289.28],
[-7291.19, -7208.22],
[7375.09, 7293.59]])
cam = Camera(153.42, np.array([0.015, -0.020]), None, None, fMarks)
img = SingleImage(camera = cam, points = None)
img.ComputeInnerOrientation(img_fmarks)
pts_image = img.Camera2Image(fMarks)
"""
# get algebric parameters
inner = self.__innerOrientationParameters
imgPoints = np.zeros((len(cameraPoints[:, 0]), 2))
for i in range(len(cameraPoints[:, 0])):
imgPoints[i, 0] = inner['a0'] + inner['a1'] * cameraPoints[i, 0] + inner['a2'] * cameraPoints[i, 1]
imgPoints[i, 1] = inner['b0'] + inner['b1'] * cameraPoints[i, 0] + inner['b2'] * cameraPoints[i, 1]
return imgPoints
def ImageToCamera(self, imagePoints):
"""
Transforms image points to ideal camera points
:param imagePoints: image points
:type imagePoints: np.array nx2
:return: corresponding camera points
:rtype: np.array nx2
.. warning::
This function is empty, need implementation
.. note::
The inner orientation parameters required for this function are held in ``self.innerOrientationParameters``
**Usage example**
.. code-block:: py
fMarks = np.array([[113.010, 113.011],
[-112.984, -113.004],
[-112.984, 113.004],
[113.024, -112.999]])
img_fmarks = np.array([[-7208.01, 7379.35],
[7290.91, -7289.28],
[-7291.19, -7208.22],
[7375.09, 7293.59]])
cam = Camera(153.42, np.array([0.015, -0.020]), None, None, fMarks)
img = SingleImage(camera = cam, points = None)
img.ComputeInnerOrientation(img_fmarks)
pts_camera = img.Image2Camera(img_fmarks)
"""
# get the inverse inner orientation param
inv_param = self.ComputeInverseInnerOrientation()
camPoints = np.zeros((len(imagePoints[:, 0]), 2))
for i in range(len(imagePoints[:, 0])):
camPoints[i, 0] = inv_param['a1*'] * (imagePoints[i, 0] + inv_param['a0*']) + inv_param['a2*'] * (
imagePoints[i, 1] + inv_param['b0*'])
camPoints[i, 1] = inv_param['b1*'] * (imagePoints[i, 0] + inv_param['a0*']) + inv_param['b2*'] * (
imagePoints[i, 1] + inv_param['b0*'])
return camPoints
    def ComputeExteriorOrientation(self, imagePoints, groundPoints, epsilon):
        """
        Compute the exterior orientation parameters by iterative least squares.

        Initial values come from a 2-D conform transformation
        (``ComputeApproximateVals``); the solution is then refined with a
        Gauss-Newton loop built on ``ComputeObservationVector`` and
        ``ComputeDesignMatrix``. Angles are in radians.

        :param imagePoints: image points of the control points
        :param groundPoints: corresponding ground points
        :param epsilon: convergence threshold on the correction-vector norm
        :type imagePoints: np.array nx2
        :type groundPoints: np.array nx3
        :type epsilon: float
        :return: (exterior orientation parameters, sigma0, covariance matrix);
            the statistics are ``None`` when the redundancy is zero
        :rtype: tuple
        """
        # compute control points in camera system using the inner orientation
        camera_points = self.ImageToCamera(imagePoints)
        # approximate values for the exterior orientation (conform transformation)
        self.ComputeApproximateVals(camera_points, groundPoints)
        lb = camera_points.flatten().T
        dx = np.ones([6, 1]) * 100000
        itr = 0
        # Gauss-Newton adjustment, capped at 100 iterations.
        # NOTE(review): if the loop body never runs, L/v/N below are undefined.
        while np.linalg.norm(dx) > epsilon and itr < 100:
            itr += 1
            X = self.exteriorOrientationParameters.T
            l0 = self.ComputeObservationVector(groundPoints).T
            L = lb - l0
            A = self.ComputeDesignMatrix(groundPoints)
            N = np.dot(A.T, A)
            U = np.dot(A.T, L)
            dx = np.dot(np.linalg.inv(N), U)
            X = X + dx
            self.exteriorOrientationParameters = X.T
            v = A.dot(dx) - L
        # a-posteriori variance factor: u unknowns, r redundancy
        u = 6
        r = len(L) - u
        if r != 0:
            sigma0 = ((v.T).dot(v)) / r
            sigmaX = sigma0 * (np.linalg.inv(N))
        else:
            sigma0 = None
            sigmaX = None
        return self.exteriorOrientationParameters, sigma0, sigmaX
    def DLT(self, imagePoints, groundPoints):
        """
        Compute exterior and inner orientation via the Direct Linear Transformation.

        Solves for the 3x4 projection matrix as the eigenvector belonging to
        the smallest eigenvalue of A^T A, then factors its leading 3x3 block
        (RQ decomposition) into a calibration matrix and a rotation, updating
        both this image's orientation and ``self.camera`` in place.

        :param imagePoints: image points of the control points
        :param groundPoints: corresponding ground points
        :type imagePoints: np.array nx2
        :type groundPoints: np.array nx3
        """
        # change to homogeneous representation
        groundPoints = np.hstack((groundPoints, np.ones((len(groundPoints), 1))))
        imagePoints = np.hstack((imagePoints, np.ones((len(imagePoints), 1))))
        # compute design matrix
        a = self.ComputeDLTDesignMatrix(imagePoints, groundPoints)
        # eigen-decomposition of the normal matrix
        w, v = np.linalg.eig(np.dot(a.T, a))
        # the solution is the eigenvector of the minimal eigenvalue
        p = v[:, np.argmin(w)]
        p = np.reshape(p, (3, 4))
        # RQ factorization of the leading 3x3 block into calibration * rotation
        k, r = rq(p[:3, :3])
        k = k/np.abs(k[2,2]) # normalize so that K[2, 2] = 1
        # fix the sign ambiguity of the factorization
        signMat = findSignMat(k)
        k = np.dot(k, signMat)
        r = np.dot(np.linalg.inv(signMat), r)
        # update orientation
        self.RotationMatrix = r.T
        self.PerspectiveCenter = -np.dot(inv(p[:3,:3]),p[:,3])
        # update calibration
        self.camera.principalPoint = k[:2, 2]
        self.camera.focalLength = -k[0,0]
def GroundToImage(self, groundPoints):
"""
Transforming ground points to image points
:param groundPoints: ground points [m]
:type groundPoints: np.array nx3
:return: corresponding Image points
:rtype: np.array nx2
"""
X0_1 = self.exteriorOrientationParameters[0]
Y0_1 = self.exteriorOrientationParameters[1]
Z0_1 = self.exteriorOrientationParameters[2]
O1 = np.array([X0_1, Y0_1, Z0_1]).T
R1 = self.RotationMatrix
x1 = np.zeros((len(groundPoints), 1))
y1 = np.zeros((len(groundPoints), 1))
f = self.camera.focalLength
for i in range(len(groundPoints)):
lamda1 = -f / (np.dot(R1.T[2], (groundPoints[i] - O1).T)) # scale first image
x1[i] = lamda1 * np.dot(R1.T[0], (groundPoints[i] - O1).T)
y1[i] = lamda1 * np.dot(R1.T[1], (groundPoints[i] - O1).T)
camera_points1 = np.vstack([x1.T, y1.T]).T
# img_points1 = self.CameraToImage(camera_points1)
img_points1 = camera_points1
return img_points1
def ImageToRay(self, imagePoints):
"""
Transforms Image point to a Ray in world system
:param imagePoints: coordinates of an image point
:type imagePoints: np.array nx2
:return: Ray direction in world system
:rtype: np.array nx3
.. warning::
This function is empty, need implementation
.. note::
The exterior orientation parameters needed here are called by ``self.exteriorOrientationParameters``
"""
pass # delete after implementations
def ImageToGround_GivenZ(self, imagePoints, Z_values):
"""
Compute corresponding ground point given the height in world system
:param imagePoints: points in image space
:param Z_values: height of the ground points
:type Z_values: np.array nx1
:type imagePoints: np.array nx2
:type eop: np.ndarray 6x1
:return: corresponding ground points
:rtype: np.ndarray
.. warning::
This function is empty, need implementation
.. note::
- The exterior orientation parameters needed here are called by ``self.exteriorOrientationParameters``
- The focal length can be called by ``self.camera.focalLength``
**Usage Example**
.. code-block:: py
imgPnt = np.array([-50., -33.])
img.ImageToGround_GivenZ(imgPnt, 115.)
"""
camera_points = self.ImageToCamera(imagePoints)
# exterior orientation parameters
omega = self.exteriorOrientationParameters[3]
phi = self.exteriorOrientationParameters[4]
kapa = self.exteriorOrientationParameters[5]
X0 = self.exteriorOrientationParameters[0]
Y0 = self.exteriorOrientationParameters[1]
Z0 = self.exteriorOrientationParameters[2]
Z = Z_values
R = Compute3DRotationMatrix(omega, phi, kapa)
X = np.zeros(len(Z))
Y = np.zeros(len(Z))
# co -linear rule
for i in range(len(Z)):
xyf = np.array([camera_points[i, 0] - self.camera.principalPoint[0],
camera_points[i, 1] - self.camera.principalPoint[1],
-self.camera.focalLength]) # camera point vector
lamda = (Z[i] - Z0) / (np.dot(R[2], xyf)) # scale
X[i] = X0 + lamda * np.dot(R[0], xyf)
Y[i] = Y0 + lamda * np.dot(R[1], xyf)
return np.vstack([X, Y, Z]).T
# ---------------------- Private methods ----------------------
def ComputeApproximateVals(self, cameraPoints, groundPoints):
    """
    Compute exterior orientation approximate values via 2-D conform transformation

    The conformal model X = a + c*x + d*y, Y = b + c*y - d*x is solved from
    two point correspondences; the translation gives (X0, Y0), the rotation
    gives kappa and the scale fixes the flying height Z0.

    :param cameraPoints: points in image space (x y)
    :param groundPoints: corresponding points in world system (X, Y, Z)
    :type cameraPoints: np.ndarray [nx2]
    :type groundPoints: np.ndarray [nx3]

    :return: None; updates ``self.exteriorOrientationParameters`` with
        the array [X0, Y0, Z0, omega, phi, kappa]
    """
    # Conformal-transformation design matrix from the first two image
    # points. Unknown order: a (=X0), b (=Y0), c, d.
    A = np.array([[1, 0, cameraPoints[0, 0], cameraPoints[0, 1]],
                  [0, 1, cameraPoints[0, 1], -cameraPoints[0, 0]],
                  [1, 0, cameraPoints[1, 0], cameraPoints[1, 1]],
                  [0, 1, cameraPoints[1, 1], -cameraPoints[1, 0]]])
    # NOTE(review): ground rows 0 and 2 are paired with camera rows 0 and 1,
    # exactly as in the original implementation -- confirm the intended
    # point matching (a commented-out variant used ground rows 0 and 1).
    b = np.array([groundPoints[0, 0],
                  groundPoints[0, 1],
                  groundPoints[2, 0],
                  groundPoints[2, 1]])
    # np.linalg.solve is more stable (and cheaper) than inv(A).dot(b).
    a_, b_, c_, d_ = np.linalg.solve(A, b)

    X0 = float(a_)
    Y0 = float(b_)
    kapa = float(np.arctan2(-d_, c_))
    # Bug fix: the original called np.sqrt(c**2, d**2); np.sqrt's second
    # positional argument is the ufunc *out* array, so d was silently
    # ignored and the scale came out as |c|. The scale is hypot(c, d).
    lamda = float(np.hypot(c_, d_))
    Z0 = float(groundPoints[0, 2] + lamda * self.camera.focalLength)

    omega = 0
    # NOTE(review): non-'real' images get a small non-zero phi seed, as in
    # the original -- presumably to avoid a singular parameterization.
    phi = 0 if self.type == 'real' else 0.1

    # Order defined by the exteriorOrientationParameters property.
    self.exteriorOrientationParameters = np.array([X0, Y0, Z0, omega, phi, kapa])
def ComputeObservationVector(self, groundPoints):
    """
    Compute observation vector for solving the exterior orientation parameters
    of a single image based on their approximate values.

    :param groundPoints: Ground coordinates of the control points
    :type groundPoints: np.array nx3

    :return: Vector l0 (x and y collinearity values, interleaved per point)
    :rtype: np.array 2nx1
    """
    n = groundPoints.shape[0]

    # Ground points relative to the (approximate) perspective centre.
    centre = np.asarray(self.exteriorOrientationParameters[0:3]).reshape(1, 3)
    offsets = groundPoints - centre

    # Rotate into the camera frame: row i equals R^T (g_i - O).
    camFrame = offsets.dot(self.RotationMatrix)

    # Collinearity equations evaluated at the approximate parameters.
    scale = -self.camera.focalLength / camFrame[:, 2]
    l0 = np.empty(n * 2)
    l0[0::2] = scale * camFrame[:, 0]
    l0[1::2] = scale * camFrame[:, 1]
    return l0
def ComputeDesignMatrix(self, groundPoints):
    """
    Compute the derivatives of the collinear law (design matrix)

    Rows alternate the x- and y-observation of each point; the six columns
    are the exterior orientation unknowns [X0, Y0, Z0, omega, phi, kappa].

    :param groundPoints: Ground coordinates of the control points
    :type groundPoints: np.array nx3

    :return: The design matrix
    :rtype: np.array nx6
    """
    # initialization for readability
    omega = self.exteriorOrientationParameters[3]
    phi = self.exteriorOrientationParameters[4]
    kappa = self.exteriorOrientationParameters[5]

    # Coordinates subtraction: ground points relative to the perspective centre
    dX = groundPoints[:, 0] - self.exteriorOrientationParameters[0]
    dY = groundPoints[:, 1] - self.exteriorOrientationParameters[1]
    dZ = groundPoints[:, 2] - self.exteriorOrientationParameters[2]
    dXYZ = np.vstack([dX, dY, dZ])

    # rotate the offsets into the camera frame: rTkg holds (R^T g)_k per point
    rotationMatrixT = self.RotationMatrix.T
    rotatedG = rotationMatrixT.dot(dXYZ)
    rT1g = rotatedG[0, :]
    rT2g = rotatedG[1, :]
    rT3g = rotatedG[2, :]

    # common factor f / (r3^T g)^2 shared by all collinearity derivatives
    focalBySqauredRT3g = self.camera.focalLength / rT3g ** 2

    # partial derivatives of the projected x, y w.r.t. the rotated vector g
    dxdg = rotationMatrixT[0, :][None, :] * rT3g[:, None] - rT1g[:, None] * rotationMatrixT[2, :][None, :]
    dydg = rotationMatrixT[1, :][None, :] * rT3g[:, None] - rT2g[:, None] * rotationMatrixT[2, :][None, :]

    # moving the camera along a world axis shifts g by -1 along that axis
    dgdX0 = np.array([-1, 0, 0], 'f')
    dgdY0 = np.array([0, -1, 0], 'f')
    dgdZ0 = np.array([0, 0, -1], 'f')

    # Derivatives with respect to X0
    dxdX0 = -focalBySqauredRT3g * np.dot(dxdg, dgdX0)
    dydX0 = -focalBySqauredRT3g * np.dot(dydg, dgdX0)

    # Derivatives with respect to Y0
    dxdY0 = -focalBySqauredRT3g * np.dot(dxdg, dgdY0)
    dydY0 = -focalBySqauredRT3g * np.dot(dydg, dgdY0)

    # Derivatives with respect to Z0
    dxdZ0 = -focalBySqauredRT3g * np.dot(dxdg, dgdZ0)
    dydZ0 = -focalBySqauredRT3g * np.dot(dydg, dgdZ0)

    # NOTE(review): self.type selects the angle parameterization -- 'real'
    # uses omega/phi/kappa derivatives, anything else the azimuth-based
    # RzRyRz ones; inferred from the helper names, confirm against their
    # definitions.
    if self.type == 'real':
        dRTdOmega = Compute3DRotationDerivativeMatrix(omega, phi, kappa, 'omega').T
        dRTdPhi = Compute3DRotationDerivativeMatrix(omega, phi, kappa, 'phi').T
        dRTdKappa = Compute3DRotationDerivativeMatrix(omega, phi, kappa, 'kappa').T
    else:
        dRTdOmega = Compute3DRotationDerivativeMatrix_RzRyRz(omega, phi, kappa, 'azimuth').T
        dRTdPhi = Compute3DRotationDerivativeMatrix_RzRyRz(omega, phi, kappa, 'phi').T
        dRTdKappa = Compute3DRotationDerivativeMatrix_RzRyRz(omega, phi, kappa, 'kappa').T

    gRT3g = dXYZ * rT3g

    # Derivatives with respect to Omega
    dxdOmega = -focalBySqauredRT3g * (dRTdOmega[0, :][None, :].dot(gRT3g) -
                                      rT1g * (dRTdOmega[2, :][None, :].dot(dXYZ)))[0]
    dydOmega = -focalBySqauredRT3g * (dRTdOmega[1, :][None, :].dot(gRT3g) -
                                      rT2g * (dRTdOmega[2, :][None, :].dot(dXYZ)))[0]

    # Derivatives with respect to Phi
    dxdPhi = -focalBySqauredRT3g * (dRTdPhi[0, :][None, :].dot(gRT3g) -
                                    rT1g * (dRTdPhi[2, :][None, :].dot(dXYZ)))[0]
    dydPhi = -focalBySqauredRT3g * (dRTdPhi[1, :][None, :].dot(gRT3g) -
                                    rT2g * (dRTdPhi[2, :][None, :].dot(dXYZ)))[0]

    # Derivatives with respect to Kappa
    dxdKappa = -focalBySqauredRT3g * (dRTdKappa[0, :][None, :].dot(gRT3g) -
                                      rT1g * (dRTdKappa[2, :][None, :].dot(dXYZ)))[0]
    dydKappa = -focalBySqauredRT3g * (dRTdKappa[1, :][None, :].dot(gRT3g) -
                                      rT2g * (dRTdKappa[2, :][None, :].dot(dXYZ)))[0]

    # all derivatives of x and y, interleaved row-wise per point
    dd = np.array([np.vstack([dxdX0, dxdY0, dxdZ0, dxdOmega, dxdPhi, dxdKappa]).T,
                   np.vstack([dydX0, dydY0, dydZ0, dydOmega, dydPhi, dydKappa]).T])

    a = np.zeros((2 * dd[0].shape[0], 6))
    a[0::2] = dd[0]
    a[1::2] = dd[1]

    return a
def ComputeDLTDesignMatrix(self, imagePoints, groundPoints):
    """
    Compute the design matrix for the DLT method

    :param groundPoints: homogeneous Ground coordinates of the control points
    :param imagePoints: homogeneous image coordinates of the control points
    :type groundPoints: np.array nx4 (homogeneous coordinates)
    :type imagePoints: np.array nx3 (homogeneous coordinates)

    :return: The design matrix
    :rtype: np.array 2nx12
    """
    n = groundPoints.shape[0]  # number of points

    # Homogeneous image coordinates as column vectors for broadcasting.
    u = imagePoints[:, 0, np.newaxis]
    v = imagePoints[:, 1, np.newaxis]
    w = imagePoints[:, 2, np.newaxis]
    pad = np.zeros((n, 4))

    # One pair of rows per point (cross-product form of x_i ~ P X_i).
    evenRows = np.hstack((pad, -w * groundPoints, v * groundPoints))
    oddRows = np.hstack((w * groundPoints, pad, -u * groundPoints))

    a = np.zeros((2 * n, 12))
    a[0::2] = evenRows
    a[1::2] = oddRows
    return a
def drawSingleImage(self, modelPoints, scale, ax, rays='no', ):
    """
    draws the rays to the modelpoints from the perspective center of the two images

    :param modelPoints: points in the model system [ model units]
    :param scale: scale of image frame
    :param ax: axes of the plot
    :param rays: rays from perspective center to model points

    :type modelPoints: np.array nx3
    :type scale: float
    :type ax: plot axes
    :type rays: 'yes' or 'no'

    :return: none
    """
    pixel_size = 0.0000024  # [m] -- defined here but not used below
    # camera axes drawn at the perspective centre
    pv.drawOrientation(self.RotationMatrix, self.PerspectiveCenter, 1, ax)
    # image frame rectangle (sensor size converted from mm, scaled for display)
    pv.drawImageFrame(self.camera.sensorSize / 1000 * scale, self.camera.sensorSize / 1000 * scale,
                      self.RotationMatrix, self.PerspectiveCenter, self.camera.focalLength / 1000, 1, ax)
    if rays == 'yes':
        # draw rays from the perspective centre to the model points
        pv.drawRays(modelPoints, self.PerspectiveCenter, ax)
if __name__ == '__main__':
    # Smoke test of the SingleImage pipeline on a sample camera.
    # Fiducial marks in the camera frame (calibration values; units
    # presumably mm -- confirm against the Camera class).
    fMarks = np.array([[113.010, 113.011],
                       [-112.984, -113.004],
                       [-112.984, 113.004],
                       [113.024, -112.999]])
    # The same fiducial marks as measured on the scanned image.
    img_fmarks = np.array([[-7208.01, 7379.35],
                           [7290.91, -7289.28],
                           [-7291.19, -7208.22],
                           [7375.09, 7293.59]])
    # Camera(focal length, principal point offset, ..., fiducial marks)
    cam = Camera(153.42, np.array([0.015, -0.020]), None, None, fMarks)
    img = SingleImage(camera=cam)
    # inner orientation and the coordinate transforms in both directions
    print(img.ComputeInnerOrientation(img_fmarks))
    print(img.ImageToCamera(img_fmarks))
    print(img.CameraToImage(fMarks))
    GrdPnts = np.array([[5100.00, 9800.00, 100.00]])
    print(img.GroundToImage(GrdPnts))
    imgPnt = np.array([23.00, 25.00])
    print(img.ImageToRay(imgPnt))
    imgPnt2 = np.array([-50., -33.])
    print(img.ImageToGround_GivenZ(imgPnt2, 115.))
    # Exterior-orientation example data, kept for later use:
    # grdPnts = np.array([[201058.062, 743515.351, 243.987],
    #                     [201113.400, 743566.374, 252.489],
    #                     [201112.276, 743599.838, 247.401],
    #                     [201166.862, 743608.707, 248.259],
    #                     [201196.752, 743575.451, 247.377]])
    #
    # imgPnts3 = np.array([[-98.574, 10.892],
    #                     [-99.563, -5.458],
    #                     [-93.286, -10.081],
    #                     [-99.904, -20.212],
    #                     [-109.488, -20.183]])
    #
    # intVal = np.array([200786.686, 743884.889, 954.787, 0, 0, 133 * np.pi / 180])
    #
    # print img.ComputeExteriorOrientation(imgPnts3, grdPnts, intVal)
| true |
4bb2440ba34cda75e83987dbb69807c124fd91b3 | Python | yanspirit/mytest | /python/tinyPro/hanxin.py | UTF-8 | 255 | 3.140625 | 3 | [] | no_license | #!/usr/local/bin/python
def test(people):
if people%3==2 and people%5==3 and people%7 == 2:
return True
else:
return False
# Han Xin's soldier-counting puzzle: print every candidate count below 100.
# (Python 2 script: uses xrange and print statements. By the Chinese
# Remainder Theorem the unique answer modulo 105 is 23.)
for i in xrange(1,100):
    if test(i) == True:
        print "least sodiors",i
    # else:
    #     print ""
27784d819ebc01626288cdc2a950640ccfda4e7a | Python | jevandezande/quantum | /quantum/zhamiltonian.py | UTF-8 | 3,817 | 3.4375 | 3 | [] | no_license | from matplotlib import pylab, pyplot as plt
import numpy as np
# Example Hamiltonian. NOTE(review): despite the name, this is an 8x8
# matrix -- four 2x2 spin sectors along the diagonal with progressively
# weaker coupling between sectors farther apart.
mat66 = -np.matrix([[100,30, 7, 7, 3, 3, 1, 0],
                    [ 30,80, 7, 7, 3, 3, 1, 1],
                    [ 7, 7,90,30, 7, 7, 3, 3],
                    [ 7, 7,30,70, 7, 7, 3, 3],
                    [ 3, 3, 7, 7,75,30, 7, 7],
                    [ 3, 3, 7, 7,30,50, 7, 7],
                    [ 1, 1, 3, 3, 7, 7,60,30],
                    [ 0, 1, 3, 3, 7, 7,30,40]])
class ZHamiltonian:
    """
    A relativistic Hamiltonian class wherein the Hamiltonian is represented as a
    matrix with a single value for each block. The spin-sectors are almost block
    diagonal (designated by sectors with small off-diagonal coupling that can
    be removed using approx())
    """
    def __init__(self, hamiltonian, name='full', sectors=None):
        """
        :param hamiltonian: a matrix of the hamiltonian elements
        :param name: name for the hamiltonian (often the level of approximation)
        :param sectors: starting row/column index of each spin sector
        """
        self.hamiltonian = hamiltonian
        self.name = name
        self.sectors = sectors
        if sectors is None:
            # default: a single sector covering the whole matrix
            self.sectors = [0]

    def energy(self):
        """
        Return the ground-state energy: the smallest eigenvalue of the
        (Hermitian) Hamiltonian.
        """
        evals, evecs = np.linalg.eigh(self.hamiltonian)
        return evals.min()

    def approx(self, method='full'):
        """
        Return the hamiltonian with the specified elements zeroed out

        e.g. with 1x1 sectors:
        level-1: allows only coupling with adjacent sectors
            -------------        -------------
            | a | b | c |        | a | b | 0 |
            -------------        -------------
            | d | e | f |   ==>  | d | e | f |
            -------------        -------------
            | g | h | i |        | 0 | f | i |
            -------------        -------------

        1: allows only coupling of the 0th and 1st sectors
            -------------        -------------
            | a | b | c |        | a | b | 0 |
            -------------        -------------
            | d | e | f |   ==>  | d | e | 0 |
            -------------        -------------
            | g | h | i |        | 0 | 0 | i |
            -------------        -------------
        """
        sectors = self.sectors
        # work on a copy so this Hamiltonian stays untouched
        hamiltonian = self.hamiltonian.copy()
        if isinstance(method, str):
            if method.lower() == 'full':
                pass  # keep every coupling
            elif 'level-' in method:
                m = int(method[6:])  # sectors more than m apart are decoupled
                for i in range(1, len(sectors) - m):
                    # zero both symmetric off-diagonal blocks at distance > m
                    hamiltonian[sectors[m + i]:, sectors[i-1]:sectors[i]] = 0
                    hamiltonian[sectors[i-1]:sectors[i], sectors[m + i]:] = 0
        elif isinstance(method, int):
            # keep coupling only among the first (method + 1) sectors;
            # everything beyond is fully block-diagonalized
            for a in self.sectors[method + 1:]:
                hamiltonian[a:, 0:a] = hamiltonian[0:a, a:] = 0
        else:
            raise Exception(f'Invalid method: {method}')
        return ZHamiltonian(hamiltonian, name=method)

    def plot(self, ax, cmap='default'):
        """
        Make a heatmap of the Hamiltonian on *ax*, titled with its name and
        ground-state energy; return the image handle (useful for colorbars).
        """
        if cmap == 'default':
            cmap = plt.get_cmap('inferno')
        ax.set_title(f'{self.name}: {self.energy(): >8.3f}')
        im = ax.imshow(self.hamiltonian, interpolation='nearest', cmap=cmap)
        return im
def runZ(mat):
    """
    Plot heatmaps of a ZHamiltonian under successively looser coupling
    approximations, one panel per method, with a shared colorbar and the
    ground-state energy in each panel title.
    """
    # 'Full' keeps every element; 'level-m' only keeps coupling between
    # sectors at most m apart (see ZHamiltonian.approx). The original also
    # assigned an integer-method list here that was immediately
    # overwritten -- that dead assignment has been removed.
    methods = ['Full', 'level-0', 'level-1', 'level-2']
    sectors = [0, 2, 4, 6]
    fig, axes = plt.subplots(1, len(methods))
    axes = axes.reshape(-1)  # flatten in case subplots returns a 2-D grid
    h = ZHamiltonian(mat, 'full', sectors)
    ims = [h.approx(method).plot(ax) for method, ax in zip(methods, axes)]
    # one shared colorbar for all panels
    cbaxes = fig.add_axes([0.93, 0.27, 0.02, 0.46])
    plt.colorbar(ims[1], cax=cbaxes)
    plt.show()


runZ(mat66)
| true |
275e7db3ac0199c83bea484391d55f2b5734f08e | Python | dlshriver/csce430 | /Assembler/stringToMif.py | UTF-8 | 790 | 3.4375 | 3 | [] | no_license | str1 = "BAab98"
str2 = "Aab9B8"

# Emit a 256-word memory-initialization listing ("addr : value;" lines,
# MIF-style to judge by the file name): words 0-127 hold the character
# codes of str1 (defined above), words 128-255 those of str2; unused
# words are zero-filled. NOTE: Python 2 print statements.
for i in range(128):
    if i < len(str1):
        print "\t%s : %06x;" % (i, ord(str1[i]))
    else:
        print "\t" + str(i) + " : 000000;"

for i in range(128):
    if i < len(str2):
        print "\t%s : %06x;" % (i+128, ord(str2[i]))
    else:
        print "\t" + str(i+128) + " : 000000;"
def longest_common_substring(s1, s2):
    """Return the longest common substring of s1 and s2.

    Classic O(len(s1)*len(s2)) dynamic program; as a debug side effect it
    also prints the match's start offset in s1 and its length.
    (Python 2 code: uses xrange and a print statement.)
    """
    # m[x][y] = length of the common suffix of s1[:x] and s2[:y]
    m = [[0] * (1 + len(s2)) for i in xrange(1 + len(s1))]
    longest, x_longest = 0, 0
    for x in xrange(1, 1 + len(s1)):
        for y in xrange(1, 1 + len(s2)):
            if s1[x - 1] == s2[y - 1]:
                m[x][y] = m[x - 1][y - 1] + 1
                if m[x][y] > longest:
                    # new best match ending at s1[x-1]
                    longest = m[x][y]
                    x_longest = x
            else:
                m[x][y] = 0
    # debug output: start offset in s1, match length
    print x_longest - longest, longest
    return s1[x_longest - longest: x_longest]


print longest_common_substring(str1, str2)
1c2ceb998cbc63d29f940350c0003f9e26b76bb6 | Python | MoJoVi/Euler_Project | /euler054.py | UTF-8 | 4,880 | 3.6875 | 4 | [] | no_license | """В карточной игре покер ставка состоит из пяти карт и оценивается
от самой младшей до самой старшей в следующем порядке:
Старшая карта: Карта наибольшего достоинства.
Одна пара: Две карты одного достоинства.
Две пары: Две различные пары карт
Тройка: Три карты одного достоинства.
Стрейт: Все пять карт по порядку, любые масти.
Флаш: Все пять карт одной масти.
Фул-хаус: Три карты одного достоинства и одна пара карт.
Каре: Четыре карты одного достоинства.
Стрейт-флаш: Любые пять карт одной масти по порядку.
Роял-флаш: Десятка, валет, дама, король и туз одной масти.
Достоинство карт оценивается по порядку:
2, 3, 4, 5, 6, 7, 8, 9, 10, валет, дама, король, туз.
Если у двух игроков получились ставки одного порядка, то выигрывает
тот, у кого карты старше: к примеру, две восьмерки выигрывают две пятерки.
Если же достоинства карт у игроков одинаковы, к примеру, у обоих игроков
пара дам, то сравнивают карту наивысшего достоинства (см. пример 4 ниже);
если же и эти карты одинаковы, сравнивают следующие две и т.д.
Файл poker.txt содержит одну тысячу различных ставок для игры двух игроков.
В каждой строке файла приведены десять карт (отделенные одним пробелом):
первые пять - карты 1-го игрока, оставшиеся пять - карты 2-го игрока. Можете
считать, что все ставки верны (нет неверных символов или повторов карт),
ставки каждого игрока не следуют в определенном порядке, и что при каждой
ставке есть безусловный победитель.
Сколько ставок выиграл 1-й игрок?
Примечание: карты в текстовом файле обозначены в соответствии с английскими
наименованиями достоинств и мастей: T - десятка, J - валет, Q - дама,
K - король, A - туз; S - пики, C - трефы, H - червы, D - бубны."""
def how_cards(cardlist):
    """Split a ten-card deal into the two players' sorted hands.

    Each card string like 'TS' is converted in place to [rank, suit], with
    T/J/Q/K/A mapped to 10..14; the first five cards belong to player 1
    and the last five to player 2. Returns one (ranks, suits) pair per
    player, as produced by sort_cards.
    """
    face_ranks = {
        'T': 10,
        'J': 11,
        'Q': 12,
        'K': 13,
        'A': 14
    }
    for pos, raw in enumerate(cardlist):
        raw = list(raw)
        raw[0] = face_ranks[raw[0]] if raw[0].isalpha() else int(raw[0])
        cardlist[pos] = raw
    return sort_cards(cardlist[:5]), sort_cards(cardlist[5:])


def sort_cards(hand):
    """Return (ranks sorted high-to-low, set of suits) for a five-card hand.

    Note: the card lists in *hand* are consumed (popped) in the process.
    """
    ranks, suits = [], set()
    for card in hand:
        suits.add(card.pop())
        ranks.append(card.pop())
    ranks.sort(reverse=True)
    return ranks, suits
def how_comb(hand):
    """Classify a hand given as (ranks sorted descending, suit set).

    Returns (combination rank, tie-break value) where the combination rank
    follows poker order: 10 royal flush, 9 straight flush, 8 four of a
    kind, ... 1 high card.
    """
    ranks, suits = hand
    if len(suits) == 1:
        # all one suit: royal flush, straight flush or plain flush
        if straight(ranks):
            return (10, ranks) if ranks[0] == 14 else (9, ranks)
        return 6, ranks
    if straight(ranks):
        return 5, ranks
    if len(set(ranks)) == 5:
        # no repeated rank: high card
        return 1, ranks
    return pairs(ranks)


def straight(value):
    """True when the five ranks (sorted descending) are consecutive."""
    return all(value[i] - value[i + 1] == 1 for i in range(len(value) - 1))


def pairs(value):
    """Classify a hand containing at least one repeated rank.

    Returns (combination rank, rank of the strongest repeated group).
    """
    grouped = {(value.count(card), card) for card in value if value.count(card) > 1}
    grouped = sorted(grouped, reverse=True)
    # (number of repeated groups, size of the strongest group) -> poker rank
    lookup = {
        (1, 4): 8,  # four of a kind
        (2, 3): 7,  # full house
        (1, 3): 4,  # three of a kind
        (2, 2): 3,  # two pairs
        (1, 2): 2   # one pair
    }
    return lookup[(len(grouped), grouped[0][0])], grouped[0][1]
if __name__ == '__main__':
    with open('poker.txt') as poker:
        # number of hands won by player 1
        res = 0
        for part in poker.readlines():
            # ten space-separated cards per line: the first five belong to
            # player 1, the last five to player 2
            part = part.rstrip().split(' ')
            first, second = how_cards(part)
            first, second = how_comb(first), how_comb(second)
            # Compare (combination rank, tie-break value) element by
            # element; the problem guarantees a clear winner per deal.
            for fir, sec in zip(first, second):
                if fir > sec:
                    res += 1
                    break
                elif fir < sec:
                    break
        print(res)
| true |
b6820546219d15eee35d0e8873058208f0b6d48c | Python | sushmitaraii1/Python-Assignment | /IW-Python-Assignment II/6.py | UTF-8 | 370 | 4.53125 | 5 | [] | no_license | # 6. Create a list with the names of friends and colleagues. Search for the
# name 'John' using a for loop. Print 'not found' if you didn't find it.
lst = ['Sushmita', 'salina', 'shreya', 'upasana', 'John', 'ojaswee']

found = False
for name in lst:
    if name == 'John':
        print("You have friend named {}.".format(name))
        found = True
        break
if not found:
    print("not found")
| true |
ca3a7fb88984d499246b8bc029264f5dfcaa6b31 | Python | Magnum457/smartAquarium | /nivel.py | UTF-8 | 823 | 2.78125 | 3 | [] | no_license | # imports
import RPi.GPIO as GPIO
import time
import res_mqtt as mqtt
# GPIO configuration
def setup():
    """Configure the level-switch input pin; return (pin number, initial state)."""
    GPIO.setmode (GPIO.BCM)  # use Broadcom (BCM) pin numbering
    bot = 13
    # input with internal pull-up: reads 0 when the switch closes to ground
    GPIO.setup (bot, GPIO.IN, pull_up_down=GPIO.PUD_UP)
    estado = 0
    return bot, estado


def loop_nivel():
    """Poll the water-level switch once per second and publish its state
    over MQTT, releasing the GPIOs on exit."""
    try:
        while True:
            # NOTE(review): setup() reconfigures the pin on every
            # iteration; calling it once before the loop would suffice.
            # `estado` is assigned below but never read afterwards.
            bot, estado = setup()
            if GPIO.input(bot)==0:
                estado = 0
                print("Ligado")
                mqtt.send_message("teste/nivel", "Ligado")
            elif GPIO.input(bot)==1:
                estado = 1
                print("Desligado")
                mqtt.send_message("teste/nivel", "Desligado")
            time.sleep(1)
    finally:
        # always release the pins, even on Ctrl-C
        print("fechando as GPIOs")
        GPIO.cleanup()
| true |
169b753b51ae84ea466edde7ec633b730b177245 | Python | JoHyukJun/algorithm-analysis | /src/python/SumOfPartialSequence.py | UTF-8 | 504 | 3.09375 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | '''
main.py
Created by Jo Hyuk Jun on 2020
Copyright © 2020 Jo Hyuk Jun. All rights reserved.
'''
import sys
from itertools import combinations
# Read n (sequence length) and s (target sum), then the n integers.
n, s = map(int, sys.stdin.readline().rstrip().split(' '))
arr = list(map(int, sys.stdin.readline().rstrip().split(' ')))

# Count the non-empty subsequences of arr whose elements sum to s.
# Iterating lazily over itertools.combinations avoids materializing all
# 2**n - 1 subsets in memory (the original appended every combination
# list to f_arr before counting).
cnt = 0
for size in range(1, len(arr) + 1):
    for subset in combinations(arr, size):
        if sum(subset) == s:
            cnt += 1

print(cnt)
85d245824b18451beea25bc3f848b09c64033dfd | Python | dasarpjonam/sloth | /scripts/cleanUpTheSerializedClasses.py | UTF-8 | 3,721 | 2.8125 | 3 | [] | no_license | #!/usr/bin/python
def processStroke(output):
    # Begin a new stroke record: open the point list.
    output.write("[")


def processStrokeFinished(output):
    # Terminate the current stroke record.
    output.write("--\n")


def isStroke(line, output):
    """Start a stroke block if *line* declares a Stroke object; report whether it did."""
    if 'org.ladder.core.sketch.Stroke' in line:
        processStroke(output)
        return True
    return False


def isStrokeFinished(line, output):
    """Close the current stroke block if *line* declares a StrokeFinished event."""
    if "info.sift.dg.ca.datamodel.StrokeFinished" in line:
        processStrokeFinished(output)


def processSketchFinished(output):
    # Separate sketches with a labelled blank line.
    output.write("\nsketch\n")


def isSketchFinished(line, output):
    """Emit a sketch separator if *line* declares a SketchSegmentFinished event."""
    if "SketchSegmentFinished" in line:
        processSketchFinished(output)


def processPoint(output):
    # Points carry no opening marker of their own (intentional no-op write).
    output.write("")


def isPoint(line, output):
    """React to a Point object declaration in *line* (currently writes nothing)."""
    if "org.ladder.core.sketch.Point" in line:
        processPoint(output)
def processX(output, x):
    # Emit the x coordinate as a dict entry (six-decimal float, like %f).
    output.write("'x':%f," % x)


def processY(output, y):
    # Emit the y coordinate and close the point dict.
    output.write("'y':%f}," % y)


def isX(line, x):
    """Track whether the parser is inside a <void property="x"> element.

    *x* is the state carried from the previous line; the return value is
    the state after consuming *line*.
    """
    if '<void property="x">' in line:
        return True
    if x:
        return "</void>" not in line
    return False


def isY(line, y):
    """Track whether the parser is inside a <void property="y"> element."""
    if '<void property="y">' in line:
        return True
    if y:
        return "</void>" not in line
    return False


def isOpenObject(line):
    """Return True if *line* opens an <object> element."""
    return "<object" in line


def isCloseObject(line, counter, strokeCount, output):
    """Handle a closing </object> tag.

    Returns "endstroke" when the tag closes the object that opened the
    current stroke (writing the stroke terminator as a side effect),
    True for any other closing tag, and False when *line* is not a
    closing tag at all.
    """
    if "</object>" not in line:
        return False
    if counter == strokeCount and strokeCount != 0:
        output.write("]\n")
        return "endstroke"
    return True
def getXY(line):
    """Extract a coordinate from a <double>...</double> line.

    Returns the parsed float, or False when *line* contains no <double>
    element; on a parse failure a warning is printed and None is
    (implicitly) returned, matching the original behaviour.

    Fix: the original used Python-2-only syntax (`except ValueError, e`
    and print statements); this version runs on Python 2.6+ and 3.
    """
    if "double" not in line:
        return False
    try:
        # Slice out the text between the first '>' and the last '<'.
        # NOTE(review): the original passed `True` (== 1) as rindex's
        # start index; preserved here as an explicit 1.
        return float(line[line.index('>') + 1:line.rindex('<', 1)])
    except ValueError:
        print("ValueError on getXY(): %s" % line)


def isTime(line, time):
    """Track whether the parser is inside a <void property="time"> element.

    *time* is the state carried from the previous line; the return value
    is the state after consuming *line*.
    """
    if '<void property="time">' in line:
        return True
    if time:
        return "</void>" not in line
    return False


def getTime(line):
    """Extract a timestamp from a <long>...</long> line.

    Returns the parsed integer, or False when *line* contains no <long>
    element; prints a warning (and returns None) on a parse failure.
    """
    if "long" not in line:
        return False
    try:
        # int() handles arbitrarily large values (the original used the
        # Python-2-only long(); int auto-promotes in Python 2 anyway).
        return int(line[line.index('>') + 1:line.rindex('<', 1)])
    except ValueError:
        print("ValueError on getTime(): %s" % line)


def processTime(output, time):
    # A timestamp opens a new point dict.
    output.write("{'time':%d," % time)
def main():
    """Convert the serialized sketch XML into the stage-one text format.

    Walks the XML line by line with a small state machine: `objectCounter`
    tracks <object> nesting depth, `strokeObjectCount` remembers the depth
    at which the current Stroke object was opened, and `x`/`y`/`time`
    remember whether the parser is currently inside the corresponding
    <void property=...> element.
    """
    # NOTE(review): `input` shadows the builtin, and neither file handle
    # is ever closed -- consider `with open(...)` blocks.
    input = open('symbolset_Nov_Test_1-58.xml', 'r')
    outputFile = open('stageOneNov', 'w')
    objectCounter = 0
    strokeObjectCount = 0
    x = False;
    y = False;
    time = False;
    for line in input:
        if isOpenObject(line):
            objectCounter += 1
        closer = isCloseObject(line, objectCounter, strokeObjectCount, outputFile)
        if closer == "endstroke":
            # the stroke's opening object was closed; reset the marker depth.
            # NOTE(review): objectCounter is NOT decremented on this branch,
            # unlike the plain-close branch below -- confirm that asymmetry
            # is intentional.
            strokeObjectCount = 0
        elif closer:
            objectCounter -= 1
        if isStroke(line, outputFile):
            # remember the nesting depth at which this stroke started
            strokeObjectCount = objectCounter
        isStrokeFinished(line, outputFile)
        isSketchFinished(line, outputFile)
        # x/y/time are in-element flags updated on every line; a value is
        # emitted only while inside the matching element.
        x = isX(line, x)
        if (x and getXY(line)):
            processX(outputFile, getXY(line))
        y = isY(line, y)
        if (y and getXY(line)):
            processY(outputFile, getXY(line))
        time = isTime(line, time)
        if (time and getTime(line)):
            processTime(outputFile, getTime(line))


if __name__ == "__main__":
    main()
206c6bae1a575ba1e0ad31380419d1c205c99f4f | Python | AndrewAct/DataCamp_Python | /Preprocessing for Machine Learning in Python/Putting it All Together/01_Checking_Column_Types.py | UTF-8 | 1,101 | 3.3125 | 3 | [] | no_license | # # 6/26/2020
# Take a look at the UFO dataset's column types using the dtypes attribute. Two columns jump out for transformation: the seconds column, which is a numeric column but is being read in as object, and the date column, which can be transformed into the datetime type. That will make our feature engineering efforts easier later on.
# NOTE(review): DataCamp exercise snippet -- `ufo` (a pandas DataFrame)
# and `pd` (pandas) are provided by the exercise environment, not defined
# in this file.

# Check the column types
print(ufo.dtypes)

# Change the type of seconds to float
ufo["seconds"] = ufo["seconds"].astype(float)

# Change the date column to type datetime
ufo["date"] = pd.to_datetime(ufo["date"])

# Check the column types
print(ufo[["seconds", "date"]].dtypes)

# Expected exercise output:
# <script.py> output:
#     date              object
#     city              object
#     state             object
#     country           object
#     type              object
#     seconds           object
#     length_of_time    object
#     desc              object
#     recorded          object
#     lat               object
#     long             float64
#     dtype: object
#     seconds           float64
#     date       datetime64[ns]
#     dtype: object
7881a762f467e4a872b04fb69ed3c7fa35da4f99 | Python | n1balgo/algo | /permute_brackets.py | UTF-8 | 653 | 3.375 | 3 | [] | no_license | #!/usr/bin/env python3
count = 0  # number of bracket strings printed so far (module-level counter)


def _print_brackets(N, M, String, Loc):
    """Depth-first fill of String[Loc:] with '{'/'}' choices.

    N -- opening brackets still available to place
    M -- closing brackets still available (invariant: M >= N)
    Prints each completed balanced string with its running index.

    Fix: the file's shebang requests python3, but the body used the
    Python-2-only print statement and xrange -- both converted.
    """
    if N == 0 and M == 0:
        global count
        count += 1
        print(count, ''.join(String))
        return
    if N > 0:
        String[Loc] = '{'
        _print_brackets(N - 1, M, String, Loc + 1)
    if M > N:
        # a '}' is legal only while more are pending than '{'s
        String[Loc] = '}'
        _print_brackets(N, M - 1, String, Loc + 1)


def print_brackets(N):
    """Print every balanced string of N '{'/'}' pairs.

    The number of strings equals the N-th Catalan number
    C_N = (2N)! / (N! * (N+1)!).
    """
    global count
    count = 0
    String = ['' for _ in range(N + N)]
    _print_brackets(N, N, String, 0)


# print bracket configurations (number of combinations is n'th catalan
# number = 1/(n+1) * [factorial(2n)/factorial(n)^2])
print_brackets(3)
c019bd19aced0689ae96ffdf91a4f1577b1729c3 | Python | gabinete-compartilhado-acredito/100-dias-congresso | /analises/xavierUtils.py | UTF-8 | 3,767 | 3.234375 | 3 | [
"MIT"
] | permissive | import numpy as np
import pandas as pd
import datetime as dt
import matplotlib.pyplot as pl
### Auxiliary functions ###
def Bold(text):
    """
    Takes a string and returns it wrapped in ANSI bold escape codes.
    """
    return ''.join(['\033[1m', text, '\033[0m'])
def unique(series):
    """
    Takes a pandas series as input and prints all unique values, separated
    by a blue bar. (Python 2 code: uses print statements.)
    """
    u = series.unique()
    try:
        print Bold(str(len(u)))+': '+'\033[1;34m | \033[0m'.join(sorted(u.astype(str)))
    except:
        # fall back for values without .astype (plain Python objects)
        print Bold(str(len(u)))+': '+'\033[1;34m | \033[0m'.join(sorted(u))


def columns(df):
    """
    Print the number of columns and their names, separated by a blue bar.
    """
    unique(df.columns)


def mapUnique(df):
    """
    Takes a pandas dataframe and prints the unique values of all columns and their numbers.
    If the number of unique values is greater than maxItems, only print out a sample.
    (Python 2 code: print statements and `basestring`.)
    """
    for c in df.columns.values:
        maxItems = 20
        u = df[c].unique()
        n = len(u)
        isStr = isinstance(u[0],basestring)  # Python 2 only (basestring)
        print ''
        print Bold(c+': ')+str(n)+' unique values.'
        if n<=maxItems:
            if isStr:
                print ', '.join(np.sort(u))
            else:
                print ', '.join(np.sort(u).astype('unicode'))
        else:
            # too many distinct values: show a random sample of maxItems
            if isStr:
                print Bold('(sample) ')+', '.join(np.sort(np.random.choice(u,size=maxItems,replace=False)))
            else:
                print Bold('(sample) ')+', '.join(np.sort(np.random.choice(u,size=maxItems,replace=False)).astype('unicode'))


def checkMissing(df):
    """
    Takes a pandas dataframe and prints the columns that have missing
    values, with their count and percentage of rows.
    """
    colNames = df.columns.values
    print Bold('Colunas com valores faltantes:')
    Ntotal = len(df)
    # per-column count of null entries, as floats so the percentage divides
    Nmiss = np.array([float(len(df.loc[df[c].isnull()])) for c in colNames])
    df2 = pd.DataFrame(np.transpose([colNames,[df[c].isnull().any() for c in colNames], Nmiss, np.round(Nmiss/Ntotal*100,2)]),
                       columns=['coluna','missing','N','%'])
    print df2.loc[df2['missing']==True][['coluna','N','%']]
def freq(series, value):
    """
    Takes a pandas series and a value and returns the fraction of the
    series' entries equal to that value.
    """
    matches = float((series == value).sum())
    return matches / len(series)
### HAS A BUG!! FIX! >> the split can go wrong if the path contains ../
# (Author's note, translated: splitting the whole path on '.' breaks for
# names with extra dots -- e.g. relative paths containing '../' -- since
# part[0]/part[1] then no longer are the stem and the extension.)
def saveFigWdate(name):
    """
    Takes a string (a filename with extension) and saves the current plot
    to it, adding the current date to the filename.
    """
    part = name.split('.')
    t = dt.datetime.now().strftime('%Y-%m-%d')
    filename = part[0]+'_'+t+'.'+part[1]
    pl.savefig(filename, bbox_inches='tight')
def cov2corr(cov):
    """
    Takes a covariance matrix and returns the corresponding correlation matrix.
    """
    assert(len(cov) == len(np.transpose(cov))), 'Cov. matrix must be a square matrix.'
    size = len(cov)
    # hoist the per-axis standard deviations (sqrt of the diagonal)
    sd = [np.sqrt(cov[k][k]) for k in range(size)]
    corr = [[cov[i][j] / (sd[i] * sd[j]) for i in range(size)] for j in range(size)]
    return np.array(corr)
def one2oneQ(df, col1, col2):
    """
    Check if there is a one-to-one correspondence between two columns in a dataframe.
    """
    # each value of col1 must map to exactly one value of col2, and vice versa
    forward = df.groupby(col1)[col2].nunique()
    backward = df.groupby(col2)[col1].nunique()
    if (forward == 1).all() and (backward == 1).all():
        return True
    return False
def one2oneViolations(df, colIndex, colMultiples):
    """
    Return, for each value of colIndex that maps to more than one value of
    colMultiples, the list of those values (i.e. the one-to-one violations).
    """
    grouped = df.groupby(colIndex)[colMultiples]
    return grouped.unique().loc[grouped.nunique() > 1]
| true |
9333a17469482ac8f235d8b2306fa7325187fe7c | Python | yz5201214/btbbt | /btbbt/spiders/btbbt_drama_series_spider.py | UTF-8 | 11,346 | 2.578125 | 3 | [] | no_license | # 剧集爬取
import scrapy,time,json
from btbbt.myFileItem import MyFileItem
from btbbt.movieInfoItem import movieInfo
from btbbt.pipelines import redis_db, redis_data_btbbt
from scrapy.utils.project import get_project_settings
# 这了一定要注意Spider 的首字母大写
class btbbtDramaSeriesSpider(scrapy.Spider):
    """Scrapy spider for the btbtt 'drama series' forum board.

    For each thread it downloads the poster images and .torrent
    attachments (including those posted in follow-up replies) and yields
    one movieInfo item describing the drama.
    """
    settings = get_project_settings()
    name = 'drama'
    bbsTid = '36'  # forum board id, stored with every scraped item
    '''
    custom_settings = {
        'ITEM_PIPELINES':{'btbbt.pipelines.btFilesPipeline': 1}
    }
    '''
    start_urls = [
        'http://btbtt.org/forum-index-fid-950.htm',  # drama board front page
    ]

    def parse(self, response):
        """Parse one board index page: queue every thread on it and follow
        the next-page link (a first full crawl walks all pages; later
        incremental crawls are bounded via 'dramapageNum' in redis)."""
        next_ur = None
        num = None
        '''
        start_request 已经爬取到了网页内容,parse是将内容进行解析,分析,获取本身我自己需要的数据内容
        流程是:1。爬取指定的内容页 2.通过返回内容自定义规则提取数据
        :param response: 页面返回内容
        :return: 必须返回
        ::attr("href")
        '''
        # Incremental mode: once a full crawl has recorded 'dramaSize',
        # track how many index pages this run has visited.
        if redis_db.hget(redis_data_btbbt, 'dramaSize') is not None:
            # start counting from page 0 on the first incremental run
            if redis_db.get('dramapageNum') is None:
                num = 0
            else:
                num = int(redis_db.get('dramapageNum'))
        # Parse the individual thread rows on this index page.
        movidTableList = response.css('#threadlist table')
        for table in movidTableList:
            icoClass = table.css('span::attr("class")').extract_first()
            # Skip pinned announcement threads; crawl everything else
            # (icons vary, so filtering is by the 'icon-top' marker only).
            if icoClass.find('icon-top') < 0:
                # collect the thread (post) urls
                allMovieUrlList = table.css('a.subject_link')
                for movieUrl in allMovieUrlList:
                    realUrl = response.urljoin(movieUrl.css('a::attr("href")').extract_first())
                    yield scrapy.Request(realUrl, callback=self.dramaParse)
        # Pagination: the last pager link reads '▶' when a next page exists.
        next_pages = response.css('div.page a')
        self.log(next_pages[len(next_pages) - 1].css('a::text').extract_first())
        if next_pages[len(next_pages) - 1].css('a::text').extract_first() == '▶':
            next_ur = response.urljoin(next_pages[len(next_pages) - 1].css('a::attr("href")').extract_first())
        # log the next-page address
        self.log("下一页地址:%s" % next_ur)
        # First full crawl (num is None): follow pagination to the end.
        if next_ur is not None and num is None:
            yield scrapy.Request(next_ur, callback=self.parse)
        # Incremental crawl: continue only while the page counter allows it.
        # NOTE(review): the original comment says "only the first ten
        # pages", but `num >= 10` only paginates *after* page 10 -- confirm
        # whether `num < 10` was intended.
        if next_ur is not None and num is not None and num >= 10:
            num = num + 1
            redis_db.set('dramapageNum', num)
            yield scrapy.Request(next_ur, callback=self.parse)

    def dramaParse(self, response):
        """Parse one thread page: download its images and .torrent
        attachments, collect attachments from follow-up replies, and
        yield a movieInfo item for the drama."""
        # my own mirror domain, from the project settings
        my_url = self.settings.get('MY_URL')
        onlyId = response.url.split('/')[-1]
        movieTtpeStr = "".join(response.css('div.bg1.border.post h2 a::text').extract()).replace('\t', '').replace('\r', '').replace('\n', '')
        movieNameStr = "".join(response.css('div.bg1.border.post h2::text').extract()).replace('\t', '').replace('\r', '').replace('\n', '').replace('\'', '”').replace('"', '”').replace(',', ',')
        movieTtpeList = movieTtpeStr.replace('][', ',').replace('[', '').replace(']', '').split(',')
        # attachment storage path: spider name / year / thread id
        cusPath = [self.name, movieTtpeList[0], response.url.split('/')[-1]]
        movieImgs = []
        # Download every image embedded in the post, keeping its original path.
        if len(response.css('p img')) > 0:
            for imgList in response.css('p img'):
                myfileItem = MyFileItem()
                if imgList.css('img::attr("src")').extract_first() is not None:
                    myfileItem['file_urls'] = [response.urljoin(imgList.css('img::attr("src")').extract_first())]
                    myfileItem['file_name'] = imgList.css('img::attr("src")').extract_first().replace('http://', '').replace('https://', '')
                    movieImgs.append(myfileItem['file_name'])
                    yield myfileItem
        mainPostAttach = response.css('#body table:nth-child(2) div.attachlist')
        allAttachLen = len(response.css('div.attachlist'))
        movieFiles = []
        # Attachments of the opening post (its attachlist, when present).
        if mainPostAttach is not None and len(mainPostAttach) == 1:
            allAttachLen = allAttachLen - 1  # remaining attachlists belong to replies
            x = 0
            for tableTrItem in mainPostAttach.css('table tr'):
                if tableTrItem.css('a') is not None and len(tableTrItem.css('a')) > 0:
                    url = tableTrItem.css('a::attr("href")').extract_first()
                    btName = tableTrItem.css('a::text').extract_first()
                    btSize = tableTrItem.css('td')[2].css('td::text').extract_first()  # attachment size text
                    # torrent download address
                    movieFileUrl = response.urljoin(url)
                    myfileItem = MyFileItem()
                    if btName.find('.torrent') >= 0:
                        # only .torrent attachments are downloaded for now
                        realFileName = onlyId + '_' + str(x) + '.torrent'
                        # download url
                        myfileItem['file_urls'] = [movieFileUrl.replace('dialog', 'download')]
                        # storage name must be ASCII, so the original (CJK)
                        # name is kept separately in fileDict below
                        myfileItem['file_name'] = '/'.join(cusPath) + '/' + realFileName
                        # attachment record persisted with the item
                        fileDict = {
                            'file_name': btName,
                            'file_url': myfileItem['file_name'],
                            'file_size': btSize
                        }
                        movieFiles.append(fileDict)
                        x = x + 1
                        yield myfileItem
        movieText = response.css('#body table')[1].css('p').extract()
        # Rewrite embedded image urls in the post body to my own mirror.
        movieTextStr = ''.join(movieText)
        movieTextStr = movieTextStr.replace('<img src="/upload/',
                                            '<img src="' + my_url + '/upload/data/attachment/forum/upload/')
        movieTextStr = movieTextStr.replace('<img src="http://',
                                            '<img src="' + my_url + '/upload/data/attachment/forum/')
        movieTextStr = movieTextStr.replace('<img src="https://',
                                            '<img src="' + my_url + '/upload/data/attachment/forum/')
        # Build and store the drama item.
        if movieTtpeStr is not None:
            movieItem = movieInfo()
            movieItem['spiderUrl'] = response.url
            movieItem['type'] = '2'  # 2 = drama series
            # comma-separated [year, region, genre, ad type]
            movieItem['classInfo'] = movieTtpeStr.replace('][', ',').replace('[', '').replace(']', '')
            # comma-separated [download type, name, file type/size, subtitles, resolution]
            movieItem['name'] = movieNameStr.replace('][', ',').replace('[', '').replace(']', '')
            movieItem['createTime'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
            movieItem['editTime'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
            movieItem['allInfo'] = movieTextStr
            movieItem['imgs'] = json.dumps(movieImgs, ensure_ascii=False)
            movieItem['filestr'] = json.dumps(movieFiles, ensure_ascii=False)
            movieItem['bbsFid'] = self.bbsTid
            bbsReplinesList = []
            if allAttachLen > 0:
                # all reply blocks on the page
                messageTableList = response.css('#body table')
                # replies start at table index 3; earlier tables are chrome
                for x in range(3, len(messageTableList)):
                    # "无字片源" marks raw/unsubbed sources, which are skipped
                    repliesInfo = ''.join(messageTableList[x].css('p').extract()).replace('%7C', '|')
                    attach = messageTableList[x].css('div.attachlist')
                    # Reply carrying an attachment list (torrent updates).
                    if repliesInfo.find('无字片源') < 0 and len(attach) == 1:
                        movieFiles = []
                        # the reply's DIV id, used to match this floor when
                        # re-crawling for updates
                        msgDivId = messageTableList[x].css('div.message::attr("id")').extract_first()
                        # NOTE(review): this rebinds the outer loop variable
                        # `x` as a per-attachment counter; it does not break
                        # the range() iteration, but it is fragile.
                        x = 0
                        for tableTrItem in attach.css('table tr'):
                            if tableTrItem.css('a') is not None and len(tableTrItem.css('a')) > 0:
                                url = tableTrItem.css('a::attr("href")').extract_first()
                                # display name of the attachment
                                btName = tableTrItem.css('a::text').extract_first()
                                btSize = tableTrItem.css('td')[2].css('td::text').extract_first()  # attachment size text
                                # only .torrent attachments are downloaded
                                if btName.find('.torrent') >= 0:
                                    movieFileUrl = response.urljoin(url)
                                    myfileItem = MyFileItem()
                                    myfileItem['file_urls'] = [movieFileUrl.replace('dialog', 'download')]
                                    # storage name (per-reply subfolder)
                                    myfileItem['file_name'] = '/'.join(cusPath) + '/' + msgDivId + '/' + str(
                                        x) + '.torrent'
                                    fileDict = {
                                        'file_name': btName,
                                        'file_url': myfileItem['file_name'],
                                        'file_size': btSize
                                    }
                                    movieFiles.append(fileDict)
                                    yield myfileItem
                        # reply record persisted with the item
                        fRepliesItem = {
                            'id': msgDivId,
                            'allInfo': repliesInfo,
                            'filestr': json.dumps(movieFiles, ensure_ascii=False)
                        }
                        bbsReplinesList.append(fRepliesItem)
                    # Reply without attachments that shares a Baidu netdisk
                    # (pan.baidu.com) link instead.
                    if repliesInfo.find('无字片源') < 0 and len(attach) == 0 and repliesInfo.find('pan.baidu.com') > 0:
                        # the reply's DIV id, used to match this floor when
                        # re-crawling for updates
                        msgDivId = messageTableList[x].css('div.message::attr("id")').extract_first()
                        # NOTE(review): `movieFiles` here still holds data
                        # from an earlier block -- confirm an empty list was
                        # not intended for netdisk-only replies.
                        fRepliesItem = {
                            'id': msgDivId,
                            'allInfo': repliesInfo,
                            'filestr': json.dumps(movieFiles, ensure_ascii=False)
                        }
                        bbsReplinesList.append(fRepliesItem)
            movieItem['bbsRelinesListJson'] = json.dumps(bbsReplinesList, ensure_ascii=False)
            yield movieItem
| true |
84834e17e26426c69474e387f61c06b7a49a4f5f | Python | caltechlibrary/commonpy | /tests/test_data_structures.py | UTF-8 | 1,175 | 2.828125 | 3 | [
"BSD-3-Clause",
"CC-BY-3.0"
] | permissive | import json
import os
import pytest
import sys
from time import time
# Make the package importable when the tests are run from the source tree.
try:
    thisdir = os.path.dirname(os.path.abspath(__file__))
    sys.path.append(os.path.join(thisdir, '..'))
except NameError:
    # __file__ is undefined (e.g. interactive session); the original used a
    # bare `except:` here, which would also hide unrelated errors.
    sys.path.append('..')
from commonpy.data_structures import *
def test_dict_basic():
    """Keys stored with any casing are retrievable case-insensitively,
    whether the dict is filled by assignment or built from a plain dict."""
    by_assignment = CaseFoldDict()
    by_assignment['A'] = 1
    assert 'a' in by_assignment
    assert by_assignment['a'] == 1
    by_construction = CaseFoldDict({'A': 1})
    assert 'a' in by_construction
    assert by_construction['a'] == 1
def test_dict_comparison():
    """Two dicts whose keys differ only in casing compare equal, and so do
    their key views."""
    lower = CaseFoldDict()
    lower['a'] = 1
    lower['B'] = 2
    upper = CaseFoldDict()
    upper['A'] = 1
    upper['b'] = 2
    assert lower == upper
    assert lower.keys() == upper.keys()
def test_set_basic():
    """Membership ignores casing for added, constructed, and unioned
    elements, including non-ASCII letters."""
    folded = CaseFoldSet()
    folded.add('A')
    assert 'a' in folded
    assert 'A' in folded
    folded = CaseFoldSet(['A'])
    assert 'a' in folded
    assert 'A' in folded
    folded.add('b')
    # Union of two case-folding sets is itself case-folding.
    folded = CaseFoldSet(['a']) | CaseFoldSet(['B'])
    assert 'b' in folded
    # Case folding also applies to accented characters.
    folded.add('É')
    assert 'é' in folded
def test_set_comparison():
    """Two sets whose members differ only in casing compare equal."""
    lower = CaseFoldSet()
    upper = CaseFoldSet()
    lower.add('a')
    upper.add('A')
    assert lower == upper
def test_json_dumps_dict():
    """JSON serialization preserves the keys' original casing."""
    payload = CaseFoldDict({'A': 1, 'B': 2})
    serialized = json.dumps(payload)
    assert 'A' in serialized
    assert 'B' in serialized
| true |
dc3531fbd798ffdf55deab075bc17d3dcb2aedc3 | Python | soymintc/zufaelliger | /tests/test_joke.py | UTF-8 | 190 | 2.625 | 3 | [] | no_license | from unittest import TestCase
import zufaelliger
class TestJoke(TestCase):
    """Tests for the zufaelliger.joke() helper."""

    def test_is_string(self):
        """joke() must return a text string."""
        s = zufaelliger.joke()
        # `basestring` only exists in Python 2 and raises NameError on
        # Python 3; `str` is the text type there.  assertIsInstance also
        # gives a clearer failure message than assertTrue(isinstance(...)).
        self.assertIsInstance(s, str)
| true |
78bba981ae5286497c30d235f1bfee7e3f56a1dd | Python | MustafaEP/PythonKodlari | /Program2.py | UTF-8 | 228 | 3.5 | 4 | [] | no_license | sayı1=5
sayı2=10
# sayı1 (assigned 5 just above this chunk) and sayı2 are printed one per line.
print(sayı1)
print(sayı2)
sayı3=sayı1+sayı2
print(sayı3)
# the two numbers above (sayı1 and sayı2) were added together
sayi1=int(input("Bir sayı gir: "))  # prompt means "Enter a number"
print("Girdiğiniz sayı: ",sayi1)  # "The number you entered"
# demonstrates that int(input(...)) yields an int
print(type(sayi1))
| true |
0a25b986bf0d9a67acee00bc5dd1a9e8dc9a5c96 | Python | yamaton/codeforces | /problemSet/592D-Super_ M.py | UTF-8 | 668 | 3.125 | 3 | [] | no_license | """
Codeforces Round #328 (Div. 2)
Problem 592 D. Super Ms
@author yamaton
@date 2015-10-31
"""
import itertools as it
import functools
import operator
import collections
import math
import sys
def solve(edges, attacked_nodes):
    # TODO: unimplemented stub for problem 592D.  It should return the
    # (city, time) pair that main() unpacks; as written it returns None,
    # so main() raises a TypeError on unpacking.
    pass
def print_stderr(*args, **kwargs):
    """print() look-alike that writes to standard error instead of stdout."""
    print(*args, file=sys.stderr, **kwargs)
def main():
    """Read the problem input from stdin, delegate to solve(), and print
    the chosen city and the minimum time."""
    # First line: n = number of cities, m = number of attacked cities.
    [n, m] = [int(i) for i in input().strip().split()]
    # Next n-1 lines: the edges of the tree of cities.
    pairs = [[int(i) for i in input().strip().split()] for _ in range(n-1)]
    # Last line: the m attacked cities.
    attacked = [int(i) for i in input().strip().split()]
    assert len(attacked) == m
    # NOTE(review): solve() is still a stub returning None, so this
    # unpacking fails with TypeError until solve() is implemented.
    city, time = solve(pairs, attacked)
    print(city)
    print(time)
if __name__ == '__main__':
    main()
bb81617dbe5769a39fa735ac4090ae0a2d44d763 | Python | liuxfiu/simulus | /examples/misc/mm1-numpy.py | UTF-8 | 539 | 2.703125 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | import numpy # assuming numpy has been installed
import simulus
numpy.random.seed(123)
def job(idx):
    """One customer: grab the shared server resource, hold it for a
    gamma-distributed service time, then give it back."""
    r.acquire()
    print("%g: job(%d) gains access" % (sim.now, idx))
    service_time = numpy.random.gamma(2, 2)
    sim.sleep(service_time)
    print("%g: job(%d) releases" % (sim.now, idx))
    r.release()
def arrival():
    """Spawn numbered job processes forever, separated by Pareto-distributed
    inter-arrival gaps."""
    job_count = 0
    while True:
        job_count += 1
        inter_arrival = numpy.random.pareto(0.95)
        sim.sleep(inter_arrival)
        print("%g: job(%d) arrives" % (sim.now, job_count))
        sim.process(job, job_count)
sim = simulus.simulator()
r = sim.resource()  # the single shared server; jobs queue on acquire()
sim.process(arrival)  # kick off the arrival generator process
sim.run(10)  # advance simulated time to t=10, then stop
| true |
6bb48d10f1f647b6f3a559fa7bc527129a092545 | Python | Vishwash18/vkgithub | /Fibanacci.py | UTF-8 | 631 | 3.515625 | 4 | [] | no_license | a=int(input("Enter Number of test cases"))
def findMin(V):
    """Greedily print a minimal multiset of Indian-currency denominations
    summing to V, largest first, space-separated (no trailing newline).
    For V <= 0 nothing is printed."""
    denominations = [2000, 500, 100, 50, 20, 10, 5, 2, 1]
    picked = []
    for note in denominations:
        # Take as many of this denomination as still fit the remainder.
        while V >= note:
            V -= note
            picked.append(note)
    for note in picked:
        print(note, end=" ")
if __name__ == '__main__':
    # Handle each of the `a` test cases as it is read.  The original code
    # first read all inputs and then ran findMin `a` more times on only the
    # LAST value entered, printing the same answer repeatedly.
    for _ in range(1, a + 1):
        n = int(input("Enter the Number\n"))
        print("Following is minimal number",
              "of change for", n, ": ", end="\n")
        findMin(n)
        print()  # terminate the space-separated denomination list
| true |
771f6d5931aa8cc90b8250551c8108eb245b4bfb | Python | HTML-as-programming-language/HTML-as-programming-language | /HTML_to_C_compiler/htmlc/elements/avr/pin_elements/digital_write.py | UTF-8 | 1,124 | 2.671875 | 3 | [] | no_license | from htmlc.diagnostics import Diagnostic, Severity
from htmlc.elements.element import Element
class DigitalWrite(Element):
    """<digital-write> element: compiles to a C digital_write() call that
    writes a (possibly element-wrapped) value to the pin named by the
    element's first attribute."""

    def __init__(self):
        super().__init__()
        self.val = None   # value to write: an Element wrapper or a literal
        self.name = None  # target pin name, taken from the first attribute key
        self.is_value_wrapper = True
        self.require_htmlc_includes = [
            "avr/digital.c"
        ]

    def init(self):
        """Pick up the pin name and the value from the element's attributes."""
        if not self.attributes:
            return
        first_key, first_attr = list(self.attributes.items())[0]
        self.name = first_key
        self.val = first_attr.get("val") or self.get_inner_value()

    def diagnostics(self):
        """Report an error when no pin name was supplied."""
        if self.name:
            return []
        return [Diagnostic(
            Severity.ERROR, self.code_range,
            "Use like: <digital-write myLed>cake</digital-write>"
        )]

    def to_c(self, mapped_c):
        """Emit the digital_write(...) call into the mapped C output."""
        mapped_c.add(
            f"\n// write {self.val} to {self.name}:\n"
            f"digital_write(&__{self.name}_PORT__, __{self.name}_BIT_NR__, ",
            self
        )
        # A wrapped value renders itself; a plain value is interpolated.
        if isinstance(self.val, Element):
            self.val.to_c(mapped_c)
        else:
            mapped_c.add(f"{self.val}", self)
        mapped_c.add(");\n", self)
| true |
8cd24856312de89bf31572dd8ce8c0a45b6760f8 | Python | 514K/sas | /prost.py | UTF-8 | 164 | 2.671875 | 3 | [] | no_license | import requests
url = 'http://oreluniver.ru/schedule/'
# Fetch the page once; the original issued the same GET twice and threw the
# first response away.
req = requests.get(url).text
print(req[:600])  # show the first 600 characters of the response
| true |
c680df5b323b2901c1e8345c6efb39ecd3969c0f | Python | earlbread/leetcode | /implement-strstr/implement-strstr.py | UTF-8 | 818 | 3.625 | 4 | [] | no_license | class Solution(object):
def strStr(self, haystack, needle):
"""
:type haystack: str
:type needle: str
:rtype: int
"""
if not haystack and not needle:
return 0
if not haystack:
return -1
if not needle:
return 0
i = 0
while i < len(haystack) - len(needle) + 1:
if haystack[i] == needle[0]:
j = 1
while i + j < len(haystack) and j < len(needle):
if haystack[i+j] != needle[j]:
break
j += 1
if j == len(needle):
return i
i += 1
return -1
s = Solution()
# Ad-hoc smoke test: expected output is 8, then 4.
print(s.strStr('mississippi', 'pi'))
print(s.strStr('mississippi', 'issip'))
| true |
cd089cfabc1a40d9f5f754fc29bc60e37520a1bf | Python | tianwei08222/forecast | /tb_forecast_workexper_jobnum.py | UTF-8 | 4,777 | 2.734375 | 3 | [] | no_license | import pandas as pd
import pymysql
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import linear_model
import json
class Forecast_Workexper_Jobnum:
    """Fits a linear-regression trend of job counts per years-of-work-
    experience bucket (1-5 years) loaded from MySQL, forecasts the next
    7 days, and writes the JSON forecast back to the same table.

    NOTE(review): all state below is declared at CLASS level, so every
    instance shares (and keeps appending to) the same lists -- only safe
    because the script creates a single instance per run.
    """
    # raw time axis ("MM-DD" strings, later converted to day-of-year ints)
    x_list = []
    # observed job counts per experience bucket
    one_year_list = []
    two_year_list = []
    three_year_list = []
    four_year_list = []
    five_year_list = []
    # 7-day forecasts per experience bucket
    fc_one_year = []
    fc_two_year = []
    fc_three_year = []
    fc_four_year = []
    fc_five_year = []
    # "MM-DD" labels for the 7 forecasted days
    date_list = []
    # JSON strings, one per forecasted day
    result = []
    # NOTE(review): appears unused in this class
    education_dict = {}
    now_month = 0
    now_day = 0
    # days per month, no leap-year handling
    month = [31,28,31,30,31,30,31,31,30,31,30,31]
    # NOTE(review): hard-coded production DB credentials -- should come
    # from configuration/environment, not source control.
    db = pymysql.connect("rm-uf6871zn4f8aq9vpvro.mysql.rds.aliyuncs.com", "user", "Group1234", "job_data")
    def get_data(self):
        """Load the stored observation series from MySQL into the class lists."""
        # get a cursor for the connection
        cursor = self.db.cursor()
        # the SQL query
        sql = "select result from tb_forecast_workexper_jobnum"
        try:
            cursor.execute(sql)
            # fetch all records
            results = cursor.fetchall()
            list = results[0]
            for i in list:
                # parse the stored string into Python objects
                # NOTE(review): eval() on DB content is unsafe; if the
                # column holds JSON, json.loads would be the safe choice.
                list_ds = eval(i)
                for j in list_ds:
                    self.x_list.append(j['date'])
                    self.one_year_list.append(j['one'])
                    self.two_year_list.append(j['two'])
                    self.three_year_list.append(j['three'])
                    self.four_year_list.append(j['four'])
                    self.five_year_list.append(j['five'])
        except:
            print("Error: unable to fetch data")
    def get_date(self,mon,day):
        """Convert a (month, day) pair into a 1-based day-of-year ordinal."""
        date = 0
        for i in range(mon-1):
            date += self.month[i]
        return date + day
    def get_x_list(self):
        """Replace the "MM-DD" strings in x_list with day-of-year ints;
        leaves now_month/now_day at the LAST observed date."""
        tmp = []
        for date in self.x_list:
            split_str = date.split('-')
            self.now_month = int(split_str[0])
            self.now_day = int(split_str[1])
            time = self.get_date(self.now_month, self.now_day)
            tmp.append(time)
        self.x_list = tmp
    def child_train(self,y_list,res_list):
        """Fit y_list against x_list with linear regression and append a
        7-day forecast to res_list (and matching labels to date_list)."""
        tmp_now_month = self.now_month
        tmp_now_day = self.now_day
        # turn the two lists into a pandas DataFrame
        dic = {'x_list' : self.x_list,'y_list' : y_list}
        aa=pd.DataFrame(dic)
        aa.head()
        X = np.array(aa[['x_list']])
        Y = np.array(aa[['y_list']])
        # split into train/test sets (test split is unused below)
        x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size = 0.2, random_state=0)
        # fit an ordinary least-squares linear regression on the training set
        model = linear_model.LinearRegression()
        model.fit (x_train,y_train)
        for i in range(7):
            # advance the calendar by one day
            # NOTE(review): self.month is 0-indexed while now_month is
            # 1-12, so self.month[self.now_month] reads the NEXT month's
            # length (and raises IndexError in December); the day is also
            # never reset to 1 when the month rolls over -- confirm intent.
            if( self.now_day<self.month[self.now_month] ):
                self.now_day += 1
            else:
                self.now_month += 1
            now_time = self.get_date(self.now_month,self.now_day)
            now_time = np.array(now_time).reshape(1, -1)
            predict_value = model.predict(now_time)
            predict_value = int(predict_value)
            format_time = str(self.now_month).zfill(2) + '-' + str(self.now_day).zfill(2)
            self.date_list.append(format_time)
            res_list.append(predict_value)
        # restore the date cursor so each bucket forecasts the same 7 days
        self.now_month = tmp_now_month
        self.now_day = tmp_now_day
    def train(self):
        """Forecast every experience bucket, bundle the 7 daily forecasts
        into JSON strings, and persist them via to_sql()."""
        self.get_x_list()
        # work-experience buckets: 1 through 5 years
        self.child_train(self.one_year_list,self.fc_one_year)
        self.child_train(self.two_year_list,self.fc_two_year)
        self.child_train(self.three_year_list,self.fc_three_year)
        self.child_train(self.four_year_list,self.fc_four_year)
        self.child_train(self.five_year_list,self.fc_five_year)
        for i in range(7):
            tmp_dict = {}
            tmp_dict['date'] = self.date_list[i]
            tmp_dict['one'] = self.fc_one_year[i]
            tmp_dict['two'] = self.fc_two_year[i]
            tmp_dict['three'] = self.fc_three_year[i]
            tmp_dict['four'] = self.fc_four_year[i]
            tmp_dict['five'] = self.fc_five_year[i]
            self.result.append(json.dumps(tmp_dict))
        self.to_sql()
    def to_sql(self):
        """Write the JSON forecast list into the table's `forecast` column
        and close the connection."""
        self.result = str(self.result)
        # strip characters that would break the quoting of the SQL literal
        self.result = self.result.replace('"','\\"')
        self.result = self.result.replace('\'','')
        cursor = self.db.cursor()
        # NOTE(review): SQL built via string interpolation; prefer a
        # parameterized query, e.g. cursor.execute(sql, (value,)).
        sql = "update tb_forecast_workexper_jobnum set forecast = '%s' where id = 1"%(self.result)
        print(sql)
        try:
            cursor.execute(sql)
            self.db.commit()
        except:
            print("insert error")
        self.db.close()
if __name__ == "__main__":
p = Forecast_Workexper_Jobnum()
p.get_data()
p.train()
| true |
853363884cf7aaf8bd563ba2a174a9b4e24e0d2c | Python | celinesf/personal | /2012_py_R_java_BaseHealth/NextBio/ExcelUtils.py | UTF-8 | 26,562 | 2.5625 | 3 | [] | no_license | #!/usr/bin/env python
"""
Utility functions to obtain read and write on excel files
06/19/13- 1.0
"""
__author__ = "Celine Becquet"
__copyright__ = "Copyright 2013, Genophen.com"
__maintainer__ = "Celine Becquet"
__email__ = "becquet@genophen.com"
__status__ = "dev"
import logging, xlrd, copy
from NextBioUtils import NextBioUtils
class ExcelUtils():
    """Helpers to read NextBio bioset/SNP data from, and write it back to,
    Excel workbooks (Python 2 module; reading uses xlrd).

    The "data" sheet layout this parser expects: per bioset, a '='-run
    partition line, key/value meta-data rows, another partition, a SNP
    column-header row, a '-'-run separator, then one SNP row per line
    keyed by the bioset id in column 0.
    """
    def __init__(self):
        self.util = NextBioUtils()
        self.partition = "="*30 # this separates meta data from snp information
        self.snp_part = "-"*30 # this separates column names from data in snp section
    ############## read excel data ###################
    ''' goto next row while getting key/value on a row
    assumes data from exel file '''
    def getKeyValueRow(self,data, rownum):
        """Advance to the next row of the column-major `data` and return
        (new_rownum, key_from_col0, value_from_col1); multi-word keys are
        collapsed to word0_word1."""
        logging.debug(' Function: getKeyValueRow' )
        rownum+=1
        # print rownum, data[0][rownum]
        key = data[0][rownum].lower()
        if len(key.split(" "))>1:
            key = "%s_%s" % (key.split(" ")[0],key.split(" ")[1])
        value = data[1][rownum]
        return rownum, key, value
    ''' extract data from a row from SNP table '''
    def getSnpRow(self, header, row, data):
        """Parse one SNP table row into data[dbsnp][column_name]; empty
        cells become None.  'comment' cells keep spaces as underscores and
        'snp_population' cells keep their spaces."""
        logging.debug(' Function: getSnpRow' )
        csnp = header.index("SNP" )
        dbsnp = row[csnp]
        data[dbsnp]={}
        # self.line accumulates the tab-joined raw row (side effect kept
        # on the instance; NOTE(review): confirm whether any caller uses it)
        self.line = None
        for num in range(len(header)):
            if len(header[num])>0:
                if self.line is None:
                    self.line =row[num].replace('\n','').replace(' ',"")
                else:
                    # print self.line
                    self.line = '%s\t%s' % (self.line,row[num].replace('\n','').replace(' ',""))
                if header[num].lower().replace('\n','').replace(' ',"") == 'comment':
                    data[dbsnp][header[num].lower().replace('\n','').replace(' ',"")] = row[num].replace('\n','').replace(' ',"_")
                elif header[num].lower().replace('\n','').replace(' ',"") == 'snp_population':
                    data[dbsnp][header[num].lower().replace('\n','').replace(' ',"")] = row[num].replace('\n','')
                else:
                    data[dbsnp][header[num].lower().replace('\n','').replace(' ',"")] = row[num].replace('\n','').replace(' ',"")
                if data[dbsnp][header[num].lower().replace('\n','').replace(' ',"")] == "":
                    data[dbsnp][header[num].lower().replace('\n','').replace(' ',"")] = None
        return data
    ''' getBiosetMetaData'''
    def getBiosetMetaData(self, key,value, excel_data,row_num):
        """Consume one bioset's meta-data rows.  Sets self.bioset_id from
        the first (non-empty) row and, for science-team-accepted biosets,
        copies/updates self.nextbio_data.  Returns the cursor at the next
        partition row."""
        logging.debug(' Function: getBiosetMetaData - key: %s, nrow: %s' %(key,row_num))
        # skip empty line before new bioset
        row_num, key, value = self.getKeyValueRow(excel_data,row_num)
        if len(key) == 0:
            row_num, key, value = self.getKeyValueRow(excel_data,row_num)
        self.bioset_id= value # bioset #
        # print self.bioset_id, self.bioset_info
        ### only record accepted biosets by science team
        if self.bioset_info[self.bioset_id]["accepted"].lower() == 'yes':
            self.nextbio_data[self.bioset_id]=copy.deepcopy(self.bioset_info[self.bioset_id] )
        ### bioset meta data
        while self.partition not in key and len(key)>0: #
            # column 2 must be empty on a meta-data row; anything else is
            # reported as "issue A"
            if excel_data[2][row_num] == "" :
                if self.bioset_info[self.bioset_id]["accepted"].lower() == 'yes':## only record accepted biosets by science team
                    self.nextbio_data[self.bioset_id][key.lower()]=value
                    if key not in self.bioset_info[self.bioset_id]:
                        self.nextbio_data[self.bioset_id][key.lower()]=value
                    else:
                        self.util.checkSame(self.bioset_info[self.bioset_id][key], value)
            else : print "issue A", excel_data[2][row_num]
            row_num, key, value = self.getKeyValueRow(excel_data,row_num)
        return row_num, key, value
    ''' getBiosetSnpData'''
    def getBiosetSnpData(self, key,value, excel_data,row_num, sheet):
        """Consume one bioset's SNP table: header row after the partition,
        then data rows while column 0 still equals the bioset id."""
        logging.debug(' Function: getBiosetSnpData - key: %s, nrow: %s' %(key,row_num))
        ### startingB SNP table
        if self.partition in key:
            row_num+=1
            self.column_key[self.bioset_id] = sheet.row_values(row_num)
            if self.bioset_info[self.bioset_id]["accepted"].lower() == 'yes':## only record accepted biosets by science team
                self.nextbio_data[self.bioset_id]["snps"] = {}
            ### snp partition
            row_num, key, value = self.getKeyValueRow(excel_data,row_num)
            ### get all snp data
            if self.snp_part in key:
                row_num, key, value = self.getKeyValueRow(excel_data,row_num)
                while key == self.bioset_id:
                    if self.bioset_info[self.bioset_id]["accepted"].lower() == 'yes':## only record accepted biosets by science team
                        self.nextbio_data[self.bioset_id]["snps"] = self.getSnpRow(self.column_key[self.bioset_id], sheet.row_values(row_num),self.nextbio_data[self.bioset_id]["snps"])
                    row_num, key, value = self.getKeyValueRow(excel_data,row_num)
        return row_num, key, value
    ''' get bioset data including SNP table from NextBio xsl format'''
    def readNextBioDataSheet(self, sheet):
        """Walk the whole "data" sheet, populating self.nextbio_data with
        each accepted bioset's meta data and SNP rows."""
        logging.debug(' Function: readNextBioDataSheet' )
        self.nextbio_data = {}
        excel_data = []
        ''' get all column'''
        for cnum in range(sheet.ncols ):
            excel_data.append(sheet.col_values(cnum))
        ''' get bioset data per data_row'''
        row_num = -1
        while row_num+1 < len(excel_data[0]):
            row_num, key, value = self.getKeyValueRow(excel_data,row_num) ### first bioset partision line
            ### start new bioset
            while self.partition in key and len(key)>0 and row_num+1 <len(excel_data[0]):
                ### get bioset meta data
                row_num, key, value = self.getBiosetMetaData(key,value ,excel_data,row_num)
                ### get snp data
                row_num, key, value = self.getBiosetSnpData(key,value ,excel_data,row_num, sheet)
    ''' get data from informative sheets in execl file currated by science team'''
    def readExcelData(self,fname):
        """Open the workbook `fname` and return
        (nextbio_data, bioset_info, header, column_key) from its
        "info" and "data" sheets."""
        logging.debug(' Function: readExcelData' )
        self.column_key ={}
        infile = xlrd.open_workbook(fname)
        ''' get bioset info '''
        self.bioset_info, self.header = self.readBiosetInfoSheet(infile.sheet_by_name("info"))
        ''' get bioset+ SNP data '''
        self.readNextBioDataSheet(infile.sheet_by_name("data"))
        return self.nextbio_data, self.bioset_info, self.header, self.column_key
    ''' get information as specified by science team'''
    def readBiosetInfoSheet(self, sheet):
        """Read the "info" sheet (first row = column names, one row per
        bioset) into {bioset_id: {column: value}}; returns (data, header)."""
        logging.debug(' Function: readBiosetInfoSheet' )
        header =[]
        data = {}
        data_tmp = {}
        for cnum in range(sheet.ncols ):
            tmp = sheet.col_values(cnum)
            col_name = tmp[0]
            header.append(col_name)
            # drop the header cell; NOTE(review): list.remove deletes the
            # first matching value, which is the header only if no data
            # cell repeats it -- confirm.
            tmp.remove(col_name)
            data_tmp[col_name] = tmp
        for bnum in range (len(data_tmp['bioset_id'])):
            bioset = data_tmp['bioset_id'][bnum]
            print bioset
            data[bioset]={}
            for tag in data_tmp:
                data[bioset][tag] = data_tmp[tag][bnum]
        return data, header
    #################### write to excel ##############
    ### writeExcelRow: write one key/value pair on `row`, return next row ###
    def writeExcelRow(self,sheet,row,key,value):
        sheet.write(row,0,key )
        if value is not None:
            sheet.write(row,1, value )
        row += 1
        return row
    ### writeExcelSnpTableData ###
    def writeExcelSnpTableData(self, sheet,row, header, data):
        """Write one row per SNP (cells ordered by `header`, missing values
        blank), then the '='-run separator and an empty row."""
        logging.debug(' Function: writeExcelSnpTableData')
        for snp in data:
            c = 0
            for h in header:
                if h in data[snp] and data[snp][h] is not None:
                    sheet.write(row,c,data[snp][h])
                else:
                    sheet.write(row,c,'')
                c+=1
            row += 1
        sheet.write(row,0,"=======================================================================================================================================================================================================================================" )
        row += 1
        sheet.write(row,0,"")
        row += 1
        return row, sheet
    ### writeExcelSnpTableHeader ###
    def writeExcelSnpTableHeader(self, sheet,row, header):
        """Write the non-empty column names (stripped of '/n' artifacts),
        then the '-'-run separator; returns the cleaned header too."""
        logging.debug(' Function: writeExcelSnpTableHeader')
        c = 0
        new_header = []
        for h in header:
            if h != "":
                h = h.replace('/n','').strip()
                sheet.write(row,c,h)
                new_header.append(h)
            c+=1
        row += 1
        sheet.write(row,0,"------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------")
        row += 1
        return row, sheet, new_header
    ### writeInfoSheet ###
    def writeInfoSheet(self, sheet,row, header, data):
        """Write the header row when row == 0, then one data row with the
        values of `data` in header order."""
        logging.debug(' Function: writeInfoSheet')
        if row == 0:
            c=0
            for key in header:
                sheet.write(row,c,key)
                c+=1
            row +=1
        if row >0:
            c=0
            for key in header:
                sheet.write(row,c,data[key])
                c+=1
            row +=1
        return row, sheet
    ### write in Nextbio bioset key/value information data###
    def writeNextBioBiosetInfo(self, sheet, row,data):
        """Write the bioset's known meta-data fields (optional ones only
        when present), blank ACCEPTED/COMMENT rows for the science team to
        fill in, and the '='-run separator."""
        logging.debug(' Function: writeNextBioBiosetInfo ')
        row= self.writeExcelRow(sheet,row,"BIOSET_ID" ,data["BIOSET_ID"])
        row= self.writeExcelRow(sheet,row,"PMID" ,data["PMID"])
        if "BIOSET TITLE" in data:
            row= self.writeExcelRow(sheet,row,"BIOSET TITLE" ,data["BIOSET TITLE"])
        if "SAMPLE NUMBER" in data:
            row= self.writeExcelRow(sheet,row,"SAMPLE NUMBER" ,data["SAMPLE NUMBER"])
        if "COMPARISON" in data:
            row= self.writeExcelRow(sheet,row,"COMPARISON" ,data["COMPARISON"])
        if "TAG" in data:
            row= self.writeExcelRow(sheet,row,"TAG" ,data["TAG"])
        if "BIOSET SUMMARY" in data:
            row= self.writeExcelRow(sheet,row,"BIOSET SUMMARY" ,data["BIOSET SUMMARY"])
        if "ANALYSIS SUMMARY" in data:
            row= self.writeExcelRow(sheet,row,"ANALYSIS SUMMARY" ,data["ANALYSIS SUMMARY"])
        if "PLATFORM" in data:
            row= self.writeExcelRow(sheet,row,"PLATFORM" ,data["PLATFORM"])
        if "META-ANALYSIS" in data:
            row= self.writeExcelRow(sheet,row,"META-ANALYSIS" ,data["META-ANALYSIS"])
        if "REFERENCE POPULATION" in data:
            row= self.writeExcelRow(sheet,row,"REFERENCE POPULATION" ,data["REFERENCE POPULATION"])
        row= self.writeExcelRow(sheet,row,"BATCH" ,data["BATCH"])
        row= self.writeExcelRow(sheet,row,"ACCEPTED" ,None)
        row= self.writeExcelRow(sheet,row,"COMMENT" ,None)
        sheet.write(row,0,"=======================================================================================================================================================================================================================================" )
        row+= 1
        return row, sheet
# self.batchname = "" # batch file provided by nextbio
# self.previous_batch ={}
# self.issue = False
# self.current_batch ={}
# self.key_list = {}
# self.key_combination = []
# self.column_list = []
# self.key_comb_list = None
# self.column_name_dict = {}
# self.pmid_list = {}
# self.meta_sp = [":", "="] # this separates fields whithin meta data
# self.vs = ["_vs_", "vs"] # this separates comparison and tag fields
# ''' checkSame
# compare two value'''
# def checkSame(self, data1, data2):
# logging.debug(' Function: checkSame - data1: %s, data2: %s' %(data1, data2))
# if data1 != data2 :
# if data1 != '' and data2 != '' and data1 is not None and data2 is not None :
# logging.warning(' DATA DONT FIT (checkSame) - data1: %s, data2: %s' %(data1, data2))
# return False
# return True
#
# ### writeExcelRow
# def writeExcelRow(self,sheet,row,key,value):
# sheet.write(row,0,key )
# if value is not None:
# sheet.write(row,1, value )
# row += 1
# return row
#
# ''' keyToUpperCase '''
# def keyToUpperCase(self, data):
# logging.debug(' Function: keyToUpperCase ')
# new_data = {}
# for key in data:
# new_key = key.upper()
# new_data[new_key] = data[key]
# if type(new_data[new_key]) == dict:
# new_data[new_key] = self.keyToUpperCase(new_data[new_key] )
# return new_data
#
# # ''' isSameAllele '''
# # def isSameAllele(self, allele1, allele2):
# # logging.debug(' Function: isSameAllele - allele1:%s, allele2:%s' % (allele1, allele2))
# # if allele1 == allele2:
# # return 1
# # else:
# # return 0
#
# # ''' reverse
# # '''
# # def reverseAlleles(self, alleles):
# # logging.debug(' Function: reverseAlleles - %s' % alleles)
# # reversed_allele = []
# # for index in range(0,len(alleles)):
# # allele= alleles[index ]
# # if allele == 'G':
# # reversed_allele.append('C')
# # elif allele == 'C':
# # reversed_allele.append('G')
# # elif allele == 'A':
# # reversed_allele.append('T')
# # elif allele == 'T':
# # reversed_allele.append('A')
# # elif allele == '-':
# # reversed_allele.append('-')
# # else :
# # self.warnMe('warning', ' CANT REVERSE ALLELE %s (reverseAlleles) - ' % (allele,alleles))
# # if len(reversed_allele) == 1:
# # reversed_allele = reversed_allele[0]
# # return reversed_allele
# ''' warnMe '''
# def warnMe(self, flag, comment):
# if flag == 'info':
# logging.info(comment)
# print comment
# elif flag== 'warning':
# logging.warning(comment)
# print "%s -%s" % (flag.upper(),comment)
# elif flag== 'critical':
# logging.critical(comment)
# print "%s -%s" % (flag.upper(),comment)
# elif flag== 'error':
# logging.error(comment)
# print "%s -%s" % (flag.upper(),comment)
# else:
# logging.info(comment)
# print "%s -%s" % (flag.upper(),comment)
#
# ''' get map for typo corrections '''
# def getTermMap(self,fname):
# logging.debug(' Function: getTermMap' )
# f = open(fname,'r')
# typos = f.read()
# f.close()
# return json.loads(typos)
#
#
# ''' remove quote from key or value
# Issue from batch 7
# '''
# def checkQuote(self,line):
# logging.debug(' Function: checkQuote' )
# s=line.encode("utf-8")
# if "\"" in s:
# s = line.split("\"")
# if len(s) == 2:
# if len(s[1]) == 0 :
# logging.warning(' REMOVE QUOTE s[1]=0 (checkQuote) - %s - %s\n%s' % (self.batchname,line, s))
# s = s[0]
# elif len(s[0])== 0 :
# logging.warning(' REMOVE QUOTE s[0]=0 (checkQuote) - %s - %s\n%s' % (self.batchname,line, s))
# s = s[1]
# else:
# self.warnMe('error', ' ERROR QUOTE (checkQuote) - %s - %s\n%s' % (self.batchname, line,s))
# s= line
# else:
# s1 = None
# for w in s:
# if s1 is None:
# s1=w
# else:
# s1 = "%s'%s" % (s1,w)
# s = s1
# logging.warning(' CHANGED QUOTE (checkQuote) - %s - %s\n%s' % (self.batchname, line,s))
# return s
#
# '''find key before : or ='''
# def findKey(self,line):
# logging.debug(' Function: findKey' )
# key = None
# value = None
# for sep in self.meta_sp:
# if sep in line:
# try:
# key, value = line.split(sep,1)
# except Exception, e:
# self.warnMe('critical',' NO KEY VALUE? (findKey) %s' % line )
# return key, value
# ''' add new item to list '''
# def addToList(self,item_list, item, new_value, old_value):
# logging.debug(' Function: addToList %s' % item )
# if item not in item_list:
# item_list[item] = new_value
# if old_value is not None:
# item_list[item][old_value] += 1
# else:
# item_list[item] += 1
# return item_list
# ''' get key and value from a line in meta data and count # occurence in batch'''
# def getKeyValueCount(self,key_map, line):
# logging.debug(' Function: getKeyValue' )
# line = line.strip()
# key, value = self.findKey(line)
#
# ''' record list of keys / value'''
# self.current_batch["BATCH"]=self.batchname
# if key :
# key = key.strip().upper()
# key = self.checkQuote(key)
#
# if key in key_map:
# key = key_map[key]
# else:
# self.warnMe('critical', "I COULD NOT FIND MAPPING FOR KEY %s (getKeyValueCount)- %s" % (key, self.current_batch))
#
# ''' add new key '''
# self.key_list = self.addToList(self.key_list, key, {"count" : 0, "value" : {}, "batch":{}}, "count")
#
# ''' record value'''
# value = value.strip()
# value = self.checkQuote(value)
# self.key_list[key]["value"] = self.addToList(self.key_list[key]["value"], value, {"count":0,"batch":{}}, "count")
# self.key_list[key]["value"][value]["batch"] = self.addToList(self.key_list[key]["value"][value]["batch"], self.batchname, 0, None)
#
# self.current_batch[key]["value"] = self.addToList(self.current_batch[key]["value"], value, 0, None)
# self.current_batch[key]=value
#
# ''' record batchname '''
# self.key_list[key]["batch"] = self.addToList(self.key_list[key]["batch"], self.batchname, 0, None)
#
# self.checkKeyValue(key, value)
# return key
# ''' get key and value from a line in meta data '''
# def getKeyValue(self, key_map, line):
# logging.debug(' Function: getKeyValue' )
# line = line.strip()
# key, value = self.findKey(line)
#
# ''' record list of keys / value'''
# self.current_batch["BATCH"]=self.batchname
# if key :
# key = key.strip().upper()
# key = self.checkQuote(key)
#
# if key in key_map:
# key = key_map[key]
# else:
# self.warnMe('critical', "I COULD NOT FIND MAPPING FOR KEY %s (getKeyValue)- %s" % (key, self.current_batch))
#
# ''' add new key '''
# self.key_list = self.addToList(self.key_list, key, {"count" : 0, "value" : {}, "batch":{}}, "count")
#
# ''' record value'''
# value = value.strip()
# value = self.checkQuote(value)
# self.key_list[key]["value"] = self.addToList(self.key_list[key]["value"], value, 0, None)
#
# self.current_batch[key]=value
#
# ''' record batchname '''
# self.key_list[key]["batch"] = self.addToList(self.key_list[key]["batch"], self.batchname, 0, None)
#
# self.checkKeyValue(key, value)
# return key
# ''' check key and value if duplicated and came from previous bioset '''
# def checkKeyValue(self,key,value):
# logging.debug(' Function: checkKeyValue, %s: %s' % (key,value) )
# if key not in self.key_combination:
# self.key_combination.append(key)
# else:
# self.issue = True
# logging.warning("REPEATED KEY IN ONE BIOSET (checkKeyValue) %s\t%s\t%s:%s" % (key, self.batchname, key,value))
# if self.current_batch[key]["count"] >1 and self.current_batch[key]["value"][value] == self.current_batch[key]["count"]:
# logging.warning("VALUE FOUND TWICE IN ONE BIOSET (checkKeyValue): %s" % value)
# if value in self.previous_batch[key]["value"]:
# logging.warning("VALUE FOUND IN PREVIOUS BIOSET (checkKeyValue): %s" % value)
# else:
# logging.error("VALUE FOUND NOT IN PREVIOUS BIOSET FOR SAME KEY (checkKeyValue): %s:%s\t%s" % (key, value, self.previous_batch[key]["value"]))
# else:
# self.warnMe("error", "DIFFERENT VALUE FOUND FOR SAME KEY (checkKeyValue): %s: %s\n%s" % (key,value,self.current_batch[key]["value"]))
#
# ''' get column names for SNP data
# '''
# def getColumnNames(self, line):
# logging.debug(' Function: getColumnNames' )
# line = line.strip()
# for word in line.split('\t'):
# if word not in self.column_name_dict:
# self.column_name_dict[word] = {"count" : 0, "batch":{}}
# self.column_name_dict[word]["count"] += 1
# if self.batchname not in self.column_name_dict[word]["batch"]:
# self.column_name_dict[word]["batch"][self.batchname] = 0
# self.column_name_dict[word]["batch"][self.batchname] += 1
#
# ''' get column names for SNP data
# '''
# def getColumnList(self, line):
# logging.debug(' Function: getColumnNames' )
# line = line.strip()
# self.column_list = []
# for word in line.split('\t'):
# if word not in self.column_list:
# self.column_list.append(word)
# else:
# word = '%s2' % word
# self.column_list.append(word)
# self.warnMe('warning', ' WORD REPEATED %s (getColumnList) %s' % (word, line))
# ''' get unique PMID and they bioset IDs
# '''
# def getPMID(self, line):
# logging.debug(' Function: getPMID' )
# line = line.strip()
# data = line.split('\t')
# pmid = data[1]
# self.current_batch["PMID"] = pmid
# self.current_batch["BIOSET_ID"] = data[0]
# bioset_id = data[0]
# if pmid not in self.pmid_list:
# self.pmid_list[pmid] = {"count":0,"id":{}}
# self.pmid_list[pmid]["count"] += 1
# if bioset_id not in self.pmid_list[pmid]["id"]:
# self.pmid_list[pmid]["id"][bioset_id] = 0
# self.pmid_list[pmid]["id"][bioset_id] +=1
#
# def writeOutput(self, filename,data):
# logging.debug(' Function: writeOutput %s:' % filename)
# f = open("%s/%s.json" % (config.OUTPUTPATH,filename), "w")
# f.write(json.dumps(data, indent=4))
# f.close()
# ''' write unique key of list '''
# def writeList(self, filename,data):
# logging.debug(' Function: writeList %s:' % filename)
# total = 0
# f = open("%s%s.json" % (config.OUTPUTPATH,filename), "w")
# for d in data:
# f.write("%s\t%s\n" % (d, data[d]["count"]))
# total += data[d]["count"]
# f.write("TOTAL\t%s\n" % (total))
#
# ''' add new key combination in list '''
# def addNewKeyCombination(self):
# logging.debug(' Function: addNewKeyCombination %s' %(self.key_combination) )
# if len(self.key_combination)>0:
# ''' init combination list '''
# if self.key_comb_list is None:
# self.key_comb_list = {}
# self.key_comb_list = self.addToList(self.key_comb_list, "0", {"keys":self.key_combination,"count" : 0, "batch":{}},"count")
# self.key_comb_list["0"]["batch"] = self.addToList(self.key_comb_list["0"]["batch"], self.batchname, 0, None)
# # self.key_comb_list["0"] = {"keys":self.key_combination,"count":0,"batch":{}}
# else:
# found = 0
# comb_num =""
# for comb in self.key_comb_list:
# comb_num = comb
# it = 0
# for key in self.key_comb_list[comb]["keys"]:
# if key in self.key_combination:
# it += 1
# else:
# break
# if it == len(self.key_combination) and it == len(self.key_comb_list[comb]['keys']):
# found = 1
# self.key_comb_list = self.addToList(self.key_comb_list, comb, {"keys":self.key_combination,"count" : 0, "batch":{}},"count")
# self.key_comb_list[comb]["batch"] = self.addToList(self.key_comb_list[comb]["batch"], self.batchname, 0, None)
# break
# if found == 0:
# comb_num = str(len(self.key_comb_list))
# self.key_comb_list = self.addToList(self.key_comb_list, comb_num, {"keys":self.key_combination,"count" : 0, "batch":{}},"count")
# self.key_comb_list[comb_num]["batch"] = self.addToList(self.key_comb_list[comb_num]["batch"], self.batchname, 0, None)
# self.key_combination = []
######################### READ EXCEL DATA FILE #####################
# | true |
9baf12373f94bb37ab7cb9cdc2c95cb42ef52f64 | Python | julius-risky/praxis-academy | /novice/02-02/latihan/test1.py | UTF-8 | 785 | 3.46875 | 3 | [] | no_license | import unittest
symbol=[('M',1000),('C',900),('D',500),('C D',400),('C',100),('X C',90),('L',50),('X L',40),('X',10),('I X',9),('V',5),('I V',4),('I',1)]
def romannumeral(number):
outstring = ""
while number >0:
for symbol, value in symbol:
if number-value >=0:
outstring += symbol
number = number-value
continue
return outstring
class Test(unittest.TestCase):
    """Unit tests for romannumeral covering single and compound numerals."""
    def test_9(self):
        self.assertEqual(romannumeral(9), "IX")
    def test_29(self):
        self.assertEqual(romannumeral(29), "XXIX")
    def test_707(self):
        self.assertEqual(romannumeral(707), "DCCVII")
    def test_1800(self):
        # BUG FIX: was named "tes_1800", so unittest never collected it.
        self.assertEqual(romannumeral(1800), "MDCCC")
# Run the test suite when this file is executed directly.
if __name__== '__main__':
    unittest.main()
18c47b61f1e7618b410071aaaa3f4249963e7154 | Python | chenxu0602/LeetCode | /1870.minimum-speed-to-arrive-on-time.py | UTF-8 | 2,984 | 3.546875 | 4 | [] | no_license | #
# @lc app=leetcode id=1870 lang=python3
#
# [1870] Minimum Speed to Arrive on Time
#
# https://leetcode.com/problems/minimum-speed-to-arrive-on-time/description/
#
# algorithms
# Medium (32.38%)
# Likes: 224
# Dislikes: 59
# Total Accepted: 9.5K
# Total Submissions: 29.4K
# Testcase Example: '[1,3,2]\n6'
#
# You are given a floating-point number hour, representing the amount of time
# you have to reach the office. To commute to the office, you must take n
# trains in sequential order. You are also given an integer array dist of
# length n, where dist[i] describes the distance (in kilometers) of the i^th
# train ride.
#
# Each train can only depart at an integer hour, so you may need to wait in
# between each train ride.
#
#
# For example, if the 1^st train ride takes 1.5 hours, you must wait for an
# additional 0.5 hours before you can depart on the 2^nd train ride at the 2
# hour mark.
#
#
# Return the minimum positive integer speed (in kilometers per hour) that all
# the trains must travel at for you to reach the office on time, or -1 if it is
# impossible to be on time.
#
# Tests are generated such that the answer will not exceed 10^7 and hour will
# have at most two digits after the decimal point.
#
#
# Example 1:
#
#
# Input: dist = [1,3,2], hour = 6
# Output: 1
# Explanation: At speed 1:
# - The first train ride takes 1/1 = 1 hour.
# - Since we are already at an integer hour, we depart immediately at the 1
# hour mark. The second train takes 3/1 = 3 hours.
# - Since we are already at an integer hour, we depart immediately at the 4
# hour mark. The third train takes 2/1 = 2 hours.
# - You will arrive at exactly the 6 hour mark.
#
#
# Example 2:
#
#
# Input: dist = [1,3,2], hour = 2.7
# Output: 3
# Explanation: At speed 3:
# - The first train ride takes 1/3 = 0.33333 hours.
# - Since we are not at an integer hour, we wait until the 1 hour mark to
# depart. The second train ride takes 3/3 = 1 hour.
# - Since we are already at an integer hour, we depart immediately at the 2
# hour mark. The third train takes 2/3 = 0.66667 hours.
# - You will arrive at the 2.66667 hour mark.
#
#
# Example 3:
#
#
# Input: dist = [1,3,2], hour = 1.9
# Output: -1
# Explanation: It is impossible because the earliest the third train can depart
# is at the 2 hour mark.
#
#
#
# Constraints:
#
#
# n == dist.length
# 1 <= n <= 10^5
# 1 <= dist[i] <= 10^5
# 1 <= hour <= 10^9
# There will be at most two digits after the decimal point in hour.
#
#
#
# @lc code=start
import math
class Solution:
    def minSpeedOnTime(self, dist: List[int], hour: float) -> int:
        """Binary-search the minimum integer speed (km/h) at which all train
        rides finish within ``hour`` hours, or -1 if impossible.

        Every ride except the last rounds up to the next integer hour
        (trains depart only on integer hours); the last ride counts exactly.
        """
        def arrives_on_time(speed: int) -> bool:
            # Sum of waiting-rounded rides plus the fractional final ride.
            whole_rides = sum(math.ceil(d / speed) for d in dist[:-1])
            return whole_rides + dist[-1] / speed <= hour

        lo, hi = 0, 10 ** 7  # answer guaranteed <= 1e7 by the constraints
        while lo + 1 < hi:
            mid = (lo + hi) // 2
            if arrives_on_time(mid):
                hi = mid
            else:
                lo = mid
        return hi if arrives_on_time(hi) else -1
# @lc code=end
| true |
1046b6a15631ee2ecc012603386a28a92e2157c4 | Python | cmeese456/CISC684_Project1 | /tree_traversal.py | UTF-8 | 770 | 3.375 | 3 | [] | no_license | import sys
def tree_traversal(dt, row):
    '''
    Follow a row of a test or validation set through a decision tree and
    return the label of the leaf it lands on.
    Arguments:
    dt      a decision tree Node
    row     a dict mapping column names to values for a given row in a dataframe
    '''
    # Leaf node: no children, so the label is the prediction itself.
    if not (dt.left or dt.right):
        return dt.label

    # Internal node: the label names the attribute to branch on; the row's
    # value for that attribute (0 or 1) selects the subtree to descend.
    branch = int(row[dt.label])
    if branch == 0:
        return tree_traversal(dt.left.left, row)
    if branch == 1:
        return tree_traversal(dt.right.left, row)

    # Any other value is a data error: report and abort.
    sys.stderr.write('Illegal value found in leaf node.\n')
    sys.exit()
| true |
eb0d7c9c90821a59896d5e07ed7ff3b2d8d4e1d8 | Python | pbrown801/AV | /Program/getAVbest2.py | UTF-8 | 2,025 | 2.625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/python3.7
def getAVbest2(inputcoordinates):
    """Return the recommended Milky Way extinction A_V, its error, and the
    source reference for a sky position.

    Parameters
    ----------
    inputcoordinates : str
        Coordinates as a single string parseable by astropy SkyCoord in the
        FK5 frame, e.g. "12h34m56s +12d34m56s".

    Returns
    -------
    (AV, AVerr, source) : tuple of (float, float, str)
        A local tabulated correction when the position falls inside a
        tabulated radius, otherwise the IRSA dust-service value
        (Schlafly & Finkbeiner 2011) with a 10% error.
    """
    from astropy.coordinates import SkyCoord
    from astropy.coordinates import Angle
    from astroquery.irsa_dust import IrsaDust
    import pandas as pd
    import numpy as np
    import math
    print(inputcoordinates)
    # BUG FIX: the original reassigned inputcoordinates = sys.argv[0] (the
    # script path), so the caller's coordinates were silently ignored.
    testCoords = SkyCoord(inputcoordinates, frame='fk5')
    # Table columns: [?, ra, dec, radius_arcmin, AV, AVerr, source]
    inFile = 'Brown_Walker_table_1_rev2.dat'
    inTable = pd.read_csv(inFile, header=None, delimiter=' ')
    ra = Angle(inTable.iloc[:, 1])
    dec = Angle(inTable.iloc[:, 2])
    sourceCoords = SkyCoord(ra, dec, frame='fk5')
    # Angular separation (arcmin) between the query point and every table row.
    separations = testCoords.separation(sourceCoords).arcminute
    # Rows whose tabulated radius (column 3) contains the query point.
    within = np.less(separations, inTable.iloc[:, 3])
    fix = any(within)
    if fix:
        # Use the first matching row's corrected A_V and error.
        correctedAV = np.where(within, inTable.iloc[:, 4], None)
        AV = next((item for item in correctedAV if item is not None), None)
        correctedAVerr = np.where(within, inTable.iloc[:, 5], None)
        newAVerr = next((item for item in correctedAVerr if item is not None), None)
        # Combine the tabulated error with a 10% systematic term in quadrature.
        AVerr = math.sqrt((newAVerr)**2 + (AV * 0.1)**2)
        sources = np.where(within, inTable.iloc[:, 6], None)
        source = next((item for item in sources if item is not None), None) + ",S_F_2011"
    else:
        # No local correction: query the IRSA dust service for the
        # Schlafly & Finkbeiner (2011) value and assume a 10% error.
        AVtable = IrsaDust.get_extinction_table(testCoords, show_progress=False)
        AV = AVtable['A_SandF'][2]
        AVerr = AV * 0.1
        source = 'S_F_2011'
    print(AV, AVerr, source)
    return (AV, AVerr, source)
return(AV, AVerr, source);
#if __name__ == "__main__":
getAVbest2(input) | true |
54a14a939b5a4e28b5617eeec3ab77f12e89ee60 | Python | CompRhys/ornstein-zernike | /process/core/transforms.py | UTF-8 | 10,182 | 2.984375 | 3 | [
"MIT"
] | permissive | import numpy as np
from scipy.fftpack import dst, idst
from core import block
from scipy.signal import savgol_filter
def hr_to_cr(bins, rho, data, radius, error=None, axis=1):
    """
    Convert the total correlation function h(r) into the direct correlation
    function c(r) via the Ornstein-Zernike relation in Fourier space.

    The 3D radial transform is done as a 1D DST of f(r)*r (Lado's method):
    h(k) is formed from h(r), rearranged to c(k) = h(k) / (1 + rho*h(k)),
    and inverse-transformed back to real space.

    Args:
        bins:   number of radial bins.
        rho:    number density.
        data:   h(r) samples, transformed along ``axis``.
        radius: equally spaced radial grid (radius[0] == dr).
        error:  unused; kept for interface compatibility.
        axis:   axis of ``data`` along which to transform.

    Returns:
        (c_r, radius): the direct correlation function and the radial grid.
    """
    # setup scales
    dk = np.pi / radius[-1]
    # BUG FIX: np.float was removed in NumPy 1.24; use the builtin float.
    k = dk * np.arange(1, bins + 1, dtype=float)

    # Transform into fourier components
    FT = dst(data * radius[0:bins], type=1, axis=axis)
    normalisation = 2 * np.pi * radius[0] / k
    H_k = normalisation * FT

    # Rearrange to find direct correlation function
    C_k = H_k / (1 + rho * H_k)

    # Transform back to real space
    iFT = idst(C_k * k, type=1)
    normalisation = k[-1] / (4 * np.pi**2 * radius[0:bins]) / (bins + 1)
    c_r = normalisation * iFT

    return c_r, radius
def hr_to_sq(bins, rho, data, radius, axis=1):
    """
    Fourier-transform the total correlation function h(r) to the structure
    factor S(k) = 1 + rho * h(k), using a type-1 DST of h(r)*r.

    Args:
        bins:   number of radial bins.
        rho:    number density.
        data:   h(r) samples, transformed along ``axis``.
        radius: equally spaced radial grid (radius[0] == dr).
        axis:   axis of ``data`` along which to transform.

    Returns:
        (S_k, k): structure factor and the conjugate wave-vector grid.
    """
    # setup scales
    dk = np.pi/radius[-1]
    # BUG FIX: np.float was removed in NumPy 1.24; use the builtin float.
    k = dk * np.arange(1, bins + 1, dtype=float)

    # Transform into fourier components
    FT = dst(data * radius[0:bins], type=1, axis=axis)
    # radius[0] is dr as the bins are spaced equally.
    normalisation = 2 * np.pi * radius[0] / k
    H_k = normalisation * FT
    S_k = 1 + rho * H_k

    return S_k, k
def sq_to_hr(bins, rho, S_k, k, axis=1):
    """
    Inverse-transform the structure factor S(k) to the real-space total
    correlation function h(r) via h(k) = (S(k) - 1) / rho.

    Args:
        bins: number of wave-vector bins.
        rho:  number density.
        S_k:  structure factor samples, transformed along ``axis``.
        k:    equally spaced wave-vector grid (k[0] == dk).
        axis: axis of ``S_k`` along which to transform.

    Returns:
        (h_r, radius): total correlation function and the conjugate radial grid.
    """
    # setup scales
    dr = np.pi / (k[0] * bins)
    # BUG FIX: np.float was removed in NumPy 1.24; use the builtin float.
    radius = dr * np.arange(1, bins + 1, dtype=float)

    # Rearrange to find total correlation function from structure factor
    H_k = (S_k - 1.) / rho

    # Transform back to real space
    iFT = idst(H_k * k[:bins], type=1, axis=axis)
    normalisation = bins * k[0] / (4 * np.pi**2 * radius) / (bins + 1)
    h_r = normalisation * iFT

    return h_r, radius
def sq_to_cr(bins, rho, S_k, k, axis=1):
    """
    Transform the structure factor S(k) to the real-space direct correlation
    function c(r) via c(k) = (S(k) - 1) / (rho * S(k)).

    Args:
        bins: number of wave-vector bins.
        rho:  number density.
        S_k:  structure factor samples, transformed along ``axis``.
        k:    equally spaced wave-vector grid (k[0] == dk).
        axis: axis of ``S_k`` along which to transform.

    Returns:
        (c_r, radius): direct correlation function and the conjugate radial grid.
    """
    # setup scales
    dr = np.pi / (bins * k[0])
    # BUG FIX: np.float was removed in NumPy 1.24; use the builtin float.
    radius = dr * np.arange(1, bins + 1, dtype=float)

    # Rearrange to find direct correlation function from structure factor
    # C_k = (S_k-1.)/(S_k) # 1.-(1./S_k) what is better
    C_k = (S_k - 1.) / (rho * S_k)

    # Transform back to real space
    iFT = idst(k[:bins] * C_k, type=1, axis=axis)
    normalisation = bins * k[0] / (4 * np.pi**2 * radius) / (bins + 1)
    c_r = normalisation * iFT

    return c_r, radius
def sq_and_hr_to_cr(bins, rho, hr, r, S_k, k, axis=1):
    """
    Compute c(r) from h(r) and S(k) via the OZ identity
    c(r) = h(r) - F^-1[(S(k) - 1)^2 / (rho * S(k))], i.e. subtracting the
    inverse transform of the convolution term only.

    Args:
        bins: number of bins (r and k grids must be conjugate).
        rho:  number density.
        hr:   h(r) samples, shape matching ``S_k``.
        r:    radial grid, checked against the grid implied by ``k``.
        S_k:  structure factor samples, transformed along ``axis``.
        k:    equally spaced wave-vector grid (k[0] == dk).
        axis: axis along which to transform.

    Returns:
        cr: the direct correlation function on the ``r`` grid.
    """
    # setup scales
    dr = np.pi / (bins * k[0])
    # BUG FIX: np.float was removed in NumPy 1.24; use the builtin float.
    radius = dr * np.arange(1, bins + 1, dtype=float)
    # The supplied r grid must be the conjugate of the k grid.
    assert(np.all(np.abs(radius-r)<1e-12))

    # NOTE(review): unlike sq_to_cr, this inverse transform is not
    # renormalised -- confirm that is intentional for this estimator.
    iFT = idst(k[:bins] * np.square(S_k - 1.)/(rho * S_k), type=1, axis=axis)
    cr = hr - iFT
    return cr
def smooth_function(f):
    """
    Five-point least-squares smoothing as detailed on page 204 of
    Computer Simulation of Liquids (Allen & Tildesley).

    The two columns at each edge use asymmetric stencils; interior columns
    use the symmetric (-3, 12, 17, 12, -3)/35 stencil.  All stencils sum to
    one, so a constant input is returned unchanged.

    Args:
        f: 2D array smoothed along axis 1 (needs at least 5 columns).

    Returns:
        g: array of the same shape containing the smoothed values.
    """
    g = np.zeros_like(f)

    g[:, 0] = 1. / 70. * (69 * f[:, 0] + 4 * f[:, 1] -
                          6 * f[:, 2] + 4 * f[:, 3] - f[:, 4])
    g[:, 1] = 1. / 35. * (2 * f[:, 0] + 27 * f[:, 1] +
                          12 * f[:, 2] - 8 * f[:, 3] + 2 * f[:, 4])
    # BUG FIX: the original used f[:, -4] twice here (12*f[:,-4] - 8*f[:,-4]),
    # dropping f[:, -3] and breaking the mirror symmetry with g[:, 1].
    g[:, -2] = 1. / 35. * (2 * f[:, -1] + 27 * f[:, -2] +
                           12 * f[:, -3] - 8 * f[:, -4] + 2 * f[:, -5])
    g[:, -1] = 1. / 70. * (69 * f[:, -1] + 4 * f[:, -2] -
                           6 * f[:, -3] + 4 * f[:, -4] - f[:, -5])

    # Symmetric interior stencil.
    for i in np.arange(2, f.shape[1] - 2):
        g[:, i] = 1. / 35. * (-3 * f[:, i - 2] + 12 * f[:, i - 1] +
                              17 * f[:, i] + 12 * f[:, i + 1] - 3 * f[:, i + 2])
    return g
def process_inputs(box_size, temp, input_density, output="process",
                   **paths):
    """
    Load simulation RDF / structure-factor data, block-average it and derive
    correlation functions (h(r), c(r), bridge function, ...).

    Args:
        box_size:      simulation box edge length.
        temp:          target temperature (used by the "process" heuristics).
        input_density: requested number density (rounded to whole particles).
        output:        "invert", "plot" or "process" -- selects the return set.
        **paths:       keyword file paths; "invert" needs rdf_path and sq_path,
                       "plot" additionally phi_path, "process" also temp_path.

    Returns:
        output == "invert":  (r, h(r), c(r), gradient-icf, fd) tuple with errors.
        output == "plot":    two tuples of real- and k-space quantities.
        output == "process": (passed, data) where ``passed`` flags the cleaning
                             heuristics and ``data`` is the real-space tuple.
    """
    if output == "invert":
        assert len(paths) == 2, "rdf_path and sq_path must be provided"
    elif output == "plot":
        assert len(paths) == 3, "rdf_path, sq_path and phi_path must be provided"
    elif output == "process":
        assert len(paths) == 4, "rdf_path, sq_path, phi_path and temp_path must be provided"
    else:
        raise ValueError("Unknown output given - direct/plot/process")

    # Round to a whole number of particles, then recompute the density.
    n_part = int(input_density * (box_size**3.))
    density = n_part / (box_size**3.)

    # First row of each file is the abscissa; remaining rows are samples.
    rdf = np.loadtxt(paths.get('rdf_path'))
    sq = np.loadtxt(paths.get('sq_path'))

    r = rdf[0, :]
    r_bins = len(r)
    tcf = rdf[1:, :] - 1.

    q = sq[0, :]
    sq = sq[1:, :]

    # Find block size to remove correlations
    block_size_tcf = block.fp_block_length(tcf)
    block_size_sq = block.fp_block_length(sq)
    block_size = np.max((block_size_tcf, block_size_sq))
    block_tcf = block.block_data(tcf, block_size)
    block_sq = block.block_data(sq, block_size)

    # Total correlation function h(r): block mean and standard error.
    avg_tcf = np.mean(block_tcf, axis=0)
    err_tcf = np.sqrt(np.var(block_tcf, axis=0, ddof=1) / block_tcf.shape[0])

    # Fluctuation diagnostic var(h)/g(r), Savitzky-Golay smoothed where finite.
    fd_gr = np.var(block_tcf, axis=0, ddof=1)/(avg_tcf+1.)
    mask = np.where(np.isfinite(fd_gr))
    fd_gr_sg = np.copy(fd_gr) * np.sqrt(n_part)
    fd_gr_sg[mask] = savgol_filter(fd_gr[mask], window_length=9, polyorder=1, deriv=0, delta=r[1]-r[0])

    # Directly measured s(q).
    avg_sq = np.mean(block_sq, axis=0)
    err_sq = np.sqrt(np.var(block_sq, axis=0, ddof=1) / block_sq.shape[0])

    # s(q) obtained by Fourier transform of h(r); grids must agree.
    sq_fft, q_fft = hr_to_sq(r_bins, density, block_tcf, r)
    assert np.all(np.abs(q-q_fft)<1e-10), "The fft and sq wave-vectors do not match"
    avg_sq_fft = np.mean(sq_fft, axis=0)
    err_sq_fft = np.sqrt(np.var(sq_fft, axis=0, ddof=1) / sq_fft.shape[0])

    # Switching function w(q): blend the direct s(q) at low q with the
    # transformed one above the main peak (located at 75% of the maximum).
    peak = np.median(np.argmax(block_sq.T > 0.75*np.max(block_sq, axis=1), axis=0)).astype(int)
    after = len(q_fft) - peak
    switch = (1 + np.cbrt(np.cos(np.pi * q[:peak] / q[peak]))) / 2.
    switch = np.pad(switch, (0, after), 'constant', constant_values=(0))

    # Corrected s(q) using the switch.
    sq_switch = switch * block_sq + (1. - switch) * sq_fft
    avg_sq_switch = np.mean(sq_switch, axis=0)
    err_sq_switch = np.sqrt(np.var(sq_switch, axis=0, ddof=1) / sq_switch.shape[0])

    # Direct correlation function c(r) from the corrected s(q).
    dcf_swtch, r_swtch = sq_to_cr(r_bins, density, sq_switch, q_fft)
    avg_dcf = np.mean(dcf_swtch, axis=0)
    err_dcf = np.sqrt(np.var(dcf_swtch, axis=0, ddof=1) / dcf_swtch.shape[0])

    # Indirect correlation function y(r) = h(r) - c(r).
    block_icf = block_tcf - dcf_swtch
    avg_icf = np.mean(block_icf, axis=0)
    err_icf = np.sqrt(np.var(block_icf, axis=0, ddof=1) / block_icf.shape[0])

    # Gradient of y(r) scaled by the position of the first peak of h(r).
    r_peak = r[np.argmax(block_tcf, axis=1)]
    grad_icf = np.gradient(block_icf.T*r_peak, r, axis=0).T
    avg_grad_icf = np.mean(grad_icf, axis=0)
    err_grad_icf = np.sqrt(np.var(grad_icf, axis=0, ddof=1) / block_icf.shape[0])

    if output == "plot":
        # Extra comparison curves: c(r) from h(r) alone and from raw s(q).
        dcf_fft, _ = hr_to_cr(r_bins, density, block_tcf, r)
        avg_dcf_fft = np.mean(dcf_fft, axis=0)
        err_dcf_fft = np.sqrt(np.var(dcf_fft, axis=0, ddof=1) / dcf_fft.shape[0])

        dcf_dir, _ = sq_to_cr(r_bins, density, block_sq, q)
        avg_dcf_dir = np.mean(dcf_dir, axis=0)
        err_dcf_dir = np.sqrt(np.var(dcf_dir, axis=0, ddof=1) / dcf_dir.shape[0])

    if output == "invert":
        return (r, avg_tcf, err_tcf, avg_dcf, err_dcf, avg_grad_icf, err_grad_icf, fd_gr_sg,)

    # Pair potential phi(r); its radial grid must match the RDF grid.
    phi = np.loadtxt(paths.get('phi_path'))
    assert np.all(np.abs(r-phi[0,:])<1e-10), "the rdf and phi radii do not match"
    phi = phi[1,:]

    # Bridge function B(r) = ln g(r) + phi(r) - h(r) + c(r), evaluated only
    # from the first bin where g(r) is appreciably non-zero.
    ind = np.median(np.argmax(block_tcf + 1. > 0.01, axis=1)).astype(int)
    block_br = np.log((block_tcf[:,ind:] + 1.)) + np.repeat(phi[ind:].reshape(-1,1),
                 block_tcf.shape[0], axis=1).T- block_tcf[:,ind:] + dcf_swtch[:,ind:]
    avg_br = np.mean(block_br, axis=0)
    err_br = np.sqrt(np.var(block_br, axis=0, ddof=1) / block_br.shape[0])

    if output == "plot":
        return (r, phi,
                avg_tcf, err_tcf,
                fd_gr, fd_gr_sg,
                avg_dcf, err_dcf,
                avg_icf, err_icf,
                avg_grad_icf, err_grad_icf,
                avg_dcf_dir, err_dcf_dir,
                avg_dcf_fft, err_dcf_fft,
                avg_br, err_br), \
               (q, switch, avg_sq, err_sq, avg_sq_fft, err_sq_fft,
                avg_sq_switch, err_sq_switch, block_sq)

    else:
        # BUG FIX: np.NaN was removed in NumPy 2.0; np.nan is the same value.
        avg_br = np.pad(avg_br, (ind,0), "constant", constant_values=np.nan)
        err_br = np.pad(err_br, (ind,0), "constant", constant_values=np.nan)

        # Check if data satisfies our cleaning heuristics: the measured
        # temperature must agree with the target within its error, and the
        # structure factors must look physical (no spurious peaks).
        T = np.loadtxt(paths.get('temp_path'))[:,1]
        block_T = block.block_data(T.reshape((-1,1)), block_size)
        err = np.std(block_T)
        res = np.abs(np.mean(block_T - temp))

        if res > err:
            passed = False
        elif avg_sq_switch[0] > 1.0:
            passed = False
        elif np.max(avg_sq) > 2.8:
            passed = False
        elif np.max(avg_sq_fft) > 2.8:
            passed = False
        else:
            passed = True

        return passed, (r, phi, avg_tcf, err_tcf, avg_dcf, err_dcf, avg_icf, err_icf,
                        avg_grad_icf, err_grad_icf, fd_gr_sg, avg_br, err_br)
| true |
0f4bf737d0db77af52bdb6d0312c9aa75143b53e | Python | rdorgueilsciencespo/ExemplesPyGame | /images.py | UTF-8 | 2,086 | 3.53125 | 4 | [] | no_license | import pygame
import pygame.image
# Sprite-sheet geometry in pixels: monster width/height and the horizontal
# gap between monsters inside invaders.png.
# (French names: "largeur" = width, "hauteur" = height, "espace" = gap.)
LARGEUR_DU_MONSTRE = 200
HAUTEUR_DU_MONSTRE = 170
ESPACE = 30
def create_layers(size):
    """Open the display window at *size* and build a white background layer.

    Returns the (screen, background) pair of pygame surfaces.
    """
    screen = pygame.display.set_mode(size)
    pygame.display.set_caption("PyGame Images Example")
    backdrop = pygame.Surface(screen.get_size()).convert()
    backdrop.fill((255, 255, 255))
    return screen, backdrop
def display(screen, background, *, image, invaders):
    """Redraw one frame: background, the full photo, then three monster
    sprites cut out of the *invaders* sheet, and flip the buffers."""
    screen.blit(background, (0, 0))
    # Copy the whole photo onto the drawing surface.
    screen.blit(image, (0, 0))
    for index in range(3):
        # Destination on screen: spread horizontally, alternating heights.
        destination = (20 + index * LARGEUR_DU_MONSTRE,
                       20 + ((index + 1) % 2) * 50)
        # Source rectangle of the index-th monster inside the sprite sheet.
        source_rect = (index * (LARGEUR_DU_MONSTRE + ESPACE),
                       0,
                       LARGEUR_DU_MONSTRE,
                       HAUTEUR_DU_MONSTRE)
        screen.blit(invaders, destination, source_rect)
    # Swap the display and drawing buffers so the prepared frame shows.
    pygame.display.flip()
def main():
    """Load the assets, open the window, and run the event loop until the
    window is closed or the 'q' key is pressed."""
    # Load the images.
    image = pygame.image.load("image.jpg")
    invaders = pygame.image.load("invaders.png")

    # Create the drawing layers sized to the photo.
    screen, background = create_layers(image.get_size())

    # BUG FIX: Surface.convert_alpha() returns a NEW converted surface; the
    # original discarded the return value, so the per-pixel alpha conversion
    # never took effect.
    invaders = invaders.convert_alpha()

    running = True
    display(screen, background, image=image, invaders=invaders)
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_q:
                    running = False
        display(screen, background, image=image, invaders=invaders)
if __name__ == "__main__":
    # Initialize pygame.
    pygame.init()
    try:
        # Run the game.
        main()
    finally:
        # Shut pygame down no matter what happens.
        pygame.quit()
| true |
4f8f24b3689ca890ec12e1836d15f4b3ca4a1808 | Python | MyGitHubRepository/WebParser | /ConsoleMenuGenerator.py | UTF-8 | 2,316 | 3.75 | 4 | [] | no_license | """ Project: Web/Html Scraper, Coder: Hakan Etik, Date:18.08.2016 """
"""Code source http://stackoverflow.com/questions/15083900/console-menu-generator-in-python"""
import sys
import os
import time
#Item class function definitions
#Item class function definitions
class Item:
    """A single selectable console-menu entry (legacy Python 2 code).

    Wraps a display name and the zero-argument callable to run when the
    entry is chosen; if *parent* is given, the item registers itself with
    that Menu on construction.
    """
    def __init__(self, name, function, parent=None):
        self.name = name
        # Display number; assigned by Menu.draw() when the menu is rendered.
        self.item_number = 0
        self.function = function
        self.parent = parent
        if parent:
            parent.add_item(self)
    def draw(self):
        # Python 2 print statement; the trailing comma keeps the name on
        # the same line as the number.
        print self.item_number,
        print(" " + self.name)
    def set_item_number(self, number):
        # Called by Menu.draw() to keep numbering in display order.
        self.item_number = number
    def run_item(self):
        # Invoke the wrapped action.
        self.function()
#Menu class function definitions
class Menu:
    """A console menu holding an ordered list of Item entries (Python 2 code)."""
    def __init__(self, name, items=None):
        self.name = name
        # "items or []" gives each menu its own fresh list, avoiding the
        # shared mutable-default pitfall.
        self.items = items or []
    def add_item(self, item):
        # Append the entry and adopt it if it belongs to another menu.
        self.items.append(item)
        if item.parent != self:
            item.parent = self
    def remove_item(self, item):
        # Remove the entry and orphan it if this menu owned it.
        self.items.remove(item)
        if item.parent == self:
            item.parent = None
    def draw(self):
        # Print the title, then each entry numbered from 1 in display order.
        print(self.name)
        item_number = 1
        for item in self.items:
            item.set_item_number(item_number)
            item.draw()
            item_number = item_number + 1
    def run(self, item_num):
        # item_num is a zero-based index into the entries list.
        self.items[item_num].run_item()
    def terminate(self):
        # Menu action used for "Exit": say goodbye and end the program.
        # (Python 2 print statement.)
        print "bye"
        time.sleep(1) # delays for 1 seconds
        sys.exit(0)
    def cls(self):
        # Clear the console ("cls" on Windows, "clear" elsewhere).
        os.system('cls' if os.name=='nt' else 'clear')
#Item example specific functions
def openFile():
    """Menu action stub: report that a file was opened."""
    message = "OPEN FILE"
    print(message)
def closeFile():
    """Menu action stub: report that a file was closed."""
    message = "CLOSE FILE"
    print(message)
#Main
def main():
    """Build the demo menu and run the input loop until "Exit" is chosen."""
    main_menu = Menu("***Vestel Nightbot***")
    # automatically calls main.AddItem(item1)
    # NOTE(review): "open" shadows the builtin open() inside this function.
    open = Item("Open", openFile, main_menu)
    # automatically sets parent to main
    main_menu.add_item(Item("Close", closeFile))
    main_menu.add_item(Item("Exit", main_menu.terminate))
    main_menu.cls() # clear console before drawing
    while(True):
        try:
            main_menu.draw()
            # Python 2 input() evaluates the typed text, so a number
            # arrives as an int; choices are 1-based, list is 0-based.
            n=input("choice>")
            main_menu.run(n-1)
        except Exception as e:
            # Any bad choice (out of range, non-numeric) lands here.
            print("Undefined option please select defined option\n\n")
            time.sleep(1) # delays for 1 seconds
            main_menu.cls()
if __name__=='__main__':
    main()
a88f43cd2f5a8ce4617afffad9a7fb04f10683a9 | Python | gscr10/TSP-improve | /utils/plots.py | UTF-8 | 3,611 | 2.765625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 19 20:47:26 2020
@author: yiningma
"""
import torch
import os
from matplotlib import pyplot as plt
import cv2
import io
import numpy as np
def plot_grad_flow(model):
    '''Plot the average absolute gradient of each trainable layer and return
    the figure rendered as an RGB image array.

    Can be used for checking for possible gradient vanishing / exploding
    problems.  Usage: plug this in after loss.backward() as
    plot_grad_flow(self.model) to visualize the gradient flow.
    '''
    named_parameters = model.named_parameters()
    ave_grads = []
    layers = []
    for n, p in named_parameters:
        # Skip frozen parameters and biases; also skip parameters whose
        # .grad is still None (e.g. before the first backward pass), which
        # would otherwise raise on .grad.abs().
        if p.requires_grad and ("bias" not in n) and p.grad is not None:
            layers.append(n)
            ave_grads.append(p.grad.abs().mean())
    plt.ioff()
    fig = plt.figure(figsize=(8, 6))
    plt.plot(ave_grads, color="b")
    plt.hlines(0, 0, len(ave_grads) + 1, linewidth=1, color="k")
    plt.xticks(range(0, len(ave_grads), 1), layers, rotation="vertical")
    # BUG FIX: plt.xlim(xmin=..., xmax=...) keyword aliases were removed in
    # matplotlib >= 3.5; positional left/right arguments are the supported form.
    plt.xlim(0, len(ave_grads))
    plt.xlabel("Layers")
    plt.ylabel("average gradient")
    plt.title("Gradient flow")
    plt.grid(True)
    plt.tight_layout()
    # Render the figure into an in-memory PNG, then decode it to an RGB array.
    buf = io.BytesIO()
    plt.savefig(buf, dpi=60)
    plt.close(fig)
    buf.seek(0)
    img_arr = np.frombuffer(buf.getvalue(), dtype=np.uint8)
    buf.close()
    img = cv2.imdecode(img_arr, 1)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    return img
def plot_improve_pg(initial_value, reward):
    """Plot the average cost versus improvement step T (initial cost minus
    cumulative reward) and return the figure as an RGB image array."""
    plt.ioff()
    fig = plt.figure(figsize=(4, 3))
    # Average reward per step across the batch, accumulated over steps.
    cumulative_reward = np.cumsum(reward.cpu().mean(0))
    plt.plot(initial_value.mean() - cumulative_reward)
    plt.xlabel("T")
    plt.ylabel("Cost")
    plt.title("Avg Improvement Progress")
    plt.grid(True)
    plt.tight_layout()
    # Render to an in-memory PNG and decode it into an RGB array.
    png_buffer = io.BytesIO()
    plt.savefig(png_buffer, dpi=60)
    plt.close(fig)
    png_buffer.seek(0)
    raw_bytes = np.frombuffer(png_buffer.getvalue(), dtype=np.uint8)
    png_buffer.close()
    decoded = cv2.imdecode(raw_bytes, 1)
    return cv2.cvtColor(decoded, cv2.COLOR_BGR2RGB)
def plot_tour(city_tour, coordinates, dpi = 300, show = True):
    """Draw a closed TSP tour; show it interactively or return it as an image.

    city_tour:   1D tensor of node indices in visiting order.
    coordinates: tensor of node positions; assumes shape (n, 2) in the unit
                 square -- TODO confirm with callers.
    dpi:         resolution used only when rendering to an image.
    show:        if True, display the figure and return None; otherwise
                 render off-screen and return an RGB image array.
    """
    if not show: plt.ioff()
    fig = plt.figure(figsize=(8,6))
    # Close the loop by appending the first city, then duplicate the index
    # column so gather() can pick both x and y per visited node.
    index = torch.cat((
            city_tour.view(-1,1),
            city_tour.view(-1,1)[None,0]),0).repeat(1,2).long()
    xy = torch.gather(coordinates,0,index)
    plt.xticks(fontsize=12)
    plt.yticks(fontsize=12)
    # Fixed axes slightly larger than the unit square.
    plt.axis([-0.05, 1.05]*2)
    # Edges below (zorder 1), nodes above (zorder 2); first node in red.
    plt.plot(xy[:,0], xy[:,1], color = 'black', zorder = 1)
    g1 = plt.scatter(xy[:,0], xy[:,1], marker = 'H', s = 55, c = 'blue', zorder = 2)
    g2 = plt.scatter(xy[0,0], xy[0,1], marker = 'H', s = 55, c = 'red', zorder = 2)
    handle = [g1,g2]
    plt.legend(handle, ['node', 'depot'], fontsize = 12)
    # plot show
    if not show:
        # Render to an in-memory PNG and decode it into an RGB array.
        buf = io.BytesIO()
        plt.savefig(buf, dpi=dpi)
        plt.close(fig)
        buf.seek(0)
        img_arr = np.frombuffer(buf.getvalue(), dtype=np.uint8)
        buf.close()
        img = cv2.imdecode(img_arr, 1)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        return img
    else:
        plt.show()
        return None
def plot_heatmap(problem, solutions, predicted_feasibility):
    """Show side-by-side heatmaps of predicted vs. true swap feasibility.

    Rebuilds a PDP instance of the same size to compute the ground-truth
    swap mask, then plots the model's prediction (left) next to the true
    mask of the first solution (right).
    """
    from problems.problem_pdp_mp import PDP as PDPmp
    problem_mp = PDPmp(problem.size)
    # Ground-truth mask as floats for plotting.
    true_feasibility = (problem_mp.get_swap_mask(solutions).bool()).float()
    import seaborn as sns; sns.set()
    fig, (ax1, ax2) = plt.subplots(1,2,figsize = (10,4))
    sns.heatmap(predicted_feasibility.detach(), ax = ax1)
    sns.heatmap(true_feasibility[0], ax = ax2)
    plt.show()
| true |
032218b418d7e66d986a9fac412d141de1802594 | Python | OkWilk/disk-image | /src/lib/thread.py | UTF-8 | 989 | 3.171875 | 3 | [] | no_license | """
Author: Oktawiusz Wilk
Date: 10/04/2016
License: GPL
"""
from threading import Thread
class ExtendedThread(Thread):
"""
This class wraps the standard Thread from the Python threading library to add a callback
function in case of exception being raised on the thread. With the callback method is provided
the thread can pass the exception object to the parent thread to notify it about the error.
"""
def __init__(self, exception_callback=None, *args, **kwargs):
self._callback = exception_callback
super().__init__(*args, **kwargs)
def run(self):
try:
if self._target:
self._target(*self._args, **self._kwargs)
except BaseException as e:
if self._callback:
self._callback(self, e)
else:
raise e
finally:
del self._target, self._args, self._kwargs, self._callback
class ThreadException(BaseException):
pass | true |
15032e85401fbceda1b5885b1a327f6693628298 | Python | shagulsoukath/python | /4h.py | UTF-8 | 238 | 3.390625 | 3 | [] | no_license | op=int(input())
from collections import Counter

# Tokens from the input line (the declared count on the previous line is
# read but unused).  Print, in order of appearance, only the tokens that
# occur exactly once -- a single Counter pass instead of the original
# three-list O(n^2) scan.
tokens = input().split()
occurrences = Counter(tokens)
singles = [token for token in tokens if occurrences[token] == 1]
print(*singles, sep=' ')
| true |
5ee083930540a5aa0191f1b75cb9474f94e11234 | Python | solderzzc/dicombrowser | /dicombrowser/__init__.py | UTF-8 | 2,417 | 3.3125 | 3 | [
"Apache-2.0"
] | permissive | import os
import dicom
from collections import OrderedDict
def browse(directory, select_tags=None):
    """
    Browses a directory and returns list of DICOM files and the values of their tags as a dictionary.
    The dictionary uses same tag names as those used by pydicom library (mind the spacing and capital/lower case).
    :param directory: directory pth where to search for DICOM files.
    :param select_tags: list of DICOM tag names that have to be extracted. Tags outside of this list will be ignored.
    :return: dictionary with DICOM tag values for each DICOM file in directory
    """
    if not os.path.exists(directory):
        raise AttributeError("Directory does not exist.")
    tree = OrderedDict()
    for entry in os.listdir(directory):
        path = os.path.join(directory, entry)
        try:
            tree[path] = read_dicom_file(path, tag_filter=select_tags)
        except dicom.errors.InvalidDicomError:
            # Not a DICOM file; skip it silently.
            pass
        except IsADirectoryError:
            # Subdirectories are not descended into.
            pass
    return tree
def read_dicom_file(fname, tag_filter=None):
    """
    Reads a DICOM file and returns a dictionary where keys are DICOM tag names and values are the values of those tags.
    Only tags that are present in the DICOM file, will be present in the generated dictionary.
    :param fname: path to file.
    :param tag_filter: list of DICOM tags whose values need to be read.
    :return: dictionary where keys are tag names, values are tag values.
    """
    disabled_tags = ['Pixel Data'] # disable for speed improvement and debugging, TODO: enable in final release
    # Validate the requested tag names against pydicom's dictionary.
    supported_dicom_tag_names = [entry[2] for entry in dicom._dicom_dict.DicomDictionary.values()]
    if tag_filter is not None:
        for tag_name in tag_filter:
            if tag_name not in supported_dicom_tag_names:
                raise AttributeError("%s is not a valid DICOM tag name. " \
                                     "Please consult pydicom dictionary for a list of valid names." % tag_name)
    dataset = dicom.read_file(fname)
    tags = {}
    for tag in dataset:
        if tag_filter is not None:
            # Only keep explicitly requested tags.
            if tag.name in tag_filter:
                tags[tag.name] = str(dataset[tag.tag].value)
        elif tag.name not in disabled_tags:
            # No filter: keep everything except the disabled bulk tags.
            tags[tag.name] = str(dataset[tag.tag].value)
    return tags
| true |
cf69c0d310505c0828fb8395eca700f8c108fe70 | Python | sanqit/text-based-browser | /Problems/Matching brackets/task.py | UTF-8 | 204 | 3.71875 | 4 | [] | no_license | brackets = 0
# Scan the line once, tracking nesting depth; a negative depth means a
# ")" appeared before its matching "(" and the string can never balance.
depth = 0
for character in input():
    depth += (character == "(") - (character == ")")
    if depth < 0:
        break
print("OK" if depth == 0 else "ERROR")
| true |
9bd57b6ae7ef043ed7763f790d3a6c358bbffb4f | Python | iras/JADE | /src/JADEmodel/Cluster0.py | UTF-8 | 1,973 | 2.6875 | 3 | [
"MIT"
] | permissive | '''
Copyright (c) 2012 Ivano Ras, ivano.ras@gmail.com
See the file license.txt for copying permission.
JADE mapping tool
'''
class Cluster0 ():
    '''
    sub-model class: a named cluster of graph nodes.
    '''

    def __init__(self, id0, name0, parent, comm):
        '''constructor
        @param id0 int
        @param name0 string
        @param parent graph instance that owns this cluster
        @param comm instance of class Comm0
        '''
        # "id0" is used instead of "id" to avoid shadowing the built-in.
        self._id = id0
        self._name = str(name0)
        self.graph = parent
        self.comm = comm
        self._cluster_node_list = []

    def addNodeToCluster(self, node):  # TODO : unit test
        # Register the node and broadcast the change via the comm channel.
        self._cluster_node_list.append(node)
        self.comm.emitAddNodeToClusterMSignal(self._id, node.getId())

    def removeNodeFromCluster(self, node):  # TODO : unit test
        # Scan from the back, remove the last matching entry and its node.
        for position in range(len(self._cluster_node_list) - 1, -1, -1):
            candidate = self._cluster_node_list[position]
            if candidate == node:
                self.graph.removeNode(candidate.getId())  # remove the node.
                del self._cluster_node_list[position]     # drop the list reference.
                break

    def removeAllNodesFromCluster(self):  # TODO : unit test
        # Drain the list from the back, deleting each node from the graph.
        while self._cluster_node_list:
            detached = self._cluster_node_list.pop()
            self.graph.removeNode(detached.getId())

    # - - - getters / setters - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    def getId(self):
        return self._id

    def getName(self):
        return self._name

    def getNodeList(self):
        return self._cluster_node_list

    def setId(self, id0):
        self._id = id0

    def setName(self, name0):
        self._name = name0
| true |
778cad615febfea1511ee75627367660d75e4041 | Python | theNded/Open3D | /examples/Python/Advanced/load_save_viewpoint.py | UTF-8 | 1,045 | 2.59375 | 3 | [
"MIT"
] | permissive | # Open3D: www.open3d.org
# The MIT License (MIT)
# See license file or visit www.open3d.org for details
# examples/Python/Advanced/load_save_viewpoint.py
import numpy as np
import open3d as o3d
def save_view_point(pcd, filename):
    """Show *pcd*, let the user adjust the camera, then save the view.

    The window blocks until the user presses "q"; the resulting pinhole
    camera parameters are written to *filename* as JSON.
    """
    visualizer = o3d.visualization.Visualizer()
    visualizer.create_window()
    visualizer.add_geometry(pcd)
    visualizer.run()  # user changes the view and presses "q" to terminate
    camera = visualizer.get_view_control().convert_to_pinhole_camera_parameters()
    o3d.io.write_pinhole_camera_parameters(filename, camera)
    visualizer.destroy_window()
def load_view_point(pcd, filename):
    """Show *pcd* using the camera parameters previously saved in *filename*."""
    visualizer = o3d.visualization.Visualizer()
    visualizer.create_window()
    view_control = visualizer.get_view_control()
    saved_camera = o3d.io.read_pinhole_camera_parameters(filename)
    visualizer.add_geometry(pcd)
    # Restore the saved viewpoint before handing control to the user.
    view_control.convert_from_pinhole_camera_parameters(saved_camera)
    visualizer.run()
    visualizer.destroy_window()
if __name__ == "__main__":
    # Demo: pick a viewpoint interactively, save it, then restore it.
    pcd = o3d.io.read_point_cloud("../../TestData/fragment.pcd")
    save_view_point(pcd, "viewpoint.json")
    load_view_point(pcd, "viewpoint.json")
| true |
06f55878fdea24d4cb631d150ade8b7adf24a72d | Python | valeonte/advent-of-code-python | /2022/day-17.py | UTF-8 | 3,383 | 2.71875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Advent of Code 2022 day 17.
Created on Tue Dec 20 18:35:37 2022
@author: Eftychios
"""
import os
import re
from time import time
import numpy as np
import pandas as pd
from typing import List, Set, Iterator, Tuple
from random import shuffle
# NOTE(review): machine-specific absolute path; inputs are resolved from it.
os.chdir("C:/Repos/advent-of-code-python/2022")

# Jet pattern: '<' pushes a falling rock left, '>' pushes it right.
inp_string = ">>><<><>><<<>><>>><<<>>><<<><<<>><>><<>>"
# with open("inputs/day-17.txt", "r") as f:
#     inp_string = f.read()

# The five rock shapes as (x, y) cell offsets: x == 2 puts the left edge two
# units from the chamber wall, y == 0 is the shape's bottom row.
rocks = [[(2, 0), (3, 0), (4, 0), (5, 0)],  # dash
         [(3, 0), (2, 1), (3, 1), (4, 1), (3, 2)],  # plus
         [(2, 0), (3, 0), (4, 0), (4, 1), (4, 2)],  # reverse L
         [(2, 0), (2, 1), (2, 2), (2, 3)],  # I
         [(2, 0), (3, 0), (2, 1), (3, 1)]]  # square
def infinite_jets() -> Iterator[int]:
    """Endlessly repeat the jet pattern as horizontal offsets (-1 left, +1 right)."""
    # inp_string is constant, so the offsets can be precomputed once.
    offsets = [-1 if symbol == '<' else 1 for symbol in inp_string]
    while True:
        yield from offsets
def infinite_rocks() -> Iterator[List[Tuple[int, int]]]:
    """Endlessly cycle through the five rock shapes in spawn order."""
    while True:
        yield from rocks
def print_rocks(stopped: Set[Tuple[int, int]],
                rock: List[Tuple[int, int]]):
    """Render the chamber to stdout: '#' settled cells, '@' the falling rock.

    Rows are printed top-down from the falling rock's highest cell (capped
    at 5000) to the floor, framed by walls and a bottom border.
    """
    top_row = min(5000, max(y for _, y in rock))
    rendered_rows = []
    for y in range(top_row, -1, -1):
        row = '|'
        for x in range(7):
            if (x, y) in stopped:
                row += '#'
            elif (x, y) in rock:
                row += '@'
            else:
                row += '.'
        rendered_rows.append(row + '|\n')
    print(''.join(rendered_rows) + '+-------+')
    print()
# ---- Simulation state (AoC 2022 day 17: falling-rock "Tetris") ----
stopped = set()        # settled rock cells, for O(1) collision tests
stopped_list = []      # same cells in insertion order, used for pruning
fallen_rocks = 0
rock_idx = 0           # NOTE(review): unused
rock_gen = iter(infinite_rocks())
jet_gen = iter(infinite_jets())
rock_is_moving = False  # False => spawn a new rock on the next pass
highest_rock = -1       # y of the highest settled cell; the floor is y == -1
last_time = time()
start_time = last_time
max_rocks = 1000000
heights = []            # tower height recorded before each spawn
while fallen_rocks <= max_rocks:
    if not rock_is_moving:
        # Spawn phase: count the rock and record the current tower height.
        fallen_rocks += 1
        if fallen_rocks == max_rocks:
            print('stop')
        heights.append(highest_rock)
        # Progress report every 5% of the run.
        if max_rocks > 100 and fallen_rocks % (max_rocks // 20) == 0:
            t = time()
            print(f'{fallen_rocks} fallen rocks in {t - last_time:.2f}')
            last_time = t
        rock_is_moving = True
        next_rock = next(rock_gen)
        rock = []
        for r in next_rock:
            # Spawn offset: bottom edge three empty rows above the tower top.
            rock.append((r[0], r[1] + highest_rock + 4))
    # First move from jet
    jet = next(jet_gen)
    moved_rock = []
    crashed = False
    for r in rock:
        new_x = r[0] + jet
        new_bit = (new_x, r[1])
        # Walls sit at x < 0 and x > 6; settled cells also block the push.
        if new_x < 0 or new_x > 6 or new_bit in stopped:
            crashed = True
            break
        moved_rock.append(new_bit)
    if not crashed:
        rock = moved_rock
    # Then try dropping one
    moved_rock = []
    crashed = False
    for r in rock:
        new_bit = (r[0], r[1] - 1)
        # Landing on a settled cell or the floor (y < 0) ends the fall.
        crashed = new_bit in stopped or new_bit[1] < 0
        if crashed:
            rock_is_moving = False
            break
        moved_rock.append(new_bit)
    if rock_is_moving:
        rock = moved_rock
    else:
        # Freeze the rock where it is and update the tower height.
        for r in rock:
            if r[1] >= highest_rock:
                highest_rock = r[1]
            stopped.add(r)
            stopped_list.append(r)
        # Memory cap: keep only the most recent half of the settled cells.
        if len(stopped) > 1000000:
            stopped_list = stopped_list[500000:]
            stopped = set(stopped_list)

# The answer is the tower height (floor is y == -1, so +1).
print('Answer 1:', highest_rock + 1)
print_rocks(stopped, rock)
| true |