blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
3ad3a6eaee13ae98521271dbb3f419fce939f8a2 | Python | michalbal/Cluedo-solver-using-Planning-and-Bayesian-Networks | /search.py | UTF-8 | 3,907 | 3.53125 | 4 | [] | no_license | """
In search.py, you will implement generic search algorithms
"""
import util
class SearchProblem:
"""
This class outlines the structure of a search problem, but doesn't implement
any of the methods (in object-oriented terminology: an abstract class).
You do not need to change anything in this class, ever.
"""
def get_start_state(self):
"""
Returns the start state for the search problem
"""
util.raiseNotDefined()
def is_goal_state(self, state):
"""
state: Search state
Returns True if and only if the state is a valid goal state
"""
util.raiseNotDefined()
def get_successors(self, state):
"""
state: Search state
For a given state, this should return a list of triples,
(successor, action, stepCost), where 'successor' is a
successor to the current state, 'action' is the action
required to get there, and 'stepCost' is the incremental
cost of expanding to that successor
"""
util.raiseNotDefined()
def get_cost_of_actions(self, actions):
"""
actions: A list of actions to take
This method returns the total cost of a particular sequence of actions. The sequence must
be composed of legal moves
"""
util.raiseNotDefined()
class Node:
    """Search-tree node for the Cluedo planner: wraps a search state plus the
    accumulated cost, the action path, per-question ask counts and the
    player's board location."""

    def __init__(self, state, cost, path, asked_before, location=None, action=None,
                 this_question=None):
        # state: the planning state this node wraps.
        self.state = state
        # cost: accumulated path cost (g-value) from the root to this node.
        self.cost = cost
        if location:
            self.location = location
        self.asked_before = dict()
        # save the last asked question
        # Copy the parent's ask-count dict so this node owns its own copy and
        # the mutation below cannot corrupt sibling nodes.
        for q in asked_before:
            self.asked_before[q] = asked_before[q]
        self.last_question = this_question
        if action is not None:
            # List addition builds a fresh list; the parent's path is untouched.
            self.path = path + [action]
            if this_question:
                # All previous question - doesn't matter at the end of the plan
                # Bump the ask-count for every component of the question.
                for q in this_question:
                    if q in asked_before:
                        self.asked_before[q] += 1
                    else:
                        self.asked_before[q] = 1
                # Asking moves the player to the questioned room.
                # NOTE(review): assumes this_question[2] is a util.Room name -- confirm.
                self.location = util.LOCATIONS_OF_ROOMS[util.Room[this_question[2]]]
        else:
            # Root node (no generating action): keep the provided path as-is.
            # NOTE(review): if `location` was falsy, self.location is never set
            # on this branch -- later attribute access would raise; confirm.
            self.path = path
def a_star_search(problem, heuristic, location):
    """
    Search the node that has the lowest combined cost and heuristic first.

    problem:   SearchProblem instance providing start state, goal test and successors.
    heuristic: callable(asked_before, this_question, state, location, problem) -> cost estimate.
    location:  the player's starting board location.
    Returns the action path to a goal state, or None if the fringe empties.
    """
    fringe = util.PriorityQueue()
    visited = set()  # closed set: states already expanded
    first_state = problem.get_start_state()
    fringe.push(Node(first_state, 0, [], dict(), location), 0)
    while not fringe.isEmpty():
        current_node = fringe.pop()
        if current_node.state not in visited:
            if problem.is_goal_state(current_node.state):
                return current_node.path
            # Expand node
            visited.add(current_node.state)
            successors = problem.get_successors(current_node.state)
            asked_before = current_node.asked_before
            current_location = current_node.location
            for succ, action, cost in successors:
                # NOTE(review): `result` is assigned but never used here.
                this_question, result = None, None
                # Actions containing "unknown" encode a question; recover its
                # three components from the action name.
                if "unknown" in action.name:
                    name = action.name.split("_unknown_")
                    this_question = name[0:3]
                accumulated_cost = current_node.cost + cost
                heuristic_cost = heuristic(asked_before, this_question, succ,
                                           current_location, problem)
                # Priority is f = g + h (A*).
                fringe.push(
                    Node(succ, accumulated_cost, current_node.path, asked_before,
                         current_location, action, this_question),
                    accumulated_cost + heuristic_cost)
    return None
| true |
afa2cf67aefcc9ff56bc157ceffd862aa846f207 | Python | dishaa19/Searching-for-Novel-Predictive-and-Diagnostic-biomarkers-of-COVID-19 | /COVID-19 (RF).py | UTF-8 | 2,251 | 3.046875 | 3 | [] | no_license |
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
# In[2]:
# Load the dataset and index rows by patient ID.
ip = pd.read_csv("COVID-19.csv")
new_ip = ip.set_index('ID')
# In[3]:
new_ip
# In[4]:
# Split rows into the predefined Training/Test partitions, then drop the
# partition marker column so it cannot leak into the features.
Training = new_ip[new_ip['Dataset'] == 'Training']
Test = new_ip[new_ip['Dataset']== 'Test']
Training = Training.drop(columns=['Dataset'])
Test = Test.drop(columns=['Dataset'])
# In[5]:
# Separate features (everything but Diagnosis) from the Diagnosis label.
X_train = Training.loc[:, Training.columns != 'Diagnosis']
X_test = Test.loc[:, Test.columns != 'Diagnosis']
Y_train = Training.loc[:, Training.columns == 'Diagnosis']
Y_test = Test.loc[:, Test.columns == 'Diagnosis']
# In[6]:
# Encode sex as a binary feature (male=1, female=0) in both splits.
X_train = X_train.replace('male',1)
X_train = X_train.replace('female',0)
X_test = X_test.replace('male',1)
X_test = X_test.replace('female',0)
# In[18]:
class MultiColumnLabelEncoder:
    """Label-encode several DataFrame columns in one transformer.

    sklearn's LabelEncoder only handles a single 1-D array; this wrapper
    applies a fresh encoder to each selected column of a pandas DataFrame.

    columns: list of column names to encode; if None, every column is encoded.
    """

    def __init__(self, columns=None):
        self.columns = columns

    def fit(self, X, y=None):
        # Nothing to learn up front; encoders are fitted per column in transform().
        return self

    def transform(self, X):
        """Return a copy of X with the selected columns label-encoded."""
        output = X.copy()
        if self.columns is not None:
            for col in self.columns:
                output[col] = LabelEncoder().fit_transform(output[col])
        else:
            # Bugfix: DataFrame.iteritems() was removed in pandas 2.0;
            # items() is the supported equivalent.
            for colname, col in output.items():
                output[colname] = LabelEncoder().fit_transform(col)
        return output

    def fit_transform(self, X, y=None):
        return self.fit(X, y).transform(X)
# In[24]:
def processor(data):
    """Label-encode the categorical columns of *data* and map booleans to 0/1.

    Bugfix: the original encoded the module-level ``new_ip`` DataFrame no
    matter what argument was passed; it now operates on ``data`` as intended
    (the module-level call ``processor(new_ip)`` is unaffected).
    """
    data = MultiColumnLabelEncoder(columns=['Dataset', 'Diagnosis', 'Sex']).fit_transform(data)
    # Map literal booleans to ints; any non-boolean cell falls through unchanged.
    bool_map = {True: 1, False: 0}
    data = data.applymap(lambda x: bool_map.get(x, x))
    return data
# In[25]:
# NOTE(review): LabelEncoder is imported here, *after* the class above that
# uses it -- it works only because the class body runs lazily at call time.
from sklearn.preprocessing import LabelEncoder
df_encoded = processor(new_ip)
# In[31]:
# In[8]:
#Import Random Forest Model
from sklearn.ensemble import RandomForestClassifier
#Create a Gaussian Classifier
clf=RandomForestClassifier(n_estimators=1000)
#Train the model using the training sets y_pred=clf.predict(X_test)
# NOTE(review): Y_train is a one-column DataFrame; sklearn expects a 1-D
# vector here and will warn -- consider Y_train.values.ravel().
clf.fit(X_train,Y_train)
y_pred=clf.predict(X_test)
# In[9]:
y_pred.shape
# In[12]:
Y_test
# In[11]:
#Import scikit-learn metrics module for accuracy calculation
from sklearn import metrics
# Model Accuracy, how often is the classifier correct?
print("Accuracy:",metrics.accuracy_score(Y_test, y_pred))
| true |
33b6cb2cbaa1f309eff618b58171ec987631473a | Python | sepear/AntColony | /evaluation.py | UTF-8 | 2,221 | 2.84375 | 3 | [] | no_license | from dataReading import readResults, readData
from problemRepresentation import SMTWTproblem
from problemSolving import generateSolution
import matplotlib.pyplot as plt
import time
N_RUNS = 4 # number of times we evaluate a problem
def generatePlot(results, benchmark, dir):
    """Plot our best tardiness per problem against the benchmark and save to *dir*."""
    plt.clf()
    xs = list(range(len(benchmark)))
    plt.plot(xs, results, label="Our results ")
    plt.plot(xs, benchmark, label="Benchmark ")
    plt.ylabel('best tardiness obtained')
    plt.xlabel('problem')
    plt.title("Comparison between our algorithm and benchmark")
    plt.legend()
    plt.savefig(dir)
def compareResults(our_results, given_results):
    """Print our total tardiness and the average gap to the published benchmark."""
    ours = sum(our_results)
    theirs = sum(given_results)
    average_difference = (theirs - ours) / (len(given_results) - 1)
    print(f"Average difference:{average_difference}")
    print(f"Total sum:{ours}")
def evaluateSet(data, generations=500, results_name="default",benchmark_dir="data/wtopt40.txt"):
    """Run the ant-colony solver N_RUNS times on each of the 125 SMTWT problems,
    keep the best tardiness per problem, compare against the benchmark file,
    and persist results (text + comparison plot).

    Returns (best_results, results): best tardiness per problem, and all
    per-run tardiness lists."""
    filedir = "results/"+results_name+".txt"
    imagedir ="figures/"+results_name+".png"
    best_results = list() # list with the best result for every problem
    results = list() # list of lists of results
    for problem_index in range(125): # for every problem:
        print(f"problema: {problem_index}")
        problem_results = list()
        for run_index in range(N_RUNS): # four times
            print(f"\t{run_index}")
            # Re-instantiate the problem per run so pheromones start fresh.
            problem = SMTWTproblem(data, problem_index)
            # built inside to reset pheromones
            tardiness, route = generateSolution(problem, generations)
            print(f"tardiness:{tardiness}")
            problem_results.append(tardiness)
        results.append(problem_results)
        best_results.append(min(problem_results))
    given_results = readResults(benchmark_dir)
    compareResults(best_results, given_results)
    dataWriter(best_results, filedir)
    generatePlot(best_results, given_results, imagedir)
    return best_results, results
def dataWriter(results, file_address):
    """Write one result per line to *file_address* (best tardiness values)."""
    lines = [f"{entry}\n" for entry in results]
    with open(file_address, 'w') as out:
        out.writelines(lines)
| true |
f222e038b29f14b9246b60efb8320fb64b6478fd | Python | vegarwe/blehcihost | /hci/device_interface.py | UTF-8 | 5,405 | 2.578125 | 3 | [] | no_license | import threading
import logging
import Queue
import serial
import protocol
class HciEventCallback():
    """Queue-backed listener that buffers HCI events matching a class list
    and/or a predicate.

    Register on an HCI device with append_as_listener() (or use the instance
    as a context manager) and block on wait_for_event() for the next match.
    """

    def __init__(self, hcidev, classes=None, filter=None):
        self.hcidev = hcidev
        self.log = hcidev.log
        # Normalize `classes` to a list, or None meaning "accept any event".
        if isinstance(classes, (list, tuple)):
            self._classes = classes
        elif classes == None:
            self._classes = None
        else:
            self._classes = [classes]
        self._filter = filter
        self.events = Queue.Queue(maxsize=20)

    def _isinstance_of_classes(self, event):
        # if self._classes == None, allow any event
        if self._classes == None:
            return True
        for _class in self._classes:
            if isinstance(event, _class):
                return True
        return False

    def _put_event(self, event):
        # Bounded queue: drop the oldest event rather than block the reader.
        if self.events.full():
            # Bugfix: was `self.event.get()` (AttributeError on overflow).
            dropped_event = self.events.get()
            self.log.warn('Event queue for %s full, dropping oldest event %s',
                    self, dropped_event)
        self.events.put(event)

    def process_event(self, event):
        # Accept the event if it matches the class list, and the predicate
        # (when one was supplied) also approves it.
        if self._isinstance_of_classes(event):
            if self._filter and self._filter(event):
                self._put_event(event)
            elif not self._filter:
                self._put_event(event)

    def wait_for_event(self, timeout=1):
        """Return the next buffered event, or None after *timeout* seconds.

        Bugfix: the timeout argument was ignored (a hard-coded 10 s was used).
        """
        try:
            return self.events.get(timeout=timeout)
        except Queue.Empty:
            return None

    def append_as_listener(self):
        self.hcidev.pkt_handlers.append(self.process_event)

    def remove_as_listener(self):
        self.hcidev.pkt_handlers.remove(self.process_event)

    def __enter__(self):
        self.append_as_listener()
        return self

    def __exit__(self, type, value, traceback):
        self.remove_as_listener()
class HciInterface(object):
    """Transport-agnostic HCI host: serializes commands and data packets and
    fans received events out to registered packet handlers."""

    def __init__(self, device_name):
        self.device_name = device_name
        # Right-pad the logger name to 8 chars so log output lines up.
        self.log = logging.getLogger(((8 - len(device_name)) * ' ' + device_name))
        self.pkt_handlers = []

    def process_event(self, event):
        """Dispatch a deserialized event to every registered handler."""
        self.log.debug('dbg event %r', event)
        # TODO: Deserialized log...
        for fun in self.pkt_handlers:
            fun(event)

    def write_cmd(self, cmd):
        """Send an HCI command and block for its Command Complete/Status event."""
        classes = [protocol.HciCommandComplete, protocol.HciCommandStatus]
        # Only accept the response matching this command's opcode.
        _filter = lambda x: x.command_op_code == cmd.op_code
        with HciEventCallback(self, classes, _filter) as callback:
            self.write(cmd.serialize())
            cmd_resp = callback.wait_for_event()
            self.log.debug('dbg %s %s', cmd, cmd_resp)
            return cmd_resp

    def write_data(self, conn_handle, data):
        """Send *data* on *conn_handle* as an L2CAP-wrapped HCI data packet."""
        pkt = protocol.HciDataPkt(conn_handle, protocol.L2CapPkt(data))
        self.write(pkt.serialize())

    def write_data_wait_for_complete(self, conn_handle, data, timeout=10):
        """Send data and wait for a Number Of Completed Packets event covering
        *conn_handle*.  Returns the event, or None on timeout."""
        classes = [protocol.HciNumCompletePackets]
        def _filter(num_complete_event):
            # Bugfix: the original iterated `num_completes.handles`, an unbound
            # name -- the handle list lives on the received event.
            for handle, num_completes in num_complete_event.handles:
                if handle == conn_handle:
                    return True
            return False
        with HciEventCallback(self, classes, _filter) as callback:
            self.write_data(conn_handle, data)
            data_rsp = callback.wait_for_event(timeout)
            if data_rsp == None:
                # Bugfix: the original referenced an undefined `pkt` here.
                self.log.info('conn_handle %s, timeout waiting for hci data' % (conn_handle,))
            return data_rsp
class SerialHci(HciInterface, threading.Thread):
    """HCI transport over a serial port.  A background thread continuously
    reads and re-frames incoming packets, parses them via the protocol
    module and dispatches the resulting events."""

    def __init__(self, port, baudrate=115200, rtscts=True):
        threading.Thread.__init__(self)
        HciInterface.__init__(self, port)
        # Short read timeout so the reader loop can notice keep_running flips.
        self.serial = serial.Serial(port=port, baudrate=baudrate, rtscts=rtscts, timeout=0.1)
        self.log.debug("Opended port %s, baudrate %s, rtscts %s", port, baudrate, rtscts)
        self.keep_running = False
        self.start()

    def stop(self):
        # Signals the reader thread to exit; run()'s finally closes the port.
        self.keep_running = False

    def run(self):
        # Reader loop: frame raw bytes, parse into events, dispatch them.
        self.keep_running = True
        try:
            while self.keep_running:
                data = self.read()
                if data == '': continue
                event = None
                try:
                    event = protocol.event_factory(data)
                except:
                    self.log.exception("Unable to parse data %r", data)
                if event != None: self.process_event(event)
        except:
            self.log.exception("Exception in read thread")
        finally:
            self.keep_running = False
            self.log.debug("Read thread finished")
            self.serial.close()

    def read(self):
        """Read one framed packet from the wire; '' on timeout or unknown type.

        First byte is the H4 packet indicator: 0x04 = HCI event (2-byte
        header, length in byte 2), 0x02 = ACL data (4-byte header, length in
        byte 3).  The packet is re-prefixed with its total length plus a
        marker byte (0x12 for events, 0x11 for data).
        NOTE(review): code treats serial reads as str (Python 2); on Python 3
        pyserial returns bytes and these comparisons would fail -- confirm.
        """
        data = self.serial.read(1)
        if data == '':
            return ''
        if data[0] == '\x04':
            data += self.serial.read(2)
            if data[2] != '\x00':
                # Read the event parameter payload (length from the header).
                data += self.serial.read(ord(data[2]))
            data = chr(len(data)) + '\x12' + data
        elif data[0] == '\x02':
            data += self.serial.read(4)
            if data[3] != '\x00':
                # Read the ACL payload (length from the header).
                data += self.serial.read(ord(data[3]))
            data = chr(len(data)) + '\x11' + data
        else:
            # Unknown packet indicator: drop the byte.
            return ''
        self.log.debug('rx <=: %r', data)
        return data

    def write(self, data):
        self.log.debug("tx =>: %r", data)
        self.serial.write(data)

    def __repr__(self):
        return '%s(port="%s", baudrate=%s)' % (self.__class__.__name__, self.serial.port, self.serial.baudrate)
| true |
3d9c04ac5b1fd2f4cbf4fa7151a72009997931e8 | Python | seanmanson/euler | /19.py | UTF-8 | 891 | 3.625 | 4 | [
"MIT"
] | permissive | import math
daysInMonthLeap = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
daysInMonth = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
def isLeapYear(year):
    """Gregorian leap-year test: divisible by 4, except centuries not divisible by 400."""
    if year % 400 == 0:
        return True
    if year % 100 == 0:
        return False
    return year % 4 == 0
# Project Euler 19: walk every day from 1901-01-01 (a Tuesday) through
# 2000-12-31 and count month-firsts that fall on a Sunday (weekday 0).
curYear = 1901
curMonth = 1
curDay = 1
curWeekday = 2 #tuesday
numSundaysOnFirst = 0
while curYear < 2001:
    # Debug trace: weekday of each New Year's Day.
    if curDay == 1 and curMonth == 1:
        print(curYear, curWeekday)
    if curWeekday == 0 and curDay == 1:
        numSundaysOnFirst+=1
    #iterate
    # Advance one day: weekday wraps mod 7, day rolls over per month length.
    curWeekday+=1
    if curWeekday >= 7:
        curWeekday = 0
    curDay+=1
    if isLeapYear(curYear):
        if curDay > daysInMonthLeap[curMonth-1]:
            curDay = 1
            curMonth+=1
    else:
        if curDay > daysInMonth[curMonth-1]:
            curDay = 1
            curMonth+=1
    if curMonth > 12:
        curMonth = 1
        curYear+=1
# Final answer: number of Sundays landing on the first of a month.
print (numSundaysOnFirst)
| true |
18820557e3600e01994605b805b19c0918128dda | Python | zhouzi9412/Python-Crash-Course | /第五章/5-11.py | UTF-8 | 218 | 3.828125 | 4 | [] | no_license | numbers = list(range(1,10))
# Print each number with its ordinal suffix: 1st, 2nd, 3rd, otherwise Nth.
for value in numbers:
    if value >= 4:
        print(str(value) + "th")
    elif value >= 3:
        print("3rd")
    elif value >= 2:
        print("2nd")
    else:
        print("1st")
6b5e2dd5f2e59616ce6370a752f4351b2a1c406d | Python | mzfr/Competitive-coding | /Arrays/move-zero-end-of-array/case1.py | UTF-8 | 185 | 3.328125 | 3 | [] | no_license | arr = [2, 8, 7, 0, 0, 3, 6, 0, 0, 1]
# Stable in-place compaction: move all non-zeros to the front (preserving
# their order), then fill the tail with zeros.  O(n) time, O(1) extra space.
c = 0  # write cursor: next slot for a non-zero value
n = len(arr)
for i in range(n):
    if arr[i] != 0:
        arr[c] = arr[i]
        c += 1
# Everything from the write cursor onward is now stale -- zero it out.
while(c < n):
    arr[c] = 0
    c += 1
print(arr)
| true |
48e33f6c1604366e49e35df599d6bf94917f03ef | Python | LaLuneDeIdees/EducationalProgrammingLanguages | /ver6.0.1/test/tempCodeRunnerFile.py | UTF-8 | 115 | 2.640625 | 3 | [] | no_license |
# pp = pp[2]
# for l in range(16,len(pp),16):
# list(map(lambda x: print(x,end=','),pp[l-16:l]))
# print() | true |
1bcffbaad51d78d9f9d188b5173062e2be64aa6c | Python | TakuroKato/AtCoder | /abc071_A.py | UTF-8 | 155 | 3.234375 | 3 | [] | no_license | #! -*- coding:utf-8 -*-
# ABC071 A: print whichever of a, b is strictly closer to x ('B' wins ties).
x, a, b = map(int, input().split())
# abs() states the intent directly; the original abused math.copysign(v, 0)
# (which yields |v| only because 0.0 is treated as positive) to get the
# same magnitudes.
if abs(x - a) < abs(x - b):
    print('A')
else:
    print('B')
| true |
56d79f0169895ad4fc21091583e29ed92fcdce25 | Python | EthanHolleman/Bomb-Buster | /wires.py | UTF-8 | 1,769 | 3.328125 | 3 | [] | no_license | #problem with black or blue wires
def solveWires(wireString):
wires = wireOrder.lower().split(" ")
length = len(wireOrder)
if length == 3:
if "r" not in wires:
return "Cut the second wire"
elif wires[-1] == "w":
return "Cut the last wire"
elif wires.count("b") > 1:
return "Cut the last blue wire"
else:
return "Cut the last wire"
elif length == 4:
if wires.count("r") > 1:
serial = input("Enter last number of serial number ")
if serial % 2 == 1:
return "cut the last wire"
elif wires[-1] == 'y' and wires.count('r') == 0:
return "Cut the first wire"
elif wires.count("b") == 1:
return "Cut the first wire"
elif wires.count('y') > 1:
return "Cut the last wire"
else:
return "Cut the first wire"
elif length = 5:
if wires[-1] == 'bl':
serial = input("Enter last number of serial number ")
if serial % 2 == 1:
return "Cut the fourth wire"
elif wires.count('r') == 1 and wires.count('y') > 1:
return "Cut the first wire"
elif wires.count('bl') == 0:
return "Cut the second wire"
else:
return "Cut the first wire"
else:
if wires.count('y') == 0:
serial = input("Enter last number of serial number ")
if serial % 2 == 1:
return "Cut the third wire"
elif wires.count('y') == 1 and wires.count('w') > 1:
return "Cut the fourth wire"
elif wires.count('r') == 0:
return "Cut the last wire"
else:
return "Cut the fourth wire"
| true |
94bd6d5cd15f939e0faa62ebe99fd44b16bba90b | Python | gagandeep7717/ud120-intro-to-machine-learning | /final_project/poi_id.py | UTF-8 | 8,361 | 3.03125 | 3 | [] | no_license | #!/usr/bin/python
import sys
import pickle
from math import isnan
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
from tester import test_classifier, dump_classifier_and_data
### Task 1: Select what features you'll use.
### features_list is a list of strings, each of which is a feature name.
### The first feature must be "poi".
"""
NEW FEATURE DESCRIPTION
milk - (as in, "MILKing the company")
(expenses + deferral payments) /
1 + (loan advances + long_term_incentive + deferred_income)
POIs have an incentive to get as much out of the company as possible
as they don't see a long-term future in the company. Expenses are
consulting and reimbursements from the company and deferral payments
are distributions from deferred compensation - these are items that the
company must pay now. Loan advances, long term incentives and deferred
income are items that the company must pay at a later time. If you
believe that the company won't have the money to pay at a later time
you will want to collect what you can now.
"""
features_list = [
'poi',
'exercised_stock_options',
'bonus',
'total_stock_value',
'fraction_of_deferred_income_to_total_payments',
'milk'
]
### Load the dictionary containing the dataset
data_dict = pickle.load(open("final_project_dataset.pkl", "r") )
### Task 2: Remove outliers
for outlier in ['TOTAL','THE TRAVEL AGENCY IN THE PARK']:
data_dict.pop(outlier,0)
### Task 3: Create new feature(s)
# Adding:
# * milk // See detailed description above or code below
# * fraction_of_deferred_income_to_total_payments
# Build the two engineered features for every person in the dataset:
#   * 'milk' -- (expenses + deferral payments) over 1 + deferred obligations
#     (see the rationale comment above features_list),
#   * 'fraction_of_deferred_income_to_total_payments'.
for poi in data_dict:
    # NaN ---> 0
    # Zero out missing financial fields so the arithmetic below is defined.
    for feature in ['expenses',
                    'deferral_payments',
                    'loan_advances',
                    'long_term_incentive',
                    'deferred_income',
                    'total_payments'
                    ]:
        if isnan(float(data_dict[poi][feature])):
            data_dict[poi][feature] = 0
    # The +1 in the denominator guards against division by zero when all
    # deferred-obligation fields are 0.
    data_dict[poi]['milk'] = (data_dict[poi]['expenses'] +\
            data_dict[poi]['deferral_payments']) / \
            (1 + data_dict[poi]['loan_advances'] + \
            data_dict[poi]['long_term_incentive'] + \
            data_dict[poi]['deferred_income'])
    if data_dict[poi]['total_payments'] > 0:
        data_dict[poi]['fraction_of_deferred_income_to_total_payments'] = \
            data_dict[poi]['deferred_income'] / data_dict[poi]['total_payments']
    else:
        # No payments recorded: define the ratio as 0 rather than dividing by 0.
        data_dict[poi]['fraction_of_deferred_income_to_total_payments'] = 0
### Store to my_dataset for easy export below.
my_dataset = data_dict
### Extract features and labels from dataset for local testing
data = featureFormat(my_dataset, features_list, sort_keys = True)
labels, features = targetFeatureSplit(data)
### Task 4: Try a varity of classifiers
### Please name your classifier clf for easy export below.
### Note that if you want to do PCA or other multi-stage operations,
### you'll need to use Pipelines. For more info:
### http://scikit-learn.org/stable/modules/pipeline.html
from sklearn.model_selection import StratifiedShuffleSplit
def prec_recall(clf, data, feature_list, folds=1000):
    """Estimate precision and recall of *clf* via stratified shuffle-split CV.

    data: feature-formatted rows whose first column is the POI label.
    Returns (precision, recall); (0.0, 0.0) if no positive prediction was
    ever made (the original hit a NameError on return in that case).
    """
    labels, features = targetFeatureSplit(data)
    cv = StratifiedShuffleSplit(folds, random_state=42).split(features, labels)
    # Confusion-matrix counts accumulated over all folds:
    # true negative / false negative / true positive / false positive
    results = {'tn': 0, 'fn': 0, 'tp': 0, 'fp': 0}
    for train_idx, test_idx in cv:
        feature = {'train': [], 'test': []}
        label = {'train': [], 'test': []}
        for idx in train_idx:
            feature['train'].append(features[idx])
            label['train'].append(labels[idx])
        for idx in test_idx:
            feature['test'].append(features[idx])
            label['test'].append(labels[idx])
        # fit classifier using training split, then score the held-out split
        clf.fit(feature['train'], label['train'])
        predictions = clf.predict(feature['test'])
        for prediction, truth in zip(predictions, label['test']):
            if prediction == 0 and truth == 0:  # T / Neg
                results['tn'] += 1
            elif prediction == 0 and truth == 1:  # F / Neg
                results['fn'] += 1
            elif prediction == 1 and truth == 0:  # F / Pos
                results['fp'] += 1
            elif prediction == 1 and truth == 1:  # T / Pos
                results['tp'] += 1
            else:
                # print() form works on both Python 2 and 3.
                print("Warning: Found a predicted label that's not 0 or 1")
                break
    try:
        precision = 1.0 * results['tp'] / (results['tp'] + results['fp'])
        recall = 1.0 * results['tp'] / (results['tp'] + results['fn'])
    except ZeroDivisionError:
        # Bugfix: the original printed here and then raised NameError on the
        # return below; fall back to a defined (0, 0) score instead.
        print("Got a divide by zero when trying out:", clf)
        precision, recall = 0.0, 0.0
    return (precision, recall)
## NAIVE BAYES
#from sklearn.naive_bayes import GaussianNB
#clf = GaussianNB()
## SUPPORT VECTOR MACHINE
# Scaler - for use with SVC and LinearSVC
#from sklearn.preprocessing import MinMaxScaler
#min_max_scalar = MinMaxScaler()
#data = min_max_scalar.fit_transform(data)
# SVC
#from sklearn.svm import SVC
#clf = SVC(kernel="linear")
# LINEAR SUPPORT VECTOR MACHINE
#from sklearn.svm import LinearSVC
#clf = LinearSVC()
## RANDOM FOREST
#from sklearn.ensemble import RandomForestClassifier
#clf = RandomForestClassifier(n_estimators=9)
## DECISION TREE
from sklearn import tree
#clf = tree.DecisionTreeClassifier()
### Task 5: Tune your classifier to achieve better than .3 precision and recall
### using our testing script.
### Because of the small size of the dataset, the script uses stratified
### shuffle split cross validation. For more info:
### http://scikit-learn.org/stable/modules/generated/sklearn.cross_validation.StratifiedShuffleSplit.html
# GRID SEARCH
#from sklearn.grid_search import GridSearchCV
#svr = tree.DecisionTreeClassifier()
#clf = GridSearchCV(
# svr,
# {'criterion': ('gini','entropy'),
# 'splitter': ('best','random'),
# 'max_features': [1,2,3,5,9,0.1,0.2,0.25,0.5,0.75,0.8,0.9,0.99,"auto","sqrt","log2",None]})
# 'max_features': [.8],
# 'class_weight': [None, "auto"],
# 'max_leaf_nodes': [None, 2,3,4,5,6,7,8,9,10],
#})
# TUNED RESULT
print "\n","-"*34
print " Decision Tree Classifier Results"
print "-"*34
clf = tree.DecisionTreeClassifier(splitter="random")
precision, recall = prec_recall(clf, data, features_list)
print "\nPrecision:", precision
print " Recall:", recall
print ""
## Write out precision and recall values to results.csv to obtain statisical averages
#f = open('results.csv', 'w')
#f.write("Precision,Recall\n")
#
#precision_numbers = []
#recall_numbers = []
#for iteration in range(1000):
# precision, recall = prec_recall(clf, data, features_list)
# f.write("%f,%f\n"%(precision,recall))
# precision_numbers.append(precision)
# recall_numbers.append(recall)
#
#import numpy
#print " PRECISION:"
#print " Mean:",numpy.mean(precision_numbers)
#print " Median:",numpy.median(precision_numbers)
#print " Max:",max(precision_numbers)
#print " Min:",min(precision_numbers)
#print "First Quartile:", numpy.percentile(precision_numbers, 25)
#print "Third Quartile:", numpy.percentile(precision_numbers, 75)
#print ""
#print " RECALL"
#print " Mean:",numpy.mean(recall_numbers)
#print " Median:",numpy.median(recall_numbers)
#print " Max:",max(recall_numbers)
#print " Min:",min(recall_numbers)
#print "First Quartile:", numpy.percentile(recall_numbers, 25)
#print "Third Quartile:", numpy.percentile(recall_numbers, 75)
#### Feature Importance
## Uncomment to review the performance of individual features
## for this run
#importances = clf.feature_importances_.tolist()
#print "FEATURE IMPORTANCE"
#print "Weight\tFeature"
#print "------\t-------"
#for idx, feature in enumerate(features_list[1:]):
# print " %.2f \t%s" % (importances[idx], feature)
### Dump your classifier, dataset, and features_list so
### anyone can run/check your results.
dump_classifier_and_data(clf, my_dataset, features_list) | true |
63932461173458fb4587fbdfa0407fa8bc57351b | Python | openlab-aux/klauskleber | /klauskleber.py | UTF-8 | 4,726 | 2.5625 | 3 | [] | no_license | #!/usr/bin/env python3
import qrcode
from io import BytesIO
import sys
from urllib.parse import urljoin
#SOH = "\x0H"
STX = "\x02"
CR = "\x0D"
ESC = "\x1B"
# 191100001800020OpenLab Augsburg
# |||||||||||||||_ text
# ||||||||||||||_ x coord
# ||||||||||_ y coord
# ||||||_ fixed
# |||_ fixed
# |_ font
class LabelPrinter():
def __init__(self, port):
self.port = port
self._fd = None
def isOpen(self):
return not self._fd is None
def open(self):
self._fd = open(self.port, 'wb')
def close(self):
self._fd.close()
self._fd = None
def write(self, bytes):
self._fd.write(bytes)
def print_label(self, label, count=1):
if not self.isOpen():
self.open()
for line in label.build():
self.write(line)
if count > 1:
self.write(STX+"E"+str(count).zfill(4)+CR)
self.write(STX+"G"+CR)
self.close()
return
class Label:
    """Inventory label for the OpenLab Augsburg thing registry: validates the
    thing metadata and serializes a printer job (Datamax-style STX commands)
    including a QR code linking to the thing's page and an EAN barcode."""

    thing_base_url = "https://dinge.openlab-augsburg.de/ding/"
    # NOTE(review): class-level mutable list shared by all instances until
    # build() rebinds it per instance -- confirm this is intentional.
    labelbuf = []

    def __init__(
            self,
            thing_id,
            thing_name,
            thing_maintainer,
            thing_owner = "OpenLab",
            thing_use_pol = "",
            thing_discard_pol = ""):
        # thing_id: up to 10 digits, left-padded with zeros to 10.
        if len(thing_id) > 10 or not thing_id.isdigit():
            raise ValueError("Not a valid thing_id: field must contain max "
                    "10 digits ranging from 0-9")
        self.thing_id = thing_id.zfill(10)
        # Names longer than the label width are truncated with an ellipsis.
        if len(thing_name) > 19:
            self.thing_name = thing_name[:16] + "..."
        else:
            self.thing_name = thing_name
        if len(thing_owner) > 13:
            raise ValueError("Not a valid thing_owner: field must contain "
                    "less then 13 characters")
        self.thing_owner = thing_owner
        if len(thing_maintainer) > 13:
            raise ValueError("Not a valid thing_maintainer: field must "
                    "contain less then 13 characters")
        self.thing_maintainer = thing_maintainer
        if len(thing_use_pol) > 12:
            raise ValueError("Not a valid thing_use_pol: field must contain "
                    "less then 12 characters")
        self.thing_use_pol = thing_use_pol
        if len(thing_discard_pol) > 12:
            raise ValueError("Not a valid thing_discard_pol: field must "
                    "contain less then 12 characters")
        self.thing_discard_pol = thing_discard_pol
        return

    def _gen_qrcode(self):
        # Render a QR code pointing at this thing's registry URL, returned as
        # raw BMP bytes for upload to the printer.
        qr = qrcode.QRCode(
            version=1,
            error_correction=qrcode.constants.ERROR_CORRECT_Q,
            box_size=2,
            border=0)
        qr.add_data(urljoin(self.thing_base_url,self.thing_id))
        qr.make()
        img = qr.make_image()
        bmp = BytesIO()
        # NOTE(review): `kind="BMP"` looks like it should be `format=` for a
        # PIL-backed image -- confirm against the qrcode image backend in use.
        img.save(bmp, kind="BMP")
        bmp.seek(0)
        return bmp.read()

    def _labelbuf_append_string(self, string):
        # Printer expects CP437-encoded bytes.
        self.labelbuf.append(bytes(string, "CP437"))

    def build(self):
        """Serialize the label into a list of byte chunks ready to write."""
        self.labelbuf = []
        ### GENERAL SETTINGS reseted after turn off ###
        self._labelbuf_append_string(STX+"KI<5"+CR) # german char set
        self._labelbuf_append_string(STX+"m"+CR) # use metric system
        self._labelbuf_append_string(STX+"KX0025"+CR) # 25mm label[0] height
        self._labelbuf_append_string(STX+"f740"+CR) # stop position for back feed
        ### QR-Code transmitting ###
        self._labelbuf_append_string(STX+"IAbqrcode"+CR) # write bmp into ram as "qrcode"
        self.labelbuf.append(self._gen_qrcode())
        # Field lines below encode font/position prefixes followed by text.
        self._labelbuf_append_string(STX+"L"+CR) # enter label[0] formatting mode
        self._labelbuf_append_string("1Y1100000110030qrcode"+CR) # qrcode
        self._labelbuf_append_string("191100001830030Eingetragenes Inventar des OpenLab Augsburg e. V."+CR) # header
        self._labelbuf_append_string("121100001310225"+self.thing_name+CR) # Name
        self._labelbuf_append_string("111100000900225ID: "+self.thing_id+CR) # ID
        self._labelbuf_append_string("111100000420225OWN: "+self.thing_owner+CR) # Owner
        self._labelbuf_append_string("111100000070225MNT: "+self.thing_maintainer+CR) # Maintainer
        self._labelbuf_append_string("111100000420670USE: "+self.thing_use_pol+CR) # Usage
        self._labelbuf_append_string("111100000070670DIS: "+self.thing_discard_pol+CR) # Discard
        self._labelbuf_append_string("1d2108500920853"+self.thing_id+CR) # EAN
        self._labelbuf_append_string("E"+CR) # end label[0] formatting mode
        return self.labelbuf
| true |
def average_word_length(string):
    """Return the mean length of the whitespace-separated words in *string*.

    Returns 0.0 when the string contains no words (the original raised
    ZeroDivisionError in that case).
    """
    words = string.split()
    if not words:
        return 0.0
    return sum(len(word) for word in words) / len(words)
| true |
bb443d95eaf469b163ebc8a328f456aeabdd6e0a | Python | ClubShooter2/c-111 | /class.py | UTF-8 | 3,019 | 3.203125 | 3 | [] | no_license | import csv
import pandas as pd
import plotly.figure_factory as ff
import statistics
import random
import plotly.graph_objects as go
df = pd.read_csv("data.csv")
data = df["Math_score"].tolist()
#fig = ff.create_distplot([data],["Math_score"],show_hist=False)
#fig.show()
mean = statistics.mean(data)
print("Mean of population",mean)
std_deviation = statistics.stdev(data)
print("Standard deviation of population",std_deviation)
def random_set_of_mean(counter):
    """Draw `counter` values (with replacement) from the global `data` and
    return their mean -- one sample mean for the sampling distribution."""
    sample = [data[random.randint(0, len(data) - 1)] for _ in range(counter)]
    return statistics.mean(sample)
def show_fig(mean_list):
    """Plot the sampling distribution in *mean_list* with its mean and
    1/2/3-sigma marker lines.

    NOTE(review): the sigma markers use the module-level `std_deviation`
    (population stdev), not the stdev of `mean_list` -- confirm intended.
    """
    df = mean_list
    mean = statistics.mean(df)
    fig = ff.create_distplot([df], ["temp"], show_hist=False)
    first_std_deviation_start, first_std_deviation_end = mean - std_deviation, mean + std_deviation
    second_std_deviation_start, second_std_deviation_end = mean - (2 * std_deviation), mean + (2 * std_deviation)
    third_std_deviation_start, third_std_deviation_end = mean - (3 * std_deviation), mean + (3 * std_deviation)
    fig.add_trace(go.Scatter(x=[mean, mean], y=[0, 0.17], mode="lines", name="MEAN"))
    fig.add_trace(go.Scatter(x=[first_std_deviation_start, first_std_deviation_start], y=[0, 0.17], mode="lines", name="STANDARD DEVIATION 1 START"))
    fig.add_trace(go.Scatter(x=[first_std_deviation_end, first_std_deviation_end], y=[0, 0.17], mode="lines", name="STANDARD DEVIATION 1 END"))
    fig.add_trace(go.Scatter(x=[second_std_deviation_start, second_std_deviation_start], y=[0, 0.17], mode="lines", name="STANDARD DEVIATION 2 START"))
    fig.add_trace(go.Scatter(x=[second_std_deviation_end, second_std_deviation_end], y=[0, 0.17], mode="lines", name="STANDARD DEVIATION 2 END"))
    fig.add_trace(go.Scatter(x=[third_std_deviation_start, third_std_deviation_start], y=[0, 0.17], mode="lines", name="STANDARD DEVIATION 3 START"))
    fig.add_trace(go.Scatter(x=[third_std_deviation_end, third_std_deviation_end], y=[0, 0.17], mode="lines", name="STANDARD DEVIATION 3 END"))
    # Bugfix: fig.show() was called twice, opening two identical plots.
    fig.show()
def setup():
    """Build a sampling distribution of 1000 sample means (n=100 each),
    plot it, and print its mean."""
    sample_means = [random_set_of_mean(100) for _ in range(1000)]
    show_fig(sample_means)
    print("Mean of sampling distribution :-", statistics.mean(sample_means))
setup()
population_mean = statistics.mean(data)
print("population mean:- ", population_mean)
def standard_deviation():
    """Print the standard deviation of 1000 sample means (n=100 each)."""
    sample_means = [random_set_of_mean(100) for _ in range(1000)]
    print("Standard deviation of sampling distribution:- ", statistics.stdev(sample_means))
| true |
8c887f070bbe1c190e0c28c93b2c1d0592594e72 | Python | sharadbhat/ClickbaitDetector | /backend/source/preprocessor/preprocess_embeddings.py | UTF-8 | 935 | 2.859375 | 3 | [
"MIT"
] | permissive | import numpy as np
from sklearn.decomposition import PCA
def preprocess_embeddings(embedding_dimension, vocabulary):
    """Build an embedding matrix for *vocabulary* from 50-D GloVe vectors,
    PCA-reduced to *embedding_dimension* columns.

    Words missing from GloVe keep an all-zero row.  Returns an array of
    shape (len(vocabulary), embedding_dimension).
    """
    embeddings = {}
    with open("models/glove.6B.50d.txt") as glove_file:
        for line in glove_file:
            # Each line is "<word> <50 floats>"; split on the first space.
            start = line.find(" ")
            word = line[:start]
            # np.fromstring(sep=...) is deprecated; parse the tokens instead.
            embeddings[word] = np.array(line[start:].split(), dtype=np.float32)
    weights = np.zeros((len(vocabulary), 50))  # existing vectors are 50-D
    for i, word in enumerate(vocabulary):
        if word in embeddings:
            weights[i] = embeddings[word]
    # Bugfix: the original used the module-level EMBEDDING_DIMENSION constant,
    # silently ignoring the function's own parameter.
    pca = PCA(n_components=embedding_dimension)
    weights = pca.fit_transform(weights)
    return weights
if __name__ == "__main__":
EMBEDDING_DIMENSION = 30
vocabulary = open("data/vocabulary.txt").read().split("\n")
weights = preprocess_embeddings(EMBEDDING_DIMENSION, vocabulary)
np.save("models/embeddings.npy", weights)
| true |
ac837d17dd56e01f53240408ecf04958e299ef68 | Python | Park-Dasol/SWEA-GITHUB | /D1/2063.py | UTF-8 | 216 | 3.046875 | 3 | [] | no_license | N = int(input())
lst = list(map(int, input().split()))
for i in range(N-1, 0, -1):
for j in range(0, i):
if lst[j] > lst[j+1]:
lst[j], lst[j+1] = lst[j+1], lst[j]
med = N // 2
print(lst[med]) | true |
1fb2348c93f5e28bf38aca5745a4437b20ecb4c6 | Python | prazp/Deep-Learning-PAD | /other_code/data_creation.py | UTF-8 | 1,702 | 2.734375 | 3 | [] | no_license | #!/usr/bin/python3
import os
import math
import numpy as np
import matplotlib.pyplot as plt
import pickle
import resource, sys
fs = 16000  # sample rate (Hz)
sin_files_genuine = []
DATADIR = "/mnt/c/Users/prasa/code/Thesis"
print("genuine creation")
# Parameter grids: 9 gains x 9 phase offsets (in samples) x 3 DC offsets
# -> 243 two-second (32000-sample) sine waves.
gain_bin = np.linspace(1, 5, 9)
offset_bin = [-1, 0, 1]
phase_bin = [0, 2000, 4000, 6000, 8000, 10000, 12000, 14000, 16000]
#Generate genuine sine waves
for gain in range(len(gain_bin)):
    for phase in range(len(phase_bin)):
        for offset in range(len(offset_bin)):
            y = []
            for x in range(32000):
                y.append(gain_bin[gain]*np.sin(2 * np.pi * x / fs - 2 * math.pi * phase_bin[phase]/fs)+offset_bin[offset])
                #y.append(gain*math.sin(2 * math.pi * x/fs - 2 * math.pi * phase/fs))
            sin_files_genuine.append(y)
mean = 0
std =0.8
num_samples = 32000
sin_files_spoof = []
print("spoof creation")
#Generate spoofed sine waves: genuine waves plus Gaussian noise
for wave in sin_files_genuine:
    samples = np.random.normal(mean, std, size=num_samples)
    temp = [wave[i]+samples[i] for i in range(len(wave))]
    sin_files_spoof.append(temp)
print("done!")
#Save Genuine and Spoofed sine waves
pickle_out = open(os.path.join(DATADIR, "genuine_sines.pickle"), "wb")
pickle.dump(sin_files_genuine, pickle_out)
pickle_out.close()
pickle_out = open(os.path.join(DATADIR, "spoof_sines.pickle"), "wb")
pickle.dump(sin_files_spoof, pickle_out)
pickle_out.close()
#Plotting Sine waves for debugging
x = list(range(0, 32000))
plt.plot(x, sin_files_genuine[0])
plt.ylabel('Amplitude')
plt.xlabel('sample(n)')
plt.savefig('genuine.png')
# NOTE(review): the figure is never cleared, so spoof.png will contain
# both the genuine and the spoofed curve.
plt.plot(x, sin_files_spoof[0])
plt.ylabel('Amplitude')
plt.xlabel('sample(n)')
plt.savefig('spoof.png')
067a285e389cde5660fbef513fb9321dc0e3faac | Python | Gyanesh-Mahto/Edureka-Python | /Class_Codes_Module_3&4/P5_Scope_of_a_variable.py | UTF-8 | 479 | 4.375 | 4 | [] | no_license | #Scope of a Variable:
#Global Variables:
'''
The variables which are declared out of the function and can be accessed anywhere in the program
are called as Global Variables.
'''
#Local Variables:
'''
The variables which are declared inside of the function and can be accessed only inside the function
are called as Local Variables.
'''
a=50  # global: readable from inside num() without any declaration
def num():
    b=10  # local: exists only while num() is running
    print(a) #50 - Global variable (read-only access needs no `global` statement)
    print(b) #10 - Local variable
print(a) #50 - Global Variable
num()
| true |
03e46e2418efc9f6e4603f2e97a28cf54743ec20 | Python | TUMH0404/kinematicstraining | /Sample/example.py | UTF-8 | 1,003 | 2.546875 | 3 | [] | no_license | # coding: utf-8
import common
import numpy as np
# jupyter notebookを使う人はコメントアウトする
#%matplotlib inline
## Specify the input file names and their labels here.
name = ["20180302_8.csv","20180302_8.csv"]
ff = ["open","close"]
def COP(fname,figname):
    """Analyse a centre-of-pressure (COP) recording and print summary stats.

    Loads *fname* via the project `common` module, extracts the COP trace,
    rotates it onto its principal axes, saves a figure named *figname*,
    and prints (labels in Japanese) the rectangular area, total trajectory
    length, correction angle and centre point.
    """
    datcop=common.Text2Numpy(filename=fname)
    # Samples 100..2500 at 120 Hz; columns 1:3 are taken as the x/y COP
    # coordinates. NOTE(review): exact semantics live in `common.FFT_cop`.
    r = common.FFT_cop(freq=120,df=datcop["cop"],start=100,end=2500,bias=False)[:,1:3]
    matrix,center,ang,d0,d1 = common.COPpca(r)
    common.FIGshow(figname,d0,d0@common.Rotation(matrix,0))
    dat = d0@common.Rotation(matrix,0)
    # Rectangular (bounding-box) area and total trajectory length.
    a = np.max(dat,axis=0)-np.min(dat,axis=0)
    rectarea = a[0]*a[1]
    dat1 = np.diff(dat,axis=0)
    totaltraj = np.sum(np.linalg.norm(dat1,ord=2,axis=1))
    print(f"矩形面積:{rectarea}")
    print(f"総軌跡長:{totaltraj}")
    print(f"補正した角度:{ang}")
    print(f"中心点:x_{center[0]},y_{center[1]}")
# Run the analysis for each (file, label) pair defined above.
for n1,n2 in zip(name,ff):
    print(f"{n2}\n")
    COP(n1,n2)
print("\n\n")
ed072686ec166c534ef1cc1df298e508ac392d8d | Python | isaacDiazP/test | /file1.py | UTF-8 | 108 | 3.640625 | 4 | [] | no_license | x = 0
# Classify x by sign (messages in Spanish, as in the original program).
if x < 0:
    print("x es negativo")
elif x == 0:
    print("x es 0")
else:
    print("x es positivo")
| true |
e6a0986c2bc5617a15e18d9e393718a6cc0d1fe8 | Python | sdbit04/MySitePOM | /venv/Include/PositionalParam/positionalParam1.py | UTF-8 | 354 | 3.359375 | 3 | [] | no_license | var=10
def parentM():
    """Demonstrate `global`: assignments inside this function rebind the
    module-level `var` (initialised to 10 at the top of this file)."""
    global var  # rebind the module-level name instead of creating a local
    print("parentM local value of var = " + str(var))
    var = 6
    if 5 == 5:  # always true; note `if` blocks do NOT create a new scope in Python
        var=2
        print("It takes value of var from enclosed block = " + str(var))
print ("value of global var before method execution = " + str(var))
parentM()
print ("value of global var after method execution = " + str(var))
| true |
a402672df87a9ea32932c957ba58308a8f93f6af | Python | 0giru/python_practice | /CoffeeMachineProject/Main.py | UTF-8 | 5,675 | 3.203125 | 3 | [] | no_license | MENU = {
    "espresso": {
        "ingredients": {
            "water": 50,
            "coffee": 18,
        },
        "cost": 1.5,
    },
    "latte": {
        "ingredients": {
            "water": 200,
            "milk": 150,
            "coffee": 24,
        },
        "cost": 2.5,
    },
    # NOTE(review): MENU is never read by the logic below, and this
    # cappuccino recipe disagrees with the hard-coded amounts the machine
    # actually deducts (300 ml water / 100 g coffee / 200 ml milk).
    "cappuccino": {
        "ingredients": {
            "water": 250,
            "milk": 100,
            "coffee": 24,
        },
        "cost": 3.0,
    }
}
# Initial resource levels. NOTE(review): also unused by the logic below,
# which relies on the WATER/MILK/COFFEE globals instead.
resources = {
    "water": 300,
    "milk": 200,
    "coffee": 100,
}
import sys
# Machine state (module level). NOTE: the original `global WATER` etc.
# statements here were removed — `global` only has meaning inside a
# function body, so at module level they were no-ops.
WATER = 500    # ml of water available
MILK = 600     # ml of milk available
COFFEE = 120   # g of coffee grounds available
MONEY = 0      # dollars collected so far
ORDER = ""     # last drink name entered by the user
CHECK = False  # True once resources suffice for the current ORDER
# TODO : 1. Prompt user by asking "What would you like?"
def check_order():
    """Store the user's raw answer ("espresso"/"latte"/"cappuccino",
    "report" or "off") in the global ORDER."""
    global ORDER
    ORDER = input("What would you like? : ")
# TODO : 2. Turn Off Coffee Machine by entering "OFF" to the prompt
def off_machine():
    """Terminate the process. (The main loop actually matches lowercase "off".)"""
    sys.exit()
# TODO : 3. print all of resources of machine
def print_resources():
    """Report remaining ingredients and the money collected so far."""
    # Read-only access to module globals needs no `global` declaration;
    # the original declarations (including a never-used ORDER) were no-ops
    # and have been removed.
    print(f"WATER : {WATER}ml\n")
    print(f"MILK : {MILK}ml\n")
    print(f"COFFEE : {COFFEE}g\n")
    print(f"MONEY : ${MONEY}\n")
def insert_cal_money():
    """Take coin payment for the drink named by the global ORDER.

    Prompts for quarters/dimes/nickles/pennies. If the total covers the
    drink's price: banks the price, deducts the ingredients, and prints
    the change; otherwise the payment is refused. Unknown ORDER values
    leave the machine untouched (as in the original).
    """
    global MONEY
    global WATER
    global MILK
    global COFFEE
    temp_quarters = input("how many quarters? : \n")
    temp_dimes = input("how many dimes? : \n")
    temp_nickles = input("how many nickles? : \n")
    temp_pennies = input("how many pennies? : \n")
    # Same summation order as the original so float results are identical.
    temp_total = float(temp_dimes) * 0.10 + float(temp_nickles) * 0.05 + float(temp_pennies) * 0.01 + float(temp_quarters) * 0.25
    # (price, water, coffee, milk) per drink — mirrors the original's three
    # duplicated branches (deliberately NOT the MENU dict above, whose
    # cappuccino recipe disagrees with the machine's actual behaviour).
    recipes = {
        "espresso": (1.5, 50, 18, 0),
        "latte": (2.5, 200, 24, 150),
        "cappuccino": (3, 300, 100, 200),
    }
    if ORDER not in recipes:
        return
    price, water_used, coffee_used, milk_used = recipes[ORDER]
    if temp_total >= price:
        MONEY += price
        WATER -= water_used
        COFFEE -= coffee_used
        MILK -= milk_used
        change = temp_total - price
        print("Here is $" + str(round(change, 2)) + "dollars in change")
    else:
        print("Sorry, not enough money, Money Refunded")
# Main REPL: read an order, check resources, then take payment.
# NOTE(review): CHECK is never reset to False, so after one successful
# purchase an unrecognised order (e.g. "tea") falls through all branches
# and still prompts for coins.
while True:
    check_order()
    if ORDER == "off":
        # print("turn off machine")
        off_machine()
    elif ORDER == "report":
        print_resources()
        continue
    else:
        if ORDER == "espresso":
            if (WATER >= 50) and (COFFEE >= 18):
                CHECK = True
            elif WATER < 50:
                print("Sorry, Not enough Water")
                continue
            elif COFFEE < 18:
                print("Sorry, Not enough Coffee")
                continue
            # NOTE(review): this combined branch is unreachable — the
            # single-ingredient elifs above always fire first.
            elif (WATER < 50) and (COFFEE < 18):
                print("Sorry, Not enough Water And Coffee")
                continue
        elif ORDER == "latte":
            if (WATER >= 200) and (COFFEE >= 24) and (MILK >= 150):
                CHECK = True
            elif WATER < 200:
                print("Sorry, Not enough Water")
                continue
            elif COFFEE < 24:
                print("Sorry, Not enough Coffee")
                continue
            elif MILK < 150:
                print("Sorry, Not enough Milk")
                continue
            # NOTE(review): all combined branches below are unreachable,
            # and the (COFFEE, MILK) message wrongly says "Water And Coffee".
            elif (WATER < 200) and (COFFEE < 24):
                print("Sorry, Not enough Water And Coffee")
                continue
            elif (WATER < 200) and (MILK < 150):
                print("Sorry, Not enough Water And Milk")
                continue
            elif (COFFEE < 24) and (MILK < 150):
                print("Sorry, Not enough Water And Coffee")
                continue
            elif (COFFEE < 24) and (MILK < 150) and (WATER < 200):
                print("Sorry, Not enough Water And Coffee And Milk")
                continue
        elif ORDER == "cappuccino":
            if (WATER >= 300) and (COFFEE >= 100) and (MILK >= 200):
                CHECK = True
            elif WATER < 300:
                print("Sorry, Not enough Water")
                continue
            elif COFFEE < 100:
                print("Sorry, Not enough Coffee")
                continue
            elif MILK < 200:
                print("Sorry, Not enough Milk")
                continue
            # NOTE(review): unreachable combined branches, same as latte.
            elif (WATER < 300) and (COFFEE < 100):
                print("Sorry, Not enough Water And Coffee")
                continue
            elif (WATER < 300) and (MILK < 200):
                print("Sorry, Not enough Water And Milk")
                continue
            elif (COFFEE < 100) and (MILK < 200):
                print("Sorry, Not enough Water And Coffee")
                continue
            elif (COFFEE < 100) and (MILK < 200) and (WATER < 300):
                print("Sorry, Not enough Water And Coffee And Milk")
                continue
        if CHECK == True:
            insert_cal_money()
259d8f06e6c782e1b0f7a35723dec87741ecf78c | Python | piddnad/pixare | /pixare/models/like.py | UTF-8 | 1,205 | 2.578125 | 3 | [] | no_license | from django.db import models
from django.contrib.auth.models import User
class Like(models.Model):
    """
    Data model for a "like" on a photo (original docstring was Chinese).

    NOTE(review): __unicode__ is the Python 2 Django convention; on
    Python 3 Django calls __str__, so this method is effectively unused there.
    """
    photo_id = models.IntegerField()
    user = models.ForeignKey(User, related_name='user', on_delete=models.CASCADE,)
    photo_owner = models.ForeignKey(User, related_name='like_photo_owner', on_delete=models.CASCADE,)
    date_liked = models.DateTimeField(auto_now_add=True)
    def __unicode__(self):
        return str(self.id)
    class Meta:
        ordering = ['id']
    # Recompute the photo's score after a like is added (isMarkLike=True)
    # or removed (isMarkLike=False).  (Original comment was in Chinese.)
    def calPhotoScore(self, isMarkLike):
        from pixare.models.photo import Photo
        try:
            photo = Photo.objects.get(id=self.photo_id, owner=self.photo_owner)
            photo.like_count += 1 if isMarkLike else -1
            photo.calculateScore()
            photo.save()
        except Photo.DoesNotExist:
            # Photo was deleted (or never existed): silently skip the update.
            pass
    # Return True when this (photo, user, owner) like already exists, to
    # prevent duplicate likes.  (Original comment was in Chinese.)
    def isExist(self):
        try:
            Like.objects.get(photo_id=self.photo_id, user=self.user, photo_owner=self.photo_owner)
        except Like.DoesNotExist:
            return False
        else:
            return True
| true |
10d5591e8eca76a9ab6836553f3907e1c7b13510 | Python | procrastinatorT1000/costs_manager | /purchase_parser.py | UTF-8 | 1,968 | 3.046875 | 3 | [] | no_license | import re
from datetime import datetime
import table_writer
# Example payload from a fiscal-receipt QR code.
data = "t=20180806T122000&s=240.00&fn=8712000100040824&i=16588&fp=3931869026&n=1"

# Payloads already handled during this run (module-level cache).
processed_check_info_list = []

def is_unique_data(data):
    """Remember *data* and return True if unseen; otherwise report and return False."""
    if data not in processed_check_info_list:
        processed_check_info_list.append(data)
        return True
    print('Data: [%s] already processed' % data)
    return False
def conv_split_date_time(yyyymmddThhmmss):
    """Split a 'YYYYMMDDTHHMMSS' stamp into ('DD.MM.YYYY', 'HH:MM:SS', ok).

    On parse failure an error message is printed and ('', '', False) is
    returned instead of raising.

    FIXES vs the original: the bare ``except:`` (which also swallowed
    SystemExit/KeyboardInterrupt) is narrowed to the exceptions strptime
    actually raises; a stray no-op ``type(dt)`` expression was removed;
    the error message no longer crashes on non-string input.
    """
    date = ""
    time = ""
    status = False
    try:
        dt = datetime.strptime(yyyymmddThhmmss, "%Y%m%dT%H%M%S")
        print(dt)  # kept from the original: debug echo of the parsed stamp
        date = dt.strftime("%d.%m.%Y")
        time = dt.strftime("%H:%M:%S")
        status = True
    except (ValueError, TypeError):
        print("Invalid Date-Time format: " + str(yyyymmddThhmmss) + '\nshould be yyyymmddThhmmss')
    return date, time, status
def parse_data(data):
    """Parse one receipt payload ("k=v&k=v&..."), print its summary, and
    append (date, time, sum) to records.xlsx via the project table_writer.

    :type data: str
    :returns: True when the payload parsed and had a valid timestamp.
    """
    status = False
    if is_unique_data(data) == False:
        return status
    print("**************************************")
    print(data)
    """ Split different tags separated by '&' """
    tag_list = re.split('&', data)
    params_dict = {}
    for tag in tag_list:
        name_val = re.split('=', tag)
        params_dict[name_val[0]] = name_val[1]
    # NOTE(review): bare except hides both a missing "t" key and parser bugs.
    try:
        date_str, time_str, status = conv_split_date_time( params_dict["t"] )
    except:
        status = False
    if status:
        # "D"/"T": formatted date and time; "s": purchase sum from the payload.
        params_dict["D"] = date_str;
        params_dict["T"] = time_str;
        print ("Date of purchase " + params_dict["D"] +
               "\nTime of purchase " + params_dict["T"] +
               "\nSumm of purchase " + params_dict["s"] + '\n')
        row = [params_dict["D"], params_dict["T"], params_dict["s"]]
        try:
            book, sheet = table_writer.init_table('records.xlsx')
            table_writer.write_record(sheet, row)
            table_writer.deinit_table(book)
        except:
            print("ERROR! Writing table: Something went wrong!")
    return status
# Demo entry point: parse the sample payload defined at the top of the file.
if __name__ == "__main__":
    parse_data(data)
| true |
dc25e32e97d834a211bb3cfb8d80c38e20fb002e | Python | abhinavmodugula/NG_AI_Challenge | /main.py | UTF-8 | 8,370 | 2.75 | 3 | [] | no_license | import datetime
import cv2
import mediapipe as mp
import time
from shapely.geometry import Point
from shapely.geometry.polygon import Polygon
"""
Code created for the Northrup Grumman AI Challenge
Team 6
Simon C., Abhinav M., Sameer H.
This code runs the live simulator where the webcam
input from any device can be used. This script will
first allow a user to select the regions in the first frame.
Then, we run a live detection of any hands that touch a surface.
When a hand touches a region of interest, that region will
chnage color from green to red. It will transition back after a set
amount of time that the user can define.
The hand detection model is through the mediapipe ML API
"""
# Shared application state. NOTE: the original module-level
# `global regions, touch_map, touches, intregions` statement was a no-op
# (`global` only has meaning inside a function) and has been removed.
regions = []     # shapely Polygons the user has drawn
intregions = []  # the same regions as lists of int (x, y) tuples, for drawing
touch_map = {}   # region index -> timestamp of the last detected touch
mp_drawing = mp.solutions.drawing_utils #load in the detection API
mp_hands = mp.solutions.hands
W_NAME = "Single-Threaded Detection" #name for the displayed window
def start_select_roi(img):
    """
    Allows the user to select which areas of the image to
    monitor. Multiple regions can be selected.

    Registers a mouse callback on the main window; every four left-clicks
    define one quadrilateral region.  (*img* itself is not used here.)
    """
    global touches
    touches = []
    def mouse_fn(event, x, y, flags, param):
        """
        Function to process the user's
        mouse inputs (cv2 passes event type, click position, flags, param).
        """
        global regions, touch_map
        if event != 1: # left mouse button (cv2.EVENT_LBUTTONDOWN)
            return
        # no previous touches or last touch is full? add a new touch
        if len(touches) == 0 or len(touches[-1]) == 4:
            touches.append([Point(x, y)])
        else: # continue last touch
            touches[-1].append(Point(x, y))
            if len(touches[-1]) == 4: # add touch to regions
                regions.append(Polygon(touches[-1]))
                intregions.append([(int(i.x), int(i.y)) for i in touches[-1]])
                # Pretend the last touch was long ago so the region starts
                # fully "cooled down" (drawn green) in the main loop.
                touch_map[len(regions) - 1] = THRESHOLD + 1
    cv2.setMouseCallback(W_NAME, mouse_fn)
def get_center(box, im_width, im_height):
    """Return the pixel-space (x, y) center of a normalized detection box.

    *box* is [top, left, bottom, right] in 0..1 coordinates, as produced
    by the (now commented-out) TF hand detector.

    BUG FIX: the original mixed the axes — it computed (bottom - left)/2
    and (right - top)/2 and returned them swapped — instead of the box
    midpoint. The (x, y) ordering now matches the mediapipe path, which
    builds centers as (int(c.x * im_width), int(c.y * im_height)).
    """
    left, right = box[1] * im_width, box[3] * im_width
    top, bottom = box[0] * im_height, box[2] * im_height
    c_x = (left + right) / 2
    c_y = (top + bottom) / 2
    return (int(c_x), int(c_y))
def point_in_roi(point, roi):
    """
    Returns whether shapely Point *point* lies inside Polygon *roi*.

    Note: Polygon.contains is strict — a point exactly on the boundary
    returns False.  (Dead commented-out rectangle code removed.)
    """
    return roi.contains(point)
def draw_region(image, region):
    """Outline *region* (four (x, y) corner tuples) on *image* in red, 2 px wide."""
    corners = list(region)
    # Connect consecutive corners and close the loop back to the first one.
    for a, b in zip(corners, corners[1:] + corners[:1]):
        cv2.line(image, a, b, (0, 0, 255), 2)
# cv2.namedWindow("app")
vid = cv2.VideoCapture(0)
# vid.set(cv2.CAP_PROP_FRAME_WIDTH, width)
# vid.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
start_time = datetime.datetime.now()
num_frames = 0
im_width, im_height = (vid.get(3), vid.get(4))
# max number of hands we want to detect/track
num_hands_detect = 4
first = True #if first frame
# regions = None
camera_adjust_frames = 200
#
cv2.namedWindow(W_NAME)
THRESHOLD = 25 # number of seconds after which touches don't count anymore
# Main loop: grab webcam frames, run mediapipe hand detection, and tint
# each user-drawn region from green to red based on how recently a hand
# center entered it. Press 'q' to quit.
with open("points2.txt", "w") as file: #log file
    with mp_hands.Hands(
        static_image_mode=True,
        max_num_hands=2,
        min_detection_confidence=0.5) as hands:
        while vid.isOpened():
            # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
            ret, image_np = vid.read()
            # image_np = cv2.flip(image_np, 1)
            try:
                image_np = cv2.cvtColor(cv2.flip(image_np, 1), cv2.COLOR_BGR2RGB)
            except:
                print("Error converting to RGB")
            if not ret:
                print("tijoo")
                continue
            #if first frame, let user select region to monitor
            if first:
                first = False
                start_select_roi(image_np)
                # regions = select_roi(image_np)
                # for i in range(len(regions)):
                #     touch_map[i] = THRESHOLD + 1
            # Old detection. Variable boxes contains the bounding box cordinates for hands detected,
            # while scores contains the confidence for each of these boxes.
            # Hint: If len(boxes) > 1 , you may assume you have found atleast one hand (within your score threshold)
            # boxes, scores = hand_detector.detect_objects(image_np,
            #                                              detection_graph, sess)
            #
            #
            # # draw bounding boxes on frame
            # centers = hand_detector.draw_box_on_image(num_hands_detect, score_thresh,
            #                                           scores, boxes, im_width, im_height,
            #                                           image_np)
            # print(touches)
            # print(regions)
            image_np.flags.writeable = False # makes detection more efficient
            result = hands.process(image_np)
            image_np.flags.writeable = True
            for i in touches:
                if len(i) != 4: # draw the regions that are currently being created in red
                    for (a, b) in zip(i, i[1:]):
                        cv2.line(image_np, (int(a.x), int(a.y)), (int(b.x), int(b.y)), (255, 0, 0), 2)
            if result.multi_hand_landmarks: # draw dots & lines on detected hands
                for hand_landmarks in result.multi_hand_landmarks:
                    mp_drawing.draw_landmarks(
                        image_np, hand_landmarks, mp_hands.HAND_CONNECTIONS)
            n = time.time()
            overlay = image_np.copy()
            for i, (ir, region) in enumerate(zip(intregions, regions)):
                if result.multi_hand_landmarks: # for each region, for each hand, check if it's in the region
                    for r in result.multi_hand_landmarks:
                        c = r.landmark[9] # 9th landmark is the center of the hand
                        center = (int(c.x * im_width), int(c.y * im_height))
                        print(center)
                        cv2.circle(image_np, center, 20, (0, 0, 255), 10)
                        if (point_in_roi(Point(center), region)):
                            print(f"Hand in region {i}!!!")
                            touch_map[i] = n
                            file.write(f"{center}\n")
                t = int(((n - touch_map[i]) / THRESHOLD) * 255) # color of the circle: linearly related to the given threshold
                if t > 255:
                    t = 255
                r = int(region.length // 12) # min(region[2] - region[0], region[3] - region[1]) // 4 # circle only fills half of the region
                # cv2.rectangle(image_np, (region[0], region[1]), (region[2], region[3]), (255 - t, t, 0), -1)
                # cv2.rectangle(image_np, (region[0], region[1]), (region[2], region[3]), (0, 0, 255), 2)
                p = region.centroid
                cv2.circle(overlay, (int(p.x), int(p.y)), r, (255 - t, t, 0), -1)
                draw_region(image_np, ir)
            alpha = 0.4
            image_np = cv2.addWeighted(overlay, alpha, image_np, 1 - alpha, 0) # overlay dots so it looks cooler :)
            #
            # # Calculate Frames per second (FPS)
            # num_frames += 1
            # elapsed_time = (datetime.datetime.now() - start_time).total_seconds()
            # fps = num_frames / elapsed_time
            # if (fps > 0):
            # Display FPS on frame
            # hand_detector.draw_fps_on_image("FPS : " + str(int(fps)),
            #                                 image_np)
            cv2.imshow(W_NAME,
                       cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR))
            if cv2.waitKey(25) & 0xFF == ord('q'):
                cv2.destroyAllWindows()
                break
            # else:
            #     print("frames processed: ", num_frames, "elapsed time: ",
            #           elapsed_time, "fps: ", str(int(fps)))
vid.release()
cv2.destroyAllWindows()
| true |
4139406b64f286362aadb6b9ed7b44bb3efc48b0 | Python | Gan-Jiang/util | /basic implementations/bit_manipulation.py | UTF-8 | 1,423 | 3.90625 | 4 | [] | no_license | '''
Some basic bit operations.
'''
def set_bit(n, i):
    """Return *n* with its i-th bit turned on."""
    mask = 1 << i
    return n | mask
def check_bit(n, i):
    """Return the isolated i-th bit of *n* (truthy when on, 0 when off)."""
    mask = 1 << i
    return n & mask
def multiply_2(n):
    """Double *n* using a left shift (the inverse, n >> 1, halves it)."""
    return n << 1
def toggle_bit(n, i):
    """Return *n* with the state of its i-th bit inverted."""
    flip_mask = 1 << i
    return n ^ flip_mask
def clear_bit(n, j):
    """Return *n* with its j-th bit turned off."""
    keep_mask = ~(1 << j)
    return n & keep_mask
def least_significant_bit(n):
    """Return the value of the lowest set bit of *n* (0 when n == 0)."""
    # Two's complement trick: n & -n isolates the rightmost 1-bit.
    return n & -n
def turn_on_all_bits_m(n, m):
    """Return *n* with all of its lowest *m* bits turned on.

    BUG FIX: the original computed ``n | (1 << m - 1)``; because ``-``
    binds tighter than ``<<``, that set only bit m-1 instead of all of
    the lowest m bits.
    """
    return n | ((1 << m) - 1)
def get_remainder(n, m):
    # n mod m via bit masking — only valid when m is a power of two.
    return n & (m - 1)
def is_power_2(n):
    """Return True iff *n* is a positive power of two.

    BUG FIX: the original returned True for n == 0 (since 0 & -1 == 0),
    but 0 is not a power of two.
    """
    return n > 0 and (n & (n - 1)) == 0
def turn_off_last_bit(n):
    """Return *n* with its lowest set bit cleared."""
    return (n - 1) & n
def turn_on_last_zero(n):
    """Return *n* with its lowest zero bit set."""
    return (n + 1) | n
def turn_off_last_consecutive_ones(n):
    """Return *n* with its trailing run of 1-bits cleared."""
    return (n + 1) & n
def turn_on_last_consecutive_zeros(n):
    """Return *n* with its trailing run of 0-bits set."""
    return (n - 1) | n
| true |
fe34dae905715774a97bad71118122f906b4a1a5 | Python | philippbayer/EcologyMaps | /data/suffGradient.py | UTF-8 | 1,849 | 3.625 | 4 | [
"MIT"
] | permissive | #!/usr/bin/python
""" parses data from sys.argv[1], builds a red-to-green colorgradient based on numerical values """
""" Is written specifically for areadata.csv """
import sys
def is_number(n):
    """Return True when *n* parses as a float, False otherwise."""
    try:
        float(n)
    except ValueError:
        return False
    return True
# parse the file, append every line cleaned up to an array
# (Python 2 script — note the print statements below.)
toParse = open(sys.argv[1])
lineArray = []
for line in toParse:
    line = line.replace(",",".")
    line = line.split(";")
    lineArray.append(line)
# build an array which holds the biggest value for each year (which is, due to the scaling, not 100
# array-structure: 2010, 2009, 2008, ..., 2003
# (columns 18..25 of each row hold the per-year values)
yearsMaxArray = [0,0,0,0,0,0,0,0]
i = 0
while i <= 7:
    for country in lineArray:
        # has to be converted to float, else weird stuff happens and wrong numbers are stored (maybe length of strings is compared?)
        if is_number(country[i+18]) and float(country[i+18]) > float(yearsMaxArray[i]):
            yearsMaxArray[i] = country[i+18]
    i += 1
# cheat a little for empty 2009 so we don't get zero division error,
yearsMaxArray[1] = 100
# take the maximum value overall
# NOTE(review): overallMaxVal is computed here but never used below.
overallMaxVal = 0.0
for country in lineArray:
    i = 18
    while i <= 25:
        if is_number(country[i]) and float(country[i]) > overallMaxVal:
            overallMaxVal = float(country[i])
        i += 1
# now, we need to transform all of our countries values for each year according to the yearsMaxArray
i = 0
b = 2010
print "Note: the first value is for green in the rgb-spectrum, red is 1-green, e.g. the second value"
print "# ACCORDING TO EACH YEAR'S MAXIMUM"
for maxVal in yearsMaxArray:
    print b
    convert = 1.0/float(maxVal)
    # now multiply this with every value
    for country in lineArray:
        if is_number(country[i+18]):
            print country[1], ":", float(country[i+18])*convert*255,":",(1-float(country[i+18])*convert)*255
    i += 1
    b -= 1
| true |
65618d178658c8f90e28ca99fd01e8e4768929fb | Python | yenbohuang/online-contest-python | /test_template.py | UTF-8 | 475 | 3.1875 | 3 | [
"Apache-2.0"
] | permissive | #
import unittest
class Solution(object):
    """Template solution class: replace testMethod per problem."""
    def testMethod(self, value):
        """
        Identity placeholder: returns *value* unchanged.

        (The original docstring was copied from another problem and
        described nums/target parameters that do not exist here.)
        """
        return value
class TestSolution(unittest.TestCase):
    """Unit-test scaffold exercising Solution.testMethod."""
    def setUp(self):
        # Fresh Solution instance before every test case.
        self.solution = Solution()
    def tearDown(self):
        # Nothing to clean up in this template.
        pass
    def test_case_1(self):
        self.assertEqual(self.solution.testMethod(3), 3)
# Run the test suite when executed as a script.
if __name__ == '__main__':
    unittest.main()
| true |
3cf4bb282f5c424240359e20a05e4637e252579f | Python | palladine/edu_python | /list_unique.py | UTF-8 | 382 | 3.046875 | 3 | [] | no_license | def non_unique(data):
    # Normalised copy of data: strings are compared case-insensitively.
    L = []
    for x in data:
        if isinstance(x,str):
            L.append(x.upper())
        else:
            L.append(x)
    # Keep every item whose normalised value occurs more than once.
    # (data.index(x) is x's first position; duplicates of x share that slot,
    # and L at that slot is x's normalised form, so the count is per value.)
    return [x for x in data if L.count(L[data.index(x)]) > 1]
# Demo: letters repeated in any case (and the int 7, listed twice) are reported.
a = non_unique(['P', 7, 'j', 'A', 'P', 'N', 'Z', 'i',
                'A', 'X', 'j', 'L', 'y', 's', 'K', 'g',
                'p', 'r', 7, 'b'])
print(a)
749c3c58cd00e173859c6186809d973b83884891 | Python | 278Mt/cotohappy | /examples/example_coreference.py | UTF-8 | 575 | 2.65625 | 3 | [] | no_license | #!/usr/bin/env python3.8
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 20 12:00:00 2019
COTOHA API for Python3.8
@author: Nicolas Toba Nozomi
@id: 278mt
"""
import cotohappy
# Demo: run COTOHA coreference resolution on a two-sentence Japanese text
# and print every referent found for each coreference cluster.
if __name__ == '__main__':
    coy = cotohappy.API()
    document = '太郎は友人です。彼は焼き肉を食べた。'
    kuzure = False  # input is well-formed text, not casual ("kuzure") style
    do_segment = True  # let the API split the document into sentences
    coreference = coy.coreference(document, kuzure, do_segment)
    print('\n#### coreference reference ####')
    for content in coreference.coreference:
        for referent in content.referents:
            print(referent)
| true |
d4810d4ca8e0701d9db41648c4d4f02769a6b9dd | Python | alextanhongpin/project-euler | /python/21-amicable-numbers.py | UTF-8 | 783 | 4.0625 | 4 | [] | no_license | """
Problem 21: Amicable numbers
"""
def amicable_number(n):
    """Return the sum of the proper divisors of *n* (all divisors below n)."""
    return sum(d for d in range(1, n) if n % d == 0)
def main():
    """Collect every amicable number below 10000 and print their sum (Python 2)."""
    amicable_numbers = set()
    for i in range(1, 10000):
        n = i
        o = amicable_number(n)
        # n and o are amicable when each equals the sum of the other's
        # proper divisors and they differ (perfect numbers are excluded).
        if amicable_number(o) == n and n != o:
            amicable_numbers.add(n)
            amicable_numbers.add(o)
    print "The sum of all amicable numbers under 10000 is:", sum(amicable_numbers)
if __name__ == '__main__':
    import timeit
    ITERATIONS = 10
    MESSAGE = "Function takes {} s to complete."
    # Average wall-clock time of main() over ITERATIONS runs (Python 2 print).
    print MESSAGE.format(timeit.timeit("main()",
                         number=ITERATIONS,
                         setup="from __main__ import main") / ITERATIONS)
| true |
b314c18fb27067fae6940a0b77ba4828df93784e | Python | manraz/aws_bro | /getLst.py | UTF-8 | 1,604 | 2.59375 | 3 | [] | no_license | '''
Author: Manu Babanu
Date: 09/04/2018
Description: Script to parse bro IDS log files. Takes log fields and fields
data from logs into list for use in application.
'''
import datetime
import pygeoip
import urllib.request
# function to parse all fields from log file into lists
def getNestLst(path):
    """Parse a Bro/Zeek log file at *path*.

    Returns (rows, field_names, source_ips, hosts); each row has its
    leading epoch timestamp replaced by a formatted date string.

    NOTE(review): columns 4 and 8 of each row are assumed to be the
    source IP and the host/domain — confirm against the #fields header.
    """
    file = open(path, 'r')
    valuesLst = []
    fieldsLst = []
    nestLst = []
    ipLst = []
    mostDomLst = []
    for line in file:
        if '#fields' in line:
            # Header line: remember the column names (minus the marker).
            fieldsLst = line.split()
            fieldsLst.remove('#fields')
        elif '#' in line:
            # Skip all other comment/metadata lines.
            continue
        else:
            valuesLst = line.split('\t')
            # takes the epoch timestamp and replace it with formated date
            if valuesLst[0]:
                valuesLst.insert(0, datetime.datetime.fromtimestamp(
                    float(valuesLst[0])).strftime('%Y-%m-%d %H:%M:%S'))
                valuesLst.pop(1)
            nestLst.append(valuesLst)
            ipLst.append(valuesLst[4])
            mostDomLst.append(valuesLst[8])
    return nestLst, fieldsLst, ipLst, mostDomLst
# takes ip and return geolocation json
def ipLocator(ip):
    """Look up *ip* in the local GeoLiteCity database; returns a record dict
    (or None when the address is unknown)."""
    GeoIPDatabase = '/home/ubuntu/aws_bro/GeoLiteCity.dat'  # hard-coded path
    ipData = pygeoip.GeoIP(GeoIPDatabase)
    record = ipData.record_by_name(ip)
    return record
# get company name of nic card from mac address. The best and easiest will be to use the IEEE Registration Autorithy oui.txt
def mac_info(mac):
    """Resolve a MAC address to its vendor name via the macvendors.com web API.

    NOTE(review): network call with no error handling — an unknown MAC
    yields HTTP 404, which raises urllib.error.HTTPError here.
    """
    url = "http://api.macvendors.com/"
    response = urllib.request.urlopen(url + mac)
    company = response.read().decode()
    return company
| true |
5720267d0f1caba1cfcdd8cf0b2b9cc730f902c3 | Python | KongBaiVso/Scrap-Bilibili-bullet-screen-of-Dasima | /get_all_danmu_and_writein.py | UTF-8 | 3,475 | 2.796875 | 3 | [] | no_license | import requests
import re
from lxml import etree
import datetime
import pandas as pd
import time
import selenium.webdriver
# Open the video author's profile page with Selenium and grab its HTML.
driver = selenium.webdriver.Chrome()
driver.get("https://space.bilibili.com/451618887/video?tid=0&keyword=&order=pubdate")
time.sleep(2)
response = driver.page_source
driver.close()
# Parse the HTML and read the total page count.
html = etree.HTML(response,etree.HTMLParser())
results = html.xpath('//div[@id=\"submit-video-list\"]/ul[@class=\"clearfix cube-list\"]//li/@data-aid')
all_pages = html.xpath("//span[@class=\"be-pager-total\"]/text()")
all_pages = re.findall("\d+",str(all_pages))[0]
# Fetch every page's HTML and collect all video IDs (data-aid attributes).
video_lastcode_list =[]
for page in range(eval(all_pages)):
    # NOTE(review): eval() on scraped text is unsafe; int(all_pages) would do.
    url = "https://space.bilibili.com/451618887/video?tid=0&page={}&keyword=&order=pubdate".format(page+1)
    # Load this page's HTML.
    driver = selenium.webdriver.Chrome()
    driver.get(url)
    time.sleep(2)
    response = driver.page_source
    driver.close()
    html = etree.HTML(response, etree.HTMLParser())
    # Collect this page's video IDs.
    video_lastcode = html.xpath('//div[@id=\"submit-video-list\"]/ul[@class=\"clearfix cube-list\"]//li/@data-aid')
    video_lastcode_list.extend(video_lastcode)
url_list = []  # all video URLs
for video_lastcode in video_lastcode_list:
    url = "https://www.bilibili.com/video/" + video_lastcode
    url_list.append(url)
print(url_list)
# Now start scraping the bullet-screen ("danmu") comments.
def download(url):
    """Fetch *url* with a desktop browser User-Agent; return (text, raw bytes)."""
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.92 Safari/537.36'}
    resp = requests.get(url, headers=headers)
    return resp.text, resp.content
# For each video: find its oid in the page HTML, pull the danmu XML from
# the API, parse text / timestamp / type for every comment, and append
# the result to take off.csv.
i = 0
for url in url_list:
    i+=1
    print("正在爬取第{}/{}个视频".format(i,len(url_list)))
    video_url = url
    response = download(video_url)[0]
    oid = re.findall("\d\d/\d\d/(\d{9})/",response)[0] # extract this video's oid
    danmu_url = "https://api.bilibili.com/x/v1/dm/list.so?oid=" + str(oid) # build the danmu API URL
    response = download(danmu_url)[1]
    html = etree.HTML(response,etree.HTMLParser())
    danmu_list = html.xpath('//d/text()') # comment texts
    # Field 4 of each <d p="..."> attribute is the epoch post time.
    # NOTE(review): eval() on scraped data is unsafe — int() would suffice;
    # the comprehension variable `i` also shadows the loop counter (safe in
    # Python 3 comprehension scope, but confusing).
    danmu_time_list = [datetime.datetime.fromtimestamp(eval(i.split(",")[4])).strftime("%Y-%m-%d %H:%M:%S") for i in html.xpath("//d/@p")] # comment post times
    def judge_class(x):
        # Map the numeric danmu mode to its Chinese category name.
        x = eval(x)
        if 1<= x <= 3:
            return "滚动弹幕"
        elif x==4:
            return "底端弹幕"
        elif x==5:
            return "顶端弹幕"
        elif x==6:
            return "逆向弹幕"
        elif x== 7:
            return "精准定位弹幕"
        else:
            return "高级弹幕"
    danmu_class_list = [judge_class(i.split(",")[1]) for i in html.xpath("//d/@p")] # comment types
    print(danmu_list)
    # Write everything scraped for this video to the CSV file.
    print("正在写入……")
    df = pd.DataFrame()
    df["弹幕内容"] = danmu_list
    df["发表时间"] = danmu_time_list
    df["弹幕类型"] = danmu_class_list
    print("一共",len(df),"条弹幕")
    try:
        df.to_csv("take off.csv", mode="a+", header=None, index=None, encoding="gb18030")
        print("第{}个视频的弹幕写入成功".format(i))
    except:
        print("第{}个视频的弹幕写入失败".format(i))
    time.sleep(1)
| true |
f9a82ce3d318b3959dcd7a1fff1baa0ae437526c | Python | mersted/python | /random/baseball.py | UTF-8 | 4,419 | 3.328125 | 3 | [] | no_license | # Calculates statistics of baseball player
# by inputting stats for each game
def main():
    """Prompt for per-game batting stats, printing a running slash line
    after each game and season totals at the end.

    NOTE(review): with num_games == 0 the season print below references
    a/ob/s before assignment and raises NameError.
    """
    num_games = integer_check("How many games?")
    tot_hits = 0
    tot_plate_apps = 0
    tot_walks = 0
    tot_tb = 0
    tot_runs = 0
    tot_rbi = 0
    tot_sb = 0
    tot_hr = 0
    tot_tr = 0
    tot_do = 0
    tot_sc = 0
    # Acquires each game's stats and stores data in a set of variables
    for x in range(num_games):
        h, pa, bb, sc, ru, rb, st = one_game()
        tbb, nd, nt, nh = the_total_bases(h)
        tot_hits += h
        tot_walks += bb
        tot_plate_apps += pa
        tot_tb += tbb
        tot_runs += ru
        tot_rbi += rb
        tot_sb += st
        tot_hr += nh
        tot_tr += nt
        tot_do += nd
        tot_sc += sc
        # Running slash line AVG/OBP/SLG; op (OPS) is computed but unused here.
        ob, a, s, op = percentages(tot_tb, tot_walks, tot_hits, tot_plate_apps, tot_sc)
        print("\nAfter Game {}:".format(x + 1))
        print("{0:.3f}/{1:.3f}/{2:.3f}".format(a, ob, s))
    # Season Stats
    print("\nSeason stats:")
    print("{0:.3f}/{1:.3f}/{2:.3f}".format(a, ob, s))
    print("{0} doubles, {1} triples, {2} home runs".format(tot_do, tot_tr, tot_hr))
    print("{0} SBs, {1} runs scored, {2} RBIs".format(tot_sb, tot_runs, tot_rbi))
# Gathers stats from one game
def one_game():
    """Prompt for one game's batting line; returns
    (hits, plate_apps, walks, sacs, runs, rbis, stolen_bases).

    NOTE(review): several validation bounds look questionable — e.g.
    walks are allowed up to hits + PAs, and runs are capped at PAs —
    confirm the intended rules.
    """
    # Plate appearances
    plate_apps = integer_check("PAs:")
    # Hits
    hits = integer_check("Hits:")
    while hits > plate_apps:
        print("Error: too many hits")
        hits = integer_check("Hits:")
    # Walks, Intentinal Walks, and Hit by Pitches
    walks = integer_check("BB/HBP/IBB")
    while walks > (hits + plate_apps):
        print("Error: too many walks")
        walks = integer_check("BB/HBP/IBB")
    # Sacrifices
    sacs = integer_check("Sacrifices:")
    while sacs > (hits + walks + plate_apps):
        print("Error: too many sacrifices")
        sacs = integer_check("Sacrifices:")
    # Runs
    runs = integer_check("Runs scored:")
    while runs > plate_apps:
        print("Error: too many runs")
        runs = integer_check("Runs scored:")
    # RBIs
    rbis = integer_check("Runs batted in:")
    while rbis > (plate_apps * 4):
        print("Error: too many RBIs")
        rbis = integer_check("Runs batted in:")
    # Stolen Bases
    stba = integer_check("Stolen bases:")
    while stba > (plate_apps * 3):
        print("Error: too many stolen bases")
        stba = integer_check("Stolen bases:")
    return hits, plate_apps, walks, sacs, runs, rbis, stba
# Calculates total bases using singles and extra base hits
def the_total_bases(hi):
    """Prompt for the breakdown of *hi* hits into extra-base hits.

    Returns (total_bases, doubles, triples, home_runs).

    BUG FIXES vs the original:
      * the Y/N question was run through integer_check(), so the answer
        could never equal 'Y'/'N' and total_bases was left unassigned
        (UnboundLocalError on return);
      * with hi == 0 the doubles/triples/homers counters were never
        initialised before being returned.
    """
    num_doubles = 0
    num_triples = 0
    num_homers = 0
    total_bases = 0
    if hi > 0:
        ans = input("Any extra base hits? (Y/N)")
        if ans == 'Y':
            num_xbh = integer_check("How many?")
            while num_xbh > hi:
                print("Can't have more extra base hits than hits.")
                num_xbh = integer_check("How many?")
            num_singles = hi - num_xbh
            for x in range(num_xbh):
                type_ans = input("At bat {0}: Double (2), Triple (3), or Home Run (4)?".format(x + 1))
                if type_ans == '2':
                    num_doubles += 1
                elif type_ans == '3':
                    num_triples += 1
                else:
                    num_homers += 1
            total_bases = (1 * num_singles) + (2 * num_doubles) + (3 * num_triples) + (4 * num_homers)
        else:
            total_bases = hi  # no extra-base hits: every hit is a single
    return total_bases, num_doubles, num_triples, num_homers
# Calculates the average, on base, slugging,
# and on base plus slugging percentages
def percentages(tob, bbs, hh, papp, ss):
    """Return (OBP, AVG, SLG, OPS) from total bases, walks, hits,
    plate appearances and sacrifices.

    At-bats = PA - walks - sacrifices.  ROBUSTNESS FIX: ratios with a
    zero denominator (no PAs / no at-bats yet) are reported as 0.0,
    where the original raised ZeroDivisionError.
    """
    ab = papp - bbs - ss
    obp = float((hh + bbs) / papp) if papp else 0.0
    avg = float(hh / ab) if ab else 0.0
    slg = float(tob / ab) if ab else 0.0
    ops = float(obp + slg)
    return obp, avg, slg, ops
# Try to change value inputted into an int
# If not a valid integer, asks again
# Uses a try except while loop
def integer_check(theString):
    """Prompt with *theString* until the user enters a valid integer; return it.

    BUG FIX: the original used a bare ``except:``, which also swallowed
    KeyboardInterrupt/EOFError and made the prompt impossible to abort.
    """
    while True:
        try:
            return int(input(theString))
        except ValueError:
            print("Not a valid integer")
| true |
379d77858ea7b031d773b61d9884b16e40baf03e | Python | adswati15/RSI-Stock-Screener | /RSI StockScreener.py | UTF-8 | 2,784 | 2.78125 | 3 | [] | no_license | # define parametetrs here
candle_width = "5m"  # yfinance candle interval
adx_limit = 20  # minimum ADX for a trending stock
rsi_limit = 40  # minimum RSI to consider
stock_price_limit = 20  # NOTE(review): defined but never used below
step_5_flag = 0  # 1 enables the extra "increasing RSI" filter (step 5)
rsi_type = "rsi_6"  # stockstats RSI column to use
import pandas as pd
import yfinance as yf
import datetime
from stockstats import StockDataFrame
# stocklist = ['MTSL', 'LPCN', 'CLRB', 'TMQ']
#stocklist = ['MIST' , 'ABUS' , 'RNET' , 'BCOV' , 'MOGO' , 'HLX' , 'REED' , 'CBAY' , 'XSPA', 'TMQ' , 'TNXP' , 'TUP' , 'OMI' , 'SALM' , 'YVR' , 'ENTX' , 'AMAG' , 'GNCA' , 'AIKI' , 'PRTY' , 'PEIX' , 'UMC' , 'MYOS' , 'CNTG' , 'PLG' , 'CLSN' , 'SINT' , 'MITO' , 'FI' , 'CPSH' , 'IMRN' , 'EQT' ]
def createList():
    """Return the 'Financial Instrument' column of the IB export CSV as a list."""
    column = pd.read_csv("stocks_list_07232020_fromIB2.csv")["Financial Instrument"]
    return column.tolist()
# Screen every listed ticker: download recent candles, compute ADX/RSI with
# stockstats, and collect those passing all filters into Final_List.
stocklist = createList()
Final_List = []
# Compatibility shim for older pandas_datareader/yfinance combinations.
pd.core.common.is_list_like = pd.api.types.is_list_like
for stock in stocklist:
    data = pd.DataFrame(columns=stocklist)
    # Last five calendar days of intraday candles.
    start_date = datetime.datetime.now() - datetime.timedelta(days=5)
    end_date = datetime.date.today()
    # start_date = datetime.date(2020,6,23)
    #end_date = "2020-06-23"
    data = yf.download(stock,interval = candle_width ,start=start_date, end=end_date)
    # Fill gaps in both directions so indicator math never sees NaNs.
    data.fillna(method='bfill', inplace=True)
    data.fillna(method='ffill', inplace=True)
    # StockDataFrame adds computed columns ('adx', rsi_type) on access.
    my_stock = StockDataFrame.retype(data)
    # UnComment down lines for evaluation
    # print(data)
    # print(my_stock['adx'])
    # print(my_stock[rsi_type])
    print("\n\n",stock)
    # Filter 1: trend strength above the ADX threshold.
    if my_stock['adx'][-1] > adx_limit:
        print("greater than adx_limit")
        # Filter 2: ADX strictly increasing over the last four candles.
        if my_stock['adx'][-1] > my_stock['adx'][-2] > my_stock['adx'][-3] > my_stock['adx'][-4]:
            print("increasing adx")
            # Filter 3: RSI above its threshold.
            if my_stock[rsi_type][-1] > rsi_limit:
                print( "rsi greater than rsi_limit")
                proceed = 1
                # Optional filter 5: RSI itself must also be rising.
                if step_5_flag == 1:
                    print("step 5 executed")
                    if my_stock[rsi_type][-1] > my_stock[rsi_type][-2] > my_stock[rsi_type][-3] > my_stock[rsi_type][-4]:
                        print("Increasing RSI")
                    else:
                        proceed = 0
                if proceed == 1:
                    print("adding to list with adx ", my_stock["adx"][-1], stock)
                    Final_List.append({"name" : stock , "adx" : my_stock["adx"][-1]})
    print("\n---------------\n\n")
df = pd.DataFrame(Final_List)
try:
    # Keep only the top 11 candidates by ADX; sort_values raises KeyError
    # on an empty DataFrame, which the except branch reports.
    df = df.sort_values(by=['adx'], ascending= False)
    print(df)
    df = df.iloc[0:11,:]
except:
    print("######################\nNO STOCK MEETING THE CRITERIA\n######################")
    pass
with open("Output_Stocks.txt", "w") as File:
    df.to_string(File, index = None)
    File.close()
830aa3db07190b2c52a17142c6cbd628bf68f26f | Python | mihokrusic/adventOfCode2020 | /tests/test01.py | UTF-8 | 1,042 | 2.640625 | 3 | [] | no_license | #!/usr/bin/env python3
import unittest
import os
os.sys.path.insert(0, os.getcwd())
from solutions import day01
from utility import inputs
class Part1(unittest.TestCase):
    """Checks day01.part1 against the sample input and the real puzzle input."""

    def test_01(self):
        numbers = [int(line) for line in inputs.read("input01")]
        self.assertEqual(day01.part1(numbers), 514579)

    def test_in(self):
        numbers = [int(line) for line in inputs.read("input01_actual")]
        self.assertEqual(day01.part1(numbers), 440979)
class Part2(unittest.TestCase):
    """Checks day01.part2 against the sample input and the real puzzle input."""

    def test_01(self):
        numbers = [int(line) for line in inputs.read("input01")]
        self.assertEqual(day01.part2(numbers), 241861950)

    def test_in(self):
        numbers = [int(line) for line in inputs.read("input01_actual")]
        self.assertEqual(day01.part2(numbers), 82498112)
# Allow running this test module directly with verbose per-test output.
if __name__ == '__main__':
    unittest.main(verbosity=2)
4c0ef49909de5b4d4fa81b933892dde8f31c6451 | Python | griefrelayer/django-simple-guestbook | /main/templatetags/extra_tags.py | UTF-8 | 1,159 | 2.75 | 3 | [] | no_license | from django import template
from datetime import datetime
# Registry Django scans for the custom tags/filters defined in this module.
register = template.Library()
@register.simple_tag
def url_replace(request, field, value):
    """Return the current query string with `field` replaced by `value`."""
    params = request.GET.copy()
    params[field] = value
    return params.urlencode()
@register.filter(name='range')
def get_range(start, end):
    """Expose range(start, end) to templates; non-integer bounds yield []."""
    try:
        result = range(start, end)
    except TypeError:
        result = []
    return result
@register.filter(name='time_since')
def time_since(date):
    """Render a unix timestamp `date` as a Russian "<n> <unit> ago" string."""
    elapsed = round((datetime.now().timestamp() - date))
    # Largest unit first; the first unit with more than one whole step
    # elapsed wins, otherwise we fall through to plain seconds.
    units = (
        (31536000, " лет назад"),
        (2592000, " месяцев назад"),
        (86400, " дней назад"),
        (3600, " часов назад"),
        (60, " минут назад"),
    )
    for unit_seconds, suffix in units:
        amount = elapsed / unit_seconds
        if amount > 1:
            return str(round(amount)) + suffix
    return str(round(elapsed)) + " секунд назад"
| true |
00e236b3ff6a7cb0dd099f852b2890e390662de3 | Python | 1vladal1/Lottery_ideas | /Lottery_Keno/ver_0.2/ualottery.py | UTF-8 | 17,915 | 3.421875 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import csv
import collections
import itertools
"""Класс LotteryFilter:
start -- с какого тиража начинать
stop -- каким тиражом заканчивать
template -- по каким лототронам и наборам шаров фильтровать тиражи
"""
class LotteryFilter(object):
    """Template for selecting lottery draws.

    Holds a draw-number range [start, stop] and an ordered list of
    pattern entries {"lototron": ..., "balls_set": ...}; a value of None
    in an entry matches any lototron / ball set.  The template always
    contains at least one entry.
    """
    __UP = "UP"
    __DOWN = "DOWN"
    def __init__(self, start_pos=None, stop_pos=None, lototron=None,
                 balls_set=None):
        self.__start = start_pos
        self.__stop = stop_pos
        self.__template = [{"lototron": lototron, "balls_set": balls_set}]
    def add(self, lototron=None, balls_set=None, index=-1):
        """Insert a pattern entry at `index`; out-of-range indices append."""
        if index > len(self.__template) or index < 0:
            index = len(self.__template)
        self.__template.insert(index, {"lototron": lototron, "balls_set": balls_set})
    def delete(self, index=-1):
        """Remove the entry at `index` (the last entry when `index` is out
        of range).  The template never shrinks below one entry.

        BUG FIX: the bounds check was `index > len(...)`, so calling
        delete(len(template)) slipped through and raised IndexError in
        pop(); the check is now `index >= len(...)`.
        """
        if len(self.__template) > 1:
            if index >= len(self.__template) or index < 0:
                self.__template.pop()
            else:
                self.__template.pop(index)
    def move(self, index, direction):
        """Swap the entry at `index` with its neighbour in `direction`
        ("UP" towards the front, "DOWN" towards the back); out-of-range
        moves are ignored."""
        if direction == self.__UP:
            if 0 < index < len(self.__template):
                self.__template[index], self.__template[index - 1] = \
                    self.__template[index - 1], self.__template[index]
        elif direction == self.__DOWN:
            if 0 <= index < len(self.__template) - 1:
                self.__template[index], self.__template[index + 1] = \
                    self.__template[index + 1], self.__template[index]
    def moveto(self, pos1, pos2):
        """Move the entry at `pos1` to position `pos2` by repeated
        single-step swaps (no-op when the positions are equal)."""
        step = 1 if pos1 < pos2 else -1
        direction = self.__DOWN if pos1 < pos2 else self.__UP
        for pos in range(pos1, pos2, step):
            self.move(pos, direction)
    def show(self):
        """Print the range and every pattern entry, one per line."""
        print("Filter is:")
        print("[", self.__start, ":", self.__stop, "]")
        for item in self.__template:
            print(item["lototron"], item["balls_set"])
        print("")
        return None
    @property
    def DOWN(self):
        return self.__DOWN
    @property
    def UP(self):
        return self.__UP
    @property
    def length(self):
        # Number of pattern entries in the template.
        return len(self.__template)
    @property
    def start(self):
        return self.__start
    @property
    def stop(self):
        return self.__stop
    @property
    def template(self):
        return self.__template
"""Класс Tirazh:
number -- номер тиража (число);
date -- дата тиража (строка);
lototron -- лототрон (строка);
balls_set -- набор шаров (строка);
balls -- шары выпавшие в тираже (кортеж)
"""
class Tirazh(object):
    """A single lottery draw: its number, date, lototron id, ball-set id
    and the tuple of drawn balls.  Constructed either empty or from a
    CSV row (a list of strings)."""

    def __init__(self, line=None):
        if line is None:
            # Empty draw: every field unset.
            self.__number = None
            self.__date = None
            self.__lototron = None
            self.__balls_set = None
            self.__balls = ()
        elif type(line) is list:
            # CSV row layout: number, date, lototron, ball set, balls...
            self.__number = int(line[0])
            self.__date = line[1]
            self.__lototron = line[2]
            self.__balls_set = int(line[3])
            self.__balls = tuple(int(ball) for ball in line[4:])

    def __sort(self):
        # Keep this private name: UALottery.sort() invokes it through the
        # mangled form tirazh._Tirazh__sort().
        self.__balls = tuple(sorted(self.__balls))

    def __str__(self):
        return (f"{self.__number} {self.__date} {self.__lototron} "
                f"{self.__balls_set} {self.__balls}")

    def shortinfo(self):
        """Print the draw header without the ball list."""
        print(self.__number, self.__date, self.__lototron, self.__balls_set)

    def get_combinations(self, n=2):
        """All n-ball combinations of this draw, balls sorted ascending."""
        return list(itertools.combinations(sorted(self.__balls), n))

    @property
    def length(self):
        return len(self.__balls)

    @property
    def number(self):
        return self.__number

    @property
    def date(self):
        return self.__date

    @property
    def lototron(self):
        return self.__lototron

    @property
    def balls_set(self):
        return self.__balls_set

    @property
    def balls(self):
        return self.__balls
""" Класс лотерея -- имеет такую структуру:
1. Свойство min_ball -- минимальный номер шара в лотерее
2. Свойство max_ball -- максимальный номер шара в лотерее
3. Свойство tirazh_length -- количество выпадающих шаров в тираже
4. Свойство lottery_length -- всего тиражей
5. lottery_result -- результаты тиражей
"""
class UALottery(object):
    """A whole lottery: its draw history plus ball-range metadata.

    Loads draws from a ';'-delimited CSV file (one Tirazh per row) and
    offers statistics, filtering and draw-comparison helpers.
    """
    def __init__(self, filename=None):
        self.__is_sort = False
        self.__min_ball = None
        self.__max_ball = None
        self.__tirazh_length = None
        self.__length = None
        self.filter_length = 0
        self.__results = []
        # Without a filename the lottery starts empty (used as the result
        # container by get_lottery_by_filter).
        if type(filename) is str:
            with open(filename, newline="") as lottery_file:
                lottery_reader = csv.reader(lottery_file, delimiter=";")
                for line in lottery_reader:
                    tirazh = Tirazh(line)
                    self.__results.append(tirazh)
            self.set_lottery_info()
    # Print a human-readable summary of the lottery metadata.
    def view_info(self):
        print("______________________________________________________________________")
        print("-= INFORMATION ABOUT LOTTERY =-")
        print("min_ball ", self.min_ball)
        print("max_ball ", self.max_ball)
        print("tirazh_length ", self.tirazh_length)
        print("lottery_length", self.length)
        print("is_sort ", self.is_sort)
        print("repeats ", self.repeats_teor())
        print("lototron stat", self.lototron_balls_set_statistics())
        print("")
        print("Last Tirazh in basis lottery is:")
        print(self.results[-1:][0])
        print("______________________________________________________________________")
    # Recompute metadata from own draws, or copy it from another lottery
    # (used after filtering, where the derived lottery keeps the ball range
    # of its source).
    def set_lottery_info(self, lottery = None):
        if lottery is None:
            self.__tirazh_length = self.__results[0].length
            self.__length = len(self.__results)
            min_max_ball = self.__find_min_max_ball()
            self.__min_ball = min_max_ball["min"]
            self.__max_ball = min_max_ball["max"]
        elif type(lottery) is UALottery:
            self.__tirazh_length = lottery.tirazh_length
            self.__min_ball = lottery.min_ball
            self.__max_ball = lottery.max_ball
            self.__length = len(self.__results)
    # Scan every draw for the smallest and largest ball ever drawn.
    def __find_min_max_ball(self):
        result = {"min": 1000, "max": 0}
        for tirazh in self.__results:
            tmp_min = min(tirazh.balls)
            tmp_max = max(tirazh.balls)
            if result["min"] > tmp_min:
                result["min"] = tmp_min
            if result["max"] < tmp_max:
                result["max"] = tmp_max
        return result
    # def print_tirazh(self, id=1):
    """Матрица выпавших шаров"""
    # Matrix of drawn balls: one tuple of balls per draw.
    def get_balls_array(self):
        result = tuple()
        for tirazh in self.results:
            result += (tirazh.balls,)
        return result
    """Вектор выпавших шаров"""
    # Flat vector of every drawn ball, in draw order.
    def get_balls_vector(self):
        result = tuple()
        for tirazh in self.results:
            for ball in tirazh.balls:
                result += (ball,)
        return result
    """Вектор шаров в определенной колонке (начиная с нулевой)"""
    # All balls drawn in a given (0-based) position across the draws.
    def get_balls_in_column(self, column):
        result = []
        for tirazh in self.results:
            result.append(tirazh.balls[column])
        return result
    """Выбор тиража (по id при flag == False, и по номеру тиража при True)"""
    # Fetch a draw by its official number (flag=True) or by list index
    # (flag=False); returns None when not found / out of range.
    def get_tirazh_by_number(self, position, flag = True):
        if flag:
            if position >= 1:
                for tirazh in self.__results:
                    if position == tirazh.number:
                        return tirazh
            else:
                return None
        else:
            if position < 0 or position > self.__length:
                return None
            else:
                return self.__results[position]
    """Сортировка шаров в тиражах по возрастанию"""
    # Sort the balls of every draw ascending (idempotent via __is_sort).
    def sort(self):
        if not self.__is_sort:
            for tirazh in self.__results:
                tirazh._Tirazh__sort()
            self.__is_sort = True
    """Фильтрация по заданому шаблону (лототрон, набор шаров) лотереи.
    Результат -- новый объект UALottery
    Значение None в шаблоне указывает на любой лототрон или набор шаров.
    """
    # Filter the lottery by a LotteryFilter: a run of consecutive draws must
    # match the whole pattern template; matching runs are copied into a new
    # UALottery (None in a pattern matches anything).
    def get_lottery_by_filter(self, filter_):
        if filter_.start is None:
            start_pos = 0
        else:
            start_pos = filter_.start - 1
        if filter_.stop is None:
            end_pos = self.__length - 1
        else:
            end_pos = filter_.stop - 1
        i = start_pos
        result = UALottery()
        while i <= end_pos: #- filter_.length + 1:
            flag = False
            k = i
            for item in filter_.template:
                # A trailing all-None pattern matches even past the range end.
                if ((k >= end_pos + 1) and (item["lototron"] is None) and
                    (item["balls_set"] is None)):
                    flag = True
                    break
                if k >= end_pos + 1:
                    break
                if ((self.__results[k].lototron == item["lototron"] or
                    item["lototron"] is None) and (item["balls_set"] is None or
                    self.__results[k].balls_set == item["balls_set"])):
                    flag = True
                    k += 1
                else:
                    flag = False
                    break
            if flag:
                # Copy the matched run (clipped to the range) into the result.
                for z in range(i,i+filter_.length):
                    if z <= end_pos:
                        tirazh = self.get_tirazh_by_number(z, False)
                        result._UALottery__results.append(tirazh)
                i += len(filter_.template)
            else:
                i += 1
        result.set_lottery_info(self)
        result.filter_length = filter_.length
        return result
    """Считает количество пар лототрон - набор шаров"""
    # Frequency of each (lototron, ball set) pair over all draws.
    def lototron_balls_set_statistics(self):
        result = collections.Counter()
        for tirazh in self.__results:
            pair = tirazh.lototron + str(tirazh.balls_set)
            result.update((pair, ))
        return result
    """Сколько раз выпал каждый шар в лотерее"""
    # How many times each ball number has been drawn overall.
    def balls_statistics(self):
        result = collections.Counter()
        for tirazh in self.__results:
            result.update(tirazh.balls)
        return result
    """Подсчитывает сколько необходимо тиражей для выпадания >= threshold шаров"""
    # Count how many consecutive draws it takes until >= threshold distinct
    # balls have appeared; returns one record per completed window plus a
    # final (min, max) window-length pair.
    def threshold_balls_out(self, threshold=None):
        m = 1
        min_, max_ = 1000, 0
        if (threshold is None or threshold > self.__max_ball or
            threshold <= self.__tirazh_length):
            threshold = self.__max_ball
        ball_matrix = [0 for i in range(0, self.__max_ball)]
        result = []
        tirazhs = ()
        for tirazh in self.__results:
            for ball in tirazh.balls:
                if ball_matrix[ball-1] == 0:
                    ball_matrix[ball-1] = 1
            if sum(ball_matrix) >= threshold:
                tirazhs += (tirazh.number, )
                result.append({"BallsCount": sum(ball_matrix), "TirazhCount": m, "Tirazhs": tirazhs})
                if m < min_:
                    min_ = m
                if m > max_:
                    max_ = m
                # Reset the window and start counting again.
                ball_matrix = [0 for i in range(0, self.__max_ball)]
                m = 1
                tirazhs = ()
            else:
                m += 1
                tirazhs += (tirazh.number, )
        result.append((min_, max_))
        return result
    """Подсчитывает сколько выпало шаров при tirazh_count тиражей"""
    # For fixed-size windows of tirazh_count draws, record how many distinct
    # balls appeared in each window.
    def tirazh_balls_out(self, tirazh_count = 1):
        if tirazh_count < 1 or tirazh_count > self.__length:
            tirazh_count = 1
        result = []
        ball_matrix = [0 for i in range(0, self.__max_ball)]
        m = 1
        tirazhs = ()
        for tirazh in self.__results:
            for ball in tirazh.balls:
                if ball_matrix[ball-1] == 0:
                    ball_matrix[ball-1] = 1
            if m == tirazh_count:
                tirazhs += (tirazh.number, )
                result.append({"BallsCount": sum(ball_matrix), "TirazhCount": m, "Tirazhs": tirazhs})
                ball_matrix = [0 for i in range(0, self.__max_ball)]
                m = 1
                tirazhs = ()
            else:
                m += 1
                tirazhs += (tirazh.number, )
        return result
    # Distinct-ball bitmap and count for an arbitrary iterable of draws.
    def t_balls(self, res):
        ball_matrix = [0 for i in range(0, self.__max_ball)]
        for tirazh in res:
            for ball in tirazh.balls:
                if ball_matrix[ball-1] == 0:
                    ball_matrix[ball-1] = 1
        result = {"Balls": ball_matrix, "BallsCount": sum(ball_matrix)}
        return result
    # Per-window ball frequency vectors for the windows produced by
    # threshold_balls_out / tirazh_balls_out.
    def sum_balls_out(self, balls_out):
        ball_matrix = [0 for i in range(0, self.__max_ball)]
        result = []
        for item in balls_out:
            for tirazh_number in item["Tirazhs"]:
                tmp = self.get_tirazh_by_number(tirazh_number)
                for ball in tmp.balls:
                    ball_matrix[ball-1] += 1
            result.append(tuple(ball_matrix))
            ball_matrix = [0 for i in range(0, self.__max_ball)]
        return result
    # One 0/1 row per draw marking which balls were drawn.
    def lottery_map(self):
        result = []
        for tirazh in self.__results:
            ball_matrix = [0 for i in range(0, self.__max_ball)]
            for ball in tirazh.balls:
                ball_matrix[ball-1] = 1
            result.append(tuple(ball_matrix))
        return result
    # Like lottery_map but marking only the ball at the given (1-based)
    # position of every draw.
    def position_map(self, position=1):
        result = []
        for tirazh in self.__results:
            ball_matrix = [0 for i in range(0, self.__max_ball)]
            ball = tirazh.balls[position-1]
            ball_matrix[ball-1] = 1
            result.append(tuple(ball_matrix))
        return result
    # Overlap mask between each draw and the draw `window` positions later.
    def compare_tirazhs_in_lottery(self, window=1):
        if window < 1:
            window = 1
        k = 0
        result = []
        while (k + window) < self.__length:
            tmp = compare_tirazhs(self.__results[k], self.__results[k+window])
            result.append(tmp)
            k += 1
        return result
    # Within each filter_length-sized block, compare every draw with the
    # block's last draw; result[j] collects the masks for offset j.
    def compare_block_tirazhs_in_lottery(self):
        result = []
        if self.filter_length > 1:
            for i in range(self.filter_length-1):
                result.append([])
            for i in range(0, self.length - self.filter_length, self.filter_length):
                for j in range(self.filter_length-1):
                    #print(self.__results[i+j].number, self.__results[i+self.filter_length-1].number)
                    tmp = compare_tirazhs(self.__results[i+j], self.__results[i+self.filter_length-1])
                    result[j].append(tmp)
        return result
    # Overlap masks of every draw against the most recent draw.
    def compare_tirazhs_with_last(self):
        result = []
        last_tirazh = self.__results[-1:][0]
        for tirazh in self.__results[:-1]:
            tmp = compare_tirazhs(tirazh, last_tirazh)
            result.append(tmp)
        return result
    # Overlap masks of one fixed draw (by list index) against all later draws.
    def compare_tirazhs_with_static(self, tirazh_number):
        result = []
        static = self.get_tirazh_by_number(tirazh_number, False)
        for tirazh in self.__results[tirazh_number+1:]:
            tmp = compare_tirazhs(static, tirazh)
            result.append(tmp)
        return result
    # Frequency of every n-ball combination over all draws.
    def combinations_statistics(self, n=2):
        result = collections.Counter()
        for tirazh in self.__results:
            combinations = tirazh.get_combinations(n)
            for combination in combinations:
                result.update((combination,))
        return result
    """Среднее число повторов и предидущего тиража в следующий"""
    # Expected number of balls a draw shares with the previous one.
    def repeats_teor(self):
        return (self.tirazh_length**2)/self.max_ball
    """Теоретическая вероятность появления комбинаций 1,2,3,... в лотерее"""
    # Theoretical probability that a chosen m-ball combination appears in a
    # draw: product of falling factorials tirazh_length!/(t-m)! over
    # max_ball!/(max-m)!.
    def probability_of_combination_teor(self, m=1):
        if m > self.tirazh_length:
            m = self.tirazh_length
        a = 1
        b = 1
        for i in range(self.tirazh_length-m+1, self.tirazh_length+1):
            a *= i
        for i in range(self.max_ball-m+1, self.max_ball+1):
            b *= i
        return a/b
    @property
    def min_ball(self):
        return self.__min_ball
    @property
    def max_ball(self):
        return self.__max_ball
    @property
    def tirazh_length(self):
        return self.__tirazh_length
    @property
    def length(self):
        return self.__length
    @property
    def results(self):
        return self.__results
    @property
    def is_sort(self):
        return self.__is_sort
def compare_tirazhs(tirazh1, tirazh2):
    """Positional overlap mask between two draws.

    Returns a tuple the length of tirazh1's balls with 1 at the position
    of every ball of tirazh1 that also occurs in tirazh2 (only the first
    occurrence of a duplicated ball is marked), 0 elsewhere.
    """
    balls = tirazh1.balls
    return tuple(
        1 if ball in tirazh2.balls and balls.index(ball) == pos else 0
        for pos, ball in enumerate(balls)
    )
680a39503c4c81add3bb11975be49a56397873c0 | Python | MidnightJava/adventOfCode | /AocPython/src/myAoc/2015/Day22.py | UTF-8 | 6,176 | 2.953125 | 3 | [] | no_license | '''
Created on Dec 22, 2015
@author: maleone
'''
from collections import namedtuple
import random
from _collections import defaultdict
import copy
# One spell: mana cost, instant damage/heal, and per-turn effect stats
# (armor bonus, mana recharge) applied for `duration` turns.
Spell = namedtuple('Spell', 'cost damage heal armor recharge duration ')
spells = []
wins = []                  # mana totals of every winning play-through
winh = defaultdict(int)    # histogram: mana spent -> number of wins
spells.append(Spell(53, 4, 0, 0, 0, 0))     # Magic Missile
spells.append(Spell(73, 2, 2, 0, 0, 0))     # Drain
spells.append(Spell(113, 0, 0, 7, 0, 6))    # Shield
spells.append(Spell(173, 3, 0, 0, 0, 6))    # Poison
spells.append(Spell(229, 0, 0, 0, 101, 5))  # Recharge
simCount = 0
loseCount = 0
winCount = 0
def validSpells(state):
    """Spells the player may cast now: affordable with the current mana and
    not still active (an effect on its final tick may be recast)."""
    effects = state['effects']
    return [
        spell for spell in spells
        if spell.cost <= state['mana']
        and (spell not in effects or effects[spell] == 1)
    ]
def checkEnd(state):
    """Return True when the fight is over, updating the global tallies.

    The player loses on 0 hit points or 0 mana; the boss losing records
    the mana spent in `wins`.  A finished state is flagged via
    state['part2Done'] so repeated calls return True without re-counting.
    """
    global loseCount, winCount
    if state['part2Done']:
        return True
    player_dead = state['hit'][0] <= 0 or state['mana'] <= 0
    boss_dead = state['hit'][1] <= 0
    if player_dead:
        loseCount += 1
    elif boss_dead:
        wins.append(state['spent'])
        winCount += 1
    if player_dead or boss_dead:
        state['part2Done'] = True
        return True
    return False
def checkEnd2(state):
    """Like checkEnd, but returns a (game_over, player_won) pair.

    BUG FIX: the already-finished branch returned a bare bool while every
    other branch returned a 2-tuple, so callers unpacking res[0]/res[1]
    crashed with a TypeError; it now returns (True, False).
    """
    global loseCount, winCount
    if state['part2Done']:
        return True, False
    if state['hit'][0] <= 0 or state['mana'] <= 0:
        loseCount += 1
        state['part2Done'] = True
        return True, False
    if state['hit'][1] <= 0:
        wins.append(state['spent'])
        winCount += 1
        state['part2Done'] = True
        return True, True
    return False, False
# Recursively explore every legal spell sequence from `state`, the player
# casting `spell` this turn.  Outcomes accumulate in the module globals
# (wins / winCount / loseCount / simCount).  Python 2 print syntax.
def sim(state, spell):
    # Progress indicator: cheapest win found so far.
    print min(wins) if wins else None
    global simCount
    simCount += 1
    # state['hit'][0] -= 1 # part 2
    if checkEnd(state):
        return
    # Player-turn effect tick: Poison damages the boss, Recharge restores
    # mana; Shield's armor is added on its first tick (counter == 6) and
    # removed when the counter reaches 1 and the effect expires.
    delList = []
    for e in state['effects'].keys():
        state['hit'][1] -= e.damage
        state['mana'] += e.recharge
        if state['effects'][e] == 6:
            state['armor'] += e.armor
        if state['effects'][e] == 1:
            state['armor'] -= e.armor
            delList.append(e)
        else:
            state['effects'][e] -= 1
    for x in delList:
        del state['effects'][x]
    delList = []
    if checkEnd(state):
        return
    # Cast this turn's spell: durations start an effect, instant spells
    # apply their damage/heal immediately.
    if spell.duration:
        state['effects'][spell] = spell.duration
    else:
        state['hit'][1] -= spell.damage
        state['hit'][0] += spell.heal
    if checkEnd(state):
        return
    # Boss-turn effect tick (same bookkeeping as above).
    for e in state['effects']:
        state['hit'][1] -= e.damage
        state['mana'] += e.recharge
        if state['effects'][e] == 6:
            state['armor'] += e.armor
        if state['effects'][e] == 1:
            state['armor'] -= e.armor
            delList.append(e)
        else:
            state['effects'][e] -= 1
    for x in delList:
        del state['effects'][x]
    delList = []
    if checkEnd(state):
        return
    # print "armor", state['armor']
    # Boss attack: 10 damage reduced by armor, minimum 1.
    state['hit'][0] -= max(1, 10 - state['armor'])
    if checkEnd(state):
        return
    # Branch on every castable spell.  NOTE(review): the loop variable
    # shadows the `spell` parameter.
    for spell in validSpells(state):
        s2 = copy.deepcopy(state)
        s2['mana'] -= spell.cost
        s2['spent'] += spell.cost
        sim(s2, spell)
#Tried this when there was a bug in sim(). Eventually gets the right answer, but takes several minutes
# Abandoned Monte-Carlo variant: plays up to a million random games, picking
# a random spell each turn, and records winning mana totals in wins/winh.
# It unpacks checkEnd()'s plain-bool result as res[0]/res[1], which is why it
# was shelved (see the comment above).  Python 2 (xrange, print statement).
def sim2():
    for i in xrange(1000000):
        state = {'hit':[50,71], 'mana':500, 'armor': 0, 'spent':0, 'effects': {}, 'part2Done': False}
        part2Done = False
        while not part2Done:
            global simCount
            simCount += 1
            state['hit'][0] -= 1 # part 2
            res = checkEnd(state)
            if res[0]:
                if res[1]:
                    wins.append(state['spent'])
                    winh[state['spent']] += 1
                part2Done = True
            # Player-turn effect tick (Poison / Recharge / Shield expiry).
            for e in state['effects'].keys():
                state['hit'][1] -= e.damage
                state['mana'] += e.recharge
                if state['effects'][e] == 6:
                    state['armor'] += e.armor
                if state['effects'][e] == 1:
                    state['armor'] -= e.armor
                    del state['effects'][e]
                else:
                    state['effects'][e] -= 1
            res = checkEnd(state)
            if res[0]:
                if res[1]:
                    wins.append(state['spent'])
                    winh[state['spent']] += 1
                part2Done = True
            # Pick a random spell; retry unless it is affordable and not
            # already active, giving up when nothing is castable at all.
            spell = spells[random.randint(0,4)]
            if spell.cost > state['mana'] or (spell in state['effects'].keys() and state['effects'][spell] != 1):
                if len(validSpells(state)) == 0:
                    part2Done = True
                continue
            state['mana'] -= spell.cost
            state['spent'] += spell.cost
            if spell.duration:
                state['effects'][spell] = spell.duration
            else:
                state['hit'][1] -= spell.damage
                state['hit'][0] += spell.heal
            res = checkEnd(state)
            if res[0]:
                if res[1]:
                    wins.append(state['spent'])
                    winh[state['spent']] += 1
                part2Done = True
            # Boss-turn effect tick followed by the boss attack.
            for e in state['effects'].keys():
                state['hit'][1] -= e.damage
                state['mana'] += e.recharge
                state['armor'] += e.armor if state['effects'][e] == 6 else 0
                if state['effects'][e] == 1:
                    state['armor'] -= e.armor
                    del state['effects'][e]
                else:
                    state['effects'][e] -= 1
            state['hit'][0] -= max(1, 10 - state['armor'])
            res = checkEnd(state)
            if res[0]:
                if res[1]:
                    wins.append(state['spent'])
                    winh[state['spent']] += 1
                part2Done = True
        if i % 10000 == 0:
            print i, min(wins) if len(wins) > 0 else None
# Try each possible opening spell; sim() recursively explores every legal
# continuation (player: 50 HP / 500 mana, boss: 71 HP).
for s in spells:
    state = {'hit':[50,71], 'mana':500, 'armor': 0, 'spent':0, 'effects': {}, 'part2Done': False}
    state['mana'] -= s.cost
    state['spent'] += s.cost
    sim(state, s)
# sim2()
print "Lowest cost to win:", min(wins), simCount, loseCount, winCount
# od = collections.OrderedDict(sorted(winh.items()))
# print od
| true |
01aa35790ba041010701dbab7dfa1a3b301dfd24 | Python | anubhav9199/funcode | /star_pattern.py | UTF-8 | 568 | 3.84375 | 4 | [] | no_license | import turtle
def star(turtle, size):
    """Draw a five-pointed fractal star with the given pen.

    Each arm of length `size` carries a sub-star one third the size at
    its tip; recursion stops once an arm would be 10 units or shorter.
    """
    palette = ('red', 'yellow', 'green', 'blue', 'white')
    if size <= 10:
        return
    turtle.begin_fill()
    for shade in palette:
        turtle.color(shade)
        turtle.forward(size)
        star(turtle, size // 3)
        turtle.left(216)
    turtle.end_fill()
def main():
    """Open a turtle window on a black background and draw one big star."""
    t = turtle.Turtle()
    t.getscreen().bgcolor("black")
    # Start left of centre so the 360-unit star fits on screen.
    t.penup()
    t.goto(-200, 60)
    t.pendown()
    t.speed(1000)
    star(t, 360)
    # Keep the window open until the user closes it.
    turtle.done()
if __name__ == '__main__':
    main()
9763e1a951a7fcb74f7c3689b61b687eb991b847 | Python | donnex/pynotifikationnu | /notifikation_nu.py | UTF-8 | 1,322 | 2.796875 | 3 | [] | no_license | import urllib
try:
import json
except ImportError:
import simplejson as json
class NotifikationNuApiError(Exception):
    """Raised when the notifikation.nu API reports a failed request."""
    pass
class NotifikationNu(object):
    """A library that provides a python interface to the
    Notifikation.nu API
    """
    # NOTE(review): urllib.urlencode/urlopen are Python 2 APIs; under
    # Python 3 these live in urllib.parse / urllib.request.
    def __init__(self, api_key):
        self.api_key = api_key
        # API keys are fixed-length tokens; reject anything else early.
        if len(self.api_key) != 40:
            raise ValueError('API key must be 40 characters.')
    def send_notification(self, notification_id, message, category=None, event=None):
        """Send a notification to the notification_id with message as
        notification content. Category and event will override the
        values set on notifikation.nu.

        Raises NotifikationNuApiError when the API reply's status flag
        is falsy.
        """
        self.api_url = 'http://notifikation.nu/api/%s/send_notification/' % (self.api_key,)
        self.post_data = {
            'id': notification_id,
            'message': message,
        }
        # Optional fields are only sent when given, so server-side
        # defaults stay in effect otherwise.
        if category:
            self.post_data['category'] = category
        if event:
            self.post_data['event'] = event
        params = urllib.urlencode(self.post_data)
        # POST the form-encoded payload and parse the JSON reply.
        self.api_reply_content = urllib.urlopen(self.api_url, params).read()
        self.api_reply = json.loads(self.api_reply_content)
        if not self.api_reply['status']:
            raise NotifikationNuApiError(self.api_reply['message'])
| true |
8ad81baae31bbfa3566e9599d7833c8230626117 | Python | mkukar/RaspiRadio | /Programming/writelcd.py | UTF-8 | 1,245 | 3.25 | 3 | [] | no_license | import sys, serial, time
# Write up to two command-line arguments to a 16x2 serial LCD.
# NOTE(review): `global` at module level is a no-op; serialport is unused.
global serialport
LCD = serial.Serial('/dev/ttyAMA0', 9600)
LCD.open()
# Clear the display (0xFE is the LCD command prefix).
LCD.write('\xFE\x01')
# Exactly two line arguments are required, otherwise display nothing.
if len(sys.argv) != 3:
    LCD.close()
else:
    # Move the cursor to the start of the first line.
    LCD.write('\xFE\x80')
    # Text longer than 16 characters is truncated with a '...' suffix.
    if len(str(sys.argv[1])) > 16:
        LCD.write(str(sys.argv[1])[0:13] + '...')
    else:
        # Centre the text by alternately padding left and right until it
        # is 16 characters wide.
        topLine = str(sys.argv[1])
        switch = 0
        while len(topLine) < 16:
            if switch == 0:
                switch = 1
                topLine = ' ' + topLine
            else:
                switch = 0
                topLine = topLine + ' '
        LCD.write(topLine)
    # Move the cursor to the start of the second line.
    LCD.write('\xFE\xC0')
    # Same truncate-or-centre treatment for the second argument.
    if len(str(sys.argv[2])) > 16:
        LCD.write(str(sys.argv[2])[0:13] + '...')
    else:
        # Centre the text by alternately padding left and right.
        bottomLine = str(sys.argv[2])
        switch = 0
        while len(bottomLine) < 16:
            if switch == 0:
                bottomLine = ' ' + bottomLine
                switch = 1
            else:
                bottomLine = bottomLine + ' '
                switch = 0
        LCD.write(bottomLine)
LCD.close()
| true |
7215fb03fa76ea7673960db5f6ac89a3b3775bd0 | Python | k1nk33/django_test | /src/newsletters/views.py | UTF-8 | 2,940 | 2.75 | 3 | [] | no_license | # Import from settings.py for send_mail
from django.conf import settings
from django.core.mail import send_mail
from django.shortcuts import render
from .forms import SignUpForm, ContactForm
# # Create your views here.
def home(request):
    """Render the landing page with a sign-up form and store valid posts.

    On GET an unbound SignUpForm is shown; on POST the bound form is
    validated, a default name is supplied when the field was left blank,
    and the instance is saved once.

    BUG FIX: the original called form.save(commit="False") -- the string
    "False" is truthy, so the record was written to the database twice,
    the first time before the default name was applied.  commit=False
    defers the write until instance.save().
    """
    title = "My Title"
    # Bind the form to POST data when present, otherwise render it empty.
    form = SignUpForm(request.POST or None)
    # Context keys map to template tags in base.html.
    context = {
        "template_title": title,
        "form": form
    }
    if form.is_valid():
        # Build the model instance without hitting the database yet.
        instance = form.save(commit=False)
        name = form.cleaned_data.get('name')
        # If the name was left empty, fall back to a default.
        if not name:
            name = "Darren Dowdall"
        instance.name = name
        # Single database write, after the default has been applied.
        instance.save()
        # Replace the page title to acknowledge the submission.
        context = {
            "template_title": "Cheers!",
        }
    return render(request, "base.html", context)
# Second method for forms
def contact(request):
    """Render the contact page; on a valid POST, assemble (but do not
    send -- the send_mail call is commented out) a contact message."""
    form = ContactForm(request.POST or None)
    if form.is_valid():
        # # For each Key/Value pair in the cleaned data dict
        # for key in form.cleaned_data:
        #     # Display the corresponding index
        #     print form.cleaned_data.get(key)
        # Or to display the key. value pairs
        # for key, value in form.cleaned_data.iteritems():
        #     print key, value
        email = form.cleaned_data.get('email')
        message = form.cleaned_data.get('message')
        name = form.cleaned_data.get('name')
        # Compose the email subject and body from the submitted fields.
        subject = 'Test Contact Form'
        contact_msg = "%s : %s via %s" % (
            name,
            message,
            email,
        )
        # from_email = 'someone@somwhere.com'
        # to_email = ['someonelse@somewherelse.com']
        # send_mail(
        #     subject,
        #     contact_msg,
        #     from_email,
        #     to_email,
        #     fail_silently=False,
        # )
    context = {
        "form": form,
    }
    return render(request, 'forms.html', context)
| true |
93d12f015c8ea5f9a9c8f80d7efa18455e22a218 | Python | englhardt/adventofcode2019 | /11/solve.py | UTF-8 | 3,069 | 2.859375 | 3 | [
"MIT"
] | permissive | import itertools
import operator
from queue import SimpleQueue
class VM():
    """Intcode interpreter driving the AoC 2019 day 11 painting robot.

    The robot starts at (0, 0) facing up.  Opcode 3 reads the colour of
    the current panel (default 0, or `start_color` for the origin);
    opcode 4 alternately paints the panel and turns/advances the robot.
    run() returns the dict of painted panels {(x, y): colour}.

    FIX: removed the two unreachable statements that followed the
    `while True` interpreter loop (the loop only exits by returning on
    opcode 99).
    """
    def __init__(self, d, start_color=None):
        # Copy the program and append scratch memory for opcode-9 offsets.
        self.d = d.copy()
        self.d += [0] * 10000
        self.i = 0          # instruction pointer
        self.base = 0       # relative base for parameter mode 2
        self.io = SimpleQueue()
        self.pos = [0, 0]
        self.dir = 0        # index into dir_v; 0 is "up"
        self.dir_v = [(0, -1), (1, 0), (0, 1), (-1, 0)]
        self.img = {} if start_color is None else {(0, 0): start_color}

    def run(self):
        """Execute until opcode 99 and return the painted-panel dict."""
        i = self.i
        d = self.d

        def get_val(imm, i):
            return d[get_addr(imm, i)]

        def get_addr(imm, i):
            # 1 = immediate, 2 = relative-base, default 0 = position mode.
            if imm == 1:
                return i
            elif imm == 2:
                return self.base + d[i]
            else:
                return d[i]

        output = None
        o_paint = True  # opcode-4 outputs alternate: paint, then turn
        while True:
            instr = d[i] % 100
            imm_a, imm_b, imm_c = d[i] // 100 % 10, d[i] // 1000 % 10, d[i] // 10000
            if instr == 1:  # add
                d[get_addr(imm_c, i+3)] = get_val(imm_a, i+1) + get_val(imm_b, i+2)
                i += 4
            elif instr == 2:  # multiply
                d[get_addr(imm_c, i+3)] = get_val(imm_a, i+1) * get_val(imm_b, i+2)
                i += 4
            elif instr == 3:  # input: read the colour under the robot
                d[get_addr(imm_a, i+1)] = self.img.get(tuple(self.pos), 0)
                i += 2
            elif instr == 4:  # output: paint, or turn and advance
                output = get_val(imm_a, i+1)
                i += 2
                self.i = i
                if o_paint:
                    self.img[tuple(self.pos)] = output
                    o_paint = False
                else:
                    # 1 = turn right, 0 = turn left, then move one step.
                    self.dir = (self.dir + 1) % 4 if output else (self.dir - 1) % 4
                    o_paint = True
                    self.pos[0] += self.dir_v[self.dir][0]
                    self.pos[1] += self.dir_v[self.dir][1]
            elif instr == 5:  # jump-if-true
                if get_val(imm_a, i+1) != 0:
                    i = get_val(imm_b, i+2)
                else:
                    i += 3
            elif instr == 6:  # jump-if-false
                if get_val(imm_a, i+1) == 0:
                    i = get_val(imm_b, i+2)
                else:
                    i += 3
            elif instr == 7:  # less-than
                d[get_addr(imm_c, i+3)] = int(get_val(imm_a, i+1) < get_val(imm_b, i+2))
                i += 4
            elif instr == 8:  # equals
                d[get_addr(imm_c, i+3)] = int(get_val(imm_a, i+1) == get_val(imm_b, i+2))
                i += 4
            elif instr == 9:  # adjust relative base
                self.base += get_val(imm_a, i+1)
                i += 2
            else:
                assert d[i] == 99
                return self.img
# Part 1: count the panels painted at least once when starting on black.
d = list(map(int, open("input.txt").read().split(",")))
img = VM(d).run()
print(len(img.keys()))
# Part 2: start on a white panel and render the painted registration
# identifier as ASCII art ('#' for white, space otherwise).
img = VM(d, 1).run()
x_max = max(map(operator.itemgetter(0), img.keys()))
y_max = max(map(operator.itemgetter(1), img.keys()))
for y in range(y_max+1):
    s = ""
    for x in range(x_max+1):
        v = str(img.get((x, y), " "))
        s += "#" if v == "1" else " "
    print(s)
| true |
53e9495abd53f11c3ec7636f3d7f25619149a730 | Python | voidlessVoid/advent_of_code_2019 | /day_05/mischa/day05.py | UTF-8 | 1,854 | 2.984375 | 3 | [] | no_license | data = open('day05_input.txt')
# The puzzle input is a single comma-separated line of intcode integers.
# NOTE(review): the file handle opened above is never closed.
lines = data.readline().split(',')
lines1 = [int(x.strip()) for x in lines]
def get_opcode_mode(i):
    """Split an intcode instruction value into (opcode, mode1, mode2, mode3).

    The value is zero-padded to five digits: the last two digits are the
    opcode and the remaining three are the parameter modes, with the mode
    of the first parameter rightmost.
    """
    digits = str(i).zfill(5)
    return digits[-2:], digits[-3], digits[-4], digits[-5]
def get_par(copy_l, op, mode, count, par_num):
    """Resolve parameter `par_num` of the instruction at index `count`.

    Position mode (0) dereferences the raw value as an address, except
    for the input/output opcodes (3, 4) and the third (write) parameter,
    which are always returned raw.
    """
    raw = copy_l[count + par_num]
    dereference = (
        int(mode) == 0
        and int(op) not in (3, 4)
        and int(par_num) != 3
    )
    return copy_l[raw] if dereference else raw
def part_1(inp, inp_num):
    """Run the intcode program `inp` with `inp_num` as the only input.

    Supports opcodes 1 (add), 2 (multiply), 3 (input), 4 (output),
    5/6 (jump-if-true/false) and 7/8 (less-than/equals), halting at 99.
    Returns the last value emitted by opcode 4, or '' if the program
    never produced output.

    Fixes over the original: the second and third parameters are only
    read when they exist (the unconditional `copy[i+3]` lookup raised
    IndexError for programs ending in a short instruction such as
    [3, 0, 4, 0, 99]), and a leftover debug print in the equals branch
    was removed.
    """
    copy = inp.copy()
    i = 0
    last_output = ''
    while i <= len(copy) - 2 and copy[i] != 99:
        op, mod1, mod2, mod3 = get_opcode_mode(copy[i])
        par1 = get_par(copy, op, mod1, i, 1)
        # Guard the trailing parameters: 2-word instructions near the end
        # of the program have no third (or even second) operand.
        par2 = get_par(copy, op, mod2, i, 2) if i + 2 < len(copy) else None
        par3 = copy[i + 3] if i + 3 < len(copy) else None
        if int(op) == 1:
            copy[par3] = par1 + par2
            i += 4
        elif int(op) == 2:
            copy[par3] = par1 * par2
            i += 4
        elif int(op) == 3:
            # par1 is the raw write address for opcode 3.
            copy[par1] = inp_num
            i += 2
        elif int(op) == 4:
            last_output = copy[par1]
            i += 2
        elif int(op) == 5:
            i = int(par2) if int(par1) != 0 else i + 3
        elif int(op) == 6:
            i = int(par2) if int(par1) == 0 else i + 3
        elif int(op) == 7:
            copy[par3] = 1 if int(par1) < int(par2) else 0
            i += 4
        elif int(op) == 8:
            copy[par3] = 1 if int(par1) == int(par2) else 0
            i += 4
    return last_output
# Part 1: run the diagnostic program with input value 1.
print(part_1(lines1,1))
# Part 2: run it again with input value 5.
print(part_1(lines1,5))
| true |
c8cc7bcaee1871e7c3e5697be9758fb3b2c344eb | Python | SeoWeon-Kyung/Python-Seminar | /seaborn_tutorial.py | UTF-8 | 2,177 | 2.703125 | 3 | [] | no_license | # %%
import os
import re
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# Report the seaborn version this tutorial was written against.
print("Seaborn version : ", sns.__version__)
sns.set()
sns.set_style('darkgrid')
# Downloads the bundled example dataset on first use (needs network access).
penguins = sns.load_dataset('penguins')
#sns.histplot(data=penguins, x="flipper_length_mm", hue="species", multiple="stack")
#sns.despine(left=True, bottom=True)
# %%
"""
sns.kdeplot(data=penguins, x="flipper_length_mm", hue="species", multiple="stack")
# %%
sns.displot(data=penguins, x="flipper_length_mm", hue="species", col="species")
# %%
f, axs = plt.subplots(1, 2, figsize=(8, 4), gridspec_kw=dict(width_ratios=[4, 3]))
sns.scatterplot(data=penguins, x="flipper_length_mm", y="bill_length_mm", hue="species", ax=axs[0])
sns.histplot(data=penguins, x="species", hue="species", shrink=.8, alpha=.8, legend=False, ax=axs[1])
f.tight_layout()
# %%
tips = sns.load_dataset("tips")
g = sns.relplot(data=tips, x="total_bill", y="tip")
g.ax.axline(xy1=(10, 2), slope=.2, color="b", dashes=(5, 2))
# %%
f1, ax1 = plt.subplots()
f2, ax2 = plt.subplots(1, 2, sharey=True)
# %%
g = sns.FacetGrid(penguins)
# %%
g = sns.FacetGrid(penguins, col="sex", height=3.5, aspect=.75)
# %%
sns.jointplot(data=penguins, x="flipper_length_mm", y="bill_length_mm", hue="species")
# %%
sns.pairplot(data=penguins, hue="species")
# %%
sns.jointplot(data=penguins, x="flipper_length_mm", y="bill_length_mm", hue="species", kind="hist")
# %%
sns.pairplot(data=penguins, hue="species", kind="hist")
"""
# %%
flights = sns.load_dataset("flights")   # monthly airline passengers, 1949-1960
"""
sns.set_color_codes()
current_palette = sns.color_palette()
sns.palplot(current_palette)
sns1 = sns.relplot(data=flights, x="year", y="passengers", hue="month", kind="line")
sns.despine(left=True, bottom=True)
plt.show()
flights_wide = flights.pivot(index="year", columns="month", values="passengers")
print(flights_wide.head())
sns.relplot(data=flights_wide, kind="line")
"""
# %%
# NOTE(review): flight_avg is computed but never used below.
flight_avg = flights.groupby(["year"]).mean()
flights_wide = flights.pivot(index="year", columns="month", values="passengers")  # long -> wide
two_series = [flights_wide.loc[:1955, "Jan"], flights_wide.loc[1952:, "Aug"]]     # two partially overlapping series
sns.relplot(data=two_series, kind="line")
plt.show() | true |
3dfcb6d9bd0988ed19d2520346e875ab7a5cdd6a | Python | jisheng1997/pythontest | /main/number.py | UTF-8 | 2,152 | 4.15625 | 4 | [] | no_license | #!/usr/bin/python3
# -*- encoding: utf-8 -*-
"""
@File : number.py
@Time : 2020/8/4 14:14
@Author : jisheng
"""
import math
import cmath
import random
# Variable table
var1 = -1
var2 = 4.4
var3 = 3
list1 = [1,2,3,4,5,6]
print('--------------以下是数字函数----------------')
# Absolute value; math.fabs(x) would return it as a float instead
print(abs(var1))
# Ceiling: smallest integer >= the number
print(math.ceil(var2))
# Floor: largest integer <= the number
print(math.floor(var2))
# e.g. math.log(math.e) returns 1.0, math.log(100,10) returns 2.0
print(math.log(100,10),end=" ")
print(math.log(math.e))
# Base-10 logarithm of x, e.g. math.log10(100) returns 2.0
print(math.log10(100))
# Largest/smallest of the given arguments; a sequence also works.
print(max(1,2,3,4,10),end=" ")
print(max(list1),end=" ")
print(min(3,4,10),end=" ")
print(min(list1))
# Fractional and integer parts of x; both carry x's sign and the
# integer part is returned as a float.
print(math.modf(114.514))
# Value of x**y.
print(pow(var3,var1))
# x rounded to n digits after the decimal point.
print(round(11.4514,2))
# Square root of x
print(math.sqrt(16))
print('--------------以下是随机数函数----------------')
# Randomly pick one element from the sequence
print(random.choice(list1))
# Random element from range(start, stop, step); default step is 1
print(random.randrange(0,100,5))
# Random float in [0, 1).
print(random.random())
# Shuffle the sequence in place
random.shuffle(list1)
print(list1)
# Random float in [x, y].
print(random.uniform(1,10))
print('--------------以下是三角函数----------------')
# Inverse and forward trigonometric functions (arguments in radians)
print(math.acos(0),math.asin(1),math.atan(2))
print(math.atan2(1,1))
print(math.cos(math.pi),math.sin(math.pi/2),math.tan(3*math.pi/4))
# Convert radians to degrees, e.g. degrees(math.pi/2) returns 90.0
print(math.degrees(math.pi/2))
# Convert degrees to radians
print((math.radians(180)))
print('--------------以下是数学常量----------------')
print(math.pi)   # pi
print(math.e)    # Euler's number e
| true |
2b25f13530f968eabec9c1c2a2ca52bc348f0945 | Python | spudjo/Monster_Raising_Simulator | /creature_files/creatures/formless/_template.py | UTF-8 | 1,699 | 3.15625 | 3 | [] | no_license | from creature_files.body_types.Body_Formless import Body_Formless as Formless
import configparser
class Template:
    """Base template for a formless creature.

    Reads per-race settings from
    creature_files/creatures_config/formless/<ClassName>.ini and owns a
    Body_Formless instance that runs the physical simulation.
    """
    def __init__(self, name, World):
        self.config = configparser.ConfigParser()
        self.config.read('creature_files/creatures_config/formless/' + self.__class__.__name__ + '.ini')
        config_general = self.config['creature']
        self.name = name
        self.race = config_general['race']
        self.description = config_general['description']
        self.age = 1
        self.level = 1
        self.exp = 0
        self.element = None
        # NOTE(review): starts out destroyed - confirm this default is intended.
        self.is_destroyed = True
        self.body = Formless(self, World)
    # ----------------------------------------------------------------------------------------------------------------------
    # Display Functions
    def display_values(self):
        """Print the creature's general stats, then delegate to the body."""
        print("G E N E R A L")
        print("Name: " + str(self.name))
        print("Race: " + str(self.race))
        print("Description: " + str(self.description))
        print("Age: " + str(round(self.age, 2)))
        print("Level: " + str(self.level))
        print("Exp: " + str(self.exp))
        self.body.display_values()
    # Changes applied on every update, based on the world refresh rate;
    # affects stamina expenditure and hunger gain.
    def update(self):
        """Advance one simulation tick: age by 1/60 and update the body."""
        self.age += (1/60)
        self.body.update()
    def get_class_name(self):
        """Return the class name with underscores replaced by spaces."""
        class_name = self.__class__.__name__
        class_name_spaces = ""
        for char in class_name:
            # Fix: '==' instead of 'is' - identity comparison against a
            # string literal is implementation-dependent and raises a
            # SyntaxWarning on Python 3.8+.
            if char == '_':
                char = ' '
            class_name_spaces += char
        return class_name_spaces
| true |
d8d5738586e27b96919e0ab53e07552b1347e5f5 | Python | fengges/leetcode | /301-350/301. 删除无效的括号.py | UTF-8 | 1,432 | 3.265625 | 3 | [] | no_license | class Solution:
def removeInvalidParentheses(self, s):
def isValid(s):
count = 0
for char in s:
if char == '(':
count += 1
if char == ')':
count -= 1
if count < 0:
return False # ())))
return count == 0
def dfs(s, start, l, r):
if l == 0 and r == 0:
if isValid(s):
self.ans.append(s)
return
for i in range(start, len(s)):
if i - 1 >= start and s[i] == s[i - 1]:
continue
if r > 0 and s[i] == ')':
dfs(s[:i] + s[i + 1:], i, l, r - 1)
if l > 0 and s[i] == '(':
dfs(s[:i] + s[i + 1:], i, l - 1, r)
l = 0
r = 0
for char in s:
if char == '(':
l += 1
elif char == ')':
if l == 0:
r += 1
else:
l -= 1
self.ans = []
dfs(s, 0, l, r)
return self.ans
s=Solution()
# Ad-hoc test harness.
# NOTE(review): these expected outputs look wrong - e.g. "(()())(())" is
# already valid, so the solver returns it unchanged. Also r is a *list*
# of candidates compared against a single string, so it always mismatches.
test=[
    {"input":"(()())(())","output":"()()()"},
    {"input":"(()())(())(()(()))","output":"()()()()(())"},
    {"input":"()()","output":""},
]
for t in test:
    r=s.removeInvalidParentheses(t['input'])
    if r!=t['output']:
        print("error:"+str(t)+" out:"+str(r))
| true |
531837b4160b31528f221a14f36f8c438e696c50 | Python | francisBae/boj-algorithm-study | /9400~9499/9461.py | UTF-8 | 217 | 2.921875 | 3 | [] | no_license | #파도반 수열
import sys
# Fast stdin reader (BOJ problems are I/O heavy).
rd = lambda : int(sys.stdin.readline())
# Precompute the Padovan sequence P(1..100): P(n) = P(n-2) + P(n-3).
P = [0]*101
P[1] = 1
P[2] = 1
P[3] = 1
for i in range(4,101):
    P[i] = P[i-2]+P[i-3]
T = rd()              # number of test cases
for _ in range(T):
    N = rd()
print(P[N]) | true |
c704e919cafdbab86649741f3804fa292c9b75f0 | Python | RachitBhargava99/SoundScape-Frontend | /frontend/models.py | UTF-8 | 573 | 2.5625 | 3 | [] | no_license | from frontend import db, login_manager
from flask_login import UserMixin
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: look up a User row by its primary key."""
    return User.query.get(int(user_id))
class User(db.Model, UserMixin):
    """Locally stored user record with an auth token for the backend API."""
    id = db.Column(db.Integer, primary_key=True)   # local PK used by Flask-Login
    user_id = db.Column(db.Integer)                # id on the remote service
    name = db.Column(db.String(127))
    email = db.Column(db.String(127))
    auth_token = db.Column(db.String(255))         # bearer token for API calls
    def __init__(self, id, name, email, auth_token):
        # Note: the 'id' argument populates user_id, not the primary key;
        # the local PK is assigned by the database.
        self.user_id = id
        self.name = name
        self.email = email
        self.auth_token = auth_token
| true |
c92e659350d39c5094a5b63a22c05583cbc5639d | Python | wudi024/testgit | /readDocx.py | UTF-8 | 2,760 | 2.640625 | 3 | [] | no_license | import os,re,sys
import win32com
from win32com.client import Dispatch, constants
from docx import Document
def parse_docx(file):
    """Extract interface names and request/response parameter tables from
    a .docx API manual and append them to the report file.

    Fix: the original did ``f = open(...)`` inside ``try`` with
    ``f.close()`` in ``finally`` - if open() itself failed, ``f`` was
    unbound and the finally clause raised NameError. A ``with`` block
    closes the file safely in every case.
    """
    d = Document(file)
    paras = d.paragraphs   # paragraphs
    tables = d.tables      # tables
    print (file+'\t共有'+str(len(paras))+'个段落')
    print (file+'\t共有'+str(len(tables))+'个表格')
    with open('D:/99bill.com/接口使用手册/输出文本2.txt', 'a', encoding='utf-8') as f:
        f.write(file+'\t共有'+str(len(paras))+'个段落,'+str(len(tables))+'个表格\n\n')
        for i in range(len(paras)):
            # Interface ids look like A/B/C/M/P/S followed by digits.
            if re.findall('^[A|B|C|M|P|S]+[0-9]+', paras[i].text):
                print (type(paras[i]))
                print ('是接口名称吧~~'+paras[i].text)
                f.write("第"+str(i)+"段的内容是:"+paras[i].text+'\n')
        for i in range(len(tables)):
            print ('第'+str(i)+'个表格共有'+str(len(tables[i].rows))+'行')
            f.write('第'+str(i)+'个表格共有'+str(len(tables[i].rows))+'行\n')
            j=0
            t=tables[i]
            # Skip down to the first row marked as the request section.
            while(j<len(t.rows)):
                if re.findall('请求', t.cell(j,0).text):
                    break
                else:
                    j=j+1
            for row in range(j,len(t.rows)):
                print (type(tables[i].rows))
                if re.findall('请求', t.cell(row,0).text):
                    print (t.cell(row,0).text+'在第'+str(row+1)+'行')
                    f.write(t.cell(row,0).text+'在第'+str(row+1)+'行\n')
                    continue
                elif re.findall('响应', t.cell(row,0).text) :
                    print (t.cell(row,0).text+'在第'+str(row+1)+'行')
                    f.write(t.cell(row,0).text+'在第'+str(row+1)+'行\n')
                    continue
                elif str(t.cell(row,0).text).strip()!='':
                    # Non-empty data row: parameter, type, description.
                    reqParam = t.cell(row,0).text
                    reqType = t.cell(row,1).text
                    paraName = t.cell(row,2).text
                    print (reqParam.ljust(20)+ reqType.ljust(10)+paraName)
                    #f.write(reqParam.ljust(20)+ reqType.ljust(10)+'\t\t'+paraName+'\n')
                    f.write(reqParam+'\t\t'+ reqType+'\t\t'+paraName+'\n')
if __name__ == "__main__":
    # Word COM handle is created but never used below (python-docx does
    # the actual parsing).
    w = win32com.client.Dispatch('Word.Application')
    # Iterate over the manual directory and parse every .docx file.
    PATH = "D:\\99bill.com\\接口使用手册" # Windows file path
    doc_files = os.listdir(PATH)
    for doc in doc_files:
        if os.path.splitext(doc)[1] == '.docx':
            try:
                parse_docx(PATH+'\\'+doc)
            except Exception as e:
                print (e)
| true |
42f8e6b3ce66452477bf9937870833fc7908534a | Python | aleneus/pvo | /design-patterns/python/composite/form.py | UTF-8 | 2,077 | 3.5625 | 4 | [] | no_license | """Composite pattern. Imitation of some GUI."""
class Component:
    """Abstract GUI element carrying a text caption."""
    def __init__(self, caption=""):
        self.caption = caption
    def show(self):
        """Render this element; concrete widgets must override this."""
        raise NotImplementedError
class Button(Component):
    """Push button rendered as '[caption]'."""
    def show(self):
        print(f"[{self.caption}]")
class LineEdit(Component):
    """Single-line text input rendered as 'caption [ ]'."""
    def show(self):
        print(f"{self.caption} [ ]")
class CheckBox(Component):
    """Check box rendered as '[ ] caption'."""
    def show(self):
        print(f"[ ] {self.caption}")
class Frame(Component):
    """A Component that holds child components (composite node).

    Children are kept in insertion order; duplicates are silently
    ignored on add, and removing an absent child raises ValueError
    (plain list.remove semantics).
    """
    def __init__(self, caption):
        super().__init__(caption)
        self._components = []
    def add_component(self, component):
        """Append *component* unless it is already present."""
        if component in self._components:
            return
        self._components.append(component)
    def remove_component(self, component):
        """Remove *component* from this frame."""
        self._components.remove(component)
class Form(Frame):
    """Top-level container rendered between '=' rulers."""
    def show(self):
        print("======== {} ========".format(self.caption))
        for c in self._components:
            c.show()
        # String repetition replaces the original char-by-char
        # accumulation loop; output is identical.
        bottom_line = "=" * len(self.caption)
        print("========={}=========".format(bottom_line))
class CheckGroup(Frame):
    """Grouping container rendered between '-' rulers."""
    def show(self):
        print("-------- {} --------".format(self.caption))
        for c in self._components:
            c.show()
        # String repetition replaces the duplicated accumulation loop
        # that the original TODO flagged; output is identical.
        bottom_line = "-" * len(self.caption)
        print("---------{}---------".format(bottom_line))
def main():
    """Build a demo form (a composite of widgets) and render it."""
    f = Form("Human info")
    f.add_component(LineEdit("Name: "))
    f.add_component(LineEdit(" Age: "))
    c = CheckGroup("Options")
    c.add_component(CheckBox("Married"))
    c.add_component(CheckBox("Drives car"))
    c.add_component(CheckBox("Plays music"))
    c.add_component(CheckBox("Speacks English"))
    c.add_component(CheckBox("Likes sport"))
    f.add_component(c)            # nested composite: group inside the form
    f.add_component(Button("Save"))
    f.add_component(Button("Cancel"))
    f.show()
if __name__ == "__main__":   # run the demo only when executed directly
    main()
| true |
f3dee292319cd17eb9f249f241c890fd5882d4b5 | Python | kekeho/NNCT3J-Training | /C/script/1.py | UTF-8 | 567 | 3.484375 | 3 | [
"MIT"
] | permissive | import subprocess
from time import sleep
def main():
result = [] #平均値を格納するリスト
for i in range(0, 100): # 100回ループ
output = subprocess.getoutput('./a.out') # プログラム実行
result.append(output[-1]) # 実行プログラムの出力の一番最後の文字が平均値である
sleep(1) # seed値に時間を使っているので1秒待つ
for i in range(0, 6):
count = result.count(str(i + 1))
print('{}: {}回'.format(i + 1, count))
if __name__ == '__main__':
main()
| true |
4a5a12e1f098f6ecc064af5840c5c52427ccb3ed | Python | alpha-kwhn/Baekjun | /GONASOO/11576.py | UTF-8 | 364 | 2.8125 | 3 | [] | no_license | A, B = map(int, input().split())
N = int(input())
_A = list(map(int, input().split()))
_A.reverse()
_B = []
k = r = 0
for i in range(len(_A)):
k += _A[i] * (A ** i)
for i in range(21):
if k % B**i == k:
r = i
break
for i in range(r-1, -1, -1):
_B.append(k // B ** i)
k %= B ** i
for i in _B: print(i, end=' ')
| true |
e4bc8e2dd0e0dd7ba81fa086107a43714c8aac87 | Python | Lucas-Froguel/Simple-Regression | /Non Linear Regression for Polynomials.py | UTF-8 | 4,539 | 2.921875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 16 13:21:24 2021
@author: Lucas
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.special import legendre
def points(n, e=0.1, a=-1, b=1, poly=legendre(3)):
    """Sample n noisy points of *poly* on a symmetric interval.

    x is drawn uniformly from [-(b-a)/2, (b-a)/2); y is the polynomial
    evaluated at x plus uniform noise of amplitude e.
    """
    x = (b - a) * np.random.rand(n) - (b - a) / 2
    y = sum(poly[k] * x ** k for k in range(len(poly) + 1))
    y += e * (2 * np.random.rand(n) - 1)
    return x, y
def polynomial(x, coeff):
    """Evaluate sum_i coeff[i] * x**i (plain power basis)."""
    total = 0
    for power, c in enumerate(coeff):
        total += c * x ** power
    return total
def leg_polynomial(x, poly):
    """Evaluate a scipy poly1d (e.g. a Legendre polynomial) at x.

    range(len(poly) + 1) because len() of a poly1d is the polynomial's
    order, not its coefficient count, and poly[k] is the coefficient
    of x**k.
    """
    acc = 0
    for k in range(len(poly) + 1):
        acc += poly[k] * (x ** k)
    return acc
def transform(x, d):
    """Polynomial feature map: row i is [1, x_i, x_i**2, ..., x_i**d]."""
    X = np.empty((len(x), d + 1))
    for power in range(d + 1):
        X[:, power] = x ** power
    return X
def regression_with_regularization(X, Y, l=0.1):
    """Ridge-regularized normal equations: w = (X'X + l*I)^-1 X' Y."""
    gram = np.matmul(np.transpose(X), X)
    inverse = np.linalg.inv(gram + l * np.identity(X.shape[1]))
    return np.matmul(np.matmul(inverse, np.transpose(X)), Y)
def E_in(w, x, y):
    """Mean squared error of the power-basis fit w on the sample (x, y)."""
    return sum((polynomial(x, w) - y)**2)/len(y)
def E_out(w, poly):
    """Estimate out-of-sample MSE of w on 10000 noise-free points of the
    target polynomial, drawn uniformly from [0, 1)."""
    X = np.random.rand(10000)
    y = leg_polynomial(X, poly)
    return E_in(w, X, y)
class Non_Linear_Regression:
    """Ridge-regularized polynomial regression with cross-validation
    for both the regularization strength and the polynomial degree."""
    def __init__(self, l=0):
        # l: regularization strength; w: fitted weights (0 until fit()).
        self.l = l
        self.w = 0
    def transform(self, x, d):
        """Feature map: column p is x**p for p = 0..d."""
        X = np.ones((len(x), d+1))
        for i in range(1, X.shape[1]):
            X[:,i] = x**i
        return X
    def fit(self, X, Y, in_place=True, l = -1):
        """Solve the regularized normal equations; l = -1 means 'use self.l'."""
        Xt = np.transpose(X)
        if l == -1:
            l = self.l
        w = np.matmul(np.matmul(np.linalg.inv(np.matmul(Xt, X)+l*np.identity(X.shape[1])), Xt), Y)
        if in_place:
            self.w = w
        return w
    def E_in(self, x, y, w=0):
        """MSE of the fitted polynomial on raw inputs x."""
        # NOTE(review): fit_polynomial iterates range(len(self.w)) but
        # indexes the passed-in w - mismatched lengths would break.
        return sum((self.fit_polynomial(x, w=w) - y)**2)/len(y)
    def E_in_new(self, X, y, w=0):
        """MSE on an already-transformed feature matrix X."""
        return sum((np.dot(X, w)-y)**2)/len(y)
    def E_out(self, poly):
        """Out-of-sample MSE on 10000 noisy points of the target poly
        (uses the module-level E_in and leg_polynomial helpers)."""
        X = np.random.rand(10000)
        y = leg_polynomial(X, poly)+0.1*np.random.rand(10000)
        return E_in(self.w, X, y)
    def fit_polynomial(self, x, w=0):
        """Evaluate the polynomial with weights w (default: self.w)."""
        y = 0
        if np.isscalar(w):
            w = self.w
        for i in range(len(self.w)):
            y += w[i]*(x**i)
        return y
    def cross_validation_lambda(self, X, y, P=10):
        """P-fold CV over 1000 lambda values in [0, 1]; returns the best."""
        # NOTE(review): len(x) uses the lowercase module-level global x,
        # not the X parameter - works only because the script defines x
        # with the same length before calling this.
        part = np.random.choice(range(P), len(x))
        num = 1000
        E_cv = np.zeros(num)
        t = 0
        for l in np.linspace(0, 1, num=num):
            for i in range(P):
                # Fold i is the validation set; the rest trains.
                ind = np.where(part == i, True, False)
                w = self.fit(X[~ind], y[~ind], in_place=False, l=l)
                #print(w, X.shape, X[~ind].shape, X[ind].shape, y[ind].shape)
                E_cv[t] += self.E_in_new(X[ind], y[ind], w=w)
            E_cv[t] = E_cv[t]/P
            t += 1
        return np.where(min(E_cv)==E_cv)[0][0]/num
    def cross_validation_degree(self, x, y, degrees, lambdas, P=10):
        """P-fold CV over polynomial degrees 0..degrees-1, each with its
        own pre-selected lambda; returns (best degree, its CV error)."""
        part = np.random.choice(range(P), len(x))
        E_cv = np.zeros(degrees)
        t = 0
        for k in range(degrees):
            X = self.transform(x, k)
            for i in range(P):
                ind = np.where(part == i, True, False)
                w = self.fit(X[~ind], y[~ind], in_place=False, l=lambdas[k])
                E_cv[t] += self.E_in_new(X[ind], y[ind], w=w)
            E_cv[t] = E_cv[t]/P
            t += 1
        return np.where(min(E_cv)==E_cv)[0][0], min(E_cv)
# Experiment driver: fit a degree-20 Legendre target from 100 noisy
# samples, cross-validating lambda per degree and then the degree.
k = 40 #highest degree to be tested
n = 100 #number of points on the data set
Ein = np.zeros(k)     # NOTE(review): never used below
Eout = np.zeros(k)    # overwritten with a scalar later
plt.figure()
#creates some random points with noise out from an legendre polynomial for testing the code
d = 20
poly = legendre(d)
val_x = np.linspace(-1, 1, num=500)      # dense grid for plotting
val_y = leg_polynomial(val_x, poly)      # noise-free target curve
x, y = points(n, e=0.1, a=-1, b=1, poly=poly)
lambdas = np.zeros(k)
#cross validates the regularization
for i in range(k):
    Reg = Non_Linear_Regression()
    X = Reg.transform(x, i)
    lambdas[i] = Reg.cross_validation_lambda(X, y)
Reg = Non_Linear_Regression()
#cross validates the degree of the polynomial
K, Ecv = Reg.cross_validation_degree(x, y, k, lambdas)
# Refit on all data with the selected degree and its lambda.
Reg = Non_Linear_Regression(l=lambdas[K])
X = Reg.transform(x, K)
w = Reg.fit(X, y)
Y = Reg.fit_polynomial(val_x)
Eout = Reg.E_out(poly)
#plot the errors
print("Degree: ", K, "\nRegularization: ", lambdas[K], "\nE_out: ", Eout, "\nE_cv: ", Ecv)
#plot everything
plt.plot(x, y, 'o')
plt.plot(val_x, val_y, 'r', label="Target")
plt.plot(val_x, Y, 'g', label="Best")
plt.xlim(-1, 1)
plt.ylim(min(val_y), max(val_y))
| true |
233be4a75ea587424a3fdb16f9a9258b30ce01ab | Python | wensiso/django-singleton-model | /example/example/tests.py | UTF-8 | 747 | 2.828125 | 3 | [
"MIT"
] | permissive | from django.test import TestCase
from .models import ConcreteSingleModel
class TestSingleModel(TestCase):
    """Verify the singleton model: every create reuses primary key 1."""
    def test_first_instance(self):
        # Even after deleting all rows, the next create gets pk 1 again.
        ConcreteSingleModel.objects.create()
        ConcreteSingleModel.objects.all().delete()
        new_object = ConcreteSingleModel.objects.create()
        self.assertEqual(new_object.pk, 1)
    def test_many_instances(self):
        # Repeated creates overwrite the single row instead of adding rows.
        def test_for_instance(attr):
            new_object = ConcreteSingleModel.objects.create(attr=attr)
            self.assertEqual(new_object.attr, attr)
            self.assertEqual(new_object.pk, 1)
        test_for_instance('1')
        test_for_instance('2')
        test_for_instance('3')
        self.assertEqual(len(ConcreteSingleModel.objects.all()), 1)
| true |
1d7a2644cb38c600d005849799ca22a3f15de07a | Python | dennisgilliam/django-ember-example | /djangoapp/visits/models.py | UTF-8 | 549 | 2.515625 | 3 | [] | no_license | from django.db import models
class Customer(models.Model):
    """A customer plus the date/type of their next scheduled visit."""
    name = models.CharField(max_length=30)
    dateNextVisit = models.CharField(max_length=30)   # stored as text, not a DateField
    typeNextVisit = models.CharField(max_length=30)
    def __unicode__(self):
        # NOTE(review): __unicode__ is Python-2-era Django; __str__ is
        # the Python 3 equivalent.
        return self.name
class VisitLog(models.Model):
    """One historical visit belonging to a Customer."""
    visitDate = models.CharField(max_length=30)   # stored as text, not a DateField
    visitType = models.CharField(max_length=30)
    customer = models.ForeignKey(Customer)
    def __unicode__(self):
        return "%s - %s" % (self.visitDate,
                            self.visitType)
| true |
b090a816054626ecb6f6b3bfcc57171ee0387cb4 | Python | SaiSujithReddy/CodePython | /table_join_with_max_values.py | UTF-8 | 1,114 | 3.21875 | 3 | [] | no_license | import operator
table1 = (('L',15),('I',12),('H',14),('J',1),('C',9),('X',4),('N',11))
table2 = (('B',13),('O',23),('H',56),('V',777),('B',171),('X',43),('N',65))
def table_join_with_max_values(table1,table2):
    """Inner-join two (key, value) tables on key, keeping max(value).

    Exploratory code: prints every intermediate structure and carries
    several commented-out sorting variants.
    """
    dict_1 = {}
    dict_2 = {}
    dict_3 = {}
    output_list_tuples = []
    for x in table1:
        dict_1[x[0]] = x[1]   # for duplicate keys, the last value wins
    for x in table2:
        dict_2[x[0]] = x[1]
    for x in dict_1:
        if x in dict_2:       # key present in both tables
            dict_3[x] = max(dict_1[x],dict_2[x])
            output_list_tuples.append((x, dict_3[x]))
    sorted_values = sorted(dict_3.values())
    print(sorted_values)
    # sorted_keys = sorted(dict_3.keys())
    #
    # for x in sorted_keys:
    #     output_list_tuples.append((x,dict_3[x][0],dict_3[x][1]))
    #sorted_by_values = sorted(dict_3,key=dict_3.get,reverse=True)
    # for w in sorted_by_values:
    #     print(w,dict_3[w])
    print(dict_1)
    print(dict_2)
    print(dict_3)
    print(output_list_tuples)
    # Join entries sorted ascending by value.
    sorted_by_values = sorted(dict_3.items(), key=operator.itemgetter(1))
    print(sorted_by_values)
table_join_with_max_values(table1,table2) | true |
0e2b66b5fce72a6ec3319f845039d2bdbbe0b0b5 | Python | reppertj/algorithms | /union_find/union_find.py | UTF-8 | 3,231 | 3.796875 | 4 | [
"MIT"
] | permissive | """
Dynamic connectivity algorithms:
Given a set of n objects,
union command: connect two objects
find query: is there a path connecting the two objects?
These algorithms are more efficient than pathfinding algorithms because they
do not need to preserve the path,
only the fact that there is one. 'is connected to' is an equivalence relation;
these algorithms exploit that fact
"""
class QuickFindUF():
    """Eager union-find ("quick find").

    Each element stores its component id directly:
      initialize: O(n)
      union:      O(n)  <-- relabels the whole array; slow for n unions
      find:       O(1)
    """
    def __init__(self, n: int):
        self.components = list(range(0, n))
    def find(self, n1, n2):
        """Return True iff n1 and n2 carry the same component id."""
        return self.components[n1] == self.components[n2]
    def union(self, n1, n2):
        """Merge the components of n1 and n2 by relabelling n1's id.

        Both ids are hoisted out of the comprehension; the original
        re-read components[n1]/components[n2] on every iteration.
        """
        old_id = self.components[n1]
        new_id = self.components[n2]
        self.components = [new_id if c == old_id else c
                           for c in self.components]
class QuickUnionUF():
    """Lazy union-find ("quick union").

    components[i] is the parent of i; a root is its own parent, and the
    algorithm guarantees no cycles. Both find and union cost O(tree
    height), which can degrade to O(n) for tall trees.
    """
    def __init__(self, n: int):
        self.components = list(range(0, n))
    def find(self, n1, n2):
        """True iff n1 and n2 share a root."""
        return self._root(n1) == self._root(n2)
    def union(self, n1, n2):
        """Attach n1's root beneath n2's root."""
        root1 = self._root(n1)
        root2 = self._root(n2)
        self.components[root1] = root2
    def _root(self, n):
        """Follow parent links until reaching a self-parented node."""
        parent = self.components[n]
        while parent != n:
            n = parent
            parent = self.components[n]
        return n
"""
For quick-find, union is too expensive (n array accesses)
Trees are flat, but too expensive to keep them flat
For quick-union, trees can get tall
Find is too expensive (could be n array accesses)
"""
class WeightedQuickUnionUF(QuickUnionUF):
    """Avoid tall trees by keeping track of the size of each tree
    (number of objects)
    Balance by linking root of smaller tree to root of larger tree
    initialize: O(n)
    find: O(log_2(n)) <- tree stays balanced, depth is at most log_2(n)
    union: O(log_2(n)) <- constant time, given roots
    """
    def __init__(self, n: int):
        super().__init__(n)
        # sz[i]: node count of the tree rooted at i (meaningful only
        # while i is a root).
        self.sz = [1 for n in self.components]
    def find(self, n1, n2):
        # Identical to the parent implementation; restated for clarity.
        return self._root(n1) == self._root(n2)
    def union(self, n1, n2):
        r1 = self._root(n1)
        r2 = self._root(n2)
        if r1 == r2:
            pass  # already in the same component
        elif self.sz[r1] < self.sz[r2]:
            # Smaller tree r1 goes under larger tree r2.
            self.components[r1] = r2
            self.sz[r2] += self.sz[r1]
        else:
            self.components[r2] = r1
            self.sz[r1] += self.sz[r2]
class WeightedQuickUnionPathCompressionUF(WeightedQuickUnionUF):
    """
    After finding the root, compress the nodes on the path
    to keep the tree as flat as possible.
    """
    def _root(self, n1):
        def parent(n):
            return self.components[n]
        # Walk up to the root, remembering every node visited.
        nodes = [n1]
        while n1 != parent(n1):
            n1 = parent(n1)
            nodes.append(n1)
        root = n1
        # Point every visited node directly at the root (full path
        # compression), flattening the tree for future queries.
        for n in nodes:
            self.components[n] = root
        return root
6f332b45de1cd1d8c9cadaf8706d164d4fe7095b | Python | PPL-IIITA/ppl-assignment-newage-newton | /submission1/gift_luxury.py | UTF-8 | 663 | 3.484375 | 3 | [] | no_license | #!/usr/bin/env python3
"""Module containing class for luxury gits."""
class GiftLuxury(object):
    """A luxury gift, built from one parsed input-file record.

    Attributes mirror the record's fields: price, value, luxury rating
    and difficulty to obtain.
    """
    def __init__(self, gift):
        """Copy the relevant fields out of the *gift* mapping.

        Arguments:
            gift : Dictionary from input file with keys 'price',
                   'value', 'lux_rating' and 'lux_diff'.
        """
        for field in ('price', 'value', 'lux_rating', 'lux_diff'):
            setattr(self, field, gift[field])
| true |
febb81f8aa9e16968468ce331a57da6ba6fd400c | Python | alanrods/Lenguajes | /Programa5/lenguaje_test.py | UTF-8 | 1,739 | 4.3125 | 4 | [] | no_license | "Calular expresiones booleans"
def bool_Oper(equation, dic):
    """Rewrite every formal operator in *equation* with its Python
    equivalent, using *dic* as the symbol-to-keyword mapping."""
    result = equation
    for symbol, keyword in dic.items():
        result = result.replace(symbol, keyword)
    return result
def get_stacks(f):
    """Evaluate a fully parenthesized boolean token list with stacks.

    Operands and operators go to separate stacks; each ')' pops one
    operator plus one operand ('not') or two operands (binary ops) and
    evaluates them. Returns the operand stack (the result on success).

    SECURITY NOTE(review): sub-expressions are evaluated with eval() -
    never feed this untrusted input.
    """
    operands = []
    operators = []
    operations = []
    for item in f:
        if item != 'and'and item != 'or' and item != 'not' and item != '==' and item != ')' and item != '<=' and item != '>=':
            operands.append(item)
        elif item == ')':
            # Assemble "op operand" or "operand op operand" in order.
            operations.insert(0, operands.pop())
            operations.insert(0, operators.pop())
            if operations[0] == 'not':
                str1 = " ".join(str(x) for x in operations)
                operations = []
                operands.append(str(eval(str1)))
            else:
                operations.insert(0, operands.pop())
                str1 = " ".join(str(x) for x in operations)
                operations = []
                operands.append(str(eval(str1)))
        else:
            operators.append(item)
    return operands
def remove(f):
    """Tokenize the expression string and drop every '(' token.

    Fix: the original mutated the token list while iterating over it,
    which can leave '(' tokens behind (the iteration index outruns the
    shrinking list); filtering builds the result safely instead.
    """
    return [token for token in f.split() if token != '(']
# Operator translation table for bool_Oper.
# NOTE(review): "<" and ">" are mapped to "<=" / ">=", so strict
# comparisons are silently evaluated as non-strict - confirm intended.
dic = {"^":"and", "v":"or", "~":"not", "==":"==", "<" : "<=", ">":">="}
equation = input("Ingresa tu cadena bien formada\n")
f = bool_Oper(equation, dic)   # symbols -> Python keywords
f = remove(f)                  # tokenize and drop '('
print (get_stacks(f)) | true |
b61e2cb19750c89a63d799d220659c5345a1c471 | Python | nambelaas/Operasi-Number-Studycle | /number.py | UTF-8 | 419 | 3.859375 | 4 | [] | no_license | import numpy
# Read n integers, then print them sorted, averaged and multiplied.
arr_num = []
n = int(input("Masukkan jumlah elemen: "))   # number of elements to read
for i in range(0, n):
    ele = int(input())
    arr_num.append(ele)
print(arr_num)
print("\n")
s_num = sorted(arr_num)
print("Diurutkan menjadi: ")
print(s_num)
print("\n")
# NOTE(review): the label says "Median" but numpy.mean computes the
# arithmetic mean; numpy.median would match the label.
print("Median dari array diatas: ")
m_num = numpy.mean(s_num)
print(m_num)
print("\n")
print("Total array setelah dikalikan: ")
t_num = numpy.prod(s_num)    # product of all elements
print(t_num) | true |
fcd4f8f762ab0f3af38f5c94ab200df5879bbfba | Python | Sanardi/bored | /PortScanner.py | UTF-8 | 2,133 | 3.296875 | 3 | [
"MIT"
] | permissive | # Thank you so much MR. GUS KHAWAJA for teaching me how to do this.
import argparse
from socket import *
# Usage python3 PortScanner.py - a 192.168.0.1 -p 21,80,8080,8081,8443
def printBanner(connSock, tgtPort):
    """Grab and print the service banner from an open connection.

    Fix: socket.send() requires a bytes-like object in Python 3; the
    original passed str, which always raised TypeError into the except
    branch, so no banner was ever shown.
    """
    try:
        # Send data to target (HTTP needs a request line before it answers)
        if tgtPort == 80:
            connSock.send(b"GET HTTP/1.1 \r\n")
        else:
            connSock.send(b"\r\n")
        # Receive data from target
        results = connSock.recv(4096)
        # Print the banner
        print('[+] Banner: '+ str(results))
    except Exception:
        # Narrowed from a bare except so Ctrl-C etc. still propagate.
        print(" [+] Banner not available\n")
def connScan(tgtHost, tgtPort):
    """Attempt a TCP connect to (tgtHost, tgtPort); report open/closed
    and try to grab a banner on success."""
    try:
        # Create the socket object
        connSock=socket(AF_INET,SOCK_STREAM)
        # try to connect with the target
        connSock.connect((tgtHost,tgtPort))
        print('[+] {} tcp open'.format(tgtPort))
        printBanner(connSock,tgtPort)
    except:
        # Any failure (refused, timeout, ...) is reported as closed.
        print(' [+] {} tcp closed'.format(tgtPort))
    finally:
        # close the socket object
        # NOTE(review): if socket() itself raised, connSock is unbound here.
        connSock.close()
def portScan(tgtHost, tgtPorts):
    """Resolve the target, print its name/IP header and scan each port."""
    try:
        # if -a was not an IP address this will resolve it to an IP
        tgtIP = gethostbyname(tgtHost)
    except:
        print("[-] Error Unknown Host")
        exit(0)
    try:
        # if the IP reverse-resolves, show the host name
        tgtName = gethostbyaddr(tgtIP)
        print("[+] --- Scan result for: " + tgtName[0] + " ---")
    except:
        print("[+] --- Scan result for: " + tgtIP+ " ---")
    setdefaulttimeout(10)   # cap each connect attempt at 10 s
    # For each port number call connScan function
    for tgtPort in tgtPorts:
        connScan(tgtHost, int(tgtPort))
def main():
    """Parse the -a/-p command line options and launch the port scan.

    Fix: removed two leftover debug prints (the parsed Namespace and
    the port list) that cluttered the scan output.
    """
    # Parse the command line arguments:
    parser = argparse.ArgumentParser("Smart TCP Client Scanner")
    parser.add_argument("-a", "--address", type=str, help="the target IP address")
    parser.add_argument("-p", "--port", type=str, help="the target port")
    args = parser.parse_args()
    # Store the argument values
    ipAddress = args.address
    portNumbers = args.port.split(',')   # "21,80,443" -> ["21", "80", "443"]
    portScan(ipAddress, portNumbers)
if __name__ == "__main__":
main() | true |
e7bd6f1db5151289da2e5940bad1e53687dcb6bf | Python | riffAt2013/PythonPracs | /PythonBasics/filewriting.py | UTF-8 | 330 | 3.640625 | 4 | [] | no_license | def get_user(**user):
return user
# Collect user details interactively and echo them back, numbered.
name = input("Whats your name: ")
age = input("Whats your age: ")
mobile_number = input("Enter your personal number: ")
user1 = get_user(name = name, age = age, phone = mobile_number)  # kwargs -> dict
for index,values in enumerate(user1.keys()):
    print("Info {} -->{}".format(index,user1[values]))
| true |
0578fd3ac14d28c549433733371d20417ec3b455 | Python | CodeForGreenLO8/stacja-badawcza | /sensors/filehandler.py | UTF-8 | 668 | 3.40625 | 3 | [] | no_license | #!/usr/bin/env python3
# ABOUT THIS MODULE
# This module provides a few basic methods for interacting with files.
# It is used by numerous other scripts and modules.
import os
def file_exists(path):
    """Return True if *path* can be opened for reading, else False."""
    try:
        # Context manager guarantees the handle is closed on every path
        # (replaces the manual open()/close() pair).
        with open(path):
            pass
        return True
    except FileNotFoundError:
        return False
def file_delete(path):
    """Delete *path* if it exists; return True on success, else False."""
    if file_exists(path):
        try:
            os.remove(path)
            return True
        except Exception as exception:
            # Best-effort: report the failure type instead of raising.
            print('E: Exception occurred: {}'.format(type(exception).__name__))
            return False
    else:
        print('W: The specified file doesn\'t exist!')
        return False
| true |
a7115604c5c1b1ab9ba5a11533e82c82e64273c2 | Python | binariusO1/Programming-Challenges-v3.0 | /034 - SnakeGame (python 3.7)/snake.py | UTF-8 | 5,032 | 3.15625 | 3 | [] | no_license | # programming challenge
# Snake game
# Python 3.7
# binariusO1
# import only system from os
from os import system, name
# import sleep to show output for some time period
from time import sleep
import keyboard # using module keyboard
import random # for random
import sys # for esc->exit
# define clear function
def clear():
    """Clear the terminal: 'cls' on Windows, 'clear' elsewhere.

    Generalized: the original only handled Windows ('nt'), so on POSIX
    terminals the board kept scrolling instead of redrawing in place.
    """
    if name == 'nt':
        _ = system('cls')
    else:
        _ = system('clear')
# some variables
n = 44        # board width (columns)
m = 20        # board height (rows)
score =0
# direction codes (used by 'dir'): down = 0, up = 3, right = 2, left = 1
num =4        # current snake length in segments
dir = 0       # current movement direction; note: shadows builtin dir()
class Snake:
    """One snake body segment at board cell (x, y).

    Fix: the constructor ignored its x/y arguments and always set
    (0, 0); all call sites pass (0, 0) or overwrite the fields right
    away, so observable behaviour is unchanged.
    """
    def __init__(self,x,y):
        self.x = x
        self.y = y
class Fruit:
    """The fruit at board cell (x, y).

    Fix: the constructor ignored its x/y arguments and always set
    (0, 0); the single Fruit instance is repositioned before the game
    loop, so observable behaviour is unchanged.
    """
    def __init__(self,x,y):
        self.x = x
        self.y = y
s=[]                    # snake segments; only s[0..num-1] are live
f = Fruit(10, 10)       # the fruit (repositioned before the game loop)
for i in range(100):    # pre-allocate the maximum snake length
    #s[i]=Snake
    s.append(Snake(0,0))
# Start as a 4-segment vertical snake in the middle of the board.
s[0].x=int(n/2)
s[0].y=int(m/2)
s[1].x=int(n/2)
s[1].y=int(m/2-1)
s[2].x=int(n/2)
s[2].y=int(m/2-2)
s[3].x=int(n/2)
s[3].y=int(m/2-3)
def Tick():
    """Advance the game one step: move the snake, handle fruit pickup,
    self-collision and wall collision (mutates globals num/f/score/dir/s)."""
    global num
    global f
    global score
    global dir
    defeat = 0 # comment '#' if you want board without braces
    # Shift every segment into its predecessor's cell (tail follows head).
    for i in range(num-1,0,-1):
        s[i].x=s[i-1].x
        s[i].y=s[i-1].y
    # Move the head; hitting a wall sets 'defeat'. The commented lines
    # are the author's wrap-around variant for a borderless board.
    if dir == 0:
        if s[0].y<m-1: s[0].y+=1
        # else: s[0].y=0 # uncomment '#' if you want board without braces
        else: defeat = 1 # comment '#' if you want board without braces
    if dir == 1:
        if s[0].x>0: s[0].x-=1
        # else: s[0].x=n-1 # uncomment '#' if you want board without braces
        else: defeat = 1 # comment '#' if you want board without braces
    if dir == 2:
        if s[0].x<n-1: s[0].x+=1
        # else: s[0].x=0 # uncomment '#' if you want board without braces
        else: defeat = 1 # comment '#' if you want board without braces
    if dir == 3:
        if s[0].y>0: s[0].y-=1
        # else: s[0].y=m-1 # uncomment '#' if you want board without braces
        else: defeat = 1 # comment '#' if you want board without braces
    # Fruit eaten: grow by one segment and respawn the fruit.
    if s[0].x == f.x and s[0].y == f.y:
        num += 1
        s[num-1].x = s[num-2].x
        s[num - 1].y = s[num - 2].y
        f.x = random.randrange((0), n-1)
        f.y = random.randrange((0), m-1)
        score += 1
    # Self-collision or wall hit: reset to the starting 4-segment snake.
    for i in range(1,num-1,1):
        if s[0].x == s[i].x and s[0].y == s[i].y or defeat == 1: # delete phrase ('or defeat == 1') if you want board without braces
            num = 4
            s[0].x = int(n / 2)
            s[0].y = int(m / 2)
            s[1].x = int(n / 2)
            s[1].y = int(m / 2 - 1)
            s[2].x = int(n / 2)
            s[2].y = int(m / 2 - 2)
            s[3].x = int(n / 2)
            s[3].y = int(m / 2 - 3)
            f.x = random.randrange((0), n - 1)
            f.y = random.randrange((0), m - 1)
            dir = 0
            score = 0
            defeat = 0 # uncomment '#' if you want board without braces
def loop():
    """Main game loop: draw the board each frame, poll the keyboard for
    direction changes, and speed up as the score grows. Never returns
    (exits via sys.exit on Esc)."""
    speed = float(0.05)   # seconds per keyboard-poll step
    global dir
    f.x = int(n / 2)
    f.y = int(m / 2)
    # put fruit away from snake
    while f.x == s[0].x or f.x == s[1].x or f.x == s[2].x or f.x == s[3].x:
        f.x = random.randrange((0), n - 1)
    while f.y == s[0].y or f.y == s[1].y or f.y == s[2].y or f.y == s[3].y:
        f.y = random.randrange((0), m - 1)
    while(True):
        # drawing score
        print("score: " , f"{score:04d}")
        # create a table (m rows of n blank cells)
        a = [[' '] * n for i in range(m)]
        # update Snake's body
        Tick()
        # drawing fruit
        a[f.y][f.x]="*"
        # drawing Snake's head
        a[s[0].y][s[0].x] = "@"
        # drawing Snake's tail ('x' body, '+' tip)
        for i in range(1,num-1,1):
            a[s[i].y][s[i].x]="x"
        a[s[num-1].y][s[num-1].x] = "+"
        # drawing table
        print('_'*(n+2))
        for x in a:
            print('|',*x,'|', sep ='')
        print('-' * (n + 2))
        # clear table
        a.clear()
        # make game some harder
        if score > 15:
            speed = 0.04
        if score > 45:
            speed = 0.03
        # hotkeys: poll 4 times per frame; reversing direction is blocked
        k=0
        while k != 4:
            sleep(speed)
            if keyboard.is_pressed('down') and dir != 3:
                dir = 0
            if keyboard.is_pressed('up') and dir != 0:
                dir = 3
            if keyboard.is_pressed('left') and dir != 2:
                dir = 1
            if keyboard.is_pressed('right') and dir != 1:
                dir = 2
            if keyboard.is_pressed('esc'):
                sys.exit("\nHave a nice day")
            k += 1
        #clear screen
        clear()
loop()   # start the game
| true |
ff894845e9b29f32652dc042f69487955bcb90a3 | Python | asmuelle/UdemyTF | /Chapter2_Python/Logic.py | UTF-8 | 312 | 3.15625 | 3 | [
"MIT"
] | permissive | #### Abfragen und Logik in Python ####
# Conditionals demo (German names: bin_ich_pleite = "am I broke",
# kontostand = "account balance", bin_ich_reich = "am I rich").
bin_ich_pleite = None
bin_ich_reich = None   # declared but never assigned below
kontostand = 0
if kontostand > 0:
    bin_ich_pleite = False
elif kontostand == 0:
    print("Mies gelaufen.")
    bin_ich_pleite = True
else:
    bin_ich_pleite = True
print("Bin ich pleite?", bin_ich_pleite)
| true |
8763ed44087848441af0e5621c0125502014ab30 | Python | Julian21A/Python-HackerRank-Challenges-Medium | /Triangle Quest.py | UTF-8 | 79 | 3.109375 | 3 | [] | no_license | for i in range(1,int(input())):
if i>=1 and i<=9:print(int(i * 10**i / 9))
| true |
a625fddd8405544554fec10cf709dc3500ce2d12 | Python | alex-akn/traceroute-visualization | /vis_route.py | UTF-8 | 2,929 | 2.78125 | 3 | [] | no_license | #!/usr/bin/env python3
import urllib.request
import json
import os, sys
import re
import getopt
import subprocess
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
# from gcmap import GCMapper
# gcm = GCMapper()
def getLoc(IP):
    """Resolve an IP address string to a (lat, lon, country, city) tuple.

    Returns (None, None, None, None) when the geolocation service has no
    usable fix: missing fields, non-numeric coordinates, or the service's
    0.0/0.0 "unknown location" placeholder.
    """
    # Other geolocation services are available
    url = "https://geolocation-db.com/json/" + IP
    # Context manager closes the HTTP connection (the original leaked it).
    with urllib.request.urlopen(url) as response:
        encoding = response.info().get_content_charset('utf8')
        data = json.loads(response.read().decode(encoding))
    try:
        lat = float(data["latitude"])
        lon = float(data["longitude"])
        country = data["country_code"]
        city = data["city"]
        if not city:
            # No city available: fall back to the full country name.
            country = data["country_name"]
        if lat == 0.0 and lon == 0.0:
            # The service reports 0,0 when it cannot locate the address.
            return (None, None, None, None)
        return (lat, lon, country, city)
    except (KeyError, TypeError, ValueError):
        # Narrowed from the original bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        return (None, None, None, None)
def printHelp():
    """Show command-line usage for this script on stdout."""
    usage_lines = (
        "./vis_route.py IPv4Address",
        " e.g. ./vis_route.py 213.138.111.222",
    )
    for usage_line in usage_lines:
        print(usage_line)
# --- Command-line handling -------------------------------------------------
try:
    opts, args = getopt.getopt(sys.argv,"h")
except getopt.GetoptError:
    printHelp()
    sys.exit()
for opt, arg in opts:
    if opt == '-h':
        printHelp()
        sys.exit()
# args[0] is the script name, so exactly two entries means one IP argument.
if len(args) != 2:
    printHelp()
    sys.exit()
IP= args[1]
# World-map backdrop in the PlateCarree (equirectangular) projection.
ax = plt.axes(projection=ccrs.PlateCarree())
ax.stock_img()
#Start traceroute command (Windows "tracert"; the commented-out line is the
#Linux/macOS equivalent)
# proc = subprocess.Popen(["traceroute -m 25 -n "+IP], stdout=subprocess.PIPE, shell=True,universal_newlines=True)
proc = subprocess.Popen(["tracert", IP], stdout=subprocess.PIPE, shell=True,universal_newlines=True)
#Where we are coming from
lastLon= None
lastLat= None
lastCountry = ""
lastCity = ""
#Parse individual traceroute command lines
for line in proc.stdout:
    print(line,end="")
    if re.match(r"Tracing route to", line):
        continue
    splitline=line.split()
    # Hop lines carry at least 4 whitespace-separated fields; skip the rest.
    if len(splitline) < 4:
        continue
    hopIP = ""
    # Take the first dotted-quad token on the line (possibly in brackets).
    for w in splitline:
        m = re.search(r"\[?(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\]?", w)
        if m:
            hopIP = m.group(0)
            break
    if hopIP == "":
        continue
    (lat,lon, country, city)=getLoc(hopIP)
    if (lat == None):
        continue
    # Only draw when there is a previous point and the position changed.
    if lastLat != None and (lastLat-lat + lastLon-lon) != 0.0:
        if city:
            text = city + ", " + country
        else:
            text = country
        # Suppress the label when the hop stays in the same city/country.
        if country == lastCountry and city == lastCity:
            text = ""
        plt.text(lon - 3, lat - 12, text,
            horizontalalignment='right',
            transform=ccrs.Geodetic())
        plt.plot([lon, lastLon], [lat, lastLat],
            color='blue', linewidth=2, marker='o',
            transform=ccrs.Geodetic(),
            )
    lastLat= lat
    lastLon= lon
    lastCountry = country
    lastCity = city
plt.tight_layout()
plt.show()
| true |
bf2ff6c76e598acf8b57b2424e0160147eef470a | Python | SMY99/kpk_180921 | /gadget.py | UTF-8 | 1,229 | 2.875 | 3 | [] | no_license | class Smartphone:
def __init__(self, name, characteristic, price):
self.name = name
self.characteristic = characteristic
self.price = price
def __str__(self):
return f'смартфон: {self.name}, {self.characteristic}, {self.price} руб.'
@classmethod
def import_from_file(cls, gadget):
items_source = open(gadget, 'r', encoding='utf-8').readlines()
items_source = list(map(lambda x: x.replace('\n', '').split(', '), items_source))
items_schema = items_source.pop(0)
items_source_as_dict = list(map(lambda x: dict(zip(items_schema, x)), items_source))
items = []
for item_dict in items_source_as_dict:
_item = cls(**item_dict)
items.append(_item)
return items
class Consolys(Smartphone):
    """Game-console product; unlike Smartphone it carries no spec string."""
    def __init__(self, name, price):
        """Remember the console's name and price (no `characteristic`)."""
        self.price = price
        self.name = name
    def __str__(self):
        """Russian-language one-line summary of this console."""
        summary = f'консоли: {self.name}, {self.price} руб.'
        return summary
class Compudahters(Smartphone):
    # Computer product line; stores only a name and a price.
    def __init__(self, name, price):
        # Intentionally does not call super().__init__: computers carry no
        # `characteristic` field in this model.
        self.name = name
        self.price = price
def __str__(self):
return f'компьютеры: {self.name}, {self.price} руб.' | true |
516e38a2295a71947294aa2eaf7cfcc6dace9104 | Python | RohanLodhi/pyprograms-filehandling | /readlines.py | UTF-8 | 129 | 3.15625 | 3 | [] | no_license | with open("test.txt", "r") as f:
##Small Files:
f_contents = f.readlines() #return list
print(f_contents)
print(f.closed)
| true |
911a7952de46a246c2ca80a34846091f9e3359a3 | Python | alsohas/CS455 | /tcss455group7/likes_gender_classifier.py | UTF-8 | 2,030 | 2.640625 | 3 | [] | no_license | import codecs
import os
import pickle
from os.path import basename, exists, join, splitext
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
class likes_gender_classifier:
    """Predicts user gender from Facebook page-like ids using a pickled
    classifier and the CountVectorizer it was fitted with."""

    def __init__(self):
        '''empty constructor; the model is loaded lazily in test()'''

    def __get_model(self):
        """Load the pickled classifier from disk."""
        # Alternate deployment path: "/data/userlikes.pkl"
        # `with` ensures the handle is closed (the original leaked it).
        with open("/home/itadmin/src/CS455/likes/userlikes.pkl", 'rb') as file:
            return pickle.load(file)

    def __get_count_vectorizer(self):
        """Load the CountVectorizer fitted on the training data, so test-time
        vectors stay consistent with the training vocabulary."""
        # Alternate deployment path: "/data/likeVectors.pkl"
        with open("/home/itadmin/src/CS455/likes/likeVectors.pkl", 'rb') as file:
            return pickle.load(file)

    def test(self, **kwargs):
        """Predict a gender for every user found under kwargs['input_dir'].

        Reads <input_dir>/relation/relation.csv, joins each user's like ids
        into one space-separated document, vectorizes and predicts.

        :return: dict mapping userid -> predicted gender.
        """
        input_dir = kwargs['input_dir']
        # loading the pickled model
        model = self.__get_model()
        # loading the count vectorizer created at training time
        cv = self.__get_count_vectorizer()
        if model is None:
            return {}
        # os.path.join fixes the original concatenation bug where the
        # no-trailing-slash branch produced ".../relationrelation.csv".
        relation_dir = os.path.join(input_dir, "relation")
        if not os.path.isdir(relation_dir):
            print("Test directory to statuses not found.")
            exit()
        # One row per user, with all like ids joined into a single document.
        df = pd.read_csv(os.path.join(relation_dir, "relation.csv")).astype(str).drop_duplicates()
        df = df.sort_values(by='userid', ascending=True).groupby('userid')
        df = df.agg({'like_id': lambda x: ' '.join(x.astype(str))}).reset_index()
        like_ids = df['like_id']
        userids = df['userid']
        vector = cv.transform(like_ids)
        # using the count vector to predict gender
        prediction = model.predict(vector)
        # map each userid to its predicted gender
        results = dict(zip(userids, prediction))
        print(results)
        return results
| true |
3b4f5e241db852d36557d2c9f9bdadfe0635ed7f | Python | prrn-pg/Shojin | /templates/Typical/Math/nCrAll.py | UTF-8 | 609 | 3.265625 | 3 | [] | no_license | def nCr(i, cur, rest, target):
    # Base case: no more elements needed -> emit the finished combination.
    if rest == 0:
        yield cur
    elif len(target) - i == rest:
        # Exactly `rest` elements remain, so every one of them must be taken.
        nex = cur[:]
        nex.append(target[i])
        for ncr in nCr(i+1, nex, rest-1, target):
            yield ncr
    else:
        # Branch: either include target[i] ...
        nex = cur[:]
        nex.append(target[i])
        for ncr in nCr(i+1, nex, rest-1, target):
            yield ncr
        # ... or skip it and keep looking further right.
        for ncr in nCr(i+1, cur[:], rest, target):
            yield ncr
# Demo: print every 5-element combination drawn from 0..7, in generation order.
arr = list(range(8))
for combination in nCr(0, [], 5, arr):
    print(combination)
| true |
900ee9b30f1c1e5a15e81847bb0880f88fdc5c32 | Python | tangerine122/Spider | /juejin.py | UTF-8 | 1,101 | 2.875 | 3 | [] | no_license | """
@author:Adam
@time:2018-10-15 20:10
@desc:掘金小册抓取
"""
import requests
import json
# Scrape the first two pages of Juejin's "xiaoce" (booklet) timeline API and
# print a summary (title, author, price, sales, size, sections, blurb) per item.
for page in range(1, 3):
    url = "https://xiaoce-timeline-api-ms.juejin.im/v1/getListByLastTime?uid=&client_id=&token=&src=web&alias=&pageNum={}".format(page)
    # Browser-like User-Agent so the endpoint does not reject the request.
    header = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                      "Chrome/69.0.3497.100 Safari/537.36"
    }
    r = requests.get(url, headers=header)
    content = json.loads(r.text)
    # "d" holds the list of booklet records for this page.
    for d in content["d"]:
        title = d["title"]
        print(title)
        author = d["userData"]["username"]
        print("作者:{}".format(author))
        price = d["price"]
        print(price)
        buyCount = d["buyCount"]
        print("{}人已购买".format(buyCount))
        contentSize = d["contentSize"]
        print("字数:{}".format(contentSize))
        lastSectionCount = d["lastSectionCount"]
        print("{}小节".format(lastSectionCount))
        desc = d["desc"]
        print("简介:{}".format(desc))
        print("="*70)
| true |
db47c6c6187c99bdd6ad15236d2cbef3d6076566 | Python | microsoft/qlib | /qlib/contrib/model/catboost_model.py | UTF-8 | 3,778 | 2.625 | 3 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import numpy as np
import pandas as pd
from typing import Text, Union
from catboost import Pool, CatBoost
from catboost.utils import get_gpu_device_count
from ...model.base import Model
from ...data.dataset import DatasetH
from ...data.dataset.handler import DataHandlerLP
from ...model.interpret.base import FeatureInt
from ...data.dataset.weight import Reweighter
class CatBoostModel(Model, FeatureInt):
    """CatBoost wrapper implementing qlib's Model interface plus feature
    importance introspection (FeatureInt)."""

    def __init__(self, loss="RMSE", **kwargs):
        """
        :param loss: CatBoost loss function; only "RMSE"/"Logloss" supported.
        :param kwargs: extra CatBoost parameters merged into the config.
        """
        # There are more options
        if loss not in {"RMSE", "Logloss"}:
            raise NotImplementedError
        self._params = {"loss_function": loss}
        self._params.update(kwargs)
        self.model = None

    def fit(
        self,
        dataset: DatasetH,
        num_boost_round=1000,
        early_stopping_rounds=50,
        verbose_eval=20,
        evals_result=None,
        reweighter=None,
        **kwargs
    ):
        """Train on the dataset's train/valid segments.

        :param evals_result: optional dict filled in place with the training
            curves (keys "train"/"valid" plus CatBoost's raw sections).
            NOTE: the previous signature used the mutable default ``dict()``
            and then rebound the name, so a caller-supplied dict was never
            populated; it is now ``None`` by default and updated in place.
        :param reweighter: optional Reweighter producing sample weights.
        :raises ValueError: on empty data or an unsupported reweighter.
        """
        if evals_result is None:
            evals_result = {}
        df_train, df_valid = dataset.prepare(
            ["train", "valid"],
            col_set=["feature", "label"],
            data_key=DataHandlerLP.DK_L,
        )
        if df_train.empty or df_valid.empty:
            raise ValueError("Empty data from dataset, please check your dataset config.")
        x_train, y_train = df_train["feature"], df_train["label"]
        x_valid, y_valid = df_valid["feature"], df_valid["label"]

        # CatBoost needs 1D array as its label
        if y_train.values.ndim == 2 and y_train.values.shape[1] == 1:
            y_train_1d, y_valid_1d = np.squeeze(y_train.values), np.squeeze(y_valid.values)
        else:
            raise ValueError("CatBoost doesn't support multi-label training")

        if reweighter is None:
            w_train = None
            w_valid = None
        elif isinstance(reweighter, Reweighter):
            w_train = reweighter.reweight(df_train).values
            w_valid = reweighter.reweight(df_valid).values
        else:
            raise ValueError("Unsupported reweighter type.")

        train_pool = Pool(data=x_train, label=y_train_1d, weight=w_train)
        valid_pool = Pool(data=x_valid, label=y_valid_1d, weight=w_valid)

        # Initialize the catboost model
        self._params["iterations"] = num_boost_round
        self._params["early_stopping_rounds"] = early_stopping_rounds
        self._params["verbose_eval"] = verbose_eval
        # Train on GPU when one is available, otherwise fall back to CPU.
        self._params["task_type"] = "GPU" if get_gpu_device_count() > 0 else "CPU"
        self.model = CatBoost(self._params, **kwargs)

        # train the model
        self.model.fit(train_pool, eval_set=valid_pool, use_best_model=True, **kwargs)

        # Fill the caller's dict in place so the curves are observable.
        catboost_evals = self.model.get_evals_result()
        evals_result.update(catboost_evals)
        evals_result["train"] = list(catboost_evals["learn"].values())[0]
        evals_result["valid"] = list(catboost_evals["validation"].values())[0]

    def predict(self, dataset: DatasetH, segment: Union[Text, slice] = "test"):
        """Return predictions for *segment* as a pd.Series indexed like the
        feature frame.

        :raises ValueError: when called before fit().
        """
        if self.model is None:
            raise ValueError("model is not fitted yet!")
        x_test = dataset.prepare(segment, col_set="feature", data_key=DataHandlerLP.DK_I)
        return pd.Series(self.model.predict(x_test.values), index=x_test.index)

    def get_feature_importance(self, *args, **kwargs) -> pd.Series:
        """get feature importance

        Notes
        -----
            parameters references:
            https://catboost.ai/docs/concepts/python-reference_catboost_get_feature_importance.html#python-reference_catboost_get_feature_importance
        """
        return pd.Series(
            data=self.model.get_feature_importance(*args, **kwargs), index=self.model.feature_names_
        ).sort_values(ascending=False)
if __name__ == "__main__":
    # Smoke check: default construction must succeed without training data.
    cat = CatBoostModel()
| true |
d78cb28bbc84535787e73d5d464393e3b4125c77 | Python | AZ-OO/Python_Tutorial_3rd_Edition | /4章 制御構造ツール/4.7.2.py | UTF-8 | 2,424 | 3.703125 | 4 | [] | no_license | """
4.7.2 キーワード引数
"""
# 関数はキーワード引数もとれる
# 『キーワード = 値』のかたち
def parrot(voltage, state = 'a stiff', action = 'voom', type = 'Norwegian Blue'):
    """Print the Dead Parrot sketch lines, demonstrating keyword arguments.

    :param voltage: required argument (positional or keyword).
    :param state: optional keyword argument with a default.
    :param action: optional keyword argument with a default.
    :param type: optional keyword argument with a default (name kept for
        interface compatibility even though it shadows the builtin).
    """
    # end=' ' keeps a space before the continuation; the original end=''
    # glued the words together ("...voomif you put...").
    print("This parrot wouldn't", action, end = ' ')
    print("if you put", voltage, "volts through it.")
    # "Lovely" fixes the "Lovery" typo in the printed line.
    print(" -- Lovely plumage, the", type)
    print(" -- It's", state, "!")
#parrot(1000) # 位置引数1個
#parrot(voltage = 1000) # キーワード引数1個
#parrot(voltage = 1000000, action = 'V00000M') # キーワード引数2個
#parrot(action = 'V00000M', voltage = 1000000) # キーワード引数2個
#parrot('a million', 'bereft of life', 'jump') # キーワード引数3個
#parrot('a thousand', state = 'pushing up the daisies') # 位置引数1個キーワード引数1個
# 関数をコールする時は、必ず位置引数が先でキーワード引数を後にしなければなrない
# キーワード引数は全て関数定義の仮引数に書いたものと一致している必要があるが、その順序は問われない
#parrot() # 必要な引数がない
#parrot(voltage = 5.0, 'dead') # キーワード引数の後に非キーワード引数
#parrot(110,voltage = 200) # 同じ引数に値を2度与えた
#parrot(actor = 'John Cleese') # 未知のキーワード引数
# 仮引数の最後が**名前の形になっていると、この引数はディクショナリを受け取る
# このディクショナリには、仮引数に対応するキーワードを除いたすべてのキーワード引数が入っている
# つまり、この形にすればカリヒキスにないキーワードが使える
# またこれは、*名前の形式と組み合わせて使うことができる
# この形式では、仮引数にない位置指定型引数を全て含んだタプルが関数に渡る
def chessshop(kind, *arguments, **keywords):
    """Demonstrate *args/**kwargs: print the cheese-shop exchange for *kind*,
    then every extra positional line, then the keywords in sorted order."""
    print('-- Do you have any', kind, '?')
    print("-- I'm sorry, we're all out of", kind)
    for extra_line in arguments:
        print(extra_line)
    print('-' * 40)
    for keyword in sorted(keywords):
        print(keyword, ':', keywords[keyword])
# Demonstrates mixing one positional argument, extra positionals (collected
# by *arguments), and arbitrary keywords (collected by **keywords).
chessshop("Limburger", "It's very runny sir.",
          "It's really very, VERY runny, sir.",
          shopkeeper = "Mihael Palin",
          client = 'John Cleese',
          sketch = 'Cheese SHop Sketch')
| true |
28b33d8719309088be29950925b5169158592c27 | Python | vivekpandian08/30days_LeetCode_Challenge_June | /Day_5_Random_pick_with_Weight.py | UTF-8 | 463 | 3.25 | 3 | [] | no_license | import bisect
import random
class Solution(object):
def __init__(self, w):
"""
:type w: List[int]
"""
self.prefisSum = w
for i in range(1, len(self.prefisSum)):
self.prefisSum[i] = self.prefisSum[i] + self.prefisSum[i - 1]
def pickIndex(self):
"""
:rtype: int
"""
target = random.randint(1, self.prefisSum[-1])
return bisect.bisect_left(self.prefisSum, target) | true |
a0ccb2ff0b43adac63f1472e50c25d9e1ebc2e7e | Python | samyuktahegde/Python | /datastructures/arrays/delete_next_smaller_element.py | UTF-8 | 547 | 3.546875 | 4 | [] | no_license | def delete_next_smaller_element(array, k):
    # Working stack of elements kept so far.
    stack = []
    # Number of deletions performed; stops the scan once it reaches k.
    count = 0
    for i in range(0, len(array)):
        stack.append(array[i])
        # NOTE(review): debug prints left in by the author.
        print(stack)
        print('i', i)
        # Pop the stack top whenever a later element is >= it.
        for j in range(i+1, len(array)):
            if len(stack)==0:
                break
            elif stack[-1]>array[j]:
                continue
            else:
                stack.pop()
                count+=1
            if count==k:
                break
    print(stack)
# Demo run: delete 1 "next smaller" element from [3, 100, 1].
a = [3, 100, 1]
delete_next_smaller_element(a, 1)
| true |
354de2243f1f1814ed24fecd609370dd18a56df3 | Python | mhcrnl/py-editor | /py_editor/oozaar/linenumber.py | UTF-8 | 4,575 | 2.71875 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/python
# ---------------- READ ME ---------------------------------------------
# This Script is Created Only For Practise And Educational Purpose Only
# This Script Is Created For http://bitforestinfo.blogspot.com
# This Script is Written By
#
#
##################################################
######## Please Don't Remove Author Name #########
############### Thanks ###########################
##################################################
#
#
__author__='''
######################################################
By S.S.B Group
######################################################
Suraj Singh
Admin
S.S.B Group
surajsinghbisht054@gmail.com
http://bitforestinfo.blogspot.com/
Note: We Feel Proud To Be Indian
######################################################
'''
import Tkinter as tk
class LineNumberCanvas(tk.Canvas):
    """Canvas gutter that draws line numbers (and red breakpoint markers)
    for a companion Text widget."""
    def __init__(self, *args, **kwargs):
        tk.Canvas.__init__(self, *args, **kwargs)
        self.text_widget = None  # attached later via connect()
        self.breakpoints = []    # line numbers (ints) carrying a breakpoint
    def connect(self,text_widget):
        """Attach the Text widget whose visible lines should be numbered."""
        self.text_widget = text_widget
    def re_render(self):
        """Re-render the line canvas"""
        self.delete('all') # To prevent drawing over the previous canvas
        temp = self.text_widget.index("@0,0")
        while True :
            dline= self.text_widget.dlineinfo(temp)
            if dline is None:
                # The line is no longer visible on screen; stop drawing.
                break
            y = dline[1]
            linenum = str(temp).split(".")[0]
            # `text_id` replaces the original local name `id`, which shadowed
            # the builtin; the unused `x = dline[0]` was also dropped.
            text_id = self.create_text(2,y,anchor="nw", text=linenum)
            if int(linenum) in self.breakpoints:
                # Draw a red dot under the number to mark the breakpoint.
                x1,y1,x2,y2 = self.bbox(text_id)
                self.create_oval(x1,y1,x2,y2,fill='red')
                self.tag_raise(text_id)
            temp = self.text_widget.index("%s+1line" % temp)
    def get_breakpoint_number(self,event):
        """Toggle a breakpoint on the clicked line number, then redraw."""
        if self.find_withtag('current'):
            i = self.find_withtag('current')[0]
            linenum = int(self.itemcget(i,'text'))
            if linenum in self.breakpoints:
                self.breakpoints.remove(linenum)
            else:
                self.breakpoints.append(linenum)
            self.re_render()
class Text(tk.Text):
    """tk.Text subclass that fires a virtual <<Changed>> event whenever its
    content or viewport changes (insert/delete/replace, cursor move, scroll)."""
    def __init__(self, *args, **kwargs):
        """Create the Text widget and install the Tcl-level interceptor."""
        tk.Text.__init__(self, *args, **kwargs)
        # Define a Tcl proc that wraps every widget command and generates
        # <<Changed>> for the operations that alter content or viewport.
        self.tk.eval('''
            proc widget_interceptor {widget command args} {
                set orig_call [uplevel [linsert $args 0 $command]]
                if {
                    ([lindex $args 0] == "insert") ||
                    ([lindex $args 0] == "delete") ||
                    ([lindex $args 0] == "replace") ||
                    ([lrange $args 0 2] == {mark set insert}) ||
                    ([lrange $args 0 1] == {xview moveto}) ||
                    ([lrange $args 0 1] == {xview scroll}) ||
                    ([lrange $args 0 1] == {yview moveto}) ||
                    ([lrange $args 0 1] == {yview scroll})} {
                    event generate $widget <<Changed>>
                }
                #return original command
                return $orig_call
            }
            ''')
        # Rename this widget's Tcl command and alias the old name to the
        # interceptor, so every widget call is routed through it.
        self.tk.eval('''
            rename {widget} new
            interp alias {{}} ::{widget} {{}} widget_interceptor {widget} new
        '''.format(widget=str(self)))
class EditorWindow(tk.Frame):
    """Frame wiring together the Text editor, a vertical scrollbar, and the
    line-number gutter; re-renders the gutter whenever the text changes."""
    def __init__(self, *args, **kwargs):
        tk.Frame.__init__(self, *args, **kwargs)
        self.text = Text(self)
        self.scrollbar = tk.Scrollbar(orient="vertical", command=self.text.yview)
        self.text.configure(yscrollcommand=self.scrollbar.set)
        self.linenumbers = LineNumberCanvas(self, width=40)
        self.linenumbers.connect(self.text)
        # Pack order matters: scrollbar right, gutter left, text fills the rest.
        self.scrollbar.pack(side="right", fill="y")
        self.linenumbers.pack(side="left", fill="y")
        # Redraw the gutter on edits, cursor movement, scrolling and resizes.
        self.text.bind('<Down>',self.changed)
        self.text.bind('<Up>',self.changed)
        self.text.bind("<<Changed>>", self.changed)
        self.text.bind("<Configure>", self.changed)
        # Clicking a line number toggles a breakpoint marker.
        self.linenumbers.bind('<Button-1>',self.linenumbers.get_breakpoint_number)
        self.text.pack(side="right", fill="both", expand=True)
    def changed(self, event):
        """Event handler: refresh the line-number canvas."""
        self.linenumbers.re_render()
root = tk.Tk()
l = EditorWindow(root)
l.pack()
root.mainloop()
| true |
8bef6c76ac744dcaf55630a87a5b538ccb693d76 | Python | Zhoroev/homework_2.5 | /h2_5_quest2.py | UTF-8 | 550 | 3.203125 | 3 | [] | no_license | import random
# Ten candidate file names; exactly one of them is created on disk below.
names = ['dhhgfjjhsa.txt', 'hhdsdahffh.txt', 'afdgdhjsds.txt',
         'sggjghddss.txt', 'fjdjgdghdf.txt', 'sjssahjfga.txt',
         'agsgdjhhfj.txt', 'gafadhadda.txt', 'hdagajfhhj.txt',
         'fhjhafhdfa.txt']
# random.choice fixes the original off-by-one: randint(0, 10) could yield
# index 10 and raise IndexError on this 10-element list.
file = open(random.choice(names), 'w')
file.close()  # don't leak the handle; func() reopens the files it needs
def func(argument):
    """Overwrite the first bytes of every existing file in *argument* with
    'Erzhan'; report (in Russian, as before) each file that does not exist."""
    for name in argument:
        try:
            handle = open(f'{name}', 'r+')
        except FileNotFoundError:
            print(f'Такого файла {name} не существует')
        else:
            with handle:
                handle.write('Erzhan')
# Touch every candidate file: only the one created above exists and is
# overwritten; the rest just print the "file does not exist" message.
func(names)
| true |
507342f8824111c1b48db704bfce1e00852113e3 | Python | Sinedd231/2I013-groupe4 | /old/projet-S3/src/mainS3_objectif.py | UTF-8 | 1,337 | 3.1875 | 3 | [] | no_license | '''
@author: Alexandre
@test: Denis
'''
from fenetre import *
from robot import *
from controlleur import *
import time
from obstacle import *
from objectif import *
# Create the window (do not change 900x900 without introducing shared
# cross-file constants first).
ma_fenetre=Fenetre(900,900) #a ne pas changer, ou alors reflechir a comment creer des constantes inter-fichiers
# Create the robot; initial direction must not be (0,0) or the turning
# functions break.
mon_robot=Robot("robocop",400,400, [1,0] ) #ne pas mettre 0,0 en direction initiale ou les fonctions tourner ne marcheront pas
# Create the robot's controller.
controlleur = Controlleur(mon_robot)
# Draw the robot.
triangle = mon_robot.disp_robot(ma_fenetre.fenetre, None)
# Draw the sensors (NOTE: the sensors were created together with the robot).
ligne=mon_robot.capteur.disp_capteur(ma_fenetre.fenetre)
# Create the obstacles (stored in a dictionary) and draw them at the same time.
obstacles= Obstacle.create_and_disp_obstacle(ma_fenetre.fenetre, 10)
# NOTE: `objectif` is a 2-tuple of (the real Objectif object, a tkinter object).
objectif= Objectif.create_and_disp_objectif(ma_fenetre.fenetre) #NOTE: objectif est un 2uplet compose de l'objet reel objectif et d'un objet tkinter
controlleur.tourner_vers_objectif(objectif[0])
# Main loop: advance the robot until the controller pauses.
while controlleur.pause==False:
    controlleur.avancer(ma_fenetre.fenetre, triangle, ligne, obstacles)
    ma_fenetre.actualiser()
    time.sleep(0.01)
print("FINI")
ma_fenetre.master.mainloop() | true |
677f713215dc12f390a51b0afb6eb5d15ec22cbf | Python | dpazel/music_rep | /transformation/harmonictranscription/t_harmonic_transcription.py | UTF-8 | 9,682 | 2.75 | 3 | [
"MIT"
] | permissive | """
File: t_harmonic_transcription.py
Purpose: Given a line and its hct, and a target hct as long as the prior said given, reproduce the line
to the new target hct, based on its constraints plus those of the melodic search analysis.
"""
from melody.constraints.chordal_pitch_constraint import ChordalPitchConstraint
from melody.constraints.comparative_pitch_constraint import ComparativePitchConstraint
from melody.constraints.pitch_range_constraint import PitchRangeConstraint
from melody.solver.melodic_constraint_solver import MelodicConstraintSolver
from search.melodicsearch.melodic_search_analysis import MelodicSearchAnalysis, NotePairInformation
from structure.LineGrammar.core.line_grammar_executor import LineGrammarExecutor
from structure.line import Line
from structure.tempo import Tempo
from structure.time_signature import TimeSignature
from timemodel.duration import Duration
from timemodel.event_sequence import EventSequence
from timemodel.position import Position
from timemodel.tempo_event import TempoEvent
from timemodel.tempo_event_sequence import TempoEventSequence
from timemodel.time_signature_event import TimeSignatureEvent
from tonalmodel.diatonic_pitch import DiatonicPitch
from tonalmodel.pitch_range import PitchRange
from tonalmodel.interval import Interval, IntervalType
from transformation.transformation import Transformation
class THarmonicTranscription(Transformation):
    """
    THarmonicTranscription: Construct a pattern with similar melodic structure to a source pattern, but given
    a specific hct to follow.
    """

    # Default half-interval for the per-note pitch "tunnel" (see apply()).
    TUNNEL_HALF_INTERVAL = Interval(5, IntervalType.Perfect)

    def __init__(self, source_line, source_hct, source_melodic_form=None):
        """
        Constructor
        :param source_line: Source line.
        :param source_hct: Source line's hct.
        :param source_melodic_form: MelodicForm for the source line (optional).
        """
        self.__source_line = source_line
        self.__source_hct = source_hct
        self.__source_melodic_form = source_melodic_form

        self.__source_analysis = MelodicSearchAnalysis(self.source_line, self.source_hct)

        # Height = chromatic span (in semitones) of the source line's pitches.
        min_pitch, max_pitch = THarmonicTranscription.compute_min_max_pitches(self.source_line.get_all_notes())
        self.__height = max_pitch.chromatic_distance - min_pitch.chromatic_distance

        self.__tunnel_half_interval = THarmonicTranscription.TUNNEL_HALF_INTERVAL

        Transformation.__init__(self)

    @staticmethod
    def create(source_expression):
        """Convenience factory: parse a line-grammar expression into a
        (line, hct) pair and build the transcription from it."""
        lge = LineGrammarExecutor()
        source_line, source_hct = lge.parse(source_expression)
        return THarmonicTranscription(source_line, source_hct)

    @property
    def source_line(self):
        return self.__source_line

    @property
    def source_hct(self):
        return self.__source_hct

    @property
    def source_analysis(self):
        return self.__source_analysis

    @property
    def source_melodic_form(self):
        return self.__source_melodic_form

    @property
    def height(self):
        return self.__height

    @property
    def tunnel_half_interval(self):
        return self.__tunnel_half_interval

    # NOTE(review): the default Interval below is created once at function
    # definition time and shared across calls; assumed immutable — confirm.
    def apply(self, target_hct,
              window_anchor_pitch,
              tag_map=None,
              window_height=None,
              num_solutions=-1,
              tunnel_half_interval=Interval(5, IntervalType.Perfect)):
        """
        Apply method for transformation.
        :param target_hct: Target hct for new target line.
        :param window_anchor_pitch: Pitch specifying the lowest pitch for the target line window.
        :param tag_map: map index of source/target note to specified pitch.
        :param window_height: Height of target pitch window (in semi-tones) - use source line height if None specified.
        :param num_solutions: Maximum number of solutions to return, -1 == unbounded.
        :param tunnel_half_interval: half-interval for pitch range on each target tone.
        :return: MCSResults
        """
        if self.source_hct.duration != target_hct.duration:
            raise Exception('Target hct duration {0} does not match source hct duration {1}.'.
                            format(target_hct.duration, self.source_hct.duration))

        # Accept either a DiatonicPitch or its string form (e.g. "C:4").
        window_anchor_pitch = DiatonicPitch.parse(window_anchor_pitch) if isinstance(window_anchor_pitch, str) \
            else window_anchor_pitch

        target_line = self._build_target_line()

        self.__tunnel_half_interval = tunnel_half_interval

        # Target notes mirror source notes one-for-one (same onsets/durations).
        source_notes = self.source_line.get_all_notes()
        target_notes = target_line.get_all_notes()
        source_to_target = {source_note: target_note for source_note, target_note in zip(source_notes, target_notes)}

        constraints = self._build_constraints(source_to_target, tag_map)

        ts_seq, tempo_seq = THarmonicTranscription._build_default_time_sig_tempo()

        height = window_height if window_height else self.height
        pitch_range = PitchRange(window_anchor_pitch.chromatic_distance,
                                 window_anchor_pitch.chromatic_distance + height)

        solver = MelodicConstraintSolver(target_line, tempo_seq, ts_seq, target_hct, pitch_range, constraints)

        # Pin the tagged target notes to their requested pitches up front.
        initial_map = {target_notes[k]: v for k, v in tag_map.items()} if tag_map else None

        results = solver.solve(initial_map, num_solutions)
        return results

    def _build_target_line(self):
        # Build a target line, all notes C:4 with onsets/durations of original line.
        target_line = Line()

        initial_pitch = DiatonicPitch.parse('C:4')
        source_notes = self.source_line.get_all_notes()
        for note in source_notes:
            t_note = note.clone()
            t_note.diatonic_pitch = initial_pitch
            target_line.append(t_note)

        return target_line

    def _build_constraints(self, source_to_target, tag_map):
        # Constraints:
        #    contour based on pair analysis
        #    chordal if original note is chordal
        #    melodic form constraints
        # Tunnel: for diatonic notes, a pitch range constraint based on the specified "tunnel" over target notes.
        pair_annotations = self.source_analysis.note_pair_annotation
        note_annotations = self.source_analysis.note_annotation

        constraints = list()
        # Preserve the source's pitch contour between each analyzed note pair.
        for pair_annotation in pair_annotations:
            t1 = source_to_target[pair_annotation.first_note]
            t2 = source_to_target[pair_annotation.second_note]
            if pair_annotation.relationship == NotePairInformation.Relationship.LT:
                rel = ComparativePitchConstraint.LESS_THAN
            elif pair_annotation.relationship == NotePairInformation.Relationship.GT:
                rel = ComparativePitchConstraint.GREATER_THAN
            else:
                rel = ComparativePitchConstraint.EQUAL
            constraint = ComparativePitchConstraint(t1, t2, rel)
            constraints.append(constraint)

        # Source chordal notes must stay chordal in the target.
        for annotation in note_annotations:
            if annotation.is_chordal:
                constraint = ChordalPitchConstraint(source_to_target[annotation.note])
                constraints.append(constraint)

        # Get the constraints off the motifs
        if self.source_melodic_form:
            form_constraints = self.source_melodic_form.constraints
            for c in form_constraints:
                # Re-target each form constraint onto the corresponding target notes.
                c_prime = c.clone([source_to_target[n] for n in c.actors])
                constraints.append(c_prime)

        tunnel_constraints = self._build_tunnel_constraints(source_to_target, tag_map)
        constraints.extend(tunnel_constraints)

        return constraints

    def _build_tunnel_constraints(self, source_to_target, tag_map):
        # Without a tagged anchor pitch there is no reference movement to
        # build a tunnel around.
        if tag_map is None or len(tag_map) == 0:
            return []

        # Use the first tagged note to derive the source-to-target movement interval.
        one_id = next(iter(tag_map.keys()))
        source_note = self.source_line.get_all_notes()[one_id]
        target_pitch = tag_map[one_id]
        mvmt_interval = Interval.create_interval(source_note.diatonic_pitch, target_pitch)

        constraints = list()
        note_annotations = self.source_analysis.note_annotation
        for annotation in note_annotations:
            # Unpitched notes (presumably rests) cannot anchor a range constraint.
            if annotation.note.diatonic_pitch is None:
                continue
            target_note = source_to_target[annotation.note]
            # Center of the tunnel: the source pitch shifted by the movement interval.
            dest_ctr_pitch = mvmt_interval.get_end_pitch(annotation.note.diatonic_pitch)
            low_pitch = self.tunnel_half_interval.get_start_pitch(dest_ctr_pitch)
            high_pitch = self.tunnel_half_interval.get_end_pitch(dest_ctr_pitch)
            p_range = PitchRange.create(low_pitch, high_pitch)
            constraint = PitchRangeConstraint([target_note], p_range)
            constraints.append(constraint)

        return constraints

    @staticmethod
    def _build_default_time_sig_tempo():
        # Fixed defaults: 60 bpm quarter-note tempo, 3/4 time ('sww' beat pattern).
        tempo_seq = TempoEventSequence()
        ts_seq = EventSequence()
        tempo_seq.add(TempoEvent(Tempo(60, Duration(1, 4)), Position(0)))
        ts_seq.add(TimeSignatureEvent(TimeSignature(3, Duration(1, 4), 'sww'), Position(0)))
        return ts_seq, tempo_seq

    @staticmethod
    def compute_min_max_pitches(notes):
        """Return the (lowest, highest) diatonic pitches among *notes*,
        skipping unpitched notes; both are None for an all-unpitched input."""
        min_pitch = None
        max_pitch = None
        for n in notes:
            p = n.diatonic_pitch
            if p is None:
                continue
            min_pitch = p if min_pitch is None else p if p.chromatic_distance < min_pitch.chromatic_distance else \
                min_pitch
            max_pitch = p if max_pitch is None else p if p.chromatic_distance > max_pitch.chromatic_distance else \
                max_pitch
        return min_pitch, max_pitch
| true |
4b710a760186e3f32f3fbcf2dcb1185e03de662c | Python | morgoth1145/advent-of-code | /2019/10/solution.py | UTF-8 | 2,618 | 3.65625 | 4 | [] | no_license | import collections
import math
import lib.aoc
import lib.grid
def compute_minimum_angle(dx, dy):
    """Reduce the offset (dx, dy) to its primitive direction: the smallest
    integer step pointing the same way. Axis-aligned offsets collapse to unit
    vectors; diagonals are divided by gcd(|dx|, |dy|)."""
    if dx == 0:
        # Vertical ray.
        return (0, 1) if dy > 0 else (0, -1)
    if dy == 0:
        # Horizontal ray.
        return (1, 0) if dx > 0 else (-1, 0)
    # Diagonal ray: divide out the gcd to get the primitive direction.
    step = math.gcd(abs(dx), abs(dy))
    return dx // step, dy // step
# Constructs a map of angle to which asteroids are visible at that angle
def determine_asteroid_visibility(grid, x, y):
    """Group every asteroid (non-'.') by its primitive direction from (x, y);
    each group is sorted nearest-first, so the first entry per direction is
    the asteroid actually visible along that ray."""
    by_direction = collections.defaultdict(list)
    for (ox, oy), cell in grid.items():
        if cell == '.' or (ox, oy) == (x, y):
            continue
        direction = compute_minimum_angle(ox - x, oy - y)
        by_direction[direction].append((ox, oy))
    # Manhattan distance is a valid tie-free ordering here because all
    # asteroids in one bucket lie on the same ray.
    for colinear in by_direction.values():
        colinear.sort(key=lambda a: abs(a[0] - x) + abs(a[1] - y))
    return by_direction
def get_best_asteroid(grid):
    """Return the coordinates of the asteroid from which the most other
    asteroids are directly visible (one per surviving direction)."""
    best = (0, None)
    for (x, y), cell in grid.items():
        if cell == '.':
            continue
        directions = determine_asteroid_visibility(grid, x, y)
        # The number of direction keys is the number of visible asteroids.
        candidate = (len(directions), (x, y))
        best = max(best, candidate)
    return best[1]
def part1(s):
    """Part 1: the answer is how many asteroids are visible from the best
    monitoring-station asteroid."""
    grid = lib.grid.FixedGrid.parse(s)
    x, y = get_best_asteroid(grid)
    # One visible asteroid per surviving direction key.
    answer = len(determine_asteroid_visibility(grid, x, y))
    lib.aoc.give_answer(2019, 10, 1, answer)
def part2(s):
    """Part 2: a laser at the best asteroid rotates clockwise from straight
    up, vaporizing the nearest asteroid per direction per sweep; the answer
    encodes the 200th asteroid destroyed as x*100 + y."""
    grid = lib.grid.FixedGrid.parse(s)
    x, y = get_best_asteroid(grid)
    asteroids = determine_asteroid_visibility(grid, x, y)

    # Directions sorted by atan2 angle (starts at the +x axis).
    angle_order = sorted(asteroids,
                         key=lambda a: math.atan2(a[1], a[0]))

    # Find the first angle that's either pointing up or in the right quadrant
    # Once found, rotate the angle order as needed
    shift_idx = min(i for i in range(len(angle_order))
                    if math.atan2(angle_order[i][1],
                                  angle_order[i][0]) >= math.atan2(-1, 0))
    angle_order = angle_order[shift_idx:] + angle_order[:shift_idx]

    destruction_order = []
    # Each sweep destroys the nearest remaining asteroid on every direction;
    # only directions with survivors stay in play for the next rotation.
    while angle_order:
        new_angle_order = []
        for a in angle_order:
            asteroid_list = asteroids[a]
            destruction_order.append(asteroid_list.pop(0))
            if len(asteroid_list) > 0:
                new_angle_order.append(a)
        angle_order = new_angle_order

    # The 200th asteroid destroyed is at index 199.
    x, y = destruction_order[199]
    answer = x * 100 + y
    lib.aoc.give_answer(2019, 10, 2, answer)
# Fetch the puzzle input once and solve both parts.
INPUT = lib.aoc.get_input(2019, 10)
part1(INPUT)
part2(INPUT)
| true |
faa19296416e7945cfe6c71550f9d49dc650bb33 | Python | timjdavey/google-doc-sync | /spreadsheet.py | UTF-8 | 6,504 | 3.09375 | 3 | [] | no_license | import gdata.spreadsheet
import gdata.spreadsheet.service
import gdata.service
class EntryDoesNotExist(Exception):
    """Raised when a row lookup finds no matching entry in the worksheet."""

class EntryAlreadyExists(Exception):
    """Raised when creating a row whose key is already present remotely."""

class MutipleEntriesExist(Exception):
    """Raised when a supposedly-unique key matches more than one row.
    (Name keeps the original spelling for interface compatibility.)"""
class GoogleRow(object):
    """One logical row: pairs a local payload object with the (possibly
    absent) remote gdata list-feed entry and the sheet that owns it."""
    def __init__(self, payload, data, entry, sheet):
        self.payload = payload
        self.converted = self.clean(data)
        self._entry = entry # underscored for proper error handling
        self.sheet = sheet

    @property
    def entry(self):
        """ Gets the google entry, raising EntryDoesNotExist when absent """
        if self._entry is None:
            raise EntryDoesNotExist
        else:
            return self._entry

    def exists(self):
        """ Checks to see if entry exists on google """
        try:
            self.entry
        except EntryDoesNotExist:
            return False
        else:
            return True

    def clean(self, data):
        """ Cleans the data ready for Googles consumption: stringify keys
        and non-None values; None values are kept to mean blank cells """
        d = {}
        for k, v in data.items():
            if v is not None: # leave in None (blank cells)
                v = str(v) # otherwise make string
            d[str(k)] = v
        return d

    def outgoing(self):
        """ Data to be sent to Google. Cleaned & updated with incoming. """
        try:
            out = self.incoming()
        except EntryDoesNotExist:
            out = self.converted
        else:
            # Remote row exists: local values override the remote columns.
            out.update(self.converted)
        return out

    def incoming(self):
        """ Data returned from Google row. Contains extra cols. """
        data = dict([(k, v.text) for k,v in self.entry.custom.items()])
        return self.clean(data)

    def out_of_sync(self):
        """ Returns bool on if incoming & outgoing are out of sync"""
        try:
            self.entry
        except EntryDoesNotExist:
            # No remote entry at all counts as out of sync.
            return True
        else:
            incoming = self.incoming()
            outgoing = self.outgoing()
            # Only keys present on both sides are compared.
            for k, v in incoming.items():
                if k in outgoing and not v == outgoing[k]:
                    return True
            return False

    def save(self, refresh=False):
        """ Persist this row via the sheet; with refresh=True, reload the
        feed first when the row looks out of sync """
        if self.out_of_sync() and refresh:
            self.sheet.feed(refresh=True)
        return self.sheet.save(self)

    def delete(self, silent=False):
        """ Delete this row via the sheet; returns True on success, None
        when there was nothing to delete.
        NOTE(review): `silent` is accepted but never used — confirm intent. """
        try:
            self.sheet.delete(self)
        except EntryDoesNotExist:
            return None # None not False
        else:
            return True
class GoogleSpreadsheet(object):
    """ Base Spreadsheet example: subclass and implement convert /
    convert_back. Please see documents on how to extend. """
    # Column whose value uniquely identifies a row.
    primary_key = 'pk'
    def __init__(self, email, password,
            spreadsheet, worksheet, cache_feed=False, source=None):
        self.email = email
        self.password = password
        self.spreadsheet = spreadsheet
        self.worksheet = worksheet
        # `source` identifies the client app to Google; defaults to the email.
        self.source = email if source is None else source
        self.cache_feed = cache_feed

    def convert(self, payload):
        """ Please specify how the payload (e.g. User) converts to dict """
        raise NotImplementedError

    def convert_back(self, row, payload):
        """ Please specify how the row should convert back to a payload """
        raise NotImplementedError

    @property
    def client(self):
        """ Logs into the spreadsheet lazily with the stored credentials """
        try:
            # Attribute probe: raises AttributeError on first access only.
            self._client
        except AttributeError:
            client = gdata.spreadsheet.service.SpreadsheetsService()
            client.email = self.email
            client.password = self.password
            client.source = self.source
            client.ProgrammaticLogin()
            self._client = client
        return self._client

    def feed(self, refresh=False):
        """ Managed feed from Google: maps primary key -> list of entries
        sharing that key; cached between calls when cache_feed is True """
        if not self.cache_feed or refresh \
            or not hasattr(self, '_cached_entries'):
            ents = {}
            feed = self.client.GetListFeed(self.spreadsheet, self.worksheet)
            for entry in feed.entry:
                t = str(entry.title.text)
                if t in ents:
                    ents[t].append(entry)
                else:
                    ents[t] = [entry]
            self._cached_entries = ents
        return self._cached_entries

    def entry(self, pk):
        """ Returns the single entry for pk; raises EntryDoesNotExist or
        MutipleEntriesExist when the key is missing or ambiguous """
        feed = self.feed()
        try:
            lst = feed[str(pk)]
        except KeyError:
            raise EntryDoesNotExist
        else:
            if len(lst) > 1:
                raise MutipleEntriesExist
            else:
                return lst[0]

    def get(self, payload):
        """ Returns a GoogleRow given a payload object; the row's entry is
        None when the key does not exist remotely yet """
        data = self.convert(payload)
        pk = data[self.primary_key]
        try:
            entry = self.entry(pk)
        except EntryDoesNotExist:
            entry = None
        return GoogleRow(payload, data, entry, self)
def create(self, row):
""" Creates a row in Google, making sure it doesn't already exist """
try:
# checks the make sure the code isn't doing something stupid
row.entry
except EntryDoesNotExist:
self.client.InsertRow(
row.outgoing(), self.spreadsheet, self.worksheet)
else:
raise EntryAlreadyExists
return row
def update(self, row):
""" Updates a given row, with built in exception handling """
self.client.UpdateRow(row.entry, row.outgoing())
return row
def save(self, row):
""" Create or Update appropriately """
try:
self.create(row)
except EntryAlreadyExists:
if row.out_of_sync():
self.update(row)
return row
def delete(self, entry):
""" Simply deletes a row """
if isinstance(entry, GoogleRow):
entry = row.entry
self.client.DeleteRow(entry)
def deduplicate(self):
""" Removes duplicate rows """
feed = self.feed()
c = []
for k, v in feed.items():
if len(v) > 1:
# could do something more fancy here like check last updated
for entry in v[1:]:
self.delete(entry)
c.append(k)
return c
| true |
72e514094ead74ac8a63eb51d54ac8a1d5f78903 | Python | FilipKomljenovic/TetrisAgent | /pieces/opiece.py | UTF-8 | 2,126 | 2.875 | 3 | [] | no_license | from pieces.piece import Piece
class OPiece(Piece):
    """The 2x2 square ("O") tetromino.

    Relies on attributes presumably provided by the Piece base class
    (self.board, self.configurations, self.BOARDWIDTH, self.BOARDHEIGHT)
    -- TODO confirm against pieces/piece.py.
    """
    HEIGHT = 2  # piece height in cells
    WIDTH = 2   # piece width in cells
    LEFT = 1    # action code: move one cell left
    RIGHT = 2   # action code: move one cell right
    # add piece color and setter
    def __init__(self, shape, board):
        super().__init__(shape, board)
    def fill_configurations(self, board):
        """Return every (left column, right column) horizontal placement.

        Results are cached in self.configurations after the first call;
        the `board` parameter is unused here.
        """
        if not len(self.configurations) == 0:
            return self.configurations
        for x in range(0, self.BOARDWIDTH - 1):
            self.configurations.append((x, x + 1))
        return self.configurations
    def generate_board(self, conf, board):
        """Drop the piece at the column pair `conf` and return the new board.

        NOTE(review): despite the `board` parameter (which is copied for the
        result), the landing-height scan below reads self.board -- confirm
        whether that is intentional.
        """
        new_board = [i[:] for i in board]  # row-wise copy so the input board is untouched
        height = 0
        # Scan top-down for the first row where the piece can start falling.
        for x in range(0, self.BOARDHEIGHT):
            flag = False
            for i in range(conf[0], conf[1] + 1):
                if conf[1] + 1 <= self.BOARDWIDTH and self.board[x][i] == '.':
                    if self.can_fall(x, conf[0]):
                        flag = True
                        break
            if flag:
                height = x
                break
        if self.can_fall(height, conf[0]):
            # Stamp the 2x2 piece, clamped to the bottom of the board.
            max_height = height + self.HEIGHT
            if max_height > self.BOARDHEIGHT:
                max_height = self.BOARDHEIGHT
            for x in range(height, max_height):
                for y in range(conf[0], conf[1] + 1):
                    # change with color ID
                    new_board[x][y] = '1'
        return new_board
    def can_fall(self, height, column):
        """True when the WIDTH-wide span from `height` to the floor is all empty ('.')."""
        for x in range(height, self.BOARDHEIGHT):
            for y in range(column, column + self.WIDTH):
                if self.board[x][y] != '.':
                    return False
        return True
    def generate_actions(self, column, conf):
        """Translate a target column into a list of LEFT/RIGHT action codes.

        NOTE(review): 4 and 5 appear to be the spawn columns of the piece --
        confirm against the game setup.  The `conf` parameter is unused.
        """
        left = 4
        right = 5
        actions = []
        if column > right:
            for i in range(right, column + self.WIDTH - 1):
                actions.append(self.RIGHT)
        elif column < left:
            for i in range(0, left - column):
                actions.append(self.LEFT)
        elif left < column <= right:
            for i in range(left, left + (column - left)):
                actions.append(self.RIGHT)
        return actions
| true |
b3d8abb37a557544d1933c5c3468a32ead1a7380 | Python | daniel-kullmann/advent-of-code | /2.py | UTF-8 | 251 | 3.109375 | 3 | [] | no_license | fh = open('2.txt', 'r')
# Advent of Code 2015, day 2 (part 1): total wrapping paper needed.
# NOTE: Python 2 source (`print area`); `fh` is opened just above this block.
area = 0
for line in fh.readlines():
    # each line is "LxWxH"
    sizes = map(int, line.strip().split('x'))
    sides = [sizes[0]*sizes[1], sizes[1]*sizes[2], sizes[0]*sizes[2]]
    smallestSide = min(sides)
    # surface area of the box (2 * sum of the three face areas)
    # plus slack equal to the area of the smallest side
    area += smallestSide + 2*sum(sides)
print area
| true |
c93e0144366062e193efe6cb092b3e2d6bea1297 | Python | Smookii/PossibleGame | /ball.py | UTF-8 | 1,102 | 3.390625 | 3 | [] | no_license | import pygame
from pygame import Vector2
class Ball():
    """A drawable circle with a precise float position and an integer pixel position."""
    def __init__(self, color, startposition, width, window):
        self.color = color
        self.width = width  # circle radius in pixels
        self.float_pos = Vector2(startposition)  # precise position for smooth motion
        self.position = [int(self.float_pos[0]), int(self.float_pos[1])]  # pixel position
        self.window = window

    def __str__(self):
        # BUG FIX: __str__ previously *printed* the color and implicitly
        # returned None, so str(ball) / print(ball) raised
        # "TypeError: __str__ returned non-string".  Return the string instead.
        return str(self.color)

    def update_position(self):
        """Sync the integer pixel position from the float position."""
        self.position = [int(self.float_pos[0]), int(self.float_pos[1])]

    def in_borders(self, vec):
        """Return True when moving by `vec` keeps the whole circle inside the window."""
        vect_temp = Vector2(self.position)
        vect_temp += vec
        if vect_temp[0] - self.width < 0 or vect_temp[0] + self.width > self.window.get_size()[0]:
            return False
        if vect_temp[1] - self.width < 0 or vect_temp[1] + self.width > self.window.get_size()[1]:
            return False
        return True

    def draw(self, window):
        """Render the ball as a filled circle onto `window`."""
        pygame.draw.circle(window, self.color, self.position, self.width)

    def new_position(self, startposition):
        """Teleport the ball.

        NOTE(review): unlike __init__, `position` is not int-cast here --
        callers appear to pass integer coordinates; confirm.
        """
        self.position = startposition
        self.float_pos = Vector2(startposition)
c9eb3b76b56be615b76adf1395fd0d45567c0988 | Python | choococo/MoMLearning | /2.opencvLearning/stage04/4. findContours.py | UTF-8 | 1,505 | 3.265625 | 3 | [] | no_license | import cv2
import numpy as np
'轮廓查找与绘制:findContours()、drawContours()'
'轮廓检索、轮廓近似'
img = cv2.imread("../images/23.jpg")
# img = cv2.imread("../images/1.jpg")
# 1. 灰度化
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# 2. 阈值二值化
ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
# 3. 查找轮廓:包括的canny算法
# findContours(image, mode, method, contours=None, hierarchy=None, offset=None)
# image:输入图像(二值化图像)
# mode:轮廓检索方式
# method:轮廓近似方法
"""
轮廓检索方式:
cv2.RETR_EXTERNAL 只检测外轮廓 _______________用的是最多的
cv2.RETR_LIST 检测的轮廓不建立等级关系
cv2.RETR_CCOMP 建立两个等级的轮廓,上一层为外边界,里面一层为内孔的边界信息
cv2.RETR_TREE 建立一个等级树结构轮廓,包含关系
轮廓近似方法:
cv2.CHAIN_APPROX_NONE 存储所有外边界
cv2.CHAIN_APPROX_SIMPLE 压缩垂直、水平、对角方向,只保留端点
"""
contours, hierarchy = cv2.findContours(image=thresh, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_SIMPLE)
print(len(contours[0])) # 点的数量
# print(contours[0])
cv2.imshow("thresh", thresh)
print(hierarchy) # 层次树
# 绘制轮廓:直接对原图进行操作
img_contour = cv2.drawContours(image=img, contours=contours, contourIdx=-1, color=(0, 255, 0), thickness=2)
cv2.imshow("img_contour", img_contour)
cv2.waitKey(0)
cv2.destroyAllWindows()
| true |
8180dcbc9b42d010841d0fbfa22c5c1e4ca85943 | Python | KrakSat-2016/kraksat-server | /api/tests/test_telemetry.py | UTF-8 | 1,795 | 2.53125 | 3 | [
"MIT"
] | permissive | from django.core.urlresolvers import reverse
from api.models import Telemetry
from api.tests.utils import KrakSatAPITestCase
class TelemetryTests(KrakSatAPITestCase):
    """Tests for /telemetry API endpoint"""
    # Endpoint under test and the model it persists to.
    list_url = reverse('telemetry-list')
    model = Telemetry
    # A fully valid POST payload; the invalid-parameter cases below override
    # one field at a time.
    valid_data = {
        'timestamp': KrakSatAPITestCase.TIMESTAMP,
        'voltage': 0, 'current': 0, 'oxygen': 0, 'ion_radiation': 0,
        'humidity': 20, 'temperature': 30, 'pressure': 1020.5,
        'gyro_x': 5.5, 'gyro_y': 12.5, 'gyro_z': 3.5,
        'accel_x': 0.1, 'accel_y': 0.15, 'accel_z': 0.31,
        'magnet_x': 5.001, 'magnet_y': 2.01, 'magnet_z': 9.999
    }
    def test_create(self):
        """Ensure we can create a new Telemetry object"""
        self._test_create()
    def test_invalid_params(self):
        """Ensure sending invalid parameters to /telemetry returns error 400"""
        # Each case is (description, field override); presumably the helper
        # merges the override into valid_data and expects HTTP 400 --
        # TODO confirm against KrakSatAPITestCase._test_invalid_params.
        self._test_invalid_params(
            ('Invalid timestamp', {'timestamp': 'foobar'}),
            ('Humidity below 0', {'humidity': -5}),
            ('Humidity above 100', {'humidity': 100.04}),
            ('Temperature below -40', {'temperature': -40.01}),
            ('Temperature above 125', {'temperature': 125.01}),
            ('Invalid pressure', {'pressure': 'foobar'}),
            ('Invalid gyro_x', {'gyro_x': 'foobar'}),
            ('Invalid gyro_y', {'gyro_y': 'foobar'}),
            ('Invalid gyro_z', {'gyro_z': 'foobar'}),
            ('Invalid accel_x', {'accel_x': 'foobar'}),
            ('Invalid accel_y', {'accel_y': 'foobar'}),
            ('Invalid accel_z', {'accel_z': 'foobar'}),
            ('Invalid magnet_x', {'magnet_x': 'foobar'}),
            ('Invalid magnet_y', {'magnet_y': 'foobar'}),
            ('Invalid magnet_z', {'magnet_z': 'foobar'})
        )
| true |
0d75fc2567304415cc5c6310d6ff5bec4d47c5c8 | Python | siddharththakur26/data-science | /Core/Languages/python/HackerRank/birthdayChocolate.py | UTF-8 | 652 | 3.109375 | 3 | [] | no_license | s = [2,5,1, 3, 4, 4, 3, 5, 1, 1, 2, 1, 4, 1, 3, 3, 4, 2, 1]
# HackerRank "Birthday Chocolate": count contiguous windows of s of length m
# whose values sum to d.  NOTE: Python 2 source (`print cnt`); `s` is defined
# just above this block.
d = 18
m = 7
temp=[]
cnt=0
for i in range(0,len(s)):
    # slice out the window of length m starting at index i
    temp = s[i:m+i]
    if len(temp) == m:
        #print temp
        sumValue = sum(temp)
        #print sumValue
        if sumValue == d:
            cnt +=1
print cnt
'''
result =[]
sum=0
cnt=0
for i in range(0,len(s)):
    j=0
    while(j < m and i+j < len(s)):
        sum = sum + s[i+j]
        if sum <= d:
            result.append(s[i+j])
        if len(result) == m:
            if sum == d:
                cnt +=1
        j +=1
    del result[:]
    sum = 0
print cnt
'''
| true |
f0fad501a1f01191e2a3e742fd18708cb280a653 | Python | alfredang/tensorflow_workshop | /Module_3_0.py | UTF-8 | 2,033 | 3.328125 | 3 | [] | no_license | # Tensorflow workshop with Jan Idziak
#-------------------------------------
#
#script harvested from:
#https://github.com/nfmcclure
#
# Loss Functions
#----------------------------------
#
# This python script illustrates the different
# loss functions for regression and classification.
import matplotlib.pyplot as plt
import tensorflow as tf
# Create graph
sess = tf.Session()
###### Numerical Predictions ######
#try different values
x_vals = tf.linspace(-2., 2., 1000)
target = tf.constant(0.)
# L2 loss
# L = (pred - actual)^2
l2_y_vals = tf.square(target - x_vals)
l2_y_out = sess.run(l2_y_vals)
# L1 loss
# L = abs(pred - actual)
l1_y_vals = tf.abs(target - x_vals)
l1_y_out = sess.run(l1_y_vals)
# Plot the output:
x_array = sess.run(x_vals)
plt.plot(x_array, l2_y_out, 'b-', label='L2 Loss')
plt.plot(x_array, l1_y_out, 'r--', label='L1 Loss')
plt.ylim(-0.2, 5)
plt.legend(loc='lower right', prop={'size': 11})
plt.show()
sess.close()
# ###### Categorical Predictions ######
x_vals = tf.linspace(-3., 5., 500)
target = tf.constant(1.)
targets = tf.fill([500,], 1.)
sess = tf.Session()
# Cross entropy loss
# L = -actual * (log(pred)) - (1-actual)(log(1-pred))
xentropy_y_vals = - tf.multiply(target, tf.log(x_vals)) - tf.multiply((1. - target), tf.log(1. - x_vals))
xentropy_y_out = sess.run(xentropy_y_vals)
# Sigmoid entropy loss
# L = -actual * (log(sigmoid(pred))) - (1-actual)(log(1-sigmoid(pred)))
# or
# L = max(actual, 0) - actual * pred + log(1 + exp(-abs(actual)))
xentropy_sigmoid_y_vals = tf.nn.sigmoid_cross_entropy_with_logits(logits=x_vals, labels=targets)
xentropy_sigmoid_y_out = sess.run(xentropy_sigmoid_y_vals)
# Plot the output
x_array = sess.run(x_vals)
plt.plot(x_array, xentropy_y_out, 'r--', label='Cross Entropy Loss')
plt.plot(x_array, xentropy_sigmoid_y_out, 'k-.', label='Cross Entropy Sigmoid Loss')
plt.ylim(-1.5, 3)
#plt.xlim(-1, 3)
plt.legend(loc='lower right', prop={'size': 11})
plt.show()
# ### Exercise modelue_3_0
# # We will use some of the loss functions in further studies
| true |
5cb583a31b1993419cb6b2ca3691609d4b74b56f | Python | Data-Designer/Leetcode-Travel | /leetcode/138.复制带随机指针的链表.py | UTF-8 | 1,210 | 3.15625 | 3 | [
"MIT"
] | permissive | '''
Description: hash表,先单纯复制然后再处理random
version:
Author: Data Designer
Date: 2021-08-30 10:13:31
LastEditors: Data Designer
LastEditTime: 2021-08-30 10:32:23
'''
#
# @lc app=leetcode.cn id=138 lang=python3
#
# [138] 复制带随机指针的链表
#
# @lc code=start
"""
# Definition for a Node.
class Node:
def __init__(self, x: int, next: 'Node' = None, random: 'Node' = None):
self.val = int(x)
self.next = next
self.random = random
"""
class Solution:
    def copyRandomList(self, head: 'Node') -> 'Node':
        """Deep-copy a linked list whose nodes carry an extra `random` pointer.

        Two passes over the list:
          1. copy the `next` chain, recording original node -> copy in a map;
          2. wire each copy's `random` through that map.
        O(n) time, O(n) extra space.
        """
        hashmap = dict()  # original node -> its copy
        dummy = Node(-1)
        tail, tmp = dummy, head
        while tmp:
            node = Node(tmp.val)
            hashmap[tmp] = node  # remember the copy for the second pass
            tail.next = node
            tail = tail.next
            tmp = tmp.next
        tail, tmp = dummy.next, head
        while tmp:
            if tmp.random:
                # point the copy at the copy of the original's random target
                tail.random = hashmap[tmp.random]
            else:
                # BUG FIX: previously assigned `tmp.random = None` -- i.e. to
                # the *input* node rather than the copy.  Copies default to
                # random=None so the output happened to be right; make the
                # intent explicit on the copy and leave the input untouched.
                tail.random = None
            tail = tail.next
            tmp = tmp.next
        return dummy.next
# @lc code=end
| true |
5b3c5edd9d71a0dde216a3217658d265b54432a2 | Python | karlhl/Machine-Learning | /1.4RNN/RNN-classifier/RNN_classifier_gpu.py | UTF-8 | 2,278 | 2.734375 | 3 | [] | no_license | import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torchvision.transforms as transforms
import os
EPOCH = 30               # number of full passes over the training set
BATCH_SIZE = 64          # mini-batch size for the DataLoader
TIME_STEP = 28           # rnn time step / image height (one row per step)
INPUT_SIZE = 28          # rnn input size / image width (pixels per row)
LR = 0.01                # learning rate for Adam
DOWNLOAD_MNIST = False   # flipped to True below when the data is missing
use_cuda = False
# Use the GPU whenever one is available.
if torch.cuda.is_available():
    use_cuda = True
if not(os.path.exists('./mnist/')) or not os.listdir('./mnist/'):
    # no mnist dir, or the mnist dir is empty: trigger the download
    DOWNLOAD_MNIST = True
# Training set: images scaled to [0, 1] tensors, batched by the DataLoader.
train_data = dsets.MNIST(
    root="./mnist/",
    train=True,
    transform = transforms.ToTensor(),
    download=DOWNLOAD_MNIST,
)
train_loader = torch.utils.data.DataLoader(dataset=train_data,batch_size=BATCH_SIZE,shuffle=True)

# Test set, evaluated in one batch during training.
test_data = dsets.MNIST(root='./mnist/', train=False, transform=transforms.ToTensor())
# BUG FIX: .cuda() was called unconditionally here, crashing on CPU-only
# machines even though a use_cuda flag exists; move to the GPU only when
# it is actually available.
test_x = test_data.test_data.type(torch.FloatTensor)/255.   # normalize to [0, 1]
test_y = test_data.test_labels
if use_cuda:
    test_x = test_x.cuda()
    test_y = test_y.cuda()
class RNN(nn.Module):
    """Single-layer LSTM over the 28 image rows, then a linear classifier."""

    def __init__(self):
        super(RNN, self).__init__()
        self.rnn = nn.LSTM(
            input_size=INPUT_SIZE,
            hidden_size=64,
            num_layers=1,
            batch_first=True,
        )
        self.out = nn.Linear(64, 10)

    def forward(self, x):
        # x: (batch, time_step, input_size); None -> zero initial hidden state
        lstm_out, _states = self.rnn(x, None)
        last_step = lstm_out[:, -1, :]  # classify from the final time step only
        return self.out(last_step)
rnn = RNN()
if use_cuda:
    rnn.cuda()

optimizer = torch.optim.Adam(rnn.parameters(),lr=LR)
loss_func = nn.CrossEntropyLoss()

for epoch in range(EPOCH):
    for step,(b_x,b_y) in enumerate(train_loader):
        b_x = b_x.view(-1, 28, 28)  # reshape to (batch, time_step, input_size)
        if use_cuda:
            b_x = b_x.cuda()
            b_y = b_y.cuda()
        output = rnn(b_x)
        loss = loss_func(output,b_y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Evaluate on the whole test set every 50 steps.
        if step % 50 == 0:
            test_out = rnn(test_x)
            # BUG FIX: pred_y was forced onto the GPU with .cuda() even when
            # use_cuda is False, crashing on CPU-only machines.  test_out
            # already lives on the same device as test_y, so no transfer is
            # needed.
            pred_y = torch.max(test_out,1)[1].data
            accuracy = torch.sum(pred_y==test_y).type(torch.FloatTensor)/test_y.size(0)
            print('epoch:{}, step:{}, loss:{},acc:{:.4f}'.format(epoch,step,loss.data.cpu().numpy(),accuracy))
| true |
478998af9a3aae5058a5dd0258bdd7d85a1205b8 | Python | MorgannSabatier/gpt3_gender | /code/get_entity_info.py | UTF-8 | 19,287 | 3.09375 | 3 | [] | no_license | """
Getting entities in stories
and the pronouns associated with them.
"""
import os
import csv
from collections import defaultdict, Counter
import json
import re
import numpy as np
# Root directory for all intermediate/output files (hard-coded absolute path).
LOGS = '/mnt/data0/lucy/gpt3_bias/logs/'
def remove_punct(s):
    """Return `s` with every non-alphanumeric character removed."""
    # re.sub with a module-cached pattern; the old version recompiled the
    # regex on every call and carried a commented-out punctuation-only variant.
    return re.sub('[^a-zA-Z0-9]', '', s)
def get_characters_to_prompts(prompts_path, tokens_path, txt_path, char_idx_path, num_gens=5):
    '''
    Map each prompted character to the token spans of its generated stories.

    Input: path to original prompts, path to tokens.
    Assumes that the generated stories are in the same order as the prompts,
    with num_gens stories per prompt.  Stories in both the plaintext and the
    token files are delimited by a run of twenty '@' lines/tokens.
    Writes one {character name: [(story_idx, start token, end token)]} JSON
    per title into char_idx_path.
    '''
    count = 0            # titles that had at least one prompt
    char_story_count = 0 # titles for which a char->spans JSON was written
    for filename in os.listdir(tokens_path):
        title = filename.replace('.tokens', '')
        print(title)
        char_order = [] # character, where index is generated story index
        with open(prompts_path + title, 'r') as infile:
            reader = csv.reader(infile, delimiter='\t')
            for row in reader:
                char_ID = row[0]
                char_name = row[1]
                prompt = row[2]
                # each prompt yields num_gens consecutive stories
                char_order.extend([(char_ID, char_name)]*num_gens)
        if len(char_order) == 0:
            print("----- No prompts -----")
            continue
        count += 1
        # sanity check that story has the character in it
        with open(txt_path + title, 'r') as infile:
            story = ''
            story_idx = 0
            dot_count = 0  # consecutive '@' delimiter lines seen so far
            for line in infile:
                if line.strip() == '@':
                    dot_count += 1
                else:
                    dot_count = 0
                if dot_count == 20:
                    # end of one story: it must mention its prompted character
                    assert char_order[story_idx][1] in story
                    story = ''
                    story_idx += 1
                    dot_count = 0
                else:
                    story += line
        # get mapping from story idx to its token span
        idx_tokenIDs = defaultdict(list) # { story idx : (start token ID, end token ID) }
        with open(tokens_path + title + '.tokens', 'r') as infile:
            reader = csv.DictReader(infile, delimiter='\t', quoting=csv.QUOTE_NONE)
            start_tokenID = 0
            end_tokenID = 0
            story_idx = 0
            dot_count = 0
            for row in reader:
                if row['normalizedWord'] == '@':
                    dot_count += 1
                else:
                    dot_count = 0
                if dot_count == 20:
                    # twenty '@' tokens close the current story's span
                    end_tokenID = row['tokenId']
                    idx_tokenIDs[story_idx] = (start_tokenID, int(end_tokenID))
                    story_idx += 1
                    start_tokenID = int(end_tokenID) + 1
                    dot_count = 0
        # the number of stories should be the number of character prompts * num_gens
        if len(idx_tokenIDs) != len(char_order):
            print("PROBLEM!!!!!", len(idx_tokenIDs), len(char_order))
            continue
        assert len(idx_tokenIDs) == len(char_order)
        # mapping from character to story spans
        char_story = defaultdict(list) # {character name: [(story_idx, start token idx, end token idx)] }
        for story_idx in idx_tokenIDs:
            tup = (story_idx, idx_tokenIDs[story_idx][0], idx_tokenIDs[story_idx][1])
            char_story[char_order[story_idx][1]].append(tup)
        with open(char_idx_path + title + '.json', 'w') as outfile:
            char_story_count += 1
            json.dump(char_story, outfile)
    print(count)
    print(char_story_count)
def get_entities_dict(ents_path, title, main_characters):
    """Read <title>.ents and map (start token, end token) -> entity name.

    Only proper-person entities (NER tag PROP_PER) and entities that are
    main characters are kept.
    """
    span_to_name = {}
    ents_file = ents_path + title + '/' + title + '.ents'
    with open(ents_file, 'r') as infile:
        for line in infile:
            # tab-separated: start, end, NER tag, entity name
            fields = line.strip().split('\t')
            span = (int(fields[0]), int(fields[1]))
            ner, name = fields[2], fields[3]
            if ner == 'PROP_PER' or name in main_characters:
                span_to_name[span] = name
    return span_to_name
def get_coref_label_dict(ents_path, title, entities, idx2story):
    '''
    Get the coref group for every proper name person entity.
    Coref group is groupnumber_storyID, where storyIDs are unique,
    and group number is from the coref results.
    Entities that appear in no predicted coref chain are assigned fresh,
    unique group numbers (above the largest predicted group number).
    '''
    coref_label = {} # { (start, end, entity name) : coref_group_id }
    max_group = 0    # largest predicted group number seen so far
    with open(ents_path + title + '/' + title + '.predicted.conll.ents', 'r') as infile:
        for line in infile:
            # tab-separated: group, entity text, start token, end token
            contents = line.strip().split('\t')
            group = contents[0]
            entity = contents[1]
            start = int(contents[2])
            end = int(contents[3])
            # chain ids are scoped per story so chains never cross stories
            chain_id = group + '_' + str(idx2story[start])
            max_group = max(max_group, int(group))
            if (start, end) in entities:
                coref_label[(start, end, entities[start, end])] = chain_id
    # some entities don't have coref chains
    max_group += 1
    for tup in entities:
        start = tup[0]
        end = tup[1]
        if (start, end, entities[tup]) not in coref_label:
            # give each chainless entity its own singleton group id
            chain_id = str(max_group) + '_' + str(idx2story[start])
            coref_label[(start, end, entities[start, end])] = chain_id
            max_group += 1
    return coref_label
def get_coref_chain_dict(ents_path, title, pronouns, coref_label, idx2story):
    '''
    Get all of the pronouns associated with a
    coref group associated with entities.
    Each coref group is split by story, so that one story's pronouns are not
    connected to another.

    `pronouns` maps lowercase pronoun -> gender label (e.g. 'he' -> 'masc');
    returns { coref_group_ID : [gender labels] }.
    '''
    coref_chain = defaultdict(list) # { coref_group_ID : [pronouns] }
    # PERF FIX: the old code rebuilt list(coref_label.values()) and scanned it
    # for *every* input line (O(lines * entities)); build the membership set
    # once instead.
    labeled_chains = set(coref_label.values())
    with open(ents_path + title + '/' + title + '.predicted.conll.ents', 'r') as infile:
        for line in infile:
            # tab-separated: group, mention text, start token, end token
            contents = line.strip().split('\t')
            group = contents[0]
            entity = contents[1]
            start = int(contents[2])
            end = int(contents[3])
            chain_id = group + '_' + str(idx2story[start])
            # only groups containing proper names matter
            if chain_id in labeled_chains:
                if entity.lower() in pronouns:
                    coref_chain[chain_id].append(pronouns[entity.lower()])
    return coref_chain
def print_character_network(char_neighbors, char_pronouns):
    """Pretty-print each main character followed by its neighbor entries.

    `char_pronouns` is accepted for interface compatibility but is not used.
    """
    for main_char, neighbors in char_neighbors.items():
        print(main_char)
        print(' ------------------------- ')
        for nb in neighbors:
            print(nb['character_name'], nb['aliases'], nb['gender'])
        print()
def get_entities_pronouns(ents_path, prompts_path, char_idx_path, char_nb_path, char_group_path=None):
    '''
    For each named person, find how GPT-3 tends to gender that name
    based on coref chains in the text.
    For each main character, what are the genders of other named people in their
    stories?
    inputs:
    - path to entities
    - path to prompts
    Writes one {main character: [neighbor dicts]} JSON per title into
    char_nb_path, and (optionally) the raw character->coref-group-id map
    into char_group_path.
    '''
    # surface pronoun -> gender bucket used to tally each coref chain
    pronouns = {'he' : 'masc', 'his' : 'masc', 'him' : 'masc',
            'himself' : 'masc', 'she' : 'fem', 'her' : 'fem',
            'hers' : 'fem', 'herself' : 'fem', 'they' : 'neut', 'their' : 'neut',
            'them' : 'neut', 'theirs' : 'neut', 'theirself' : 'neut'}
    for title in os.listdir(ents_path):
        print(title)
        # now, get characters associated with a character
        if not os.path.exists(char_idx_path + title + '.json'): continue
        with open(char_idx_path + title + '.json', 'r') as infile:
            char_story = json.load(infile) # {character name: [(story idx, start token idx, end token idx)] }
        idx2story = {} # token id to story idx
        for char in char_story:
            for tup in char_story[char]:
                story_idx, start, end = tup
                for i in range(start, end + 1):
                    idx2story[i] = story_idx
        main_characters = set(char_story.keys())
        entities = get_entities_dict(ents_path, title, main_characters) # (start, end) : entity name
        coref_label = get_coref_label_dict(ents_path, title, entities, idx2story) # entities to coref group
        coref_chain = get_coref_chain_dict(ents_path, title, pronouns, coref_label, idx2story) # coref group to pronouns
        char_pronouns = defaultdict(Counter) # {character name : [pronouns in all coref chains]}
        # This is a list because one name, e.g. Michelle, might have multiple coref chains to create a cluster
        char_group_ids = defaultdict(set) # { char_ID : set([coref_group_ids]) }
        for ent in entities:
            char = entities[ent]
            story_idx = idx2story[ent[0]]
            if (ent[0], ent[1], char) in coref_label:
                # keys are "<name>_<story idx>" so the same name in different
                # stories stays separate
                char_group_ids[char + '_' + str(story_idx)].add(coref_label[(ent[0], ent[1], char)])
        if char_group_path is not None:
            # sets are not JSON-serializable; dump list-valued copy
            cgi_out = defaultdict(list)
            for k in char_group_ids:
                cgi_out[k] = list(char_group_ids[k])
            with open(char_group_path + title + '.json', 'w') as outfile:
                json.dump(cgi_out, outfile)
        # if "Michelle" and "Michelle Obama" are in the same coref chain together, we group their clusters together
        # We can have one name be the "base char" that other renamings of that character are then grouped with
        chainID2name = {} # one to one mapping of already-seen chain_id to base_char
        aliases = defaultdict(set) # base_char : [other chars that share coref chains with it]
        for char in char_group_ids:
            pns = []
            base_char = char
            for group in char_group_ids[char]:
                if group in chainID2name:
                    # chain already claimed: merge this name into the earlier base
                    base_char = chainID2name[group]
                    char_name = '_'.join(char.split('_')[:-1])
                    aliases[base_char].add(char_name)
                pns.extend(coref_chain[group])
            for group in char_group_ids[char]:
                # assign this group to base
                chainID2name[group] = base_char
            pns = Counter(pns)
            char_pronouns[base_char] += pns
        char_story_rev = {} # story idx to character
        for char in char_story:
            story_indices = char_story[char] # list of story starts and ends
            for story_span in story_indices:
                story_idx = story_span[0]
                char_story_rev[story_idx] = char
        # {character name : [{"character name": "", "gender": {masc: #, fem: #, neut: #}, "aliases": [name]}] }
        char_neighbors = defaultdict(list)
        seen_mains = set() # set of main character _ story idx
        for base_char in char_pronouns:
            story_idx = int(base_char.split('_')[-1])
            main_char = char_story_rev[story_idx]
            if base_char == main_char + '_' + str(story_idx):
                seen_mains.add(base_char)
            neighbor_dict = {}
            neighbor_dict['character_name'] = base_char
            neighbor_dict['aliases'] = list(aliases[base_char])
            neighbor_dict['gender'] = char_pronouns[base_char]
            char_neighbors[main_char].append(neighbor_dict)
        # due to NER error, some main characters weren't recognized and thus have no pronouns
        for story_idx in char_story_rev:
            main_char = char_story_rev[story_idx]
            if main_char + '_' + str(story_idx) not in seen_mains:
                # emit an empty-gender placeholder so every story is represented
                neighbor_dict = {}
                base_char = main_char + '_' + str(story_idx)
                neighbor_dict['character_name'] = base_char
                neighbor_dict['aliases'] = []
                neighbor_dict['gender'] = {}
                char_neighbors[main_char].append(neighbor_dict)
        with open(char_nb_path + title + '.json', 'w') as outfile:
            json.dump(char_neighbors, outfile)
def calculate_recurrence(tokens_path, char_idx_path):
    """Report how often and how widely the main character recurs per story.

    Prints the mean number of main-character mentions and the mean token
    distance between the first and last mention, across all stories.
    """
    num_times = [] # number of times main character occurs in story
    ranges = [] # range of tokens the main character spans
    for f in os.listdir(tokens_path):
        print(f)
        title = f.replace('.tokens', '')
        if not os.path.exists(char_idx_path + title + '.json'): continue
        with open(char_idx_path + title + '.json', 'r') as infile:
            char_story = json.load(infile)
        start2char = {}
        for charname in char_story:
            for tup in char_story[charname]:
                # BUG FIX: tup is (story_idx, start_token, end_token).  The
                # lookup below tests `int(row['tokenId']) in start2char`, i.e.
                # it needs the story's *start token* as the key, but the old
                # code keyed by tup[0] (the story index).
                start2char[tup[1]] = charname
        with open(tokens_path + f, 'r') as infile:
            curr_start = None
            charId = None
            main_char_idx = [] # all of the indices in which the main character occurs
            reader = csv.DictReader(infile, delimiter='\t', quoting=csv.QUOTE_NONE)
            for row in reader:
                if int(row['tokenId']) in start2char:
                    # a new story begins here: flush the previous story's stats
                    if curr_start is not None:
                        num_times.append(len(main_char_idx))
                        ranges.append(main_char_idx[-1] - main_char_idx[0])
                    curr_start = int(row['tokenId'])
                    charId = None
                    main_char_idx = []
                # first surface match of the main character's name fixes its
                # coref character id for the rest of the story
                if row['originalWord'] == start2char[curr_start] and charId is None:
                    charId = row['characterId']
                if row['characterId'] == charId:
                    main_char_idx.append(int(row['tokenId']))
            num_times.append(len(main_char_idx))
            ranges.append(main_char_idx[-1] - main_char_idx[0])
    print(np.mean(num_times), np.mean(ranges))
def get_topics_for_txt(txt_path, prompts_path, topic_out_path, \
    gender_path, generated, story_topics, matched=False, num_gens=5):
    '''
    Join each story's topic distribution with its main character's gender
    label and dump the result to topic_out_path as JSON columns
    {'gender': [...], 'topic': [...], 'value': [...]}.

    `story_topics` maps story id -> {topic id: weight}; `matched=True`
    restricts to prompt-matched character/story pairs and is only valid for
    generated stories.
    '''
    if matched:
        assert generated == True
        print("doing this for matched prompts")
        with open(LOGS + 'prompt_matching/same_prompt_pairs.json', 'r') as infile:
            matched_pairs = json.load(infile)
    gender_topics = {'gender':[], 'topic':[], 'value':[]}
    for title in sorted(os.listdir(txt_path)):
        char_order = [] # character, where index is generated story index
        with open(prompts_path + title, 'r') as infile:
            reader = csv.reader(infile, delimiter='\t')
            for row in reader:
                char_ID = row[0]
                char_name = row[1]
                prompt = row[2]
                # each prompt yields num_gens consecutive stories
                char_order.extend([char_name]*num_gens)
        if len(char_order) == 0: continue
        with open(gender_path + title + '.json', 'r') as infile:
            gender_dict = json.load(infile)
        for i, char in enumerate(char_order):
            if matched:
                # only select matched pairs
                if char + '_' + str(i) not in matched_pairs[title]:
                    continue
            # story ids are 1-based; originals carry an "ORIG_" prefix
            story_title_id = title + str(i+1)
            if not generated:
                story_title_id = 'ORIG_' + story_title_id
            topic_dict = story_topics[story_title_id]
            assert char in gender_dict
            neighbors = gender_dict[char]
            gender = None
            for neighbor in neighbors:
                if neighbor['character_name'] == char + '_' + str(i):
                    gender = neighbor['gender_label']
            if gender is None:
                # failed to detect main character entity
                print("PROBLEM!!!!", title, char, i)
                gender = 'other'
            for topic_id in topic_dict:
                gender_topics['gender'].append(gender)
                gender_topics['topic'].append(topic_id)
                gender_topics['value'].append(topic_dict[topic_id])
    with open(topic_out_path, 'w') as outfile:
        json.dump(gender_topics, outfile)
def get_gendered_topics(txt_path, prompts_path, topic_out_path, \
    gender_path, generated, matched=False):
    """Load per-story topic distributions and hand them to get_topics_for_txt.

    Story IDs prefixed "ORIG_" are original book excerpts; others are
    generated stories.  `generated` selects which population is kept.
    """
    topic_dir = LOGS + 'topics_0.9'
    # NOTE(review): this path ends in .gz but is read as plain text -- confirm
    # the file is actually uncompressed.
    doc_topic_file = '%s/doc-topics.gz' % topic_dir
    # use context managers so the file handles are closed deterministically
    with open(doc_topic_file) as infile:
        doc_topics = infile.read().splitlines() # list of topics
    with open(topic_dir + '/story_id_order') as infile:
        story_ids = infile.read().splitlines() # story IDs, parallel to doc_topics
    story_topics = defaultdict(dict) # story ID : {topic id : value, topic id: value}
    for i, doc in enumerate(doc_topics):
        contents = doc.split('\t')
        # (renamed from `i` to avoid shadowing the enumerate index)
        topics = [float(t) for t in contents[2:]]
        story_title_id = story_ids[i]
        is_orig = story_title_id.startswith("ORIG_")
        # BUG FIX: the old test `(generated & (not is_orig)) or
        # (not generated & is_orig)` parsed as `not (generated & is_orig)`
        # because `&` binds tighter than `not`, so with generated=False
        # *every* story was kept.  Use explicit boolean logic.
        if (generated and not is_orig) or ((not generated) and is_orig):
            assert len(topics) == 50
            for topic_id, value in enumerate(topics):
                story_topics[story_title_id][topic_id] = value
    if generated:
        get_topics_for_txt(txt_path, prompts_path, \
            topic_out_path, gender_path, generated, story_topics, matched=matched)
    else:
        get_topics_for_txt(txt_path, prompts_path, \
            topic_out_path, gender_path, generated, story_topics, num_gens=1)
def main():
    # Pipeline configuration flags: `matched` (prompt-matched generated
    # stories) takes precedence over `generated`; both False selects the
    # original book excerpts.
    generated = True
    matched = False
    if matched:
        ents_path = LOGS + 'generated_0.9_ents/'
        tokens_path = LOGS + 'plaintext_stories_0.9_tokens/'
        txt_path = LOGS + 'plaintext_stories_0.9/'
        char_idx_path = LOGS + 'char_indices_0.9/'
        char_nb_path = LOGS + 'char_neighbors_0.9/'
        topic_out_path = LOGS + 'gender_topics_0.9_matched.json'
        gender_path = LOGS + 'char_gender_0.9/'
        char_group_path = LOGS + 'char_coref_groups/'
        num_gens = 5
    elif generated:
        ents_path = LOGS + 'generated_0.9_ents/'
        tokens_path = LOGS + 'plaintext_stories_0.9_tokens/'
        txt_path = LOGS + 'plaintext_stories_0.9/'
        char_idx_path = LOGS + 'char_indices_0.9/'
        char_nb_path = LOGS + 'char_neighbors_0.9/'
        topic_out_path = LOGS + 'gender_topics_0.9.json'
        gender_path = LOGS + 'char_gender_0.9/'
        char_group_path = LOGS + 'char_coref_groups/'
        num_gens = 5
    else:
        # original book excerpts: one "story" per prompt
        ents_path = LOGS + 'book_excerpts_ents/'
        tokens_path = LOGS + 'book_excerpts_tokens/'
        txt_path = LOGS + 'book_excerpts/'
        char_idx_path = LOGS + 'orig_char_indices/'
        char_nb_path = LOGS + 'orig_char_neighbors/'
        topic_out_path = LOGS + 'orig_gender_topics.json'
        gender_path = LOGS + 'orig_char_gender/'
        char_group_path = LOGS + 'orig_char_coref_groups/'
        num_gens = 1
    prompts_path = LOGS + 'original_prompts/'
    # Earlier/later pipeline stages are kept here, commented out, and run
    # individually as needed.
    #get_characters_to_prompts(prompts_path, tokens_path, txt_path, char_idx_path, num_gens=num_gens)
    get_entities_pronouns(ents_path, prompts_path, char_idx_path, char_nb_path, char_group_path=char_group_path)
    #calculate_recurrence(tokens_path, char_idx_path)
    #get_gendered_topics(txt_path, prompts_path, topic_out_path, gender_path, generated, matched=matched)
if __name__ == '__main__':
    main()
| true |
1dea2b38e2e8581773b2e5070d170cfdf02f87a1 | Python | abhi8893/Intensive-python | /exercises/factorials.py | UTF-8 | 446 | 4.34375 | 4 | [] | no_license | # Find factorials of a list of numbers
def factorialize(numbers):
    """Return factorials of a list of numbers.

    >>> factorialize([1, 2, 3, 4, 5])
    [1, 2, 6, 24, 120]
    """
    # BUG FIX: the old doctest put the expected output on a `>>>` prompt line,
    # so it never actually verified anything.  Also use the stdlib's
    # math.factorial instead of the hand-rolled recursive helper.
    import math
    return [math.factorial(n) for n in numbers]
def fact(n):
    """Recursively compute n! (base case: 0! == 1)."""
    return 1 if n == 0 else n * fact(n - 1)
def main():
    # Demo: factorials of 1..5 -> [1, 2, 6, 24, 120]
    print(factorialize(range(1, 6)))
if __name__ == '__main__':
    main()
| true |
c5ed6d7c52d8765028b2ba2bf1fd932a32f359b3 | Python | jimxliu/rosalind | /dict.py | UTF-8 | 227 | 3.5625 | 4 | [] | no_license | with open("dict.txt","r") as f:
s = f.readline().strip()
l = s.split(" ")
# Count occurrences of each word in `l` (a hand-rolled collections.Counter).
my_dict = {}
for word in l:
    if word in my_dict:
        my_dict[word] += 1
    else:
        my_dict[word] = 1
# Print each distinct word with its count.
for key, value in my_dict.items():
    print(key,value)
| true |
b4e2dc291d0ebc596fe74048efc0787966830b9d | Python | palunel/DemoGitRepo | /app.py | UTF-8 | 146 | 2.578125 | 3 | [] | no_license | print("This is a GitHub repository demo app.")
# Demo status messages printed in sequence (output strings left as-is).
print("Updated on local repository")
print("Yeah we are done!")
print("THought we were done?!")
| true |
8c2e2ee33cf32e5baaaa113eef80d59c089ab801 | Python | Agungtirtayashaa/labspy02 | /lab.py | UTF-8 | 274 | 3.765625 | 4 | [] | no_license | print (" tugas praktikum 2")
# Read three integers and report which is largest (Indonesian prompts/labels).
a = int(input('Masukkan nilai a: '))
b = int(input('Masukkan nilai b: '))
c = int(input('Masukkan nilai c: '))
# BUG FIX: the strict `>` comparisons misreported ties -- e.g. a == b == 5,
# c == 1 fell through to the final else and printed "C yang terbesar".
# With `>=`, a tied maximum is reported as the earliest of the tied values.
if a >= b and a >= c:
    print('A yang terbesar')
elif b >= a and b >= c:
    print('B yang terbesar')
else:
    print('C yang terbesar')
| true |
139279959956883586757481f7a19a5f1876c58f | Python | VladOvadiuc/Python | /Student_Lab_Assignments/AssigController.py | UTF-8 | 1,965 | 2.953125 | 3 | [] | no_license | from domain import Assignment
class AssigController:
    """Controller layer over an assignment repository.

    NOTE(review): the repository's interface (add/remove/update/removeAll/
    getAll) is assumed from usage here -- confirm against its class.
    """
    def __init__(self,assigRepo):
        self.__assigRepo = assigRepo
    def findID(self, ID):
        '''
        Search an assignment by its id
        :param ID: the id to be found
        :return: the matching assignment, or -1 when no assignment has that id
        '''
        for i in range(len(self.__assigRepo.getAll())):
            s = self.__assigRepo.getAll()[i]
            if s.getID() == ID:
                return s
        return -1
    def add(self,assigID,desc,deadline):
        '''
        Create an Assignment and add it to the repository
        :param assigID: id of the new assignment
        :param desc: its description
        :param deadline: its deadline
        :return: the newly created assignment
        '''
        assig=Assignment(assigID,desc,deadline)
        self.__assigRepo.add(assig)
        return assig
    def remove(self,ID):
        '''
        Remove every assignment with the given id
        :param ID: the id of the assignment(s) to remove
        :return: the last assignment iterated over

        NOTE(review): if the repository is empty this raises
        UnboundLocalError (`s` is never bound), and the returned `s` is just
        the last item seen, not necessarily a removed one -- confirm the
        intended behaviour.
        '''
        exist = True
        # restart the scan after each removal, since removing while iterating
        # the repository list would skip elements
        while exist == True:
            exist = False
            for s in self.__assigRepo.getAll():
                if s.getID() == ID:
                    self.__assigRepo.remove(s)
                    exist = True
        return s
    def removeAllAssig(self):
        '''
        Remove all assignments from the repository
        '''
        self.__assigRepo.removeAll()
    def getAllAssig(self):
        '''
        :return: the list of all assignments in the repository
        '''
        return self.__assigRepo.getAll()
    def update(self,id,descr,deadline):
        '''
        Update the description and the deadline of an assignment by id
        :param id: the id of the assignment to update
        :param descr: the new description
        :param deadline: the new deadline

        NOTE(review): the assignment is mutated in place, so `old` and `new`
        reference the same (already-updated) object when passed to the
        repository's update() -- confirm that is what the repo expects.
        '''
        for s in self.__assigRepo.getAll():
            if s.getID()==id:
                old=s
                s.setDescription(descr)
                s.setDeadline(deadline)
                new=s
                return self.__assigRepo.update(old, new)