Dataset schema (one source file per row; ⌀ marks a nullable field):

| column | type | range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2–616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–69 |
| license_type | string | 2 classes |
| repo_name | string | length 5–118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nullable (⌀) |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | length 2–10.3M |
| authors | list | length 1 |
| author_id | string | length 0–212 |

Each record below lists these fields in the order above, one value per line, with `|` separating fields.
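For orientation, here is a minimal sketch of how rows with this schema could be streamed and filtered, assuming the table was exported from a Hugging Face `datasets`-compatible dataset; the dataset path, split, and the particular filter on `license_type`/`is_vendor`/`is_generated` are illustrative assumptions, not taken from this dump:

```python
from datasets import load_dataset

# Hypothetical dataset path -- substitute whatever dataset this table was exported from.
ds = load_dataset("org/code-dataset", split="train", streaming=True)

# Keep only permissively licensed, non-vendored, human-written files.
for row in ds:
    if (row["license_type"] == "permissive"
            and not row["is_vendor"]
            and not row["is_generated"]):
        print(row["repo_name"], row["path"], row["length_bytes"])
        break  # stop after the first matching row
```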
f8a13792c40fbc578b3c9e12900d02d5fe048c5f
|
49f2e737393de17c5a97bdc597d3708066f984d1
|
/__code/ui_registration_tool.py
|
1755de1f553f2b163d4dc84eb91a7b33c45cb8d4
|
[] |
no_license
|
RicardoCarreon/Neutron_imaging
|
adc226f807fc41c4bba11b5156fdf72092d34942
|
84d564850ed246070ddb4586501db194210dc5a7
|
refs/heads/master
| 2021-05-25T19:21:23.149964
| 2020-02-25T01:27:03
| 2020-02-25T01:27:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,504
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/Volumes/my_book_thunderbolt_duo/git/IPTS/python_notebooks/ui/ui_registration_tool.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(610, 532)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
MainWindow.setSizePolicy(sizePolicy)
MainWindow.setMinimumSize(QtCore.QSize(600, 500))
MainWindow.setMaximumSize(QtCore.QSize(650, 550))
MainWindow.setBaseSize(QtCore.QSize(650, 550))
MainWindow.setTabShape(QtWidgets.QTabWidget.Rounded)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.verticalLayout_3 = QtWidgets.QVBoxLayout()
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.up_button = QtWidgets.QPushButton(self.centralwidget)
self.up_button.setMaximumSize(QtCore.QSize(100, 16777215))
self.up_button.setText("")
self.up_button.setFlat(True)
self.up_button.setObjectName("up_button")
self.horizontalLayout.addWidget(self.up_button)
spacerItem1 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem1)
self.verticalLayout_3.addLayout(self.horizontalLayout)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
spacerItem2 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem2)
self.left_button = QtWidgets.QPushButton(self.centralwidget)
self.left_button.setText("")
self.left_button.setFlat(True)
self.left_button.setObjectName("left_button")
self.verticalLayout.addWidget(self.left_button)
spacerItem3 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem3)
self.horizontalLayout_3.addLayout(self.verticalLayout)
self.rotate_right_button = QtWidgets.QPushButton(self.centralwidget)
self.rotate_right_button.setText("")
self.rotate_right_button.setFlat(True)
self.rotate_right_button.setObjectName("rotate_right_button")
self.horizontalLayout_3.addWidget(self.rotate_right_button)
self.small_rotate_right_button = QtWidgets.QPushButton(self.centralwidget)
self.small_rotate_right_button.setText("")
self.small_rotate_right_button.setFlat(True)
self.small_rotate_right_button.setObjectName("small_rotate_right_button")
self.horizontalLayout_3.addWidget(self.small_rotate_right_button)
self.small_rotate_left_button = QtWidgets.QPushButton(self.centralwidget)
self.small_rotate_left_button.setText("")
self.small_rotate_left_button.setFlat(True)
self.small_rotate_left_button.setObjectName("small_rotate_left_button")
self.horizontalLayout_3.addWidget(self.small_rotate_left_button)
self.rotate_left_button = QtWidgets.QPushButton(self.centralwidget)
self.rotate_left_button.setText("")
self.rotate_left_button.setFlat(True)
self.rotate_left_button.setObjectName("rotate_left_button")
self.horizontalLayout_3.addWidget(self.rotate_left_button)
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
spacerItem4 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_2.addItem(spacerItem4)
self.right_button = QtWidgets.QPushButton(self.centralwidget)
self.right_button.setText("")
self.right_button.setFlat(True)
self.right_button.setObjectName("right_button")
self.verticalLayout_2.addWidget(self.right_button)
spacerItem5 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_2.addItem(spacerItem5)
self.horizontalLayout_3.addLayout(self.verticalLayout_2)
self.verticalLayout_3.addLayout(self.horizontalLayout_3)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
spacerItem6 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem6)
self.down_button = QtWidgets.QPushButton(self.centralwidget)
self.down_button.setMaximumSize(QtCore.QSize(100, 16777215))
self.down_button.setText("")
self.down_button.setFlat(True)
self.down_button.setObjectName("down_button")
self.horizontalLayout_2.addWidget(self.down_button)
spacerItem7 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem7)
self.verticalLayout_3.addLayout(self.horizontalLayout_2)
self.verticalLayout_4.addLayout(self.verticalLayout_3)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 610, 22))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
self.up_button.clicked.connect(MainWindow.up_button_clicked)
self.right_button.clicked.connect(MainWindow.right_button_clicked)
self.down_button.clicked.connect(MainWindow.down_button_clicked)
self.left_button.clicked.connect(MainWindow.left_button_clicked)
self.rotate_left_button.clicked.connect(MainWindow.rotate_left_button_clicked)
self.rotate_right_button.clicked.connect(MainWindow.rotate_right_button_clicked)
self.small_rotate_right_button.clicked.connect(MainWindow.small_rotate_right_button_clicked)
self.small_rotate_left_button.clicked.connect(MainWindow.small_rotate_left_button_clicked)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
|
[
"bilheuxjm@ornl.gov"
] |
bilheuxjm@ornl.gov
|
cbdb5a9e303ebf965d38718b40330400c64a50ee
|
293fc20a69f317f8360de484b9f671a5d84e93c5
|
/steeb/preference.py
|
986615c909d40625ca3dc4aea980b1c55b16f11c
|
[
"Unlicense"
] |
permissive
|
edsoncudjoe/steeb
|
641556a723901a5216c88b0bc641b1312113346e
|
11b845cac2fad4e696561652509ebff76f89be3d
|
refs/heads/master
| 2021-01-18T14:57:44.767828
| 2015-10-20T13:34:54
| 2015-10-20T13:34:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 117
|
py
|
import getpass
download_dir = "/home/" + getpass.getuser() + "/Music/Downloads/"
force_hq = False #TODO: implement
|
[
"robertjankeizer@gmail.com"
] |
robertjankeizer@gmail.com
|
b8b657e5751f27c11d096fdd94bdce2298bc764f
|
a750c280430f15cd6f5554fe8f19032713a2b75e
|
/predictor/predictor.py
|
967559e0ca1239057498666c4f6d9de725d4d5ac
|
[] |
no_license
|
Tahiya31/Stock-Guru
|
2478b9e41af9b9f45a7de6a6c525dd2b60249ecf
|
0dfe9a16f75c1f9cb57099b63b0e4b13a9240384
|
refs/heads/master
| 2020-04-03T08:41:25.759110
| 2019-07-31T18:16:16
| 2019-07-31T18:16:16
| 155,141,368
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,070
|
py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 10 18:42:01 2018
@author: giorgoschantzialexiou
"""
import os
import sys
import json
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from subprocess import check_output
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential
from keras.models import model_from_json
from sklearn.cross_validation import train_test_split
import time #helper libraries
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
from numpy import newaxis
from pandas import read_csv
import real_time
def Create_Min_Max_Scaler(data_csv,stock_name):
prices_dataset = pd.read_csv(data_csv, header=0)
stock = prices_dataset[prices_dataset['stock_name']==stock_name]
stock_prices = stock.close.values.astype('float32')
stock_prices = stock_prices.reshape(stock_prices.shape[0], 1)
to_create_test = stock_prices
scaler = MinMaxScaler(feature_range=(0, 1))
stock_prices = scaler.fit_transform(stock_prices)
# create the test data set:
train_size = int(len(to_create_test) * 0.80) # TODO: break this to different function
test_size = len(to_create_test) - train_size # it is not part of the minscaler
test = to_create_test[train_size:len(to_create_test),:]
return (scaler,test)
def Deserialize_Model(model_dir):
json_dir = model_dir + '.json'
h5_dir = model_dir + '.h5'
json_file = open(json_dir,'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights(h5_dir)
print("Loaded model from disk")
return loaded_model
def predict_stock_price(model,firstValue, days = 1):
prediction_seqs = []
curr_frame = firstValue
for i in range(days):
predicted = []
print(model.predict(curr_frame[newaxis,:,:]))
predicted.append(model.predict(curr_frame[newaxis,:,:])[0,0])
curr_frame = curr_frame[0:]
curr_frame = np.insert(curr_frame[0:], i+1, predicted[-1], axis=0)
prediction_seqs.append(predicted[-1])
return prediction_seqs
def create_dataset(dataset, look_back=1):
dataX, dataY = [], []
for i in range(len(dataset)-look_back-1):
a = dataset[i:(i+look_back), 0]
dataX.append(a)
dataY.append(dataset[i + look_back, 0])
return np.array(dataX), np.array(dataY)
def recomend_buy_sell_hold(days): ## HISTORICAL
recomendation = "HOLD" #TODO: implement the indicator
past_data = read_csv(data_csv)
past_data = past_data['close']
    past_data = past_data.append(df, ignore_index=True)  # append returns a new object; assign it back
past_data = past_data.tail(30)
ema_26_days = pd.ewma(past_data, span=26)
ema_12_days = pd.ewma(past_data, span=12)
MACD = ema_12_days - ema_26_days
##plt.plot(MACD)
MACD = MACD.tolist()
if days < 5:
days = 5
MACD = MACD[-6:]
start = MACD[0]
# if start >0 and we find a price less than zero recomend SELL
if start>=0:
for price in MACD:
if price < 0:
recomendation = "SELL"
else:
for price in MACD:
if price>=0:
recomendation = "BUY"
return recomendation
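# Worked example (illustrative, not from the original): if the last six MACD values
# are [0.4, 0.2, -0.1, -0.3, 0.1, 0.2], then start >= 0 and a negative value occurs,
# so recomendation becomes "SELL"; if the sign never flips, it stays "HOLD".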
if __name__=='__main__':
stock_name = 'AAPL'
days = 3
realtime_pred = False
if len(sys.argv) < 2:
        print 'Usage: Not all arguments have been specified'
else:
stock_name = sys.argv[1]
days = int(sys.argv[2])
#input_prediction = np.array([[float(sys.argv[3])]]) # current price
if len(sys.argv)==4:
            if int(sys.argv[3]) == 1:
realtime_pred = True
stock_name = stock_name.upper()
predictor_dir = os.path.dirname(os.path.realpath(__file__))
trained_models_dir = os.path.join(predictor_dir,'trained_models')
model_dir = os.path.join(trained_models_dir,stock_name)
model_dir = os.path.join(model_dir,stock_name.lower())
data_csv = '../data/historical_stock_price_data/hist_' + stock_name + '.csv'
remake_scaler, test = Create_Min_Max_Scaler(data_csv,stock_name)
model = Deserialize_Model(model_dir)
look_back = 1
testX, testY = create_dataset(test, look_back)
testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
predict_length=5
last_price_data = read_csv(data_csv)
input_prediction = np.array([[float(last_price_data.tail(1)['close'].tolist()[0])]])
predictions = predict_stock_price(model, input_prediction, days)
print "predictions"
print(remake_scaler.inverse_transform(np.array(predictions).reshape(-1, 1)))
actual_predictions = remake_scaler.inverse_transform(np.array(predictions).reshape(-1, 1))
    ## saving predictions
predictions_file = os.path.join(predictor_dir,'predictions.json')
#predictions_file = '/Users/giorgoschantzialexiou/Repositories/stock_prediction_web_app/predictor/predictions.json'
data = {"success": False}
data['predictions'] = []
    data['stock_name'] = stock_name
predictions_to_recommend = []
i = 1
time_or_day = 'day'
if realtime_pred:
time_or_day = 'minute'
for predict in actual_predictions:
r = {time_or_day: i, 'predicted_price': str(predict[0])}
data['predictions'].append(r)
predictions_to_recommend.append(predict[0])
i += 1
predictions_to_recommend = np.asarray(predictions_to_recommend).reshape(len(predictions_to_recommend),1)
df = pd.DataFrame(predictions_to_recommend) ## new predictions in dataframe
if realtime_pred:
recomended_action = real_time.HFT_RSI_RECOM(days=days,stock_name=stock_name)
else:
recomended_action = recomend_buy_sell_hold(days)
data['recommendation'] = recomended_action
f = open(predictions_file,'w')
json.dump(data, f)
f.close()
|
[
"noreply@github.com"
] |
Tahiya31.noreply@github.com
|
4e983e2ce3a316265354c301224483e02e67df44
|
24be9d9e10f8e0f4fa5d222811fd1ab5831d9f28
|
/flask_homework/fruits/__init__.py
|
595e0ef7a15ca053cb92a2dae39ae1904356b1b7
|
[] |
no_license
|
zulteg/python-course-alphabet
|
470149c3e4fd2e58bdde79a2908ffba1d7438dc1
|
dd2399f6f45c42c5847cf3967441a64bdb64a4cf
|
refs/heads/master
| 2020-05-14T21:25:02.627900
| 2019-09-17T10:19:51
| 2019-09-17T10:19:51
| 181,962,678
| 0
| 0
| null | 2019-06-20T08:39:27
| 2019-04-17T20:20:47
|
Python
|
UTF-8
|
Python
| false
| false
| 580
|
py
|
from flask import Blueprint, render_template
from flask_homework.utils import FileManager
fruits = Blueprint('fruits', __name__, template_folder='templates')
@fruits.route("/fruits")
def list_page():
return render_template("fruits_list.html",
items=FileManager.load_data('fruits'))
@fruits.route("/fruits/add", methods=["POST"])
def add_item():
return 'success' if FileManager.add_item('fruits') else 'error'
@fruits.route("/fruits/rm", methods=["POST"])
def rm_item():
return 'success' if FileManager.rm_item('fruits') else 'error'
|
[
"zulteg@gmail.com"
] |
zulteg@gmail.com
|
57f8563ac31d86c804a07aaf81e266e5356e7ba3
|
5a29d690a031ba75d2f6747cf321a8eef70781f7
|
/mgrsconverter2.py
|
cb3f27885c4dd83af33dd01d25b50bc16108d6a4
|
[] |
no_license
|
J-Rigondo/MGRS_CONVERTER
|
59da54b20c026284b26c1d10095ca4818e353770
|
3387c304d00d1f7ac60c4347e4f654cb1e6b7ff5
|
refs/heads/master
| 2020-11-28T11:08:14.430238
| 2019-12-23T17:16:13
| 2019-12-23T17:16:13
| 229,793,787
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,600
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mgrsconverter2.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QFileDialog
from PyQt5.QtWidgets import QMessageBox
import mgrs
import pandas as pd
class Ui_MainWindow(QtWidgets.QMainWindow):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(385, 467)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(40, 40, 321, 51))
font = QtGui.QFont()
font.setFamily("Consolas")
font.setPointSize(30)
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(120, 390, 151, 21))
font = QtGui.QFont()
font.setPointSize(10)
self.label_2.setFont(font)
self.label_2.setObjectName("label_2")
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(240, 130, 111, 41))
self.pushButton.clicked.connect(self.fileUpload)
font = QtGui.QFont()
font.setFamily("Agency FB")
font.setBold(True)
font.setWeight(75)
self.pushButton.setFont(font)
self.pushButton.setObjectName("pushButton")
self.textEdit = QtWidgets.QTextEdit(self.centralwidget)
self.textEdit.setGeometry(QtCore.QRect(30, 130, 201, 41))
self.textEdit.setObjectName("textEdit")
self.textEdit_2 = QtWidgets.QTextEdit(self.centralwidget)
self.textEdit_2.setGeometry(QtCore.QRect(30, 180, 201, 41))
self.textEdit_2.setObjectName("textEdit_2")
self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_2.setGeometry(QtCore.QRect(240, 180, 111, 41))
self.pushButton_2.clicked.connect(self.saveDir)
font = QtGui.QFont()
font.setFamily("Agency FB")
font.setBold(True)
font.setWeight(75)
self.pushButton_2.setFont(font)
self.pushButton_2.setObjectName("pushButton_2")
self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_3.setGeometry(QtCore.QRect(30, 300, 321, 71))
self.pushButton_3.clicked.connect(self.getFile)
font = QtGui.QFont()
font.setFamily("Consolas")
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.pushButton_3.setFont(font)
self.pushButton_3.setObjectName("pushButton_3")
self.textEdit_3 = QtWidgets.QTextEdit(self.centralwidget)
self.textEdit_3.setGeometry(QtCore.QRect(150, 230, 201, 41))
self.textEdit_3.setObjectName("textEdit_3")
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setGeometry(QtCore.QRect(30, 240, 121, 21))
font = QtGui.QFont()
font.setFamily("Agency FB")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_3.setFont(font)
self.label_3.setObjectName("label_3")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 385, 21))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def fileUpload(self):
fname=QFileDialog.getOpenFileName()
        if not fname[0]:  # getOpenFileName returns (path, filter); an empty path means the dialog was cancelled
            return
self.textEdit.setText(fname[0])
def saveDir(self):
fname=QFileDialog.getExistingDirectory()
        if not fname:  # an empty string means the dialog was cancelled
            return
self.textEdit_2.setText(fname)
def getFile(self):
try:
m=mgrs.MGRS()
file_path=self.textEdit.toPlainText().strip()
save_path=self.textEdit_2.toPlainText().strip()
sheet_name=self.textEdit_3.toPlainText().strip()
if(file_path =='' or save_path=='' or sheet_name==''):
QMessageBox.about(self,"알림창","공백란을 반드시 입력하세요.")
return
data=pd.read_excel(file_path,sheet_name=sheet_name)
data=data.fillna('')
for i in range(len(data['군 MGRS'])):
if data['군 MGRS'][i]:
if not '52S' in data['군 MGRS'][i]:
data['군 MGRS'][i]='52S' + data['군 MGRS'][i]
data["군 MGRS"][i] = data['군 MGRS'][i].replace(" ","")
data['GPS'][i]=m.toLatLon(data['군 MGRS'][i].encode())
data.to_excel(save_path+'/'+sheet_name+'.xlsx')
QMessageBox.about(self,"알림창","변환파일이 지정된 경로에 저장되었습니다.")
self.textEdit.setPlainText('')
self.textEdit_2.setPlainText('')
self.textEdit_3.setPlainText('')
except Exception as e:
QMessageBox.about(self,"알림창","에러내용: "+str(e)+"\n"+"파일 헤더에 군 MGRS, GPS 두 항목이 반드시 있어야합니다.\n 시트명 대소문자 구분하셔야 합니다.")
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.label.setText(_translate("MainWindow", "MGRS CONVERTER"))
self.label_2.setText(_translate("MainWindow", "Release by JunHyeong"))
        self.pushButton.setText(_translate("MainWindow", "Upload File"))
        self.pushButton_2.setText(_translate("MainWindow", "Select Save Path"))
        self.pushButton_3.setText(_translate("MainWindow", "Save Converted File"))
        self.label_3.setText(_translate("MainWindow", "Excel Sheet Name"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
|
[
"futuregoing@naver.com"
] |
futuregoing@naver.com
|
ae2778df236dfea2d0ad68d6e05d16487079ed94
|
77ac20270aaa1e17c83c9aeaf889484b1876d050
|
/specialized_scripts/compare_filtered.py
|
e97f473e836c340f06a32d4cd148a1a88bc1dd3d
|
[] |
no_license
|
nmmsv/str-expansions
|
523b5a310155d5de04dde3a93baaac77fd9efe75
|
452aa145cfdcf98bbc1031c1b2233c80f1c337a0
|
refs/heads/master
| 2021-01-25T05:50:33.043826
| 2019-09-03T22:45:53
| 2019-09-03T22:45:53
| 80,690,312
| 0
| 1
| null | 2017-02-02T03:37:11
| 2017-02-02T03:37:10
| null |
UTF-8
|
Python
| false
| false
| 2,025
|
py
|
import sys
sys.path.append('/storage/nmmsv/str-expansions/functions/')
from realignment import expansion_aware_realign, classify_realigned_read
from load_info import load_profile, extract_locus_info
from extract_genome import extract_pre_post_flank
read_class = 'srp'
nCopy = 70
filt_path = '/storage/nmmsv/expansion-experiments/ATXN3_32_cov60_dist500_hap_viz/aligned_read/nc_'+str(nCopy)+'_'+read_class+'.sam'
# filt_path = '/storage/nmmsv/python_playground/test_filter_IRR/nc_'+str(nCopy)+'.sam'
filt_path_true = '/storage/nmmsv/expansion-experiments/ATXN3_32_cov60_dist500_hap_viz/aligned_read/true_filter/nc_'+str(nCopy)+'_'+read_class+'.sam'
sam_path = '/storage/nmmsv/expansion-experiments/ATXN3_32_cov60_dist500_hap_viz/aligned_read/nc_'+str(nCopy)+'.sam'
exp_dir = '/storage/nmmsv/expansion-experiments/ATXN3_32_cov60_dist500_hap_viz/'
arg_dict = load_profile(exp_dir)
locus = arg_dict['locus']
read_len = arg_dict['read_len']
motif = arg_dict['motif']
chrom, locus_start_ref, locus_end_ref = extract_locus_info(locus)
pre, post = extract_pre_post_flank(exp_dir, read_len)
score_dict = { 'match': 3, \
'mismatch': -1, \
'gap': -3}
verbose = False
margin = 2
print locus_start_ref, locus_end_ref
true_reads = []
kk = 0
with open (filt_path, 'r') as handle:
for record in handle:
if record[0] != '@':
kk = kk + 1
QNAME = record.split()[0]
true_reads.append(QNAME)
ll = 0
with open (filt_path_true, 'r') as handle:
for record in handle:
if record[0] != '@':
QNAME = record.split()[0]
SEQ = record.split()[9]
ll = ll + 1
if QNAME not in true_reads:
if QNAME == 'ATXN7_27_cov60_dist500_hap_viz_50_haplo_2617_3124_0:0:0_0:0:0_f':
print
print
print record
print kk, ll
# nCopy, pos, score = expansion_aware_realign(SEQ, pre, post, motif, score_dict, verbose)
# read_class = classify_realigned_read(SEQ, motif, pos, nCopy, score, score_dict, read_len, margin, verbose)
# if read_class == 'IRR':
# print nCopy, score
# print record
# print
|
[
"mousavi@ucsd.edu"
] |
mousavi@ucsd.edu
|
d81392e5431f8db292da4368dccbb0c50a21cc4e
|
1e7b6b5460e66e6673be7d8acdf0109d81bb8148
|
/HappyNumbers.py
|
b3fdf7bf4c05adfa02bfbcaa8d8eeeca8c0ad312
|
[] |
no_license
|
paulocesarcsdev/HappyNumbers
|
a0d5a7c9a659dc57c7ea4b38cef3408563eb18ae
|
6f21552e4dd0d2aa0d410487e44280a27f18c436
|
refs/heads/master
| 2021-04-29T01:32:01.022418
| 2016-12-31T21:14:33
| 2016-12-31T21:14:33
| 77,750,143
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 373
|
py
|
def sum_of_squares(number):
return sum(int(char) ** 2 for char in str(number))
def happy(number):
next_ = sum(int(char) ** 2 for char in str(number))
return number in (1, 7) if number < 10 else happy(next_)
assert sum_of_squares(130) == 10
assert all(happy(n) for n in (1, 10, 100, 130, 97))
assert not all(happy(n) for n in (2, 3, 4, 5, 6, 8, 9))
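# Worked trace (illustrative): 130 -> 1^2 + 3^2 + 0^2 = 10 -> 1^2 + 0^2 = 1, so 130 is happy;
# 4 -> 16 -> 37 -> 58 -> 89 -> 145 -> 42 -> 20 -> 4 cycles without reaching 1, so 4 is not.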
|
[
"paulocesarcs.dev@gmail.com"
] |
paulocesarcs.dev@gmail.com
|
700906fa49f7fb20300de817958ec2be320c619f
|
e2271fa026cdfb5aaf145970e1d9252b8c2dd001
|
/atcoder.jp/dp/dp_g/Main.py
|
dd25435da4afbe84870cf509c95be35a324942ae
|
[] |
no_license
|
tsuchan19991218/atcoder_achievement
|
802daaae640d5157f1bd55cc5d6ef08546fc4ca2
|
15502aba97dada1809deed05ea6c349122578a01
|
refs/heads/master
| 2023-06-11T11:52:11.896838
| 2021-07-02T20:44:40
| 2021-07-02T20:44:40
| 290,015,565
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 446
|
py
|
import sys
sys.setrecursionlimit(2000)
N, M = map(int, input().split())
x = [0] * M
y = [0] * M
G = [[] for _ in range(N + 1)]
for i in range(M):
X, Y = map(int, input().split())
x[i] = X
y[i] = Y
G[X] += [Y]
dp = [-1] * (N + 1)
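# f(n): length of the longest path starting at vertex n (memoized DFS over the DAG);
# dp[n] caches the answer so each vertex is expanded only once.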
def f(n):
if dp[n] != -1:
return dp[n]
res = 0
for m in G[n]:
res = max(res, f(m) + 1)
dp[n] = res
return res
for i in range(1, N + 1):
f(i)
print(max(dp))
|
[
"ylwrvr.t.kogagmail.com@MacBook-Pro.local"
] |
ylwrvr.t.kogagmail.com@MacBook-Pro.local
|
a39b67845b740777a18f5c4f2e0e14c6291115ed
|
d2c4151eff768af64946ababc2e41c13d8973cd3
|
/practice/ARC037_b.py
|
f0e197b3a9f648d1db102f36cfa6409682550efc
|
[] |
no_license
|
Intel-out-side/AtCoder
|
2de19b71981247135432aed2d6d9c2a16c3ab7f0
|
0c419d2df15fff02032432cb1b1323612484e16e
|
refs/heads/master
| 2022-06-23T04:21:12.886072
| 2022-06-13T14:39:07
| 2022-06-13T14:39:07
| 235,240,853
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 708
|
py
|
N, M = list(map(int, input().split()))
#adjacency-list representation
connection = [[] for _ in range(N)]
for i in range(M):
u, v = list(map(int, input().split()))
connection[u-1].append(v-1)
connection[v-1].append(u-1)
#whether each vertex has already been visited
visited = [False] * N
#counter for the number of trees
counter = 0
def dfs(now, prev):
global flag
visited[now] = True
for next in connection[now]:
if next != prev:
if visited[next] == True:
flag = False
else:
dfs(next, now)
for i in range(N):
if not visited[i]:
flag = True
dfs(i, -1)
if flag:
counter += 1
print(counter)
|
[
"so.eng.eng.1rou@gmail.com"
] |
so.eng.eng.1rou@gmail.com
|
648c857f3216d922f48b7f190574f85ba07e289b
|
6640d7daa72a65184365ce623f8ead5a06d375fa
|
/createCloudFrequency_netCDF.py
|
7522003f29c69a63bd352ea452cd1dd7ab696080
|
[] |
no_license
|
mikejwx/main-bin
|
862289bca608dadb96ee7fa9b9ff756c0346920e
|
f1b464643f882059479eb40393ebd9a945be52c0
|
refs/heads/master
| 2020-04-17T04:53:19.261046
| 2019-02-26T16:10:56
| 2019-02-26T16:10:56
| 166,251,297
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,567
|
py
|
#!python
#Michael Johnston:
#=========================================================================
#creates a netCDF of all the cloud frequencies for each day in my dataset.
#POR 1 May to 31 October, 2012 to 2016
#=========================================================================
import numpy as np
from netCDF4 import Dataset, date2num
from os import listdir
import datetime
import matplotlib.pyplot as plt  # plt is used for the availability plot below; it may also be provided by projectFun.py
execfile('/home/xb899100/bin/projectFun.py')
#create a netcdf to store cloud frequency fields
dataset = Dataset('/glusterfs/greybls/users/xb899100/SatData/cloudFrequencies.nc', 'w', format = 'NETCDF4_CLASSIC')
#read a sample image
data = Dataset('/glusterfs/greybls/users/xb899100/SatData/goes13.2016.259.214519.BAND_01.nc')
#get the lat lon
lat = data.variables['lat'][:]
lon = data.variables['lon'][:]
#get the date and time of the image
date = data.variables['imageDate'][:]
myShape = lat.shape
#create dimensions for our netCDF
x = dataset.createDimension('x', size = lon.shape[0])
y = dataset.createDimension('y', size = lon.shape[1])
time = dataset.createDimension('time', size = None)
#create variables for our netCDF
times = dataset.createVariable('time', np.float64, ('time',))
xs = dataset.createVariable('x', np.float64, ('x', 'y'))
ys = dataset.createVariable('y', np.float64, ('x', 'y'))
cldfreq = dataset.createVariable('cldfreq', np.float64, ('time', 'x', 'y'))
#set some global attributes
dataset.description = 'cloud frequencies for days in 2012-2016 averaged between 09:30:00 UTC and 23:59:59 UTC each day.'
dataset.source = 'Michael Johnston, Department of Meteorology at University of Reading, UK'
#set some variable attributes
xs.units = 'degree_east'
ys.units = 'degree_north'
cldfreq.units = 'dimensionless'
times.units = 'hours since 0001-01-01 00:00:00'
times.calendar = 'gregorian'
#writing dimension data
xs[:,:] = lon
ys[:,:] = lat
#gather the cldfreq data
thresh = 0.15
#get a list of files
files = listdir('.')
files.sort()#organize in alphanumerical order
files = files[5:-5] #trim the first and last five entries of the listing (non-image files, including the netCDF we're making)
Lf = len(files)
data2 = []
doy0 = 122 #the first day of year 1 May 2012
visLevels = np.linspace(0.0, 1.0, 11)
timesIndex = 0
counts = []
weirdData = []
for i in range(Lf):
#read the data
data = Dataset('./'+files[i])
#get the lat lon
lat = data.variables['lat'][:]
lon = data.variables['lon'][:]
myShape = lon.shape
#get the date and time of the image
date = data.variables['imageDate'][:]
iTime = data.variables['imageTime'][:]
#get the doy to check if a new day has started
year = date/1000
doy = date - year*1000
if doy != doy0:
counts.append(len(data2))
#check that there isn't significant missing data
if len(data2) > 14:
#if we've moved onto the next day, add to the netCDF
#take the average of the cloud masks to get the frequency
#data 2 is a list of all the cloud masks
#define a data3 that has the frequency
data3 = np.zeros_like(data2[0])
for iData in range(len(data2)):
if myShape == data2[iData].shape:
data3 += data2[iData]
else:
weirdData.append(files[i])
data3 = data3/len(data2)
            #add to the cldfreq variable in the netCDF along the time dimension
cldfreq[timesIndex,:,:] = data3
#add to the time variable in the time dimension
times[timesIndex] = date2num(datetime.datetime(year, 1, 1, 23, 59, 59) + datetime.timedelta(doy0 - 1), units = times.units, calendar = times.calendar)
timesIndex += 1
data2 = [] #empty the data2 list, ready for the next day
doy0 = doy #reset doy
#calibrate the data
#step 1 divide by 32 to convert from 16-bit to 10-bit
excelDate = date2num(datetime.datetime(year, 1, 1, 23, 59, 59) + datetime.timedelta(doy0 - 1), units = 'days since 1900-01-01', calendar = 'gregorian')
data = calibrateVis(data, 0.00012*excelDate - 3.72315)
#step 4 convert from nominal reflectance to reflectance/albedo
data1 = getReflectance(date, iTime, lon, lat, data)
#bound the data between 0 and 1
data1[data1 >= thresh] = 1.0
data1[data1 < thresh] = 0.0
#check that there is some sun
mydate = str(datetime.datetime(year,1,1)+datetime.timedelta(doy - 1)).split()[0]
hour = iTime/10000
minute = (iTime - hour*10000)/100
mytime = str(datetime.time(hour, minute, 00))
SZA = np.max(getSZA(mydate, mytime, lon, lat)) #maximum solar zenith angle
if SZA < 75:
#add data1 to data2 list
data2.append(data1)
dataset.close()
#aside: find out the data availability
year = 2012
doy = 121
counts1 = []
dates = []
count = 0
for f in range(Lf):
if files[f].split('.')[2] == str(doy):
count += 1
else:
while files[f].split('.')[2] != str(doy):
counts1.append(count)
dates.append(str(year)+'-'+str(doy))
if doy > 307:
doy = 121
year += 1
count = 0
else:
doy += 1
count = 0
count += 1
plt.clf()
fig = plt.gcf()
fig.set_size_inches(8, 6)
plt.plot(np.array(counts)/28.)
plt.ylabel('number of images')
plt.xlabel('date')
plt.xticks(np.arange(0, len(counts), 60), dates[0:len(counts):61], rotation = 45)
plt.title('Number of images per day, 100% = 29 images')
plt.show()
|
[
"mcjohnston2803@gmail.com"
] |
mcjohnston2803@gmail.com
|
f706bbd5ae13716cb9d5b0e0bded1c7510b28021
|
c1533ec365dd1b43dc6b6c1c17b7d1a63557f601
|
/twitterbot.py
|
1d02d684f4b2a17dc9e7c95c88a3169d5e8ab9a6
|
[] |
no_license
|
sonya-irsay/phone-leak-twitter
|
b956337b9d101961c1c3df586f45f71e1a90b154
|
ec8abe37bf1d89b3daa65e471e55e477f6047485
|
refs/heads/master
| 2020-03-20T11:26:10.629443
| 2018-06-15T08:51:06
| 2018-06-15T08:51:06
| 137,403,050
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,354
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tweepy, time
#Twitter credentials
CONSUMER_KEY = 'YzlS7uAWdXmhEDUhc4zDoWQL6'
CONSUMER_SECRET = 'fTPQvdgbfQ0blPdGLlZJVxvwJEwh9UG6VdpoNRX1KEEomA9zbZ'
ACCESS_KEY = '991674087721324547-hm452a50s96kJdFcSTndtqgWBKX5fw3'
ACCESS_SECRET = '2rNTYmqoPHCeVMaDgtYdsJUC9ICFUsqiUJd3BShSJ4Pex'
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)
api = tweepy.API(auth)
# ADAFRUIT ------------------------------------------------------------
# Import library and create instance of REST client.
# Example of using the MQTT client class to subscribe to a feed and print out
# any changes made to the feed. Edit the variables below to configure the key,
# username, and feed to subscribe to for changes.
# Import standard python modules.
import sys
# Import Adafruit IO MQTT client.
from Adafruit_IO import MQTTClient
# Set to your Adafruit IO key & username below.
ADAFRUIT_IO_KEY = '8999ffefe40647799ecb8b762983e797'
ADAFRUIT_IO_USERNAME = 'alfatiharufa' # See https://accounts.adafruit.com # to find your username.
# Set to the ID of the feed to subscribe to for updates.
FEED_ID = 'phone.translations'
# Define callback functions which will be called when certain events happen.
def connected(client):
# Connected function will be called when the client is connected to Adafruit IO.
# This is a good place to subscribe to feed changes. The client parameter
# passed to this function is the Adafruit IO MQTT client so you can make
# calls against it easily.
print('Connected to Adafruit IO! Listening for {0} changes...'.format(FEED_ID))
# Subscribe to changes on a feed named DemoFeed.
client.subscribe(FEED_ID)
def disconnected(client):
# Disconnected function will be called when the client disconnects.
print('Disconnected from Adafruit IO!')
sys.exit(1)
def message(client, feed_id, payload, retain):
# Message function will be called when a subscribed feed has a new value.
# The feed_id parameter identifies the feed, and the payload parameter has
# the new value.
import json
# received message example: [{"message":"the collaboration","lang":"phone.en-us"}]
j = json.loads('{1}'.encode('ascii', 'ignore').decode('ascii').format(feed_id, payload))
#print incoming data
print "leaking new data:", j[0]["message"]
#check if data is a duplicate - if yes, don't post it; if no, post it;
try:
api.update_status(j[0]["message"])
    except tweepy.error.TweepError:
        print("duplicate data, not posted")
# print('{1}'.format(feed_id, payload))
# Create an MQTT client instance.
client = MQTTClient(ADAFRUIT_IO_USERNAME, ADAFRUIT_IO_KEY)
# Setup the callback functions defined above.
client.on_connect = connected
client.on_disconnect = disconnected
client.on_message = message
# Connect to the Adafruit IO server.
client.connect()
# Start a message loop that blocks forever waiting for MQTT messages to be
# received. Note there are other options for running the event loop like doing
# so in a background thread--see the mqtt_client.py example to learn more.
client.loop_blocking()
# --------------------------------------------------------------------------
|
[
"sanie96@gmail.com"
] |
sanie96@gmail.com
|
19e8a2c0ff2bba71ee5e1780d86bf55c513dae39
|
84d891b6cb6e1e0d8c5f3e285933bf390e808946
|
/Demo/PO_V4/Common/basepage.py
|
28d4919ba894a2988f328f434b37108a302dffdf
|
[] |
no_license
|
zzlzy1989/web_auto_test
|
4df71a274eb781e609de1067664264402c49737e
|
3e20a55836144e806496e99870f5e8e13a85bb93
|
refs/heads/master
| 2020-05-24T10:37:29.709375
| 2019-10-28T06:14:31
| 2019-10-28T06:14:31
| 187,230,775
| 2
| 0
| null | 2019-06-20T11:06:32
| 2019-05-17T14:29:11
| null |
UTF-8
|
Python
| false
| false
| 4,954
|
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Name: basepage
# Author: 简
# Time: 2019/6/18
from Demo.PO_V4.Common import logger
import logging
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import datetime
import time
from Demo.PO_V4.Common.dir_config import screenshot_dir
class BasePage:
    # Contains all of the low-level selenium methods used by the PageObjects.
    # Can also hold generic element operations such as alert, iframe, windows...
    # Can additionally wrap some extra web-related assertions.
    # Implements logging and screenshots on failure.
    def __init__(self, driver):
        self.driver = driver

    def wait_eleVisible(self, loc, img_doc="", timeout=30, frequency=0.5):
        logging.info("Waiting for element {} to become visible.".format(loc))
        try:
            # time the wait started (datetime)
            start = datetime.datetime.now()
            WebDriverWait(self.driver, timeout, frequency).until(EC.visibility_of_element_located(loc))
            # time the wait ended
            end = datetime.datetime.now()
            logging.info("Wait started at: {}, ended at: {}, duration: {}".
                         format(start, end, end - start))
        except:
            # log it
            logging.exception("Failed while waiting for element to become visible:")
            # screenshot - records which page and which operation failed, plus the current time
            self.save_web_screenshot(img_doc)
            raise

    # find a single element
    def get_element(self, loc, img_doc=""):
        """
        :param loc: element locator, as a tuple (locator type, locator expression)
        :param img_doc: description used for the screenshot, e.g. LoginPage_EnterUsername
        :return: a WebElement object.
        """
        logging.info("Looking in {} for element {} ".format(img_doc, loc))
        try:
            ele = self.driver.find_element(*loc)
            return ele
        except:
            # log it
            logging.exception("Failed to find element")
            # screenshot
            self.save_web_screenshot(img_doc)
            raise

    def click_element(self, loc, img_doc, timeout=30, frequency=0.5):
        """
        Waits for the element to become visible, finds it, then clicks it.
        :param loc:
        :param img_doc:
        :return:
        """
        # 1. wait for the element to become visible
        self.wait_eleVisible(loc, img_doc, timeout, frequency)
        # 2. find the element
        ele = self.get_element(loc, img_doc)
        # 3. then operate on it
        logging.info(" Clicking element {}".format(loc))
        try:
            ele.click()
        except:
            # log it
            logging.exception("Failed to click element")
            # screenshot
            self.save_web_screenshot(img_doc)
            raise

    # text input
    def input_text(self, loc, img_doc, *args):
        # 1. wait for the element to be visible
        self.wait_eleVisible(loc, img_doc)
        # 2. find the element
        ele = self.get_element(loc, img_doc)
        # 3. then operate on it
        logging.info(" Entering text into element {}: {}".format(loc, args))
        try:
            ele.send_keys(*args)
        except:
            # log it
            logging.exception("Text input on element failed")
            # screenshot
            self.save_web_screenshot(img_doc)
            raise

    # get the value of an element attribute
    def get_element_attribute(self, loc, attr_name, img_doc):
        ele = self.get_element(loc, img_doc)
        # read the attribute
        try:
            attr_value = ele.get_attribute(attr_name)
            logging.info("Attribute {} of element {} has value: {}".format(attr_name, loc, attr_value))
            return attr_value
        except:
            # log it
            logging.exception("Failed to read element attribute")
            # screenshot
            self.save_web_screenshot(img_doc)
            raise

    # get the text value of an element.
    def get_element_text(self, loc, img_doc):
        ele = self.get_element(loc, img_doc)
        # read the text
        try:
            text = ele.text
            logging.info("Text value of element {} is: {}".format(loc, text))
            return text
        except:
            # log it
            logging.exception("Failed to read element text")
            # screenshot
            self.save_web_screenshot(img_doc)
            raise

    # take a screenshot of the page
    def save_web_screenshot(self, img_doc):
        # named page_action_time.png
        now = time.strftime("%Y-%m-%d %H_%M_%S")
        filepath = "{}_{}.png".format(img_doc, now)
        try:
            self.driver.save_screenshot(screenshot_dir + "/" + filepath)
            logging.info("Page screenshot saved. Image stored at: {}".format(screenshot_dir + "/" + filepath))
        except:
            logging.exception("Failed to take page screenshot!")

    # window switching
    # iframe switching
    # select dropdown lists
    # upload operations -
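# Hypothetical usage sketch (LoginPage and its locator are illustrative, not part of this project):
# class LoginPage(BasePage):
#     username_input = (By.ID, "username")
#     def enter_username(self, name):
#         self.input_text(self.username_input, "LoginPage_EnterUsername", name)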
|
[
"394845369@qq.com"
] |
394845369@qq.com
|
b6d96c8b7e7483ab8e67d4d427d162850d8442c8
|
96f47ca4966969c34e2ed051f67a2492d2fbb4ef
|
/venv/bin/easy_install
|
a6f53cab5b0c6486dede829266f9574ac831bc89
|
[
"MIT"
] |
permissive
|
williamHuang5468/StockServer
|
d4fe9203e489aa2aa6d63c32afe6eab0beccbd10
|
9a70d0dc2f36b6f1ccb61c593334d518415e166f
|
refs/heads/master
| 2021-01-25T05:51:07.132994
| 2017-02-16T15:03:40
| 2017-02-16T15:03:40
| 80,696,685
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 267
|
#!/home/william/Desktop/StockServer/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"chobits5468@gmail.com"
] |
chobits5468@gmail.com
|
|
3068f2b6c059a9d5ede0614ea0c411a4784d3fe2
|
dbd496c9405c7b6b81c49818f16c6f94f84ee0a6
|
/T1038_김진현/Object_dtection/mmdet/models/necks/bifpn.py
|
16bdb647ead2609488ae8b0b5c7063a0f376ec6c
|
[
"Apache-2.0"
] |
permissive
|
bcaitech1/p3-ims-obd-pepsimintchocolatepineapplepizza
|
e40e6483c5de4fb6d2893e37d5de3266c7cde19d
|
ac6a29dd4b8e3edb1043c1ce491ad61983391bfd
|
refs/heads/master
| 2023-05-02T16:20:33.033766
| 2021-05-22T13:20:37
| 2021-05-22T13:20:37
| 361,668,695
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,067
|
py
|
""" Mission """
import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, xavier_init
from mmcv.runner import auto_fp16
from mmcv.cnn import build_norm_layer
from torch.nn.modules.batchnorm import _BatchNorm
from mmcv.cnn import constant_init
from ..builder import NECKS
# swish activation
class Swish(nn.Module):
def forward(self, x):
return x * torch.sigmoid(x)
# separable convolution
class SeparableConv(nn.Module):
def __init__(self,
in_channels,
out_channels,
bias=False,
relu=False):
super(SeparableConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.relu = relu
self.sep = nn.Conv2d(in_channels,
in_channels,
3,
padding=1,
groups=in_channels,
bias=False)
self.pw = nn.Conv2d(
in_channels,
out_channels,
1,
bias=bias)
if relu:
self.relu_fn = Swish()
def forward(self, x):
x = self.pw(self.sep(x))
if self.relu:
x = self.relu_fn(x)
return x
class WeightedInputConv(nn.Module):
# TODO weighted Convolution
# Fast normalized fusion
"""
inputs = [features1, features2, features3]
out = conv((w1*feature1 + w2*feature2 + w3*feature3) / (w1 + w2 + w3 + eps))
"""
def __init__(self,
in_channels,
out_channels,
num_ins,
conv_cfg=None,
norm_cfg=None,
act_cfg=None,
eps=0.0001):
super(WeightedInputConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.num_ins = num_ins
self.eps = eps
_, bn_layer = build_norm_layer(norm_cfg, out_channels)
"""
1. convolution
2. weight
3. swish
"""
# use separable conv
self.conv_op = nn.Sequential(
SeparableConv(
in_channels,
out_channels,
bias=True,
relu=False),
bn_layer
)
# edge weight and swish
self.weight = nn.Parameter(torch.Tensor(self.num_ins).fill_(1.0))
self._swish = Swish()
def forward(self, inputs):
"""
1. relu (weight)
2. / (w.sum + eps)
3. w * feature
4. swish
5. convolution
"""
w = F.relu(self.weight)
w /= (w.sum() + self.eps)
x = 0
for i in range(self.num_ins):
x += w[i] * inputs[i]
output = self.conv_op(self._swish(x))
return output
class ResampingConv(nn.Module):
def __init__(self,
in_channels,
in_stride,
out_stride,
out_channels,
conv_cfg=None,
norm_cfg=None):
super(ResampingConv, self).__init__()
self.in_channels = in_channels
self.in_stride = in_stride
self.out_stride = out_stride
self.out_channels = out_channels
self.norm_cfg = norm_cfg
self.conv_cfg = conv_cfg
if self.in_stride < self.out_stride:
scale = int(self.out_stride // self.in_stride)
assert scale == 2
self.rescale_op = nn.MaxPool2d(
scale + 1,
stride=scale,
padding=1)
else:
if self.in_stride > self.out_stride:
scale = self.in_stride // self.out_stride
self.rescale_op = functools.partial(
F.interpolate, scale_factor=scale, mode='nearest')
else:
self.rescale_op = None
if self.in_channels != self.out_channels:
self.conv_op = ConvModule(
in_channels,
out_channels,
1,
norm_cfg=norm_cfg,
act_cfg=None,
inplace=False)
def forward(self, x):
if self.in_channels != self.out_channels:
x = self.conv_op(x)
x = self.rescale_op(x) if self.rescale_op else x
return x
class bifpn(nn.Module):
# feature path
nodes_settings = [
{'width_ratio': 64, 'inputs_offsets': [3, 4]},
{'width_ratio': 32, 'inputs_offsets': [2, 5]},
{'width_ratio': 16, 'inputs_offsets': [1, 6]},
{'width_ratio': 8, 'inputs_offsets': [0, 7]},
{'width_ratio': 16, 'inputs_offsets': [1, 7, 8]},
{'width_ratio': 32, 'inputs_offsets': [2, 6, 9]},
{'width_ratio': 64, 'inputs_offsets': [3, 5, 10]},
{'width_ratio': 128, 'inputs_offsets': [4, 11]},
]
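    # Node indexing (BiFPN layout, as in EfficientDet): offsets 0-4 are the input
    # levels P3-P7 (strides 8-128); nodes 5-8 form the top-down path and nodes 9-12
    # the bottom-up path. 'width_ratio' holds the node's stride.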
def __init__(self,
in_channels,
out_channels,
strides=[8, 16, 32, 64, 128],
num_outs=5,
conv_cfg=None,
norm_cfg=None,
act_cfg=None):
super(bifpn, self).__init__()
assert num_outs >= 2
assert len(strides) == len(in_channels)
self.in_channels = in_channels
self.out_channels = out_channels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.num_outs = num_outs
self.channels_nodes = [i for i in in_channels]
self.stride_nodes = [i for i in strides]
self.resample_op_nodes = nn.ModuleList()
self.new_op_nodes = nn.ModuleList()
for _, fnode in enumerate(self.nodes_settings):
new_node_stride = fnode['width_ratio']
op_node = nn.ModuleList()
for _, input_offset in enumerate(fnode['inputs_offsets']):
input_node = ResampingConv(
self.channels_nodes[input_offset],
self.stride_nodes[input_offset],
new_node_stride,
out_channels,
norm_cfg=norm_cfg)
op_node.append(input_node)
new_op_node = WeightedInputConv(
out_channels,
out_channels,
len(fnode['inputs_offsets']),
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
self.new_op_nodes.append(new_op_node)
self.resample_op_nodes.append(op_node)
self.channels_nodes.append(out_channels)
self.stride_nodes.append(new_node_stride)
def forward(self, inputs):
assert len(inputs) == self.num_outs , f'inputs : {len(inputs)}, numouts : {self.num_outs}'
feats = [i for i in inputs]
for fnode, op_node, new_op_node in zip(self.nodes_settings,
self.resample_op_nodes, self.new_op_nodes):
input_node = []
for input_offset, resample_op in zip(fnode['inputs_offsets'], op_node):
# reshape input before weighted conv
input_node.append(resample_op(feats[input_offset]))
# weighted convolution
feats.append(new_op_node(input_node))
outputs = feats[-self.num_outs:]
return outputs
@NECKS.register_module
class BiFPN(nn.Module):
def __init__(self,
in_channels,
out_channels,
num_outs,
strides=[8, 16, 32, 64, 128],
start_level=0,
end_level=-1,
stack=3,
norm_cfg=dict(type='BN', momentum=0.01, eps=1e-3, requires_grad=True),
act_cfg=None):
super(BiFPN, self).__init__()
assert len(in_channels) >= 3
assert len(strides) == len(in_channels)
self.in_channels = in_channels
self.out_channels = out_channels
self.strides = strides
self.num_ins = len(in_channels)
self.act_cfg = act_cfg
self.stack = stack
self.num_outs = num_outs
self.fp16_enabled = False
if end_level == -1:
self.backbone_end_level = self.num_ins
assert num_outs >= self.num_ins - start_level
else:
self.backbone_end_level = end_level
assert end_level <= len(in_channels)
assert num_outs == end_level - start_level
self.start_level = start_level
self.end_level = end_level
# add extra conv layers (e.g., RetinaNet)
bifpn_in_channels = in_channels[self.start_level:self.backbone_end_level]
bifpn_strides = strides[self.start_level:self.backbone_end_level]
bifpn_num_outs = self.num_outs
extra_levels = num_outs - self.backbone_end_level + self.start_level
self.extra_convs = None
if extra_levels >= 1:
self.extra_convs = nn.ModuleList()
for _ in range(extra_levels):
self.extra_convs.append(
ResampingConv(
bifpn_in_channels[-1],
bifpn_strides[-1],
bifpn_strides[-1] * 2,
out_channels,
norm_cfg=norm_cfg))
bifpn_in_channels.append(out_channels)
bifpn_strides.append(bifpn_strides[-1] * 2)
self.stack_bifpns = nn.ModuleList()
for _ in range(stack):
self.stack_bifpns.append(
bifpn(
bifpn_in_channels,
out_channels,
strides=bifpn_strides,
num_outs=bifpn_num_outs,
conv_cfg=None,
norm_cfg=norm_cfg,
act_cfg=None))
bifpn_in_channels = [out_channels for _ in range(bifpn_num_outs)]
@auto_fp16()
def forward(self, inputs):
assert len(inputs) == len(self.in_channels) , f'inputs : {len(inputs)}, in_channels : {len(self.in_channels)}'
feats = list(inputs[self.start_level:self.backbone_end_level])
# add extra feature (ex. input features=4, output features=5, add 1 extra features from last feature)
if self.extra_convs:
for i in range(len(self.extra_convs)):
feats.append(self.extra_convs[i](feats[-1]))
# weighted bi-directional feature pyramid network
for idx, stack_bifpn in enumerate(self.stack_bifpns):
feats = stack_bifpn(feats)
return tuple(feats[:self.num_outs])
def init_weights(self, pretrained=None):
for m in self.modules():
if isinstance(m, nn.Conv2d):
xavier_init(m, distribution='uniform')
elif isinstance(m, (_BatchNorm, nn.GroupNorm, nn.SyncBatchNorm)):
constant_init(m, 1)
|
[
"wlsgus1109@gmail.com"
] |
wlsgus1109@gmail.com
|
edcc6fa1b89f75d6e53dbb38c288b5217ea5e32e
|
61b00ed06c3d3cee37935dbc093649f7d7bf12ff
|
/toeprint_seq_main.py
|
7b9a1db7d15d953a5f756918b4947acd168b9247
|
[
"MIT"
] |
permissive
|
borisz264/toeprint_seq
|
d7b9559b723c7a329ea4769d5c4fed95a62bb6ab
|
370bf91b3487b84286c42f2f7e41ab6cc41ba958
|
refs/heads/master
| 2021-01-19T08:23:43.010949
| 2015-07-30T21:29:34
| 2015-07-30T21:29:34
| 39,972,433
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 26,010
|
py
|
import operator
import aColors
__author__ = 'boris'
"""
Intended for processing of toeprint-seq data from defined RNA pools
Based on Alex Robertson's original RBNS pipeline, available on github
"""
import sys
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['pdf.fonttype'] = 42 #leaves most text as actual text in PDFs, not outlines
import os
import argparse
import itertools
import collections
from collections import defaultdict
import gzip
import subprocess
import numpy
import scipy.stats as stats
import bzUtils
import tps_settings
import tps_utils
import tps_lib
import tps_qc
import stacked_bar_kmers
class TPSe:
def __init__(self, settings, threads):
self.threads = threads
self.settings = settings
self.collapse_identical_reads()
self.remove_adaptor()
self.remove_primer()
self.trim_reads()
self.trim_reference_pool_fasta()
self.build_bowtie_index()
self.map_reads()
self.initialize_libs()
def initialize_libs(self):
self.settings.write_to_log('initializing libraries, counting reads')
tps_utils.make_dir(self.rdir_path('sequence_counts'))
self.libs = []
map(lambda lib_settings: self.initialize_lib(lib_settings), self.settings.iter_lib_settings())
self.settings.write_to_log('initializing libraries, counting reads, done')
def initialize_lib(self, lib_settings):
lib = tps_lib.TPS_Lib(self.settings, lib_settings)
self.libs.append(lib)
def needs_calculation(self, lib_settings, count_type, k):
if self.settings.get_force_recount(count_type):
return True
return not lib_settings.counts_exist(count_type, k)
def make_tables(self):
tps_utils.make_dir(self.rdir_path('tables'))
self.make_counts_table()
def make_plots(self):
tps_utils.make_dir(self.rdir_path('plots'))
self.plot_AUG_reads()
self.plot_AUG_reads(unique_only = True,)
self.plot_last_AUG_reads()
self.plot_last_AUG_reads(unique_only = True,)
self.plot_AUG_reads(which_AUG = 2, unique_only = True)
self.plot_AUG_reads(which_AUG = 2)
def make_table_header(self, of):
"""
takes a file handle and writes a good header for it such that
each lane is a column.
"""
of.write('#')
for lib in self.libs:
of.write('\t' + lib.get_barcode())
of.write('\n[%s]' % self.settings.get_property('protein_name'))
for lib in self.libs:
of.write('\t%s' % lib.get_conc())
of.write('\nwashes')
for lib in self.libs:
of.write('\t%i' % lib.get_washes())
of.write('\nT (C)')
for lib in self.libs:
of.write('\t%s' % lib.get_temperature())
of.write('\n')
def collapse_identical_reads(self):
"""
collapses all identical reads using FASTX toolkit
:return:
"""
self.settings.write_to_log('collapsing reads')
if not self.settings.get_property('force_recollapse'):
for lib_settings in self.settings.iter_lib_settings():
if not lib_settings.collapsed_reads_exist():
break
else:
return
tps_utils.make_dir(self.rdir_path('collapsed_reads'))
if self.settings.get_property('collapse_identical_reads'):
bzUtils.parmap(lambda lib_setting: self.collapse_one_fastq_file(lib_setting), self.settings.iter_lib_settings(), nprocs = self.threads)
else:
bzUtils.parmap(lambda lib_setting: self.fastq_to_fasta(lib_setting), self.settings.iter_lib_settings(), nprocs = self.threads)
self.settings.write_to_log('collapsing reads complete')
def collapse_one_fastq_file(self, lib_settings):
lib_settings.write_to_log('collapsing_reads')
subprocess.Popen('gunzip -c %s | fastx_collapser -v -Q33 2>>%s | gzip > %s' % (lib_settings.get_fastq_file(),
lib_settings.get_log(),
lib_settings.get_collapsed_reads()
), shell=True).wait()
lib_settings.write_to_log('collapsing_reads_done')
def fastq_to_fasta(self, lib_settings):
lib_settings.write_to_log('fasta_conversion')
subprocess.Popen('gunzip -c %s | fastq_to_fasta -v -Q33 2>>%s | gzip > %s' % (lib_settings.get_fastq_file(),
lib_settings.get_log(),
lib_settings.get_collapsed_reads()
), shell=True).wait()
lib_settings.write_to_log('fasta_conversion done')
def remove_adaptor(self):
if not self.settings.get_property('force_retrim'):
for lib_settings in self.settings.iter_lib_settings():
if not lib_settings.adaptorless_reads_exist():
break
else:
return
if self.settings.get_property('trim_adaptor'):
tps_utils.make_dir(self.rdir_path('adaptor_removed'))
bzUtils.parmap(lambda lib_setting: self.remove_adaptor_one_lib(lib_setting), self.settings.iter_lib_settings(), nprocs = self.threads)
def remove_primer(self):
if not self.settings.get_property('force_retrim'):
for lib_settings in self.settings.iter_lib_settings():
if not lib_settings.primerless_reads_exist():
break
else:
return
if self.settings.get_property('trim_adaptor'):
tps_utils.make_dir(self.rdir_path('primer_removed'))
bzUtils.parmap(lambda lib_setting: self.remove_primer_one_lib(lib_setting), self.settings.iter_lib_settings(), nprocs = self.threads)
def remove_adaptor_one_lib(self, lib_settings):
lib_settings.write_to_log('adaptor trimming')
command_to_run = 'cutadapt --adapter %s --overlap 3 --minimum-length %d %s --output %s 1>>%s 2>>%s' % (self.settings.get_property('adaptor_sequence'), self.settings.get_property('min_post_adaptor_length'),
lib_settings.get_collapsed_reads(), lib_settings.get_adaptor_trimmed_reads(), lib_settings.get_log(),
lib_settings.get_log())
subprocess.Popen(command_to_run, shell=True).wait()
lib_settings.write_to_log('adaptor trimming done')
def remove_primer_one_lib(self, lib_settings):
lib_settings.write_to_log('reverse primer trimming')
command_to_run = 'cutadapt --adapter %s --overlap 3 --minimum-length %d %s --output %s 1>>%s 2>>%s' % (self.settings.get_property('primer_sequence'), self.settings.get_property('min_post_adaptor_length'),
lib_settings.get_adaptor_trimmed_reads(), lib_settings.get_primer_trimmed_reads(), lib_settings.get_log(),
lib_settings.get_log())
subprocess.Popen(command_to_run, shell=True).wait()
lib_settings.write_to_log('reverse primer trimming done')
def plot_AUG_reads(self, which_AUG = 1, unique_only = False, min_x = -30, max_x = 30, read_cutoff = 100):
#1 is for the first AUG, 2 the 2nd and so on. Only TLs with enough AUGs are counted
assert which_AUG > 0
positions = numpy.array(range(min_x, max_x+1))
mappings_passing_cutoff_in_all_libs = self.libs[0].get_mappings_with_minimum_reads(read_cutoff, names_only = True)
for lib in self.libs[1:]:
mappings_passing_cutoff_in_all_libs = \
mappings_passing_cutoff_in_all_libs.intersection(lib.get_mappings_with_minimum_reads(read_cutoff,
names_only = True))
if unique_only:
out_name = os.path.join(
self.settings.get_rdir(),
'plots',
'unique_AUG%d_density.pdf' % (which_AUG))
mapping_names = mappings_passing_cutoff_in_all_libs.intersection(lib.get_single_TL_mappings(names_only = True))
else:
out_name = os.path.join(
self.settings.get_rdir(),
'plots',
'AUG%d_density.pdf' % (which_AUG))
mapping_names = mappings_passing_cutoff_in_all_libs
fig = plt.figure(figsize=(8,8))
plot = fig.add_subplot(111)
positions = range(min_x, max_x)
color_index = 0
genes_plotted = set()
for lib in self.libs:
offset_sum = defaultdict(float)
offset_counts = defaultdict(int)
num_genes_counted = 0
for mapping_name in mapping_names:
mapping = lib.pool_sequence_mappings[mapping_name]
AUG_positions = mapping.positions_of_subsequence('ATG')
if len(AUG_positions) >= which_AUG:
genes_plotted.add(mapping_name)
num_genes_counted += 1
alignment_position = AUG_positions[which_AUG-1]
for position in positions:
AUG_relative_position = alignment_position - position
read_fraction_at_position = mapping.fraction_at_position(AUG_relative_position)
                        if read_fraction_at_position is not None:
offset_sum[position] += read_fraction_at_position
offset_counts[position] += 1
offset_averages = {}
for position in positions:
#print position, offset_sum[position], float(offset_counts[position])
offset_averages[position] = offset_sum[position]/float(offset_counts[position])
offset_average_array = [offset_averages[position] for position in positions]
plot.plot(positions, offset_average_array, color=bzUtils.rainbow[color_index], lw=2, label ='%s (%d)' %(lib.get_sample_name(), num_genes_counted))
color_index += 1
plot.axvline(16, ls= '--')
plot.axvline(19, ls= '--')
lg=plt.legend(loc=2,prop={'size':10}, labelspacing=0.2)
lg.draw_frame(False)
plot.set_xticks(positions[::3])
plot.set_xticklabels(positions[::3])
plot.set_xlabel("position of read 5' end from AUG %d" %(which_AUG) )
plot.set_ylabel("average read fraction")
plt.savefig(out_name, transparent='True', format='pdf')
plt.clf()
print genes_plotted
for mapping_name in genes_plotted:
self.plot_single_sequence_read_distributions(mapping_name)
def plot_last_AUG_reads(self, unique_only = False, min_x = -30, max_x = 30, read_cutoff = 100):
        # Aligns reads to the last AUG of each TL; only TLs with at least one AUG are counted.
positions = numpy.array(range(min_x, max_x+1))
mappings_passing_cutoff_in_all_libs = self.libs[0].get_mappings_with_minimum_reads(read_cutoff, names_only = True)
for lib in self.libs[1:]:
mappings_passing_cutoff_in_all_libs = \
mappings_passing_cutoff_in_all_libs.intersection(lib.get_mappings_with_minimum_reads(read_cutoff,
names_only = True))
if unique_only:
out_name = os.path.join(
self.settings.get_rdir(),
'plots',
'unique_last_AUG_density.pdf')
mapping_names = mappings_passing_cutoff_in_all_libs.intersection(lib.get_single_TL_mappings(names_only = True))
else:
out_name = os.path.join(
self.settings.get_rdir(),
'plots',
'last_AUG_density.pdf')
mapping_names = mappings_passing_cutoff_in_all_libs
fig = plt.figure(figsize=(8,8))
plot = fig.add_subplot(111)
positions = range(min_x, max_x)
color_index = 0
for lib in self.libs:
offset_sum = defaultdict(float)
offset_counts = defaultdict(int)
num_genes_counted = 0
for mapping_name in mapping_names:
mapping = lib.pool_sequence_mappings[mapping_name]
AUG_positions = mapping.positions_of_subsequence('ATG')
if len(AUG_positions) >= 1:
num_genes_counted += 1
alignment_position = AUG_positions[-1]
for position in positions:
AUG_relative_position = alignment_position - position
read_fraction_at_position = mapping.fraction_at_position(AUG_relative_position)
                        if read_fraction_at_position is not None:
offset_sum[position] += read_fraction_at_position
offset_counts[position] += 1
offset_averages = {}
for position in positions:
#print position, offset_sum[position], float(offset_counts[position])
offset_averages[position] = offset_sum[position]/float(offset_counts[position])
offset_average_array = [offset_averages[position] for position in positions]
plot.plot(positions, offset_average_array, color=bzUtils.rainbow[color_index], lw=2, label ='%s (%d)' %(lib.get_sample_name(), num_genes_counted))
color_index += 1
plot.axvline(16, ls='--')
plot.axvline(19, ls='--')
lg=plt.legend(loc=2,prop={'size':10}, labelspacing=0.2)
lg.draw_frame(False)
plot.set_xticks(positions[::3])
plot.set_xticklabels(positions[::3])
plot.set_xlabel("position of read 5' end from last AUG")
plot.set_ylabel("average read fraction")
plt.savefig(out_name, transparent='True', format='pdf')
plt.clf()
def plot_single_sequence_read_distributions(self, sequence_name):
fig = plt.figure(figsize=(8,8))
plot = fig.add_subplot(111)
colorIndex = 0
for lib in self.libs:
mapping = lib.pool_sequence_mappings[sequence_name]
positions = numpy.array(range(0, len(mapping.full_sequence)))
fractions = [mapping.fraction_at_position(position) for position in positions]
plot.plot(positions , fractions,color=bzUtils.rainbow[colorIndex], lw=1, label = lib.lib_settings.sample_name)
colorIndex+=1
for AUG_pos in mapping.positions_of_subsequence('ATG'):
plot.axvline(AUG_pos+16, ls='--')
plot.axvline(AUG_pos+19, ls='--')
plot.set_xticks(positions[::10])
plot.set_xticklabels(positions[::10])
plot.set_xlim(-1, len(mapping.full_sequence))
plot.set_xlabel("position of read 5' end from RNA end (--expected AUG toeprints)")
plot.set_ylabel("read fraction")
lg=plt.legend(loc=2,prop={'size':10}, labelspacing=0.2)
lg.draw_frame(False)
out_name = os.path.join(
self.settings.get_rdir(),
'plots',
'%(sequence_name)s.read_positions.pdf' % {'sequence_name': sequence_name})
plt.savefig(out_name, transparent='True', format='pdf')
plt.clf()
def trim_reads(self):
"""
Trim reads by given amount, removing potential random barcoding sequences from 5' end
Trimming from 3' end can also help if mapping is problematic by reducing chance for indels to prevent mapping
:return:
"""
self.settings.write_to_log( 'trimming reads')
if not self.settings.get_property('force_retrim'):
for lib_settings in self.settings.iter_lib_settings():
if not lib_settings.trimmed_reads_exist():
break
else:
return
tps_utils.make_dir(self.rdir_path('trimmed_reads'))
bzUtils.parmap(lambda lib_setting: self.trim_one_fasta_file(lib_setting), self.settings.iter_lib_settings(), nprocs = self.threads)
self.settings.write_to_log( 'trimming reads complete')
def trim_one_fasta_file(self, lib_settings):
lib_settings.write_to_log('trimming_reads')
first_base_to_keep = self.settings.get_property('first_base_to_keep') #the trimmer is 1-indexed. 1 means keep every base
last_base_to_keep = self.settings.get_property('last_base_to_keep')
if self.settings.get_property('trim_adaptor'):
subprocess.Popen('gunzip -c %s | fastx_trimmer -f %d -l %d -z -o %s >>%s 2>>%s' % (lib_settings.get_primer_trimmed_reads(),
first_base_to_keep, last_base_to_keep,
lib_settings.get_trimmed_reads(),
lib_settings.get_log(),
lib_settings.get_log()), shell=True).wait()
else:
subprocess.Popen('gunzip -c %s | fastx_trimmer -f %d -l %d -z -o %s >>%s 2>>%s' % (lib_settings.get_collapsed_reads(),
first_base_to_keep, last_base_to_keep,
lib_settings.get_trimmed_reads(),
lib_settings.get_log(),
lib_settings.get_log()), shell=True).wait()
lib_settings.write_to_log('trimming_reads done')
def get_barcode_match(self, barcode, barcodes):
"""
takes a barcode and returns the one it matches (hamming <= 1)
else
empty string
"""
if barcode in barcodes:
return barcode
for barcode_j in barcodes:
if tps_utils.hamming_N(barcode, barcode_j) <= self.settings.get_property('mismatches_allowed_in_barcode'):
return barcode_j
return ''
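    # Sketch of the matching rule (barcodes hypothetical): with one allowed
    # mismatch, 'ACGA' is within distance 1 of 'ACGT' but 'AGGA' is at
    # distance 2, so get_barcode_match('ACGA', ['ACGT']) would return 'ACGT'
    # while get_barcode_match('AGGA', ['ACGT']) would return ''.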
def build_bowtie_index(self):
"""
builds a bowtie 2 index from the input fasta file
recommend including barcode+PCR sequences just in case of some no-insert amplicons
"""
self.settings.write_to_log('building bowtie index')
if self.settings.get_property('force_index_rebuild') or not self.settings.bowtie_index_exists():
tps_utils.make_dir(self.rdir_path('bowtie_indices'))
            index = self.settings.get_bowtie_index()
            subprocess.Popen('bowtie2-build -f --offrate 0 %s %s 1>>%s 2>>%s' % (self.settings.get_trimmed_pool_fasta(),
                                                                                 index, self.settings.get_log()+'.bwt',
                                                                                 self.settings.get_log()+'.bwt'), shell=True).wait()
self.settings.write_to_log('building bowtie index complete')
def trim_reference_pool_fasta(self):
'''
Trims the reference sequences to the length of the trimmed reads + a buffer
'''
trim_5p = self.settings.get_property('pool_5trim') #nucleotides to cut from 5' end
trim_3p = self.settings.get_property('pool_3trim') #nucleotides to cut from 3' end
f = open(self.settings.get_property('pool_fasta'))
g = open(self.settings.get_trimmed_pool_fasta(), 'w')
for line in f:
if not line.strip() == '' and not line.startswith('#'):#ignore empty lines and commented out lines
if line.startswith('>'):#> marks the start of a new sequence
g.write(line)
else:
g.write(self.settings.get_property('pool_prepend')+line.strip()[trim_5p:len(line.strip())-trim_3p]+self.settings.get_property('pool_append')+'\n')
f.close()
g.close()
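    # Worked example (sequences hypothetical): with pool_5trim=2, pool_3trim=3,
    # pool_prepend='GG' and pool_append='A', an entry 'AACGTACGTACG' is written
    # out as 'GGCGTACGTA' -- 2 nt cut from the 5' end, 3 nt from the 3' end,
    # then the flanking sequences added.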
def map_reads(self):
"""
map all reads using bowtie
:return:
"""
self.settings.write_to_log('mapping reads')
if not self.settings.get_property('force_remapping'):
for lib_settings in self.settings.iter_lib_settings():
if not lib_settings.mapped_reads_exist():
break
else:
return
tps_utils.make_dir(self.rdir_path('mapped_reads'))
tps_utils.make_dir(self.rdir_path('mapping_stats'))
tps_utils.make_dir(self.rdir_path('unmapped_reads'))
bzUtils.parmap(lambda lib_setting: self.map_one_library(lib_setting), self.settings.iter_lib_settings(),
nprocs = self.threads)
self.settings.write_to_log( 'finished mapping reads')
def map_one_library(self, lib_settings):
lib_settings.write_to_log('mapping_reads')
subprocess.Popen('bowtie2 -f -D 20 -R 3 -N 1 -L 15 --norc -i S,1,0.50 -x %s -p %d -U %s --un-gz %s -S %s 1>> %s 2>>%s' % (self.settings.get_bowtie_index(), self.threads,
lib_settings.get_trimmed_reads(), lib_settings.get_unmappable_reads(), lib_settings.get_mapped_reads_sam(),
lib_settings.get_log(), lib_settings.get_pool_mapping_stats()), shell=True).wait()
#subprocess.Popen('samtools view -b -h -o %s %s 1>> %s 2>> %s' % (lib_settings.get_mapped_reads(), lib_settings.get_mapped_reads_sam(), lib_settings.get_log(), lib_settings.get_log()), shell=True).wait()
#also, sort bam file, and make an index
#samtools view -uS myfile.sam | samtools sort - myfile.sorted
subprocess.Popen('samtools view -uS %s | samtools sort - %s.temp_sorted 1>>%s 2>>%s' % (lib_settings.get_mapped_reads_sam(), lib_settings.get_mapped_reads_sam(),
lib_settings.get_log(), lib_settings.get_log()), shell=True).wait()
#subprocess.Popen('samtools sort %s %s.temp_sorted 1>>%s 2>>%s' % (lib_settings.get_mapped_reads_sam(), lib_settings.get_mapped_reads_sam(),
# lib_settings.get_log(), lib_settings.get_log()), shell=True).wait()
subprocess.Popen('mv %s.temp_sorted.bam %s' % (lib_settings.get_mapped_reads_sam(),
lib_settings.get_mapped_reads()), shell = True).wait()
subprocess.Popen('samtools index %s' % (lib_settings.get_mapped_reads()), shell = True).wait()
subprocess.Popen('rm %s' % (lib_settings.get_mapped_reads_sam()), shell = True).wait()
lib_settings.write_to_log('mapping_reads done')
def rdir_path(self, *args):
return os.path.join(self.settings.get_rdir(), *args)
def get_rdir_fhandle(self, *args):
"""
returns a filehandle to the fname in the rdir
"""
out_path = self.rdir_path(*args)
out_dir = os.path.dirname(out_path)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
return tps_utils.aopen(out_path, 'w')
def perform_qc(self):
qc_engine = tps_qc.TPS_qc(self, self.settings, self.threads)
if self.settings.get_property('collapse_identical_reads'):
qc_engine.plot_pcr_bias()
qc_engine.identify_contaminating_sequences()
qc_engine.print_library_count_concordances()
qc_engine.plot_average_read_positions()
qc_engine.plot_count_distributions()
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("settings_file")
parser.add_argument("--make-tables",
help="Makes tables.",
action='store_true')
parser.add_argument("--perform-qc",
help="performs quality control analysis.",
action='store_true')
parser.add_argument("--make-plots",
help="Makes plots.",
action='store_true')
parser.add_argument("--comparisons",
help="Does comparisons to other experiments",
action='store_true')
parser.add_argument("--all-tasks",
help="Makes plots, tables, folding and comparisons",
action='store_true')
parser.add_argument("--threads",
help="Max number of processes to use",
type = int, default = 8)
args = parser.parse_args()
return args
def main():
"""
"""
args = parse_args()
settings = tps_settings.TPS_settings(args.settings_file)
tps_experiment = TPSe(settings, args.threads)
print 'TPSe ready'
if args.perform_qc or args.all_tasks:
print 'QC'
settings.write_to_log('performing QC')
tps_experiment.perform_qc()
settings.write_to_log('done performing QC')
if args.make_tables or args.all_tasks:
print 'tables'
settings.write_to_log('making tables')
tps_experiment.make_tables()
settings.write_to_log('done making tables')
if args.make_plots or args.all_tasks:
print 'plots'
settings.write_to_log('making plots')
tps_experiment.make_plots()
settings.write_to_log('done making plots')
if args.comparisons or args.all_tasks:
settings.write_to_log('doing comparisons')
tps_experiment.compare_all_other_experiments()
if __name__ == '__main__':
    main()
|
[
"boris@Boriss-iMac.local"
] |
boris@Boriss-iMac.local
|
1417dbe6d3773f4f8c5c60ef39421cb2a9fda69c
|
1db3e25d20771804923dd22c5291fb27621669f9
|
/self_daily/apriori_in_actions.py
|
030826c098363b67a430e84fb21cc105e800d599
|
[] |
no_license
|
tusonggao/manbing_apriori
|
11d5eb4b81768bf0952ba78ce0a13f915325de13
|
5c5b188f267d6219d80177c3fe03334e2e2136a3
|
refs/heads/master
| 2020-04-17T17:42:07.246473
| 2019-01-25T12:58:00
| 2019-01-25T12:58:00
| 166,794,595
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,513
|
py
|
from numpy import *
import pandas as pd
def loadDataSet():
# return [[1, 3, 4], [2, 3, 5], [1, 2, 3, 5], [2, 5]]
# return [[1, 3, 4],
# [2, 3, 5],
# [1, 2, 3, 5],
# [2, 5],
# [3, 4, 7, 9],
# [1, 4, 8],
# [2, 3, 4],
# [2, 5],
# [2, 3],
# [3, 7],
# [1, 3, 4],
# [3, 2, 7]]
# return [[1, 3, 4],
# [3, 4, 5],
# [1, 2, 3, 4],
# [2, 4],
# [3, 4, 1, 2],
# [3, 4, 5],
# [2, 3, 4]]
return [['1', '3', '4'],
['3', '4', '5'],
['1', '2', '3', '4'],
['2', '4'],
['3', '4', '1', '2'],
['3', '4', '5'],
['2', '3', '4']]
def createC1(dataSet):
C1 = []
try:
for transaction in dataSet:
for item in transaction:
if not [item] in C1:
C1.append([item])
C1.sort()
    except TypeError:
        print('got TypeError, C1 is', C1)
return list(map(frozenset, C1)) # use frozen set so we
# can use it as a key in a dict
def scanD(D, Ck, minSupport):
ssCnt = {}
for tid in D:
for can in Ck:
if can.issubset(tid):
if can not in ssCnt:
ssCnt[can] = 1
else:
ssCnt[can] += 1
numItems = float(len(D))
retList = []
supportData = {}
for key in ssCnt:
support = ssCnt[key] / numItems
if support >= minSupport:
retList.insert(0, key)
supportData[key] = support
return retList, supportData
def aprioriGen(Lk, k): # creates Ck
retList = []
lenLk = len(Lk)
for i in range(lenLk):
for j in range(i + 1, lenLk):
            L1 = list(Lk[i])[:k - 2]
            L2 = list(Lk[j])[:k - 2]
            L1.sort()
            L2.sort()
if L1 == L2: # if first k-2 elements are equal
retList.append(Lk[i] | Lk[j]) # set union
return retList
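# Worked example for aprioriGen (a sketch, not called by the code above):
# with Lk = [frozenset({'1'}), frozenset({'2'}), frozenset({'3'})] and k = 2,
# the first k-2 = 0 elements of every pair trivially agree, so all pairwise
# unions are produced: {'1','2'}, {'1','3'}, {'2','3'}. For k = 3, only sets
# sharing their first element merge, e.g. {'1','2'} | {'1','3'} = {'1','2','3'}.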
def apriori(dataSet, minSupport=0.5):
C1 = createC1(dataSet)
D = map(set, dataSet)
D = list(D) # added by tusonggao
L1, supportData = scanD(D, C1, minSupport)
L = [L1]
k = 2
while (len(L[k - 2]) > 0):
Ck = aprioriGen(L[k - 2], k)
Lk, supK = scanD(D, Ck, minSupport) # scan DB to get Lk
supportData.update(supK)
L.append(Lk)
k += 1
return L, supportData
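# On the sample dataset returned by loadDataSet (7 transactions), the item
# supports are '1': 3/7, '2': 4/7, '3': 6/7, '4': 7/7, '5': 2/7, so with
# minSupport=0.5 the frequent 1-itemsets are L1 = [{'2'}, {'3'}, {'4'}].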
def generateRules(L, supportData, minConf=0.7): # supportData is a dict coming from scanD
bigRuleList = []
for i in range(1, len(L)): # only get the sets with two or more items
for freqSet in L[i]:
H1 = [frozenset([item]) for item in freqSet]
if (i > 1):
rulesFromConseq(freqSet, H1, supportData, bigRuleList, minConf)
else:
calcConf(freqSet, H1, supportData, bigRuleList, minConf)
return bigRuleList
def calcConf(freqSet, H, supportData, brl, minConf=0.7):
prunedH = [] # create new list to return
for conseq in H:
conf = supportData[freqSet] / supportData[freqSet - conseq] # calc confidence
if conf >= minConf:
print(freqSet - conseq, '-->', conseq, 'conf:', conf)
brl.append((freqSet - conseq, conseq, conf))
prunedH.append(conseq)
return prunedH
def rulesFromConseq(freqSet, H, supportData, brl, minConf=0.7):
m = len(H[0])
if (len(freqSet) > (m + 1)): # try further merging
Hmp1 = aprioriGen(H, m + 1) # create Hm+1 new candidates
Hmp1 = calcConf(freqSet, Hmp1, supportData, brl, minConf)
if (len(Hmp1) > 1): # need at least two sets to merge
rulesFromConseq(freqSet, Hmp1, supportData, brl, minConf)
def pntRules(ruleList, itemMeaning):
for ruleTup in ruleList:
for item in ruleTup[0]:
print(itemMeaning[item])
print("-------->")
for item in ruleTup[1]:
print(itemMeaning[item])
print("confidence: %f" % ruleTup[2])
print() # print a blank line
######################################################################
if __name__=='__main__':
    dataSet = loadDataSet()
    # print('dataSet is', dataSet)
    # result = createC1(dataSet)
    # print(result)
L, suppData=apriori(dataSet, minSupport=0.33)
rules=generateRules(L,suppData, minConf=0.20)
print(L, suppData)
print('rules is', rules)
|
[
"tusonggao@163.com"
] |
tusonggao@163.com
|
67cddc681788dd8e3e3ed2cea94690b0d9fc72e6
|
b99dd37ae91dd5e5ee6b0ed0cad9d5ba376a5aab
|
/week2/ex4/color_change_inrange.py
|
02edc0239a3b279003fdf4e4ca647d94e12f73df
|
[] |
no_license
|
Dave-Elec/tutorials
|
d7f4f6b43ff18ba200fda23d57d27b175f89e03a
|
a2a8d8b7a21285e8e33d1617aab9146e65ce683f
|
refs/heads/master
| 2023-09-02T01:07:59.886897
| 2021-11-17T14:33:41
| 2021-11-17T14:33:41
| 426,303,533
| 0
| 0
| null | 2021-11-09T16:25:55
| 2021-11-09T16:25:52
| null |
UTF-8
|
Python
| false
| false
| 3,002
|
py
|
import numpy as np
import cv2 as cv
import argparse
def color_change(input_img, input_hex, input_range, output_hex, output_img):
def HEX2HSV(hex):
hd = {'0':0,'1':1,'2':2,'3':3,'4':4,'5':5,'6':6,'7':7,
'8':8,'9':9,'A':10,'a':10,'B':11,'b':11,'C':12,
'c':12,'D':13,'d':13,'E':14,'e':14,'F':15,'f':15}
try:
            if len(hex) != 6:
                raise KeyError
r = hd[hex[0]]*16 + hd[hex[1]]
g = hd[hex[2]]*16 + hd[hex[3]]
b = hd[hex[4]]*16 + hd[hex[5]]
except KeyError:
exit('ERROR: Invalid HEX value')
return cv.cvtColor(np.array([b,g,r], dtype=np.uint8).reshape(1,1,3), cv.COLOR_BGR2HSV).flatten()
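    # A terser equivalent of the digit table above would use int() with base
    # 16, e.g. r, g, b = (int(hex[i:i+2], 16) for i in (0, 2, 4)) -- shown as
    # a sketch only; the explicit lookup is kept as written.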
img = cv.imread(input_img, cv.IMREAD_UNCHANGED)
hsv = cv.cvtColor(img,cv.COLOR_BGR2HSV)
i_hsv = HEX2HSV(input_hex)
o_hsv = HEX2HSV(output_hex)
i_hsv = i_hsv.astype(np.int32)
change = np.array(input_range, dtype=np.int32)
i_hsv_L = i_hsv - change
i_hsv_U = i_hsv + change
i_hsv_L = i_hsv_L.clip(0,255).astype(np.uint8)
i_hsv_U = i_hsv_U.clip(0,255).astype(np.uint8)
thresh = cv.inRange(hsv, i_hsv_L, i_hsv_U)
"""
### Contour detection.
## Accept contours that meet minimum area or minimum length criteria
thresh2 = np.zeros(thresh.shape, np.uint8)
contours, h = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
cnts = []
area_t = 500
perim_t = 50
for i in range(len(h[0])):
if h[0][i][-1] == -1:
area = cv.contourArea(contours[i])
p = cv.arcLength(contours[i], False)
#print(area, p)
if area >= area_t or p > perim_t:
cnts.append(contours[i])
cv.drawContours(thresh2, [contours[i]], 0, 255,-1)
thresh = cv.bitwise_and(thresh, thresh2)
#cv.drawContours(img, cnts, -1, [0,0,255],3)
#cv.imwrite('contours.png', img)
#cv.imwrite('contours_f.png', thresh2)
"""
####
disc = cv.getStructuringElement(cv.MORPH_ELLIPSE,(17,17))
cv.filter2D(thresh,-1,disc,thresh)
# threshold
ret,thresh = cv.threshold(thresh,50,255,0)
# replace hue with chosen color
hsv[:,:,0] = np.where(thresh==255, o_hsv[0] ,hsv[:,:,0])
img_bgr = cv.cvtColor(hsv, cv.COLOR_HSV2BGR)
if img.shape[-1] == 4:
img_bgr = np.dstack([img_bgr, img[:,:,3]])
    cv.imwrite(output_img, img_bgr)  # write to the path passed in, not the global args
#cv.imwrite("mask.png", thresh)
parser = argparse.ArgumentParser()
parser.add_argument("input", type=str, help="Input image to change color")
parser.add_argument("--input_hex", default='00b274', type=str, help='color to change')
parser.add_argument("--output_hex", default='C17A00', type=str, help="target Hue value")
parser.add_argument("--output", "-o", default='output.png', type=str, help="Output image file")
args = parser.parse_args()
print(args)
# Call function
color_change(args.input, args.input_hex, [5,120,50], args.output_hex, args.output)
|
[
"eskenderbesrat@gmail.com"
] |
eskenderbesrat@gmail.com
|
b88b6165ffa7e0e3c5f8e34b06d5caa07634dbab
|
be9fdce8e4cb5644ee25b5de789c5990d5c71175
|
/flask-chatterbot/Prueba/metro_logic.py
|
228d97b542967e6e0e92535678bbf6294c46423e
|
[] |
no_license
|
andrew962/finalPro_5
|
5e9c1196239d8a19c67841f9960eee48d2f98822
|
e02e1d2c6bd81e3feb97344fc92e3ac094bc67f2
|
refs/heads/master
| 2020-03-24T05:51:22.169442
| 2018-08-12T00:29:15
| 2018-08-12T00:29:15
| 142,505,119
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,452
|
py
|
from __future__ import unicode_literals
from chatterbot.logic import LogicAdapter
import re
class MyLogicAdapter(LogicAdapter):
def __init__(self, **kwargs):
super(MyLogicAdapter, self).__init__(**kwargs)
def can_process(self, statement):
"""
Return true if the input statement contains.
"""
"""
aqui abajo dentro del parrafo que se introdujo se busca una serie de numeros de 6 caracteres( patron = re.compile(r'\d\d\d\d\d\d\d\d') )
"""
patron = re.compile(r'\d\d\d\d\d\d\d\d')
self.num = patron.search(statement.text)
words = ['mi','saldo']
        # process() dereferences self.num, so also require that a number was found
        return self.num is not None and all(x in statement.text.split() for x in words)
def process(self, statement):
from chatterbot.conversation import Statement
import requests,json
url ='http://panamenio.herokuapp.com/api/com/metrobus/'+self.num.group()
        response = requests.get(url)
# Let's base the confidence value on if the request was successful
if response.status_code == 200:
confidence = 1
else:
confidence = 0
data = json.loads(response.text)
saldo = str(data['balance'])
        response_statement = Statement('saldo '+saldo)
        response_statement.confidence = confidence
return response_statement
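# Matching sketch (input text hypothetical): for the statement
# 'cual es mi saldo 12345678', can_process finds both keywords and the
# 8-digit number, so process() queries .../api/com/metrobus/12345678.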
|
[
"abadia962@gmail.com"
] |
abadia962@gmail.com
|
5d2c5b151d7b16f230ab8f1a7a64c307f5eea728
|
5c1cea06a57373224c09f0f8ebc03cfdc16e2ef8
|
/climate_flask.py
|
3cb5814ce35272fadc57a541a2b7006463cc1e92
|
[] |
no_license
|
PaulaJorgensen/sqlalchemy-challenge
|
5290057b8b8fcd3097b9e9bda7b1a1dc975c12b6
|
b194e8e45cd8a943720d50836b7adf19ad3c20b0
|
refs/heads/master
| 2020-09-11T09:39:26.922891
| 2019-11-24T00:10:26
| 2019-11-24T00:10:26
| 222,024,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,976
|
py
|
import datetime as dt
import numpy as np
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
Base = automap_base()
Base.prepare(engine, reflect=True)
Measurement = Base.classes.measurement
Station = Base.classes.station
session = Session(engine)
app = Flask(__name__)
@app.route("/")
def Welcome():
return (
f"Data Range is from 8/23/2016 thru 8/23/2017.<br><br>"
f"Available Routes: <br>"
f"/api/v1.0/precipitation<br/>"
f"Returns percipitation data for the data range.<br><br>"
f"/api/v1.0/stations<br/>"
f"Returns data on all the weather stations in Hawaii. <br><br>"
f"/api/v1.0/tobs<br/>"
f"Returns temperature data for the most active weather station (USC00519281).<br><br>"
f"/api/v1.0/<date>date<br/>"
f"Returns an Average, Max, and Min temperature for a given start date. <br><br>"
f"/api/v1.0/<startdate>startdate/<enddate>enddate<br/>"
f"Returns an Average, Max, and Min temperatures for a given date range."
)
@app.route("/api/v1.0/precipitation")
def precipitation():
session = Session(engine)
curr_year=dt.date(2017, 8, 23)
prev_year = curr_year - dt.timedelta(days=365)
prcp=session.query(Measurement.date, func.sum(Measurement.prcp)).\
filter(Measurement.prcp != None).filter(Measurement.date>=prev_year).\
group_by(Measurement.date).all()
session.close()
prcp_data = []
for d,p in prcp:
prcp_dict = {}
prcp_dict["date"] = d
prcp_dict["prcp"] = p
prcp_data.append(prcp_dict)
return jsonify(prcp_data)
@app.route("/api/v1.0/stations")
def stations():
session = Session(engine)
"""Return a list of stations."""
results = session.query(Station.station, Station.name, Station.elevation, Station.latitude, Station.longitude).all()
session.close()
station_list = []
for result in results:
row = {}
row['station'] = result[0]
row['name'] = result[1]
row['elevation'] = result[2]
row['latitude'] = result[3]
row['longitude'] = result[4]
station_list.append(row)
return jsonify(station_list)
@app.route("/api/v1.0/tobs")
def tobs():
session = Session(engine)
curr_year=dt.date(2017, 8, 23)
prev_year = curr_year - dt.timedelta(days=365)
temps = session.query(Measurement.tobs).\
filter(Measurement.station == 'USC00519281').\
filter(Measurement.date >= prev_year).all()
session.close()
temp_list = list(np.ravel(temps))
return jsonify(temp_list)
@app.route("/api/v1.0/<date>")
def date(date):
session = Session(engine)
results=session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date>=date).all()
session.close()
date_temp=list(np.ravel(results))
t_min=date_temp[0]
t_avg=date_temp[1]
t_max=date_temp[2]
t_dict = {'Minimum Temperature': t_min, 'Average Temperature': t_avg, 'Maximum Temperature': t_max}
return jsonify(t_dict)
@app.route("/api/v1.0/<startdate>/<enddate>")
def start_end_date(startdate,enddate):
session=Session(engine)
results=session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date>=startdate).\
filter(Measurement.date<=enddate).all()
session.close()
date_temp=list(np.ravel(results))
t_min=date_temp[0]
t_avg=date_temp[1]
t_max=date_temp[2]
t_dict = {'Minimum Temperature': t_min, 'Average Temperature': t_avg, 'Maximum Temperature': t_max}
return jsonify(t_dict)
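# Example requests (dates are sample values within the data range):
#   /api/v1.0/2017-01-01            -> min/avg/max temps from that date onward
#   /api/v1.0/2017-01-01/2017-01-07 -> min/avg/max temps for that week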
if __name__ == '__main__':
app.run(debug=True)
|
[
"53984747+PaulaJorgensen@users.noreply.github.com"
] |
53984747+PaulaJorgensen@users.noreply.github.com
|
f2c53b1a3d952b877976848f098613817402f694
|
16ec54556fe22d46aa9ec659bf63c465d9eef3dd
|
/myapp/models.py
|
202655da7b9b208b78b2d6b5c2dbf3a5d58622f7
|
[] |
no_license
|
CzNX/Craigslist-clone-utube-
|
37e273cc1dce78c352dd53428e07425514b5978b
|
ba9b2fecc861088be0e4b3d3916a8fd32ee2e24c
|
refs/heads/main
| 2023-02-27T04:57:36.978472
| 2021-02-10T03:38:29
| 2021-02-10T03:38:29
| 337,607,298
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 276
|
py
|
from django.db import models
# Create your models here.
class Search(models.Model):
search = models.CharField(max_length=500)
    created = models.DateTimeField(auto_now_add=True)  # set once at creation, not on every save
def __str__(self):
return self.search
class Meta:
verbose_name_plural = 'Searches'
|
[
"sijanshres0@gmail.com"
] |
sijanshres0@gmail.com
|
62a21e5eed5dbc606019cbd105e6fc4445533079
|
e3150323046fabc5a1c555b50e135b9a72f53302
|
/doi4bib/import_dois.py
|
51495dfc5a53be339898a1ff6ac520cee6b16941
|
[
"MIT"
] |
permissive
|
sharkovsky/doi4bib
|
f75b1b8de97e6bca199ba2065378f774fb37f417
|
c83a00fbc315a0dacb6a308c690b4e4f545e9c2e
|
refs/heads/master
| 2020-07-01T20:49:28.022253
| 2019-08-18T14:18:14
| 2019-08-18T14:18:14
| 201,296,699
| 7
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,013
|
py
|
# -*- coding: UTF-8 -*-
"""
This file was copied from
https://github.com/OpenAPC/openapc-de/blob/master/python/import_dois.py
The only modifications I made were to remove some lines that were not
useful to me
"""
import json
from urllib.error import HTTPError
from urllib.parse import quote_plus, urlencode
from urllib.request import urlopen, Request
from Levenshtein import ratio
__all__ = ['crossref_query_title']
EMPTY_RESULT = {
"crossref_title": "",
"similarity": 0,
"doi": ""
}
MAX_RETRIES_ON_ERROR = 3
def crossref_query_title(title):
"""Contacts Crossref API for DOI of a paper
The paper is identified by its title.
The function retrieves the first 5 results, and searches for the one
with maximum similarity to the original title.
Raises an HTTPError in case of failure.
Args:
title: a str with the title of the paper whose DOI we are looking for
"""
api_url = "https://api.crossref.org/works?"
params = {"rows": "5", "query.title": title}
url = api_url + urlencode(params, quote_via=quote_plus)
request = Request(url)
request.add_header("User-Agent",
"doi4bib utility\
(https://github.com/sharkovsky/doi4bib)")
try:
ret = urlopen(request)
content = ret.read()
data = json.loads(content.decode('utf-8'))
items = data["message"]["items"]
most_similar = EMPTY_RESULT
for item in items:
title = item["title"].pop()
result = {
"crossref_title": title,
"similarity": ratio(title.lower(),
params["query.title"].lower()),
"doi": item["DOI"]
}
if most_similar["similarity"] < result["similarity"]:
most_similar = result
return {"success": True, "result": most_similar}
except HTTPError as httpe:
return {"success": False, "result": EMPTY_RESULT, "exception": httpe}
|
[
"francesco.cremonesi@epfl.ch"
] |
francesco.cremonesi@epfl.ch
|
a917784c91af5a8fbff170296cdc0c44eddc539a
|
5db00b027e7dc84cbe00a4a66a27bc759ad48b0b
|
/SR_no_action.py
|
d7532526aac747c593a8bedf11bdd3e3ddff072d
|
[] |
no_license
|
idamomen/predictive_representations
|
032576822c08a2d6293925ca7747b2bca0fdb94a
|
93eaa8e7a43052aa9866db6d7f5a5aff263ecebd
|
refs/heads/master
| 2020-04-28T13:33:55.441076
| 2019-03-15T23:07:00
| 2019-03-15T23:07:00
| 175,309,659
| 14
| 0
| null | 2019-03-12T23:09:40
| 2019-03-12T23:07:05
| null |
UTF-8
|
Python
| false
| false
| 3,811
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import random
import math
class SR_no_action():
''' This class defines a reinforcement learning agent that
learns the state-state successor representation without taking actions.
Thus, the resulting SR matrix is in the service of prediction.
    Initialization parameters
    gamma: discount param
    alpha: learning rate
    p_sample: probability of sampling different options, only relevant for testing policy dependence
    NUM_STATES: the number of states in the environment to initialize matrices
Ida Momennejad, 2019'''
def __init__(self, gamma, alpha, p_sample, NUM_STATES):
self.gamma = gamma # discount factor
self.alpha = alpha # learning rate
self.p_sample = p_sample # p(sampling options)
self.M= np.zeros([NUM_STATES, NUM_STATES]) # M: state-state SR
self.W= np.zeros([NUM_STATES]) # W: value weights, 1D
self.onehot=np.eye(NUM_STATES) # onehot matrix, for updating M
self.V= np.zeros([NUM_STATES]) # value function
self.biggest_change = 0
self.significant_improvement = 0.001 # convergence threshold
        # policy: not relevant in exp 1, the agent is passively moved
# but in Exp2 we keep updating it to get the optimal policy
# self.Pi = np.zeros([NUM_STATES], dtype=int)
self.epsilon = .1
self.memory=[]
def step(self, s, s_new, reward):
old_v = self.get_value()
self.update_memory(s, s_new)
self.update_SR(s, s_new)
self.update_W(s, s_new, reward)
self.update_biggest_change(old_v[s], s)
########## update policy ##############
#Pi[s] = action
# M, W = dyna_replay(memory, M, W, episodes)
def update_SR(self, s, s_new):
self.M[s] = (1-self.alpha)* self.M[s] + self.alpha * ( self.onehot[s] + self.gamma * self.M[s_new] )
def update_W(self, s, s_new, reward):
''' Update value weight vector.
It computes the normalized feature vector * reward PE.
Here reward function would be sufficient. The same,
but R is easier. We use W in plos comp biol 2017 paper, to
account for BG weights allowing dopamine similarities
between MF and MB learning.'''
# future notes: 27 feb 2019: in paper both get updated with every transition
# better to do batch updates. W updated every step, but M
# updated every couple of steps with dyna
# like feature learning.
# all rules are correct, but in practice for TD learning on features
# a little weird to learn feature vector with every step
# normally features are stable over the task.
norm_feature_rep = self.M[s] / ( self.M[s]@self.M[s].T )
# Compute the values of s and s_prime, then the prediction error
V_snew = self.M[s_new]@self.W
V_s = self.M[s]@self.W
w_pe = ( reward + self.gamma*V_snew - V_s ).squeeze()
# Update W with the same learning rate
# future: this could be different
self.W += self.alpha * w_pe *norm_feature_rep
def get_value(self):
''' Combine the successor representation M & value weight W
to determine the value of different options'''
self.V = self.M@self.W
return self.V
def update_memory(self, s, s_new):
''' Save current state and the state it visited in one-step
to memory. This is used in the Dyna version for replay.'''
self.memory.append([s, s_new])
    def update_biggest_change(self, old_v_m, s):
        ''' Compute the change in value; if it is larger than the
            current max change, update biggest_change. '''
        V = self.get_value()
        self.biggest_change = max(self.biggest_change, np.abs(old_v_m - V[s]))
        self.check_convergence()
    def check_convergence(self):
        ''' If the expression below is true, convergence has been reached. '''
        self.convergence = self.biggest_change < self.significant_improvement
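if __name__ == '__main__':
    # Minimal usage sketch; the 5-state chain and all parameter values here
    # are hypothetical, just to show the API.
    agent = SR_no_action(gamma=0.95, alpha=0.1, p_sample=0.5, NUM_STATES=5)
    agent.step(s=0, s_new=1, reward=0.0)
    print(agent.M[0])        # SR row for state 0 after one observed transition
    print(agent.get_value())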
|
[
"noreply@github.com"
] |
idamomen.noreply@github.com
|
d5cf10eeebc72cd06bb9ec289310d5a2326fc073
|
ee1e73bbe2b9f61f019e9001ff7c48eeffba1ae2
|
/configs/cascade_mask_rcnn_r50_fpn_1x.py
|
80002f423143b2601b15ade9fcd308c199c41906
|
[
"Apache-2.0"
] |
permissive
|
Leo-xxx/Libra_R-CNN
|
f246838636983c824ea1b28b02a878038c6c8773
|
387fdb32fc35067cca3af04f720cb5078b5fa529
|
refs/heads/master
| 2020-06-13T12:06:09.243520
| 2019-05-07T12:00:20
| 2019-05-07T12:00:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,609
|
py
|
# model settings
model = dict(
type='CascadeRCNN',
num_stages=3,
pretrained='modelzoo://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(
type='SmoothL1Loss',
beta=1.0,
loss_weight=1.0)),
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(
type='SmoothL1Loss',
beta=1.0,
loss_weight=1.0)),
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(
type='SmoothL1Loss',
beta=1.0,
loss_weight=1.0))
],
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
mask_head=dict(
type='FCNMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=81,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False)
],
stage_loss_weights=[1, 0.5, 0.25])
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=100,
mask_thr_binary=0.5),
keep_all_stages=False)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
data = dict(
imgs_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
img_scale=(1333, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0.5,
with_mask=True,
with_crowd=True,
with_label=True),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
img_scale=(1333, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0,
with_mask=True,
with_crowd=True,
with_label=True),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
img_scale=(1333, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0,
with_mask=True,
with_label=False,
test_mode=True))
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/cascade_mask_rcnn_r50_fpn_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
|
[
"pangjiangmiao@gmail.com"
] |
pangjiangmiao@gmail.com
|
0eef70b0027c2b33b256ef719347fe45d651db2a
|
9054c9d94680f1228c4188fe348df183c2d3f5af
|
/Data Structures in python/tuples.py
|
a3886f87f28d0c0a73f5eb3cae4ac3fe1b7b8d70
|
[] |
no_license
|
adityasharan01/coding-interview-patterns
|
ad2b434a80693b087629d6e2a606b0d3f0eb2782
|
8e402dc3969c508d099465ac6bb1f8220747efa5
|
refs/heads/main
| 2023-07-16T02:58:37.551162
| 2021-09-04T17:34:14
| 2021-09-04T17:34:14
| 336,842,301
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 738
|
py
|
# How to create a Tuple?
newTuple = ('a', 'b', 'c', 'd', 'e')
newTuple1 = tuple('abcde')
print(newTuple1)
# Access Tuple elements
print(newTuple[0])
# Traverse through tuple
for i in newTuple:
print(i)
for index in range(len(newTuple)):
print(newTuple[index])
# How to search for an element in Tuple?
print('a' in newTuple)
def searchInTuple(pTuple, element):
for i in pTuple:
if i == element:
return pTuple.index(i)
return 'The element does not exist'
print(searchInTuple(newTuple, 'a'))
# Tuple Operations / Functions
myTuple = (1,4,3,2,5)
myTuple1 = (1,2,6,9,8,7)
print(myTuple + myTuple1)
print(myTuple * 4)
print(2 in myTuple1)
print(myTuple1.count(2))
print(myTuple1.index(2))
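# Tuple unpacking (extra sketch, same spirit as the examples above)
a, b, *rest = myTuple1
print(a, b, rest)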
|
[
"noreply@github.com"
] |
adityasharan01.noreply@github.com
|
b11661ce076c847699fc4abaa20da2a21283dd48
|
d1aa6e7d5631d7806531660febbd1f856eaeece7
|
/python/paddle/audio/__init__.py
|
e76a80300f5e61706374423f7f2b11265b1b2dd9
|
[
"Apache-2.0"
] |
permissive
|
gongweibao/Paddle
|
510cd4bc0ef89bc6ccee7b6b8eca52c00e014b77
|
60f9c60cd8196c66c391d79c35d341e9072f8838
|
refs/heads/develop
| 2023-03-13T17:43:35.675875
| 2022-09-20T08:46:15
| 2022-09-20T08:46:15
| 82,279,237
| 3
| 2
|
Apache-2.0
| 2021-05-26T06:17:43
| 2017-02-17T09:16:16
|
Python
|
UTF-8
|
Python
| false
| false
| 726
|
py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import features
from . import functional
from . import utils
__all__ = ["functional", "features", "utils"]
|
[
"noreply@github.com"
] |
gongweibao.noreply@github.com
|
4f7ae8e0f85b6aed6c042801447335bcec8276b9
|
b46be9d018a7c337ec93802171faf5126c37c1d2
|
/Feb.5.Assignment.py
|
09955fcf65a3ef7a237b6c868ce9801e7093181a
|
[] |
no_license
|
jmoyang/DATA310
|
42d428344aa7a2b710e25efc6897f664f1d2f779
|
b735ff7a8c25e414f877f7ae8e97e93f5ce00040
|
refs/heads/main
| 2023-04-30T00:11:17.417068
| 2021-05-18T17:34:29
| 2021-05-18T17:34:29
| 336,088,325
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 681
|
py
|
import tensorflow as tf
import numpy as np
from tensorflow import keras
model = tf.keras.Sequential([keras.layers.Dense(units=1,input_shape=[3])])
model.compile(optimizer='sgd',loss='mean_squared_error')
x1 = np.array([4.0,3.0,4.0,5.0,2.0,3.0],dtype=float)
x2 = np.array([3.524, 2.840, 3.680, 3.051, 1.479, 1.238],dtype=float)
x3 = np.array([2.0, 2.0, 3.0, 2.0, 1.0, 1.0],dtype=float)
xs = np.stack([x1, x2, x3], axis=1)
ys = np.array([2.89, 2.29, 3.99, 3.475, 2.5, 0.97],dtype=float)
model.fit(xs,ys,epochs=1000)
a= np.array([5.0], dtype=float)
b= np.array([3.680], dtype=float)
c= np.array([1.0], dtype=float)
d=np.stack([a, b, c], axis=1)
print(model.predict(d))
|
[
"noreply@github.com"
] |
jmoyang.noreply@github.com
|
4b7de08551036eb064c6d636b9a71cbe82fa04d7
|
e2a1a6eb081f7b8a13c3112e349436e47b5ebf6f
|
/main.py
|
75bff8c77bd7d03f41f6d95e91590d6d1c753b52
|
[] |
no_license
|
MrStraw/labibi
|
45a8589a147e109992f7632ddb96d5aff8752b97
|
8aa7894721ba31b2772678837d9f126c5e7faa39
|
refs/heads/master
| 2023-06-27T23:48:51.322093
| 2021-08-03T15:08:56
| 2021-08-03T15:08:56
| 391,997,511
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 46
|
py
|
from screen import screen_lab
screen_lab(10)
|
[
"metais.simon@icloud.com"
] |
metais.simon@icloud.com
|
1cf6a057d9544d3689c08f37ca972b72d9bb63d0
|
991c967a3e28e4287e6b9b4600e1c72dd2513aa5
|
/src/solutions/Prob47.py
|
73f08564d4a3466dfd9260f96f4587016bac287e
|
[] |
no_license
|
Beadsworth/ProjectEuler
|
0e0f76a4ba54b3e2e7b6ee852a2003e255c259d0
|
b6360ca6b7ecee514b80fe2bf04d023c74c749be
|
refs/heads/master
| 2022-11-30T20:32:38.221316
| 2022-11-28T02:08:15
| 2022-11-28T02:08:15
| 68,401,802
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
from EulerHelpers import find_prime_factors
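# Project Euler 47 (sketch of intent): find the first run of four consecutive
# integers that each have four distinct prime factors, and print that run
# (the expected result is [134043, 134044, 134045, 134046]).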
num = 1
num_unique = 4
consec_num = 0
num_list = []
while consec_num < num_unique:
num_factors = find_prime_factors(num)
distinct_factors = set(num_factors)
if len(distinct_factors) == num_unique:
consec_num += 1
num_list.append(num)
else:
consec_num = 0
num_list = []
num += 1
print(num_list)
|
[
"jamesbeadsworth@gmail.com"
] |
jamesbeadsworth@gmail.com
|
7e8ab4f848c58846c0751daa634f9deed32d58ae
|
9b9b935030eaaae70fcab14e15eddeb9bf4ecf2a
|
/main/feeds.py
|
b545ed253e1354eef1f531a41fa833481734bb65
|
[] |
no_license
|
camscars/blog
|
d296b87cb913005d32170e7bc4061c3126847430
|
120eed8432daae8894417123d91f11ca3df05fd8
|
refs/heads/master
| 2021-01-21T01:51:32.393172
| 2016-05-20T21:42:42
| 2016-05-20T21:42:42
| 54,678,923
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 511
|
py
|
from django.contrib.syndication.views import Feed
from django.template.defaultfilters import truncatewords
from .models import Post
class LatestPostsFeed(Feed):
title = 'My Blog'
link = '/blog'
description = 'New Posts of my blog'
def items(self): #retrieves the objects for the feed (last 5)
return Post.published.all()[:5]
def item_title(self, item): #get the objects title
return item.title
def item_description(self, item): #get the objects description
return truncatewords(item.body, 30)
|
[
"camscars702@gmail.com"
] |
camscars702@gmail.com
|
6a745d33214b2d62947f9172a87d5c8f91a4a1c2
|
96438b993168f98ea291d22528478c90499759f9
|
/nn_model.py
|
7c4f23249766d83085def0112abf654e117b4115
|
[] |
no_license
|
Nerkys/Sequential-neural-network
|
36fb130b37c5d839cad35ad937d1be55a38f5e81
|
62a70aadd242f2b152db93a9b458325c934f681b
|
refs/heads/master
| 2022-07-13T15:37:26.664060
| 2020-05-13T13:53:42
| 2020-05-13T13:53:42
| 263,640,831
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,065
|
py
|
import numpy as np
from time import time
import dill as pickle
#from matplotlib import pyplot as plt
#from pandas import DataFrame
#from sklearn import datasets
#from collections import Counter
#from sklearn.datasets import make_moons
#from sklearn.datasets import make_circles
def sigmoid(x):
#print(x)
return 1/(1+np.exp(-x))
def tanh(x):
return np.tanh(x)
def LeLu(x):
    # leaky ReLU; vectorized so it also works on numpy arrays, not just scalars
    return x * (x >= 0) + 0.25 * x * (x < 0)
def ReLu(x):
return x * (x >= 0)
def compute_linear(data,W,B):
return W.dot(data) + B
def quadraticcost(output, ylabels):
m = ylabels.shape[1] # number of examples
cost = np.sum(1/(2*m)*(output-ylabels)**2)
return cost #loss function
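# Shape conventions (as used below): W is (n_out, n_in), data is (n_in, m),
# B is (n_out, 1), so compute_linear returns (n_out, m); ylabels passed to
# quadraticcost is (n_out, m), with examples in columns.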
class NetModel:
def __init__(self, hidden_layers, activations = dict(), input_layer = 1, seed = 2):
self.layers = hidden_layers
self.activations = activations
self.input_layer = input_layer
self.seed = seed #seed for random
self.length = len(self.layers)
np.random.seed(seed)
self.parameters = dict()
self.layers.insert(0, input_layer)
self.der = {sigmoid: lambda x:sigmoid(x)*(1-sigmoid(x)), #derivaties
tanh: lambda x:1-(tanh(x))**2,
quadraticcost : lambda a,y: a-y,
ReLu: lambda x: 1*(x>=0),
LeLu: lambda x: 1*(x>=0) + 0.25*(x<0)}
self.changes_nesterov = dict()
self.changes_adagrad = dict()
self.changes_adam_m = dict()
self.changes_adam_v = dict()
for l_num in range(1,len(self.layers)):
self.changes_nesterov["dw"+str(l_num)] = np.random.uniform(-1,1,size=(self.layers[l_num],self.layers[l_num-1]))*0
self.changes_nesterov["db"+str(l_num)] = np.zeros((self.layers[l_num],1))
self.changes_adagrad["dw"+str(l_num)] = np.random.uniform(-1,1,size=(self.layers[l_num],self.layers[l_num-1]))*0
self.changes_adagrad["db"+str(l_num)] = np.zeros((self.layers[l_num],1))
self.changes_adam_m["dw"+str(l_num)] = np.random.uniform(-1,1,size=(self.layers[l_num],self.layers[l_num-1]))*0
self.changes_adam_m["db"+str(l_num)] = np.zeros((self.layers[l_num],1))
self.changes_adam_v["dw"+str(l_num)] = np.random.uniform(-1,1,size=(self.layers[l_num],self.layers[l_num-1]))*0
self.changes_adam_v["db"+str(l_num)] = np.zeros((self.layers[l_num],1))
self.parameters["w"+str(l_num)] = np.random.uniform(-1,1,size=(self.layers[l_num],self.layers[l_num-1]))*0.01 #dont forget to *0.01
self.parameters["b"+str(l_num)] = np.zeros((self.layers[l_num],1))
self.parameters["ac"+str(l_num)] = self.activations[l_num] if l_num in self.activations.keys() else None
def forward_propagation(self,data):
cur = data.transpose() # cur features - rows; examples - columns;
cache = dict()
acs = self.activations
cache["A0"]=cur
for l in range(1,self.length+1): # length of net model - number of layers
cache["Z"+str(l)]=compute_linear(cur,self.parameters["w"+str(l)],self.parameters["b"+str(l)])
cache["A"+str(l)]=acs[l](cache["Z"+str(l)]) if acs[l] is not None else cache["Z"+str(l)] # applying activation if not None
cur = cache["A"+str(l)]
self.output = cache["A"+str(l)]
self.cache = cache
def back_propagation(self, ylabels, printq = False, cost= quadraticcost):
changes = dict()
ylabels=ylabels.transpose()
m=len(self.output[0])
dcura = self.der[cost](self.output,ylabels)
dcurz = dcura*self.der[self.parameters['ac'+str(self.length)]](self.cache['Z'+str(self.length)])
if printq==True:
print(self.output)
for l in range(self.length,0,-1):
changes["dw"+str(l)] = 1/m*dcurz.dot(self.cache['A'+str(l-1)].transpose())+self.lambd/m*self.parameters["w"+str(l)]
changes["db"+str(l)] = 1/m*np.sum(dcurz,axis=1,keepdims=True)
if l==1:
break
dcura = (self.parameters['w'+str(l)].transpose()).dot(dcurz)
dcurz = dcura*self.der[self.parameters['ac'+str(l-1)]](self.cache['Z'+str(l-1)])
self.changes = changes
    def back_propagation_nesterov(self, ylabels, printq = False, cost= quadraticcost, gamma = 0.9):
changes = dict()
ylabels=ylabels.transpose()
m=len(self.output[0])
dcura = self.der[cost](self.output,ylabels)
dcurz = dcura*self.der[self.parameters['ac'+str(self.length)]](self.cache['Z'+str(self.length)])
if printq==True:
print(self.output)
for l in range(self.length,0,-1):
changes["dw"+str(l)] = 1/m*dcurz.dot(self.cache['A'+str(l-1)].transpose() - gamma*self.changes_nesterov["dw" + str(l)].transpose())+self.lambd/m*self.parameters["w"+str(l)]
changes["db"+str(l)] = 1/m*np.sum(dcurz,axis=1,keepdims=True)
if l==1:
break
dcura = (self.parameters['w'+str(l)].transpose()).dot(dcurz)
dcurz = dcura*self.der[self.parameters['ac'+str(l-1)]](self.cache['Z'+str(l-1)])
self.changes = changes
def l2_cost(self,output, ylabels):
m = ylabels.shape[1] # number of examples
        l2 = self.lambd/( m * 2 )*np.sum([np.sum(np.square(self.parameters["w"+str(i)])) for i in range(1,self.length+1)])
cost = np.sum(1/(2*m)*(output-ylabels)**2)
#print(cost,l2)
return cost + l2
def update_weights(self,learning_rate=0.05):
for l in range(1,self.length+1):
self.parameters["w"+str(l)]=self.parameters["w"+str(l)]-learning_rate*self.changes["dw"+str(l)]
self.parameters["b"+str(l)]=self.parameters["b"+str(l)]-learning_rate*self.changes["db"+str(l)]
def update_weights_nesterov(self,learning_rate=0.05, gamma = 0.9):
for l in range(1,self.length+1):
self.changes_nesterov["dw" + str(l)] = gamma*self.changes_nesterov["dw" + str(l)] + learning_rate*self.changes["dw"+str(l)]
self.changes_nesterov["db" + str(l)] = gamma*self.changes_nesterov["db" + str(l)] + learning_rate*self.changes["db"+str(l)]
self.parameters["w"+str(l)]= self.parameters["w"+str(l)] - self.changes_nesterov["dw" + str(l)]
self.parameters["b"+str(l)]= self.parameters["b"+str(l)] - self.changes_nesterov["db" + str(l)]
    def update_weights_adagrad(self,learning_rate=0.05, epsilon = 0.000001):
        for l in range(1,self.length+1):
            self.changes_adagrad["dw" + str(l)] = self.changes_adagrad["dw" + str(l)] + (self.changes["dw"+str(l)])**2
            self.changes_adagrad["db" + str(l)] = self.changes_adagrad["db" + str(l)] + (self.changes["db"+str(l)])**2
            # Adagrad scales the step by 1/sqrt(accumulated squared gradients)
            self.parameters["w"+str(l)]= self.parameters["w"+str(l)] - learning_rate*self.changes["dw"+str(l)]/((self.changes_adagrad["dw" + str(l)])**(1/2) + epsilon)
            self.parameters["b"+str(l)]= self.parameters["b"+str(l)] - learning_rate*self.changes["db"+str(l)]/((self.changes_adagrad["db" + str(l)])**(1/2) + epsilon)
    def update_weights_adam(self,learning_rate=0.05, beta1 = 0.5, beta2 = 0.5, epsilon = 0.000001):
        # Adam's bias correction uses the global update step, not the layer index
        self.adam_t = getattr(self, 'adam_t', 0) + 1
        t = self.adam_t
        for l in range(1,self.length+1):
            self.changes_adam_m["dw" + str(l)] = beta1*(self.changes_adam_m["dw" + str(l)]) + (1-beta1)*self.changes["dw"+str(l)]
            self.changes_adam_m["db" + str(l)] = beta1*(self.changes_adam_m["db" + str(l)]) + (1-beta1)*self.changes["db"+str(l)]
            self.changes_adam_v["dw" + str(l)] = beta2*(self.changes_adam_v["dw" + str(l)]) + (1-beta2)*(self.changes["dw"+str(l)])**2
            self.changes_adam_v["db" + str(l)] = beta2*(self.changes_adam_v["db" + str(l)]) + (1-beta2)*(self.changes["db"+str(l)])**2
            self.parameters["w"+str(l)]= self.parameters["w"+str(l)] - learning_rate*self.changes_adam_m["dw" + str(l)]/(1 - beta1**t)/((self.changes_adam_v["dw" + str(l)]/(1 - beta2**t) + epsilon)**(1/2))
            self.parameters["b"+str(l)]= self.parameters["b"+str(l)] - learning_rate*self.changes_adam_m["db" + str(l)]/(1 - beta1**t)/((self.changes_adam_v["db" + str(l)]/(1 - beta2**t) + epsilon)**(1/2))
def GD(self,data,ylabels, iterations = 1000,lr = 0.05, lambd = 0, printq = 1000):
try:
self.lambd = lambd
for i in range(iterations):
self.forward_propagation(data)
self.back_propagation(ylabels)
self.update_weights(learning_rate=lr)
if i % printq== 0:
if self.lambd==0:
print("Cost after iteration %i: %f" %(i, quadraticcost(self.output.transpose(),ylabels)))
else:
print("Cost after iteration %i: %f" %(i, self.l2_cost(self.output.transpose(),ylabels)))
except KeyboardInterrupt:
print("KeyboardInTerrupt")
return self.parameters
return self.parameters
def SGD(self, data, ylabels, iterations = 1000, batch_size = 16, lr = 0.05, printq = 1000,seed = 3,lambd = 0):
try:
self.lambd = lambd
np.random.seed(seed)
train = list(zip(data,ylabels))
n = len(train)
for i in range(iterations):
np.random.shuffle(train)
batches = [train[k:k+batch_size] for k in range(0, n, batch_size)]
for batch in batches:
batch_data = np.array([i[0] for i in batch])
batch_ylabels = np.array([i[1] for i in batch])
self.forward_propagation(batch_data)
self.back_propagation(batch_ylabels)
self.update_weights(learning_rate=lr)
if i % printq== 0:
self.forward_propagation(data)
if self.lambd==0:
print("Cost after iteration %i: %f" %(i, quadraticcost(self.output.transpose(),ylabels)))
else:
print("Cost after iteration %i: %f" %(i, self.l2_cost(self.output.transpose(),ylabels)))
except KeyboardInterrupt:
print("KeyboardInTerrupt")
return self.parameters
return self.parameters
def NAG(self, data, ylabels, iterations = 1000, batch_size = 16, lr = 0.05, printq = 1000,seed = 3, lambd = 0, gamma = 0.9):
try:
self.lambd = lambd
np.random.seed(seed)
train = list(zip(data,ylabels))
n = len(train)
for i in range(iterations):
np.random.shuffle(train)
batches = [train[k:k+batch_size] for k in range(0, n, batch_size)]
for batch in batches:
batch_data = np.array([i[0] for i in batch])
batch_ylabels = np.array([i[1] for i in batch])
self.forward_propagation(batch_data)
self.back_propagation(batch_ylabels)
self.update_weights_nesterov(learning_rate = lr, gamma = gamma)
if i % printq== 0:
self.forward_propagation(data)
if self.lambd==0:
print("Cost after iteration %i: %f" %(i, quadraticcost(self.output.transpose(),ylabels)))
else:
print("Cost after iteration %i: %f" %(i, self.l2_cost(self.output.transpose(),ylabels)))
except KeyboardInterrupt:
print("KeyboardInTerrupt")
return self.parameters
return self.parameters
def Adagrad(self, data, ylabels, iterations = 1000, batch_size = 16, lr = 0.05, printq = 1000,seed = 3, lambd = 0, epsilon = 0.000001):
try:
self.lambd = lambd
np.random.seed(seed)
train = list(zip(data,ylabels))
n = len(train)
for i in range(iterations):
np.random.shuffle(train)
batches = [train[k:k+batch_size] for k in range(0, n, batch_size)]
for batch in batches:
batch_data = np.array([i[0] for i in batch])
batch_ylabels = np.array([i[1] for i in batch])
self.forward_propagation(batch_data)
self.back_propagation(batch_ylabels)
self.update_weights_adagrad(learning_rate = lr, epsilon = epsilon)
if i % printq== 0:
self.forward_propagation(data)
if self.lambd==0:
print("Cost after iteration %i: %f" %(i, quadraticcost(self.output.transpose(),ylabels)))
else:
print("Cost after iteration %i: %f" %(i, self.l2_cost(self.output.transpose(),ylabels)))
except KeyboardInterrupt:
print("KeyboardInTerrupt")
return self.parameters
return self.parameters
def Adam(self, data, ylabels, iterations = 1000, batch_size = 16, lr = 0.05, printq = 1000,seed = 3, lambd = 0, beta1 = 0.5, beta2 = 0.5, epsilon = 0.00001):
try:
self.lambd = lambd
np.random.seed(seed)
train = list(zip(data,ylabels))
n = len(train)
for i in range(iterations):
np.random.shuffle(train)
batches = [train[k:k+batch_size] for k in range(0, n, batch_size)]
for batch in batches:
batch_data = np.array([i[0] for i in batch])
batch_ylabels = np.array([i[1] for i in batch])
self.forward_propagation(batch_data)
self.back_propagation(batch_ylabels)
self.update_weights_adam(learning_rate = lr)
if i % printq== 0:
self.forward_propagation(data)
if self.lambd==0:
print("Cost after iteration %i: %f" %(i, quadraticcost(self.output.transpose(),ylabels)))
else:
print("Cost after iteration %i: %f" %(i, self.l2_cost(self.output.transpose(),ylabels)))
except KeyboardInterrupt:
print("KeyboardInTerrupt")
return self.parameters
return self.parameters
def fit(self, data, ylabels, method = 'SGD', iterations = 10000, lr = 0.05, printq = 1000, batch_size = 16, seed = 3, lambd = 0, gamma = 0.9, epsilon = 0.000001,beta1 = 0.5, beta2 = 0.5):
if method == 'SGD':
self.SGD(data, ylabels, iterations=iterations, lr=lr, batch_size=batch_size, printq=printq, seed = seed, lambd = lambd)
elif method == 'GD':
self.GD(data, ylabels, iterations=iterations, lr=lr, printq=printq, lambd = lambd)
elif method == 'NAG':
self.NAG(data, ylabels, iterations=iterations, lr=lr, batch_size=batch_size, printq=printq, seed = seed, lambd = lambd, gamma = gamma)
elif method == 'Adagrad':
self.Adagrad(data, ylabels, iterations=iterations, lr=lr, batch_size=batch_size, printq=printq, seed = seed, lambd = lambd, epsilon = epsilon)
elif method == 'Adam':
self.Adam(data, ylabels, iterations=iterations, lr=lr, batch_size=batch_size, printq=printq, seed = seed, lambd = lambd, beta1= beta1, beta2 = beta2, epsilon = epsilon)
def predict(self,data,printq = False):
self.forward_propagation(data)
if printq:
print(self.output.transpose())
return np.round(self.output).transpose()
def save(self,path):
with open(path, 'wb') as f:
pickle.dump(self.parameters, f)
def load(self,path):
with open(path, 'rb') as f:
self.parameters = pickle.load(f)
def __repr__(self):
return str(self.parameters)
#additional functions
def plot_labels(data,target):
df = DataFrame(dict(x=data[:,0], y=data[:,1], label=target))
colors = {0:'red', 1:'blue'}
fig, ax = plt.subplots()
grouped = df.groupby('label')
for key, group in grouped:
group.plot(ax=ax, kind='scatter', x='x', y='y', label=key, color=colors[key])
plt.show()
def f_predict(data,trained_net,printq=False):
if printq:
print(f_fp(data,trained_net)[0].transpose())
return np.round(f_fp(data,trained_net)[0]).transpose()
def f_fp(data,nmodel): # data features - columns; examples - rows;
cur = data.transpose() # cur features - rows; examples - columns;
cache=dict()
acs={key:value for key,value in nmodel.items() if key.startswith("ac")} # getting activation functions from parameters
cache["A0"]=cur
for l in range(1,len(nmodel)//3+1): # length of net model - number of layers
cache["Z"+str(l)]=compute_linear(cur,nmodel["w"+str(l)],nmodel["b"+str(l)])
cache["A"+str(l)]=acs["ac"+str(l)](cache["Z"+str(l)]) if acs["ac"+str(l)]!=None else cache["Z"+str(l)] # applying activation if not None
cur = cache["A"+str(l)]
output = cache["A"+str(l)]
return output, cache
def num(n):
cls=np.zeros(10)
cls[n]=1
return cls
def fromnum(num):
d=np.zeros(10)
d[num]=1.
return d
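# Example: fromnum(3) -> array([0., 0., 0., 1., 0., 0., 0., 0., 0., 0.]),
# i.e. a one-hot encoding of a digit label for a 10-class output layer.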
def evaluate_numbers(data,ylabel,trained_net,printq=False):
return np.all(predict(np.array([data]),trained_net)==np.array(ylabel))
def evaluate_clothes(img,label,net):
if np.argmax(predict(img.reshape(1,784),net,printq=False))==label:
return True
return False
def loadmnist(path):
mnist_raw = loadmat(path)
mnist = {
"data": mnist_raw["data"].T,
"target": mnist_raw["label"][0],
"COL_NAMES": ["label", "data"],
"DESCR": "mldata.org dataset: mnist-original",
}
return mnist
if __name__ == "__main__":
net = NetModel([3,1],activations={1:ReLu,2:sigmoid},input_layer=2,seed=4)
np.random.seed(3)
    data=np.random.randn(20,2)  # 2 features to match input_layer=2 above
ylabels=np.random.randn(20,1)
#net.back_propagation(ylabels)
np.random.seed(3)
net.fit(data,ylabels,method='SGD',batch_size=16, iterations=6000,printq=1000)
#print(time()-st)
#print("{")
|
[
"noreply@github.com"
] |
Nerkys.noreply@github.com
|
097992e6ce01179fdfaf8076e396e45f16f1010a
|
72715d8c393d342827f9c7fc71fd0b7e1de5d550
|
/app/blog/models/post.py
|
88fab0506a7e483c60d4da7e234490bc8dbdadca
|
[] |
no_license
|
Chrisaor/blog_model
|
af204a1618158182a3917247063a6541ac74d874
|
ff90bd957fe55113763469b74c422293a5c94584
|
refs/heads/master
| 2020-03-21T01:53:14.495547
| 2018-06-21T01:50:23
| 2018-06-21T01:50:23
| 137,967,616
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 889
|
py
|
from django.db import models
from blog.models import BlogUser, Base
__all__ = (
'Post',
'PostLike',
)
class Post(Base):
user = models.ForeignKey(BlogUser, on_delete=models.CASCADE)
title = models.CharField(max_length=100)
content = models.TextField()
def __str__(self):
return f'{self.title}'
@property
def number_like(self):
        return f'{PostLike.objects.filter(post_id=self.id).count()}'
@property
def like_users(self):
        result = 'People who liked this post:\n'
        for post_like in PostLike.objects.filter(post_id=self.id):
            result += f'- {post_like.user}\n'
        return result
class PostLike(Base):
post = models.ForeignKey(Post, on_delete=models.CASCADE)
user = models.ForeignKey(BlogUser, on_delete=models.CASCADE)
def __str__(self):
        return 'Like!'
|
[
"pchpch0070@gmail.com"
] |
pchpch0070@gmail.com
|
006438b4953240d82bb232e1982630410531582a
|
d2c8311b1e96f9ef6d627e3844655986b2b50c7f
|
/dbHandle.py
|
58948ef5801e9a563f3c6dbc29fc25766c73b91c
|
[] |
no_license
|
snuarrow/pubgStatz
|
3277b26d24a1ed41f5a76e47bd41dbb5a23c50b8
|
10951c0ab7a7b618f70cd35a1056b4fbff144bb9
|
refs/heads/master
| 2022-04-27T01:19:15.572286
| 2020-04-01T15:24:07
| 2020-04-01T15:24:07
| 178,664,833
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,014
|
py
|
import psycopg2
import pandas as pd
import json
def getQueryString(filename: str) -> str:
fd = open(f'sqlQueries/{filename}', 'r')
sqlFile = fd.read()
fd.close()
return sqlFile
class DBHandle:
def __init__(self, host, port, dbname, user, password):
self.connection = self.getDBConnection(host, port, dbname, user, password)
self.initDB()
def getDBConnection(self, host, port, dbname, user, password):
return psycopg2.connect("host="+host+" port="+port+" dbname="+dbname+ " user="+user+" password="+password)
def getCursor(self):
return self.connection.cursor()
def query(self, queryString):
return pd.read_sql(queryString, self.connection)
def sqlCommand(self, command):
try:
self.getCursor().execute(command)
self.connection.commit()
except (Exception, psycopg2.DatabaseError) as error:
print("Error: ", error)
def initDB(self):
self.sqlCommand("CREATE TABLE IF NOT EXISTS users (id VARCHAR NOT NULL PRIMARY KEY, data json NOT NULL)")
self.sqlCommand("CREATE TABLE IF NOT EXISTS matches (id VARCHAR NOT NULL PRIMARY KEY, data json NOT NULL)")
self.sqlCommand("CREATE TABLE IF NOT EXISTS telemetries (id VARCHAR NOT NULL PRIMARY KEY, data json NOT NULL)")
self.sqlCommand("CREATE TABLE IF NOT EXISTS matchesByMap (id VARCHAR NOT NULL PRIMARY KEY, data VARCHAR NOT NULL)")
# TODO: get rid of copypaste in save functions
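    # Note: the queries below build SQL with string concatenation/f-strings,
    # which is injection-prone. psycopg2 supports parameter binding, e.g.:
    #     cursor.execute("SELECT * FROM users WHERE id = %s", (userId,))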
def saveUserJson(self, userId, userJson):
try:
if not self.loadUserJson(userId):
self.getCursor().execute("INSERT INTO users VALUES("+str(userId)+",'"+userJson+"')")
self.connection.commit()
else:
print("Error: user already exists")
except (Exception, psycopg2.Error) as error:
print("Failed to save user: ", error)
# TODO: get rid of copypaste in load functions
def loadUserJson(self, userId):
cursor = self.getCursor()
cursor.execute("SELECT * FROM users WHERE id="+str(userId))
record = cursor.fetchall()
        if len(record) == 1:
return True, record[0][1]
else:
return False
def matchExists(self, matchId):
cursor = self.getCursor()
cursor.execute(f"select id from matches where id='{matchId}'")
record = cursor.fetchall()
return len(record) > 0
# TODO: get rid of copypaste in load functions
def loadMatchJson(self, matchId):
cursor = self.getCursor()
cursor.execute(f"select * from matches where id='{matchId}'")
record = cursor.fetchall()
        if len(record) == 1:
return True, record[0][1]
else:
return False
def telemetryExists(self, matchId):
cursor = self.getCursor()
cursor.execute(f"select id from telemetries where id='{matchId}'")
record = cursor.fetchall()
return len(record) > 0
# TODO: get rid of copypaste in load functions
def loadTelemetryJson(self, matchId):
cursor = self.getCursor()
cursor.execute(f"select * from telemetries where id='{matchId}'")
record = cursor.fetchall()
        if len(record) == 1:
return True, record[0][1]
else:
return False
# TODO: get rid of copypaste in save functions
def saveMatch(self, matchId, matchJson):
try:
if not self.loadMatchJson(matchId):
self.getCursor().execute(f"INSERT INTO matches (id, data) VALUES ('{matchId}', '{json.dumps(matchJson)}')")
self.connection.commit()
else:
print("Error: match already exists")
except (Exception, psycopg2.Error) as error:
print(f"Failed to save match: {matchId}", error)
# TODO: get rid of copypaste in save functions
def saveTelemetry(self, matchId, telemetry):
try:
if not self.loadTelemetryJson(matchId):
self.getCursor().execute(f"INSERT INTO telemetries (id, data) VALUES ('{matchId}', '{json.dumps(telemetry)}')")
self.connection.commit()
else:
print("Error: telemetry already exists")
except (Exception, psycopg2.Error) as error:
print("Failed to save telemetry: ", error)
def loadData(self, query: str):
cursor = self.getCursor()
cursor.execute(query)
record = cursor.fetchall()
return record
#print(json.dumps(record, indent=4, default=str))
#print(len(record))
#exit(1)
def loadAllMatches(self):
cursor = self.getCursor()
cursor.execute(f"select * from matches")
record = cursor.fetchall()
if len(record) > 0:
return True, [{
'matchId': x[0],
'matchData': x[1]
} for x in record
]
else:
return False, None
|
[
"hexvaara@hex.local"
] |
hexvaara@hex.local
|
bc1188c44300190c5c0aafd1cbaaae2fd4a99ed7
|
680539004a873745a2660fb99807d4d4530a25d7
|
/universityData/settings.py
|
8ff42d9a7c34f894aeb7595b1411abad62ee2222
|
[] |
no_license
|
raza8899/universityDetails
|
c04cad2b1014c608c7ae4fecbea97debcd234933
|
ef026e574e449d78aa45b37a6943145884c3ad53
|
refs/heads/main
| 2023-04-21T01:03:01.305398
| 2021-05-11T11:38:21
| 2021-05-11T11:38:21
| 366,357,383
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,227
|
py
|
# Scrapy settings for universityData project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'universityData'
SPIDER_MODULES = ['universityData.spiders']
NEWSPIDER_MODULE = 'universityData.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'universityData (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'universityData.middlewares.UniversitydataSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'universityData.middlewares.UniversitydataDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'universityData.pipelines.UniversitydataPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
FEED_EXPORTERS = {
'json': 'universityData.exporters.Utf8JsonItemExporter',
}
|
[
"ar678@uni-rostock.de"
] |
ar678@uni-rostock.de
|
67fe8d88409a4af7af4cae12257ceceb2b8b2187
|
70f1535e1f1683b16c848d97764fe50724b3e157
|
/pythonProject/Curso Em Video Python/Mundo 1/ex030.py
|
1218c463aff1e481795f75b0a1415d450356c228
|
[] |
no_license
|
DanielMoscardini-zz/python
|
f2ce0e84290a70b3be2ec804b96b3bbaf6b9f9d0
|
5be5254e8c6e701cc9bb9ffafb29f220edde99bf
|
refs/heads/main
| 2023-01-25T04:41:53.638423
| 2020-12-08T20:14:15
| 2020-12-08T20:14:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 228
|
py
|
"""
Faça um software que leia um numero e retorne se o mesmo é par ou impar
"""
numero = int(input('Digite o numero: '))
if (numero % 2 == 0):
print(f'Numero {numero} é PAR')
else :
print(f'Numero {numero} é IMPAR')
|
[
"moscardinibdaniel@gmail.com"
] |
moscardinibdaniel@gmail.com
|
27fa20b1c7823efad0f7d9bdb7ffdfa1796019b9
|
92a622177a1c17055d6834cdbe9bbd99e41c7493
|
/segregation/tests/test_entropy.py
|
bdded79a2684142bc3033d7ad62dc9aa30465df0
|
[
"BSD-3-Clause"
] |
permissive
|
MyrnaSastre/segregation
|
b6d843ffafef5d469a5ed30341629d741819e440
|
6d430f7f00dccc2cf86973f34de3d2ccc284eb9f
|
refs/heads/master
| 2020-05-26T21:01:38.183941
| 2019-05-21T20:52:19
| 2019-05-21T20:52:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 503
|
py
|
import unittest
import libpysal
import geopandas as gpd
import numpy as np
from segregation.non_spatial_indexes import Entropy
class Entropy_Tester(unittest.TestCase):
def test_Entropy(self):
s_map = gpd.read_file(libpysal.examples.get_path("sacramentot2.shp"))
df = s_map[['geometry', 'HISP_', 'TOT_POP']]
index = Entropy(df, 'HISP_', 'TOT_POP')
np.testing.assert_almost_equal(index.statistic, 0.09459760633014454)
if __name__ == '__main__':
unittest.main()
|
[
"renanxcortes@gmail.com"
] |
renanxcortes@gmail.com
|
3625442af4bbc418e56b7c795dfefbe733112bd6
|
6d424b14de9c8d3ab77a8cc10be1985bbc972c08
|
/sorting/quick_3_string.py
|
8e2b5f503ce3e03d4b245254044c13987fb0b1b4
|
[] |
no_license
|
vporta/DataStructures
|
b784b0b398a1e5e16ec6f0bab6ddd10cb6156b43
|
f25e73e3ad98309029158e49100ceb8f33e40376
|
refs/heads/master
| 2021-03-16T15:44:17.125827
| 2020-09-18T18:45:20
| 2020-09-18T18:45:20
| 246,920,941
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,218
|
py
|
"""
quick_3_string.py
Reads strings from a text file and 3-way string quicksorts them.
"""
import random
class Quick3String:
CUTOFF = 15
@classmethod
def sort(cls, a):
random.shuffle(a)
n = len(a)
cls.__sort(a, 0, n - 1, 0)
assert cls.__is_sorted(a)
@classmethod
def __sort(cls, a, lo, hi, d):
if hi <= lo + cls.CUTOFF:
cls.__insertion(a, lo, hi, d)
return
lt, gt = lo, hi
v = cls.__char_at(a[lo], d)
i = lo + 1
        while i <= gt:
            t = cls.__char_at(a[i], d)
            if t < v:
                a[i], a[lt] = a[lt], a[i]  # swap first, then advance both pointers
                lt += 1
                i += 1
            elif t > v:
                a[i], a[gt] = a[gt], a[i]  # swap first, then shrink the right end
                gt -= 1
            else:
                i += 1
# a[lo..lt-1] < v = a[lt..gt] < a[gt+1..hi].
cls.__sort(a, lo, lt-1, d)
if v >= 0:
cls.__sort(a, lt, gt, d+1)
cls.__sort(a, gt+1, hi, d)
@classmethod
def __char_at(cls, s, d):
assert 0 <= d <= len(s)
if d == len(s):
return -1
return ord(s[d])
@classmethod
def __less(cls, v, w, d):
i = d
while i < min(len(v), len(w)):
if v[i] < w[i]:
return True
if v[i] > w[i]:
return False
i += 1
return len(v) < len(w)
@classmethod
def __insertion(cls, a, lo, hi, d):
for i in range(lo, hi + 1):
for j in range(i, lo, -1):
if cls.__less(a[j], a[j - 1], d):
a[j], a[j - 1] = a[j - 1], a[j]
@classmethod
def __is_sorted(cls, a):
for i in range(1, len(a)):
if a[i] < a[i - 1]:
return False
return True
def main():
with open("../resources/shells.txt", ) as f:
a = "".join(f.readlines()).splitlines()
words = []
w = len(a[0].split(' '))
for line in a:
assert w == len(line.split(' '))
words.extend(line.split())
Quick3String.sort(words)
for item in words:
print(item)
if __name__ == '__main__':
main()
|
[
"vporta7@gmail.com"
] |
vporta7@gmail.com
|
e24fca2d24902d76ff8e27ebeec1c0580313050a
|
25ccaaeeb681b18eaf5a5ad60cc228a4c6a4fdca
|
/say/say.py
|
94af79667f3d1474f4bb957f6468cc9a79b9b2e5
|
[] |
no_license
|
ign0re-me/ignorance-cogs
|
04e0b1eabd4dbc96384fdb67ccf00267e1e06b76
|
85f49791ebf46c038feab48848b2ef1eb77d16ca
|
refs/heads/master
| 2020-07-03T06:27:18.805124
| 2016-11-22T05:39:48
| 2016-11-22T05:39:48
| 74,191,691
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,141
|
py
|
import discord
from discord.ext import commands
from random import choice
from .utils import checks
class Say:
"""Repeats what you tell it to."""
def __init__(self, bot):
self.bot = bot
@commands.command(pass_context=True, no_pm=True)
async def emsay(self, ctx, *, text):
"""Embed Say."""
channel = ctx.message.channel
author = ctx.message.author
server = ctx.message.server
avatar = author.avatar_url if author.avatar else author.default_avatar_url
colour = ''.join([choice('0123456789ABCDEF') for x in range(6)])
colour = int(colour, 16)
data = discord.Embed(description="" + text, colour=discord.Colour(value=colour))
data.set_author(name=author.name, icon_url=avatar)
await self.bot.send_message(channel, embed=data)
@commands.command(pass_context=True, no_pm=True)
async def say(self, ctx, *, text):
"""Bot repeats what you tell it to."""
channel = ctx.message.channel
await self.bot.send_message(channel, text)
def setup(bot):
bot.add_cog(Say(bot))
|
[
"me@calebj.io"
] |
me@calebj.io
|
a85175eff5a9258e69ccc1a4f8ce72f63fc3e667
|
fa50e0ad91bfa00babdbcdde21fa2cea63089ce0
|
/spacecraft.py
|
59c3b5343318b0334f7116b2a012021574597acc
|
[] |
no_license
|
Ferogle/PyGame
|
5931e7338401fecd0c1308fb4b2ef88ffee3f080
|
9c3ad74d7922613c42f481608082c9c9a519d218
|
refs/heads/master
| 2020-09-27T15:52:08.983305
| 2019-12-07T17:37:34
| 2019-12-07T17:37:34
| 226,549,928
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,744
|
py
|
import pygame
import random
pygame.init()
screen = pygame.display.set_mode((800, 600)) # creating a new window in pygame
pygame.display.set_caption("Space Invaders")
icon = pygame.image.load('ufo.png')
pygame.display.set_icon(icon)
background = pygame.image.load('background.png')
# player info
playerImg = pygame.image.load('spaceship.png')
playerX = 370
playerY = 480
player_Change = 0
# enemy info
enemyImg = pygame.image.load('monster.png')
enemyX = random.randint(0, 736)
enemyY = random.randint(0, 100)
enemy_Change = 4
# bullet dynamics
bulletImg = pygame.image.load('bullet.png')
bulletX = 0
bulletY = 480
bullet_YChange = 10
bullet_State = "Ready"
score = 0
def playerPos(x, y):
screen.blit(playerImg, (x, y))
def enemyPos(x, y):
screen.blit(enemyImg, (x, y))
def fire_bullet(x, y):
global bullet_State
bullet_State = "fired"
screen.blit(bulletImg, (x + 16, y + 10))
def isCollision(enemyX, enemyY, bulletX, bulletY):
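    # squared-distance hit test: 729 = 27**2, i.e. a collision counts within
    # a 27-pixel radius, avoiding a sqrt call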
distance = (enemyX - bulletX) ** 2 + (enemyY - bulletY) ** 2
if distance <= 729:
return True
else:
return False
# game loop
# makes sure that window is never closed until we quit
running = True
while running:
screen.fill((0, 0, 0))
screen.blit(background, (0, 0))
for event in pygame.event.get():
if event.type == pygame.QUIT: # this is for quit button to function
running = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
player_Change = -5
if event.key == pygame.K_RIGHT:
player_Change = 5
if event.key == pygame.K_SPACE:
if bullet_State is "Ready":
bulletX = playerX
fire_bullet(bulletX, bulletY)
if event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
player_Change = 0
playerX += player_Change
if playerX <= 0:
playerX = 0
if playerX >= 736:
playerX = 736
enemyX += enemy_Change
if enemyX <= 0:
enemy_Change = 4
enemyY += 40
if enemyX >= 736:
enemy_Change = -4
enemyY += 40
if bulletY <= 0:
bulletY = 480
bullet_State = "Ready"
if bullet_State is "fired":
fire_bullet(bulletX, bulletY)
bulletY -= bullet_YChange
    if isCollision(enemyX, enemyY, bulletX, bulletY):
        bulletY = 480
        bullet_State = "Ready"
        score += 1
print(score)
enemyX = random.randint(0, 736)
enemyY = random.randint(0, 100)
playerPos(playerX, playerY)
enemyPos(enemyX, enemyY)
pygame.display.update() # update any change done to the game window
|
[
"noreply@github.com"
] |
Ferogle.noreply@github.com
|
0fc0dcf056539984c631359a282b0b6e6e463db4
|
977cfd3762b222c2089885cdf6c1ab4ec54f3698
|
/verbigsum.py
|
033eb06d880b713cb519d7447a2e0db430bca429
|
[] |
no_license
|
3797kaushik/turbo-fortnight
|
82c99316e7907701bbf0b5f5e6d362d6126e41a3
|
a91583ac4dbd25240ca0e817a3a1eb4976a76fb2
|
refs/heads/master
| 2020-04-01T23:04:25.369824
| 2018-10-20T18:45:55
| 2018-10-20T18:45:55
| 153,740,359
| 0
| 0
| null | 2018-10-19T07:03:58
| 2018-10-19T07:03:57
| null |
UTF-8
|
Python
| false
| false
| 337
|
py
|
'''
hacker rank solve
https://www.hackerrank.com/challenges/a-very-big-sum/problem
'''
def aVeryBigSum(ar):
get_you = 0
for data in ar:
get_you += data
return get_you
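# Python integers have arbitrary precision, so this running total cannot
# overflow; the built-in sum(ar) would give the same result.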
if __name__ == '__main__':
list_data = [1000000001, 1000000002, 1000000003, 1000000004, 1000000005]
    print(aVeryBigSum(list_data))
|
[
"noerdafi@gmail.com"
] |
noerdafi@gmail.com
|
c472f8323f159854d61bbcbddd51cdb5ce542743
|
e0904632b00d984ab02dfc00e7599ee7efda6fcb
|
/netfacd/interface_reader.py
|
97238939327019c8152f57399b198973bf76d1e4
|
[] |
no_license
|
jdsdba/mywork
|
c88331b4d4955d6cd9ecb629c1966c28973c55fb
|
f5ec865409d8623e4224e51356cfd476b507b419
|
refs/heads/main
| 2023-04-25T01:28:15.418075
| 2021-05-14T20:34:18
| 2021-05-14T20:34:18
| 366,103,964
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 745
|
py
|
#!/usr/bin/env python3
import netifaces
import pprint
# print(netifaces.interfaces())
pprint.pprint(netifaces.interfaces())
for i in netifaces.interfaces():
print('\n****** details of interface - ' + i + ' ******')
try:
        print('MAC: ', end='')  # print the label without a trailing newline
        print((netifaces.ifaddresses(i)[netifaces.AF_LINK])[0]['addr'])  # prints the MAC address
        print('IP: ', end='')  # print the label without a trailing newline
        print((netifaces.ifaddresses(i)[netifaces.AF_INET])[0]['addr'])  # prints the IPv4 address
    except (KeyError, ValueError):  # the adapter may lack a MAC or IPv4 address
        print('Could not collect adapter information')
|
[
"jdsdba@gmail.com"
] |
jdsdba@gmail.com
|
bc086dc906a59a741e83f6ac2d3481a925714071
|
c5a7adbc55695ce67339a628c26a1fe3267a29b1
|
/hello.py
|
27dab27e703e59c599907fd0e3af012bb6b226fb
|
[] |
no_license
|
BladLust/Dailyrepo
|
f0458a6c973a9560599271047a65b8aec8239465
|
7725ba7bab04e69abacf09d2804f0eca518d3f77
|
refs/heads/master
| 2020-07-22T19:04:20.617745
| 2019-09-18T03:30:29
| 2019-09-18T03:30:29
| 207,299,103
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 66
|
py
|
print('I am currently learning how to use github and terminal!')
|
[
"timty.tsui@gmail.com"
] |
timty.tsui@gmail.com
|
ab78e7545456303929c5684ebb58061e93cbba9c
|
8afb5afd38548c631f6f9536846039ef6cb297b9
|
/GIT-USERS/TOM-Lambda/CSEU4_DataStructures_GP/test_stack.py
|
287a6543fd02f56c987fcf4cd62affe55b794cc3
|
[
"MIT"
] |
permissive
|
bgoonz/UsefulResourceRepo2.0
|
d87588ffd668bb498f7787b896cc7b20d83ce0ad
|
2cb4b45dd14a230aa0e800042e893f8dfb23beda
|
refs/heads/master
| 2023-03-17T01:22:05.254751
| 2022-08-11T03:18:22
| 2022-08-11T03:18:22
| 382,628,698
| 10
| 12
|
MIT
| 2022-10-10T14:13:54
| 2021-07-03T13:58:52
| null |
UTF-8
|
Python
| false
| false
| 1,395
|
py
|
import unittest
from dll_stack import Stack
class StackTests(unittest.TestCase):
def setUp(self):
self.s = Stack()
def test_len_returns_0_for_empty_stack(self):
self.assertEqual(self.s.len(), 0)
def test_len_returns_correct_length_after_push(self):
self.assertEqual(self.s.len(), 0)
self.s.push(2)
self.assertEqual(self.s.len(), 1)
self.s.push(4)
self.assertEqual(self.s.len(), 2)
self.s.push(6)
self.s.push(8)
self.s.push(10)
self.s.push(12)
self.s.push(14)
self.s.push(16)
self.s.push(18)
self.assertEqual(self.s.len(), 9)
def test_empty_pop(self):
self.assertIsNone(self.s.pop())
self.assertEqual(self.s.len(), 0)
def test_pop_respects_order(self):
self.s.push(100)
self.s.push(101)
self.s.push(105)
self.assertEqual(self.s.pop(), 105)
self.assertEqual(self.s.len(), 2)
self.assertEqual(self.s.pop(), 101)
self.assertEqual(self.s.len(), 1)
self.assertEqual(self.s.pop(), 100)
self.assertEqual(self.s.len(), 0)
self.assertIsNone(self.s.pop())
self.assertEqual(self.s.len(), 0)
if __name__ == "__main__":
    unittest.main()
|
[
"bryan.guner@gmail.com"
] |
bryan.guner@gmail.com
|
cf357e04064d6d3e74c48dbae8311733aa712d82
|
f88a576f53b594fe768f69190e282efc8436e80c
|
/Voting/Voting/wsgi.py
|
501d61fc560a7f26229f3919da786184f903b001
|
[] |
no_license
|
CrazyEinsten/WEB-SERVERPROJECT
|
016aca1f038514c86d06145509ebdc5ac9c8afca
|
fa4d5d594fc6b49914cc7bf1eb5be78faaca3137
|
refs/heads/master
| 2021-08-10T11:34:39.573931
| 2017-11-12T13:54:09
| 2017-11-12T13:54:09
| 110,434,532
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
"""
WSGI config for Voting project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Voting.settings")
application = get_wsgi_application()
|
[
"981073706@qq.com"
] |
981073706@qq.com
|
0a4664aabb33ec7505fb357048897565550b91d2
|
19b223fd7858b98eecc11614ee76332b6ee599ed
|
/Pokemon_Game/Package_Animal/Creature.py
|
b2f2cbd56aba7e3eb4e429bec39c4f39eef94345
|
[] |
no_license
|
MaximeWbr/Expedia_Project
|
b989d3a56aee4346b67a2bb936596c98a1601fa7
|
e6d0b2f84dbc194dc6f426b0ba7d04b45f32a9ee
|
refs/heads/master
| 2020-06-24T14:51:51.854588
| 2019-07-30T19:50:45
| 2019-07-30T19:50:45
| 198,991,146
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,601
|
py
|
import random
from Package_Animal.Type import TypeEnum
from Package_Animal.AllTypes import *
class Creature:
def __init__(self, levelMax, valTypeEnum):
self.experience = 0
self.type = None
typeEnum = TypeEnum[valTypeEnum]
if typeEnum == 'Grass':
self.type = Grass()
self.name = GrassCreature[random.randrange(0, self.type._getEnumSize(), 1)]
elif typeEnum == 'Fire':
self.type = Fire()
self.name = FireCreature[random.randrange(0, self.type._getEnumSize(), 1)]
elif typeEnum == 'Water':
self.type = Water()
self.name = WaterCreature[random.randrange(0, self.type._getEnumSize(), 1)]
elif typeEnum == 'Darkness':
self.type = Darkness()
self.name = DarknessCreature[random.randrange(0, self.type._getEnumSize(), 1)]
elif typeEnum == 'Earth':
self.type = Earth()
self.name = EarthCreature[random.randrange(0, self.type._getEnumSize(), 1)]
elif typeEnum == 'Electric':
self.type = Electric()
self.name = ElectricCreature[random.randrange(0, self.type._getEnumSize(), 1)]
elif typeEnum == 'Wind':
self.type = Wind()
self.name = WindCreature[random.randrange(0, self.type._getEnumSize(), 1)]
elif typeEnum == 'Insect':
self.type = Insect()
self.name = InsectCreature[random.randrange(0, self.type._getEnumSize(), 1)]
elif typeEnum == 'Light':
self.type = Light()
self.name = LightCreature[random.randrange(0, self.type._getEnumSize(), 1)]
self.level = random.randrange(1, levelMax, 1)
self.health = self.type._healthEvolution(self.level)
self.attack = self.type._powerEvolution(self.level)
self.defense = self.type.defense
self.name = ""
def _print(self):
print('type : '+str(type(self.type))+'\nname : '+self.name+'\nlevel : '+str(self.level))
    # Description: returns False if the creature is alive, True otherwise
def _isDead(self):
if self.health > 0:
return False
else:
return True
    # Description: determines whether the creature is captured at the end of the fight
    # Output: True: captured, False: it escaped
def _getCapture(self):
print("To do with the type")
    # Description: based on the experience gained, determines whether the creature levels up
def _levelEvolution(self):
print("To do with the type of the creature")
typeLimit = 10 # Needs the type and the level to deteminate the limit
if typeLimit <= self.experience:
self.experience = 0
self.level += 1
    # Description: adds the experience earned and evaluates the level evolution;
    # the evolution depends on the creature's type
def _addExperience(self, points):
self.experience += points
self._levelEvolution()
|
[
"webermax@free.fr"
] |
webermax@free.fr
|
8748b47128cdf7a282f726bbab150034377a5697
|
4f9929e46b2f0a52fd0effd902d541abf9b72155
|
/noxfile.py
|
c0216f73b78430aad4710dfbf8192bc21d25fc3b
|
[
"BSD-3-Clause"
] |
permissive
|
parafoxia/sqlite2pg
|
6a95ee5551d08c013b9f00e7e0a11bd78d542b98
|
efbbc31e82aaac7fd470091c6a30fc1315d499c4
|
refs/heads/master
| 2023-07-27T17:16:52.522485
| 2021-09-09T05:07:04
| 2021-09-09T05:18:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,108
|
py
|
import nox
def install(session: nox.Session, dev: bool = False) -> nox.Session:
if dev:
session.run("poetry", "install", "-n", external=True)
else:
session.run("poetry", "install", "-n", "--no-dev", external=True)
return session
@nox.session(reuse_venv=True)
def testing(session: nox.Session) -> None:
session = install(session, True)
session.run("pytest", "--verbose")
@nox.session(reuse_venv=True)
def type_checking(session: nox.Session) -> None:
session = install(session, True)
session.run("mypy", ".", "--strict")
@nox.session(reuse_venv=True)
def formatting(session: nox.Session) -> None:
session = install(session, True)
session.run("black", ".", "-l99")
@nox.session(reuse_venv=True)
def import_checking(session: nox.Session) -> None:
session = install(session, True)
session.run(
"flake8",
"sqlite2pg",
"tests",
"--select",
"F4",
"--extend-ignore",
"E,F",
"--extend-exclude",
"__init__.py",
)
session.run("isort", ".", "-cq", "--profile", "black")
|
[
"51417989+Jonxslays@users.noreply.github.com"
] |
51417989+Jonxslays@users.noreply.github.com
|
20529495e9f822e01d4c33386bf8f7a1dd7e3fb0
|
e5c52142afb72300e7fb739378098db74026a8db
|
/hub/__init__.py
|
ee15bcf6a6b52f88869d4ed9a4e4da86a03c671a
|
[] |
no_license
|
EdmundMartin/SelectniumGrid
|
dddb67c8cf2006ef1712821185d905c542939bc2
|
d4f4f9b627ee7ab9c6488b5e4590c16607b87ec8
|
refs/heads/master
| 2020-06-08T13:05:20.398477
| 2019-06-25T18:55:15
| 2019-06-25T18:55:15
| 193,232,942
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
from hub.abs_hub import AbstractHubManger
from hub.hub_managers import ApiManager
__all__ = ['AbstractHubManger', 'ApiManager']
|
[
"edmartin101@googlemail.com"
] |
edmartin101@googlemail.com
|
5d2491f0fe42a589a5b7ae5872eafe32853efbf2
|
6f86ae7211df0a9289a0a4e3dbb603aaf863869c
|
/week03/E10.py
|
269378d14591a53b3513ac106bdfc6cfd4a85695
|
[] |
no_license
|
mgoloshchapov/programming_practice_2020
|
5478ecb0dcf4b577a455bbca9c5e2357d9ef444f
|
e1bdcb64c749c082d1f7a93a5af73a0913b726eb
|
refs/heads/master
| 2023-03-13T14:11:47.884375
| 2021-02-27T20:35:41
| 2021-02-27T20:35:41
| 294,882,718
| 0
| 0
| null | 2020-09-12T06:11:56
| 2020-09-12T06:11:56
| null |
UTF-8
|
Python
| false
| false
| 70
|
py
|
# the dog ate the solution, but it left the Bonus
|
[
"m.goloshchapov@yandex.ru"
] |
m.goloshchapov@yandex.ru
|
c0abba1dbb80ba854ba98e86caf3b53f991c1c75
|
909441103f1c0ec7ebe22b61904f9a67a5d071de
|
/python100天/007day/007.py
|
df5cf33227fe8344aedf4a796cb1c18658b7770c
|
[] |
no_license
|
godringchan/Python_base_GodRing
|
482e23bd1479e8845a2be09f18e95c6e9c2a38af
|
7eb4b33c9d03e685cd00d10b75b7218bd6f2d58e
|
refs/heads/master
| 2020-07-10T13:51:22.917341
| 2019-12-05T05:23:24
| 2019-12-05T05:23:24
| 204,277,188
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 827
|
py
|
# Double-color ball (Chinese lottery)
import random
def select_balls():
"""
    Draw a set of double-color ball numbers
"""
balls = [x for x in range(1, 34)]
balls = random.sample(balls, 6)
balls.sort()
balls.append(random.randint(1, 16))
return balls
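# random.sample picks 6 distinct red balls from 1-33; the appended blue ball
# (1-16) is drawn independently and may repeat a red number.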
# def show_balls(balls):
# for index, ball_num in enumerate(balls):
# if index == len(balls) - 1:
# print("|", end="")
# print("%02d" % ball_num, end=" ")
def show_balls(balls):
"""
    Display the drawn double-color ball numbers
"""
flag = 0
for i in balls:
if flag == len(balls) - 1:
print("|", end="")
print("%02d" % i, end=" ")
flag += 1
def main():
times = int(input("买几注双色球"))
for _ in range(times):
show_balls(select_balls())
print()
if __name__ == "__main__":
main()
|
[
"147848417@qq.com"
] |
147848417@qq.com
|
70039f5b41697cbb009784884a46f721fea2fcd9
|
3450b0513e3996a22b26ebc3d189a629d4cae487
|
/Problem_3.py
|
cec12b3d5ee25611109d64c4b2d8206be2f7aa80
|
[] |
no_license
|
MattiooFR/project-euler
|
f3ce2472ed1dbf371387704bf5265afedb046444
|
1f9ac3fd1a508e7d815c670e83a2ab29a63efee9
|
refs/heads/master
| 2023-02-27T01:25:46.620755
| 2021-02-02T16:11:39
| 2021-02-02T16:11:39
| 294,355,009
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 361
|
py
|
# The prime factors of 13195 are 5, 7, 13 and 29.
# What is the largest prime factor of the number 600851475143 ?
# def isPrime(number):
# for i in range(2, number-1):
# if number % i == 0:
# return False
# return True
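# Trial division: repeatedly divide out the smallest factor n. Each n that
# divides e is prime (all smaller factors were already removed), so when e
# reaches 1 the final n is the largest prime factor.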
n = 2
e = 600851475143
while e != 1:
if e % n == 0:
        e = e // n  # integer division keeps e an exact int
else:
n = n + 1
print(n)
|
[
"dugue.mathieu@gmail.com"
] |
dugue.mathieu@gmail.com
|
5a39f8c50ac88c63beadb7aca1dffdcf60311f99
|
d1c56a1f5caa0dea4cbc071b781b108245a9ba8b
|
/docs/conf.py
|
c2bc81bc782d7da3fd58dff6eeaadeef6428e330
|
[
"BSD-3-Clause"
] |
permissive
|
dangom/unyt
|
efe2b6bd2133a4c3371f4cf5b1b93dd6f9b99a01
|
2b2aafee57917c96582a41e8294894ef372f5017
|
refs/heads/master
| 2020-03-26T12:34:21.343043
| 2018-08-15T17:20:42
| 2018-08-15T17:20:42
| 144,898,976
| 1
| 0
| null | 2018-08-15T20:14:04
| 2018-08-15T20:14:03
| null |
UTF-8
|
Python
| false
| false
| 5,663
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# unyt documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
import unyt
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.doctest',
'sphinx.ext.napoleon',
'sphinx.ext.mathjax',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'unyt'
copyright = u"2018, The yt Project"
author = u"The yt Project"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = unyt.__version__
# The full version, including alpha/beta/rc tags.
release = unyt.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'modules/modules.rst']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/yt_icon.png"
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'unytdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, 'unyt.tex',
u'unyt Documentation',
u'The yt Project', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'unyt',
u'unyt Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'unyt',
u'unyt Documentation',
author,
'unyt',
'One line description of project.',
'Miscellaneous'),
]
autodoc_member_order = 'bysource'
def run_apidoc(_):
try:
from sphinx.ext.apidoc import main
except ImportError:
from sphinx.apidoc import main
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
cur_dir = os.path.abspath(os.path.dirname(__file__))
api_doc_dir = os.path.join(cur_dir, 'modules')
module = os.path.join(cur_dir, "..", "unyt")
ignore = os.path.join(cur_dir, "..", "unyt", "tests")
os.environ['SPHINX_APIDOC_OPTIONS'] = (
'members,undoc-members,show-inheritance')
main(['-M', '-f', '-e', '-T', '-d 0', '-o', api_doc_dir, module, ignore])
def setup(app):
app.connect('builder-inited', run_apidoc)
|
[
"ngoldbau@illinois.edu"
] |
ngoldbau@illinois.edu
|
8c3ed6ebe4e04ee6ca55a7c2359db085bf1ea750
|
27ffb93bb7211720fa49a1814b3da524db57aba6
|
/ClickMon/manage.py
|
c3ba63dc9403162b5bde8929dcd8e4767a20f105
|
[] |
no_license
|
DonCuicui/Clickmon
|
4c9a86cad222b88db17f1b6fc98fce688f4d4a46
|
0636edfcfb21fa99eb7c3cc67ea1e99c536de3f2
|
refs/heads/master
| 2020-03-28T01:32:22.009862
| 2018-09-07T13:18:01
| 2018-09-07T13:18:01
| 147,511,868
| 1
| 1
| null | 2018-09-06T15:27:50
| 2018-09-05T12:08:59
|
Python
|
UTF-8
|
Python
| false
| false
| 540
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ClickMon.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[
"alexandre.Quilan@gmail.com"
] |
alexandre.Quilan@gmail.com
|
cf29ba25a81cc601cdc2034e1fcd0afc5bf6f81e
|
c8f29eab274a635a7c49540440dccaa330317ebf
|
/test.py
|
244a7f45dc4dc40d3a24f11f6d60040eea2b59ea
|
[] |
no_license
|
bipindevops2017/DOCKER_SCRIPT_
|
558ff5ddcf06eb61440c029f9843c449bed45aa2
|
76ef8a5731ba1ed586d693be7abf22c649901703
|
refs/heads/master
| 2021-01-13T16:06:50.595745
| 2017-11-13T05:59:50
| 2017-11-13T05:59:50
| 81,720,481
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 43
|
py
|
print"Hello docker script"
print"how r u ";
|
[
"tiwaribipin77@gmail.com"
] |
tiwaribipin77@gmail.com
|
ff7043af5aff8fb0b582532559de3e70fbbb5e16
|
ec5bb20549e793222ac6a1208f3661e31f6aa28a
|
/method_NMTF.py
|
6ed6df38ba1604f126721a9ebde00a426672b730
|
[
"Apache-2.0"
] |
permissive
|
DEIB-GECO/NMTF-DrugRepositioning
|
65bd55946ba47cf1a10a1555db53e6026c27ae18
|
b359c6daddb4f9cfa9a3f3978c897bbd38e43354
|
refs/heads/master
| 2021-06-24T13:17:03.430125
| 2020-12-29T14:09:45
| 2020-12-29T14:09:45
| 189,968,720
| 12
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,180
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 29 12:37:37 2019
@author: gaetandissez
"""
import numpy as np
import sklearn.metrics as metrics
from spherecluster import SphericalKMeans
from sklearn.cluster import KMeans
from scipy import sparse
class NMTF:
    #First load the data and convert it to numpy arrays
R12 = sparse.load_npz('./tmp/R12.npz').toarray()
R23 = sparse.load_npz('./tmp/R23.npz').toarray()
R34 = sparse.load_npz('./tmp/R34.npz').toarray()
R25 = sparse.load_npz('./tmp/R25.npz').toarray()
W3 = sparse.load_npz('./tmp/W3.npz').toarray()
W4 = sparse.load_npz('./tmp/W4.npz').toarray()
L3 = sparse.load_npz('./tmp/L3.npz').toarray()
L4 = sparse.load_npz('./tmp/L4.npz').toarray()
#Those matrices are called Degree matrices
D3 = L3 + W3
D4 = L4 + W4
    #eps is a small constant added to denominators in the update rules to make sure they are never zero
eps = 1e-8
n1, n2 = R12.shape
n3, n4 = R34.shape
n5 = R25.shape[1]
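    # update() applies the multiplicative rule A <- A * (num / den)**0.5,
    # which keeps the factor matrices non-negative as long as they are
    # initialized non-negative.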
def update(self, A, num, den):
return A*(num / (den + NMTF.eps))**0.5
vupdate = np.vectorize(update)
def __init__(self, init_method, parameters, mask):
self.init_method = init_method
self.K = parameters
self.M = mask
self.iter = 0
def initialize(self):
self.R12_train = np.multiply(NMTF.R12, self.M)
if self.init_method == 'random':
"""Random uniform"""
self.G1 = np.random.rand(NMTF.n1, self.K[0])
self.G2 = np.random.rand(NMTF.n2, self.K[1])
self.G3 = np.random.rand(NMTF.n3, self.K[2])
self.G4 = np.random.rand(NMTF.n4, self.K[3])
self.G5 = np.random.rand(NMTF.n5, self.K[4])
if self.init_method == 'skmeans':
"""spherical k-means"""
            #Spherical k-means clustering is done on the initial data
skm1 = SphericalKMeans(n_clusters=self.K[0])
skm1.fit(self.R12_train.transpose())
skm2 = SphericalKMeans(n_clusters=self.K[1])
skm2.fit(self.R12_train)
skm3 = SphericalKMeans(n_clusters=self.K[2])
skm3.fit(NMTF.R23)
skm4 = SphericalKMeans(n_clusters=self.K[3])
skm4.fit(NMTF.R34)
skm5 = SphericalKMeans(n_clusters=self.K[4])
skm5.fit(NMTF.R25)
#Factor matrices are initialized with the center coordinates
self.G1 = skm1.cluster_centers_.transpose()
self.G2 = skm2.cluster_centers_.transpose()
self.G3 = skm3.cluster_centers_.transpose()
self.G4 = skm4.cluster_centers_.transpose()
self.G5 = skm5.cluster_centers_.transpose()
if self.init_method == 'acol':
"""random ACOL"""
#We will "shuffle" the columns of R matrices and take the mean of k batches
Num1 = np.random.permutation(NMTF.n2)
Num2 = np.random.permutation(NMTF.n1)
Num3 = np.random.permutation(NMTF.n2)
Num4 = np.random.permutation(NMTF.n3)
Num5 = np.random.permutation(NMTF.n2)
G1 = []
for l in np.array_split(Num1, self.K[0]):
G1.append(np.mean(self.R12_train[:,l], axis = 1))
self.G1 = np.array(G1).transpose()
G2 = []
for l in np.array_split(Num2, self.K[1]):
G2.append(np.mean(self.R12_train.transpose()[:,l], axis = 1))
self.G2 = np.array(G2).transpose()
G3 = []
for l in np.array_split(Num3, self.K[2]):
G3.append(np.mean(NMTF.R23.transpose()[:,l], axis = 1))
self.G3 = np.array(G3).transpose()
G4 = []
for l in np.array_split(Num4, self.K[3]):
G4.append(np.mean(NMTF.R34.transpose()[:,l], axis = 1))
self.G4 = np.array(G4).transpose()
G5 = []
for l in np.array_split(Num5, self.K[4]):
G5.append(np.mean(NMTF.R25.transpose()[:,l], axis = 1))
self.G5 = np.array(G5).transpose()
if self.init_method == 'kmeans':
"""k-means with clustering on previous item"""
#As for spherical k-means, factor matrices will be initialized with the centers of clusters.
km1 = KMeans(n_clusters=self.K[0], n_init = 10).fit_predict(self.R12_train.transpose())
km2 = KMeans(n_clusters=self.K[1], n_init = 10).fit_predict(self.R12_train)
km3 = KMeans(n_clusters=self.K[2], n_init = 10).fit_predict(self.R23)
km4 = KMeans(n_clusters=self.K[3], n_init = 10).fit_predict(self.R34)
km5 = KMeans(n_clusters=self.K[4], n_init = 10).fit_predict(self.R25)
self.G1 = np.array([np.mean([self.R12_train[:,i] for i in range(len(km1)) if km1[i] == p], axis = 0) for p in range(self.K[0])]).transpose()
self.G2 = np.array([np.mean([self.R12_train[i] for i in range(len(km2)) if km2[i] == p], axis = 0) for p in range(self.K[1])]).transpose()
self.G3 = np.array([np.mean([self.R23[i] for i in range(len(km3)) if km3[i] == p], axis = 0) for p in range(self.K[2])]).transpose()
self.G4 = np.array([np.mean([self.R34[i] for i in range(len(km4)) if km4[i] == p], axis = 0) for p in range(self.K[3])]).transpose()
self.G5 = np.array([np.mean([self.R25[i] for i in range(len(km5)) if km5[i] == p], axis = 0) for p in range(self.K[4])]).transpose()
self.S12 = np.linalg.multi_dot([self.G1.transpose(), self.R12_train, self.G2])
self.S23 = np.linalg.multi_dot([self.G2.transpose(), self.R23, self.G3])
self.S34 = np.linalg.multi_dot([self.G3.transpose(), self.R34, self.G4])
self.S25 = np.linalg.multi_dot([self.G2.transpose(), self.R25, self.G5])
def iterate(self):
#These following lines compute the matrices needed for our update rules
Gt2G2 = np.dot(self.G2.transpose(), self.G2)
G2Gt2 = np.dot(self.G2, self.G2.transpose())
G3Gt3 = np.dot(self.G3, self.G3.transpose())
Gt3G3 = np.dot(self.G3.transpose(), self.G3)
G4Gt4 = np.dot(self.G4, self.G4.transpose())
R12G2 = np.dot(self.R12_train, self.G2)
R23G3 = np.dot(NMTF.R23, self.G3)
R34G4 = np.dot(NMTF.R34, self.G4)
R25G5 = np.dot(NMTF.R25, self.G5)
W3G3 = np.dot(NMTF.W3, self.G3)
W4G4 = np.dot(NMTF.W4, self.G4)
D3G3 = np.dot(NMTF.D3, self.G3)
D4G4 = np.dot(NMTF.D4, self.G4)
G3Gt3D3G3 = np.dot(G3Gt3, D3G3)
G4Gt4D4G4 = np.dot(G4Gt4, D4G4)
G3Gt3W3G3 = np.dot(G3Gt3, W3G3)
G4Gt4W4G4 = np.dot(G4Gt4, W4G4)
R12G2St12 = np.dot(R12G2, self.S12.transpose())
G1G1tR12G2St12 = np.linalg.multi_dot([self.G1, self.G1.transpose(), R12G2St12])
Rt12G1S12 = np.linalg.multi_dot([self.R12_train.transpose(), self.G1, self.S12])
G2Gt2Rt12G1S12 = np.dot(G2Gt2, Rt12G1S12)
R23G3St23 = np.dot(R23G3, self.S23.transpose())
G2Gt2R23G3St23 = np.dot(G2Gt2, R23G3St23)
Rt23G2S23 = np.linalg.multi_dot([NMTF.R23.transpose(),self.G2, self.S23])
G3Gt3Rt23G2S23 = np.dot(G3Gt3,Rt23G2S23)
R34G4St34 = np.dot(R34G4, self.S34.transpose())
G3Gt3R34G4St34 = np.dot(G3Gt3,R34G4St34)
Rt34G3S34 = np.linalg.multi_dot([NMTF.R34.transpose(),self.G3, self.S34])
G4Gt4Rt34G3S34 = np.dot(G4Gt4,Rt34G3S34)
Rt25G2S25 = np.linalg.multi_dot([NMTF.R25.transpose(), self.G2, self.S25])
G5G5tRt25G2S25 = np.linalg.multi_dot([self.G5, self.G5.transpose(), Rt25G2S25])
R25G5St25 = np.dot(R25G5, self.S25.transpose())
G2Gt2R25G5St25 = np.dot(G2Gt2, R25G5St25)
Gt1R12G2 = np.dot(self.G1.transpose(),R12G2)
Gt2R23G3 = np.dot(self.G2.transpose(),R23G3)
Gt3R34G4 = np.dot(self.G3.transpose(),R34G4)
Gt2R25G5 = np.dot(self.G2.transpose(), R25G5)
Gt1G1S12Gt2G2 = np.linalg.multi_dot([self.G1.transpose(), self.G1, self.S12, Gt2G2])
Gt2G2S23Gt3G3 = np.linalg.multi_dot([Gt2G2, self.S23, Gt3G3])
Gt3G3S34Gt4G4 = np.linalg.multi_dot([Gt3G3, self.S34, self.G4.transpose(), self.G4])
Gt2G2S25Gt5G5 = np.linalg.multi_dot([Gt2G2, self.S25, self.G5.transpose(), self.G5])
#Here factor matrices are updated.
self.G1 = NMTF.vupdate(self, self.G1, R12G2St12, G1G1tR12G2St12)
self.G2 = NMTF.vupdate(self, self.G2, Rt12G1S12 + R23G3St23 + R25G5St25, G2Gt2Rt12G1S12 + G2Gt2R23G3St23 + G2Gt2R25G5St25)
self.G3 = NMTF.vupdate(self, self.G3, Rt23G2S23 + R34G4St34 + W3G3 + G3Gt3D3G3, G3Gt3Rt23G2S23 + G3Gt3R34G4St34 + G3Gt3W3G3 + D3G3)
self.G4 = NMTF.vupdate(self, self.G4, Rt34G3S34 + W4G4 + G4Gt4D4G4, G4Gt4Rt34G3S34 + G4Gt4W4G4 + D4G4)
self.G5 = NMTF.vupdate(self, self.G5, Rt25G2S25, G5G5tRt25G2S25)
self.S12 = NMTF.vupdate(self, self.S12, Gt1R12G2, Gt1G1S12Gt2G2)
self.S23 = NMTF.vupdate(self, self.S23, Gt2R23G3, Gt2G2S23Gt3G3)
self.S34 = NMTF.vupdate(self, self.S34, Gt3R34G4, Gt3G3S34Gt4G4)
self.S25 = NMTF.vupdate(self, self.S25, Gt2R25G5, Gt2G2S25Gt5G5)
self.iter += 1
def validate(self, metric='aps'):
n, m = NMTF.R12.shape
R12_found = np.linalg.multi_dot([self.G1, self.S12, self.G2.transpose()])
R12_2 = []
R12_found_2 = []
#We first isolate the validation set and the corresponding result
for i in range(n):
for j in range(m):
if self.M[i, j] == 0:
R12_2.append(NMTF.R12[i, j])
R12_found_2.append(R12_found[i, j])
        #We can assess the quality of our output with the APS or AUROC score
if metric == 'auroc':
fpr, tpr, threshold = metrics.roc_curve(R12_2, R12_found_2)
return metrics.auc(fpr, tpr)
if metric == 'aps':
return metrics.average_precision_score(R12_2, R12_found_2)
def loss(self):
Gt3L3G3 = np.linalg.multi_dot([self.G3.transpose(), NMTF.L3, self.G3])
Gt4L4G4 = np.linalg.multi_dot([self.G4.transpose(), NMTF.L4, self.G4])
J = np.linalg.norm(self.R12_train - np.linalg.multi_dot([self.G1, self.S12, self.G2.transpose()]), ord='fro')**2
J += np.linalg.norm(NMTF.R23 - np.linalg.multi_dot([self.G2, self.S23, self.G3.transpose()]), ord='fro')**2
J += np.linalg.norm(NMTF.R34 - np.linalg.multi_dot([self.G3, self.S34, self.G4.transpose()]), ord='fro')**2
J += np.linalg.norm(NMTF.R25 - np.linalg.multi_dot([self.G2, self.S25, self.G5.transpose()]), ord='fro')**2
J += np.trace(Gt3L3G3) + np.trace(Gt4L4G4)
return J
def __repr__(self):
return 'Model NMTF with (k1, k2, k3, k4, k5)=({}, {}, {}, {}, {}) and {} initialization'.format(self.K[0], self.K[1], self.K[2], self.K[3], self.K[4], self.init_method)
|
[
"gaetan.dissez@gmail.com"
] |
gaetan.dissez@gmail.com
|
70a1783faa266a94822f942f9448e980f466238f
|
f05b7e086b08786b875a961ab52eeb50979064ec
|
/Demo/MDV2/Multicast.py
|
66fefc9b9526886e053c4062a169bc41367a55f2
|
[] |
no_license
|
JasonLeao/Demo-for-Self-defined-Multicast-Services
|
74fbc21bc35d1bc0155145565eb453fc9aca4f9d
|
1bdcd4e11b37a1f1772b4f0fc6cae2f727430363
|
refs/heads/master
| 2020-12-24T14:44:46.131947
| 2014-07-04T05:14:19
| 2014-07-04T05:14:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,776
|
py
|
__author__ = 'mkuai'
from Fileoperation import Fileoperation
from Paths import Paths
from Distancegraph import Distancegraph
from Determinepaths import Determinepaths
from Generaterouting import Generaterouting
#from Arppath import Arppath
#Organize all the processing steps and transfer the resulting ports to the Ryu controller,
#which then starts the multicast service
class Multicast:
def __init__(self):
self.wrotten_ports = []
self.head = 0
self.proxy_port = 0
self.group_port = 0
self.switches = 0
def _organized_process(self):
file = Fileoperation()
file._process_files("Topology", "Ports", "Request") #read all the configure file
self.switches = file.m_switch_number
#self.source_ip = file.ip_src
paths = Paths()
paths._shortest_path_tree(file.receivers, file.graph)
distance_graph = Distancegraph()
distance_graph._minimal_spanning_tree(file.receivers, paths.path_sets)
distance_graph._restore_paths(paths.path_sets)
        determine_path = Determinepaths()
determine_path._src_to_multitree(file.m_switch_number, distance_graph.paths, file.graph, file.sender)
generate_routing = Generaterouting()
generate_routing._write_ports(file.m_total_number, distance_graph.paths, determine_path.multi_head, file.ports)
self.wrotten_ports = generate_routing.map_ports
self.proxy_port = generate_routing.proxy_port
self.group_port = generate_routing.group_port
self.head = generate_routing.head_switch
#arp_path = Arppath()
#arp_path._install_arp_path(file.m_total_number, file.ports, determine_path.src_to_multi)
#print self.wrotten_ports
|
[
"shengquan-liao@163.com"
] |
shengquan-liao@163.com
|
9f875b46aab7f80a0998a7c07417a2e32f34b420
|
2b73bd11a6d777b03620a170f65650cd658b29d2
|
/pts/modules/feature.py
|
a4aae50b9a260652bf09b3ac94d791d447e38486
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
StatMixedML/pytorch-ts
|
33e018140571d5f7f80f6c402115c3aa90d09ec1
|
4bc2d247c70c59479d359d13d2db5739227307e8
|
refs/heads/master
| 2021-03-27T03:04:27.849280
| 2020-03-13T13:51:29
| 2020-03-13T13:51:29
| 247,779,634
| 1
| 0
|
NOASSERTION
| 2020-03-16T17:39:45
| 2020-03-16T17:39:44
| null |
UTF-8
|
Python
| false
| false
| 2,838
|
py
|
from typing import Callable, List, Optional
import torch
import torch.nn as nn
class FeatureEmbedder(nn.Module):
def __init__(self, cardinalities: List[int], embedding_dims: List[int],) -> None:
super().__init__()
self.__num_features = len(cardinalities)
def create_embedding(c: int, d: int) -> nn.Embedding:
embedding = nn.Embedding(c, d)
return embedding
self.__embedders = nn.ModuleList(
[create_embedding(c, d) for c, d in zip(cardinalities, embedding_dims)]
)
def forward(self, features: torch.Tensor) -> torch.Tensor:
if self.__num_features > 1:
# we slice the last dimension, giving an array of length
# self.__num_features with shape (N,T) or (N)
cat_feature_slices = torch.chunk(features, self.__num_features, dim=-1)
else:
cat_feature_slices = [features]
return torch.cat(
[
embed(cat_feature_slice.squeeze(-1))
for embed, cat_feature_slice in zip(
self.__embedders, cat_feature_slices
)
],
dim=-1,
)
class FeatureAssembler(nn.Module):
def __init__(
self,
T: int,
embed_static: Optional[FeatureEmbedder] = None,
embed_dynamic: Optional[FeatureEmbedder] = None,
) -> None:
super().__init__()
self.T = T
self.embeddings = nn.ModuleDict(
{"embed_static": embed_static, "embed_dynamic": embed_dynamic}
)
def forward(
self,
feat_static_cat: torch.Tensor,
feat_static_real: torch.Tensor,
feat_dynamic_cat: torch.Tensor,
feat_dynamic_real: torch.Tensor,
) -> torch.Tensor:
processed_features = [
self.process_static_cat(feat_static_cat),
self.process_static_real(feat_static_real),
self.process_dynamic_cat(feat_dynamic_cat),
self.process_dynamic_real(feat_dynamic_real),
]
return torch.cat(processed_features, dim=-1)
def process_static_cat(self, feature: torch.Tensor) -> torch.Tensor:
if self.embeddings["embed_static"] is not None:
feature = self.embeddings["embed_static"](feature)
return feature.unsqueeze(1).expand(-1, self.T, -1).float()
def process_dynamic_cat(self, feature: torch.Tensor) -> torch.Tensor:
if self.embeddings["embed_dynamic"] is None:
return feature.float()
else:
return self.embeddings["embed_dynamic"](feature)
def process_static_real(self, feature: torch.Tensor) -> torch.Tensor:
return feature.unsqueeze(1).expand(-1, self.T, -1)
def process_dynamic_real(self, feature: torch.Tensor) -> torch.Tensor:
return feature
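# A minimal usage sketch for FeatureEmbedder (shapes are illustrative):
#   embedder = FeatureEmbedder(cardinalities=[10, 4], embedding_dims=[3, 2])
#   feats = torch.randint(0, 4, (8, 2))  # (batch, num_features)
#   out = embedder(feats)                # shape (8, 3 + 2)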
|
[
"kashif.rasul@gmail.com"
] |
kashif.rasul@gmail.com
|
8d4a626e0834e092e43b51bebfccde69f6d4cc28
|
d36e1d6f39cce857bbb7783565a8e968136f1926
|
/apiTest/Service_Summary/test_addClass.py
|
834b0e69d197de9bf49166e40fc5e90f54765f27
|
[] |
no_license
|
qiquan1011/test_customerApi
|
ffc14568e2c175ee19a4c325e7aca200134fe2ae
|
65ba1f5d6092b2a36b3043656d95c1f42de97b7f
|
refs/heads/master
| 2023-04-07T04:32:23.033724
| 2021-04-14T01:54:56
| 2021-04-14T01:54:56
| 355,849,705
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,685
|
py
|
import json
import unittest
import paramunittest
from common import commom, configHTTP
local_config_http=configHTTP.Config_Http()
addclass_excel=commom.get_excel("testCase.xlsx","newAddClass")
@paramunittest.parametrized(*addclass_excel)
class addClass(unittest.TestCase):
def setUp(self):
pass
def setParameters(self,case_name,method,url,parameter,code,status,message):
self.case_name=str(case_name)
self.method=str(method)
self.url=str(url)
self.parameter=str(parameter)
self.code=str(code)
self.status=str(status)
self.message=str(message)
print(self.parameter)
def test_addClass(self):
self.delect_class()
login_cookies=commom.get_customer_login()
header={"content-Type":"application/json;charset=UTF-8","cookie":login_cookies}
local_config_http.get_Heardes(header)
send_parm=self.parameter
local_config_http.get_data(send_parm.encode(encoding="UTF-8"))
local_config_http.get_Path(self.url)
self.reponse=local_config_http.set_post()
self.checkResult()
def description(self):
return self.case_name
    def checkResult(self):
        commom.show_return_msg(self.response, self.case_name, self.parameter)
        self.header = self.response.headers
        if self.header["Content-Type"] == "application/octet-stream;charset=UTF-8":
            self.info = self.response.text
            self.assertIsNotNone(self.info, msg=None)
        elif self.header["Content-Type"] == "application/json;charset=UTF-8":
            self.info = self.response.json()
            if self.response.status_code == 200 and self.info["success"] == True:
                self.assertEqual(self.info["code"], int(float(self.code)))
                self.assertEqual(self.info["message"], self.message)
            elif self.response.status_code == 200 and self.info["success"] == False:
                self.assertEqual(self.info["code"], int(float(self.code)))
                self.assertIn(self.info["message"], self.message)
    def delete_class(self):
        # remove any class with the same name first so the add can succeed
        send_dict = json.loads(self.parameter)
        for className in send_dict:
            if className != "":
                sql = "DELETE from cs_summary_class where class_name=" + "'" + send_dict[className] + "'"
                print(sql)
                commom.getDelecte_dataBase(sql)
def select_classId(self):
sql="select class_id from cs_summary_class where class_name='你' and tenant_id='149'"
print(sql)
classId=commom.getSelect_dataBase(sql)
print(classId)
return classId
if __name__=="__main__":
unittest.main()
|
[
"1477742998@qq.com"
] |
1477742998@qq.com
|
24944475437eb47219018ccc93c5c251b3d64d11
|
b7ff8811358c29121d6f60d96c3d05fdf2466ac5
|
/Array/SortArrayByParity.py
|
db0d42d5d49e708d212934499a1116bdb8a9b43e
|
[] |
no_license
|
kevinvud/leet_code_python
|
e4882c5cf7dd6d7dec54462f3707b9c6dad493ce
|
34f92f5b64d56fa4f8f1ff85d746b09725e23621
|
refs/heads/master
| 2020-07-15T07:38:03.249607
| 2019-09-08T23:03:32
| 2019-09-08T23:03:32
| 205,513,986
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 479
|
py
|
"""
Input: [3,1,2,4]
Output: [2,4,3,1]
The outputs [4,2,3,1], [2,4,1,3], and [4,2,1,3] would also be accepted.
Note:
1 <= A.length <= 5000
0 <= A[i] <= 5000
"""
def sortArrayByParity(nums):
    even_array = []
    odd_array = []
    for num in nums:
        if num % 2 == 0:
            even_array.append(num)
        else:
            odd_array.append(num)
    return even_array + odd_array
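# note: a stable sort on parity gives the same grouping in one line:
# sorted(nums, key=lambda x: x % 2)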
arrayInput = [3, 1, 2, 4]
print(sortArrayByParity(nums=arrayInput))
|
[
"kevinvud@gmail.com"
] |
kevinvud@gmail.com
|
4c0803d7b8621c6a3c4e105aa299f8c7d9551bb5
|
c8a3638dbb74b4281e99ebc32f7a42b2c61a160f
|
/mysql/spiders/6_Malaysia_miti.py
|
0d5f23ae110cabee9b2f93de8576dafeb6e343af
|
[] |
no_license
|
HaoDong96/sea_news_crawler
|
392fc374409078f13ff09dbaf553c5d1df8f2ab1
|
807e7963831728d537c82143472fd3392ca09460
|
refs/heads/master
| 2020-03-26T21:58:29.966921
| 2018-08-20T13:35:09
| 2018-08-20T13:35:09
| 145,422,585
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,575
|
py
|
# coding:utf-8
from scrapy.spiders import CrawlSpider
from mysql.items import NewsItem
import scrapy
import time
class test_crawler(CrawlSpider):
name = '6_Malaysia_miti'
allowed_domains = ['miti.gov.my']
    key_name = ['ocean', 'aquatic', 'marine', 'fishery', 'warship', 'fishing', 'coastal', 'blue+economy']
# key_name=['ocean']
    base_url = 'http://www.miti.gov.my/index.php/search/results?search={key}'
def start_requests(self):
        # build one search URL per keyword; these are the crawl entry points
for key in self.key_name:
url = self.base_url.format(key=key)
yield scrapy.Request(url=url, callback=self.parse_pages, dont_filter=True)
#print(url)
def parse_pages(self, response):
try:
print("parse_pages:"+response.url)
            # parse the search results page and follow each result link
#print(response.body)
for news_url in response.xpath('//div[@id="search_result"]'
'/div[@class="search_result_item"]/a/@href').extract():
print("news url:"+news_url)
yield scrapy.Request(news_url,callback=self.parse_news, dont_filter=True)
except Exception as error:
            self.logger.error(error)
def parse_news(self, response):
try:
print("parse:"+response.url)
#print(response.body)
item = NewsItem()
item['url'] = response.url
item['country_code'] = "6"#"".join(response.xpath('//*[@property="v:summary"]/text()').extract())
item['country_name']="Malaysia"
# image="".join(response.xpath('//section[@id="block-views-newsroom-page-block-1"]').extract())
# if image:
# item['image']=image
# else:
# item['image']=None
item['image_urls'] = None
item['content']="".join(response.xpath('//*[@id="container_content"]/div[@class="editable"]').extract()).replace('src="','src="http://www.miti.gov.my/')
#print("content"+item['content'])
item['source']="http://www.miti.gov.my"
item['title']="".join(response.xpath('//*[@id="365"]/div[2]/div[1]/h1/text()').extract())
item['time']="".join(response.xpath('//*[@id="container_content"]/div[3]/p/em/text()').extract())
item['crawled_time']=time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
# #print(item['title'])
yield item
except Exception as error:
            self.logger.error(error)
|
[
"spdonghao@gmail.com"
] |
spdonghao@gmail.com
|
dee1ba0c22c12de661c27b665f872a986aa38a7e
|
0fce260d9c73e2966dd12025b01bf1763666c3b1
|
/mandelbrot/management/commands/loadslack.py
|
9edd01f193f88821ba0d2b45b945b78f7dbb42d2
|
[] |
no_license
|
paultag/mandelbrot
|
0b15810969413a0afebe3220cf9965aa4f5027d8
|
60880cdb6f020ff9ec765e393311a11d03d8d678
|
refs/heads/master
| 2021-01-19T03:04:01.743770
| 2016-06-07T00:15:37
| 2016-06-07T00:15:37
| 45,364,430
| 4
| 3
| null | 2016-04-11T03:13:54
| 2015-11-02T00:59:07
|
CSS
|
UTF-8
|
Python
| false
| false
| 1,855
|
py
|
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Q
from mandelbrot.models import Expert, ContactDetail
import requests
import os
KEY = os.environ['SLACK_ACCESS_TOKEN']
class Command(BaseCommand):
    help = 'Load experts from Slack'
CACHE = {}
def add_arguments(self, parser):
pass
def handle(self, *args, **options):
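        # scrape() is a generator with side effects: iterating it syncs each
        # Slack profile into the database, so the loop body stays empty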
for person in scrape():
pass
def scrape():
team = requests.get(
'https://slack.com/api/users.list?token={}'.format(KEY),
).json()['members']
for person in team:
if person.get('deleted', False):
continue
if person.get('is_bot', False):
continue
name = person.get('real_name', person.get('name'))
if name == "":
continue
try:
who = Expert.by_name(name)
except Expert.DoesNotExist:
print(",{},Slack Name,,,False".format(name))
continue
if who.photo_url == "":
who.photo_url = person.get('profile', {}).get('image_original', "")
phone = person.get("profile", {}).get("phone", None)
if phone is not None and phone != "":
detail, created = who.add_contact_detail(
value=phone,
label=None,
type='phone',
preferred=True,
official=False,
)
if created:
detail.label = "From Slack"
detail.save()
detail, created = who.add_contact_detail(
value=person['name'],
label=None,
type='slack',
preferred=True,
official=False,
)
if created:
detail.label = "From Slack"
detail.save()
who.save()
yield who
|
[
"tag@pault.ag"
] |
tag@pault.ag
|
21d6881c4e2c1086a73635b9efb49ce6a17dad32
|
16cf959acf8746bf65e1a74672209aa36645ab9d
|
/basicsort.py
|
308bdd53a75bb08f216b367fbf67a0168ef3de47
|
[] |
no_license
|
tlavr/typesnalgos
|
5a504092f664b5af50cf74768b0de0a38fbf7179
|
5282b660ac44466ef6998bad5fc1ffa8353b26e3
|
refs/heads/master
| 2020-08-07T23:55:22.439841
| 2020-03-11T11:55:28
| 2020-03-11T11:55:28
| 213,630,339
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 882
|
py
|
def swap(arr, idx1, idx2):
    arr[idx1], arr[idx2] = arr[idx2], arr[idx1]

def SelectionSortStep(arr, el):
    # find the minimum of arr[el:] and move it into position el
    idx = el
    for ii in range(el, len(arr)):
        if arr[ii] < arr[idx]:
            idx = ii
    if idx != el:
        swap(arr, idx, el)

def BubbleSortStep(arr):
    # one bubble pass; returns True when no swaps were needed (array is sorted)
    isSwap = False
    for ii in range(len(arr) - 1):
        if arr[ii] > arr[ii + 1]:
            swap(arr, ii, ii + 1)
            isSwap = True
    if not isSwap:
        return True
    return False

def InsertionSortStep(arr, step, bidx):
    # sweep the subsequence bidx, bidx+step, ... until a pass makes no swaps
    # (a gap pass as used by Shell sort; step=1 degenerates to insertion sort)
    isOk = False
    while not isOk:
        idx = bidx
        isSwap = False
        while idx + step < len(arr):
            if arr[idx] > arr[idx + step]:
                swap(arr, idx, idx + step)
                isSwap = True
            idx = idx + step
        if not isSwap:
            isOk = True
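# A minimal driver sketch (illustrative, not from the original file): the step
# functions are applied repeatedly until the array is sorted.
if __name__ == "__main__":
    data = [5, 2, 9, 1, 7]
    while not BubbleSortStep(data):  # True once a pass makes no swaps
        pass
    print(data)  # [1, 2, 5, 7, 9]
    data = [5, 2, 9, 1, 7]
    for el in range(len(data)):
        SelectionSortStep(data, el)
    print(data)  # [1, 2, 5, 7, 9]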
|
[
"noreply@github.com"
] |
tlavr.noreply@github.com
|
0aef74dc74ea50b71eb777b7be2ae2d69042d61b
|
2c1ec51f98991987179ccb5c79ceadd98a127cbd
|
/virtual/bin/sqlformat
|
7e6978542a25ebc9d42cf05d14ccf16c5dd9f28e
|
[
"MIT"
] |
permissive
|
monicaoyugi/djangoGallery
|
af1715c9fe5effbad5097132a6e83a6f2aee6d1d
|
43661d0ed37d10e8870afc1130ea0c05467dcdec
|
refs/heads/master
| 2021-11-27T03:13:10.733683
| 2020-03-03T08:03:13
| 2020-03-03T08:03:13
| 243,685,426
| 1
| 2
|
MIT
| 2021-09-22T18:40:46
| 2020-02-28T05:26:14
|
Python
|
UTF-8
|
Python
| false
| false
| 273
|
#!/home/moringaschool/Documents/Core/Week1/Pictures/virtual/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from sqlparse.__main__ import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"monicaoyugi@gmail.com"
] |
monicaoyugi@gmail.com
|
|
e81364b97e81046d756cd7779b9a8cdacc1a7cdd
|
8b6105703dd5e86e106cc3f80e49f1ed9790b002
|
/Homeworks/day_3.py
|
5043c4c78b61696955bf6ef8a81654c86e977d9d
|
[] |
no_license
|
ratnadeeppal/gaih-students-repo-example
|
a72aba524cd96ab5d670130bd95714d46f08c573
|
000f1bba6097d0a75234102b63466cfae32772d6
|
refs/heads/main
| 2023-02-26T21:52:49.757914
| 2021-02-06T12:06:37
| 2021-02-06T12:06:37
| 335,695,054
| 0
| 0
| null | 2021-02-03T17:00:17
| 2021-02-03T17:00:16
| null |
UTF-8
|
Python
| false
| false
| 1,036
|
py
|
# -*- coding: utf-8 -*-
"""day_3.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1ot_-j3hVF9E_l5XwKL9HZ0yqO3OQeO7h
# Question 1
"""
username = 'ratnadeep'
password = 'india'
ip_username = input("Username: ")
ip_password = input("Password: ")
if username == ip_username and password != ip_password:
print("Invalid Password")
elif username != ip_username and password == ip_password:
print("Invalid Username")
elif username != ip_username and password != ip_password:
print("Invalid password and username")
else:
print("Successfully Login")
"""# Question 2"""
user_pass = {"ratnadeep":"india","athar": "pakistan","rashford":"england","ronaldo":"brazil"}
ip_username = input("Username: ")
ip_password = input("Password: ")
if ip_username not in user_pass:
print("Invalid password and username")
elif user_pass.get(ip_username) != ip_password:
print("Invalid password")
else:
print("Successfully Login")
|
[
"noreply@github.com"
] |
ratnadeeppal.noreply@github.com
|
64575c4a1e64b4f1b339eebd86e9d527a3b29a3a
|
f16146c1184f2621dda6c66c9c36092313e26919
|
/shell_cmd/run_state.py
|
c4dcdc5a03e8fcc35a6d31384af3ab2eb6b5aab5
|
[] |
no_license
|
kowaraj/scriptsCERN
|
3b4222516e399f5c3fe9ae3d2f64ed63fdc9f97e
|
53bf7bc6222116bde85fdc4b776f0e07faef4e86
|
refs/heads/master
| 2020-05-22T01:04:28.717354
| 2017-01-26T12:07:57
| 2017-01-26T12:07:57
| 33,919,024
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 550
|
py
|
#!/usr/bin/python
from subprocess import Popen, PIPE
import os
import shell_cmd as sc
# processes
sc.pid('ALLVTULHC_DU_R')
sc.pid('ALLVTULHC_DU_S')
sc.pid('ALLSpsRephasingIntfc_DU_R')
sc.pid('ALLSpsRephasingIntfc_DU_S')
# shared mem
sc.shmexist('ALLVTULHC_DU.cfv-864-agsps')
sc.shmexist('sem.ALLVTULHC_DU.cfv-864-agsps')
sc.shmexist('ALLVTULHCClassShm')
sc.shmexist('ALLVTUClassShm')
sc.shmexist('ALLSpsRephasingIntfcClassShm')
sc.shmexist('ALLSpsRephasingIntfc_DU.cfv-864-agsps')
sc.shmexist('sem.ALLSpsRephasingIntfc_DU.cfv-864-agsps')
|
[
"kowaraj@gmail.com"
] |
kowaraj@gmail.com
|
e1f5018ed53d43cede8b0037e0a69109b758aa24
|
e42adbfe85ebf7bc4b6936e8d003d988f3e681c0
|
/svea_starter/src/svea/src/localizers/qualysis_localizers.py
|
cde090b38291db363320a93da4ced23c9c8cf258
|
[] |
no_license
|
emmaj7/sml_summerproject
|
f0c6924cee428fd6de64ecdb45fc58bfb7bb27f1
|
1151267d10c3b37fde53045b494c3f7494322099
|
refs/heads/master
| 2022-12-24T08:06:23.516464
| 2019-09-23T15:02:28
| 2019-09-23T15:02:28
| 191,755,042
| 1
| 0
| null | 2022-12-10T19:00:08
| 2019-06-13T12:03:14
|
C++
|
UTF-8
|
Python
| false
| false
| 3,880
|
py
|
#!/usr/bin/env python
"""
ROS interface object for localization with qualysis odom.
This module simply contains the implementations of the ROS interface
code wrapped in objects. The launch files for each interfaced ROS node
still needs to be run BEFORE initializing these objects. In particular
roslaunch files:
qualysis.launch
qualysis_odom.launch model_name:=<blah blah blah>
need to be run. It is recommended you simply add these launch files to
whatever project launch file you are using.
TODO:
- Add event-based functionality
Author - Frank J Jiang <frankji@kth.se>
"""
import sys
import os
import numpy as np
from threading import Thread
import rospy
import tf
from geometry_msgs.msg import PoseWithCovarianceStamped
from geometry_msgs.msg import Twist, TwistWithCovarianceStamped
from nav_msgs.msg import Odometry
from math import sqrt
class State(object):
"""
Class representing the state of a vehicle.
:param x: (float) x-coordinate
:param y: (float) y-coordinate
:param yaw: (float) yaw angle
:param v: (float) speed
"""
def __init__(self, x=0.0, y=0.0, yaw=0.0, v=0.0):
"""Instantiate the object."""
super(State, self).__init__()
self.x = x
self.y = y
self.yaw = yaw
self.v = v
class QualysisCarOdom():
def __init__(self, qualysis_model_name):
self.qualysis_model_name = qualysis_model_name
# rospy.init_node(self.qualysis_model_name + '_qualysis_odom')
self.state = State()
self.last_time = None
def start(self):
Thread(target=self._init_and_spin_ros, args=()).start()
return self
def _init_and_spin_ros(self):
rospy.loginfo("Starting Qualysis Odometry Interface Node: \n"
+ str(self))
self.node_name = self.qualysis_model_name + '_qualysis_odom'
self._collect_srvs()
self._start_listen()
def _collect_srvs(self):
# rospy.wait_for_service('set_pose')
# self.set_pose = rospy.ServiceProxy('set_pose', SetPose)
pass
def _start_listen(self):
rospy.Subscriber(self.qualysis_model_name + '/odom', Odometry,
self._read_qualysis_odom_msg)
rospy.loginfo("Qualysis Odometry Interface successfully initialized")
rospy.spin()
def _read_qualysis_odom_msg(self, msg):
pose = msg.pose.pose.position
vel = msg.twist.twist.linear
q = msg.pose.pose.orientation
quaternion = (q.x, q.y, q.z, q.w)
euler = tf.transformations.euler_from_quaternion(quaternion)
yaw = euler[2]
self.state.x = pose.x
self.state.y = pose.y
self.state.yaw = yaw
self.state.v = sqrt(vel.x**2 + vel.y**2)
self.last_time = rospy.get_time()
def __repr__(self):
return ""
def __str__(self):
return ""
# def set_pose(self, qualysis_model_name, pose_to_set):
# try:
# self.set_pose(qualysis_model_name, pose_to_set)
# except rospy.ServiceException as exc:
# print(self.node_name + ": Set Pose service failed: " + str(exc))
def is_publishing(self):
if self.last_time is not None:
            # float threshold: "1/100" is 0 under Python 2 integer division
            is_publishing = rospy.get_time() - self.last_time < 0.01
if not is_publishing:
rospy.loginfo_throttle(2, "{0} not updating".format(
self.node_name))
return is_publishing
def get_state_obj(self):
"""Returns state object with variables state.x, state.y, state.yaw,
state.v
"""
return self.state
def get_state(self):
"""Returns state as a list"""
return [self.state.x, self.state.y, self.state.yaw, self.state.v]
def get_state_np(self):
"""Returns state as a numpy array"""
        return np.array(self.get_state())
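# A minimal usage sketch (illustrative, not from the original file; assumes the
# qualysis.launch / qualysis_odom.launch files noted above are already running,
# and "SVEA0" is a placeholder model name):
#
#   rospy.init_node('qualysis_example')
#   odom = QualysisCarOdom("SVEA0").start()
#   rospy.sleep(1.0)
#   x, y, yaw, v = odom.get_state()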
|
[
"tranbarsjuice@gmail.com"
] |
tranbarsjuice@gmail.com
|
7c796737367705100b0ddc61ce403b8d9e93b8db
|
902e2f5fab3c61ca02c0ba2b54a3564d5dec6912
|
/dice.py
|
cc667cd7c0be54179b9d3944866df91e1c0a6b2a
|
[] |
no_license
|
lantone/colloquium_scripts
|
51515a08511044a16be75e87e20cb77eef72c82d
|
19a051b4378d00a86784624c9f5f66eccd0d7714
|
refs/heads/master
| 2021-01-13T16:00:56.963540
| 2017-02-23T02:14:05
| 2017-02-23T02:14:05
| 79,591,601
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,483
|
py
|
#!/usr/bin/env python
import sys
#import glob
#import re
#import os
#from array import *
import matplotlib
matplotlib.use('QT4Agg')
import numpy as np
import matplotlib.pyplot as plt
plt.xkcd()
matplotlib.rcParams.update({'font.size': 28})
fig_size = plt.rcParams["figure.figsize"]
fig_size[0] = 13
fig_size[1] = 10
plt.rcParams["figure.figsize"] = fig_size
plt.subplots_adjust(left=0.1,right=0.95, top=0.9, bottom=0.1)
n_rolls = 1000000
n_events = 1000
n_bins=21
x_min = 0
rolls1 = []
rolls2 = []
rolls3 = []
for n in range(n_rolls):
d1 = np.random.randint(1,high=7,size=2)
d2 = np.random.randint(1,high=9,size=2)
d3 = np.random.randint(1,high=11,size=2)
rolls1.append(sum(d1))
rolls2.append(sum(d2))
rolls3.append(sum(d3))
plot = plt.hist([rolls3,rolls2,rolls1], n_bins,range=[x_min, n_bins],histtype='step',label=["10-sided dice","8-sided dice","6-sided dice"],linewidth=3)
axes = plt.gca()
ymax = axes.get_ylim()[1]
axes.set_ylim([0,(n_rolls+1)/float(4)])
axes.set_ylabel("events")
axes.yaxis.set_label_coords(-0.07, 0.85)
axes.set_xlim(xmin=x_min+1, xmax=n_bins+1)
axes.set_xlabel("sum of two dice")
axes.xaxis.set_label_coords(0.87, -0.05)
leg = plt.legend(numpoints=1,loc=2,fontsize=30)
leg.draw_frame(False)
plt.savefig('plot_0.png')
plt.cla()
data = []
for n in range(n_events):
datum = np.random.randint(1,high=9,size=2)
data.append(sum(datum))
if n>100 and (n+1)%10:
continue
    print(n + 1)
# if n < 9999:
# continue
y,binEdges=np.histogram(data,bins=n_bins,range=(x_min, n_bins))
bincenters = 0.5*(binEdges[1:]+binEdges[:-1])
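    # scale the million-roll reference histograms so their total matches the
    # (n+1) data events drawn so far (the floor of 25 keeps early frames visible)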
weight = [(n+1)/max(25.,float(n_rolls))] * n_rolls
# weight = [1] * n_rolls
plot = plt.hist([rolls3,rolls2,rolls1], n_bins,range=[x_min, n_bins],histtype='step',weights=3*[weight],label=["10-sided dice","8-sided dice","6-sided dice"],linewidth=3)
axes = plt.gca()
axes.set_title(" "+str(n+1)+" rolls",y=0.85,fontsize=50)
ymax = axes.get_ylim()[1]
axes.set_ylim([0,max(25,(n+1)/float(4))])
axes.set_ylabel("events")
axes.yaxis.set_label_coords(-0.07, 0.85)
axes.set_xlim(xmin=x_min+1, xmax=n_bins+1)
axes.set_xlabel("sum of two dice")
axes.xaxis.set_label_coords(0.87, -0.05)
data_plot = plt.plot(bincenters,y,'ko',label="data",markersize=10,markeredgewidth=0.0)
leg = plt.legend(numpoints=1,loc=2,fontsize=30)
leg.draw_frame(False)
plt.savefig('plot_'+str(n+1)+'.png')
plt.cla()
|
[
"lantone@gmail.com"
] |
lantone@gmail.com
|
dc73c4de221e3699efcae9d229a2059d59e042f8
|
b14b816ed5d733c3eda445114a307fb021aced56
|
/rango/views.py
|
b363af611e17619a39627eba310931fc87c4d210
|
[] |
no_license
|
mishraprags/tango_with_django_project
|
679225da6f57d9bad7040a50f0d133d267a40125
|
ae092a5a294b7e01f5db990557ea434fe71d8b80
|
refs/heads/master
| 2023-01-19T03:16:07.365867
| 2020-11-22T19:32:42
| 2020-11-22T19:32:42
| 256,175,730
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,186
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from datetime import datetime
from rango.models import Category, Page
from rango.forms import CategoryForm, PageForm, UserForm, UserProfileForm
def index(request):
# Dict is passed to template engine as context
# Get top 5 most liked categories
request.session.set_test_cookie()
category_list = Category.objects.order_by('-likes')[:5]
context_dict = {'categories': category_list}
# Get 5 most viewed pages
page_list = Page.objects.order_by('-views')[:5]
context_dict['pages'] = page_list
visitor_cookie_handler(request)
context_dict['visits'] = request.session['visits']
response = render(request, 'rango/index.html', context=context_dict)
# Returning a rendered response for the client
return response
def about(request):
context_dict = {}
if request.session.test_cookie_worked():
print("TEST COOKIE WORKED!")
request.session.delete_test_cookie()
visitor_cookie_handler(request)
context_dict['visits'] = request.session['visits']
response = render(request, 'rango/about.html', context=context_dict)
return response
#return HttpResponse("Rango says here is the about page. Head back to the main page <a href='/rango/'>here</a>")
def show_category(request, category_name_slug):
context_dict = {}
try:
# get category based on URL slug
category = Category.objects.get(slug=category_name_slug)
# get all pages for that category
pages = Page.objects.filter(category=category)
context_dict['pages'] = pages
context_dict['category'] = category
except Category.DoesNotExist:
# when a category isn't a thing
context_dict['pages'] = None
context_dict['category'] = None
return render(request, 'rango/category.html', context_dict)
def add_category(request):
form = CategoryForm()
# Received HTTP POST?
if request.method == 'POST':
form = CategoryForm(request.POST)
if form.is_valid():
form.save(commit=True) # save new category to DB
return index(request) # redirect to index page
else: # supplied form contains errors
print(form.errors)
return render(request, 'rango/add_category.html', {'form':form})
def add_page(request, category_name_slug):
try:
category = Category.objects.get(slug=category_name_slug)
except Category.DoesNotExist:
category = None
form = PageForm()
if request.method == 'POST':
form = PageForm(request.POST)
if form.is_valid():
if category:
page = form.save(commit=False)
page.category = category
page.views = 0
page.save()
return show_category(request, category_name_slug)
else:
print(form.errors)
context_dict = {'form': form, 'category': category}
return render(request, 'rango/add_page.html', context_dict)
def register(request):
# A boolean telling template whether registration successful
registered = False
if request.method == 'POST':
user_form = UserForm(data=request.POST)
profile_form=UserProfileForm(data=request.POST)
if user_form.is_valid() and profile_form.is_valid():
user = user_form.save()
user.set_password(user.password)
user.save()
profile = profile_form.save(commit=False)
profile.user = user
if 'picture' in request.FILES:
profile.picture = request.FILES['picture']
profile.save()
registered = True
else:
print(user_form.errors, profile_form.errors)
else:
user_form = UserForm()
profile_form = UserProfileForm()
return render(request, 'rango/register.html',
{'user_form': user_form,
'profile_form': profile_form,
'registered': registered})
def user_login(request):
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(username=username, password=password)
if user:
if user.is_active:
login(request, user)
return HttpResponseRedirect(reverse('index'))
else:
return HttpResponse("Your Rango account is disabled.")
else:
print("Invalid login details: {0}, {1}".format(username, password))
return HttpResponse("Invalid login details supplied.")
else:
return render(request, 'rango/login.html', {})
@login_required
def user_logout(request):
logout(request)
return HttpResponseRedirect(reverse('index'))
@login_required
def restricted(request):
return render(request, 'rango/restricted.html', {})
def get_server_side_cookie(request, cookie, default_val=None):
val = request.session.get(cookie)
if not val:
val = default_val
return val
def visitor_cookie_handler(request):
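    # server-side visit counter: bump 'visits' at most once per day and keep
    # both values in the session rather than in client cookies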
    visits = int(get_server_side_cookie(request, 'visits', '1'))
    last_visit_cookie = get_server_side_cookie(request, 'last_visit', str(datetime.now()))
last_visit_time = datetime.strptime(last_visit_cookie[:-7],'%Y-%m-%d %H:%M:%S')
if (datetime.now() - last_visit_time).days > 0:
visits += 1
request.session['last_visit'] = str(datetime.now())
else:
request.session['last_visit'] = last_visit_cookie
request.session['visits'] = visits
|
[
"2506109m@student.gla.ac.uk"
] |
2506109m@student.gla.ac.uk
|
ae89368f6894952ab87cdf181f423665bc8b5238
|
3112f5833646f33fc731cd56be052db74c96697a
|
/L16/nmt/utils/nmt_utils.py
|
9f43f897005538b1e5e65862476bd16b063da3d1
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
busyyang/DL_21tensorflow
|
4ec33c7396f16607c940de94fd2878ef8d2fc15a
|
ccac457b66a80f3de80d14d503e6cec8681537eb
|
refs/heads/master
| 2020-05-05T06:10:03.467053
| 2020-04-24T09:40:30
| 2020-04-24T09:40:30
| 179,778,042
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,281
|
py
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions specifically for NMT."""
from __future__ import print_function
import codecs
import time
import tensorflow as tf
from utils import evaluation_utils
from utils import misc_utils as utils
__all__ = ["decode_and_evaluate", "get_translation"]
def decode_and_evaluate(name,
model,
sess,
trans_file,
ref_file,
metrics,
bpe_delimiter,
beam_width,
tgt_eos,
decode=True):
"""Decode a test set and compute a score according to the evaluation task."""
# Decode
if decode:
utils.print_out(" decoding to output %s." % trans_file)
start_time = time.time()
num_sentences = 0
with codecs.getwriter("utf-8")(
tf.gfile.GFile(trans_file, mode="wb")) as trans_f:
trans_f.write("") # Write empty string to ensure file is created.
while True:
try:
nmt_outputs, _ = model.decode(sess)
if beam_width > 0:
# get the top translation.
nmt_outputs = nmt_outputs[0]
num_sentences += len(nmt_outputs)
for sent_id in range(len(nmt_outputs)):
translation = get_translation(
nmt_outputs,
sent_id,
tgt_eos=tgt_eos,
bpe_delimiter=bpe_delimiter)
trans_f.write((translation + b"\n").decode("utf-8"))
except tf.errors.OutOfRangeError:
utils.print_time(" done, num sentences %d" % num_sentences,
start_time)
break
# Evaluation
evaluation_scores = {}
if ref_file and tf.gfile.Exists(trans_file):
for metric in metrics:
score = evaluation_utils.evaluate(
ref_file,
trans_file,
metric,
bpe_delimiter=bpe_delimiter)
evaluation_scores[metric] = score
utils.print_out(" %s %s: %.1f" % (metric, name, score))
return evaluation_scores
def get_translation(nmt_outputs, sent_id, tgt_eos, bpe_delimiter):
"""Given batch decoding outputs, select a sentence and turn to text."""
# Select a sentence
output = nmt_outputs[sent_id, :].tolist()
# If there is an eos symbol in outputs, cut them at that point.
if tgt_eos and tgt_eos in output:
output = output[:output.index(tgt_eos)]
if not bpe_delimiter:
translation = utils.format_text(output)
else: # BPE
translation = utils.format_bpe_text(output, delimiter=bpe_delimiter)
return translation
|
[
"jzhm_4@hotmail.com"
] |
jzhm_4@hotmail.com
|
03716fe5734eed034ca142d91437b67b07de10f6
|
7874d87ae790c92900e42c9a5b41eb07f2da82c8
|
/app/admin.py
|
a08ffa254b4aec6c18de9d67c16fff57ccb85350
|
[
"MIT"
] |
permissive
|
PatrickRudgeri/financial-mngt
|
9a751b1112cff4a3d5bb3013d192d397dca8a334
|
42754e7ade89805a2297c1783f86a0451dec4674
|
refs/heads/main
| 2023-07-14T20:32:30.135553
| 2021-08-31T22:33:05
| 2021-08-31T22:33:05
| 375,134,656
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 271
|
py
|
from django.contrib import admin
# Register your models here.
from .models import Receita, Despesa, CategoriaReceita, CategoriaDespesa
admin.site.register(Despesa)
admin.site.register(Receita)
admin.site.register(CategoriaReceita)
admin.site.register(CategoriaDespesa)
|
[
"patrick.rudgeri@ice.ufjf.br"
] |
patrick.rudgeri@ice.ufjf.br
|
b5268837c074ea67f5075ad15b1200f4da8176f3
|
2cbe5fca293b975801feb50527091f4c7df6fb50
|
/Carpool Report.py
|
ecb4365539eeccebd9704cab81baa01ed33bcb13
|
[] |
no_license
|
Devtlv-classroom/carpool-report-shaul615
|
947fcb52156372ada6b23947d680c4d765ad50c2
|
3f0d892c5486433b1729bd4492ac7b8fce07af8b
|
refs/heads/master
| 2020-08-29T13:34:46.853546
| 2019-10-28T14:13:42
| 2019-10-28T14:13:42
| 218,047,921
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 689
|
py
|
cars_available = 100
max_passengers_per_car = 4
days_drivers = 30
days_passengers_waiting = 90
# there should always be more cars than registered drivers
if (cars_available < days_drivers):
cars_available = days_drivers+1
empty_cars_today = cars_available-(days_passengers_waiting /max_passengers_per_car)
print("The number of cars available on your app:",cars_available)
print("The number of drivers registered on your app:",days_drivers)
print("The number of empty cars today:",int(empty_cars_today))
print("The number of passengers that can be transported today:",cars_available*(max_passengers_per_car+1))
print("The average of passengers to put in each car:",days_passengers_waiting/days_drivers)
|
[
"noreply@github.com"
] |
Devtlv-classroom.noreply@github.com
|
ecc44c0dc3c2b5e543449e9a4f7a77c968e58bc6
|
b431eaf4281c494892461ca8ff0021a1d64d2cc1
|
/savant/web/migrations/0011_auto_20161003_2203.py
|
01c2b029fc83625648f7e13476d31fc20a8eba1f
|
[] |
no_license
|
elston/savantrend
|
7716f8f28775f17ebe231bddb6b9765530fbacbd
|
91d4603a18003c99a9313e033d6a09733737f155
|
refs/heads/master
| 2021-01-11T01:47:02.973465
| 2016-10-25T08:01:53
| 2016-10-25T08:01:53
| 70,667,707
| 0
| 0
| null | 2016-10-12T05:57:32
| 2016-10-12T05:57:30
| null |
UTF-8
|
Python
| false
| false
| 694
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.contrib.postgres.fields
class Migration(migrations.Migration):
dependencies = [
('web', '0010_auto_20160522_2323'),
]
operations = [
migrations.AddField(
model_name='user',
name='enabled_reports',
field=django.contrib.postgres.fields.ArrayField(default=['performancecalendar', 'executivesummary', 'hourlyperformance', 'dailyretailtrendanalysis', 'performancecomparison', 'performancetrendanalysis'], size=None, base_field=models.CharField(max_length=200)),
)
]
|
[
"vitaliysvyatuk@VITALIYs-MacBook.local"
] |
vitaliysvyatuk@VITALIYs-MacBook.local
|
50118fd8db82a5f4ac248eaa74a26f8e05463712
|
26d3ea26c53851baa406fd411e0815c7de5921a1
|
/split_to_utterances.py
|
8697b5ede93eef78780f3de00d6feeecf30a42ce
|
[
"MIT"
] |
permissive
|
megseekosh/ALICE
|
2a877de4ea618a68e14050cb35b82699b5ee98fc
|
3cbd142437cb7f21b5ffe1e91bbf8e290bf301d9
|
refs/heads/master
| 2023-02-11T08:01:58.271246
| 2020-06-11T13:47:18
| 2020-06-11T13:47:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,288
|
py
|
# Loads diarization outputs (.rttm files) and splits long audio files into
# utterance-sized wav files based on the diarization results. The short files
# are stored temporarily under ALICE/tmp_data/short/
import csv,sys
from scipy.io import wavfile
import numpy as np
curdir = sys.argv[1]
valid_speakers = ['FEM','MAL']
DATA = []
with open(curdir + '/output_voice_type_classifier/tmp_data/all.rttm') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
DATA.append(row)
line_count += 1
curfile = []
for k in range(0,len(DATA)):
s = DATA[k][0].split()
filename = s[1]
orig_filename = curdir + '/tmp_data/'+ s[1] +'.wav'
speaker = s[7]
isvalid = False
for ref in valid_speakers:
if(ref == speaker):
isvalid = True
if(curfile != orig_filename): # read .wav if not read already
fs, data = wavfile.read(orig_filename)
curfile = orig_filename
onset = float(s[3])
offset = onset+float(s[4])
if isvalid:
y = data[max(0,round(onset*fs)):min(len(data),round(offset*fs))]
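        # the filename encodes onset/offset as zero-padded integers in units
        # of 0.1 ms (seconds * 10000)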
new_filename = curdir + '/tmp_data/short/'+ filename + ("_%08.0f" % (onset*10000)) + '_' + ("%08.0f" % (offset*10000)) +'.wav'
wavfile.write(new_filename,fs,y)
|
[
"okko.rasanen@tuni.fi"
] |
okko.rasanen@tuni.fi
|
e031cbcb202dd5a172d82084fe7d561a5868b525
|
cce1a4bef3d160368041e94c6cf5c19b4d1b052a
|
/doc/source/conf.py
|
aa7a1801ebd820d87817e4112cdc27b91437d780
|
[] |
no_license
|
arattinger/block
|
f6cdd26f42aefdded202e5874f5ad6220d1e7735
|
f2f6fe74b9383b47ee56612cb7e9517193a6769e
|
refs/heads/master
| 2020-04-09T23:40:01.660115
| 2016-11-01T20:37:20
| 2016-11-01T20:37:20
| 9,770,527
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,202
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# block documentation build configuration file, created by
# sphinx-quickstart on Sun Feb 28 21:19:42 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.pngmath',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'block'
copyright = '2016, Andre Rattinger'
author = 'Andre Rattinger'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'blockdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'block.tex', 'block Documentation',
'Andre Rattinger', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'block', 'block Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'block', 'block Documentation',
author, 'block', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
[
"andre.rattinger@saltwatersolutions.com.au"
] |
andre.rattinger@saltwatersolutions.com.au
|
da211cd2b9db393ee96655c9a0883c6285b6e7b0
|
840580a596ae3ae37ecbfba16ebe8da6baf3f282
|
/test_autolens/integration/tests/imaging/lens__source_inversion/rectangular/lens_mass__source__hyper.py
|
acf116cba8ca402c93e2f50d8b945e775c955a47
|
[
"MIT"
] |
permissive
|
SanchiMittal/PyAutoLens
|
3e4c7f949a6963891342c14535903dcb8ef978b1
|
f1d3f08f12a61f6634e1b7a0ccf8f5cfe0252035
|
refs/heads/master
| 2021-01-08T14:28:32.850616
| 2020-02-20T16:40:21
| 2020-02-20T16:40:21
| 242,053,578
| 1
| 0
|
MIT
| 2020-02-21T04:25:27
| 2020-02-21T04:25:26
| null |
UTF-8
|
Python
| false
| false
| 2,193
|
py
|
import autofit as af
import autolens as al
from test_autolens.integration.tests.imaging import runner
test_type = "lens__source_inversion"
test_name = "lens_mass__source_rectangular__hyper"
data_type = "lens_sie__source_smooth"
data_resolution = "lsst"
def make_pipeline(name, phase_folders, optimizer_class=af.MultiNest):
class SourcePix(al.PhaseImaging):
def customize_priors(self, results):
self.galaxies.lens.mass.centre.centre_0 = 0.0
self.galaxies.lens.mass.centre.centre_1 = 0.0
self.galaxies.lens.mass.einstein_radius = 1.6
phase1 = SourcePix(
phase_name="phase_1",
phase_folders=phase_folders,
galaxies=dict(
lens=al.GalaxyModel(redshift=0.5, mass=al.mp.EllipticalIsothermal),
source=al.GalaxyModel(
redshift=1.0,
pixelization=al.pix.Rectangular,
regularization=al.reg.Constant,
),
),
optimizer_class=optimizer_class,
)
phase1.optimizer.const_efficiency_mode = True
phase1.optimizer.n_live_points = 60
phase1.optimizer.sampling_efficiency = 0.8
phase1.extend_with_multiple_hyper_phases(hyper_galaxy=True)
phase2 = al.PhaseImaging(
phase_name="phase_2",
phase_folders=phase_folders,
galaxies=dict(
lens=al.GalaxyModel(
redshift=0.5,
mass=phase1.result.model.galaxies.lens.mass,
hyper_galaxy=al.HyperGalaxy,
),
source=al.GalaxyModel(
redshift=1.0,
pixelization=phase1.result.model.galaxies.source.pixelization,
regularization=phase1.result.model.galaxies.source.regularization,
hyper_galaxy=phase1.result.hyper_combined.instance.galaxies.source.hyper_galaxy,
),
),
optimizer_class=optimizer_class,
)
phase2.optimizer.const_efficiency_mode = True
phase2.optimizer.n_live_points = 40
phase2.optimizer.sampling_efficiency = 0.8
return al.PipelineDataset(name, phase1, phase2)
if __name__ == "__main__":
import sys
runner.run(sys.modules[__name__])
|
[
"james.w.nightingale@durham.ac.uk"
] |
james.w.nightingale@durham.ac.uk
|
7bf97086e896dd70e7474c0d7b9263220a1afcb6
|
d806dc8232a89537ff4e1238f5dfad498312df49
|
/main.py
|
61766fc4933ec9b8fbe9ddcf7d5fd3fd6df7f3c3
|
[] |
no_license
|
abhay-venkatesh/ml-template
|
3e0ab7181486f41c21a5915469d00dff82841cc5
|
fb85a2a0274891584c8a73c6cc5e2d7c7136fb7b
|
refs/heads/master
| 2020-06-16T12:27:04.565453
| 2019-07-07T00:38:29
| 2019-07-07T00:38:29
| 195,573,835
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
from inflection import underscore
import argparse
import importlib
if __name__ == "__main__":
# Get agent
parser = argparse.ArgumentParser()
parser.add_argument("agent")
args = parser.parse_args()
# Make the agent run!
agent_module = importlib.import_module(("agents.{}").format(underscore(args.agent)))
Agent = getattr(agent_module, args.agent)
Agent().run()
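    # e.g. `python main.py MyAgent` (an illustrative name) resolves to
    # agents/my_agent.py via underscore(), which maps CamelCase to snake_case,
    # and then runs that module's MyAgent class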
|
[
"abhay.venkatesh@gmail.com"
] |
abhay.venkatesh@gmail.com
|
18b65a38efed11ffc8280c7e042495755f31814e
|
dfb7cea1a1875b36f9689d781fe2cf866957cc5c
|
/soldajustica/soldajustica/wsgi.py
|
e29b379a6c94693b458b924404ca3b9d695dd8de
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
patrickporto/soldajustica
|
e1c84dfe636278109812dd5022c5787c94f437b3
|
191299012a8142c8d1c073f025a2ff2d1a2d267c
|
refs/heads/master
| 2021-01-10T05:25:33.810919
| 2015-06-19T04:31:11
| 2015-06-19T04:31:11
| 36,825,273
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,181
|
py
|
"""
WSGI config for soldajustica project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "soldajustica.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
application = Cling(get_wsgi_application())
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
[
"patrick.porto@concretesolutions.com.br"
] |
patrick.porto@concretesolutions.com.br
|
98295db78c9952a00b429c7193e8b2799f35ff30
|
93bcfe04cde85254fc5854000442dcada1c2dcbb
|
/deps/lib/python3.4/site-packages/pywink/devices/lock.py
|
5e288ac702d4bb198681ed3c3738cae2560f7be1
|
[
"Apache-2.0"
] |
permissive
|
marknestor261/jarvis
|
042f80718a2abc9ebaf1716768c847cdfe01ed6b
|
062f20303b3e1d46a20a68e8ed8337b3d05f84dd
|
refs/heads/master
| 2023-08-28T03:25:10.850283
| 2021-11-09T20:30:37
| 2021-11-09T20:30:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,214
|
py
|
from pywink.devices.base import WinkDevice
class WinkLock(WinkDevice):
"""
Represents a Wink lock.
"""
def state(self):
return self._last_reading.get('locked', False)
def alarm_enabled(self):
return self._last_reading.get('alarm_enabled', False)
def alarm_mode(self):
return self._last_reading.get('alarm_mode')
def vacation_mode_enabled(self):
return self._last_reading.get('vacation_mode_enabled', False)
def beeper_enabled(self):
return self._last_reading.get('beeper_enabled', False)
def auto_lock_enabled(self):
return self._last_reading.get('auto_lock_enabled', False)
def alarm_sensitivity(self):
return self._last_reading.get('alarm_sensitivity')
def set_alarm_sensitivity(self, mode):
"""
:param mode: 1.0 for Very sensitive, 0.2 for not sensitive.
Steps in values of 0.2.
:return: nothing
"""
values = {"desired_state": {"alarm_sensitivity": mode}}
response = self.api_interface.set_device_state(self, values)
self._update_state_from_response(response)
def set_alarm_mode(self, mode):
"""
:param mode: one of [None, "activity", "tamper", "forced_entry"]
:return: nothing
"""
values = {"desired_state": {"alarm_mode": mode}}
response = self.api_interface.set_device_state(self, values)
self._update_state_from_response(response)
def set_alarm_state(self, state):
"""
        :param state: a boolean, True (on) or False (off)
:return: nothing
"""
values = {"desired_state": {"alarm_enabled": state}}
response = self.api_interface.set_device_state(self, values)
self._update_state_from_response(response)
def set_vacation_mode(self, state):
"""
        :param state: a boolean, True (on) or False (off)
:return: nothing
"""
values = {"desired_state": {"vacation_mode_enabled": state}}
response = self.api_interface.set_device_state(self, values)
self._update_state_from_response(response)
def set_beeper_mode(self, state):
"""
        :param state: a boolean, True (on) or False (off)
:return: nothing
"""
values = {"desired_state": {"beeper_enabled": state}}
response = self.api_interface.set_device_state(self, values)
self._update_state_from_response(response)
def set_state(self, state):
"""
        :param state: a boolean, True (on) or False (off)
:return: nothing
"""
values = {"desired_state": {"locked": state}}
response = self.api_interface.local_set_state(self, values)
self._update_state_from_response(response)
def update_state(self):
"""Update state with latest info from Wink API."""
response = self.api_interface.local_get_state(self)
return self._update_state_from_response(response)
def add_new_key(self, code, name):
"""Add a new user key code."""
device_json = {"code": code, "name": name}
return self.api_interface.create_lock_key(self, device_json)
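# A minimal usage sketch (illustrative; a WinkLock instance would come from
# pywink's device discovery, e.g. pywink.get_locks()):
#
#   lock.set_alarm_mode("tamper")      # one of None/"activity"/"tamper"/"forced_entry"
#   lock.set_alarm_sensitivity(0.6)    # 0.2 (least) .. 1.0 (most), in steps of 0.2
#   lock.set_state(True)               # lock the door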
|
[
"lance@hayniemail.com"
] |
lance@hayniemail.com
|
4776637162cb0a3dccd00395d5446b5f26a201b0
|
a83e4e6a5a09a0a170dc57d8a153f9ee3f2c855c
|
/prime_numbers.py
|
c1719d3b832e59e24736af84efb362d702024395
|
[] |
no_license
|
markhebing/python-scripts
|
501562cb487bc3ecf501052e46ace79ff83d7b6c
|
c49f8d6a98e7bc8dfdad27d67b321e1c27e2584e
|
refs/heads/master
| 2022-11-11T23:44:17.124652
| 2020-06-28T17:23:19
| 2020-06-28T17:23:19
| 275,632,262
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 329
|
py
|
x = input("Enter a number: ")
y = 1
count = 0
while y <= int(x):
if int(x) % y == 0:
print("Divisible by " + str(y) + "...")
count = count + 1
y = y + 1
if count == 2:
print("Otherwise indivisible...this IS a prime number!")
elif count > 2:
print("As you can see...this is NOT a prime number.")
|
[
"noreply@github.com"
] |
markhebing.noreply@github.com
|
2883768972372e258eea39a8fcd8fd34acbd3403
|
0c962db0d657de6a9a9444633666a0455154d5dd
|
/mmdet/core/bbox/iou_calculators/obb/obbiou_calculator.py
|
42f9a417ecbd5fad95ee16423765f8bbf0956282
|
[
"Apache-2.0"
] |
permissive
|
Dustone-Mu/OBBDetection
|
be5e486f0676817220c01af47430b59c8a374610
|
a7c9539ffe63b54e4d79932b809ec80b2ff1f5bb
|
refs/heads/master
| 2023-08-15T03:21:31.064998
| 2021-09-26T12:35:58
| 2021-09-26T12:35:58
| 411,566,302
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,665
|
py
|
import torch
import BboxToolkit as bt
from mmdet.ops import obb_overlaps
from ..builder import IOU_CALCULATORS
@IOU_CALCULATORS.register_module()
class OBBOverlaps(object):
"""2D IoU Calculator"""
def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False):
"""Calculate IoU between 2D bboxes
Args:
            bboxes1 (Tensor): oriented bboxes of shape (m, 5) in
                <cx, cy, w, h, theta> format, or shape (m, 6) with a
                trailing score.
            bboxes2 (Tensor): oriented bboxes of shape (n, 5), shape (n, 6)
                with a trailing score, or empty. If is_aligned is ``True``,
                then m and n must be equal.
mode (str): "iou" (intersection over union) or iof (intersection
over foreground).
Returns:
ious(Tensor): shape (m, n) if is_aligned == False else shape (m, 1)
"""
assert bboxes1.size(-1) in [0, 5, 6]
assert bboxes2.size(-1) in [0, 5, 6]
if bboxes2.size(-1) == 6:
bboxes2 = bboxes2[..., :5]
if bboxes1.size(-1) == 6:
bboxes1 = bboxes1[..., :5]
return obb_overlaps(bboxes1, bboxes2, mode, is_aligned)
def __repr__(self):
"""str: a string describing the module"""
repr_str = self.__class__.__name__ + '()'
return repr_str
@IOU_CALCULATORS.register_module()
class PolyOverlaps(object):
"""2D IoU Calculator"""
def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False):
"""Calculate IoU between 2D bboxes
Args:
            bboxes1 (Tensor): polygon bboxes of shape (m, 8) in
                <x1, y1, x2, y2, x3, y3, x4, y4> format, or shape (m, 9)
                with a trailing score.
            bboxes2 (Tensor): polygon bboxes of shape (n, 8), shape (n, 9)
                with a trailing score, or empty. If is_aligned is ``True``,
                then m and n must be equal.
mode (str): "iou" (intersection over union) or iof (intersection
over foreground).
Returns:
ious(Tensor): shape (m, n) if is_aligned == False else shape (m, 1)
"""
assert bboxes1.size(-1) in [0, 8, 9]
assert bboxes2.size(-1) in [0, 8, 9]
if bboxes2.size(-1) == 9:
bboxes2 = bboxes2[..., :8]
if bboxes1.size(-1) == 9:
bboxes1 = bboxes1[..., :8]
return bt.bbox_overlaps(bboxes1, bboxes2, mode, is_aligned)
def __repr__(self):
"""str: a string describing the module"""
repr_str = self.__class__.__name__ + '()'
return repr_str
|
[
"709370615@qq.com"
] |
709370615@qq.com
|
62a302b657ce8c8d1a2a978eb3620007967c6d1f
|
d806fdb47893956d3d5b79a14345a744420beac5
|
/gameapp/migrations/0003_gameapp_status.py
|
9a2a4522f3ef6034a771a9043b1029010a2b286d
|
[] |
no_license
|
bbrastogi/hangman
|
047d800520b995bef8d1f1204e5df0c458c99ed8
|
a375210190600575a744497704aa6a38211b23d7
|
refs/heads/master
| 2020-05-20T02:26:23.780100
| 2019-05-21T06:18:07
| 2019-05-21T06:18:07
| 185,331,735
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 463
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2019-04-26 10:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gameapp', '0002_auto_20190423_1416'),
]
operations = [
migrations.AddField(
model_name='gameapp',
name='status',
field=models.CharField(max_length=250, null=True),
),
]
|
[
"rastogi.barkha6@gmail.com"
] |
rastogi.barkha6@gmail.com
|
487eb7e5f5bbdfbedc795b4fdfa8bb6748e9d912
|
82f1b4c0bccd66933f93d02703a3948f08ebc1a9
|
/tests/pytests/unit/states/test_smartos.py
|
69d4dc75ee95b402f03f7f1f54ecc66f4347cc89
|
[
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] |
permissive
|
waynew/salt
|
ddb71301944b64f5429e0dbfeccb0ea873cdb62d
|
ac9f139f795295de11be3fb1490ab8cec29611e5
|
refs/heads/master
| 2023-01-24T10:43:53.104284
| 2022-03-29T04:27:22
| 2022-03-29T13:45:09
| 163,890,509
| 1
| 0
|
Apache-2.0
| 2019-01-02T21:17:12
| 2019-01-02T21:17:11
| null |
UTF-8
|
Python
| false
| false
| 2,991
|
py
|
import pytest
import salt.states.smartos as smartos
from salt.utils.odict import OrderedDict
from tests.support.mock import patch
@pytest.fixture
def configure_loader_modules():
return {smartos: {"__opts__": {"test": False}}}
def test_config_present_does_not_exist():
"""
Test salt.states.smartos.config_present
    when the config file does not exist
"""
name = "test"
value = "test_value"
with patch("os.path.isfile", return_value=False):
with patch("salt.utils.atomicfile.atomic_open", side_effect=IOError):
ret = smartos.config_present(name=name, value=value)
assert not ret["result"]
assert ret[
"comment"
] == 'Could not add property {} with value "{}" to config'.format(name, value)
def test_parse_vmconfig_vrrp():
"""
    Test _parse_vmconfig's vrid -> mac converter
SmartOS will always use a mac based on the vrrp_vrid,
so we will replace the provided mac with the one based
on this value.
Doing so ensures that 'old' nics are removed and 'new'
nics get added as these actions are keyed on the mac
property.
"""
# NOTE: vmconfig is not a full vmadm payload,
# this is not an issue given we are only testing
# the vrrp_vrid to mac conversions
ret = smartos._parse_vmconfig(
OrderedDict(
[
(
"nics",
OrderedDict(
[
(
"00:00:5e:00:01:01",
OrderedDict(
[
("vrrp_vrid", 1),
("vrrp_primary_ip", "12.34.5.6"),
]
),
),
(
"00:00:5e:00:01:24",
OrderedDict(
[
("vrrp_vrid", 240),
("vrrp_primary_ip", "12.34.5.6"),
]
),
),
(
"00:22:06:00:00:01",
OrderedDict([("ips", ["12.34.5.6/24"])]),
),
]
),
)
]
),
{"nics": "mac"},
)
# NOTE: nics.0 is a vrrp nic with correct mac (check mac == vrid based -> unchanged)
assert ret["nics"][0]["mac"] == "00:00:5e:00:01:01"
# NOTE: nics.1 is a vrrp nic with incorrect mac (check mac == vrid based -> changed)
assert ret["nics"][1]["mac"] == "00:00:5e:00:01:f0"
# NOTE: nics.2 was not a vrrp nic (check mac was not changed)
assert ret["nics"][2]["mac"] == "00:22:06:00:00:01"
|
[
"pedro@algarvio.me"
] |
pedro@algarvio.me
|
bd7860a9b0e76452fb3b1c3611913391e02eb453
|
facb8b9155a569b09ba66aefc22564a5bf9cd319
|
/wp2/era5_scripts/01_netCDF_extraction/erafive902TG/539-tideGauge.py
|
e40ac93605236ebb7b7d014f6aac8cfe04e252b0
|
[] |
no_license
|
moinabyssinia/modeling-global-storm-surges
|
13e69faa8f45a1244a964c5de4e2a5a6c95b2128
|
6e385b2a5f0867df8ceabd155e17ba876779c1bd
|
refs/heads/master
| 2023-06-09T00:40:39.319465
| 2021-06-25T21:00:44
| 2021-06-25T21:00:44
| 229,080,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,595
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 01 10:00:00 2020
ERA5 netCDF extraction script
@author: Michael Tadesse
"""
import time as tt
import os
import pandas as pd
from d_define_grid import Coordinate, findPixels, findindx
from c_read_netcdf import readnetcdf
from f_era5_subsetV2 import subsetter
def extract_data(delta=1):
"""
This is the master function that calls subsequent functions
to extract uwnd, vwnd, slp for the specified
tide gauges
delta: distance (in degrees) from the tide gauge
"""
print('Delta = {}'.format(delta), '\n')
#defining the folders for predictors
nc_path = {'slp' : "/lustre/fs0/home/mtadesse/era_five/slp",\
"wnd_u": "/lustre/fs0/home/mtadesse/era_five/wnd_u",\
'wnd_v' : "/lustre/fs0/home/mtadesse/era_five/wnd_v"}
surge_path = "/lustre/fs0/home/mtadesse/obs_surge"
csv_path = "/lustre/fs0/home/mtadesse/erafive_localized"
#cd to the obs_surge dir to get TG information
os.chdir(surge_path)
tg_list = os.listdir()
#################################
#looping through the predictor folders
#################################
for pf in nc_path.keys():
print(pf, '\n')
os.chdir(nc_path[pf])
####################################
#looping through the years of the chosen predictor
####################################
for py in os.listdir():
os.chdir(nc_path[pf]) #back to the predictor folder
print(py, '\n')
            #get netcdf components - give predictor name and predictor file
nc_file = readnetcdf(pf, py)
lon, lat, time, pred = nc_file[0], nc_file[1], nc_file[2], \
nc_file[3]
x = 539
y = 540
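            # note: [x, y) is a half-open range, so this run processes a single
            # tide gauge, index 539 (matching the script name 539-tideGauge)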
#looping through individual tide gauges
for t in range(x, y):
#the name of the tide gauge - for saving purposes
# tg = tg_list[t].split('.mat.mat.csv')[0]
tg = tg_list[t]
#extract lon and lat data from surge csv file
print("tide gauge", tg, '\n')
os.chdir(surge_path)
if os.stat(tg).st_size == 0:
print('\n', "This tide gauge has no surge data!", '\n')
continue
surge = pd.read_csv(tg, header = None)
#surge_with_date = add_date(surge)
#define tide gauge coordinate(lon, lat)
tg_cord = Coordinate(float(surge.iloc[1,4]), float(surge.iloc[1,5]))
print(tg_cord)
#find closest grid points and their indices
close_grids = findPixels(tg_cord, delta, lon, lat)
ind_grids = findindx(close_grids, lon, lat)
ind_grids.columns = ['lon', 'lat']
#loop through preds#
#subset predictor on selected grid size
print("subsetting \n")
pred_new = subsetter(pred, ind_grids, time)
#create directories to save pred_new
os.chdir(csv_path)
#tide gauge directory
tg_name = tg.split('.csv')[0]
try:
os.makedirs(tg_name)
os.chdir(tg_name) #cd to it after creating it
except FileExistsError:
#directory already exists
os.chdir(tg_name)
#predictor directory
pred_name = pf
try:
os.makedirs(pred_name)
os.chdir(pred_name) #cd to it after creating it
except FileExistsError:
#directory already exists
os.chdir(pred_name)
#time for saving file
print("saving as csv")
yr_name = py.split('_')[-1]
save_name = '_'.join([tg_name, pred_name, yr_name])\
+ ".csv"
pred_new.to_csv(save_name)
#return to the predictor directory
os.chdir(nc_path[pf])
#run script
extract_data(delta=1)
|
[
"michaelg.tadesse@gmail.com"
] |
michaelg.tadesse@gmail.com
|
bda59c3fdeefa65d0333289409e03069373c37f1
|
641fa8341d8c436ad24945bcbf8e7d7d1dd7dbb2
|
/net/data/verify_certificate_chain_unittest/common.py
|
c5bcd9729986c1011f2062033565531500e24809
|
[
"BSD-3-Clause"
] |
permissive
|
massnetwork/mass-browser
|
7de0dfc541cbac00ffa7308541394bac1e945b76
|
67526da9358734698c067b7775be491423884339
|
refs/heads/master
| 2022-12-07T09:01:31.027715
| 2017-01-19T14:29:18
| 2017-01-19T14:29:18
| 73,799,690
| 4
| 4
|
BSD-3-Clause
| 2022-11-26T11:53:23
| 2016-11-15T09:49:29
| null |
UTF-8
|
Python
| false
| false
| 16,486
|
py
|
#!/usr/bin/python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Set of helpers to generate signed X.509v3 certificates.
This works by shelling out calls to the 'openssl req' and 'openssl ca'
commands, and passing the appropriate command line flags and configuration file
(.cnf).
"""
import base64
import os
import shutil
import subprocess
import sys
import openssl_conf
# Enum for the "type" of certificate that is to be created. This is used to
# select sane defaults for the .cnf file and command line flags, but they can
# all be overridden.
TYPE_CA = 2
TYPE_END_ENTITY = 3
# March 1st, 2015 12:00 UTC
MARCH_1_2015_UTC = '150301120000Z'
# March 2nd, 2015 12:00 UTC
MARCH_2_2015_UTC = '150302120000Z'
# January 1st, 2015 12:00 UTC
JANUARY_1_2015_UTC = '150101120000Z'
# January 1st, 2016 12:00 UTC
JANUARY_1_2016_UTC = '160101120000Z'
# The default time tests should use when verifying.
DEFAULT_TIME = MARCH_2_2015_UTC
# Counters used to generate unique (but readable) path names.
g_cur_path_id = {}
# Output paths used:
# - g_out_dir: where any temporary files (keys, cert req, signing db etc) are
# saved to.
# - g_out_pem: the path to the final output (which is a .pem file)
#
# See init() for how these are assigned, based on the name of the calling
# script.
g_out_dir = None
g_out_pem = None
def get_unique_path_id(name):
"""Returns a base filename that contains 'name', but is unique to the output
directory"""
path_id = g_cur_path_id.get(name, 0)
g_cur_path_id[name] = path_id + 1
# Use a short and clean name for the first use of this name.
if path_id == 0:
return name
# Otherwise append the count to make it unique.
return '%s_%d' % (name, path_id)
def get_path_in_output_dir(name, suffix):
return os.path.join(g_out_dir, '%s%s' % (name, suffix))
def get_unique_path_in_output_dir(name, suffix):
return get_path_in_output_dir(get_unique_path_id(name), suffix)
class Key(object):
"""Describes a public + private key pair. It is a dumb wrapper around an
on-disk key."""
def __init__(self, path):
self.path = path
def get_path(self):
"""Returns the path to a file that contains the key contents."""
return self.path
def generate_rsa_key(size_bits, path=None):
"""Generates an RSA private key and returns it as a Key object. If |path| is
specified the resulting key will be saved at that location."""
if path is None:
path = get_unique_path_in_output_dir('RsaKey', 'key')
  # Ensure the path doesn't already exist (otherwise we would be overwriting
  # something).
assert not os.path.isfile(path)
subprocess.check_call(
['openssl', 'genrsa', '-out', path, str(size_bits)])
return Key(path)
def generate_ec_key(named_curve, path=None):
"""Generates an EC private key for the certificate and returns it as a Key
object. |named_curve| can be something like secp384r1. If |path| is specified
the resulting key will be saved at that location."""
if path is None:
path = get_unique_path_in_output_dir('EcKey', 'key')
  # Ensure the path doesn't already exist (otherwise we would be overwriting
  # something).
assert not os.path.isfile(path)
subprocess.check_call(
['openssl', 'ecparam', '-out', path,
'-name', named_curve, '-genkey'])
return Key(path)
class Certificate(object):
"""Helper for building an X.509 certificate."""
def __init__(self, name, cert_type, issuer):
# The name will be used for the subject's CN, and also as a component of
# the temporary filenames to help with debugging.
self.name = name
self.path_id = get_unique_path_id(name)
    # Allow the caller to override the key later. If no key is set, one will
    # be auto-generated.
self.key = None
# The issuer is also a Certificate object. Passing |None| means it is a
# self-signed certificate.
self.issuer = issuer
if issuer is None:
self.issuer = self
# The config contains all the OpenSSL options that will be passed via a
# .cnf file. Set up defaults.
self.config = openssl_conf.Config()
self.init_config()
# Some settings need to be passed as flags rather than in the .cnf file.
    # Technically these can be set through a .cnf, however doing so makes it
# sticky to the issuing certificate, rather than selecting it per
# subordinate certificate.
self.validity_flags = []
self.md_flags = []
# By default OpenSSL will use the current time for the start time. Instead
# default to using a fixed timestamp for more predictable results each time
# the certificates are re-generated.
self.set_validity_range(JANUARY_1_2015_UTC, JANUARY_1_2016_UTC)
# Use SHA-256 when THIS certificate is signed (setting it in the
# configuration would instead set the hash to use when signing other
# certificates with this one).
self.set_signature_hash('sha256')
# Set appropriate key usages and basic constraints. For flexibility in
# testing (since want to generate some flawed certificates) these are set
# on a per-certificate basis rather than automatically when signing.
if cert_type == TYPE_END_ENTITY:
self.get_extensions().set_property('keyUsage',
'critical,digitalSignature,keyEncipherment')
self.get_extensions().set_property('extendedKeyUsage',
'serverAuth,clientAuth')
else:
self.get_extensions().set_property('keyUsage',
'critical,keyCertSign,cRLSign')
self.get_extensions().set_property('basicConstraints', 'critical,CA:true')
# Tracks whether the PEM file for this certificate has been written (since
# generation is done lazily).
self.finalized = False
# Initialize any files that will be needed if this certificate is used to
# sign other certificates. Starts off serial numbers at 1, and will
# increment them for each signed certificate.
if not os.path.exists(self.get_serial_path()):
write_string_to_file('01\n', self.get_serial_path())
if not os.path.exists(self.get_database_path()):
write_string_to_file('', self.get_database_path())
def set_validity_range(self, start_date, end_date):
"""Sets the Validity notBefore and notAfter properties for the
certificate"""
self.validity_flags = ['-startdate', start_date, '-enddate', end_date]
def set_signature_hash(self, md):
"""Sets the hash function that will be used when signing this certificate.
Can be sha1, sha256, sha512, md5, etc."""
self.md_flags = ['-md', md]
def get_extensions(self):
return self.config.get_section('req_ext')
def get_path(self, suffix):
"""Forms a path to an output file for this certificate, containing the
indicated suffix. The certificate's name will be used as its basis."""
return os.path.join(g_out_dir, '%s%s' % (self.path_id, suffix))
def get_name_path(self, suffix):
"""Forms a path to an output file for this CA, containing the indicated
suffix. If multiple certificates have the same name, they will use the same
path."""
return get_path_in_output_dir(self.name, suffix)
def set_key(self, key):
assert self.finalized is False
self.set_key_internal(key)
def set_key_internal(self, key):
self.key = key
# Associate the private key with the certificate.
section = self.config.get_section('root_ca')
section.set_property('private_key', self.key.get_path())
def get_key(self):
if self.key is None:
self.set_key_internal(generate_rsa_key(2048, path=self.get_path(".key")))
return self.key
def get_cert_path(self):
return self.get_path('.pem')
def get_serial_path(self):
return self.get_name_path('.serial')
def get_csr_path(self):
return self.get_path('.csr')
def get_database_path(self):
return self.get_name_path('.db')
def get_config_path(self):
return self.get_path('.cnf')
def get_cert_pem(self):
# Finish generating a .pem file for the certificate.
self.finalize()
# Read the certificate data.
with open(self.get_cert_path(), 'r') as f:
return f.read()
def finalize(self):
"""Finishes the certificate creation process. This generates any needed
key, creates and signs the CSR. On completion the resulting PEM file can be
found at self.get_cert_path()"""
if self.finalized:
return # Already finalized, no work needed.
self.finalized = True
# Ensure that the issuer has been "finalized", since its outputs need to be
# accessible. Note that self.issuer could be the same as self.
self.issuer.finalize()
# Ensure the certificate has a key (gets lazily created by this call if
# missing).
self.get_key()
# Serialize the config to a file.
self.config.write_to_file(self.get_config_path())
# Create a CSR.
subprocess.check_call(
['openssl', 'req', '-new',
'-key', self.key.get_path(),
'-out', self.get_csr_path(),
'-config', self.get_config_path()])
cmd = ['openssl', 'ca', '-batch', '-in',
self.get_csr_path(), '-out', self.get_cert_path(), '-config',
self.issuer.get_config_path()]
if self.issuer == self:
cmd.append('-selfsign')
# Add in any extra flags.
cmd.extend(self.validity_flags)
cmd.extend(self.md_flags)
# Run the 'openssl ca' command.
subprocess.check_call(cmd)
def init_config(self):
"""Initializes default properties in the certificate .cnf file that are
generic enough to work for all certificates (but can be overridden later).
"""
# --------------------------------------
# 'req' section
# --------------------------------------
section = self.config.get_section('req')
section.set_property('encrypt_key', 'no')
section.set_property('utf8', 'yes')
section.set_property('string_mask', 'utf8only')
section.set_property('prompt', 'no')
section.set_property('distinguished_name', 'req_dn')
section.set_property('req_extensions', 'req_ext')
# --------------------------------------
# 'req_dn' section
# --------------------------------------
# This section describes the certificate subject's distinguished name.
section = self.config.get_section('req_dn')
section.set_property('commonName', '"%s"' % (self.name))
# --------------------------------------
# 'req_ext' section
# --------------------------------------
# This section describes the certificate's extensions.
section = self.config.get_section('req_ext')
section.set_property('subjectKeyIdentifier', 'hash')
# --------------------------------------
# SECTIONS FOR CAs
# --------------------------------------
# The following sections are used by the 'openssl ca' and relate to the
# signing operation. They are not needed for end-entity certificate
    # configurations, but only if this certificate will be used to sign other
# certificates.
# --------------------------------------
# 'ca' section
# --------------------------------------
section = self.config.get_section('ca')
section.set_property('default_ca', 'root_ca')
section = self.config.get_section('root_ca')
section.set_property('certificate', self.get_cert_path())
section.set_property('new_certs_dir', g_out_dir)
section.set_property('serial', self.get_serial_path())
section.set_property('database', self.get_database_path())
section.set_property('unique_subject', 'no')
# These will get overridden via command line flags.
section.set_property('default_days', '365')
section.set_property('default_md', 'sha256')
section.set_property('policy', 'policy_anything')
section.set_property('email_in_dn', 'no')
section.set_property('preserve', 'yes')
section.set_property('name_opt', 'multiline,-esc_msb,utf8')
section.set_property('cert_opt', 'ca_default')
section.set_property('copy_extensions', 'copy')
section.set_property('x509_extensions', 'signing_ca_ext')
section.set_property('default_crl_days', '30')
section.set_property('crl_extensions', 'crl_ext')
section = self.config.get_section('policy_anything')
section.set_property('domainComponent', 'optional')
section.set_property('countryName', 'optional')
section.set_property('stateOrProvinceName', 'optional')
section.set_property('localityName', 'optional')
section.set_property('organizationName', 'optional')
section.set_property('organizationalUnitName', 'optional')
section.set_property('commonName', 'optional')
section.set_property('emailAddress', 'optional')
section = self.config.get_section('signing_ca_ext')
section.set_property('subjectKeyIdentifier', 'hash')
section.set_property('authorityKeyIdentifier', 'keyid:always')
section.set_property('authorityInfoAccess', '@issuer_info')
section.set_property('crlDistributionPoints', '@crl_info')
section = self.config.get_section('issuer_info')
section.set_property('caIssuers;URI.0',
'http://url-for-aia/%s.cer' % (self.name))
section = self.config.get_section('crl_info')
section.set_property('URI.0', 'http://url-for-crl/%s.crl' % (self.name))
section = self.config.get_section('crl_ext')
section.set_property('authorityKeyIdentifier', 'keyid:always')
section.set_property('authorityInfoAccess', '@issuer_info')
def text_data_to_pem(block_header, text_data):
return '%s\n-----BEGIN %s-----\n%s\n-----END %s-----\n' % (text_data,
block_header, base64.b64encode(text_data), block_header)
class TrustAnchor(object):
"""Structure that represents a trust anchor."""
def __init__(self, cert, constrained=False):
self.cert = cert
self.constrained = constrained
def get_pem(self):
"""Returns a PEM block string describing this trust anchor."""
cert_data = self.cert.get_cert_pem()
block_name = 'TRUST_ANCHOR_UNCONSTRAINED'
if self.constrained:
block_name = 'TRUST_ANCHOR_CONSTRAINED'
# Use a different block name in the .pem file, depending on the anchor type.
return cert_data.replace('CERTIFICATE', block_name)
def write_test_file(description, chain, trust_anchor, utc_time, verify_result,
errors, out_pem=None):
"""Writes a test file that contains all the inputs necessary to run a
verification on a certificate chain"""
# Prepend the script name that generated the file to the description.
test_data = '[Created by: %s]\n\n%s\n' % (sys.argv[0], description)
# Write the certificate chain to the output file.
for cert in chain:
test_data += '\n' + cert.get_cert_pem()
test_data += '\n' + trust_anchor.get_pem()
test_data += '\n' + text_data_to_pem('TIME', utc_time)
verify_result_string = 'SUCCESS' if verify_result else 'FAIL'
test_data += '\n' + text_data_to_pem('VERIFY_RESULT', verify_result_string)
if errors is not None:
test_data += '\n' + text_data_to_pem('ERRORS', errors)
write_string_to_file(test_data, out_pem if out_pem else g_out_pem)
def write_string_to_file(data, path):
with open(path, 'w') as f:
f.write(data)
def init(invoking_script_path):
"""Creates an output directory to contain all the temporary files that may be
created, as well as determining the path for the final output. These paths
are all based off of the name of the calling script.
"""
global g_out_dir
global g_out_pem
# Base the output name off of the invoking script's name.
out_name = os.path.splitext(os.path.basename(invoking_script_path))[0]
# Strip the leading 'generate-'
if out_name.startswith('generate-'):
out_name = out_name[9:]
# Use an output directory with the same name as the invoking script.
g_out_dir = os.path.join('out', out_name)
# Ensure the output directory exists and is empty.
sys.stdout.write('Creating output directory: %s\n' % (g_out_dir))
shutil.rmtree(g_out_dir, True)
os.makedirs(g_out_dir)
g_out_pem = os.path.join('%s.pem' % (out_name))
def create_self_signed_root_certificate(name):
return Certificate(name, TYPE_CA, None)
def create_intermediate_certificate(name, issuer):
return Certificate(name, TYPE_CA, issuer)
def create_end_entity_certificate(name, issuer):
return Certificate(name, TYPE_END_ENTITY, issuer)
init(sys.argv[0])
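# Hedged usage sketch: a generate-*.py script importing this module would
# typically build a chain with the factories above (the names 'Root',
# 'Intermediate' and 'Target' are hypothetical):
#
#   root = create_self_signed_root_certificate('Root')
#   intermediate = create_intermediate_certificate('Intermediate', root)
#   target = create_end_entity_certificate('Target', intermediate)
#   chain = [target, intermediate]
#   write_test_file('Basic chain.', chain, TrustAnchor(root, constrained=False),
#                   DEFAULT_TIME, True, None)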
|
[
"xElvis89x@gmail.com"
] |
xElvis89x@gmail.com
|
c657f336a4f642a5b71465cc5a3ee23fb52d87c0
|
19585a907ab1e1dafb00e53cce4f1803e805f4a6
|
/src/camera/calibration.py
|
606e5c555ea23dd9a8e3856957e036d241351ecc
|
[] |
no_license
|
Angeall/pyConnect4NAO
|
00ad27370746f36480b42cb49690ce6d3dadfece
|
700d84b0cde2cb8e3e7d5ff2c5ca7858679b4e8d
|
refs/heads/master
| 2020-12-01T11:38:46.776337
| 2018-09-02T12:41:57
| 2018-09-02T12:41:57
| 43,453,113
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,927
|
py
|
import time
from connect4.connect4handler import *
from connect4.detector import front_holes as c4
from nao import data
from nao.controller.motion import MotionController
from nao.controller.video import VideoController
from utils import latex_generator
__author__ = 'Anthony Rouneau'
def get_nao_image(camera_num=0):
global nao_video, nao_motion
if nao_video is None:
nao_video = VideoController()
nao_motion = MotionController()
# clean()
ret = nao_video.connectToCamera(res=2, fps=30, camera_num=camera_num)
if ret < 0:
print "Could not open camera"
return None
return nao_video.getImageFromCamera()
connect4 = Connect4Handler(get_nao_image)
# connect4 = None
connect4_model = connect4.model
# connect4_model = None
detector = c4.FrontHolesDetector(connect4_model)
nao_video = None
nao_motion = None
def get_camera_information():
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
v_margin = 0.024
h_margin = 0.0135
square_length = 0.025
colors_boundaries = [
(np.array([0, 0, 0]), np.array([255, 80, 80])),
(np.array([0, 0, 0]), np.array([80, 255, 120])),
(np.array([0, 0, 0]), np.array([120, 80, 255]))]
color_names = ["Blue", "Green", "Red"]
# noinspection PyPep8
objp = np.zeros((6 * 9, 3), np.float32)
objp[:, 1:3][:, ::-1] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)
objp[:, 2] *= -1 # So it's left to right
objp[:, 2] += 8
objp *= square_length
    objp = np.add(objp, np.array([0, v_margin, h_margin]))
objp2 = np.zeros((6 * 9, 3), np.float32)
objp2[:, ::2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)
objp2 *= square_length
    objp2 = np.add(objp2, np.array([h_margin, 0, v_margin]))
objp3 = np.zeros((6 * 9, 3), np.float32)
objp3[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)
objp3 *= square_length
    objp3 = np.add(objp3, np.array([h_margin, h_margin, 0]))
objp = np.append(np.append(objp, objp2, axis=0), objp3, axis=0)
objpoints = [] # 3d point
imgpoints = [] # 2d point
finished = False
gray = None
# ctr = -1
while not finished:
img = get_nao_image(1)
if img is not None:
chessboards_not_found = False
chessboards_corners = [None, None, None]
# ctr += 1
# cv2.imwrite("../../values/calibration" + "_" + str(ctr) + ".png", img)
i = 0
img2 = img.copy()
for (lower, upper) in colors_boundaries:
mask = cv2.inRange(img2, lower, upper)
output = cv2.bitwise_and(img2, img2, mask=mask)
gray2 = cv2.cvtColor(output, cv2.COLOR_BGR2GRAY)
gray = cv2.bitwise_not(gray2, gray2)
color_name = color_names[i]
i += 1
# Find the chess board corners
ret, corners = cv2.findChessboardCorners(gray, (9, 6), None)
cv2.imshow(color_name, gray)
cv2.waitKey(500)
# If one of the chessboards is not detected, we break
if not ret:
chessboards_not_found = True
print "NOT FOUND", color_name
break
# If the chessboard is found, add object points, image points
else:
chessboards_corners[i - 1] = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
# cv2.putText(img, str(color_name), tuple(int(p) for p in corners[0]),
# cv2.FONT_HERSHEY_SIMPLEX, 1, tuple(colors_boundaries[i - 1][1]), 3)
# Draw and display the corners
cv2.drawChessboardCorners(img, (9, 6), corners, ret)
# If three chessboards have been detected
if not chessboards_not_found:
if geom.point_distance(chessboards_corners[0][0][0], chessboards_corners[1][0][0]) \
> geom.point_distance(chessboards_corners[0][45][0], chessboards_corners[1][0][0]):
chessboards_corners[0] = chessboards_corners[0][::-1]
if geom.point_distance(chessboards_corners[2][0][0], chessboards_corners[1][0][0]) \
> geom.point_distance(chessboards_corners[2][45][0], chessboards_corners[1][0][0]):
chessboards_corners[2] = chessboards_corners[2][::-1]
if geom.point_distance(chessboards_corners[1][0][0], chessboards_corners[2][0][0]) \
> geom.point_distance(chessboards_corners[1][45][0], chessboards_corners[2][0][0]):
chessboards_corners[1] = chessboards_corners[1][::-1]
chessboards_corners = np.append(np.append(chessboards_corners[0], chessboards_corners[1], axis=0),
chessboards_corners[2], axis=0)
print "3D Model"
print objp
print
print "Found Chessboard"
print chessboards_corners
objpoints.append(objp)
imgpoints.append(chessboards_corners)
cv2.imshow('img', img)
if cv2.waitKey(1) == 27: # ESC pressed ?
finished = True
if not finished:
# We wait 2 seconds so the operator can move the chessboard
time.sleep(2)
cv2.destroyAllWindows()
init_intrinsic = data.CAM_MATRIX
dist = data.CAM_DISTORSION
ret, mtx, disto, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], init_intrinsic, dist,
flags=cv2.CALIB_USE_INTRINSIC_GUESS)
return mtx, disto
def get_f_score(nb_grid_circles, nb_noise_circles):
total_circles = float((nb_grid_circles + nb_noise_circles))
if total_circles == 0 or nb_grid_circles == 0:
return 0
    precision = float(nb_grid_circles) / total_circles
    recall = float(nb_grid_circles) / 42.0
return (2 * precision * recall) / (precision + recall)
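# The value above is the usual F1 score, 2*P*R/(P+R): the harmonic mean of
# precision (fraction of detected circles that belong to the grid) and recall
# (fraction of the 42 Connect 4 holes that were detected).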
def calibration_param2(dist, images, must_latex=True):
global detector, connect4
titles = ["\\texttt{param2}", "Grid circles", "Noise circles",
"Total", "Score"]
results = []
counter = 0
max_radius = connect4.estimateMaxRadius(dist)
min_radius = connect4.estimateMinRadius(dist)
max_error = connect4.computeMaxPixelError(min_radius)
min_dist = int(min_radius * 1.195)
param1 = 60
for img in images:
table = []
best_value = []
best_score = -1000
# how many pixels for a circle radius on a 320x240px image when standing one meter away
param2 = 5.
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (3, 3), 0)
gray = cv2.medianBlur(gray, 3)
while param2 < 17:
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, min_dist, param1=param1, param2=param2,
minRadius=min_radius, maxRadius=max_radius)
if circles is None:
nb_of_grid_circles = 0
circles = [[]]
else:
try:
detector.runDetection(circles[0], pixel_error_margin=max_error)
nb_of_grid_circles = len(detector.relativeCoordinates)
except c4.CircleGridNotFoundException:
nb_of_grid_circles = 0
score = round(get_f_score(nb_of_grid_circles, len(circles[0]) - nb_of_grid_circles), 4)
if score > best_score:
best_score = score
best_value = [param2]
elif abs(score - best_score) < 0.00001:
best_value.append(param2)
line = [param2, nb_of_grid_circles, len(circles[0]) - nb_of_grid_circles, len(circles[0]), score]
table.append(line)
param2 += 0.25
results.append(best_value)
print "radius : image " + str(counter) + " finished"
if must_latex:
            latex_generator.generate_longtable(titles, "../../latex/generated_param2_" +
                                               str(dist) + "_" + str(counter), table)
counter += 1
return results
def plotting_param2(dist, images):
global detector, connect4
results = {}
counter = 0
max_radius = connect4.estimateMaxRadius(dist)
min_radius = connect4.estimateMinRadius(dist)
max_error = connect4.computeMaxPixelError(min_radius)
min_dist = int(min_radius * 1.195)
param1 = 60
for img in images:
# how many pixels for a circle radius on a 320x240px image when standing one meter away
param2 = 5.
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (3, 3), 0)
gray = cv2.medianBlur(gray, 3)
while param2 < 17:
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, min_dist, param1=param1, param2=param2,
minRadius=min_radius, maxRadius=max_radius)
if circles is None:
nb_of_grid_circles = 0
circles = [[]]
score = 0
else:
# circles = np.uint16(np.around(circles))
try:
detector.runDetection(circles[0], pixel_error_margin=max_error)
nb_of_grid_circles = len(detector.relativeCoordinates)
score = round(get_f_score(nb_of_grid_circles, len(circles[0]) - nb_of_grid_circles), 4)
except c4.CircleGridNotFoundException:
score = 0
nb_of_grid_circles = 0
param2 += 0.25
key = str(round(param2, 2))
if key in results:
results[key].append(score)
else:
results[key] = [score]
print "param2 : image " + str(counter) + " finished"
counter += 1
return results
def calibration_param1(dist, images, must_latex=True):
global detector, connect4
titles = ["\\texttt{param1}", "Grid circles", "Noise circles",
"Total", "Score"]
results = []
counter = 0
min_radius = connect4.estimateMinRadius(dist)
max_radius = connect4.estimateMaxRadius(dist)
max_error = connect4.computeMaxPixelError(min_radius)
min_dist = int(min_radius * 1.195)
param2 = 10.5
for img in images:
table = []
best_value = []
best_score = 0
# how many pixels for a circle radius on a 320x240px image when standing one meter away
param1 = 30
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (3, 3), 0)
gray = cv2.medianBlur(gray, 3)
while param1 < 200:
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, min_dist, param1=param1, param2=param2,
minRadius=min_radius, maxRadius=max_radius)
if circles is None:
score = 0
nb_of_grid_circles = 0
circles = [[]]
else:
try:
detector.runDetection(circles[0], pixel_error_margin=max_error)
nb_of_grid_circles = len(detector.relativeCoordinates)
score = round(get_f_score(nb_of_grid_circles, len(circles[0]) - nb_of_grid_circles), 4)
except c4.CircleGridNotFoundException:
score = 0
nb_of_grid_circles = 0
if score > best_score:
best_score = score
best_value = [param1]
elif abs(score - best_score) < 0.00001:
best_value.append(param1)
line = [param1, nb_of_grid_circles, len(circles[0]) - nb_of_grid_circles, len(circles[0]), score]
table.append(line)
param1 += 1
results.append(best_value)
print "param1 : image " + str(counter) + " finished"
if must_latex:
latex_generator.generate_longtable(titles, "../../latex/generated_param1_" +
str(dist) + "_" + str(counter), table)
counter += 1
return results
def plotting_param1(dist, images):
global detector, connect4
results = {}
counter = 0
min_radius = connect4.estimateMinRadius(dist)
max_radius = connect4.estimateMaxRadius(dist)
max_error = connect4.computeMaxPixelError(min_radius)
min_dist = int(min_radius * 1.195)
param2 = 10.5
for img in images:
best_score = -1
# how many pixels for a circle radius on a 320x240px image when standing one meter away
param1 = 30
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (3, 3), 0)
gray = cv2.medianBlur(gray, 3)
while param1 < 200:
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, min_dist, param1=param1, param2=param2,
minRadius=min_radius, maxRadius=max_radius)
if circles is None:
score = 0
nb_of_grid_circles = 0
circles = [[]]
else:
                try:
                    detector.runDetection(circles[0], pixel_error_margin=max_error)
                    nb_of_grid_circles = len(detector.relativeCoordinates)
                    score = round(get_f_score(nb_of_grid_circles, len(circles[0]) - nb_of_grid_circles), 4)
                except c4.CircleGridNotFoundException:
                    score = 0
                    nb_of_grid_circles = 0
param1 += 1
if param1 in results:
results[param1].append(score)
else:
results[param1] = [score]
print "param1 : image " + str(counter) + " finished"
counter += 1
return results
def calibration_radius_error(dist, images, must_latex=True):
global detector, connect4
titles = ["\\texttt{minRadius}", "\\texttt{maxRadius}", "\\texttt{minDist}", "Grid circles", "Noise circles",
"Total", "Score"]
results = []
counter = 0
factor = 3.0 * dist
for img in images:
table = []
best_score = -1000
# how many pixels for a circle radius on a 320x240px image when standing one meter away
one_meter_value = 6
best_value = []
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (3, 3), 0)
gray = cv2.medianBlur(gray, 3)
while one_meter_value < 8:
dist_value = int(round(one_meter_value / dist))
upper_bound = (dist_value + 1)
while upper_bound < (factor * one_meter_value) / dist:
lower_bound = (dist_value - 1)
while lower_bound > (one_meter_value / factor) / dist:
min_radius = int(lower_bound)
max_radius = int(upper_bound)
max_error = connect4.computeMaxPixelError(min_radius)
min_dist = round(lower_bound * 1.125, 2)
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, min_dist, param1=48, param2=10.5,
minRadius=min_radius, maxRadius=max_radius)
if circles is None:
score = 0
nb_of_grid_circles = 0
circles = [[]]
else:
# circles = np.uint16(np.around(circles))
try:
detector.runDetection(circles[0], pixel_error_margin=max_error)
nb_of_grid_circles = len(detector.relativeCoordinates)
score = round(get_f_score(nb_of_grid_circles, len(circles[0]) - nb_of_grid_circles), 4)
except c4.CircleGridNotFoundException:
score = 0
nb_of_grid_circles = 0
if score > best_score:
best_score = score
best_value = [(min_radius, max_radius)]
elif abs(score - best_score) < 0.00001:
best_value.append((min_radius, max_radius))
line = [lower_bound, upper_bound, min_dist, nb_of_grid_circles,
len(circles[0]) - nb_of_grid_circles, len(circles[0]), score]
table.append(line)
lower_bound -= 1
upper_bound += 1
one_meter_value += 1
print "radius : image " + str(counter) + " finished"
results.append(best_value)
if must_latex:
latex_generator.generate_longtable(titles, "../../latex/generated_radius_" +
str(dist) + "_" + str(counter), table)
counter += 1
return results
def get_images(dist):
global nao_video
nao_video = VideoController()
nao_video.unsubscribeAllCameras()
nao_video.connectToCamera(res=1, fps=5, camera_num=0)
images = []
max_time = 15
start = time.time()
current = time.time()
while current - start < max_time:
images.append(nao_video.getImageFromCamera())
current = time.time()
for i, img in enumerate(images):
cv2.imwrite("../../../latex/img/" + str(dist) + "m/img_" + str(i) + ".png", img)
return images
def evaluate(best_values, param, dist):
scores = {}
titles = ["\\texttt{param" + param + "}", "Occurrences"]
table = []
for iteration in best_values:
for value in iteration:
if value in scores:
scores[value] += 1
else:
scores[value] = 1
for value in scores:
line = [value, scores[value]]
table.append(line)
latex_generator.generate_longtable(titles, "../../latex/value/" + str(param) + "_" + str(dist), table)
return best_values
def load_images(dist):
images = []
for i in range(40):
filename = "../../latex/img/" + str(dist) + "m/img_" + str(i) + ".png"
images.append(cv2.imread(filename))
return images
def prepare_plot(scores, param_name):
data_file = open("../../plot/" + param_name + ".dat", 'w')
big_dict = {}
for dico in scores:
for key in dico:
if key in big_dict:
big_dict[key].extend(dico[key])
else:
big_dict[key] = dico[key]
data = "#" + param_name + " mean var\n"
for key in big_dict:
mean = round(np.mean(big_dict[key]), 4)
var = round(np.var(big_dict[key]), 4)
data += str(key) + " " + str(mean) + " " + str(var) + '\n'
data_file.write(data)
data_file.close()
if __name__ == "__main__":
# dists = [0.4, 0.5, 1, 1.5, 2, 2.5, 3]
# image = get_images(dist)
# scores2 = []
# scores1 = []
# for dist in dists:
# print "-" * 20 + str(dist) + "-" * 20
# image = load_images(dist)
# print evaluate(calibration_radius_error(dist, image), "(minRadius, maxRadius)", dist)
# print evaluate(calibration_param1(dist, image), "param1", dist)
# print evaluate(calibration_param2(dist, image), "param2", dist)
# scores1.append(plotting_param1(dist, image))
# scores2.append(plotting_param2(dist, image))
# prepare_plot(scores1, "param1")
# prepare_plot(scores2, "param2")
camera_file = open("../../values/" + "camera_information" + ".dat", 'w')
cam_mat, cam_disto = get_camera_information()
camera_file.write(str(cam_mat) + "\n\n" + str(cam_disto))
camera_file.close()
|
[
"angeal1105@gmail.com"
] |
angeal1105@gmail.com
|
02227982cc2b018d86216d14543fac9f959ed702
|
950be9cb9c532f297481306981e8b1c8febbed9a
|
/Volume 12/src/Wedding.py
|
39b8f225cc071e88a111b4fae66fb0f6dd279b4f
|
[] |
no_license
|
MyCatWantsToKillYou/TimusTasks
|
9b075b19d54cf048517c0f6e08b509fd51e8a8a5
|
8064e0ca6671ec4cfa5c1d588d6d714562afa69a
|
refs/heads/master
| 2023-07-07T19:20:46.508301
| 2023-06-29T10:54:30
| 2023-06-29T10:54:30
| 132,224,104
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 215
|
py
|
# task #2100
# Difficulty 34
guests = 2
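# the bride and groom are counted up front; every reply ending in '+one'
# brings a companion. Dinner costs 100 per guest, but a party of exactly 13
# is avoided by paying for a 14th seat (hence the fixed 1400 below).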
for i in range(int(input())):
if input().endswith('+one'):
guests += 2
else:
guests += 1
if guests == 13:
print(1400)
else:
print(guests*100)
|
[
"mycatwantstokill@gmail.com"
] |
mycatwantstokill@gmail.com
|
bdb208604c15f266ea799f75d839c733e3a8fddc
|
3504013035e71eb9a745b0e5ba1d5ce0a2167f47
|
/REST/uriReverser.py
|
2f61da4ab298a3684dc615d186a2765eedb1bbbe
|
[] |
no_license
|
P79N6A/ProgrammingForIot
|
7f5eea3e90090b5610380d715cc5d33b45acbed6
|
9c938b87793a768dd62aea67535fbdaa34ba0d26
|
refs/heads/master
| 2020-07-07T00:45:05.653567
| 2019-08-19T14:15:03
| 2019-08-19T14:15:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 416
|
py
|
import cherrypy
class UriReverser(object):
"""docstring for Reverser"""
exposed=True
def __init__(self):
pass
    def GET(self, *uri):
        text = uri[0]
        return text[::-1]
if __name__ == '__main__':
    conf = {
        '/': {
            'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
            'tools.sessions.on': True
        }
    }
cherrypy.tree.mount(UriReverser(),'/',conf)
cherrypy.engine.start()
cherrypy.engine.block()
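# Hedged usage note: with MethodDispatcher the URL path segments arrive as the
# *uri arguments, so (on CherryPy's default port 8080)
#   GET http://localhost:8080/hello  ->  "olleh"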
|
[
"matteo.orlando.1993@gmail.com"
] |
matteo.orlando.1993@gmail.com
|
f590886afbe3de74837afb0a874c140e34a89032
|
b512a6107ef3f8dfd8d88a76f0fad1f3a555c8d6
|
/ui.py
|
692f8fac949d6d09d79011e3ad9ae46782e95ed5
|
[
"MIT"
] |
permissive
|
wang1ang/slide-transition-detector
|
3bfddd40c6b1f272506d738efaead6fef3a2f4e5
|
180673fe8f864afb514859558a951c57021dc757
|
refs/heads/master
| 2020-12-11T01:06:53.675008
| 2020-01-16T13:33:31
| 2020-01-16T13:37:24
| 233,761,289
| 0
| 0
|
MIT
| 2020-01-14T05:09:30
| 2020-01-14T05:09:30
| null |
UTF-8
|
Python
| false
| false
| 1,386
|
py
|
import progressbar as pb
class ProgressController(object):
"""
    Controls the ProgressBar UI to indicate the progress
of a process.
"""
def __init__(self, title, total=100):
"""
The default initializer.
:param title: the title of the progress bar
:param total: the maximum value of the progress
"""
self.widgets = [title, pb.Percentage(), ' - ', pb.Bar(), ' ']
self.total = total
self.progress = None
def start(self):
"""
        Prints a new line after starting, to separate the progress bar from
        the rest of the output.
Then prints the progress bar UI.
"""
self.progress = pb.ProgressBar(widgets=self.widgets,maxval=self.total).start()
def update(self, i):
"""
Updates the progress bar according to the parameter i.
:param i: The progress of the process
"""
assert self.progress is not None
self.progress.update(i)
    def increment(self, step=1):
        """Advances the progress bar by the given step."""
        assert self.progress is not None
        self.progress.update(self.progress.currval + step)
def finish(self):
"""
Stops updating the progress bar. And show an indication that
it's finished. Also prints an empty line after the progress bar.
"""
assert self.progress is not None
self.progress.finish()
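# Hedged usage sketch (the title string and total below are hypothetical):
#   progress = ProgressController('Detecting transitions: ', total=100)
#   progress.start()
#   for i in range(100):
#       progress.update(i + 1)   # or progress.increment()
#   progress.finish()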
|
[
"renebrandel@outlook.com"
] |
renebrandel@outlook.com
|
f1c840d19445eeef090245ac4f85f288343f1713
|
f5863cf378bce80d3aa459941dff79ea3c8adf5d
|
/SWEA/D1/SWEA_2027.py
|
5de36054728c8fc928830a79f90ad47491c04482
|
[] |
no_license
|
Taeg92/Problem_solving
|
815c13ae7895708948482eeb05411322be00ac12
|
15c0fe0eda4f77d974451777cb01d10882d8aaa9
|
refs/heads/master
| 2021-11-18T22:03:21.727840
| 2021-09-06T14:21:09
| 2021-09-06T14:21:09
| 235,335,532
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 304
|
py
|
# Problem [2027] : print the given text exactly as shown.
# <Output>
# #++++
# +#+++
# ++#++
# +++#+
# ++++#
string = '++++'
string_list = list(string)
for i in range(len(string) + 1) :
string_list.insert(i,'#')
print(''.join(map(str,string_list)))
string_list = list(string)
|
[
"gtg92t@gmail.com"
] |
gtg92t@gmail.com
|
92e5c5dbd4af7ce474e5516560051834dfd86205
|
1eafc296c07e78d327f46e8ea58913ce4416940e
|
/test/test_optim.py
|
10f2e7fee03a135914df5f59ff29a78307be760a
|
[
"MIT"
] |
permissive
|
colinmatthewgeorge87/CrypTen
|
9dd4a15e0b4ea4700aaa6c0a91b1acfc6f3465ef
|
85db37fe555f5fde6117a67081bf7ee7fac67d97
|
refs/heads/master
| 2022-12-20T00:41:24.787004
| 2020-10-08T16:18:43
| 2020-10-08T16:20:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,942
|
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import logging
from test.multiprocess_test_case import MultiProcessTestCase, get_random_test_tensor
import crypten
import torch
from crypten.common.tensor_types import is_float_tensor
class TestOptim(object):
"""
This class tests the crypten.optim package.
"""
def _check(self, encrypted_tensor, reference, msg, tolerance=None):
if tolerance is None:
tolerance = getattr(self, "default_tolerance", 0.05)
tensor = encrypted_tensor.get_plain_text()
# Check sizes match
self.assertTrue(tensor.size() == reference.size(), msg)
if is_float_tensor(reference):
diff = (tensor - reference).abs_()
norm_diff = diff.div(tensor.abs() + reference.abs()).abs_()
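            # pass when either the relative error or a stricter absolute bound
            # holds element-wise (summing the boolean masks acts as an OR)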
test_passed = norm_diff.le(tolerance) + diff.le(tolerance * 0.2)
test_passed = test_passed.gt(0).all().item() == 1
else:
test_passed = (tensor == reference).all().item() == 1
if not test_passed:
logging.info(msg)
logging.info("Result: %s" % tensor)
logging.info("Reference: %s" % reference)
self.assertTrue(test_passed, msg=msg)
def test_sgd(self):
lr_vals = [0.01, 0.1, 0.5]
momentum_vals = [0.0, 0.1, 0.9]
dampening_vals = [0.0, 0.01, 0.1]
weight_decay_vals = [0.0, 0.9, 1.0]
nesterov_vals = [False, True]
torch_model = torch.nn.Linear(10, 2)
torch_model.weight = torch.nn.Parameter(
get_random_test_tensor(size=torch_model.weight.size(), is_float=True)
)
torch_model.bias = torch.nn.Parameter(
get_random_test_tensor(size=torch_model.bias.size(), is_float=True)
)
crypten_model = crypten.nn.Linear(10, 2)
crypten_model.set_parameter("weight", torch_model.weight)
crypten_model.set_parameter("bias", torch_model.bias)
crypten_model.encrypt()
for lr, momentum, dampening, weight_decay, nesterov in itertools.product(
lr_vals, momentum_vals, dampening_vals, weight_decay_vals, nesterov_vals
):
kwargs = {
"lr": lr,
"momentum": momentum,
"weight_decay": weight_decay,
"dampening": dampening,
"nesterov": nesterov,
}
if nesterov and (momentum <= 0 or dampening != 0):
with self.assertRaises(ValueError):
crypten.optim.SGD(crypten_model.parameters(), **kwargs)
continue
torch_optimizer = torch.optim.SGD(torch_model.parameters(), **kwargs)
crypten_optimizer = crypten.optim.SGD(crypten_model.parameters(), **kwargs)
x = get_random_test_tensor(size=(10,), is_float=True)
y = torch_model(x).sum()
y.backward()
xx = crypten.cryptensor(x)
yy = crypten_model(xx).sum()
yy.backward()
torch_optimizer.step()
crypten_optimizer.step()
torch_params = list(torch_model.parameters())
crypten_params = list(crypten_model.parameters())
for i in range(len(torch_params)):
self._check(
crypten_params[i], torch_params[i], "Parameter update mismatch"
)
class TestTFP(MultiProcessTestCase, TestOptim):
def setUp(self):
self._original_provider = crypten.mpc.get_default_provider()
crypten.mpc.set_default_provider(crypten.mpc.provider.TrustedFirstParty)
super(TestTFP, self).setUp()
def tearDown(self):
crypten.mpc.set_default_provider(self._original_provider)
super(TestTFP, self).tearDown()
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
e3c911c87b6481f89317a10962201331564467a5
|
c92ba7451cc124995810be4a4dc2c381ca224431
|
/python/isogram/isogram.py
|
ae4bc079aa6a659cd7d779ed48a754a6ff91e9bb
|
[] |
no_license
|
gavinhenderson/exercism.io
|
5fc72d1d2b7e51825101ad9f73a499f98523fc09
|
97e4c5ff8c45b5e950ed90ca5598fa00f19988ec
|
refs/heads/master
| 2021-06-04T17:25:49.845826
| 2020-08-01T21:33:52
| 2020-08-01T21:33:52
| 112,458,049
| 1
| 1
| null | 2020-07-17T11:31:41
| 2017-11-29T09:58:53
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 366
|
py
|
def is_isogram(string):
    occurrences = {}
    for c in string:
        occurrences[c.lower()] = 0
    for c in string:
        occurrences[c.lower()] += 1
    for key, value in occurrences.items():
        if key == " " or key == "-":
            continue
        if value > 1:
            return False
    return True
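# An equivalent sketch using the standard library (assuming only spaces and
# hyphens should be ignored, as above):
#   from collections import Counter
#   def is_isogram_counter(s):
#       counts = Counter(c.lower() for c in s if c not in ' -')
#       return all(v == 1 for v in counts.values())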
|
[
"gavin.henderson@hotmail.co.uk"
] |
gavin.henderson@hotmail.co.uk
|
41291e89a856816efc62d9bc82a751e9a275a097
|
51e683018a817037dcd7d8eae048cd9e363fb906
|
/MachineLearning/venv/Scripts/pip3-script.py
|
c5e6cec4e06abe66e7cb887b34519cd9c66e0901
|
[] |
no_license
|
alexmurat/PycharmProjects
|
25b0c20da52faa891afa6a44fdc0b3ae2774628e
|
5f0873ba38fe2e0e9d165b3aecdf075885fd3e62
|
refs/heads/master
| 2020-03-27T05:05:04.979118
| 2018-08-24T13:21:12
| 2018-08-24T13:21:12
| 145,991,843
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 420
|
py
|
#!C:\Users\alex\PycharmProjects\MachineLearning\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
|
[
"alexmurat@outlook.com"
] |
alexmurat@outlook.com
|
2b5e762b1efab72f9836ba910d087261611860df
|
7b502435a9a3cab0b0ab2c3233a4bdde69b0b97a
|
/pdfreader.py
|
fc7216b0f93683c0b87c36e2abdc26f933b9e2d6
|
[] |
no_license
|
narendranani/python-utilities
|
e1e91d765c5498418f30fb0573ba0129f18a0f18
|
1664b58a208aa3e67a3f1be3ba65a8e91a310c20
|
refs/heads/master
| 2020-05-17T10:16:46.742433
| 2019-05-20T09:40:51
| 2019-05-20T09:40:51
| 183,653,110
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 484
|
py
|
# importing required modules
import PyPDF2
# creating a pdf file object
pdfFileObj = open(r'E:/Downloads/pygrametl.pdf', 'rb')
# creating a pdf reader object
pdfReader = PyPDF2.PdfFileReader(pdfFileObj)
# printing number of pages in pdf file
# print(pdfReader.numPages)
# creating a page object
pageObj = pdfReader.getPage(0)
# extracting text from page and read first line of the page
print(pageObj.extractText().splitlines()[0])
# closing the pdf file object
pdfFileObj.close()
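# Note: the calls above use the pre-2.0 PyPDF2 API. Newer releases (PyPDF2 3.x
# and its successor pypdf) spell these PdfReader(...), reader.pages[0] and
# page.extract_text(); treat the exact names as version-dependent.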
|
[
"34296972+Narendrak1154@users.noreply.github.com"
] |
34296972+Narendrak1154@users.noreply.github.com
|
9df450e2dba5385146d847a6cc0301e2c3b6550d
|
9ada76eea19ec3a7a74b6e41a5945674bd9b6037
|
/Strategies/ma_cross_strategy.py
|
665ed9efdb73fe7de3dddd10cc03a00cf6965815
|
[] |
no_license
|
cove9988/PyTrader-python-mt4-mt5-trading-api-connector-drag-n-drop
|
b1f8df2d3408d177d9321cb31e8affa1aac5cc71
|
4212cd520918814e80c668de9f948277872d50e9
|
refs/heads/master
| 2023-08-19T18:22:47.488284
| 2021-10-11T15:53:50
| 2021-10-11T15:53:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,512
|
py
|
# -*- coding: utf-8 -*-
'''
This script is meant as an example of how to use the Pytrader_API in live trading.
The logic is a simple crossing of two SMAs (simple moving averages).
'''
import time
import pandas as pd
#import talib as ta
from utils.Pytrader_API_V1_06a import Pytrader_API
from utils.LogHelper import Logger # for logging events
# pandas-based fallback so talib is not required
def calculate_simple_moving_average(series: pd.Series, n: int=20) -> pd.Series:
"""Calculates the simple moving average"""
return series.rolling(n).mean()
log = Logger()
log.configure()
# settings
timeframe = 'M5'
instrument = 'EURUSD'
server_IP = '127.0.0.1'
server_port = 1110 # check port number
SL_in_pips = 20
TP_in_pips = 10
volume = 0.01
slippage = 5
magicnumber = 1000
multiplier = 10000 # multiplier for calculating SL and TP; for JPY pairs it should have the value of 100
if instrument.find('JPY') >= 0:
multiplier = 100.0
sma_period_1 = 9
sma_period_2 = 16
date_value_last_bar = 0
number_of_bars = 100
# Create a simple lookup table; with the demo API only the following instruments can be traded
brokerInstrumentsLookup = {
'EURUSD': 'EURUSD',
'AUDCHF': 'AUDCHF',
'NZDCHF': 'NZDCHF',
'GBPNZD': 'GBPNZD',
'USDCAD': 'USDCAD'}
# Define pytrader API
MT = Pytrader_API()
connection = MT.Connect(server_IP, server_port, brokerInstrumentsLookup)
forever = True
if (connection == True):
log.debug('Strategy started')
while(forever):
# retrieve open positions
positions_df = MT.Get_all_open_positions()
        # if there are open positions, check whether to close them when SL and/or TP are defined.
        # SL/TP are kept hidden (managed by this script, not sent to the broker)
        # first we need the actual bar info
actual_bar_info = MT.Get_actual_bar_info(instrument=instrument, timeframe=MT.get_timeframe_value(timeframe))
if (len(positions_df) > 0):
for position in positions_df.itertuples():
if (position.instrument == instrument and position.position_type == 'buy' and TP_in_pips > 0.0 and position.magic_number == magicnumber):
tp = position.open_price + TP_in_pips / multiplier
if (actual_bar_info['close'] > tp):
# close the position
MT.Close_position_by_ticket(ticket=position.ticket)
log.debug('trade with ticket ' + str(position.ticket) + ' closed in profit')
elif (position.instrument == instrument and position.position_type == 'buy' and SL_in_pips > 0.0 and position.magic_number == magicnumber):
sl = position.open_price - SL_in_pips / multiplier
if (actual_bar_info['close'] < sl):
# close the position
MT.Close_position_by_ticket(ticket=position.ticket)
log.debug('trade with ticket ' + str(position.ticket) + ' closed in loss')
elif (position.instrument == instrument and position.position_type == 'sell' and TP_in_pips > 0.0 and position.magic_number == magicnumber):
tp = position.open_price - TP_in_pips / multiplier
if (actual_bar_info['close'] < tp):
# close the position
MT.Close_position_by_ticket(ticket=position.ticket)
log.debug('trade with ticket ' + str(position.ticket) + ' closed in profit')
elif (position.instrument == instrument and position.position_type == 'sell' and SL_in_pips > 0.0 and position.magic_number == magicnumber):
sl = position.open_price + SL_in_pips / multiplier
if (actual_bar_info['close'] > sl):
# close the position
MT.Close_position_by_ticket(ticket=position.ticket)
log.debug('trade with ticket ' + str(position.ticket) + ' closed in loss')
        # check the conditions for opening a trade/position only when we have a new bar;
        # at start the check is done immediately.
        # date values are in seconds since 1970 (epoch),
        # which is fine for comparing two dates
if (actual_bar_info['date'] > date_value_last_bar):
date_value_last_bar = actual_bar_info['date']
# new bar, so read last x bars
bars = MT.Get_last_x_bars_from_now(instrument=instrument, timeframe=MT.get_timeframe_value(timeframe), nbrofbars=number_of_bars)
# convert to dataframe
df = pd.DataFrame(bars)
df.rename(columns = {'tick_volume':'volume'}, inplace = True)
df['date'] = pd.to_datetime(df['date'], unit='s')
            # add the two SMAs to the dataframe, using talib or the local helper
#df.insert(0, column='sma_1', value=ta.SMA(df['close'], timeperiod=sma_period_1))
#df.insert(0, column='sma_2', value=ta.SMA(df['close'], timeperiod=sma_period_2))
df.insert(0, column='sma_1', value=calculate_simple_moving_average(df['close'], n = sma_period_1))
df.insert(0, column='sma_2', value=calculate_simple_moving_average(df['close'], n = sma_period_2))
index = len(df) - 2
# conditions will be checked on bar [index] and [index-1]
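            # (len(df) - 1 would presumably be the bar still forming, so the
            # cross is evaluated on the last two completed bars)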
if (df['sma_1'][index] > df['sma_2'][index] and df['sma_1'][index-1] < df['sma_2'][index-1]): # buy condition
buy_OK = MT.Open_order(instrument=instrument,
ordertype='buy',
volume = volume,
openprice=0.0,
slippage = slippage,
magicnumber = magicnumber,
stoploss=0.0,
takeprofit=0.0,
comment='strategy_1')
if (buy_OK > 0):
log.debug('Buy trade opened')
                # check whether a sell position is active; if so, close it
for position in positions_df.itertuples():
if (position.instrument== instrument and position.position_type== 'sell' and position.magic_number == magicnumber):
# close
close_OK = MT.Close_position_by_ticket(ticket=position.ticket)
log.debug('closed sell trade due to cross and opening buy trade')
if (df['sma_1'][index] < df['sma_2'][index] and df['sma_1'][index-1] > df['sma_2'][index-1]): # sell condition
sell_OK = MT.Open_order(instrument=instrument,
ordertype='sell',
volume = volume,
openprice=0.0,
slippage = slippage,
magicnumber = magicnumber,
stoploss=0.0,
takeprofit=0.0,
comment='strategy_1')
if (sell_OK > 0):
log.debug('Sell trade opened')
                # check whether a buy position is active; if so, close it
for position in positions_df.itertuples():
if (position.instrument == instrument and position.position_type == 'buy' and position.magic_number == magicnumber):
# close
close_OK = MT.Close_position_by_ticket(ticket=position.ticket)
log.debug('closed buy trade due to cross and opening sell trade')
# wait 2 seconds
time.sleep(2)
# check if still connected to MT terminal
still_connected = MT.Check_connection()
if (still_connected == False):
forever = False
print('Loop stopped')
log.debug('Loop stopped')
|
[
"noreply@github.com"
] |
cove9988.noreply@github.com
|
8691ac1cb08a2fc78e360882aa4bf300a67e2efc
|
4d78f5a3db4bb9622eafaa32c3ad6bf3f54f6aa7
|
/pylib/Tools/Fetch/Pip.py
|
7337b47408ec3edd397b8d782c76a02e709fd3f7
|
[
"BSD-3-Clause",
"Artistic-1.0",
"BSD-3-Clause-Open-MPI"
] |
permissive
|
DebRez/mtt
|
da0ac7663f98dea5a12a2c983ccb896a93fa3007
|
3eafe8661c94d4f6c51b5f3b7462250b465d3799
|
refs/heads/master
| 2020-04-05T23:01:08.434722
| 2019-07-24T19:48:37
| 2019-07-24T19:53:53
| 60,280,533
| 1
| 0
| null | 2016-09-17T14:19:02
| 2016-06-02T16:38:04
|
Perl
|
UTF-8
|
Python
| false
| false
| 5,320
|
py
|
# -*- coding: utf-8; tab-width: 4; indent-tabs-mode: f; python-indent: 4 -*-
#
# Copyright (c) 2015-2019 Intel, Inc. All rights reserved.
# Copyright (c) 2017-2018 Los Alamos National Security, LLC. All rights
# reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
import os
from urllib.parse import urlparse
from FetchMTTTool import *
from distutils.spawn import find_executable
import sys
import shutil
import subprocess
## @addtogroup Tools
# @{
# @addtogroup Fetch
# @section Pip
# Plugin for fetching and locally installing pkgs from the Web
# @param pkg Package to be installed
# @param sudo Superuser authority required
# @param userloc Install locally for the user instead of in system locations
# @param cmd Command to use for pip (e.g., "pip3")
# @}
class Pip(FetchMTTTool):
def __init__(self):
# initialise parent class
FetchMTTTool.__init__(self)
self.activated = False
# track the repos we have processed so we
# don't do them multiple times
self.done = {}
self.options = {}
self.options['pkg'] = (None, "Package to be installed")
self.options['sudo'] = (False, "Superuser authority required")
self.options['userloc'] = (True, "Install locally for the user instead of in system locations")
self.options['cmd'] = ("pip", "Command to use for pip (e.g., \"pip3\")")
return
def activate(self):
if not self.activated:
# use the automatic procedure from IPlugin
IPlugin.activate(self)
return
def deactivate(self):
IPlugin.deactivate(self)
return
def print_name(self):
return "Pip"
def print_options(self, testDef, prefix):
lines = testDef.printOptions(self.options)
for line in lines:
print(prefix + line)
return
def execute(self, log, keyvals, testDef):
testDef.logger.verbose_print("Pip Execute")
# parse any provided options - these will override the defaults
cmds = {}
testDef.parseOptions(log, self.options, keyvals, cmds)
        # check that they gave us a pkg name
        try:
            pkg = cmds['pkg']
            if pkg is None:
                raise KeyError
        except KeyError:
            log['status'] = 1
            log['stderr'] = "No PKG was provided"
            return
testDef.logger.verbose_print("Install pkg " + pkg)
# check to see if we have already processed this pkg
try:
if self.done[pkg] is not None:
log['status'] = self.done[pkg]
log['stdout'] = "PKG " + pkg + " has already been processed"
return
except KeyError:
pass
# look for the executable in our path - this is
# a standard system executable so we don't use
# environmental modules here
if not find_executable("pip"):
log['status'] = 1
log['stderr'] = "Executable pip not found"
return
# see if the pkg has already been installed on the system
testDef.logger.verbose_print("checking system for pkg: " + pkg)
qcmd = []
if cmds['sudo']:
qcmd.append("sudo")
qcmd.append(cmds['cmd'])
qcmd.append("show")
qcmd.append(pkg)
results = testDef.execmd.execute(None, qcmd, testDef)
if 0 == results['status']:
log['status'] = 0
log['stdout'] = "PKG " + pkg + " already exists on system"
# Find the location
for t in results['stdout']:
if t.startswith("Location"):
log['location'] = t[10:]
break
return
# setup to install
icmd = []
if cmds['sudo']:
icmd.append("sudo")
icmd.append(cmds['cmd'])
icmd.append("install")
if cmds['userloc']:
icmd.append("--user")
icmd.append(pkg)
testDef.logger.verbose_print("installing package " + pkg)
results = testDef.execmd.execute(None, icmd, testDef)
if 0 != results['status']:
log['status'] = 1
log['stderr'] = "install of " + pkg + " FAILED"
return
# record the result
log['status'] = results['status']
log['stdout'] = results['stdout']
log['stderr'] = results['stderr']
# Find where it went
results = testDef.execmd.execute(None, qcmd, testDef)
if 0 == results['status']:
# Find the location
for t in results['stdout']:
if t.startswith("Location"):
log['location'] = t[10:]
try:
# Prepend the location to PYTHONPATH if it exists in environ
pypath = ":".join([log['location'], os.environ['PYTHONPATH']])
except:
pypath = log['location']
os.environ['PYTHONPATH'] = pypath
break
# track that we serviced this one
self.done[pkg] = results['status']
return
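# --- Editorial sketch: how the "Location" parsing above works. `pip show`
# prints a line of the form "Location: <path>", so t[10:] drops the
# 10-character "Location: " prefix. The sample output below is hypothetical.
sample_stdout = [
    "Name: requests",
    "Version: 2.31.0",
    "Location: /usr/lib/python3/dist-packages",
]
for t in sample_stdout:
    if t.startswith("Location"):
        print(t[10:])  # -> /usr/lib/python3/dist-packages
        break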
|
[
"rhc@pmix.org"
] |
rhc@pmix.org
|
4f92e4070900f0a4426bda18222ab588b9ba2d05
|
8fd17106d80f12df907b91943de174a970c65ffc
|
/venv/bin/chardetect
|
a72a30c54121a60436c07d6982a54dc8ea89529e
|
[] |
no_license
|
atanuc073/django-Stock-Market-App
|
fd320184e21011b9b74fadcd8162c0da16db2049
|
db870be89ccf85df8d02a0c54c6991549c63d9b9
|
refs/heads/master
| 2020-12-20T18:50:47.862423
| 2020-03-25T13:50:02
| 2020-03-25T13:50:02
| 236,175,421
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 255
|
#!/home/atanuc73/python/djangoStock/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from chardet.cli.chardetect import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
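# --- Editorial note: what the re.sub() above does -- it strips a trailing
# "-script.pyw" or ".exe" suffix (added by Windows entry-point wrappers)
# from argv[0], so usage messages show a clean program name. The example
# names below are illustrative.
import re
for name in ('chardetect-script.pyw', 'chardetect.exe', 'chardetect'):
    print(re.sub(r'(-script\.pyw|\.exe)?$', '', name))  # 'chardetect' each time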
|
[
"atanuc073@gmail.com"
] |
atanuc073@gmail.com
|
|
ae184e104a6ab27de8dee8a0135f956161e447a3
|
55510973cd284b8f4977757fb8a5d425e5c6ca61
|
/HW04/HW04.py
|
40910ed7c04d1f008a11657bb4969fe9a10fbb58
|
[] |
no_license
|
KirillKonovalov/hse_python_hw
|
9206eb4328bec622d57020bbb00cee440e0f3181
|
a9213535c545f2c8943e805841229f1cbda8b6b8
|
refs/heads/master
| 2022-11-07T07:01:16.472874
| 2020-05-11T16:28:08
| 2020-05-11T16:28:08
| 211,169,185
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,635
|
py
|
import re

def professors(filename):
    # The regex matches the Russian Wikipedia infobox row for "Преподаватели"
    # (professors), so the Cyrillic text inside the pattern must stay as-is.
    with open(filename, 'r', encoding='utf-8') as file:
        text = file.read()
    result = re.findall('<th class="plainlist">Преподавател.+</th>\n<td class="plainlist">\n(\d+)', text)
    with open('professors.tsv', 'a', encoding='utf-8') as rslt:
        data = rslt.write(str(result[0]) + '\t')
    print('Number of professors:', result[0])

filename1 = input('Enter the name of the university document: ')
professors(filename1)

def capital(filename):
    with open(filename, 'r', encoding='utf-8') as file:
        text = file.read()
    result = re.findall('data-wikidata-property-id="P36"><a href="https://ru.wikipedia.org/wiki/.+" title=".*">(\w+)', text)
    with open('capitals.tsv', 'a', encoding='utf-8') as rslt:
        data = rslt.write(str(result[0]) + '\t')
    print('Capital of this country:', result[0])

filename2 = input('Enter the name of the country document: ')
capital(filename2)

def time_zone(filename):
    with open(filename, 'r', encoding='utf-8') as file:
        text = file.read()
    result = re.findall('data-wikidata-property-id="P421"><a href=".+" class="mw-redirect" title="(.+)"', text)
    with open('timezones.tsv', 'a', encoding='utf-8') as rslt:
        data = rslt.write(str(result[0]) + '\t')
    print('Time zone of this city:', result[0])

filename3 = input('Enter the name of the city document: ')
time_zone(filename3)
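# --- Editorial note: the extraction idiom used in all three functions above.
# re.findall() with a single capture group returns only the captured text,
# and result[0] takes the first match. The HTML below is a hypothetical
# stand-in for a saved Wikipedia page.
import re
html = '<th class="plainlist">Преподаватели</th>\n<td class="plainlist">\n1024'
result = re.findall('<th class="plainlist">Преподавател.+</th>\n<td class="plainlist">\n(\d+)', html)
print(result[0])  # -> 1024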
|
[
"noreply@github.com"
] |
KirillKonovalov.noreply@github.com
|
e385ead310d3106ca430aff52eb1493207305ae2
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/built-in/cv/detection/SSD_for_PyTorch/mmdet/models/necks/yolox_pafpn.py
|
c56aa6f53df1a46417ab15014670f24816906d99
|
[
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 6,238
|
py
|
# Copyright 2022 Huawei Technologies Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmcv.runner import BaseModule
from ..builder import NECKS
from ..utils import CSPLayer
@NECKS.register_module()
class YOLOXPAFPN(BaseModule):
    """Path Aggregation Network used in YOLOX.

    Args:
        in_channels (List[int]): Number of input channels per scale.
        out_channels (int): Number of output channels (used at each scale).
        num_csp_blocks (int): Number of bottlenecks in CSPLayer. Default: 3
        use_depthwise (bool): Whether to use depthwise separable convolution
            in blocks. Default: False
        upsample_cfg (dict): Config dict for interpolate layer.
            Default: `dict(scale_factor=2, mode='nearest')`
        conv_cfg (dict, optional): Config dict for convolution layer.
            Default: None, which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN')
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='Swish')
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 num_csp_blocks=3,
                 use_depthwise=False,
                 upsample_cfg=dict(scale_factor=2, mode='nearest'),
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),
                 act_cfg=dict(type='Swish'),
                 init_cfg=dict(
                     type='Kaiming',
                     layer='Conv2d',
                     a=math.sqrt(5),
                     distribution='uniform',
                     mode='fan_in',
                     nonlinearity='leaky_relu')):
        super(YOLOXPAFPN, self).__init__(init_cfg)
        self.in_channels = in_channels
        self.out_channels = out_channels

        conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule

        # build top-down blocks
        self.upsample = nn.Upsample(**upsample_cfg)
        self.reduce_layers = nn.ModuleList()
        self.top_down_blocks = nn.ModuleList()
        for idx in range(len(in_channels) - 1, 0, -1):
            self.reduce_layers.append(
                ConvModule(
                    in_channels[idx],
                    in_channels[idx - 1],
                    1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg))
            self.top_down_blocks.append(
                CSPLayer(
                    in_channels[idx - 1] * 2,
                    in_channels[idx - 1],
                    num_blocks=num_csp_blocks,
                    add_identity=False,
                    use_depthwise=use_depthwise,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg))

        # build bottom-up blocks
        self.downsamples = nn.ModuleList()
        self.bottom_up_blocks = nn.ModuleList()
        for idx in range(len(in_channels) - 1):
            self.downsamples.append(
                conv(
                    in_channels[idx],
                    in_channels[idx],
                    3,
                    stride=2,
                    padding=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg))
            self.bottom_up_blocks.append(
                CSPLayer(
                    in_channels[idx] * 2,
                    in_channels[idx + 1],
                    num_blocks=num_csp_blocks,
                    add_identity=False,
                    use_depthwise=use_depthwise,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg))

        self.out_convs = nn.ModuleList()
        for i in range(len(in_channels)):
            self.out_convs.append(
                ConvModule(
                    in_channels[i],
                    out_channels,
                    1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg))

    def forward(self, inputs):
        """
        Args:
            inputs (tuple[Tensor]): input features.

        Returns:
            tuple[Tensor]: YOLOXPAFPN features.
        """
        assert len(inputs) == len(self.in_channels)

        # top-down path
        inner_outs = [inputs[-1]]
        for idx in range(len(self.in_channels) - 1, 0, -1):
            feat_high = inner_outs[0]
            feat_low = inputs[idx - 1]
            feat_high = self.reduce_layers[len(self.in_channels) - 1 - idx](
                feat_high)
            inner_outs[0] = feat_high

            upsample_feat = self.upsample(feat_high)

            inner_out = self.top_down_blocks[len(self.in_channels) - 1 - idx](
                torch.cat([upsample_feat, feat_low], 1))
            inner_outs.insert(0, inner_out)

        # bottom-up path
        outs = [inner_outs[0]]
        for idx in range(len(self.in_channels) - 1):
            feat_low = outs[-1]
            feat_high = inner_outs[idx + 1]
            downsample_feat = self.downsamples[idx](feat_low)
            out = self.bottom_up_blocks[idx](
                torch.cat([downsample_feat, feat_high], 1))
            outs.append(out)

        # out convs
        for idx, conv in enumerate(self.out_convs):
            outs[idx] = conv(outs[idx])
        return tuple(outs)
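# --- Editorial sketch: shape-level usage of the neck above, assuming
# mmdet/mmcv are installed. The channel counts and spatial sizes are
# illustrative, not values from any config in this repo.
if __name__ == '__main__':
    neck = YOLOXPAFPN(in_channels=[128, 256, 512], out_channels=128)
    feats = (
        torch.rand(1, 128, 80, 80),   # stride-8 feature map
        torch.rand(1, 256, 40, 40),   # stride-16 feature map
        torch.rand(1, 512, 20, 20),   # stride-32 feature map
    )
    outs = neck(feats)
    print([tuple(o.shape) for o in outs])  # three maps, each with 128 channels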
|
[
"chenyong84@huawei.com"
] |
chenyong84@huawei.com
|
a7a08117a64e3b9d0fee04e06d8a238517283597
|
80cb184cd05cc447da741a976527e9329bcd9f05
|
/maiden/config.py
|
f073e1ac179f68c5476b2de22632a448526a1061
|
[
"Apache-2.0"
] |
permissive
|
oozappa/maiden
|
9ef0f09f3d8f115f69ffe26f88d02e34b48d0d89
|
456d3bfe9d05ac4ab1680fafc8ef8a7477ff018f
|
refs/heads/master
| 2022-05-28T14:25:57.608191
| 2022-04-17T02:29:06
| 2022-04-17T02:29:06
| 57,427,320
| 0
| 1
|
Apache-2.0
| 2022-04-17T02:29:07
| 2016-04-30T04:39:51
|
Python
|
UTF-8
|
Python
| false
| false
| 1,380
|
py
|
import collections.abc
import logging
logger = logging.getLogger('oozappa')

def _update(org, opt):
    for k, v in opt.items():
        if isinstance(v, collections.abc.Mapping):
            r = _update(org.get(k, OozappaSetting()), v)
            org[k] = r
        else:
            org[k] = opt[k]
    return org

class OozappaSetting(dict):
    '''dict like object. accessible with dot syntax.
    >>> settings = OozappaSetting(
    ...   spam = '123',
    ...   egg = 123
    ... )
    >>> assert(settings.spam == '123')
    >>> assert(settings.egg == 123)
    >>> settings.ham = 123.0
    >>> assert(settings.ham == 123.0)
    >>> s2 = OozappaSetting(dict(spam=456))
    >>> settings.update(s2)
    >>> assert(settings.spam == 456)
    >>> assert(settings.ham == 123.0)
    '''
    def __init__(self, *args, **kwargs):
        for d in args:
            if isinstance(d, collections.abc.Mapping):
                self.update(d)
        for key, value in kwargs.items():
            self[key] = value

    def __setattr__(self, key, value):
        if isinstance(value, collections.abc.Mapping):
            self[key] = OozappaSetting(value)
        else:
            self[key] = value

    def __getattr__(self, key):
        try:
            return self[key]
        except KeyError:
            object.__getattribute__(self, key)

    def update(self, opt):
        self = _update(self, opt)
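# --- Editorial sketch: usage beyond the doctest above, showing that
# attribute assignment wraps nested mappings and that update() merges
# recursively. The config keys are illustrative.
if __name__ == '__main__':
    cfg = OozappaSetting()
    cfg.db = dict(host='localhost', port=5432)      # wrapped into OozappaSetting
    cfg.update(OozappaSetting(db=dict(port=6543)))  # recursive merge keeps host
    print(cfg.db.host, cfg.db.port)                 # -> localhost 6543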
|
[
"mtsuyuki@gmail.com"
] |
mtsuyuki@gmail.com
|
97a04619215b62b994dec14b5a445a7ec7f4c2d0
|
0b16b491709ff01407f4a77c7a66fa8a060a81f8
|
/mysite/department/admin.py
|
09cadeca94805250296293b9a22ba4bde5021775
|
[] |
no_license
|
kokkondaspandana/pollsapp
|
1813b6c81f866878e41ff6a1d57513fc7e5ff151
|
22605e7cad16150253fb38f57c3d9f531cd48562
|
refs/heads/master
| 2021-01-19T02:24:14.590908
| 2017-04-05T06:27:21
| 2017-04-05T06:27:21
| 87,273,009
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 124
|
py
|
from django.contrib import admin
# Register your models here.
from . import models
admin.site.register(models.Department)
|
[
"spandanak.sosio@gmail.com"
] |
spandanak.sosio@gmail.com
|
1aa67886a5fbeb91ad7eb625be22e604d137222b
|
c3f6b9f4b850046d5c12c9a69803c66d18b975e3
|
/clip.py
|
80bfadd82ca1e1b9b50260bb361f0e0fb735e35d
|
[
"MIT"
] |
permissive
|
JimReno/pytorch-video-recognition
|
75b3307dbe5328fc79f8fa42a601bb33bc08a37a
|
8eceef090924e892bd2cdc1c875fc820e8699aba
|
refs/heads/master
| 2020-04-30T07:06:17.022438
| 2019-03-20T07:10:46
| 2019-03-20T07:10:46
| 176,674,477
| 0
| 0
|
MIT
| 2019-03-20T07:14:42
| 2019-03-20T07:03:39
|
Python
|
UTF-8
|
Python
| false
| false
| 3,746
|
py
|
# Dataset preparation
# 2018/12/18
import glob
import cv2

def resize_img(img_dir, save_dir):
    # Batch-resize images
    resize_factor = (224, 224)
    img_paths = glob.glob(img_dir)
    for img_path in img_paths:
        img_name = img_path.split('\\')[-1]
        img = cv2.imread(img_path)
        resized_img = cv2.resize(img, resize_factor)
        save_name = save_dir + img_name
        cv2.imwrite(save_name, resized_img)

def clip_video(video_path, output_size, output_dir, fps, video_count):
    # Clip a video and name the clips following the UCF-101 naming convention
    video_cls = video_path.split('\\')[-1].split('_')[0]
    videoCapture = cv2.VideoCapture(video_path)
    total_frame = videoCapture.get(7)
    is_open, frame = videoCapture.read()
    if not is_open:
        raise RuntimeError('Can not find any .avi format video, please set correct video file path.')
    if total_frame < 201:
        raise RuntimeError('video {} is too short, please remove this file from the directory'.format(video_path))
    frame_count = 1
    clip_count = 1
    # On Windows the video is only saved correctly with the MJPG codec; videos
    # written with XVID cannot be opened (reason unknown); not verified on Linux
    fourcc = cv2.VideoWriter_fourcc(*'MJPG')
    video_name = output_dir + '\\' + '{}_g{}_c{}.avi'.format(video_cls, str(video_count).zfill(3),
                                                             str(clip_count).zfill(4))
    videoWriter = cv2.VideoWriter(video_name, fourcc, fps, output_size)
    # Save videos in the UCF-101 dataset format: every 200 frames become one
    # clip at 25 fps and 320x240; if the total or remaining length is under
    # 100 frames, the leftover frames are simply dropped
    while is_open:
        # start a new clip every 200 frames
        if frame_count % 200 == 0 and (total_frame - frame_count > 100):
            # report the clip that was just finished
            print('{} has been written to path:{}'.format(video_name, output_dir))
            clip_count += 1
            video_name = output_dir + '\\' + '{}_g{}_c{}.avi'.format(video_cls, str(video_count).zfill(3),
                                                                     str(clip_count).zfill(4))
            videoWriter = cv2.VideoWriter(video_name, fourcc, fps, output_size)
        # without resizing first, the saved video cannot be opened (reason unknown)
        new_frame = cv2.resize(frame, output_size)
        videoWriter.write(new_frame)
        is_open, frame = videoCapture.read()
        frame_count += 1
    cv2.destroyAllWindows()
    videoCapture.release()

def scan_video(video_dir):
    # Print the frame count of each video (only videos not equal to 200 frames are reported)
    video_list = glob.glob(video_dir)
    for video_path in video_list:
        video = cv2.VideoCapture(video_path)
        if video.get(7) != 200:
            print(video_path.split('\\')[-1], video.get(7))
    return

def main():
    need_resize = False
    if need_resize:
        img_dir = 'E:\\flp\\data_three_cls\\video\\news\\*.jpg'
        save_dir = 'E:\\flp\\data_three_cls\\video_resize\\news\\'
        resize_img(img_dir, save_dir)
    need_clip_video = False
    if need_clip_video:
        clip_size = (320, 240)
        fps = 25
        video_dir = 'E:\\GE\\flp\\video_2\\news\\*.avi'
        output_dir = 'E:\\GE\\flp\\output\\news\\'
        video_files = glob.glob(video_dir)
        for video_count, video_file in enumerate(video_files):
            clip_video(video_file, clip_size, output_dir, fps, video_count + 15)
    need_scan_video = True
    if need_scan_video:
        video_dir = 'E:\\GE\\flp\\output\\news\\*.avi'
        scan_video(video_dir)

if __name__ == '__main__':
    main()
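# --- Editorial note: `videoCapture.get(7)` above queries the frame count;
# the magic number 7 is cv2.CAP_PROP_FRAME_COUNT, so the named-constant form
#     total_frame = videoCapture.get(cv2.CAP_PROP_FRAME_COUNT)
# reads better and is equivalent.
assert cv2.CAP_PROP_FRAME_COUNT == 7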
|
[
"noreply@github.com"
] |
JimReno.noreply@github.com
|
8e565e33761a803e5727e5d53561cce980caa560
|
d572728c0322e85b7e6421ee29e069016d622f88
|
/Estrutura sequencial/area_terreno.py
|
f8a028c1f7da1b37860934255d42d4587e7b52a7
|
[] |
no_license
|
brunolomba/exercicios_logica
|
60bc50576f5b9bd388458953597eae1eb8be7cda
|
31a5c7bb00adc47e8bc082285c9e5001b5b166db
|
refs/heads/main
| 2023-05-08T11:05:52.573121
| 2021-05-31T17:07:00
| 2021-05-31T17:07:00
| 368,974,078
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 275
|
py
|
largura = int(input('Enter the width of the lot: '))
comprimento = int(input('Enter the length of the lot: '))
valor_metro = int(input('Enter the price per square meter: '))
area = largura * comprimento
preco = area * valor_metro
print(f'The price of the lot is: {preco}')
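# --- Editorial sketch: the same computation without input(), with sample
# numbers chosen only for illustration.
largura, comprimento, valor_metro = 10, 20, 50  # 10 m x 20 m at 50 per m^2
area = largura * comprimento                    # 200 m^2
preco = area * valor_metro                      # 10000
print(f'The price of the lot is: {preco}')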
|
[
"brunolomba@gmail.com"
] |
brunolomba@gmail.com
|