blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 246 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1bfe1ae1aee67c2705d37357e3c0edecb88a0ba5 | 261dacd54882e4fcfea28ec164d5a0fe3941ced8 | /main.py | b5faea0b0777405134a7b6cd3846cdccbdc6a213 | [] | no_license | bebarrey/rock-paper-scissors-start | 99343fae69fe1497f7e758ebad243cab1921a89e | 8c132f0974712c344d69f4485b80de1905017fb4 | refs/heads/master | 2023-01-24T17:35:08.676399 | 2020-11-24T05:53:30 | 2020-11-24T05:53:30 | 315,532,499 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,318 | py | import random
rock = '''
_______
---' ____)
(_____)
(_____)
(____)
---.__(___)
'''
paper = '''
_______
---' ____)____
______)
_______)
_______)
---.__________)
'''
scissors = '''
_______
---' ____)____
______)
__________)
(____)
---.__(___)
'''
print("Welcome to rock, paper, scissors.")
answer = input("Type '0' for rock, '1' for paper and '2' for scissors.\n")
print(f"You Chose: {answer}")
if answer == "0":
print(rock)
elif answer == "1":
print(paper)
elif answer == "2":
print(scissors)
computer = random.randint(0,2)
print(f"Computer chose: {computer}")
if computer == 0:
print(rock)
elif computer == 1:
print(paper)
elif computer == 2:
print(scissors)
if computer == 0 and answer == "2":
print("Computer Wins.")
elif answer == "0" and computer == 2:
print("You win.")
elif computer == 0 and answer == "0":
print("Its a draw.")
if computer == 2 and answer == "1":
print("Computer Wins.")
elif answer == "2" and computer == 1:
print("You win.")
elif computer == 2 and answer == "2":
print("Its a draw.")
if computer == 1 and answer == "0":
print("Computer Wins.")
elif answer == "1" and computer == 0:
print("You win.")
elif computer == 1 and answer == "1":
print("Its a draw.") | [
"bharat.rajashekar@gmail.com"
] | bharat.rajashekar@gmail.com |
ca46b00bf89324ed706f2ec07aa26e868e8876a0 | 8b174b2c2d99b9932cab8f67b2e533c01bf68f81 | /SSD_Article/tf_convert_data.py | 01339a60f9e73df1f092c2cbe91a4865d1225c31 | [] | no_license | fanszoro/CV-detection | a7baac7273b5f6cca66babf341519821851c5350 | a56782edb9ebb15835836f2919dcc491b1cec49b | refs/heads/master | 2020-03-19T04:59:35.346990 | 2018-04-03T02:13:05 | 2018-04-03T02:13:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,526 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Convert a dataset to TFRecords format, which can be easily integrated into
a TensorFlow pipeline.
Usage:
```shell
python tf_convert_data.py \
--dataset_name=pascalvoc \
--dataset_dir=/tmp/pascalvoc \
--output_name=pascalvoc \
--output_dir=/tmp/
```
"""
import tensorflow as tf
from datasets import pascalvoc_to_tfrecords
# TF1.x command-line flags; the defaults point at the author's local copy of
# Pascal VOC 2007 (the commented-out block is the trainval variant).
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string(
    'dataset_name', 'pascalvoc_test',
    'The name of the dataset to convert.')
# tf.app.flags.DEFINE_string(
#     'dataset_dir',
#     '/home/sxf/MyProject_Python/TFtest/SSD-Tensorflow/datasets/normal_data_set_voc/VOCtrainval_06-Nov-2007/VOCdevkit/VOC2007/',
#     'Directory where the original dataset is stored.')
tf.app.flags.DEFINE_string(
    'dataset_dir',
    '/home/sxf/MyProject_Python/TFtest/SSD-Tensorflow/datasets/normal_data_set_voc/VOCtest_06-Nov-2007/VOCdevkit/VOC2007/',
    'Directory where the original dataset is stored.')
tf.app.flags.DEFINE_string(
    'output_name', 'pascalvoc',
    'Basename used for TFRecords output files.')
tf.app.flags.DEFINE_string(
    'output_dir', '/home/sxf/MyProject_Python/TFtest/SSD-Tensorflow/datasets/normal_data_to_tf_records/test/',
    'Output directory where to store TFRecords files.')
def main(_):
    """Entry point: validate the flags, then convert the selected dataset
    to TFRecords via pascalvoc_to_tfrecords.run."""
    if not FLAGS.dataset_dir:
        raise ValueError('You must supply the dataset directory with --dataset_dir')
    print('Dataset directory:', FLAGS.dataset_dir)
    print('Output directory:', FLAGS.output_dir)
    # Guard clause: only the pascalvoc_test dataset is supported.
    if FLAGS.dataset_name != 'pascalvoc_test':
        raise ValueError('Dataset [%s] was not recognized.' % FLAGS.dataset_name)
    pascalvoc_to_tfrecords.run(FLAGS.dataset_dir, FLAGS.output_dir,
                               FLAGS.output_name, shuffling=False)
if __name__ == '__main__':
tf.app.run()
| [
"sxf1052566766@163.com"
] | sxf1052566766@163.com |
7c4a49ed952f53cdc5e0c6c94b654dd880ebba91 | 917166048ce42f4702b8cc28f451b8489a8dc701 | /helper.py | a0e3e9207d75d5c24a5fee47f068c14e979004c8 | [] | no_license | sadhi003/DLND-cifar10 | dd4037113ccb7cd5fe20b6357c80eb69ed2ee20e | 4234df7f8c033f7ee408fa70c61a34e0595a6953 | refs/heads/master | 2020-03-21T11:32:37.915891 | 2018-06-24T19:59:05 | 2018-06-24T19:59:05 | 138,511,022 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,649 | py | import pickle
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelBinarizer
def _load_label_names():
"""
Load the label names from file
"""
return ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
def load_cfar10_batch(cifar10_dataset_folder_path, batch_id):
    """Load one pickled CIFAR-10 training batch.

    Returns (features, labels) where features has shape (N, 32, 32, 3)
    in channels-last order, and labels is the raw label list.
    """
    batch_path = '{}/data_batch_{}'.format(cifar10_dataset_folder_path, batch_id)
    with open(batch_path, mode='rb') as file:
        batch = pickle.load(file, encoding='latin1')

    raw = batch['data']
    # Stored layout is (N, 3072) = (N, C, H, W) flattened; move channels last.
    features = raw.reshape((len(raw), 3, 32, 32)).transpose(0, 2, 3, 1)
    labels = batch['labels']
    return features, labels
def display_stats(cifar10_dataset_folder_path, batch_id, sample_id):
    """Print summary stats for one CIFAR-10 batch and show one sample image.

    Validates batch_id (must be 1-5) and sample_id before printing label
    counts and rendering the sample via matplotlib; returns None early on
    invalid input.
    """
    batch_ids = list(range(1, 6))
    if batch_id not in batch_ids:
        print('Batch Id out of Range. Possible Batch Ids: {}'.format(batch_ids))
        return None
    features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_id)
    if not (0 <= sample_id < len(features)):
        print('{} samples in batch {}. {} is out of range.'.format(len(features), batch_id, sample_id))
        return None
    print('\nStats of batch {}:'.format(batch_id))
    print('Samples: {}'.format(len(features)))
    # np.unique with return_counts gives (label_ids, counts); zip into a dict.
    print('Label Counts: {}'.format(dict(zip(*np.unique(labels, return_counts=True)))))
    print('First 20 Labels: {}'.format(labels[:20]))
    sample_image = features[sample_id]
    sample_label = labels[sample_id]
    label_names = _load_label_names()
    print('\nExample of Image {}:'.format(sample_id))
    print('Image - Min Value: {} Max Value: {}'.format(sample_image.min(), sample_image.max()))
    print('Image - Shape: {}'.format(sample_image.shape))
    print('Label - Label Id: {} Name: {}'.format(sample_label, label_names[sample_label]))
    plt.axis('off')
    plt.imshow(sample_image)
def _preprocess_and_save(normalize, one_hot_encode, features, labels, filename):
"""
Preprocess data and save it to file
"""
features = normalize(features)
labels = one_hot_encode(labels)
pickle.dump((features, labels), open(filename, 'wb'))
def preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode):
    """Preprocess all CIFAR-10 batches and pickle them to the working dir.

    For each of the 5 training batches the last 10% of samples is held out;
    the held-out samples are pooled into 'preprocess_validation.p'. The
    separate test_batch file is preprocessed as well.
    """
    n_batches = 5
    valid_features = []
    valid_labels = []
    for batch_i in range(1, n_batches + 1):
        features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_i)
        validation_count = int(len(features) * 0.1)
        # Preprocess and save a batch of training data (all but the tail).
        _preprocess_and_save(
            normalize,
            one_hot_encode,
            features[:-validation_count],
            labels[:-validation_count],
            'preprocess_batch_' + str(batch_i) + '.p')
        # Use the tail portion of each training batch for validation.
        valid_features.extend(features[-validation_count:])
        valid_labels.extend(labels[-validation_count:])
    # Preprocess and save the pooled validation data.
    _preprocess_and_save(
        normalize,
        one_hot_encode,
        np.array(valid_features),
        np.array(valid_labels),
        'preprocess_validation.p')
    with open(cifar10_dataset_folder_path + '/test_batch', mode='rb') as file:
        batch = pickle.load(file, encoding='latin1')
    # Load the TEST batch and convert to channels-last (N, 32, 32, 3).
    test_features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
    test_labels = batch['labels']
    # NOTE(review): this saves the TEST data under 'preprocess_training.p' --
    # confirm that downstream loaders really expect that filename.
    _preprocess_and_save(
        normalize,
        one_hot_encode,
        np.array(test_features),
        np.array(test_labels),
        'preprocess_training.p')
def batch_features_labels(features, labels, batch_size):
    """Yield (features, labels) slices of at most `batch_size` items each."""
    total = len(features)
    start = 0
    while start < total:
        stop = min(start + batch_size, total)
        yield features[start:stop], labels[start:stop]
        start = stop
def load_preprocess_training_batch(batch_id, batch_size):
    """Load the preprocessed training batch `batch_id` from the working dir
    and return it as a generator of batches of <batch_size> or less.
    """
    filename = 'preprocess_batch_' + str(batch_id) + '.p'
    # Close the file deterministically; the original passed an open() result
    # straight to pickle.load and leaked the handle.
    with open(filename, mode='rb') as in_file:
        features, labels = pickle.load(in_file)
    # Return the training data in batches of size <batch_size> or less.
    return batch_features_labels(features, labels, batch_size)
def display_image_predictions(features, labels, predictions):
    """Plot sample images next to horizontal bars of their top-3 predictions.

    `predictions` is expected to expose `.indices` and `.values` (e.g. a
    tf.nn.top_k result). NOTE(review): the grid has only 4 rows, so more
    than 4 samples would overflow `axies` -- confirm callers pass <= 4.
    """
    n_classes = 10
    label_names = _load_label_names()
    # Invert one-hot labels back to integer label ids.
    label_binarizer = LabelBinarizer()
    label_binarizer.fit(range(n_classes))
    label_ids = label_binarizer.inverse_transform(np.array(labels))
    fig, axies = plt.subplots(nrows=4, ncols=2)
    fig.tight_layout()
    fig.suptitle('Softmax Predictions', fontsize=20, y=1.1)
    n_predictions = 3
    margin = 0.05
    ind = np.arange(n_predictions)
    width = (1. - 2. * margin) / n_predictions
    for image_i, (feature, label_id, pred_indicies, pred_values) in enumerate(zip(features, label_ids, predictions.indices, predictions.values)):
        pred_names = [label_names[pred_i] for pred_i in pred_indicies]
        correct_name = label_names[label_id]
        # Left column: the image, titled with its true class name.
        axies[image_i][0].imshow(feature)
        axies[image_i][0].set_title(correct_name)
        axies[image_i][0].set_axis_off()
        # Right column: top-3 confidences, reversed so the best is on top.
        axies[image_i][1].barh(ind + margin, pred_values[::-1], width)
        axies[image_i][1].set_yticks(ind + margin)
        axies[image_i][1].set_yticklabels(pred_names[::-1])
        axies[image_i][1].set_xticks([0, 0.5, 1.0])
| [
"adhi.shankar3@gmail.com"
] | adhi.shankar3@gmail.com |
db165d9274fcb016b890aceacb0829fe76d14c0e | 12f5e1035cccb683c7b83d47bab3c809005546ce | /hhparser/pipelines.py | db6ba7948202cbb095793e17d0493164dffb6708 | [] | no_license | Nikita371/HH-Scrapy-MySQL | 57ac6567952f66e61bce8fcf6da25226c645ffb4 | 0541ab458055e0ec2ba9d5746e29289203986e35 | refs/heads/master | 2020-03-28T21:48:16.423678 | 2018-09-17T20:28:08 | 2018-09-17T20:28:08 | 149,183,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 954 | py | # -*- coding: utf-8 -*-
import sys
import MySQLdb
from hhparser.items import HhparserItem
class HhparserPipeline(object):
    """Scrapy item pipeline (Python 2) that inserts scraped HeadHunter
    vacancies into a MySQL table via a parameterized INSERT.

    NOTE(review): DB credentials are hard-coded here; they should live in
    Scrapy settings. The connection is never closed (no close_spider hook).
    """
    def __init__(self):
        self.conn = MySQLdb.connect(host="localhost", # your host, usually localhost
                             user="root", # your username
                              passwd="133737Aa", # your password
                              db="sys")
        self.cursor = self.conn.cursor()
    def process_item(self, item, spider):
        # Parameterized query (%s placeholders) -- values are escaped by the
        # driver, not interpolated into the SQL string.
        try:
            self.cursor.execute("""INSERT INTO hhvacancy (NameVacancy,Salary,Employer)
                        VALUES (%s, %s,%s)""",
                        (item['NameVacancy'],item['Salary'],item['Employer']))
            self.conn.commit()
        except MySQLdb.Error, e:
            # Failed inserts are logged and swallowed; the item still flows on.
            print "Error %d: %s" % (e.args[0], e.args[1])
        return item
| [
"38251442+Nikita371@users.noreply.github.com"
] | 38251442+Nikita371@users.noreply.github.com |
97dd05252b2d1f893c293761aec525bc4a53456d | 2c144a35c8b178b1eae8d37e2f73e4b203fc03bb | /pyprograming/open_file_editfile/open_file_editfile/open_file_editfile.py | cf71d59142ceb99b7845dcaaa39be6a606357531 | [] | no_license | ivanpedro/QT_dev | 01f9fe8d1b7342315b50073a0ff1283c2485dfd4 | 59753ea9983198c079dbf740d6154e05c9c0c547 | refs/heads/master | 2021-05-06T18:01:12.847687 | 2017-11-30T20:00:59 | 2017-11-30T20:00:59 | 111,859,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,020 | py | import sys
from PyQt5.Qt import QFileDialog
from PyQt5.QtGui import QColor , QIcon
from PyQt5.QtWidgets import (QColorDialog, QFontDialog, QStyleFactory,
QCalendarWidget, QComboBox, QLabel,
QProgressBar, QCheckBox, QPushButton, QAction,
QMessageBox, QTextEdit, QTextEdit, QApplication,
QMainWindow)
from PyQt5 import QtGui, QtCore
class Window(QMainWindow):
    """Tutorial-style PyQt5 main window wiring together menus, a toolbar,
    dialogs (file/font/color), an embedded text editor and misc widgets."""
    def __init__(self):
        super(Window, self).__init__()
        self.setGeometry(50, 50, 500, 300)
        self.setWindowTitle("PyQT tuts!")
        self.setWindowIcon(QIcon('pythonlogo.png'))
        # Quit action, reachable from the File menu and Ctrl+Q.
        extractAction = QAction("&GET TO THE CHOPPAH!!!", self)
        extractAction.setShortcut("Ctrl+Q")
        extractAction.setStatusTip('Leave The App')
        extractAction.triggered.connect(self.close_application)
        # Opens the embedded text editor (Ctrl+E).
        openEditor = QAction("&Editor", self)
        openEditor.setShortcut("Ctrl+E")
        openEditor.setStatusTip('Open Editor')
        openEditor.triggered.connect(self.editor)
        # Opens a file picker and loads the chosen file into the editor (Ctrl+O).
        openFile = QAction("&Open File", self)
        openFile.setShortcut("Ctrl+O")
        openFile.setStatusTip('Open File')
        openFile.triggered.connect(self.file_open)
        self.statusBar()
        mainMenu = self.menuBar()
        fileMenu = mainMenu.addMenu('&File')
        fileMenu.addAction(extractAction)
        fileMenu.addAction(openFile)
        editorMenu = mainMenu.addMenu("&Editor")
        editorMenu.addAction(openEditor)
        self.home()
    def home(self):
        """Build the main screen: toolbar actions, buttons, progress bar,
        style combo box and calendar, then show the window."""
        btn = QPushButton("Quit", self)
        btn.clicked.connect(self.close_application)
        btn.resize(btn.minimumSizeHint())
        btn.move(0,100)
        extractAction = QAction(QIcon('todachoppa.png'), 'Flee the Scene', self)
        extractAction.triggered.connect(self.close_application)
        self.toolBar = self.addToolBar("Extraction")
        self.toolBar.addAction(extractAction)
        fontChoice = QAction('Font', self)
        fontChoice.triggered.connect(self.font_choice)
        #self.toolBar = self.addToolBar("Font")
        self.toolBar.addAction(fontChoice)
        # NOTE(review): this QColor is never used; color_picker builds its own.
        color = QColor(0, 0, 0)
        fontColor = QAction('Font bg Color', self)
        fontColor.triggered.connect(self.color_picker)
        self.toolBar.addAction(fontColor)
        checkBox = QCheckBox('Enlarge Window', self)
        checkBox.move(300, 25)
        checkBox.stateChanged.connect(self.enlarge_window)
        self.progress = QProgressBar(self)
        self.progress.setGeometry(200, 80, 250, 20)
        self.btn = QPushButton("Download",self)
        self.btn.move(200,120)
        self.btn.clicked.connect(self.download)
        #print(self.style().objectName())
        self.styleChoice = QLabel("Windows Vista", self)
        comboBox = QComboBox(self)
        comboBox.addItem("motif")
        comboBox.addItem("Windows")
        comboBox.addItem("cde")
        comboBox.addItem("Plastique")
        comboBox.addItem("Cleanlooks")
        comboBox.addItem("windowsvista")
        comboBox.move(50, 250)
        self.styleChoice.move(50,150)
        comboBox.activated[str].connect(self.style_choice)
        cal = QCalendarWidget(self)
        cal.move(500,200)
        cal.resize(200,200)
        self.show()
    def file_open(self):
        """Prompt for a file, open the editor, and load the file's text."""
        name, _ = QFileDialog.getOpenFileName(self, 'Open File', options=QFileDialog.DontUseNativeDialog)
        file = open(name,'r')
        self.editor()
        with file:
            text = file.read()
            self.textEdit.setText(text)
    def color_picker(self):
        """Let the user pick a colour; use it as the style label's background."""
        color = QColorDialog.getColor()
        self.styleChoice.setStyleSheet("QWidget { background-color: %s}" % color.name())
    def editor(self):
        """Replace the central widget with a fresh QTextEdit."""
        self.textEdit = QTextEdit()
        self.setCentralWidget(self.textEdit)
    def font_choice(self):
        """Open a font dialog and apply the chosen font to the style label."""
        font, valid = QFontDialog.getFont()
        if valid:
            self.styleChoice.setFont(font)
    def style_choice(self, text):
        """Switch the application-wide Qt widget style to `text`."""
        self.styleChoice.setText(text)
        QApplication.setStyle(QStyleFactory.create(text))
    def download(self):
        """Fake a download by stepping the progress bar to 100%.

        NOTE(review): this busy-loop blocks the GUI thread; a QTimer would
        keep the UI responsive.
        """
        self.completed = 0
        while self.completed < 100:
            self.completed += 0.0001
            self.progress.setValue(self.completed)
    def enlarge_window(self, state):
        """Grow the window while the checkbox is checked; shrink otherwise."""
        if state == QtCore.Qt.Checked:
            self.setGeometry(50,50, 1000, 600)
        else:
            self.setGeometry(50, 50, 500, 300)
    def close_application(self):
        """Ask for confirmation, then exit the process (or do nothing)."""
        choice = QMessageBox.question(self, 'Extract!',
                                            "Get into the chopper?",
                                            QMessageBox.Yes | QMessageBox.No)
        if choice == QMessageBox.Yes:
            print("Extracting Naaaaaaoooww!!!!")
            sys.exit()
        else:
            pass
if __name__=='__main__':
    # Standard Qt bootstrap: create the application and the main window,
    # then (on the following line) hand control to the event loop.
    app = QApplication(sys.argv)
    GUI = Window()
sys.exit(app.exec_()) | [
"ivanpedrouk@gmail.com"
] | ivanpedrouk@gmail.com |
27982193987fec8be1dcf9936d232590f0fb7939 | c0343149672446e5264dd097fa623335121434d6 | /BSBI.py | 9d30db90e8c66663741c4b782e749ff18759bc80 | [
"MIT"
] | permissive | Google1234/Information_Retrieva_Learning | cc43913238f7fd0fd92541dc4c6712618f29bee8 | 0406bdebf8fbfd2d81a8c600620890a9fdbdb4b6 | refs/heads/master | 2021-01-01T05:10:29.886484 | 2016-05-10T09:37:36 | 2016-05-10T09:37:36 | 57,950,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | py |
'''
BSBI-INDEXCONSTRUCTION()
  n <- 0
  while (all documents have not been processed)
    do n <- n + 1
       block <- PARSENEXTBLOCK()   // parse the next block of documents
       BSBI-INVERT(block)
       WRITEBLOCKTODISK(block, fn)
  MERGEBLOCKS(f1, ..., fn; fmerged)
''' | [
"taoJiang_ict@163.com"
] | taoJiang_ict@163.com |
c33a4797398bb281a0fc4cce2a453e42945a3181 | d79e21c02f5a82c83f87e797f1d29f4841ba87fd | /listmethods/TuesdayMorninglist.py | 2d855d01e5f4f3b50fc553106f1e1ff95ab39329 | [] | no_license | dragonwizard21/mycode | daa930ab65a8096287ee49ccdb49038befb4fe25 | 24c7e68132d0873138d2b7596e60827b3e3b22ad | refs/heads/master | 2022-12-18T16:47:00.607499 | 2020-09-19T08:19:27 | 2020-09-19T08:19:27 | 295,483,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | #!/usr/bin/env python3
#list of pets:
pets = ['fido', 'spot', 'fluffy']

# Ask the user for three more pet names, reporting the list length after
# each addition (the original repeated this input/append/print block three
# times verbatim; a loop over the prompts removes the copy-paste).
prompts = ("Please insert a pet name:",
           "Please insert another pet name:",
           "Please insert one more pet name:")
for prompt in prompts:
    pets.append(input(prompt))
    print(len(pets))

print(pets)

for index, pet in enumerate(pets):
    print(index, pet)
"zachhuff21@gmail.com"
] | zachhuff21@gmail.com |
e4e79f61d8937b553684ef09dc913c77cecc991a | 950db427f502e8d0c90c114dd9b145cb037ece4c | /mysite/urls.py | 1115ad84952bef7423374a32876bdad35a651e83 | [] | no_license | kornkritpawit/django-polls | d0c29be7d4baf9a416b0fbe9d947c14913d78c00 | 16d9b02fb481b5f17ecf75999d36a841dbfb5975 | refs/heads/master | 2020-07-22T16:21:55.336124 | 2019-11-30T15:30:22 | 2019-11-30T15:30:22 | 207,247,087 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 934 | py | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
from polls import views
urlpatterns = [
    path('polls/', include('polls.urls')),  # poll app routes under /polls/
    path('admin/', admin.site.urls),  # Django admin site
    path('accounts/', include('django.contrib.auth.urls')),  # built-in auth views (login, logout, ...)
    path('signup/', views.signup, name='signup'),  # custom registration view
]
| [
"konsoo@hotmail.com"
] | konsoo@hotmail.com |
2ece8e71c47ea8af26db3b8613ff6240f90944fc | 97ee4b2e87d96a661b6f1c0e29d07ed9f7f4ef28 | /admission/migrations/0029_auto_20180728_1456.py | 7355a473eefb01f904d04c819b0df9fb409da07e | [] | no_license | GHostEater/Portal | 0aac67bf74b3476b26959dd3c80484e1390f1399 | 502dff5cbe0ad13f490e2e0b2600d895fc631068 | refs/heads/master | 2020-09-07T04:55:20.199427 | 2019-11-30T15:50:04 | 2019-11-30T15:50:04 | 94,420,941 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,228 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2018-07-28 13:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 1.11): adds current-study detail fields and
    referee e-mail fields to the Application model, all optional."""
    dependencies = [
        ('admission', '0028_application_app_no'),
    ]
    operations = [
        migrations.AddField(
            model_name='application',
            name='curr_course',
            field=models.CharField(blank=True, max_length=256, null=True),
        ),
        migrations.AddField(
            model_name='application',
            name='curr_uni',
            field=models.CharField(blank=True, max_length=256, null=True),
        ),
        migrations.AddField(
            model_name='application',
            name='curr_year_of_entry',
            field=models.CharField(blank=True, max_length=256, null=True),
        ),
        migrations.AddField(
            model_name='application',
            name='email_dean',
            field=models.EmailField(blank=True, max_length=255, null=True),
        ),
        migrations.AddField(
            model_name='application',
            name='email_hod',
            field=models.EmailField(blank=True, max_length=255, null=True),
        ),
    ]
| [
"lasisi28@gmail.com"
] | lasisi28@gmail.com |
fd2d118e11cb5e6a64fa7a81d1e6788c194dd960 | 643edac50049ec50a193f7b32d783cde46a32e02 | /utils/utils.py | a4e9dc4aafc42960fb5be9be3372a4da92d37bd4 | [] | no_license | olavblj/thesis-pong | 150aee6f523e32ba50376105fe9d34fa0d9a6fca | 709a068bda599dccd9918ceb9a8898fb303babff | refs/heads/master | 2020-03-28T16:13:21.876047 | 2018-12-31T11:43:28 | 2018-12-31T11:43:28 | 148,671,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 871 | py | from datetime import datetime, timezone, timedelta
import numpy as np
from config import Path
from system_manager import SystemManager
sys_manager = SystemManager.get_instance()
def date_str(timestamp):
    """Convert a Unix timestamp to an aware datetime in UTC+2.

    Despite the name this returns a datetime object, not a string.
    """
    tz_plus_two = timezone(offset=timedelta(hours=2))
    return datetime.fromtimestamp(timestamp, tz=tz_plus_two)
def fetch_highscore():
    """Read the high-score file and return formatted ranking lines.

    Each file line is "name;score". Returns strings of the form
    "rank: score - name", best score first. Blank lines are skipped
    (the original crashed on them); ties keep file order (stable sort).
    """
    entries = []
    with open(Path.high_score_list) as infile:
        for line in infile:
            if not line.strip():
                continue
            name, score = [t.strip() for t in line.split(";")]
            entries.append((name, int(score)))
    # Plain stable sort replaces the numpy argsort/flip round-trip; the
    # unused enumerate index from the read loop is gone as well.
    entries.sort(key=lambda entry: entry[1], reverse=True)
    return ["{}: {} - {}".format(rank, score, name)
            for rank, (name, score) in enumerate(entries, start=1)]
| [
"olavlj@me.com"
] | olavlj@me.com |
4aff42044e083db2d05e023305df2e5c3c5219fb | 72d1bacb885515d02de7192ec2fb9318dcc6bc11 | /migrations/versions/4e966cd2775d_.py | 5cb10950086ab04ece77610b891b8e2c27da75f3 | [] | no_license | ellsye/zlktqa | 10b419bf69efda1d83c4cb6fd032584428c1c437 | 9d80258868c65d557d55927dc15ba4a845801062 | refs/heads/master | 2020-03-08T14:52:40.351284 | 2018-04-05T12:26:13 | 2018-04-05T12:26:13 | 128,196,746 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 981 | py | """empty message
Revision ID: 4e966cd2775d
Revises: f02c53fd8c48
Create Date: 2018-04-05 16:12:37.382270
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4e966cd2775d'
down_revision = 'f02c53fd8c48'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('question',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('title', sa.VARCHAR(length=100), nullable=False),
sa.Column('content', sa.Text(), nullable=False),
sa.Column('create_time', sa.DateTime(), nullable=True),
sa.Column('author_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['author_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('question')
# ### end Alembic commands ###
| [
"ells@dasudian.com"
] | ells@dasudian.com |
1db0677e6ea19f82689938367444e6926eff063b | 0b0d3246d39974cb8faff7d269da2d539415afab | /problem_python/p379.py | 80f05b909fe2051d1420340e3fec99855326600b | [] | no_license | xionghhcs/leetcode | 972e7ae4ca56b7100223630b294b5a97ba5dd7e8 | 8bd43dcd995a9de0270b8cea2d9a48df17ffc08b | refs/heads/master | 2020-03-07T17:18:08.465559 | 2019-09-29T11:11:26 | 2019-09-29T11:11:26 | 127,607,564 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,154 | py | class PhoneDirectory:
    def __init__(self, maxNumbers: int):
        """
        Initialize your data structure here
        @param maxNumbers - The maximum numbers that can be stored in the phone directory.
        """
        # nums[i] == 0 means number i is free; 1 means it is assigned.
        self.nums = [0]* maxNumbers
        self.maxNumbers = maxNumbers
def get(self) -> int:
"""
Provide a number which is not assigned to anyone.
@return - Return an available number. Return -1 if none is available.
"""
for i in range(self.maxNumbers):
if self.nums[i] == 0:
self.nums[i] = 1
return i
return -1
def check(self, number: int) -> bool:
"""
Check if a number is available or not.
"""
if self.nums[number] == 0:
return True
return False
    def release(self, number: int) -> None:
        """
        Recycle or release a number.
        """
        # Idempotent: releasing an already-free number just rewrites the 0.
        self.nums[number] = 0
# Your PhoneDirectory object will be instantiated and called as such:
# obj = PhoneDirectory(maxNumbers)
# param_1 = obj.get()
# param_2 = obj.check(number)
# obj.release(number)
| [
"xionghhcs@163.com"
] | xionghhcs@163.com |
e456ca431caac69054baecf48d19905f1ee2b55b | 3e0a05d3ad122a24427259d162717358b5365871 | /common/commonData.py | 54c545960bee527f966356471f01870c3d6cfd88 | [] | no_license | ly951107/pytest_Api | b128080a8cac728091dbb0434dae469cb887d890 | 6f2b7365b2912694cd2b9fb63784d319c81c9825 | refs/heads/master | 2020-05-05T10:22:30.326958 | 2019-04-07T09:39:35 | 2019-04-07T09:39:35 | 179,943,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | class CommonData:
modile="18210034706"
password="123456"
host="http://192.168.1.203:8083"
token=None
proxies={"http":"http://localhost:8888"}
#changeName——success
userId=136
userName="15935625020"
oldPwd="123456"
newPwd="951107"
#/user/saveOrUpdateUser
nickName_regiest="ye01"
userName_regiest="18899990001"
roleIds= "1"
#/ user / loadUserList
pageSize=30
pageCurrent=1
nickName_yenzheng="ye01"
userName_yenzheng="18899990001"
| [
"1982338935@qq.com"
] | 1982338935@qq.com |
cf4ab042a4fa03626c4f0acf22692693769810a3 | f8493a8b1fdf9b071c718c47ce9d5a4c896da555 | /apps/organization/migrations/0004_teacher_image.py | 0141e39e3e72bc1787ba9f6046a66a7fcb7d4ea2 | [] | no_license | ze25800000/Django-learning-project | 43c38ca30da00d41815397a271f29685c2118ac7 | fb7d1984945a25bbd4bfe75cfeec5699800b1cd5 | refs/heads/master | 2020-03-08T12:38:52.804741 | 2018-04-09T23:30:37 | 2018-04-09T23:30:37 | 128,132,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2018-04-06 16:12
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 1.9): adds an avatar ImageField to Teacher,
    uploading to a year/month-partitioned directory."""
    dependencies = [
        ('organization', '0003_auto_20180406_1159'),
    ]
    operations = [
        migrations.AddField(
            model_name='teacher',
            name='image',
            field=models.ImageField(default='', upload_to='teacher/%Y/%m', verbose_name='头像'),
        ),
    ]
| [
"ze258100000@sina.com"
] | ze258100000@sina.com |
64e6c4b3e5b32f69c893357ab399b23c309aaff6 | a9f7894aca35992dc90eecc7e4a3c4a1fd6fcb58 | /star_wars_planet/tests/conftest.py | 3a6e87687d081c50ec8722c1d493e684627231d9 | [] | no_license | vroxo/star_wars_planet_api | 8d10d2be4ff9247f129ba5edd0aa39b364551c6a | c3ee9c0ea66311e7bc20022b25b13c6f282a9b3c | refs/heads/master | 2022-12-21T10:40:00.610754 | 2020-04-17T17:11:09 | 2020-04-17T17:11:09 | 244,825,341 | 0 | 0 | null | 2022-12-08T03:44:04 | 2020-03-04T06:31:42 | Python | UTF-8 | Python | false | false | 952 | py | import pytest
from star_wars_planet.app import create_app, minimal_app
from star_wars_planet.extensions.commands import populate_db, truncate_db
from star_wars_planet.documents import Planet
@pytest.fixture(scope="session")
def min_app():
app = minimal_app(FORCE_ENV_FOR_DYNACONF="testing")
return app
@pytest.fixture(scope="session")
def app():
app = create_app(FORCE_ENV_FOR_DYNACONF="testing")
with app.app_context():
populate_db()
yield app
truncate_db()
@pytest.fixture(scope="session")
def client(app):
client = app.test_client()
yield client
@pytest.fixture(scope="session")
def planets(app):
with app.app_context():
return Planet.objects()
@pytest.fixture(scope="session")
def planet(app):
    """Create, persist, and return one sample Planet document."""
    with app.app_context():
        sample = Planet(
            name='TestePlanet',
            climate='TesteClimate',
            terrain='TesteTerrain',
            count_films=1,
        )
        sample.save()
        return sample
| [
"vitor@mobi2buy.com"
] | vitor@mobi2buy.com |
def list_name():
    """Return ``(paths, date)`` for the capture files recorded on 2015-02-05.

    Captures were taken every 5 minutes, so a full day has 288 slots.
    The archive for this day deviates from the schedule in two ways:

    * three captures are missing entirely (16:30, 16:35 and 16:40), and
    * two slots have an extra retry taken two seconds later
      (14:25:02 and 22:15:02).

    The returned list reproduces the archive contents exactly, in
    chronological order (287 entries), replacing the error-prone
    hand-written literal list this function used to contain.

    Returns:
        tuple[list[str], str]: relative file paths and the date string.
    """
    date = "2015-02-05"
    missing = {"16-30-00", "16-35-00", "16-40-00"}   # gaps in the archive
    retried = {"14-25-00", "22-15-00"}               # slots with an extra -02 capture
    paths = []
    for hour in range(24):
        for minute in range(0, 60, 5):
            stamp = "%02d-%02d-00" % (hour, minute)
            if stamp in missing:
                continue
            paths.append("%s/all-%s-%s.txt" % (date, date, stamp))
            if stamp in retried:
                # The retry file replaces the trailing "00" seconds with "02".
                paths.append("%s/all-%s-%s.txt" % (date, date, stamp[:-2] + "02"))
    return (paths, date)
| [
"jer11698.553@outlook.com"
] | jer11698.553@outlook.com |
90669fdf9b20e3de4f1c21a79131f12f0013be09 | 28bec0072dbb493148034e45bfe283b6c15c511e | /acorn_framework/train_tagger_network.py | 74e9c5c21c24d95b46466156d547fa22b49a0beb | [] | no_license | cmilke/vbf_selection | 2db299fd667fa565181153870f887cb91da23c5a | 1cc1f65d81047758dabfdc0c3cbf5e531c4ac1b8 | refs/heads/master | 2020-08-01T23:43:31.433028 | 2020-03-06T16:27:38 | 2020-03-06T16:27:38 | 211,161,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,507 | py | #!/usr/bin/env python
import sys
import math
import pickle
import numpy
import random
from acorn_backend.tagger_methods import selector_options
from acorn_backend.machine_learning.ML_tagger_base import ML_tagger_base
def train():
    """Train and evaluate the all-jets ML tagger on the 'JVT' event category.

    Reads pickled signal/background event dumps named after ``sys.argv[1]``,
    mixes and shuffles them with a fixed seed (reproducible runs), keeps only
    events with exactly two jets, and uses a 3/4 : 1/4 train/test split.

    Raises:
        RuntimeError: if no training events survive the selection.
    """
    training_category = 'JVT'
    tagger_to_train = ML_tagger_base(selector_options['all'], tag=False)

    # Randomly mix together signal and background events.
    # NOTE: pickle.load executes arbitrary code from its input -- only load
    # dumps produced by this framework.
    # BUGFIX: the files were previously opened without ever being closed.
    sample_prefix = 'data/output_' + sys.argv[1]
    with open(sample_prefix + '_train_sig.p', 'rb') as sig_file:
        data_dump_sig = pickle.load(sig_file)
    with open(sample_prefix + '_train_bgd.p', 'rb') as bgd_file:
        data_dump_bgd = pickle.load(bgd_file)
    event_count = 40000
    event_list = data_dump_sig[training_category].events[:event_count]
    event_list += data_dump_bgd[training_category].events[:event_count]
    random.seed(768234)  # fixed seed so the shuffle is reproducible
    random.shuffle(event_list)

    # Collect events and associated selections: only di-jet events are usable.
    input_list = [event for event in event_list if len(event.jets) == 2]
    training_cutoff = int(len(input_list) * (3 / 4))
    training_labels, testing_labels = [], []
    training_data = tagger_to_train.prepare_events(input_list[:training_cutoff], training_labels)
    testing_data = tagger_to_train.prepare_events(input_list[training_cutoff:], testing_labels)
    print(training_data)
    if not training_labels:
        raise RuntimeError('Data List is Empty. Aborting!')
    tagger_to_train.train_model(training_data, training_labels)
    tagger_to_train.test_model(testing_data, testing_labels)


train()
| [
"chrisdmilke@gmail.com"
] | chrisdmilke@gmail.com |
ab32a9e3ccd00d392b248cff6e1bae34efc0a5fb | 9d5183506ef06e2de297124222e0186974dcb065 | /manage.py | 29760bcd2f4a20f03ccc370f857efc9ad6cea405 | [] | no_license | ardaellidokuz/eycof | 2d33fb6aae4ebc66e5b7fb9c29632a979394d799 | cf0f8e3ef7f96acb05066bdaf943edc874d758ce | refs/heads/master | 2023-04-13T15:35:24.123790 | 2021-04-25T14:06:53 | 2021-04-25T14:06:53 | 361,445,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 661 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'eycof.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as import_error:
        # Re-raise with a friendlier hint while keeping the original cause.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from import_error
    execute_from_command_line(sys.argv)
# Standard Django entry point: run the CLI only when executed directly.
if __name__ == '__main__':
    main()
| [
"ardaellidokuz@gmail.com"
] | ardaellidokuz@gmail.com |
a296ad39150d17d9697e34bbbaf44985d69eceae | fbe68d84e97262d6d26dd65c704a7b50af2b3943 | /third_party/expected-lite/script/update-version.py | 034e4f00671a011ffed11919b8981d460d2c1063 | [
"MIT",
"LicenseRef-scancode-free-unknown",
"BSL-1.0"
] | permissive | thalium/icebox | c4e6573f2b4f0973b6c7bb0bf068fe9e795fdcfb | 6f78952d58da52ea4f0e55b2ab297f28e80c1160 | refs/heads/master | 2022-08-14T00:19:36.984579 | 2022-02-22T13:10:31 | 2022-02-22T13:10:31 | 190,019,914 | 585 | 109 | MIT | 2022-01-13T20:58:15 | 2019-06-03T14:18:12 | C++ | UTF-8 | Python | false | false | 3,877 | py | #!/usr/bin/env python
#
# Copyright 2017-2018 by Martin Moene
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#
# script/update-version.py
#
from __future__ import print_function
import argparse
import os
import re
import sys
# Configuration:
table = (
    # path, substitute find, substitute format
    # Each entry is (file path, regex matching the old version marker,
    # replacement text with {major}/{minor}/{patch} placeholders).
    # Commented-out entries are kept for reference and are not processed.
    ( 'CMakeLists.txt'
    , r'\W{2,4}VERSION\W+([0-9]+\.[0-9]+\.[0-9]+)\W*$'
    , ' VERSION {major}.{minor}.{patch}' )
    , ( 'CMakeLists.txt'
    , r'set\W+expected_lite_version\W+"([0-9]+\.[0-9]+\.[0-9]+)"\W+$'
    , 'set( expected_lite_version "{major}.{minor}.{patch}" )\n' )
    # , ( 'example/cmake-pkg/CMakeLists.txt'
    # , r'set\W+expected_lite_version\W+"([0-9]+\.[0-9]+(\.[0-9]+)?)"\W+$'
    # , 'set( expected_lite_version "{major}.{minor}" )\n' )
    #
    # , ( 'script/install-xxx-pkg.py'
    # , r'\expected_lite_version\s+=\s+"([0-9]+\.[0-9]+\.[0-9]+)"\s*$'
    # , 'expected_lite_version = "{major}.{minor}.{patch}"\n' )
    , ( 'conanfile.py'
    , r'version\s+=\s+"([0-9]+\.[0-9]+\.[0-9]+)"\s*$'
    , 'version = "{major}.{minor}.{patch}"' )
    , ( 'include/nonstd/expected.hpp'
    , r'\#define\s+expected_lite_MAJOR\s+[0-9]+\s*$'
    , '#define expected_lite_MAJOR {major}' )
    , ( 'include/nonstd/expected.hpp'
    , r'\#define\s+expected_lite_MINOR\s+[0-9]+\s*$'
    , '#define expected_lite_MINOR {minor}' )
    , ( 'include/nonstd/expected.hpp'
    , r'\#define\s+expected_lite_PATCH\s+[0-9]+\s*$'
    , '#define expected_lite_PATCH {patch}\n' )
)
# End configuration.
def readFile( in_path ):
    """Return the full text content of the file at *in_path*."""
    with open( in_path, 'r' ) as in_file:
        return in_file.read()
def writeFile( out_path, contents ):
    """Write *contents* to *out_path*, replacing any previous content."""
    with open( out_path, 'w' ) as handle:
        handle.write( contents )
def replaceFile( output_path, input_path ):
    """Move *output_path* over *input_path* (race-free on Python >= 3.3)."""
    if sys.version_info < (3, 3):
        # No atomic replace available: remove then rename.
        os.remove( input_path )
        os.rename( output_path, input_path )
    else:
        os.replace( output_path, input_path )
def editFileToVersion( version, info, verbose ):
    """Rewrite one file so its version marker shows *version* ('M.m.p').

    *info* is a (path, version-regex, replacement-format) triple; the file
    is rewritten via a temporary sibling and then atomically swapped in.
    """
    major, minor, patch = version.split('.')
    in_path, ver_re, ver_fmt = info
    out_path = in_path + '.tmp'
    new_text = ver_fmt.format( major=major, minor=minor, patch=patch )
    if verbose:
        print( "- {path} => '{text}':".format( path=in_path, text=new_text.strip('\n') ) )
    updated = re.sub( ver_re, new_text, readFile( in_path ), count=0, flags=re.MULTILINE )
    writeFile( out_path, updated )
    replaceFile( out_path, in_path )
def editFilesToVersion( version, table, verbose ):
    """Apply *version* to every (path, regex, format) entry in *table*."""
    if verbose:
        print( "Editing files to version {v}:".format(v=version) )
    for entry in table:
        editFileToVersion( version, entry, verbose )
def editFilesToVersionFromCommandLine():
    """Parse the command line and update the version in the configured paths."""
    arg_parser = argparse.ArgumentParser(
        description='Update version number in files.',
        epilog="""""",
        formatter_class=argparse.RawTextHelpFormatter)
    arg_parser.add_argument(
        'version',
        metavar='version',
        type=str,
        nargs=1,
        help='new version number, like 1.2.3')
    arg_parser.add_argument(
        '-v', '--verbose',
        action='store_true',
        help='report the name of the file being processed')
    parsed = arg_parser.parse_args()
    editFilesToVersion( parsed.version[0], table, parsed.verbose )
# Script entry point: only run when executed directly, not when imported.
if __name__ == '__main__':
    editFilesToVersionFromCommandLine()
# end of file
| [
"benoit.amiaux@gmail.com"
] | benoit.amiaux@gmail.com |
e7e55f1d520c4c58b05c92f01c099627c0f2f90f | d1ae9caf754fa31b0d452aacf619b7f286faaeac | /Chapter04/ch4-mod/counter/countdown.py | ad32bd1be211960e48c6267238c28d8a8cb68748 | [
"MIT"
] | permissive | PacktPublishing/Tkinter-GUI-Programming-by-Example | 1a89201334030b0dd7a4cd7251da9cedbe3042cd | 2ccea65c87d9c4da3438dd5cc230ec2688b2eba0 | refs/heads/master | 2023-08-19T10:06:05.179585 | 2023-01-30T08:52:40 | 2023-01-30T08:52:40 | 130,801,365 | 166 | 96 | MIT | 2023-08-10T13:11:40 | 2018-04-24T05:35:21 | Python | UTF-8 | Python | false | false | 118 | py | def count_down(max):
numbers = [i for i in range(max)]
for num in numbers[::-1]:
print(num, end=', ')
| [
"subhalaxmin@packtpub.com"
] | subhalaxmin@packtpub.com |
14a350e33c120435a0275dc517f3b6d994b30a1c | 7db97444b6af7de9392f0290fcc6768264ef04df | /udp_send.py | fe719d707c95ad7a8f7d4530a34398619a405bc4 | [] | no_license | acreel21/Cloud_Controlled_Mobile_Robot_Design | 5ffcbcae22175fe04a441bf1f2656227137134f1 | 16b2cf7cd8c84a53f4c093c3ffff4df5c027af17 | refs/heads/master | 2020-05-14T12:57:42.236614 | 2019-04-29T18:16:45 | 2019-04-29T18:16:45 | 181,803,293 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,124 | py | import socket
import sys
from ctypes import *
import getch
# Command state sent to the robot:
#   v -- speed value, clamped to 0-255 by the arrow-key handler below
#   t -- servo angle in degrees (0/90/180/270, chosen via a/s/d/f)
v = 0
t = 0
# Address of the robot's UDP command listener.
UDP_IP = "192.168.1.99"
UDP_PORT = 5005
sock = socket.socket(socket.AF_INET, # Internet
                     socket.SOCK_DGRAM) # UDP
# connect() on a UDP socket only fixes the default destination address;
# no handshake happens here.
sock.connect((UDP_IP, UDP_PORT))
class Robot(Structure):
    """Command packet sent to the robot; layout matches the receiver's C struct."""
    _fields_ = [
        ("velocity", c_double),
        ("theta", c_double),
        ("mode", c_int),
    ]
class Data(Structure):
    """Telemetry packet received from the robot: position (X, Y) and heading phi."""
    _fields_ = [
        ("X", c_double),
        ("Y", c_double),
        ("phi", c_double),
    ]
# Print the keyboard control reference for the operator.
print("Welcome, the controls for the robot are:")
print("q is to exit")
print("r is to restart the robot")
print("n is receive data back from the robot")
print("Space bar is to stop")
print("a is to servo 0 deg")
print("s is to servo 90 deg")
print("d is to servo 180 deg")
print("f is to servo 270 deg")
print("Up arrow is to increase speed")
print("Down arrow is to decrease speed")
# Main control loop.  NOTE: the bare `print "X=..."` statement below makes
# this a Python 2 script.  Each keypress updates the command state and/or
# sends a Robot packet over the connected UDP socket.
while True:
    b = getch.getch() #read one key; a/s/d/f set the theta (servo) term
    print("b is equal to: ") #for testing
    print(b) #for testing
    # a/s/d/f select the servo angle in degrees.
    if (b == 'a'):
        t = 0
        print("t is equal to: ")
        print(t)
    elif (b == 's'):
        t = 90
        print("t is equal to: ")
        print(t)
    elif (b == 'd'):
        t = 180
        print("t is equal to: ")
        print(t)
    elif (b == 'f'):
        t = 270
        print("t is equal to: ")
        print(t)
    if (b == 'r'): #restart: mode 1 packet with zeroed speed/angle
        sendRobot = Robot(0,0,1) #parse data
        sock.send(sendRobot) #send parse data
    if (b == 'n'): #data request: mode 2 packet, then read the pose reply
        sendRobot = Robot(v,t,2) #parse data
        sock.send(sendRobot) #send parse data
        buff = sock.recv(sizeof(Data))
        myData = Data.from_buffer_copy(buff)
        print "X=%d, Y=%d, phi=%f" % (myData.X, myData.Y, myData.phi)
    if (b == ' '):
        sendRobot = Robot(0,0,0) #parse data
        sock.send(sendRobot) #send parse data
    if (b == 'q'):
        print("Exiting")
        sys.exit()
    # NOTE(review): this else pairs only with the 'q' check above, so it runs
    # for every key other than 'q' -- including the a/s/d/f/r/n branches.
    else:
        # Appears to look for an arrow-key escape sequence on the following
        # reads (arrow keys emit ESC '[' 'A'/'B') -- confirm against getch.
        if (getch.getch() == '\033'): #get velocity term
            getch.getch()
            c = getch.getch()
            if (c == 'A'):
                # Up arrow: raise speed, clamped to 255.
                v += 17
                if (v > 255):
                    v = 255
                print("v is equal to: ") #for testing
                print(v) #for testing
            elif (c == 'B'):
                # Down arrow: lower speed, clamped to 0.
                v -= 17
                if (v < 0):
                    v = 0
                print("v is equal to: ") #for testing
                print(v) #for testing
        sendRobot = Robot(v,t,0) #parse data
sock.send(sendRobot) #send parse data | [
"acreel@gmu.edu"
] | acreel@gmu.edu |
d5940530bc4553fabdeaed10fa92f452da0d3571 | 20a34ca150213244f9bf2fdf6641e0c282d492e4 | /test_app/models.py | 7b982ee6fcda4bf63b506378acfecdc1f4dfa91c | [] | no_license | SNCKER/CVE-2020-7471 | 3c1060673c0b2c27124da756a1b686eefadb39c3 | 8a9f5c098f484d2b04d481f3088cc235ad5cc9cc | refs/heads/master | 2021-01-09T10:10:24.467228 | 2020-02-22T01:42:03 | 2020-02-22T01:42:03 | 242,260,909 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | from django.db import models
# Create your models here.
class testModel(models.Model):
    """Minimal two-column model used by this test app's queries."""
    # Free-form text columns; no constraints beyond Django's TextField defaults.
    field1 = models.TextField()
    field2 = models.TextField()
| [
"948375961@qq.com"
] | 948375961@qq.com |
63d862cdc8edcd102a17e39a952df8976a4cccc3 | 93f266195cf5081ceb7ed356ef3dbf8712ca034a | /softwareprocess/Sample.py | 5be4c75a58fdacdf90e36e30d05e12004e3e1379 | [] | no_license | zzz0069/Navigation-positioning | b92cc9937adabe3323d4dc5c09f4c0b448033e9a | 4c06836dda215b7d87cf0c2be9fdcdcbafaf771d | refs/heads/master | 2021-03-27T19:25:29.644070 | 2018-04-19T19:13:10 | 2018-04-19T19:13:10 | 81,520,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,821 | py | import math
class Sample(object):
    """Student's t-distribution for a sample with ``n`` degrees of freedom.

    ``p(t, tails)`` returns the one-tailed P(T <= t) or two-tailed
    P(-t <= T <= t) cumulative probability, computed by numerically
    integrating the t-distribution density with an adaptive composite
    Simpson's rule.
    """

    # outward facing methods
    def __init__(self, n=None):
        """Store the degrees of freedom ``n``; must be an int with 2 <= n < 30."""
        functionName = "Sample.__init__: "
        if n is None:
            raise ValueError(functionName + "invalid n")
        if not isinstance(n, int):
            raise ValueError(functionName + "invalid n")
        if (n < 2) or (n >= 30):
            raise ValueError(functionName + "invalid n")
        self.n = n

    def getN(self):
        """Return the degrees of freedom supplied at construction."""
        return self.n

    def p(self, t=None, tails=1):
        """Return the cumulative probability for the t-statistic ``t``.

        t:     non-negative float test statistic.
        tails: 1 -> P(T <= t); 2 -> P(-t <= T <= t).

        Raises ValueError for invalid arguments or if the computed
        probability exceeds 1.0.
        """
        functionName = "Sample.p: "
        if t is None:
            raise ValueError(functionName + "missing t")
        if not isinstance(t, float):
            raise ValueError(functionName + "invalid t")
        if t < 0.0:
            raise ValueError(functionName + "invalid t")
        if not isinstance(tails, int):
            raise ValueError(functionName + "invalid tails")
        if (tails != 1) & (tails != 2):
            raise ValueError(functionName + "invalid tails")
        constant = self.calculateConstant(self.n)
        integration = self.integrate(0.0, t, self.n, self.f)
        if tails == 1:
            # One tail: add the lower half of the symmetric distribution.
            result = constant * integration + 0.5
        else:
            # Two tails: double the area from 0 to t.
            result = constant * integration * 2
        if result > 1.0:
            raise ValueError(functionName + "result > 1.0")
        return result

    # internal methods
    def gamma(self, x):
        """Gamma function for positive integer or half-integer ``x``, by recursion."""
        if x == 1:
            return 1
        if x == 0.5:
            return math.sqrt(math.pi)
        return (x - 1) * self.gamma(x - 1)

    def calculateConstant(self, n):
        """Normalizing constant of the t-density with ``n`` degrees of freedom."""
        n = float(n)
        numerator = self.gamma((n + 1.0) / 2.0)
        denominator = self.gamma(n / 2.0) * math.sqrt(n * math.pi)
        return numerator / denominator

    def f(self, u, n):
        """Unnormalized t-distribution density evaluated at ``u``."""
        n = float(n)
        base = 1 + (u ** 2) / n
        exponent = -(n + 1.0) / 2
        return base ** exponent

    def integrate(self, lowBound, highBound, n, f):
        """Adaptive composite Simpson integration of ``f`` over [lowBound, highBound].

        Doubles the slice count until successive estimates agree to a
        relative tolerance of 0.1%.
        NOTE(review): the interior-node positions start from 0, so this
        assumes lowBound == 0.0 -- which is how ``p`` calls it.
        """
        # BUGFIX: a zero-width interval (e.g. p(t=0.0)) previously produced a
        # ZeroDivisionError in the relative-error test below; the integral
        # over an empty interval is exactly 0.
        if highBound == lowBound:
            return 0.0
        epsilon = 0.001
        simpsonOld = 0.0
        simpsonNew = epsilon
        s = 4
        while abs((simpsonNew - simpsonOld) / simpsonNew) > epsilon:
            simpsonOld = simpsonNew
            w = (highBound - lowBound) / s
            oddSum = 0
            evenSum = 0
            coef1 = 2 * w
            coef2 = w
            while coef1 < highBound:
                oddSum = oddSum + 2 * f(coef1, n)
                coef1 = coef1 + 2 * w
            while coef2 < highBound:
                evenSum = evenSum + 4 * f(coef2, n)
                coef2 = coef2 + 2 * w
            simpsonNew = (w / 3) * (f(lowBound, n) + f(highBound, n) + oddSum + evenSum)
            s = s * 2
        return simpsonNew
| [
"zzz0069@auburn.edu"
] | zzz0069@auburn.edu |
9f13dfc1872ec53f15943d98dfd1dcd9ed0eaf09 | c43e8f4cc55c9aa0db70408ed56f74f1eb8de1d2 | /main.py | d56a3198af8d81d5fd00a7e941ae100327812eca | [] | no_license | shivaverma/Score-Time-Detection | 400e08824a9bfefca224f4052d3fe734700ae8e5 | 6dbf185e1d9e6035c4ccaf937d94fba1776d404d | refs/heads/master | 2022-12-04T22:17:01.038636 | 2019-05-29T12:03:58 | 2019-05-29T12:03:58 | 155,692,352 | 4 | 2 | null | 2022-11-22T03:05:22 | 2018-11-01T09:33:29 | Jupyter Notebook | UTF-8 | Python | false | false | 6,819 | py | import cv2
import torch
import random
import collections
import numpy as np
from PIL import Image
import torch.nn as nn
from torch.autograd import Variable
import torchvision.transforms as transforms
class strLabelConverter(object):
    """Map characters to/from the integer codes used by the CTC output layer.

    Index 0 is reserved for the CTC blank; characters occupy 1..len(alphabet).
    The appended '-' renders the blank when decoding raw output.
    """

    def __init__(self, alphabet):
        alphabet = alphabet.lower()
        self.alphabet = alphabet + '-'
        # Position 0 is reserved for the blank, so codes start at 1.
        self.dict = {char: index + 1 for index, char in enumerate(alphabet)}

    def encode(self, text):
        """Return (codes, length) IntTensors for the given word."""
        codes = [self.dict[char.lower()] for char in text]
        return (torch.IntTensor(codes), torch.IntTensor([len(codes)]))

    def decode(self, t, length, raw=False):
        """Turn a code tensor back into text (CTC collapse unless ``raw``)."""
        length = length[0]
        assert t.numel() == length, "text with length: {} does not match declared length: {}".format(t.numel(), length)
        if raw:
            # One character per position; blanks render as '-'.
            return ''.join(self.alphabet[code - 1] for code in t)
        decoded = []
        for index in range(length):
            code = t[index]
            repeat = index > 0 and t[index - 1] == code
            # Drop blanks (0) and immediate repeats -- standard CTC collapse.
            if code != 0 and not repeat:
                decoded.append(self.alphabet[code - 1])
        return ''.join(decoded)
class BidirectionalLSTM(nn.Module):
    """Bidirectional LSTM followed by a per-timestep linear projection."""

    def __init__(self, nIn, nHidden, nOut):
        super(BidirectionalLSTM, self).__init__()
        # bidirectional=True doubles the feature size fed to the projection.
        self.rnn = nn.LSTM(nIn, nHidden, bidirectional=True)
        self.embedding = nn.Linear(nHidden * 2, nOut)

    def forward(self, input):
        features, _ = self.rnn(input)
        seq_len, batch, hidden = features.size()
        # Flatten time and batch so one Linear call covers every step.
        projected = self.embedding(features.view(seq_len * batch, hidden))
        return projected.view(seq_len, batch, -1)
class CRNN(nn.Module):
    """CRNN text recognizer: a 7-layer CNN feature extractor followed by a
    2-layer bidirectional LSTM head emitting 37-way (36 chars + CTC blank)
    scores per horizontal position.

    NOTE: the add_module names ('conv0', 'relu0', ...) are the state_dict
    keys of the pretrained checkpoint loaded in predict() -- do not rename.
    """
    def __init__(self):
        super(CRNN, self).__init__()
        cnn = nn.Sequential()
        # Shape comments assume the 1x32x100 grayscale input built in predict().
        cnn.add_module('conv0', nn.Conv2d(1, 64, 3, 1, 1)) # Input, Output, Kernel, Stride, Padding
        cnn.add_module('relu0', nn.ReLU(True))
        cnn.add_module('pooling0', nn.MaxPool2d(2, 2)) # 64x16x64
        cnn.add_module('conv1', nn.Conv2d(64, 128, 3, 1, 1))
        cnn.add_module('relu1', nn.ReLU(True))
        cnn.add_module('pooling1', nn.MaxPool2d(2, 2)) # 128x8x32
        cnn.add_module('conv2', nn.Conv2d(128, 256, 3, 1, 1))
        cnn.add_module('batchnorm2', nn.BatchNorm2d(256))
        cnn.add_module('relu2', nn.ReLU(True))
        cnn.add_module('conv3', nn.Conv2d(256, 256, 3, 1, 1))
        cnn.add_module('relu3', nn.ReLU(True))
        # Asymmetric pooling: halve the height but keep horizontal resolution.
        cnn.add_module('pooling2', nn.MaxPool2d((2, 2), (2, 1), (0, 1))) # 256x4x16
        cnn.add_module('conv4', nn.Conv2d(256, 512, 3, 1, 1))
        cnn.add_module('batchnorm4', nn.BatchNorm2d(512))
        cnn.add_module('relu4', nn.ReLU(True))
        cnn.add_module('conv5', nn.Conv2d(512, 512, 3, 1, 1))
        cnn.add_module('relu5', nn.ReLU(True))
        cnn.add_module('pooling3', nn.MaxPool2d((2, 2), (2, 1), (0, 1))) # 512x2x16
        cnn.add_module('conv6', nn.Conv2d(512, 512, 2, 1, 0)) # 512x1x16
        cnn.add_module('batchnorm6', nn.BatchNorm2d(512))
        cnn.add_module('relu6', nn.ReLU(True))
        self.cnn = cnn
        self.rnn = nn.Sequential(
            BidirectionalLSTM(512, 256, 256), # Input, Hidden, Output
            BidirectionalLSTM(256, 256, 37)) # Final output: 37 classes
    def forward(self, input):
        # conv features
        conv = self.cnn(input)
        b, c, h, w = conv.size()
        # The CNN must collapse the image height to a single row before the RNN.
        assert h == 1, "the height of conv must be 1"
        conv = conv.squeeze(2)
        # Reorder to (sequence, batch, features) as nn.LSTM expects.
        conv = conv.permute(2, 0, 1) # [w, b, c]
        # rnn features
        output = self.rnn(conv)
        return output
class resizeNormalize(object):
    """Resize a PIL image and normalize it to a tensor in [-1, 1]."""

    def __init__(self, size, interpolation=Image.BILINEAR):
        self.size = size
        self.interpolation = interpolation
        self.toTensor = transforms.ToTensor()

    def __call__(self, img):
        # ToTensor yields [0, 1]; the in-place sub/div maps that to [-1, 1].
        tensor = self.toTensor(img.resize(self.size, self.interpolation))
        return tensor.sub_(0.5).div_(0.5)
def predict(image_name):
    """OCR one cropped scoreboard image with the pretrained CRNN.

    Loads ./model/crnn.pth, runs ./data/<image_name> through the network,
    greedy-decodes the per-column predictions, prints "raw ==> clean", and
    returns the collapsed (clean) string.
    """
    model_path = './model/crnn.pth'
    img_path = './data/' + image_name
    alphabet = '0123456789abcdefghijklmnopqrstuvwxyz'
    model = CRNN()
    if torch.cuda.is_available():
        model = model.cuda()
    # print('loading pretrained model from %s' % model_path)
    # NOTE(review): torch.load without map_location assumes the checkpoint's
    # original device is available -- confirm for CPU-only machines.
    model.load_state_dict(torch.load(model_path))
    converter = strLabelConverter(alphabet)
    transformer = resizeNormalize((100, 32))
    # Grayscale ('L') to match the single input channel of conv0.
    image = Image.open(img_path).convert('L')
    image = transformer(image)
    if torch.cuda.is_available():
        image = image.cuda()
    # Add the batch dimension expected by the model.
    image = image.view(1, *image.size())
    image = Variable(image)
    model.eval()
    preds = model(image)
    # x is maximum value out of 37 characters and preds is corresponding alphabet
    x, preds = preds.max(2)
    preds = preds.transpose(1, 0).contiguous().view(-1)
    preds_size = Variable(torch.IntTensor([preds.size(0)]))
    raw_pred = converter.decode(preds.data, preds_size.data, raw=True) # 26 prediction by each output layer
    sim_pred = converter.decode(preds.data, preds_size.data, raw=False) # clean prediction
    print('%s ==> %s' %(raw_pred, sim_pred))
    return sim_pred
def load_image(name):
    """Read data/<name> as grayscale and scale it to 1280x720."""
    frame = cv2.imread("data/" + name, cv2.IMREAD_GRAYSCALE)
    frame = cv2.resize(frame, (1280, 720))
    print(frame.shape)
    print("Original Frame:")
    return frame
def crop(box, name, img):
    """Cut img[box[0]:box[1], box[2]:box[3]] and save it as data/<name>."""
    region = img[box[0]:box[1], box[2]:box[3]]
    print(region.shape)
    cv2.imwrite('data/' + name, region)
def create_result():
image = load_image("demo.png")
time_box = [43, 70, 105, 182]
score_box = [43, 70, 257, 327]
team1_box = [43, 70, 190, 250]
team2_box = [43, 70, 335, 390]
crop(time_box, "time.png", image)
crop(score_box, "score.png", image)
crop(team1_box, "team1.png", image)
crop(team2_box, "team2.png", image)
time = predict("time.png")
if len(time) == 5:
time = time[:2] + time[3:]
time = time[:2] + ":" + time[2:]
team1 = predict("team1.png")
team2 = predict("team2.png")
score = predict("score.png")
if len(score) == 3:
score = score[:1] + score[2:]
score = score[:1] + "-" + score[1:]
result = {"Time": time, "Score": score, "Team_1": team1.upper(), "Team_2": team2.upper()}
return result
if __name__ == "__main__":
create_result()
| [
"shivajbd@gmail.com"
] | shivajbd@gmail.com |
2c48eb95fdde29c4ebdeaea526a1f270eb66f30e | 744096e063ffb4cdb017f60e6dfae410a51c789a | /AE/a11_noise1.py | cf32a655f4f90e0fe71c9ecb3f4b296da12e79f3 | [] | no_license | elf0508/Study-bit | 59ddab507b02c13a45913c05a4799ff946e63f95 | a773d7643cbb1c0008e7ea01c32615c9e6e3678c | refs/heads/master | 2022-12-31T11:53:44.344693 | 2020-10-16T09:04:01 | 2020-10-16T09:04:01 | 270,950,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,082 | py | # PCA : 선형
# 오토인코더 (autoencoder) : 비선형
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
import numpy as np
def autoencoder(hidden_layer_size):
model = Sequential()
model.add(Dense(units = hidden_layer_size, input_shape = (784, ), activation = 'relu'))
model.add(Dense(units = 784, activation = 'sigmoid'))
return model
from tensorflow.keras.datasets import mnist
train_set, test_set = mnist.load_data()
x_train, y_train = train_set
x_test, y_test = test_set
x_train = x_train.reshape((x_train.shape[0], x_train.shape[1]*x_train.shape[2]))
# 60000 , 784
x_test = x_test.reshape((x_test.shape[0], x_test.shape[1]*x_test.shape[2]))
x_train = x_train/255.
x_test = x_test/255.
# noise 추가
x_train_noised = x_train + np.random.normal(0, 0.1, size=x_train.shape)
x_test_noised = x_test + np.random.normal(0, 0.1, size=x_test.shape)
# x_train_noised = x_train + np.random.normal(0, 0.5, size=x_train.shape)
# x_test_noised = x_test + np.random.normal(0, 0.5, size=x_test.shape)
x_train_noised = np.clip(x_train_noised, a_min=0, a_max=1)
x_test_noised = np.clip(x_test_noised, a_min=0, a_max=1)
model = autoencoder(hidden_layer_size = 154)
# model = autoencoder(hidden_layer_size = 32)
# model.compile(optimize = 'adam', loss = 'mse', metrics = ['acc'])
# loss: 0.0104 - acc: 0.0108
model.compile(optimize = 'adam', loss = 'binary_crossentropy', metrics = ['acc'])
# loss: 0.0941 - acc: 0.8141
model.fit(x_train_noised, x_train, epochs = 20, batch_size = 128)
# model.fit(x_train_noised, x_train_noised, epochs = 10) # <-- 노이즈 제거 훈련 부분
output = model.predict(x_test)
from matplotlib import pyplot as plt
import random
fig, ((ax1, ax2, ax3, ax4, ax5), (ax6, ax7, ax8, ax9,ax10),
(ax11, ax12, ax13, ax14, ax15)) = \
plt.subplots(3, 5, figsize=(20, 7))
# 이미지 다섯 개를 무작위로 고른다
random_images = random.sample(range(output.shape[0]), 5)
# 원본(입력) 이미지를 맨 위에 그린다 / 잡음 없다.
for i, ax in enumerate([ax1, ax2, ax3, ax4, ax5]):
ax.imshow(x_test[random_images[i]].reshape(28, 28), cmap = 'gray')
# ax.imshow(x_test[random_images[i]].reshape(28, 28), cmap = 'gray')
if i ==0:
ax.set_ylabel('INPUT', size = 40)
ax.grid(False)
ax.set_xticks([])
ax.set_yticks([])
# 잡음 있다.
for i, ax in enumerate([ax6, ax7, ax8, ax9, ax10]):
ax.imshow(x_test_noised[random_images[i]].reshape(28, 28), cmap = 'gray')
# ax.imshow(x_test[random_images[i]].reshape(28, 28), cmap = 'gray')
if i ==0:
ax.set_ylabel('NOISE', size = 40)
ax.grid(False)
ax.set_xticks([])
ax.set_yticks([])
# 오토인코더가 출력한 이미지를 마지막에 그린다.
for i, ax in enumerate([ax11, ax12, ax13, ax14, ax15]):
ax.imshow(output[random_images[i]].reshape(28, 28), cmap = 'gray')
if i ==0:
ax.set_ylabel('OUTPUT', size = 40)
ax.grid(False)
ax.set_xticks([])
ax.set_yticks([])
plt.show() | [
"elf0508@naver.com"
] | elf0508@naver.com |
063d9dc1da74391f81c536fcc40be7623033109a | 025119796f1003b32d8f44d7e471f19b4380a592 | /Triviafile.py | 9ed3092dd8b162eb02b7458c7ef78ddb92a1a992 | [] | no_license | dzng1234/dz1234.github.io | b21eb774cd73cb2df5c81eb993387bdf76e43b63 | 87471a3e63e5d946bbc785a50ad9b6ff0f140b7d | refs/heads/master | 2020-07-17T22:42:14.843388 | 2020-01-09T15:56:39 | 2020-01-09T15:56:39 | 206,115,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,336 | py | def intro():
print ("Welcome to the Sport trivia game!")
print ("Which sport would you like to play?")
print ("Press f for Football, s for Soccer, and b for Basketball")
levelselect= raw_input()
if levelselect==("f"):
footballtrivia()
elif levelselect==("s"):
soccertrivia()
else:
basketballtrivia()
def levelselect():
print ("\nWhich category would you like to play next?")
print ("Press f for football, s for soccor, and b for basketball")
print ("Press e to end the game")
levelselect= raw_input()
if levelselect==("f"):
arttrivia()
elif levelselect==("s"):
soccertrivia()
elif levelselect==("b"):
print ("Thank you for playing!")
else:
basketballtrivia()
def footballtrivia():
print ("Welcome to the football category of the game!")
aq1=("How many NFL teams has Los Angeles had in it history?")
aq2=("In their career, who has caught the most touchdown passes from Tom Brady?")
aq3=("Which of these is not a real penalty?")
aq4=("What position did Troy Polumalu play?")
aq5=("What is the average success rate ow two-point conversions?")
options1= ("a. 1\nb. 2\nc. 3\nd. 4")
options2= ("a. Rob Gronkowski\nb. Wes Welker\nc. Randy Moss\nd. Antonio Brown")
options3= ("a. Neutral Zone Infraction\nb. Horse Collar Tackle\nc. Uneccesary Roughness\nd. Pulling the Guard")
options4= ("a. Line Backer\nb. Strong Safety\nc. Tight End\nd. Running Back")
options5= ("a. 14%\nb. 21%\nc. 34%\nd. 48%")
score=0
print (aq1)
print (options1)
response= raw_input("Press the letter of the answer you think is correct\n")
if response=="b":
print ("Correct!")
score=score+1
print ("Your score is", score,"/1")
else:
print("The correct answer is b")
score=score+0
print ("Your score is", score,"/1")
print (aq2)
print (options2)
response= raw_input("Press the letter of the answer you think is correct\n")
if response=="a":
print ("Correct!")
score=score+1
print ("Your score is", score,"/2")
else:
print("The correct answer is a")
score=score+0
print ("Your score is", score,"/2")
print (aq3)
print (options3)
response= raw_input("Press the letter of the answer you think is correct\n")
if response=="d":
print ("Correct!")
score=score+1
print ("Your score is", score,"/3")
else:
print("The correct answer is d")
score=score+0
print ("Your score is", score,"/3")
print (aq4)
print (options4)
response= raw_input("Press the letter of the answer you think is correct\n")
if response=="b":
print ("Correct!")
score=score+1
print ("Your score is", score,"/4")
else:
print("The correct answer is b")
score=score+0
print ("Your score is", score,"/4")
print (aq5)
print (options5)
response= raw_input("Press the letter of the answer you think is correct\n")
if response=="d":
print ("Correct!")
score=score+1
if (score==5):
print ("Good job on a perfect 5/5!")
else:
print("Your score is", score,"/5")
else:
print("The correct answer is d")
score=score+0
print ("Your final score is", score,"/5")
levelselect()
def soccertrivia():
print ("Welcome to the soccer category of the game!")
mq1=("Who won the Champions League last year?")
mq2=("Which player has never played on FC Barcelona?")
mq3=("Who scored the most goals in their career?")
mq4=("Which player did not score in the final of the 2018 World Cup?")
mq5=("What player had the most expensive transfer?")
options1= ("a. Tottenhem\nb. Liverpool\nc. Manchester City\nd. FC Barcelona")
options2= ("a. Ibrahamovic\nb. Neymar\nc. Ronaldo\nd. Ronaldhino")
options3= ("a. Josef Bican\nb. Pele\nc. Ronaldo\nd. Messi")
options4= ("a. Mabppe\nb. Modric\nc. Pogba\nd. Manduzkic")
options5= ("a. Ronaldo\nb. Van Dijk\nc.Neymar\nd. Messi")
score=0
print (mq1)
print (options1)
response= raw_input("Press the letter of the answer you think is correct\n")
if response=="b":
print ("Correct!")
score=score+1
print ("Your score is", score,"/1")
else:
print("The correct answer is b")
score=score+0
print ("Your score is", score,"/1")
print (mq2)
print (options2)
response= raw_input("Press the letter of the answer you think is correct\n")
if response=="c":
print ("Correct!")
score=score+1
print ("Your score is", score,"/2")
else:
print("The correct answer is c")
score=score+0
print ("Your score is", score,"/2")
print (mq3)
print (options3)
response= raw_input("Press the letter of the answer you think is correct\n")
if response=="a":
print ("Correct!")
score=score+1
print ("Your score is", score,"/3")
else:
print("The correct answer is a")
score=score+0
print ("Your score is", score,"/3")
print (mq4)
print (options4)
response= raw_input("Press the letter of the answer you think is correct\n")
if response=="b":
print ("Correct!")
score=score+1
print ("Your score is", score,"/4")
else:
print("The correct answer is b")
score=score+0
print ("Your score is", score,"/4")
print (mq5)
print (options5)
response= raw_input("Press the letter of the answer you think is correct\n")
if response=="c":
print ("Correct!")
score=score+1
if (score==5):
print ("Good job on a perfect 5/5!")
else:
print("Your score is", score,"/5")
else:
print("The correct answer is c")
score=score+0
print ("Your final score is", score,"/5")
levelselect()
def basketballtrivia():
print("This is the basketball trivia!")
mq1=("What is the record for most regular season wins, set by the Golden State Warriors in 2016?")
mq2=("How many times has Steph Curry dunked?")
mq3=("What is depicted on the logo of the Golden State Warriors?")
mq4=("Who was the shortest player to win MVP?")
mq5=("What is the record for highest average scoring average set by a person during a single season?")
options1= ("a. 50\nb. 82\nc. 73\nd. 69")
options2= ("a. 5\nb. 15\nc. 20\nd. 25")
options3= ("a. A basketball\nb. A city skyline\nc. A bridge\nd. A spearhead")
options4= ("a. A Charles Barkley\nb. Spudd Webb\nc. Allen Iverson\nd. Steve Nash")
options5= ("a. 40.4\nb.50.4\nc. 35.4\nd. 49.4")
score=0
print (mq1)
print (options1)
response= raw_input("Press the letter of the answer you think is correct\n")
if response=="c":
print ("Correct!")
score=score+1
print ("Your score is", score,"/1")
else:
print("The correct answer is c")
score=score+0
print ("Your score is", score,"/1")
print (mq2)
print (options2)
response= raw_input("Press the letter of the answer you think is correct\n")
if response=="d":
print ("Correct!")
score=score+1
print ("Your score is", score,"/2")
else:
print("The correct answer is d")
score=score+0
print ("Your score is", score,"/2")
print (mq3)
print (options3)
response= raw_input("Press the letter of the answer you think is correct\n")
if response=="c":
print ("Correct!")
score=score+1
print ("Your score is", score,"/3")
else:
print("The correct answer is c")
score=score+0
print ("Your score is", score,"/3")
print (mq4)
print (options4)
response= raw_input("Press the letter of the answer you think is correct\n")
if response=="c":
print ("Correct!")
score=score+1
print ("Your score is", score,"/4")
else:
print("The correct answer is c")
score=score+0
print ("Your score is", score,"/4")
print (mq5)
print (options5)
response= raw_input("Press the letter of the answer you think is correct\n")
if response=="b":
print ("Correct!")
score=score+1
if (score==5):
print ("Good job on a perfect 5/5!")
else:
print("Your score is", score,"/5")
else:
print("The correct answer is b")
score=score+0
print ("Your final score is", score,"/5")
levelselect()
levelselect()
intro()
| [
"noreply@github.com"
] | dzng1234.noreply@github.com |
a56b54a1b148118fbd6bb0bfe054269d1aac4cb0 | f1c6178b5f0bb6cbd3d42d9326e9f9c41701e0a6 | /Day 7/hw2.py | 915bad5bdbd177e2d98455a02401957f7f654785 | [] | no_license | princemathew1997/random-python | 779d377fb43a39b37584b7f3a5702f0f29e98ad0 | 80b9065353525465b87636efcd7879d5f7a8ae76 | refs/heads/main | 2023-01-31T09:38:27.056368 | 2020-12-19T15:20:39 | 2020-12-19T15:20:39 | 319,009,807 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | num = int(input('How many numbers: '))
sum = 0
i = 0
while i<num:
a = input("Enter The Number: ")
if a == 'q':
break
else:
sum = sum + int(a)
i += 1
avg = sum/i
print(avg)
| [
"princepmd6@gmail.com"
] | princepmd6@gmail.com |
030114cbd063b9d9ebff2f297572a4ad727248b9 | be9704e5fbf2897cc49692967d1e15b4d6c9dc57 | /src/main.py | 562cf9ae0874e495efb5e00a4778cabaa2e581e3 | [] | no_license | Tanner0397/Gpac | 766de8ee001d2f178cdf5e098444688614794220 | 2ea8c276d729fcb9d1e1960d5d782ee6eb62e750 | refs/heads/master | 2020-04-12T20:12:11.260404 | 2019-01-23T21:27:18 | 2019-01-23T21:27:18 | 162,728,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,209 | py | """
Tanner Wendland
10/22/18
CS5401 - Gpac
"""
import game
import sys
import configparser
import copy
from config_builder import config_builder
import logger
sys.path.insert(0, 'build')
from fast_tree import tree
from evolution import Evolution
#For testing
from population_member import member
config = configparser.ConfigParser()
if(len(sys.argv) >= 2):
config.read(sys.argv[1]) #Use another config file
else:
config.read("config/default.cfg")
#Determine reuns and evals
config_values = config_builder()
#------------------------------------------
"""
Paremters: String
Returns: None
Write string to best game file.
"""
def print_game(world_string):
BEST_GAME = config_values.BEST_GAME
file = open(BEST_GAME, "w")
file.write(world_string)
file.close()
def worst_game(world_string):
WORST = config_values.WORST_GAME
file = open(WORST, "w")
file.write(world_string)
file.close()
def main():
#Create config builder values
#Game Parameters
HEIGHT = config_values.HEIGHT
WIDTH = config_values.WIDTH
WALL_DENSITY = config_values.WALL_DENSITY
PILL_DENSITY = config_values.WALL_DENSITY
FRUIT_CHANCE = config_values.FRUIT_CHANCE
FRUIT_SCORE = config_values.FRUIT_SCORE
TIME_MULT = config_values.TIME_MULT
#Paramters
RUNS = config_values.RUNS
EVALS = config_values.EVALS
#EA Parameters
MAX_DEPTH = config_values.MAX_DEPTH
PAC_POP_SIZE = config_values.pac_population_size
GHOST_POP_SIZE = config_values.ghost_population_size
PAC_GEN_STEP = config_values.pac_generation_step
GHOST_GEN_STEP = config_values.ghost_generation_step
P_SELECT = config_values.parent_selection
OVER_S = config_values.over_sel
S_SELECT = config_values.survival_selection
T_SELECT = config_values.termination
PAC_SUR_K = config_values.pac_survival_k
GHOST_SUR_K = config_values.ghost_survival_k
PAC_P_COEFF = config_values.pac_parsimony
GHOST_P_COEFF = config_values.ghost_parsimony
TERM_EVALS = config_values.term_evals
CONVERGE = config_values.convergence
MUT_RATE = config_values.mutation_rate
P_UPPER = config_values.p_upper
#Paths
LOG = config_values.LOG
GAME = config_values.BEST_GAME
PAC_CONTROLLER = config_values.PAC
GHOST_CONTOLLER = config_values.GHOST
best_pac_fitness_all_runs = -1
best_ghost_fitness_all_runs = 1
#Starting logging
logger.log_start(config_values)
#Create the Game dictinary. Add 2 to compensate for border.
game_dict = {
"height" : HEIGHT+2,
"width" : WIDTH+2,
"wall_density" : WALL_DENSITY,
"pill_density" : PILL_DENSITY,
"fruit_chance" : FRUIT_CHANCE,
"fruit_score" : FRUIT_SCORE,
"time_mult" : TIME_MULT,
}
for i in range(RUNS):
#Starting this Run
print("Starting run {}".format(i+1))
#Insert now log block...
logger.log_new_run(LOG, i)
#Create the EA instance
EA = Evolution(PAC_POP_SIZE, GHOST_POP_SIZE, PAC_GEN_STEP, GHOST_GEN_STEP, P_SELECT, S_SELECT, T_SELECT, PAC_SUR_K, GHOST_SUR_K, PAC_P_COEFF, GHOST_P_COEFF, OVER_S, TERM_EVALS, CONVERGE, MUT_RATE, game_dict, MAX_DEPTH, P_UPPER)
best_pac_this_run = EA.get_best_fitness()
best_ghost_this_run = EA.get_best_ghost_fitness()
#Better fitnesses may have emerged this run's inital population
if best_pac_this_run > best_pac_fitness_all_runs:
#print the game and assign
print_game(EA.best_world_string())
#Game contoller
EA.get_best_member().print_controller(PAC_CONTROLLER)
best_pac_fitness_all_runs = best_pac_this_run
#Now for chost
if best_ghost_this_run < best_ghost_fitness_all_runs:
#Just print contoller and assign
worst_game(EA.worst_world_string())
EA.get_best_ghost().print_controller(GHOST_CONTOLLER)
best_ghost_fitness_all_runes = best_ghost_this_run
#Start this runs log
logger.log_new_entry(LOG, max(PAC_POP_SIZE, GHOST_POP_SIZE), best_pac_this_run, EA.get_average_fitness())
#Since a fitness evaluation is new defined as a game being player, when creating generations the number of
#games played is max(pacman_lambda, ghost_lambda)
for j in range((max(PAC_POP_SIZE, GHOST_POP_SIZE)+max(PAC_GEN_STEP, GHOST_GEN_STEP)), EVALS+1, max(PAC_GEN_STEP, GHOST_GEN_STEP)):
#Main evolution loop
#Create the next generation
EA.create_generation()
#Dump pools into their poplation
EA.pac_dump_pool()
EA.ghost_dump_pool()
#Do the survival selection for both populations
EA.do_pac_survival_selection()
EA.do_ghost_survival_selection()
#Log entry
best_pac_this_run = EA.get_best_fitness()
best_ghost_this_run = EA.get_best_ghost_fitness()
#Check to see if any better controllers have emerged from the next generation
#log entry
logger.log_new_entry(LOG, j, best_pac_this_run, EA.get_average_fitness())
if best_pac_this_run > best_pac_fitness_all_runs:
#print the game and assign
print_game(EA.best_world_string())
#Game contoller
EA.get_best_member().print_controller(PAC_CONTROLLER)
best_pac_fitness_all_runs = best_pac_this_run
if best_ghost_this_run <= best_ghost_fitness_all_runs:
#Just print contoller and assign
#Print the worst game for testing
worst_game(EA.worst_world_string())
EA.get_best_ghost().print_controller(GHOST_CONTOLLER)
best_ghost_fitness_all_runs = best_ghost_this_run
if EA.determine_termination():
break
if __name__ == "__main__":
main()
| [
"trwt33@mst.edu"
] | trwt33@mst.edu |
93b26866bb323f28e65e6a08907cc6ed82010b30 | 7e82610c94ff8b569fcb44c40800a88386be314d | /344.reverse-string.py | 1017dbc95d97700bd14f88fb74fee6b52d1b1f01 | [] | no_license | irtfrm/leet-code | c7614076add0a84118db36055b1ffa9e27b5b155 | af20a605fe9c01fc11f04ab70eff12b25ea066dc | refs/heads/main | 2023-08-01T16:15:28.200198 | 2021-09-17T13:24:00 | 2021-09-17T13:24:00 | 370,783,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | #
# @lc app=leetcode id=344 lang=python3
#
# [344] Reverse String
#
# @lc code=start
class Solution:
def reverseString(self, s: List[str]) -> None:
"""
Do not return anything, modify s in-place instead.
"""
for i in range(len(s)//2):
s[i], s[len(s) - i - 1] = s[len(s) - i - 1], s[i]
# @lc code=end
| [
"frm.tomoya@gmail.com"
] | frm.tomoya@gmail.com |
7570fc313c9dede406f12b3265be0521c1ef46ef | 69f2a41554cad2ed374ebfb045e08bca0e54c755 | /test.py | b470454371cbcccb46eb02ab151a066791f039ad | [] | no_license | Momentum-Team-8/python-word-frequency-qbarefoot | 0da449427709d852b00f929c72e480003a7854be | c4a2751e2a0cf2612704fd0673756299941ec644 | refs/heads/main | 2023-05-29T12:38:20.459150 | 2021-06-10T02:05:58 | 2021-06-10T02:05:58 | 375,056,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 606 | py | import string
STOP_WORDS = [
'a', 'an', 'and', 'are', 'as', 'at', 'be', 'by', 'for', 'from', 'has', 'he',
'i', 'in', 'is', 'it', 'its', 'of', 'on', 'that', 'the', 'to', 'were',
'will', 'with'
]
with open ("praise_song_for_the_day.txt") as f:
text = f.read().lower()
words = text.split()
print(words)
table = str.maketrans("", "", string.punctuation)
stripped = [w.translate(table) for w in words]
print(stripped)
for element in STOP_WORDS:
while element in text_no_punctuation:
text_no_punctuation.remove(element)
print(text_no_punctuation) | [
"quintenbarefoot@gmail.com"
] | quintenbarefoot@gmail.com |
1f61acd31da4332e2a3ac92943686889d38bbe78 | 9d9499a7ab0d3efccd35a29f997b3551573dfb64 | /SocialMediaProject/posts/urls.py | 8d88a7f2b91919b719c933bcc826292acf31ca95 | [] | no_license | SuryaSsrivastava/djangoProject | ec040c8813b4f2f15db53c38d031f47ee2afc7ca | d98f88e7a0ded1e9875ecf0e1021fa454bdf117a | refs/heads/master | 2022-01-15T17:14:20.927517 | 2019-07-16T14:22:26 | 2019-07-16T14:22:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | py | from django.urls import path
from . import views
app_name='posts'
urlpatterns=[
path('',views.PostList.as_view(),name='all'),
path('new/',views.CreatePost.as_view(),name='create'),
path('by/<username>/',views.UserPosts.as_view(),name='for_user'),
path('by/<username>/<int:pk>/',views.PostDetail.as_view(),name='single'),
path('delete/<int:pk>/',views.DeletePost.as_view(),name='delete'),
]
| [
"scsuryamani@gmail.com"
] | scsuryamani@gmail.com |
c0e08cddd9467f521a019139fedd158aceceeb6e | fecac4770bb21e44f21a2aa148c2305c4f90e121 | /utils.py | acedcbb3fb0bb48be96db7209f05fcf86f5405c7 | [] | no_license | LucenZhao/Kaggle_House_Price_Prediction | 2c89b1d49116591f38fb88333d21cdd64737ba7e | 053e0b533266c0ebf95f8ded007b2866fbda8649 | refs/heads/master | 2020-03-10T08:06:39.090817 | 2018-04-27T12:19:07 | 2018-04-27T12:19:07 | 129,277,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,328 | py | from sklearn.model_selection import KFold, cross_val_score
import numpy as np
import pandas
import config
def cross_valid(models, feats, labels):
ns = config.cv_folds
k_fold = KFold(n_splits=ns)
scores = []
for i, clf in models:
score = sum(cross_val_score(clf, feats, labels, cv=k_fold,
scoring='neg_mean_squared_error')) / ns
scores.append(-1 * score)
scores = np.sqrt(scores)
return scores
def write_test_file(preds, test_idx, filename):
d = {'Id': test_idx, 'SalePrice': preds}
df = pandas.DataFrame(data=d)
df.to_csv('data/'+filename)
def write_selection_results(f, name, methods, scores, names):
f.write(name+'\n')
for i in range(len(scores)):
f.write(methods[i]+'\t\t'+str(scores[i])+'\n')
f.write("BEST FEATURE SELECTION METHOD: ")
idx = scores.index(min(scores))
f.write(methods[idx]+'\n')
f.write("SELECTED FEATURES:\n")
f.write(str(names))
f.write('---------------------------------------\n')
def write_model_results(f, name, models, scores):
f.write(name+'\n')
for i in range(len(scores)):
f.write(models[i]+'\t\t'+str(scores[i])+'\n')
f.write("BEST SELECTED MODEL: ")
idx = scores.index(min(scores))
f.write(models[idx]+'\n')
f.write('---------------------------------------\n') | [
"noreply@github.com"
] | LucenZhao.noreply@github.com |
4945ebc633a91ec96ba5af4ca5b8dded7fb18667 | 414a2e81a33ad812245a4672daf99db9ed7274ee | /Python_code/pycharm/demo_random.py | b2a6a447c8d6727e6d0da37433cd522498514844 | [] | no_license | zhh-3/python_study | dd08e3768fb2973c8a4b0b07ff837c7620ddc661 | 07a702fbcf9f8d272aa58bbbe1548d08863e51ad | refs/heads/main | 2023-06-18T05:17:07.785058 | 2021-07-16T06:59:18 | 2021-07-16T06:59:18 | 386,537,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | import random # 引入随机库
# 随机生成0到3之间的整数 [0,10]
for i in range(10):
x = random.randint(0, 3)
print(x)
| [
"noreply@github.com"
] | zhh-3.noreply@github.com |
09d1fa2542d206fee4e3046b4f3e41b853deb3a0 | c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce | /flask/flaskenv/Lib/site-packages/pandas/tests/extension/json/array.py | 2035f19861796dea63b5d52f6a4751469439881c | [] | no_license | AhsonAslam/webapi | 54cf7466aac4685da1105f9fb84c686e38f92121 | 1b2bfa4614e7afdc57c9210b0674506ea70b20b5 | refs/heads/master | 2020-07-27T06:05:36.057953 | 2019-09-17T06:35:33 | 2019-09-17T06:35:33 | 208,895,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:71f932c427834e45b2bd65a7b9bcb6566fb58c3713a7dfe914347c0ae8240f05
size 6380
| [
"github@cuba12345"
] | github@cuba12345 |
66b25c223ada291e0cc5a9c44b0f8240ecc60514 | 7496d8997095708ec8365a7536564c338823bb34 | /Q3/combiner.py | dffc354cd9c3db8c3492936fa8076c4498146bb1 | [] | no_license | chihfenl/bomoda-practice | ee561fc7900db5023dd7854da1b34a7adff189e7 | 8500c59f2c1450913a338a01452578c1ca91c980 | refs/heads/master | 2021-05-30T04:27:28.900355 | 2015-11-26T04:00:56 | 2015-11-26T04:00:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 893 | py | #!/usr/bin/python
# The object of this file is to remove duplicate id_brand input from mapper.py
# and output a unique one to the reducer.py
import simplejson as json
import re
import sys
previous_id_brand = ""
previous_brand = ""
previous_count = ""
previous_text = ""
first = True
for line in sys.stdin:
try:
id_brand, text, count = line.split("\t")
if (first):
previous_id_brand = id_brand
id, previous_brand = id_brand.split(",")
previous_count = count
previous_text = text
first = False
if id_brand != previous_id_brand:
print previous_brand + "\t" + previous_text + "\t" + previous_count
previous_id_brand = id_brand
id, previous_brand = id_brand.split(",")
previous_count = count
previous_text = text
except ValueError as e:
continue
# Notice we have to print the last one
print previous_brand + "\t" + previous_text + "\t" + previous_count | [
"phone751230@gmail.com"
] | phone751230@gmail.com |
8c6eb05af7336cb5b5413f19657bc90ea67903c9 | 6a4a99ef02d4d9b5b03c16e0fb927ebe3ebd6bf4 | /Packages/app.py | aca4bf290d85897ae1601c80e39337274df3cbdf | [] | no_license | tinashiang/CEBI-Miner | cb9b51e43a4edd40f46da1e48f85eb5adda682e0 | 93971702bb3a576092ac46562d49d2d214150f74 | refs/heads/master | 2023-03-18T17:22:22.447073 | 2019-12-12T16:12:00 | 2019-12-12T16:12:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54,730 | py | # -*- coding: utf-8 -*-
import base64
import datetime
import io
import sys
import os
import importlib as imp
import flask
from sys import platform
import time
# Python 2 compatibility shim: reload(sys) re-exposes the hidden
# sys.setdefaultencoding so implicit str<->unicode conversions use UTF-8.
# Python 3 is UTF-8 native and *removed* sys.setdefaultencoding, so the old
# unconditional call raised AttributeError on import under Python 3.
# Only apply the hack when actually running on Python 2.
if sys.version_info[0] < 3:
    reload(sys)  # noqa: F821 - `reload` is a builtin on Python 2 only
    sys.setdefaultencoding('utf8')
import dash
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
import dash_table_experiments as dte
import dash_reusable_components as drc
import numpy as np
import pandas as pd
import xlrd
import numbers
from string import punctuation
import re
# Regex fragments for naive sentence splitting (abbreviations, acronyms,
# web addresses, digit runs).  Presumably consumed by a sentence-tokenizer
# helper elsewhere in this file -- TODO confirm against the search routines.
alphabets = "([A-Za-z])"
prefixes = "(Mr|St|Mrs|Ms|Dr)[.]"
suffixes = "(Inc|Ltd|Jr|Sr|Co|MD)"
starters = "(Mr|Mrs|Ms|Dr|He\s|She\s|It\s|They\s|Their\s|Our\s|We\s|But\s|However\s|That\s|This\s|Wherever)"
acronyms = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"
websites = "[.](com|net|org|io|gov)"
digits = "([0-9])"
# Default pixel widths for the four displayed table columns.
column_widths = [125,125,125,125]
# Mutable module-level state shared across Dash callbacks:
dataFrame = []                      # uploaded dataset (populated on upload)
dataFrameResults = []               # NLP search results
downloadLink = ""                   # generated CSV download URL
downloadLinkxls = ""                # generated Excel download URL
# NOTE(review): string flag, not a bool -- appears to be compared as text
# elsewhere; confirm before changing to a real boolean.
error_triggered = 'False'
nClicks = 0                         # last seen click count for the search button
terms = []                          # accumulated search keywords
ls_initial_column_order = []        # column order of the uploaded table
ls_final_column_order_output = []   # column order of the results table
# Base stylesheet for the Dash UI (Chris P's standard Codepen CSS).
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash('offline example',external_stylesheets=external_stylesheets )
# Serve JS/CSS locally instead of from the CDN (offline operation).
app.scripts.config.serve_locally = True
# Needed because callbacks reference component ids that are created
# dynamically and so are absent from the initial layout.
app.config['suppress_callback_exceptions'] = True
# Resolve the static image assets relative to the current working directory.
# os.path.join emits the correct separator on every platform, replacing the
# previous win32-vs-posix branch (which also concatenated a stray '//').
image_tim = os.path.join(os.getcwd(), 'tim.png')
image_filename = os.path.join(os.getcwd(), 'cebi.png')
image_loading = os.path.join(os.getcwd(), 'loading.gif')


def _encode_image(path):
    """Return the base64-encoded contents (bytes) of the image at *path*.

    Uses a context manager so the file handle is closed deterministically;
    the previous ``open(...).read()`` pattern leaked the handle.
    """
    with open(path, 'rb') as img:
        return base64.b64encode(img.read())


encoded_image_tim = _encode_image(image_tim)
encoded_image = _encode_image(image_filename)
encoded_image_loading = _encode_image(image_loading)

# Browser tab title.
app.title = 'CEBI-Miner'
app.layout = html.Div([
html.Img(id='loading', src='data:image/gif;base64,{}'.format(encoded_image_loading.decode()), style={'padding-left': '40%', 'max-width': '300px', 'display': 'none',
'padding-top': '20%', 'position': 'absolute', 'float':'left', 'z-index':'100'}),
html.Img(id='loading2', src='data:image/gif;base64,{}'.format(encoded_image_loading.decode()), style={'padding-left': '40%', 'max-width': '300px', 'display': 'none',
'padding-top': '135%', 'position': 'absolute', 'float':'left', 'z-index':'100'}),
html.Img(src='data:image/png;base64,{}'.format(encoded_image.decode()), style={'padding-left': '75%','padding-top': '0%', 'width': '150px', 'max-width': '150px'}),
html.Img(src='data:image/png;base64,{}'.format(encoded_image_tim.decode()), style={'padding-left': '45%', 'width': '10%', 'max-width': '300px'}),
html.Div( 'CEBI-Miner',
style={'text-align': 'center', 'margin-bottom': '15px', 'font-family':'Garmond','font-size':'50px', 'font-weight':'bold'}
),
drc.Card([
html.Div('Select Dataset:', style={'font-size':'25px', 'font-weight':'bold'}),
html.H6("Upload file (excel or csv):"),
dcc.Upload(
id='upload-data',
children=html.Div([
'Drag and Drop or ',
html.A('Select Files')
]),
style={
'width': '100%',
'height': '60px',
'lineHeight': '60px',
'borderWidth': '1px',
'borderStyle': 'dashed',
'borderRadius': '5px',
'textAlign': 'center',
'margin': '10px'
},
# Allow multiple files to be uploaded
multiple=False),
html.Br(),
html.Div(id='output-image-upload'),
html.Br(),
html.H6("Uploaded Data:"),
html.Div("Showing first 50 rows:"),
html.Div(dte.DataTable(rows=[{}], id='table',editable=False, columns = ls_initial_column_order)),
#Column of Interest
html.H6("Column of Interest:"),
dcc.Dropdown(id='dropdown_table_filterColumn',
multi = False,
placeholder='Select Column',
style={'width': '70.75%'}),
html.H6("Index Column: (optional)"),
dcc.Dropdown(id='dropdown_table_indexColumn',
multi = False,
placeholder='Select Column',
options=[
{'label': 'None', 'value': ''},
],
value='',
style={'width': '70.75%'}),
dcc.Input(id='index_keyword', type='text', value='', placeholder='Enter indicator value', style={'width': '50%'}),
html.H6("Include found keyword sentences as a seperate column in results: (Slow with data > 500 records)"),
dcc.RadioItems(id='keyword_sentences_column',
options=[
{'label': 'On', 'value': False},
{'label': 'Off', 'value': True} ],
value=True
),
html.H6("Scopes of Search:"),
dcc.Dropdown(id='search_scope', multi = False,
placeholder='Select Scope',
options=[
{'label': 'Whole Report', 'value': ''},
{'label': 'Patient Information', 'value': 'Patient Information'},
{'label': 'Exam Information', 'value': 'Exam Information'},
{'label': 'Report (with Impression)', 'value': 'Report & Impression'},
#{'label': 'Report (without Impression)', 'value': 'Report'},
{'label': 'Impression Only', 'value': 'Impression'},
],
value='',
style={'width': '70.75%'}),
#Search Keywords
html.H6("Search Keywords (comma separated):"),
dcc.Input(id='search_keywords', type='text', value='', placeholder='Enter here', style={'width': '50%'}),
#html.Button(
# 'Add keyword',
# id='add_keyword',
#),
#dcc.Dropdown(id='list_keywords',
# options=[{}],
# multi=True
#),
], style={'width': '70%'}),
#style={'text-align': 'center', 'margin-bottom': '15px'}
html.Br(),
#Radio button of Associated Words Feature
#html.Br(),
drc.Card([
html.Div('Associated Words Feature:', style={'font-size':'25px', 'font-weight':'bold'}),
dcc.RadioItems(id='associated_words_feature',
options=[
{'label': 'On', 'value': False},
{'label': 'Off', 'value': True} ],
value=True
),
#Associated words
html.H6("Associated words (comma seperated):"),
dcc.Input(id='associated_words', type='text', value='', placeholder='Enter here', style={'width': '50%'}),
#Associated Words Direction and Distance
html.Br(),
html.H6("Associated Words Direction:"),
dcc.Dropdown(id='associated_words_direction',
options=[
{'label': 'Before', 'value': 'Before'},
{'label': 'After', 'value': 'After'} ],
multi=True, style={'width': '70.75%'}
),
html.H6("Associated Distance Units:"),
dcc.RadioItems(id='associated_distance_units',
options=[
{'label': ' Word', 'value': 'Words'},
{'label': ' Sentence', 'value': 'Sentences'}
],
value='Words'
),
# Associated Words Distance
html.H6("Associated Distance:"),
dcc.Slider(id='associated_words_distance',
min=0,
max=20,
marks={i: '{}'.format(i) if i == 1 else str(i) for i in range(0, 21)},
value=0,
),
html.Br(),
html.Div(' - If "Sentence" is selected as the distance unit, a distance 0 means search within the same sentence.', style={'font-size':'15px'}),
], style={'width': '70%'}),
#Radio button of negations
html.Br(),
drc.Card([
html.Div('Negation Words Feature:', style={'font-size':'25px', 'font-weight':'bold'}),
dcc.RadioItems(id='negation_words_feature',
options=[
{'label': 'On', 'value': False},
{'label': 'Off', 'value': True} ],
value=True
),
#Negations words
html.H6("Negation words (comma seperated):"),
dcc.Input(id='negation_words', type='text', value='', placeholder='Enter here', style={'width': '50%'}),
#Negation Words Direction
html.Br(),
html.H6("Negation Words Direction:"),
dcc.Dropdown(id='negation_words_direction',
options=[
{'label': 'Before', 'value': 'Before'},
{'label': 'After', 'value': 'After'} ],
multi=True,
style={'width': '70.75%'}
),
html.H6("Negation Distance Units:"),
dcc.RadioItems(id='negation_distance_units',
options=[
{'label': ' Word', 'value': 'Words'},
{'label': ' Sentence', 'value': 'Sentences'}
],
value='Words'
),
# Negation Words Distance
html.H6("Negation Distance:"),
dcc.Slider(id='negation_words_distance',
min=0,
max=20,
marks={i: '{}'.format(i) if i == 1 else str(i) for i in range(0, 21)},
value=0,
),
html.Br(),
html.Div(' - If "Sentence" is selected as the distance unit, a distance 0 means search within the same sentence.', style={'font-size':'15px'}),
], style={'width': '70%'}),
#Output and Download
html.Br(),
drc.Card([
html.Div('Output:', style={'font-size':'25px', 'font-weight':'bold'}),
#Selection Summary
html.H6("Current Selection Summary:"),
html.Div(id='selection_summary_0'),
html.Div(id='selection_summary_1'),
html.Div(id='selection_summary_2'),
html.Div(id='selection_summary_3'),
html.Br(),
html.Div(id='table_x'),
html.Button(
'Run NLP Search',
id='search',
#download="output.csv",
#href="",
#target="_blank"
),
dcc.ConfirmDialog(
id='alert',
message='There was an error. After clicking okay, please refresh your browser and try again.',
),
#dcc.ConfirmDialog(
# id='alert',
# message='There was an unexpected error2!',
#),
# html.Button(
# 'Get Download Link for Table',
# id='download-button',
# #download="./results.csv",
# #href="./results.csv",
# #target="_blank"
# ),
#html.Br(),
#html.Br(),
# html.A(
# "Download Results (csv)", id='download-link', href="results.csv", target="", style={'padding-left':'25px'},
# ),
html.A(
"Download Results (xlsx - Excel)", id='download-link-xlsx', href="results.xlsx", target="", style={'padding-left':'25px'},
),
#Output Statistics
html.Br(),
html.Br(),
html.H6("Output Statistics:"),
html.Div(id='output_statistics_1', style={'user-select':'text'}),
html.Div(id='output_statistics_3', style={'user-select':'text'}),
html.Div(id='output_statistics_2', style={'user-select':'text'}),
html.Div(id='output_statistics_4', style={'user-select':'text'}),
html.Br(),
html.Br(),
html.Div("Showing first 50 rows:"),
html.Div(dte.DataTable(rows=[{}], id='results', editable=False, columns=ls_final_column_order_output, column_widths = column_widths )),
html.Br(),
], style={'width': '70%'}),
])
# Functions
# file upload function
#Telling user at the end what they entered
#keywords = [" ".join(x.strip().split()) for x in keywords.split(',')]
#neg_words = [" ".join(x.strip().split()) for x in neg_words.split(',')]
#assoc_words = [" ".join(x.strip().split()) for x in assoc_words.split(',')]
@app.callback(
    Output(component_id='selection_summary_0', component_property='children'),
    [Input('dropdown_table_filterColumn', 'value')]
)
def search_keywords_summary(input1):
    """Render the column-of-interest line of the selection summary."""
    if input1 is None:
        # Nothing picked yet: tell the user this is required before searching.
        return 'Column of interest: None. You must select a column of interest before running the NLP search.'
    return 'Column of interest: {}'.format(input1)
@app.callback(
    Output(component_id='selection_summary_0', component_property='style'),
    [Input('dropdown_table_filterColumn', 'value')]
)
def column_of_interest_color(input1):
    """Color the column-of-interest summary red until a column is picked."""
    return {'color': 'red'} if input1 is None else {'color': 'black'}
@app.callback(
    Output(component_id='selection_summary_1', component_property='children'),
    [Input('search_keywords', 'value')]
)
def search_keywords_summary(input1):
    """Render the search-keyword line of the selection summary."""
    if input1 == '':
        return "No search words entered. You must enter search keywords before running the NLP search."
    # Split on commas and collapse internal whitespace in each keyword.
    words = [" ".join(part.strip().split()) for part in input1.split(',')]
    if words[-1] == "":
        # A trailing comma leaves an empty entry; drop it.
        words.pop()
    return 'Search keywords: {}'.format(words)
@app.callback(
    Output(component_id='selection_summary_1', component_property='style'),
    [Input('search_keywords', 'value')]
)
def search_keywords_summary_color(input1):
    """Color the keyword summary red while the keyword box is empty."""
    return {'color': 'red'} if input1 == '' else {'color': 'black'}
@app.callback(
    Output(component_id='selection_summary_2', component_property='children'),
    [Input('associated_words', 'value'),
     Input('associated_words_feature', 'value'),
     Input('associated_words_distance', 'value'),
     Input('associated_words_direction', 'value'),
     Input('associated_distance_units', 'value')]
)
def update_associated_words_summary(input1, input2, distance, directions, distance_units):
    """Render the associated-words line of the selection summary.

    input2 is the feature radio value: False means ON, True means OFF
    (the radio options map 'On' -> False, 'Off' -> True).
    """
    if input2 != False:  # feature switched off
        return 'Associated feature is turned off.'
    words = [" ".join(x.strip().split()) for x in input1.split(',')]
    if len(words) == 1 and words[0] == "":
        return 'Associated feature is turned on, but no words have been entered.'
    if words[-1] == "":
        # A trailing comma leaves an empty entry; drop it.
        words.pop()
    summary = 'Associated words: {}. Distance Unit: {}. Search distance: {}. Search Direction: {}.'.format(
        words, str(distance_units), str(distance), str(directions))
    if directions is None or directions == []:
        # Fixed: a space was missing between the summary and the warning,
        # producing e.g. "...Direction: None.You must select...".
        return summary + ' You must select an associated search direction.'
    return summary
@app.callback(
    Output(component_id='selection_summary_2', component_property='style'),
    [Input('associated_words', 'value'),
     Input('associated_words_feature', 'value'),
     Input('associated_words_distance', 'value'),
     Input('associated_words_direction', 'value'),
     Input('associated_distance_units', 'value')]
)
def update_negation_words_summary_color(input1, input2, distance, directions, distance_units):
    """Color the associated-words summary red while its inputs are incomplete.

    input2 is the feature radio value: False means ON, True means OFF.
    (Fixes: removed leftover debug print of `directions`; made the
    feature-off path return a definite style instead of falling through
    and returning None.)
    """
    if input2 == False:  # feature is on
        words = [" ".join(x.strip().split()) for x in input1.split(',')]
        if len(words) == 1 and words[0] == "":
            return {'color': 'red'}  # no words entered yet
        if directions is None or directions == []:
            return {'color': 'red'}  # no search direction chosen
        return {'color': 'black'}
    # Feature off: summary text is informational, show it in the default color.
    return {'color': 'black'}
@app.callback(
    Output(component_id='selection_summary_3', component_property='children'),
    [Input('negation_words', 'value'),
     Input('negation_words_feature', 'value'),
     Input('negation_words_distance', 'value'),
     Input('negation_words_direction', 'value'),
     Input('negation_distance_units', 'value')]
)
def update_associated_words_summary(input1, input2, distance, directions, distance_units):
    """Render the negation-words line of the selection summary.

    input2 is the feature radio value: False means ON, True means OFF
    (the radio options map 'On' -> False, 'Off' -> True).
    """
    if input2 != False:  # feature switched off
        return 'Negation feature is turned off.'
    words = [" ".join(x.strip().split()) for x in input1.split(',')]
    if len(words) == 1 and words[0] == "":
        return 'Negation feature is turned on, but no words have been entered.'
    if words[-1] == "":
        # A trailing comma leaves an empty entry; drop it.
        words.pop()
    summary = 'Negation words: {}. Distance Unit: {}. Search distance: {}. Search Direction: {}.'.format(
        words, str(distance_units), str(distance), str(directions))
    if directions is None or directions == []:
        # Fixed: a space was missing between the summary and the warning,
        # producing e.g. "...Direction: None.You must select...".
        return summary + ' You must select a negation search direction.'
    return summary
@app.callback(
    Output(component_id='selection_summary_3', component_property='style'),
    [Input('negation_words', 'value'),
     Input('negation_words_feature', 'value'),
     Input('negation_words_distance', 'value'),
     Input('negation_words_direction', 'value'),
     Input('negation_distance_units', 'value')]
)
def update_negation_words_summary_color(input1, input2, distance, directions, distance_units):
    """Color the negation-words summary red while its inputs are incomplete.

    input2 is the feature radio value: False means ON, True means OFF.
    (Fixes: removed leftover debug print of `directions`; made the
    feature-off path return a definite style instead of falling through
    and returning None.)
    """
    if input2 == False:  # feature is on
        words = [" ".join(x.strip().split()) for x in input1.split(',')]
        if len(words) == 1 and words[0] == "":
            return {'color': 'red'}  # no words entered yet
        if directions is None or directions == []:
            return {'color': 'red'}  # no search direction chosen
        return {'color': 'black'}
    # Feature off: summary text is informational, show it in the default color.
    return {'color': 'black'}
################################################################
def parse_contents(contents, filename):
    """Decode a Dash upload payload into a pandas DataFrame.

    `contents` is the data-URL string supplied by dcc.Upload
    ("<content_type>,<base64 payload>"); `filename` selects the parser.
    Returns None when the payload cannot be parsed.
    """
    _, payload = contents.split(',')
    raw = base64.b64decode(payload)
    try:
        if '.csv' in filename:
            # CSV upload: decode the bytes as UTF-8 text first.
            frame = pd.read_csv(
                io.StringIO(raw.decode('utf-8')), encoding='utf-8')
        elif '.xls' in filename:
            # Excel upload: read every sheet, prefer "Sheet1" when present,
            # otherwise fall back to the first sheet in the workbook.
            sheets = pd.read_excel(io.BytesIO(raw), None)
            if "Sheet1" in sheets.keys():
                frame = sheets["Sheet1"]
            else:
                frame = sheets[next(iter(sheets))]
    except Exception as exc:
        print(exc)
        return None
    return frame
############ changing DAVID 11/13
@app.callback(Output('output-image-upload', 'children'),
              [Input('upload-data', 'contents')],
              [State('upload-data', 'filename'),
               State('upload-data', 'last_modified')])
def update_output(list_of_contents, list_of_names, list_of_dates):
    """Echo the uploaded file name once a file has been provided."""
    if list_of_contents is None:
        return None
    return 'File uploaded: "{}"'.format(list_of_names)
########################
############ LOADING RAEIN #####################
#@app.callback(Output('loading', 'style'),
# [Input('search', 'n_clicks')])
#def switch_loading(n_clicks):
# if n_clicks:
# return {'padding-left': '40%', 'max-width': '300px', 'display': 'none', 'padding-top': '185%', 'position': 'absolute', 'float':'left', 'z-index':'100'}
# return {'padding-left': '40%', 'max-width': '300px', 'display': 'none', 'padding-top': '20%', 'position': 'absolute', 'float':'left', 'z-index':'100'}
#@app.callback(Output('loading', 'style'),
# [Input('upload-data', 'filename')])
#def switch_loading(rows):
# return {'padding-left': '40%', 'max-width': '300px', 'display': 'none', 'padding-top': '20%', 'position': 'absolute', 'float':'left', 'z-index':'100'}
#################################
#callback table column order
@app.callback(Output('table', 'columns'),
              [Input('upload-data', 'contents'),
               Input('upload-data', 'filename')])
def update_ouput_columns(contents, filename):
    """Publish the uploaded file's column order to the preview table.

    Side effect: caches the order in the module-global
    `ls_initial_column_order` for use by later callbacks.
    """
    if contents is None:
        return []
    df = parse_contents(contents, filename)
    if df is None:
        return []
    global ls_initial_column_order
    ls_initial_column_order = df.columns.tolist()
    return ls_initial_column_order
# callback table creation
@app.callback(Output('table', 'rows'),
              [Input('upload-data', 'contents'),
               Input('upload-data', 'filename')])
def update_output(contents, filename):
    """Load the upload into the preview table (only 50 rows are shown).

    Side effect: caches the full table in the module-global `dataFrame`.
    """
    if contents is None:
        return [{}]
    df = parse_contents(contents, filename)
    if df is None:
        return [{}]
    global dataFrame
    # Placeholder text keeps empty cells visible/representable downstream.
    df = df.fillna('empty row')
    dataFrame = df.to_dict('records')
    return (df.head(50)).to_dict('records')
#callback update options of filter dropdown
@app.callback(Output('dropdown_table_filterColumn', 'options'),
              [Input('table', 'rows'),
               Input('upload-data', 'contents')],
              [State('upload-data', 'filename'),
               State('upload-data', 'last_modified')])
def update_filter_column_options(rows, list_of_contents, list_of_names, list_of_dates):
    """Offer every uploaded column as a candidate column of interest."""
    return [{'label': col, 'value': col} for col in ls_initial_column_order]
#callback update options of filter dropdown
@app.callback(Output('dropdown_table_indexColumn', 'options'),
              [Input('table', 'rows'),
               Input('upload-data', 'contents')],
              [State('upload-data', 'filename'),
               State('upload-data', 'last_modified')])
def update_index_column_options(rows, list_of_contents, list_of_names, list_of_dates):
    """Offer every uploaded column as a candidate index column, plus 'None'."""
    options = [{'label': 'None', 'value': ''}]
    options.extend({'label': col, 'value': col} for col in ls_initial_column_order)
    return options
###################### Keywords DropDown #######################
#@app.callback(Output('list_keywords', 'options'),
# [Input('add_keyword', 'n_clicks'),
# Input('search_keywords', 'value')])
#def add_keywords(nclicks, text):
# if nclicks:
# terms = terms + [{'label': text, 'value': text}]
# return terms
###################### Disabling #################
#@app.callback(Output('search', 'disabled'),
# [Input('negation_words_feature', 'value')])
#def update_search_button(negation):
# return negation
@app.callback(Output('negation_words', 'disabled'),
              [Input('negation_words_feature', 'value')])
def update_negation(feature_off):
    """Grey out the negation-words input while the feature radio is Off (True)."""
    return feature_off
@app.callback(Output('negation_words_direction', 'disabled'),
              [Input('negation_words_feature', 'value')])
def update_negation(feature_off):
    """Grey out the negation direction dropdown while the feature radio is Off (True)."""
    return feature_off
@app.callback(Output('negation_words_distance', 'disabled'),
              [Input('negation_words_feature', 'value')])
def update_negation(feature_off):
    """Grey out the negation distance slider while the feature radio is Off (True)."""
    return feature_off
@app.callback(Output('negation_distance_units', 'disabled'),
              [Input('negation_words_feature', 'value')])
def update_negation(feature_off):
    """Grey out the negation distance-unit radio while the feature radio is Off (True).

    Bug fix: this callback was wired to `associated_words_feature`
    (copy-paste error), so toggling the *associated* feature
    enabled/disabled the *negation* distance-unit radio.
    """
    return feature_off
@app.callback(Output('associated_words', 'disabled'),
              [Input('associated_words_feature', 'value')])
def update_associated(feature_off):
    """Grey out the associated-words input while the feature radio is Off (True)."""
    return feature_off
@app.callback(Output('associated_words_direction', 'disabled'),
              [Input('associated_words_feature', 'value')])
def update_associated(feature_off):
    """Grey out the associated direction dropdown while the feature radio is Off (True)."""
    return feature_off
@app.callback(Output('associated_words_distance', 'disabled'),
              [Input('associated_words_feature', 'value')])
def update_associated(feature_off):
    """Grey out the associated distance slider while the feature radio is Off (True)."""
    return feature_off
@app.callback(Output('associated_distance_units', 'disabled'),
              [Input('associated_words_feature', 'value')])
def update_associated(feature_off):
    """Grey out the associated distance-unit radio while the feature radio is Off (True)."""
    return feature_off
@app.callback(Output('download-link', 'href'),
              [Input('search', 'n_clicks')])
def update_associated(n_clicks):
    """Point the CSV download link at the results file once a search has run."""
    return "results.csv" if n_clicks is not None else "#"
@app.callback(Output('download-link-xlsx', 'href'),
              [Input('search', 'n_clicks')])
def update_associated(n_clicks):
    """Point the Excel download link at the results file once a search has run."""
    return "results.xlsx" if n_clicks is not None else "#"
@app.callback(Output('alert', 'displayed'),
              [Input('search', 'n_clicks')])
def display_confirm(n_clicks):
    """Show the error dialog when the last search set the error flag.

    NOTE(review): the 10 s sleep appears intended to give the search
    callback time to finish and set `error_triggered`; this looks racy —
    confirm before relying on it.
    """
    global error_triggered
    if n_clicks is None:
        print(error_triggered)
        return False
    time.sleep(10)
    if error_triggered == 'True':
        print(error_triggered)
        return True
    print(error_triggered)
    return False
@app.callback(Output('alert', 'message'),
              [Input('alert', 'submit_n_clicks')])
def hide_alert_box(submit_n_clicks):
    """Reset the error flag once the user acknowledges the alert dialog."""
    #time.sleep(15)
    global error_triggered
    if submit_n_clicks is None:
        return None
    error_triggered = 'False'
    return 'There was an error. After clicking okay, please refresh your browser and try again.'
###################### BACK END #######################
@app.callback(Output('results', 'rows'),
              [Input('search', 'n_clicks')],
              [State('dropdown_table_filterColumn', 'value'),
               State('dropdown_table_indexColumn', 'value'),
               State('search_scope', 'value'),
               State('search_keywords', 'value'),
               State('index_keyword', 'value'),
               State('table', 'rows'),
               State('negation_words_feature', 'value'),
               State('negation_words', 'value'),
               State('negation_words_direction', 'value'),
               State('negation_distance_units', 'value'),
               State('negation_words_distance', 'value'),
               State('associated_words_feature', 'value'),
               State('associated_words', 'value'),
               State('associated_words_direction', 'value'),
               State('associated_distance_units', 'value'),
               State('associated_words_distance', 'value'),
               State('keyword_sentences_column', 'value')])
def update_results(n_clicks, column, index, scope, keywords, index_val, reportsArr, neg, neg_words, neg_dir, neg_units, neg_dis, assoc, assoc_words, assoc_dir, assoc_units, assoc_dis, keyword_sentences_column):
    """Run the NLP search and return the first 50 result rows.

    Triggered by the "Run NLP Search" button. Reads the full upload from
    the module-global `dataFrame` (not the 50-row preview), optionally
    filters rows by the chosen index column, narrows each report to the
    selected scope section(s) via `get_scope`, runs `Search`, writes
    results.csv and results.xlsx to disk for the download links, and
    caches the results in the module-global `dataFrameResults`.
    On a `Search` failure it sets the module-global `error_triggered`
    flag (polled by the alert callback) and returns an empty table.
    """
    # NOTE(review): fixed delay before reading the shared globals —
    # presumably to let other callbacks settle first; looks racy, confirm.
    time.sleep(2)
    global nClicks
    if n_clicks != None: #== nClicks + 1:
        nClicks += 1
        #df = pd.DataFrame(reportsArr)
        global ls_initial_column_order
        # Rebuild the full table from the cached upload, preserving the
        # original column order.
        df = pd.DataFrame(dataFrame, columns=ls_initial_column_order)
        ############ Select index column satisfied rows ##############
        # Index values arrive comma-separated; normalise whitespace per value.
        index_val = [" ".join(x.strip().split()) for x in index_val.split(',')]
        # String index columns match case-insensitively; numeric columns are
        # compared as floats. Multiple requested values are OR-ed together.
        if index != "" and index is not None and not (df.dtypes[index] == 'int64' or df.dtypes[index] == 'float64'):
            if len(index_val) == 1:
                df = df.loc[df[index].str.lower() == str(index_val[0]).lower()]
            else:
                frames = []
                for i in index_val:
                    temp = df.loc[df[index].str.lower() == str(i).lower()]
                    frames.append(temp)
                df = pd.concat(frames)
        elif index != "" and index is not None and (df.dtypes[index] == 'int64' or df.dtypes[index] == 'float64'):
            if len(index_val) == 1:
                df = df.loc[df[index] == float(index_val[0])]
            else:
                frames = []
                for i in index_val:
                    temp = df.loc[df[index] == float(i)]
                    frames.append(temp)
                df = pd.concat(frames)
        ################
        if scope != "":
            # Prepend a 'Search_Scope' column holding the scoped report text
            # and search that column instead of the raw report column.
            scope_results = get_scope(df.columns.get_loc(column), scope, df.values[:])
            #print(len(scope_results), len(df.values[:]), scope_results[:2])
            df = pd.DataFrame(np.append(np.array(scope_results, dtype=object), df.values[:], axis=1), columns=np.concatenate((['Search_Scope'], df.columns)))
            # df = df.fillna('')
            column = 'Search_Scope'
        #print(df.columns)
        # Build the output column list. The feature flags are inverted
        # (False == feature ON), so `not assoc` / `not neg` mean "enabled".
        # NOTE(review): a falsy keyword_sentences_column appears to mean
        # "include the Keyword_Sentences column" — confirm against the layout.
        if not keyword_sentences_column:
            if not assoc:
                if not neg:
                    new_columns = np.concatenate((['TERM_Counts', 'ASSOC_Counts', 'NEG_Counts', 'Keyword_Sentences'], df.columns))
                else:
                    new_columns = np.concatenate((['TERM_Counts', 'ASSOC_Counts', 'Keyword_Sentences'], df.columns))
            else:
                if not neg:
                    new_columns = np.concatenate((['TERM_Counts', 'NEG_Counts', 'Keyword_Sentences'], df.columns))
                else:
                    new_columns = np.concatenate((['TERM_Counts', 'Keyword_Sentences'], df.columns))
        else:
            if not assoc:
                if not neg:
                    new_columns = np.concatenate((['TERM_Counts', 'ASSOC_Counts', 'NEG_Counts'], df.columns))
                else:
                    new_columns = np.concatenate((['TERM_Counts', 'ASSOC_Counts'], df.columns))
            else:
                if not neg:
                    new_columns = np.concatenate((['TERM_Counts', 'NEG_Counts'], df.columns))
                else:
                    new_columns = np.concatenate((['TERM_Counts'], df.columns))
        try:
            # Run the actual keyword/negation/association search; restore
            # placeholder cells ("empty row") to empty strings for display.
            result = pd.DataFrame(Search(df.columns.get_loc(column), keywords, df.values[:], neg, neg_words, neg_dir, neg_units, neg_dis, assoc, assoc_words, assoc_dir, assoc_units, assoc_dis, keyword_sentences_column), columns=new_columns)
            result = result.replace("empty row", "")
        except Exception as err:
            # Flag the failure for the alert callback instead of crashing Dash.
            global error_triggered
            error_triggered = 'True'
            return [{}]
        print(list(new_columns))
        global dataFrameResults
        global downloadLink, downloadLinkxls
        dataFrameResults = result.to_dict('records')
        # Write both download formats to disk; the Flask routes serve them.
        downloadLink = result.to_csv('results.csv', index=False, columns=list(new_columns))
        writer = pd.ExcelWriter('results.xlsx', engine='xlsxwriter')
        downloadLinkxls = result.to_excel(writer, sheet_name='Sheet1', index=False)
        workbook = writer.book
        worksheet = writer.sheets['Sheet1']
        # Bold, wrapped, shaded header row in the Excel output.
        header_format = workbook.add_format({
            'bold': True,
            'text_wrap': True,
            'valign': 'top',
            'fg_color': '#D7E4BC',
            'border': 1})
        for col_num, value in enumerate(new_columns):
            worksheet.write(0, col_num, value, header_format)
        writer.save()
        return (result.head(50)).to_dict('records')
    else:
        return [{}]
##################################################################################################
# Output Results Statistics:
@app.callback(Output(component_id='output_statistics_1', component_property='children'),
              [Input('results', 'rows')])
def results_statistics_1(rows):
    """Report how many result records contain at least one keyword hit.

    Fix: removed `global DataFrameResults` — that name (wrong case) is
    never defined anywhere; the function actually reads the module-global
    `dataFrameResults`, which needs no global statement for read access.
    """
    if rows != [{}]:
        df_results = pd.DataFrame.from_dict(dataFrameResults)
        records_keywords = len(df_results.loc[df_results['TERM_Counts'] >= 1])
        records_keywords_perc = round((100 * records_keywords / len(df_results)), 2)
        return ('Records containing keywords: \n\t {} ({}% of {} total records)'.format(str(records_keywords), str(records_keywords_perc), str(len(df_results))))
@app.callback(Output(component_id='output_statistics_3', component_property='children'),
              [Input('results', 'rows')])
def results_statistics_3(rows):
    """Report how many keyword records also have associated-word hits.

    Fixes: removed the never-defined `global DataFrameResults` (wrong case;
    the module global actually read is `dataFrameResults`), and corrected
    the "asssociated" typo in the user-facing message.
    """
    if rows != [{}]:
        df_results = pd.DataFrame.from_dict(dataFrameResults)
        if 'ASSOC_Counts' in (df_results.columns.tolist()):
            records_assoc = len(df_results.loc[df_results['ASSOC_Counts'] >= 1])
            records_assoc_perc = round((100 * records_assoc / len(df_results)), 2)
            return ('Records with keywords containing associated words within set associated word distance and direction: {} ({}% of {} total records)'.format(str(records_assoc), str(records_assoc_perc), str(len(df_results))))
@app.callback(Output(component_id='output_statistics_2', component_property='children'),
              [Input('results', 'rows')])
def results_statistics_2(rows):
    """Report how many keyword records also have negation hits.

    Fix: removed `global DataFrameResults` — that name (wrong case) is
    never defined; the module global actually read is `dataFrameResults`.
    """
    if rows != [{}]:
        df_results = pd.DataFrame.from_dict(dataFrameResults)
        if 'NEG_Counts' in (df_results.columns.tolist()):
            records_negs = len(df_results.loc[df_results['NEG_Counts'] >= 1])
            records_negs_perc = round((100 * records_negs / len(df_results)), 2)
            return ('Records with keywords containing negations within set negation word distance and direction: {} ({}% of {} total records)'.format(str(records_negs), str(records_negs_perc), str(len(df_results))))
###################################################################################################
#Download link function ################
#@app.callback(Output('download-link', 'href'),
# [Input('search', 'n_clicks')])
#def update_column_results(n_clicks):
# global dataFrameResults
# print(downloadLink)
# print("data:text/csv;charset=utf-8," + urllib.parse.quote(downloadLink))
# return "data:text/csv;charset=utf-8," + dataFrameResults # + urllib.parse.quote(downloadLink)
@app.server.route('/results.csv')
def download_csv():
    """Serve the CSV results file generated by the last search.

    Fix: added cache_timeout=0 for consistency with the xlsx route, so a
    browser never serves a stale cached copy of a previous search's results.
    NOTE(review): `attachment_filename` is deprecated in Flask >= 2.0 in
    favor of `download_name` — confirm the pinned Flask version.
    """
    return flask.send_file('results.csv',
                           mimetype='text/csv',
                           attachment_filename='results.csv',
                           as_attachment=True,
                           cache_timeout=0)
@app.server.route('/results.xlsx')
def download_xlsx():
    """Serve the Excel results file generated by the last search."""
    xlsx_mime = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
    # cache_timeout=0: always serve the freshly written file, never a cached copy.
    return flask.send_file('results.xlsx',
                           mimetype=xlsx_mime,
                           attachment_filename='results.xlsx',
                           as_attachment=True,
                           cache_timeout=0)
####################################
#ADDED DAVID ORDERS_COLUMNS
#callback table column order
@app.callback(Output('results', 'columns'),
              [Input('search', 'n_clicks')],
              [State('dropdown_table_filterColumn', 'value'),
               State('dropdown_table_indexColumn', 'value'),
               State('search_scope', 'value'),
               State('search_keywords', 'value'),
               State('index_keyword', 'value'),
               State('table', 'rows'),
               State('negation_words_feature', 'value'),
               State('negation_words', 'value'),
               State('negation_words_direction', 'value'),
               State('negation_distance_units', 'value'),
               State('negation_words_distance', 'value'),
               State('associated_words_feature', 'value'),
               State('associated_words', 'value'),
               State('associated_words_direction', 'value'),
               State('associated_distance_units', 'value'),
               State('associated_words_distance', 'value'),
               State('keyword_sentences_column', 'value')])
def update_column_results(n_clicks, column, index, scope, keywords, index_val, reportsArr, neg, neg_words, neg_dir, neg_units, neg_dis, assoc, assoc_words, assoc_dir, assoc_units, assoc_dis, keyword_sentences_column):
    """Compute the column order for the results table after a search.

    NOTE(review): this duplicates the row-filtering/scope logic of
    `update_results` solely to derive `new_columns`; consider extracting
    a shared helper so the two callbacks cannot drift apart.
    Side effect: caches the order in the module-global
    `ls_final_column_order_output`.
    """
    global nClicks
    if n_clicks != None: #== nClicks + 1:
        #df = pd.DataFrame(reportsArr)
        global ls_initial_column_order
        # Rebuild the full table from the cached upload.
        df = pd.DataFrame(dataFrame, columns=ls_initial_column_order)
        ############ Select index column satisfied rows ##############
        # Index values arrive comma-separated; normalise whitespace per value.
        index_val = [" ".join(x.strip().split()) for x in index_val.split(',')]
        # Same index-column filtering as in update_results (strings match
        # case-insensitively, numeric columns compare as floats).
        if index != "" and index is not None and not (df.dtypes[index] == 'int64' or df.dtypes[index] == 'float64'):
            if len(index_val) == 1:
                df = df.loc[df[index].str.lower() == str(index_val[0]).lower()]
            else:
                frames = []
                for i in index_val:
                    temp = df.loc[df[index].str.lower() == str(i).lower()]
                    frames.append(temp)
                df = pd.concat(frames)
        elif index != "" and index is not None and (df.dtypes[index] == 'int64' or df.dtypes[index] == 'float64'):
            if len(index_val) == 1:
                df = df.loc[df[index] == float(index_val[0])]
            else:
                frames = []
                for i in index_val:
                    temp = df.loc[df[index] == float(i)]
                    frames.append(temp)
                df = pd.concat(frames)
        ################
        if scope != "":
            # Mirror update_results: a 'Search_Scope' column is prepended.
            scope_results = get_scope(df.columns.get_loc(column), scope, df.values[:])
            #print(len(scope_results), len(df.values[:]), scope_results[:2])
            df = pd.DataFrame(np.append(np.array(scope_results, dtype=object), df.values[:], axis=1), columns=np.concatenate((['Search_Scope'], df.columns)))
            column = 'Search_Scope'
        # Same column-selection lattice as update_results: inverted feature
        # flags (False == ON) decide which count columns appear.
        if not keyword_sentences_column:
            if not assoc:
                if not neg:
                    new_columns = np.concatenate((['TERM_Counts', 'ASSOC_Counts', 'NEG_Counts', 'Keyword_Sentences'], df.columns))
                else:
                    new_columns = np.concatenate((['TERM_Counts', 'ASSOC_Counts', 'Keyword_Sentences'], df.columns))
            else:
                if not neg:
                    new_columns = np.concatenate((['TERM_Counts', 'NEG_Counts', 'Keyword_Sentences'], df.columns))
                else:
                    new_columns = np.concatenate((['TERM_Counts', 'Keyword_Sentences'], df.columns))
        else:
            if not assoc:
                if not neg:
                    new_columns = np.concatenate((['TERM_Counts', 'ASSOC_Counts', 'NEG_Counts'], df.columns))
                else:
                    new_columns = np.concatenate((['TERM_Counts', 'ASSOC_Counts'], df.columns))
            else:
                if not neg:
                    new_columns = np.concatenate((['TERM_Counts', 'NEG_Counts'], df.columns))
                else:
                    new_columns = np.concatenate((['TERM_Counts'], df.columns))
        #result = pd.DataFrame(Search(df.columns.get_loc(column), keywords, df.values[:], neg, neg_words, neg_dir, neg_units, neg_dis, assoc, assoc_words, assoc_dir, assoc_units, assoc_dis), columns=new_columns)
        global ls_final_column_order_output
        #ls_final_column_order_output = result.columns.tolist()
        ls_final_column_order_output = new_columns.tolist()
        #ls_final_column_order_output = ls_final_column_order_output[-2:] + ls_final_column_order_output[:-2]
        return ls_final_column_order_output
    else:
        return [] #ls_final_column_order_output
def get_scope(column, scope, reportsArr):
    """Cut each report down to the requested section(s).

    column: positional index of the report-text column in reportsArr.
    scope: '&'-separated section names (e.g. 'Patient Information',
        'Report', 'Impression'); '' means no narrowing.
    reportsArr: 2-D array of table rows.
    Returns a list of single-element lists, each holding the scoped text;
    non-string cells (numbers, empties) are passed through unchanged.
    """
    reports = reportsArr[:, column]
    if scope == "":
        return reports
    #scopes = [" ".join(x.strip().split()) for x in scope.split(',')]
    # Scope values are '&'-joined by the multi-select dropdown.
    scopes = [" ".join(x.strip().split()) for x in scope.split('&')]
    print(scopes)
    filtered = []
    for r in reports:
        # Numeric or empty cells cannot be sliced; keep them as-is.
        if isinstance(r, int) or isinstance(r, float) or len(r) < 1 :
            filtered.append([r])
            continue
        # Default to the whole report if no section headers are found.
        StartIndex = 0
        EndIndex = len(r)
        for i, s in enumerate(scopes):
            if i == 0:
                # First scope sets the start of the slice. Try the header in
                # several spellings: "Header:", "Header :", either case,
                # preferring a newline-prefixed match.
                if "information" not in s.lower():
                    if "\n" + s + ":" in r:
                        StartIndex = r.index("\n" + s + ":")
                    elif "\n" + s + " :" in r:
                        StartIndex = r.index("\n" + s + " :")
                    elif "\n" + s.lower() + ":" in r.lower():
                        StartIndex = r.lower().index("\n" + s.lower() + ":")
                    elif "\n" + s.lower() + " :" in r.lower():
                        StartIndex = r.lower().index("\n" + s.lower() + " :")
                    elif s + ":" in r:
                        StartIndex = r.index(s + ":")
                    elif s + " :" in r:
                        StartIndex = r.index(s + " :")
                    elif s.lower() + ":" in r.lower():
                        StartIndex = r.lower().index(s.lower() + ":")
                    elif s.lower() + " :" in r.lower():
                        StartIndex = r.lower().index(s.lower() + " :")
                    # NOTE(review): str.index cannot return an out-of-range
                    # value, so this clamp looks like dead code — confirm.
                    if StartIndex < 0 or StartIndex > len(r):
                        StartIndex = 0
                else:
                    # "... Information" sections appear as bare lines
                    # (no trailing colon) in the reports.
                    if "\n" + s.lower() + "\n" in r.lower():
                        StartIndex = r.lower().index("\n" + s.lower() + "\n")
            if len(scopes) == i + 1:
                # Last scope sets the end of the slice: cut at the header of
                # whichever section is known to follow it.
                if s.lower() == "patient information" and "\nexam information\n" in r.lower():
                    EndIndex = r.lower().index("\nexam information\n")
                elif s.lower() == "exam information" and "\nresult information\n" in r.lower():
                    EndIndex = r.lower().index("\nresult information\n")
                elif s.lower() == "report" and "\nimpression:" in r.lower():
                    EndIndex = r.lower().index("\nimpression:")
                elif s.lower() == "impression" and "\nend of impression" in r.lower():
                    EndIndex = r.lower().index("\nend of impression")
                #elif s.lower() == "impression" and "\ncritical results were communicated" in r.lower():
                    #EndIndex = r.lower().index("\ncritical results were communicated")
                #elif s.lower() == "impression" and "\ni, the teaching physician" in r.lower():
                    #EndIndex = r.lower().index("\ni, the teaching physician")
                elif s.lower() == "impression" and "\napproved by attending" in r.lower():
                    EndIndex = r.lower().index("\napproved by attending")
                # Fallbacks: space-prefixed headers (reports whose newlines
                # were flattened to spaces).
                elif s.lower() == "patient information" and " exam information" in r.lower():
                    EndIndex = r.lower().index(" exam information")
                elif s.lower() == "exam information" and " result information" in r.lower():
                    EndIndex = r.lower().index(" result information\n")
                elif s.lower() == "report" and " impression:" in r.lower():
                    EndIndex = r.lower().index(" impression:")
                elif s.lower() == "impression" and " end of impression" in r.lower():
                    EndIndex = r.lower().index(" end of impression")
                #elif s.lower() == "impression" and "\ncritical results were communicated" in r.lower():
                    #EndIndex = r.lower().index("\ncritical results were communicated")
                #elif s.lower() == "impression" and "\ni, the teaching physician" in r.lower():
                    #EndIndex = r.lower().index("\ni, the teaching physician")
                elif s.lower() == "impression" and " approved by attending" in r.lower():
                    EndIndex = r.lower().index(" approved by attending")
                # NOTE(review): .index cannot return out-of-range values, so
                # this clamp (which would drop the final character) appears
                # to be dead code — confirm.
                if EndIndex < 0 or EndIndex > len(r):
                    EndIndex = -1
        filtered.append([r[StartIndex:EndIndex]])
    return filtered
def split_into_sentences(text):
    """Split report text into sentences using rule-based boundary detection.

    Strategy: dots that are *not* sentence boundaries (abbreviations,
    decimal numbers, initials, web addresses) are first masked with the
    <prd> token; real boundaries are marked with <stop>; the text is then
    split on <stop> and the <prd> masks are restored to '.'.
    Relies on the module-level regex fragments: digits, alphabets,
    prefixes, websites, acronyms, starters, suffixes.
    Returns the sentences, each padded with one leading/trailing space
    (the Search code matches " word "-style padded tokens).
    """
    text = " " + text + " "
    text = text.replace(" "," ")
    text = text.replace(" "," ")
    # Blank lines act as hard sentence breaks.
    text = text.replace("\n\n",".<stop>")
    # Common abbreviations whose dots must not split sentences.
    text = text.replace("M.D.","MD")
    text = text.replace("e.g.","eg")
    text = text.replace("i.e.","ie")
    # Decimal numbers: protect the dot between (or after) digits.
    text = re.sub(digits + "[.]" + digits,"\\1<prd>\\2",text)
    text = re.sub(digits + "[.]","\\1<prd>",text)
    text = re.sub(digits + " [.]","\\1<prd>",text)
    #text = re.sub(digits + " [.]" + digits,"\\1<prd>\\2",text)
    #text = re.sub(digits + "[.] " + digits,"\\1<prd>\\2",text)
    #text = re.sub(digits + "[.] " + digits,"\\1<prd>\\2",text)
    #text = re.sub(digits + " [.] " + digits,"\\1<prd>\\2",text)
    #text = re.sub(digits + "[.] " + alphabets,"\\1<prd>\\2",text)
    #text = re.sub(digits + "[.] " + alphabets,"\\1<prd>\\2",text)
    # Colons between words/digits are masked too (label: value patterns).
    text = re.sub(alphabets + "[:] " + alphabets,"\\1<prd>\\2",text)
    text = re.sub(alphabets + "[:] " + alphabets,"\\1<prd>\\2",text)
    text = re.sub(digits + "[:]" + digits,"\\1<prd>\\2",text)
    #text = text.replace(":",".<stop>")
    # Titles ("Dr.", "Mr.", ...) and web addresses keep their dots.
    text = re.sub(prefixes,"\\1<prd>",text)
    text = re.sub(websites,"<prd>\\1",text)
    if "Ph.D" in text: text = text.replace("Ph.D.","Ph<prd>D<prd>")
    # Single-letter initials and acronym sequences.
    text = re.sub("\s" + alphabets + "[.] "," \\1<prd> ",text)
    text = re.sub(acronyms+" "+starters,"\\1<stop> \\2",text)
    text = re.sub(alphabets + "[.]" + alphabets + "[.]" + alphabets + "[.]","\\1<prd>\\2<prd>\\3<prd>",text)
    text = re.sub(alphabets + "[.]" + alphabets + "[.]","\\1<prd>\\2<prd>",text)
    # Name suffixes ("Jr.", "Inc.") end a sentence only before a starter word.
    text = re.sub(" "+suffixes+"[.] "+starters," \\1<stop> \\2",text)
    text = re.sub(" "+suffixes+"[.]"," \\1<prd>",text)
    text = re.sub(" " + alphabets + "[.]"," \\1<prd>",text)
    # Move terminal punctuation outside closing quotes before splitting.
    if "”" in text: text = text.replace(".”","”.")
    if "\"" in text: text = text.replace(".\"","\".")
    if "!" in text: text = text.replace("!\"","\"!")
    if "?" in text: text = text.replace("?\"","\"?")
    # Remaining . ? ! are genuine sentence boundaries.
    text = text.replace(".",".<stop>")
    text = text.replace("?","?<stop>")
    text = text.replace("!","!<stop>")
    # Restore the protected dots.
    text = text.replace("<prd>",".")
    sentences = text.split("<stop>")
    # The trailing split fragment after the final <stop> is discarded.
    sentences = sentences[:-1]
    sentences = [" " + s.strip() + " " for s in sentences]
    return sentences
def Search(column, keywords, reportsArr, neg, neg_words, neg_dir, neg_units, neg_dis, assoc, assoc_words, assoc_dir, assoc_units, assoc_dis, keyword_sentences_column):
    """Count keyword, negation and association hits in one column of reportsArr.

    keywords/neg_words/assoc_words are comma-separated strings of terms.
    neg/assoc/keyword_sentences_column are inverted flags: a FALSY value
    enables the corresponding feature (matching the existing call sites).
    *_dir strings containing 'Before'/'After' choose the search direction,
    *_units selects "Words" (word-window matching) vs sentence windows, and
    *_dis is the window size. Returns the input array with the per-report
    count columns (and, when enabled, a keyword-sentence column) prepended.
    """
    reportsArr = np.asarray(reportsArr)
    reports = reportsArr[:, column]
    counts, neg_counts, assoc_counts = [], [], []
    # Normalize each comma-separated term: drop quotes, collapse whitespace.
    keywords = [" ".join(x.replace("'", "").strip().split()) for x in keywords.split(',')]
    neg_words = [" ".join(x.replace("'", "").strip().split()) for x in neg_words.split(',')]
    assoc_words = [" ".join(x.replace("'", "").strip().split()) for x in assoc_words.split(',')]
    # A trailing comma produces one empty trailing term; drop it.
    if keywords[-1] == "":
        keywords.pop()
    if neg_words[-1] == "":
        neg_words.pop()
    if assoc_words[-1] == "":
        assoc_words.pop()
    # Pad punctuation with spaces so terms tokenize the same way reports do.
    for punc in punctuation:
        for index, keyword in enumerate(keywords):
            keywords[index] = keyword.replace(punc, ' ' + punc + ' ')
        for index, keyword in enumerate(neg_words):
            neg_words[index] = keyword.replace(punc, ' ' + punc + ' ')
        for index, keyword in enumerate(assoc_words):
            assoc_words[index] = keyword.replace(punc, ' ' + punc + ' ')
    # BUGFIX: list.remove() mutates in place and returns None, so the old
    # `keywords = keywords.remove('')` replaced the list with None and made
    # the loops below crash whenever an empty term was present.
    if '' in keywords:
        keywords.remove('')
    if '' in neg_words:
        neg_words.remove('')
    if '' in assoc_words:
        assoc_words.remove('')
    print(keywords, neg_words, assoc_words)
    keyword_sentences_report = np.array([])
    for index, report in enumerate(reports):
        # One count cell per report; stays [0] when the report is skipped.
        counts.append([0])
        neg_counts.append([0])
        assoc_counts.append([0])
        if isinstance(report, int) or isinstance(report, float) or len(report) < 1:
            continue
        # Lower-case and terminate the report so the last sentence splits off.
        report = report + " . "
        report = report.lower()
        # Add spaces around punctuation, then collapse repeated spaces.
        for punc in punctuation:
            report = report.replace(punc, ' ' + punc + ' ')
        report = report.strip()
        report = " ".join(report.split())
        report = " " + report
        keyword_sentences = np.array([])
        sentences = split_into_sentences(report)
        for keyword in keywords:
            keyword = keyword.lower()
            keyword = " " + keyword + " "  # whole-word match only
            if not isinstance(report, numbers.Number) and keyword in report + " ":
                counts[-1][0] += (" " + report + " ").count(keyword)
            elif isinstance(report, numbers.Number) and keyword in str(report) + " ":
                counts[-1][0] += (" " + str(report) + " ").count(keyword)
            else:
                continue
            # Optionally collect every sentence containing the keyword.
            if not keyword_sentences_column:
                for i, sentence in enumerate(sentences):
                    if keyword in " " + sentence + " ":
                        if len(keyword_sentences) == 0:
                            keyword_sentences = np.array([keyword + " - " + sentence + "\n\n"])
                        else:
                            keyword_sentences = np.append(keyword_sentences, keyword + " - " + sentence + "\n\n")
            if not neg:
                ######## Negation by words ##############
                if neg_units == "Words":
                    for i, sentence in enumerate(sentences):
                        while keyword in sentence + " ":
                            beginString = sentence.find(keyword) + len(keyword) + 1
                            endString = sentence.find(keyword) - 1
                            # Widen the window by neg_dis words per direction.
                            for i in range(neg_dis):
                                if 'Before' in neg_dir:
                                    endString = sentence.rfind(' ', 0, endString)
                                if 'After' in neg_dir:
                                    beginString = sentence.find(' ', beginString, len(sentence)) + 1
                            # NOTE(review): the next two checks read assoc_dir inside the
                            # negation branch; looks like a copy/paste from the assoc code --
                            # confirm whether neg_dir was intended.
                            if 'After' in assoc_dir and sentence[sentence.find(keyword) + len(keyword) + 1:].find(keyword) > 0:
                                beginString = min(beginString, sentence.find(keyword) + len(keyword) + 1 + sentence[sentence.find(keyword) + len(keyword) + 1:].find(keyword))
                            if 'Before' in assoc_dir and sentence[endString:beginString].rfind(keyword[0]+'~'+keyword[1:]) > 0:
                                endString = max(endString, endString + sentence[endString:beginString].rfind(keyword[0]+'~'+keyword[1:]) + len(keyword) + 1)
                            if type(neg_words) != type([]):
                                neg_words = [neg_words]
                            for nword in neg_words:
                                nword = nword.lower()
                                if ('Before' in neg_dir or 'After' in neg_dir) and \
                                    (" " + nword + " " in " " + sentence[endString + 1:sentence.find(keyword)] + " " or " " + nword + " " in " " + sentence[sentence.find(keyword)+len(keyword):beginString - 1] + " "):
                                    neg_counts[-1][0] += 1
                            # Mark this occurrence (insert '~') so the while loop advances.
                            sentence = sentence[:sentence.find(keyword)+1] + '~' + sentence[sentence.find(keyword)+1:]
                ######## Negation by sentences ##############
                else:
                    for i, sentence in enumerate(sentences):
                        if keyword in sentence + " ":
                            for nword in neg_words:
                                nword = nword.lower()
                                if neg_dis == 0 and " " + nword + " " in sentences[i] + " ":
                                    neg_counts[-1][0] += 1
                                    continue
                                if ('Before' in neg_dir):
                                    for j in sentences[max(0, i - neg_dis): i]:
                                        if " " + nword + " " in " " + j + " ":
                                            neg_counts[-1][0] += 1
                                if ('After' in neg_dir):
                                    for j in sentences[i+1: min(len(sentences), i + neg_dis + 1)]:
                                        if " " + nword + " " in " " + j + " ":
                                            neg_counts[-1][0] += 1
            if not assoc:
                ######## Association by words ##############
                if assoc_units == "Words":
                    for i, sentence in enumerate(sentences):
                        if index < 235:
                            print(sentence)
                        while keyword in sentence + " ":
                            beginString = sentence.find(keyword) + len(keyword) + 1
                            endString = sentence.find(keyword) - 1
                            for i in range(assoc_dis):
                                if 'Before' in assoc_dir:
                                    endString = sentence.rfind(' ', 0, endString)
                                if 'After' in assoc_dir:
                                    beginString = sentence.find(' ', beginString, len(sentence)) + 1
                            if index < 235:
                                print(sentence[endString:beginString])
                            if 'After' in assoc_dir and sentence[sentence.find(keyword) + len(keyword) + 1:].find(keyword) > 0:
                                beginString = min(beginString, sentence.find(keyword) + len(keyword) + 1 + sentence[sentence.find(keyword) + len(keyword) + 1:].find(keyword))
                            if 'Before' in assoc_dir and sentence[endString:beginString].rfind(keyword[0]+'~'+keyword[1:]) > 0:
                                endString = max(endString, endString + sentence[endString:beginString].rfind(keyword[0]+'~'+keyword[1:]) + len(keyword) + 1)
                            if index < 235:
                                print(sentence[endString:beginString])
                            if type(assoc_words) != type([]):
                                assoc_words = [assoc_words]
                            for aword in assoc_words:
                                aword = aword.lower()
                                if ('Before' in assoc_dir or 'After' in assoc_dir) and \
                                    (" " + aword + " " in " " + sentence[endString + 1:sentence.find(keyword)] + " " or " " + aword + " " in " " + sentence[sentence.find(keyword)+len(keyword):beginString - 1] + " "):
                                    assoc_counts[-1][0] += 1
                            sentence = sentence[:sentence.find(keyword)+1] + '~' + sentence[sentence.find(keyword)+1:]
                ######## Association by sentences ##############
                else:
                    for i, sentence in enumerate(sentences):
                        if keyword in sentence + " ":
                            for aword in assoc_words:
                                aword = aword.lower()
                                if assoc_dis == 0 and " " + aword + " " in " " + sentences[i] + " ":
                                    assoc_counts[-1][0] += 1
                                    continue
                                if ('Before' in assoc_dir):
                                    for j in sentences[max(0, i - assoc_dis): i]:
                                        if " " + aword + " " in " " + j + " ":
                                            assoc_counts[-1][0] += 1
                                if ('After' in assoc_dir):
                                    for j in sentences[i+1: min(len(sentences), i + assoc_dis + 1)]:
                                        if " " + aword + " " in " " + j + " ":
                                            assoc_counts[-1][0] += 1
        if not keyword_sentences_column:
            if len(keyword_sentences) == 0:
                keyword_sentences = np.array([""])
            if len(keyword_sentences_report) == 0:
                keyword_sentences_report = np.array([" ".join(keyword_sentences)])
            else:
                keyword_sentences_report = np.append(keyword_sentences_report, np.array([" ".join(keyword_sentences)]), axis=0)
    counts = np.array(counts, dtype=object)
    neg_counts = np.array(neg_counts, dtype=object)
    assoc_counts = np.array(assoc_counts, dtype=object)
    keyword_sentences_report = np.array(keyword_sentences_report, dtype=object)
    # Assemble the output: prepend the enabled count columns (and the sentence
    # column) to reportsArr; remember the flags are falsy-enabled.
    if not assoc:
        if not neg:
            if not keyword_sentences_column:
                result = np.append(np.expand_dims(keyword_sentences_report, axis=1), reportsArr, axis=1)
                result = np.append(neg_counts, result, axis=1)
            else:
                result = np.append(neg_counts, reportsArr, axis=1)
            result = np.append(assoc_counts, result, axis=1)
            result = np.append(counts, result, axis=1)
        else:
            if not keyword_sentences_column:
                result = np.append(np.expand_dims(keyword_sentences_report, axis=1), reportsArr, axis=1)
                result = np.append(assoc_counts, result, axis=1)
            else:
                result = np.append(assoc_counts, reportsArr, axis=1)
            result = np.append(counts, result, axis=1)
    else:
        if not neg:
            if not keyword_sentences_column:
                result = np.append(np.expand_dims(keyword_sentences_report, axis=1), reportsArr, axis=1)
                result = np.append(neg_counts, result, axis=1)
            else:
                result = np.append(neg_counts, reportsArr, axis=1)
            result = np.append(counts, result, axis=1)
        else:
            if not keyword_sentences_column:
                result = np.append(np.expand_dims(keyword_sentences_report, axis=1), reportsArr, axis=1)
                result = np.append(counts, result, axis=1)
            else:
                result = np.append(counts, reportsArr, axis=1)
    return result
#############################################
# Serve CSS/JS assets locally instead of from a CDN.
# NOTE(review): run_server/css.config suggest this is a Dash app -- confirm.
app.css.config.serve_locally = True
app.scripts.config.serve_locally = True
if __name__ == '__main__':
    # Development server; debug=True enables hot reload (not for production).
    app.run_server(debug=True)
| [
"hashemi.s@husky.neu.edu"
] | hashemi.s@husky.neu.edu |
07ca464ec612048050152c4fdcca8a14f7eea684 | 98c57c08ade572eb4c65b06173fca64273eba7c4 | /math/0x00-linear_algebra/12-bracin_the_elements.py | 27f855735cf0be95c0900dfd214a0da5b24b8391 | [] | no_license | najikadriholberton/holbertonschool-machine_learning | 0fefe7d4c464f65b681bb6a68f02df5598fb1167 | 33ae88b3c6e684f9bd63c87d5fa2bce9e02c53cf | refs/heads/master | 2023-08-04T02:54:29.406478 | 2021-10-01T11:32:09 | 2021-10-01T11:32:09 | 386,055,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | #!/usr/bin/env python3
"""
Module for Bracing The Elements
"""
def np_elementwise(mat1, mat2):
    """Return the element-wise sum, difference, product and quotient of two arrays."""
    total = mat1 + mat2
    difference = mat1 - mat2
    product = mat1 * mat2
    quotient = mat1 / mat2
    return total, difference, product, quotient
| [
"3214@holbertonschool.com"
] | 3214@holbertonschool.com |
65c240ce157060af252de316c8a054885f09302b | dfc992c1bc4a4e6d704c9a790a367f71294baccf | /orders/migrations/0007_usercheckout_braintree_id.py | 846449ab93c7fa4ff2b5d6e01eec175611797ec2 | [] | no_license | spaceled/emartway | 06ecb2b2c09b07b75fe46282120f1e54ea802dcc | 924866182a1e3cdfe571db97f9aa582fb9d2046a | refs/heads/master | 2022-12-15T20:14:06.076940 | 2018-02-12T14:42:47 | 2018-02-12T14:42:47 | 121,132,162 | 0 | 0 | null | 2022-12-07T23:46:20 | 2018-02-11T14:34:50 | HTML | UTF-8 | Python | false | false | 478 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-02-08 02:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional braintree_id field to the UserCheckout model."""
    # Must be applied after the order-status migration.
    dependencies = [
        ('orders', '0006_order_status'),
    ]
    operations = [
        # Stores the Braintree customer id for a checkout; nullable/optional.
        migrations.AddField(
            model_name='usercheckout',
            name='braintree_id',
            field=models.CharField(blank=True, max_length=120, null=True),
        ),
    ]
| [
"spaceled@gmail.com"
] | spaceled@gmail.com |
e9279d1562b5fc0a5afda27a597bd474e89f812f | d74d127e4d46cd45b11bfd225cb301b878f6f983 | /fit.py | 7248233cc7a3b69ebb2e8f42fa129087c605af0d | [] | no_license | Diego10HDZ/CUFICO-2018-2 | bfc62b522cba57a949204460958f2feb63c33cb7 | f4eeeb171c317e7b0b829f1efd762722b0d49bb5 | refs/heads/master | 2020-03-26T09:41:02.818706 | 2019-02-26T21:21:38 | 2019-02-26T21:21:38 | 144,759,068 | 1 | 0 | null | 2019-02-26T20:17:18 | 2018-08-14T18:41:28 | Jupyter Notebook | UTF-8 | Python | false | false | 1,250 | py | import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
def func(x, a, b, c):
return a * np.exp(b*0.01* x) + c
# 55 evenly spaced sample points covering x = 5..60.
x = np.linspace(5,60,55)
# Measured data, one value per x sample; looks like noisy exponential growth.
y = [4.75840e-05, 5.43641e-05, 6.11506e-05, 6.37069e-05, 6.69806e-05, 1.72117e-04, 1.89360e-04, 2.27382e-04, 2.64877e-04, 2.63542e-04, 2.95507e-04, -1.14680e-04, 5.71753e-04, 5.70742e-04, 9.15939e-04, 1.00213e-03, 1.07957e-03, 1.04668e-03, 1.28848e-03, 1.33655e-03, 1.32079e-03, 1.35788e-03, 1.31631e-03, 1.12270e-03, 2.71783e-03, 2.63591e-03, 2.73855e-03, 2.61657e-03, 2.87275e-03, 3.00163e-03, 2.50560e-03, 2.70381e-03, 4.86661e-03, 4.33370e-03, 5.24101e-03, 6.55424e-03, 5.39487e-03, 6.16986e-03, 6.41996e-03, 7.82181e-03, 4.53910e-03, 1.03016e-02, 1.13209e-02, 1.22221e-02, 1.16162e-02, 1.16933e-02, 1.41178e-02, 1.64583e-02, -1.96012e-03, 8.86712e-03, 1.35385e-02, 1.27278e-02, 1.35622e-02, 1.46033e-02, 1.54941e-02]
##yn = y + 0.02*np.random.normal(size=len(x))
# Least-squares fit of func's parameters (a, b, c) to the data.
popt, pcov = curve_fit(func, x, y)
plt.figure()
plt.plot(x, y, 'ko', label="Datos")
plt.plot(x, func(x, *popt), 'r-', label="Aproximacion exponencial de la forma a*exp(b*0.01*x)+c ")
# Same fitted curve again, dashed, labelled with the fitted parameter values.
plt.plot(x, func(x, *popt), 'g--', label=popt)# % tuple(popt))
#plt.plot(label=a)
plt.legend()
plt.show()
| [
"noreply@github.com"
] | Diego10HDZ.noreply@github.com |
2c687dbf04fda22347ea715f63400abcb4672fce | f1ec30dabdc40c0a569c54a5d00f0ce325c2d92e | /lista1/matcher.py | c33693358a58409670a3279eab7cda792e39efe6 | [] | no_license | JakubGogola-IDENTT/jftt | 3ae00da85e15fb5ee51207c076485cf593dda159 | a406afeb3fce94bec8f839985a5d62c95e1f1e05 | refs/heads/master | 2023-04-27T19:00:22.112695 | 2019-03-09T09:35:01 | 2019-03-09T09:35:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 643 | py | # Patterns matcher
def compute_transition_function(pattern, sigma):
    """Build the transition table for the string-matching finite automaton.

    Returns a dict mapping (state, symbol) -> next state, where state q means
    "the last q characters read match pattern[:q]" and sigma is the alphabet.
    """
    m = len(pattern)
    delta = {}
    for state in range(m + 1):
        prefix = pattern[:state]
        for symbol in sigma:
            extended = prefix + symbol
            # Longest k such that pattern[:k] is a suffix of prefix + symbol.
            k = min(m, state + 1)
            while k > 0 and not extended.endswith(pattern[:k]):
                k -= 1
            delta[(state, symbol)] = k
    return delta
def finite_automation_matcher(text, delta, m):
    """Scan `text` with transition table `delta` for a pattern of length `m`.

    Prints each match (preserving the original behavior) and additionally
    returns the list of shifts, so callers can use the result programmatically
    instead of parsing stdout. Previously the function returned None.
    """
    q = 0
    shifts = []
    for i in range(len(text)):
        q = delta[(q, text[i])]
        if q == m:  # accepting state: full pattern matched ending at i
            s = i - m + 1
            print('Pattern occurs with shift', s)
            shifts.append(s)
    return shifts
| [
"jakubgogola97@gmail.com"
] | jakubgogola97@gmail.com |
a5fd815823dd02f345d14a670163777412a29740 | ba7a526e65dfd482493de83be404b285e7b240f0 | /Python Stack/flask_fundamentals/great number game/great.py | 611380d2105578acb69e66d29a77044a70bf6f85 | [] | no_license | gracejansen227/CodingDojoAssignments | 3d19944098b82586d1d5a72283026477a8f9dce6 | b5dc035abdf7a87d8e1074bd6ddafb8690ba9cf2 | refs/heads/master | 2021-05-02T16:12:27.167783 | 2018-06-27T11:22:12 | 2018-06-27T11:22:12 | 120,667,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,417 | py | from flask import Flask, render_template, request, redirect, session
import random
app = Flask(__name__)
app.secret_key = 'ThisIsSecret' # you need to set a secret key for security purposes
# routing rules and rest of server.py below
@app.route('/')
def index():
    # Pick the secret number (0..100 inclusive) and stash it in the session
    # so the /guess handler can compare against it on later requests.
    right_answer = random.randrange(0, 101)
    session['right_answer'] = right_answer
    print right_answer
    print session
    return render_template("index.html", right_answer=session['right_answer'])
@app.route('/guess', methods=['POST'])
def guess():
    # Compare the submitted guess to the number stored by index() and stash
    # a feedback message in the session for the template to display.
    print "user guesses here"
    guess = request.form['guess']
    guess = int(guess)  # form values arrive as strings
    right_answer = session['right_answer']
    print "Guess is",guess
    print "Right answer is",right_answer
    msg1 = ''
    # Clear any message left over from the previous guess.
    if 'msg1' in session:
        session.pop('msg1')
    if guess > right_answer:
        msg1 = "Too high!"
        session['msg1'] = msg1
        print 'The message should be', msg1
    elif guess < right_answer:
        msg1 = "Too low!"
        session['msg1'] = msg1
        print 'The message should be', msg1
    elif guess == right_answer:
        msg1 = "You got it right!"
        print msg1
        session['msg1'] = msg1
    print session
    #session['msg'] = msg
    return render_template("index.html", msg1=session['msg1'], guess =guess)
@app.route('/reset', methods=['POST'])
def reset():
    # NOTE(review): this re-renders the page without generating a new number,
    # so the old session['right_answer'] survives. Redirecting to '/' would
    # start a fresh round -- confirm intended behaviour.
    return render_template('index.html')
# Start the development server (debug=True; not for production use).
app.run(debug=True)
| [
"graceowensjansen@gmail.com"
] | graceowensjansen@gmail.com |
d6b2e9abd476619cb46b072951c4017d486b6422 | db1089e6201a65d7813d3b82222ad4460d97d4b4 | /database/models.py | 6c9dc22744a113a015c00e38eff31d27a1852021 | [
"MIT"
] | permissive | imtiaz101325/movie_api | 5c0dd06f992323df4d573ebdca189f105cc77d7c | db54066bc2f8b55aacd92aaa678fa4e1ae9ff4b3 | refs/heads/master | 2022-01-10T21:13:03.190488 | 2022-01-06T21:03:38 | 2022-01-06T21:03:38 | 252,554,313 | 6 | 0 | MIT | 2020-04-06T09:44:22 | 2020-04-02T20:02:35 | HTML | UTF-8 | Python | false | false | 3,321 | py | from sqlalchemy import create_engine, Column, Table, ForeignKey, MetaData
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import (
Integer, String, Date, DateTime, Float, Boolean, Text
)
# Declarative base shared by every model in this module.
Base = declarative_base()
def db_connect():
    """Return an engine connected to the local SQLite movie database."""
    return create_engine('sqlite:///database/movies.db')
def create_table(engine):
    """Create all tables declared on Base (existing tables are left alone)."""
    Base.metadata.create_all(engine)
# Many-to-many join tables linking movies to directors/producers/stars.
# (The misspelled "accociation" variable names are kept: other modules may
# import them; only the variable names, not the table names, are misspelled.)
movie_director_accociation_table = Table('movie_director_association', Base.metadata,
    Column('movie_id', Integer, ForeignKey('movies.id')),
    Column('director_id', Integer, ForeignKey('directors.id'))
)
movie_producer_accociation_table = Table('movie_producer_association', Base.metadata,
    Column('movie_id', Integer, ForeignKey('movies.id')),
    Column('producer_id', Integer, ForeignKey('producers.id'))
)
movie_star_accociation_table = Table('movie_star_association', Base.metadata,
    Column('movie_id', Integer, ForeignKey('movies.id')),
    Column('star_id', Integer, ForeignKey('stars.id'))
)
class Movie(Base):
    """A film record with its poster, genres, ratings and credited people."""
    __tablename__ = 'movies'
    id = Column(Integer, primary_key=True)
    name = Column('name', String(100))
    year = Column('year', String(4))  # release year, stored as text
    awards = Column('awards', String(10))
    nominations = Column('nominations', String(10))
    # One poster per movie; genres and ratings are one-to-many child rows.
    image = relationship('MoviePoster', uselist=False, back_populates='movie')
    genre = relationship('MovieGenre', back_populates='movie')
    rating = relationship('MovieRating', back_populates='movie')
    # Many-to-many links through the association tables defined above.
    directors = relationship(
        'Director',
        secondary=movie_director_accociation_table,
        back_populates='movies'
    )
    producers = relationship(
        'Producer',
        secondary=movie_producer_accociation_table,
        back_populates='movies'
    )
    stars = relationship(
        'Star',
        secondary=movie_star_accociation_table,
        back_populates='movies'
    )
class MovieGenre(Base):
    """One genre label attached to a movie (a movie may have several rows)."""
    __tablename__ = 'genres'
    id = Column(Integer, primary_key=True)
    movie_id = Column(Integer, ForeignKey('movies.id'))
    genre = Column('genre', String(10))
    movie = relationship('Movie', back_populates='genre')
class MovieRating(Base):
    """A single user's rating of a movie at a point in time."""
    __tablename__ = 'ratings'
    id = Column(Integer, primary_key=True)
    movie_id = Column(Integer, ForeignKey('movies.id'))
    user_id = Column('user_id', Integer)  # plain int -- no users table here
    rating = Column('rating', Float)
    timestamp = Column('timestamp', DateTime)
    movie = relationship('Movie', back_populates='rating')
class MoviePoster(Base):
    """Poster image metadata (source URL and alt text) for one movie."""
    __tablename__ = 'movie_posters'
    id = Column(Integer, primary_key=True)
    movie_id = Column(Integer, ForeignKey('movies.id'))
    src = Column('src', Text)
    alt_text = Column('alt_text', Text)
    movie = relationship("Movie", back_populates='image')
class Person(object):
    """Mixin providing the shared id/name columns for the people models."""
    id = Column(Integer, primary_key=True)
    name = Column('name', Text, unique=True)
class Director(Person, Base):
    """A director, linked many-to-many to the movies they directed."""
    __tablename__ = 'directors'
    movies = relationship(
        'Movie',
        secondary=movie_director_accociation_table,
        back_populates='movies'
    )
class Producer(Person, Base):
    """A producer, linked many-to-many to the movies they produced."""
    __tablename__ = 'producers'
    movies = relationship(
        'Movie',
        secondary=movie_producer_accociation_table,
        back_populates='movies'
    )
class Star(Person, Base):
    """An actor/star, linked many-to-many to the movies they appeared in."""
    __tablename__ = 'stars'
    movies = relationship(
        'Movie',
        secondary=movie_star_accociation_table,
        back_populates='movies'
    )
"imtiaz101325@gmail.com"
] | imtiaz101325@gmail.com |
5dda65dc3c38a21843e3353d394dd102b4f9b2f0 | 98b0d740346ad9aecd228b9a8ebb8e818908ce03 | /sd-exercise.py | c87a9aec8446d4e8a23ea36643c04628048a821e | [] | no_license | alexisbellido/python-examples | 8c63156a2800a584a8aff0909325e38acbe49163 | e6a4f61d9cd18588987430007e28ef036971764b | refs/heads/master | 2022-10-16T08:28:15.312916 | 2022-09-30T15:55:31 | 2022-09-30T15:55:31 | 240,379,353 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,350 | py | from collections import deque
def get_next_pal(num, is_palindrome_fn):
    """Return the first value greater than num that is_palindrome_fn accepts.

    Returns 0 when num is not incrementable (e.g. a string was passed).
    """
    try:
        candidate = num + 1
        while not is_palindrome_fn(candidate):
            candidate += 1
    except TypeError:
        # Non-numeric input cannot be incremented.
        return 0
    return candidate
# TODO
# def is_pal_math():
# num = 807
# temp=num
# rev=0
# while(num>0):
# dig=num%10
# rev=rev*10+dig
# num=num//10
# print('dig', dig)
# print('rev', rev)
# print('num', num)
# print('====')
# if(temp==rev):
# print("The number is palindrome!")
# else:
# print("Not a palindrome!")
def is_pal_with_reverse(word):
    """Palindrome check by comparing the character list to its reverse.

    Accepts strings or numbers (numbers are stringified first).
    """
    chars = list(str(word))
    if len(chars) <= 1:
        return True
    return chars == chars[::-1]
def is_pal_with_deque(word):
    """Palindrome check by popping matching characters from both ends of a deque."""
    chars = deque(str(word))
    # Zero- or one-character inputs are trivially palindromic.
    while len(chars) > 1:
        if chars.popleft() != chars.pop():
            return False
    return True
def is_pal_with_half_list(word):
    """Palindrome check comparing the first half against the mirrored second half."""
    text = str(word)
    size = len(text)
    if size <= 1:
        return True
    # Only size // 2 comparisons are needed; the middle char (odd length)
    # never needs checking.
    return all(text[idx] == text[size - idx - 1] for idx in range(size // 2))
if __name__ == '__main__':
    # Demo: find the next palindrome after a number with each predicate.
    # num = 12521
    # print(is_pal_with_deque(num))
    num = 119
    p_num = get_next_pal(num, is_pal_with_reverse)
    print(f'Started at {num} and found {p_num}')
    num = 119
    p_num = get_next_pal(num, is_pal_with_deque)
    print(f'Started at {num} and found {p_num}')
    num = 537373
    p_num = get_next_pal(num, is_pal_with_half_list)
    # get_next_pal returns 0 (falsy) when the input was not a number.
    if p_num:
        print(f'Started at {num} and found {p_num}')
    else:
        print('Please use a number')
| [
"alexis@ventanazul.com"
] | alexis@ventanazul.com |
d864de3060ba0754499f07512ef7b4a5127f3495 | 19bb7fdcbd8a2fc9209a87a9b42e699925a47aff | /aiosmb/dcerpc/v5/interfaces/servicemanager.py | 7f11cccdc956751db4804c3c0b39a197de5d55ef | [] | no_license | xBlackSwan/aiosmb | 4cbf8af4806631cf370a319317cc3714239e16ba | 82d0f490eb7aa81af8b73ad6cb59b423605c51c3 | refs/heads/master | 2022-09-06T21:18:40.228457 | 2020-06-01T21:55:53 | 2020-06-01T21:55:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,777 | py | import enum
import asyncio
from aiosmb import logger
from aiosmb.dcerpc.v5.common.service import SMBServiceStatus, SMBService
from aiosmb.dcerpc.v5.common.connection.smbdcefactory import SMBDCEFactory
from aiosmb.dcerpc.v5 import wkst, scmr
from aiosmb.commons.utils.decorators import red, rr, red_gen
class SMBRemoteServieManager:
	"""Remote Windows Service Control Manager (SCM) client over SMB/DCERPC.

	Methods follow the library's (result, error) tuple convention enforced by
	the @red / @red_gen decorators. (Class name misspelling is kept: callers
	import it by this name.)
	"""
	def __init__(self, connection):
		self.connection = connection  # established SMB connection
		self.dce = None               # DCERPC session bound to svcctl
		self.handle = None            # open SCM handle
		self.service_handles = {}     # service_name -> open service handle
	async def __aenter__(self):
		return self
	async def __aexit__(self, exc_type, exc, traceback):
		await self.close()
		# BUGFIX: previously returned the tuple (True, None), which is truthy;
		# a truthy __aexit__ return value silently suppresses ANY exception
		# raised inside the `async with` block. Return None so errors propagate.
		return
	@red
	async def connect(self, open = True):
		"""Bind a DCERPC session to the svcctl pipe; optionally open the SCM."""
		rpctransport = SMBDCEFactory(self.connection, filename=r'\svcctl')
		self.dce = rpctransport.get_dce_rpc()
		await rr(self.dce.connect())
		await rr(self.dce.bind(scmr.MSRPC_UUID_SCMR))
		if open == True:
			await rr(self.open())
		return True,None
	@red
	async def open(self):
		"""Open a handle to the remote Service Control Manager."""
		if not self.dce:
			await rr(self.connect())
		ans, _ = await rr(scmr.hROpenSCManagerW(self.dce))
		self.handle = ans['lpScHandle']
		return True,None
	@red
	async def close(self):
		"""Best-effort cleanup: close all service handles and the DCE session."""
		if self.dce:
			if self.handle:
				for service_name in self.service_handles:
					# Intentionally swallow errors: close as much as possible.
					try:
						await self.close_service(service_name)
					except:
						pass
					try:
						await scmr.hRCloseServiceHandle(self.dce, self.service_handles[service_name])
					except:
						pass
			try:
				await self.dce.disconnect()
			except:
				pass
			return
		return True,None
	@red_gen
	async def list(self):
		"""Yield an SMBService (name, display name, status) per remote service."""
		resp, _ = await rr(scmr.hREnumServicesStatusW(self.dce, self.handle))
		for i in range(len(resp)):
			service_status = None
			# Map the raw SCM state constant onto the library enum.
			state = resp[i]['ServiceStatus']['dwCurrentState']
			if state == scmr.SERVICE_CONTINUE_PENDING:
				service_status = SMBServiceStatus.CONTINUE_PENDING
			elif state == scmr.SERVICE_PAUSE_PENDING:
				service_status = SMBServiceStatus.PAUSE_PENDING
			elif state == scmr.SERVICE_PAUSED:
				service_status = SMBServiceStatus.PAUSED
			elif state == scmr.SERVICE_RUNNING:
				service_status = SMBServiceStatus.RUNNING
			elif state == scmr.SERVICE_START_PENDING:
				service_status = SMBServiceStatus.START_PENDING
			elif state == scmr.SERVICE_STOP_PENDING:
				service_status = SMBServiceStatus.STOP_PENDING
			elif state == scmr.SERVICE_STOPPED:
				service_status = SMBServiceStatus.STOPPED
			else:
				service_status = SMBServiceStatus.UNKNOWN
			# Service/display names arrive NUL-terminated; strip the last char.
			service = SMBService(resp[i]['lpServiceName'][:-1], resp[i]['lpDisplayName'][:-1], service_status)
			yield service, None
	@red
	async def open_service(self, service_name):
		"""Open (and cache) a handle to one service; no-op if already open."""
		if service_name in self.service_handles:
			return False, None
		ans, _ = await rr(scmr.hROpenServiceW(self.dce, self.handle, service_name))
		self.service_handles[service_name] = ans['lpServiceHandle']
		return True,None
	@red
	async def close_service(self, service_name):
		"""Close the cached handle for one service, opening it first if needed."""
		if not self.handle:
			await rr(self.open())
		if service_name not in self.service_handles:
			await rr(self.open_service(service_name))
		await rr(scmr.hRCloseServiceHandle(self.dce, self.service_handles[service_name]))
		del self.service_handles[service_name]
		return True,None
	@red
	async def check_service_status(self, service_name):
		"""Return the SMBServiceStatus of a service (DISABLED/STOPPED/RUNNING)."""
		if not self.handle:
			await rr(self.open())
		if service_name not in self.service_handles:
			await rr(self.open_service(service_name))
		ans, _ = await rr(scmr.hRQueryServiceStatus(self.dce, self.service_handles[service_name]))
		if ans['lpServiceStatus']['dwCurrentState'] == scmr.SERVICE_STOPPED:
			logger.info('Service %s is in stopped state'% service_name)
			# Stopped might mean disabled: check the start type (0x4 = disabled).
			ans, _ = await rr(scmr.hRQueryServiceConfigW(self.dce,self.handle))
			if ans['lpServiceConfig']['dwStartType'] == 0x4:
				logger.info('Service %s is disabled'% service_name)
				return SMBServiceStatus.DISABLED, None
			else:
				return SMBServiceStatus.STOPPED, None
		elif ans['lpServiceStatus']['dwCurrentState'] == scmr.SERVICE_RUNNING:
			logger.debug('Service %s is already running'% service_name)
			return SMBServiceStatus.RUNNING, None
		else:
			raise Exception('Unknown service state 0x%x - Aborting' % ans['CurrentState'])
		return False, None
	@red
	async def stop_service(self, service_name):
		raise NotImplementedError('stop_service')
	@red
	async def create_service(self, service_name, display_name, command):
		"""Register a new service running `command`; caches its handle."""
		if not self.handle:
			await rr(self.open())
		resp, _ = await rr(scmr.hRCreateServiceW(self.dce, self.handle, service_name + '\x00', display_name + '\x00', lpBinaryPathName=command + '\x00'))
		self.service_handles[service_name] = resp['lpServiceHandle']
		return True,None
	@red
	async def delete_service(self, service_name):
		"""Delete a service from the remote SCM."""
		if not self.handle:
			await rr(self.open())
		if service_name not in self.service_handles:
			await rr(self.open_service(service_name))
		await rr(scmr.hRDeleteService(self.dce, self.service_handles[service_name]))
		return True,None
	@red
	async def start_service(self, service_name):
		"""Start a service and give it a moment to spin up."""
		if not self.handle:
			await rr(self.open())
		if service_name not in self.service_handles:
			await rr(self.open_service(service_name))
		await rr(scmr.hRStartServiceW(self.dce , self.service_handles[service_name]))
		await asyncio.sleep(1) #service takes time to start up...
		return True,None
	@red
	async def enable_service(self, service_name):
		"""Re-enable a service by changing its service config."""
		if not self.handle:
			await rr(self.open())
		if service_name not in self.service_handles:
			await rr(self.open_service(service_name))
		await rr(scmr.hRChangeServiceConfigW(self.dce, self.service_handles[service_name]))
		return True,None
| [
"info@skelsec.com"
] | info@skelsec.com |
c162445c33d189714668cb531a3348309d5df3bd | 3535e98acaefbb284f2bf0d6de76e31d849a3616 | /game.py | 66b261b15ab097b8edd48b8bfee5eeadf5dd51e8 | [] | no_license | omolazabal/tetris | 23740b13dff9ce9b5f78806a8c0207e886969f1b | 7cc7e8ac2126983b8c29071a60d8cb1300302306 | refs/heads/master | 2021-04-28T09:36:43.867911 | 2019-02-11T00:22:42 | 2019-02-11T00:22:42 | 122,044,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,927 | py |
import os
import pickle
import pygame as pg
import numpy as np
import datetime
from pygame.locals import *
from tetris.utils import Timer
from tetris.core import Tetromino, Board, Score
from tetris.settings import *
# Playfield geometry (pixels). The board is 10 tiles wide, centered horizontally.
TILE_SIZE = TetrominoSettings.tile_size
TOP = DisplaySettings.height//10
BOTTOM = DisplaySettings.height//10
LEFT = (DisplaySettings.width-TILE_SIZE*10)//2
RIGHT = DisplaySettings.width-LEFT
BACKGROUND_LOC = (LEFT, TOP)
BACKGROUND_BORDER_LOC = (LEFT - TILE_SIZE//8, TOP - TILE_SIZE//8)
# Side panel (piece preview box) to the left of the playfield.
SIDE_BACKGROUND_LOC = (LEFT - TILE_SIZE*6 - TILE_SIZE//4, TOP + TILE_SIZE*4)
SIDE_BACKGROUND_BORDER_LOC = (SIDE_BACKGROUND_LOC[0] - TILE_SIZE//8, SIDE_BACKGROUND_LOC[1] - TILE_SIZE//8)
# Per-shape offsets so each tetromino appears centered inside the preview box.
SIDE_TET_LOC = {
        'I' : (SIDE_BACKGROUND_LOC[0] + TILE_SIZE/2, SIDE_BACKGROUND_LOC[1] + TILE_SIZE/2),
        'O' : (SIDE_BACKGROUND_LOC[0] + TILE_SIZE/2, SIDE_BACKGROUND_LOC[1] + TILE_SIZE),
        'L' : (SIDE_BACKGROUND_LOC[0] + TILE_SIZE, SIDE_BACKGROUND_LOC[1] + TILE_SIZE),
        'J' : (SIDE_BACKGROUND_LOC[0] + TILE_SIZE, SIDE_BACKGROUND_LOC[1] + TILE_SIZE),
        'T' : (SIDE_BACKGROUND_LOC[0] + TILE_SIZE, SIDE_BACKGROUND_LOC[1] + TILE_SIZE),
        'Z' : (SIDE_BACKGROUND_LOC[0] + TILE_SIZE, SIDE_BACKGROUND_LOC[1] + TILE_SIZE),
        'S' : (SIDE_BACKGROUND_LOC[0] + TILE_SIZE, SIDE_BACKGROUND_LOC[1] + TILE_SIZE),
        }
# Text label positions: preview caption, held piece, score, high score, level.
SIDE_FONT_LOC = (SIDE_BACKGROUND_LOC[0] + TILE_SIZE*1.1, SIDE_BACKGROUND_LOC[1] - TILE_SIZE*1.5)
HELD_FONT_LOC = (SIDE_FONT_LOC[0], SIDE_FONT_LOC[1] + TILE_SIZE*8)
SCORE_FONT_LOC = (SIDE_FONT_LOC[0] + TILE_SIZE*16, SIDE_FONT_LOC[1])
SCORE_NUM_LOC = (SIDE_FONT_LOC[0] + TILE_SIZE*16, SIDE_FONT_LOC[1] + TILE_SIZE*2)
HIGH_SCORE_FONT_LOC = (SCORE_FONT_LOC[0], SCORE_FONT_LOC[1] + TILE_SIZE*5)
HIGH_SCORE_NUM_LOC = (SCORE_NUM_LOC[0], HIGH_SCORE_FONT_LOC[1] + TILE_SIZE*2)
LEVEL_FONT_LOC = (SIDE_FONT_LOC[0] + TILE_SIZE*16, HIGH_SCORE_FONT_LOC[1] + TILE_SIZE*5)
LEVEL_NUM_LOC = (SIDE_FONT_LOC[0] + TILE_SIZE*16, LEVEL_FONT_LOC[1] + TILE_SIZE*2)
GAME_OVER_FONT_LOC = (DisplaySettings.width//4, DisplaySettings.height//2 - TILE_SIZE)
# Font sizes scale with the tile size.
FONT_SIZE = TILE_SIZE
GAME_OVER_FONT_SIZE = TILE_SIZE*2
# Colors (RGB).
BACKGROUND_COLOR = (27, 27, 27)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (220, 0, 0)
class Game:
"""Class to run Tetris game."""
def __init__(self):
pg.init()
self.board = Board()
self.tetromino = Tetromino()
self.score = Score()
self.debug = True
self.paused = False
self.display = None
self.speed = TimerSettings.drop_interval
self.background_img = 'assets/' + str(TILE_SIZE) + '/background.png'
self.background_border_img = 'assets/' + str(TILE_SIZE) + '/background_border.png'
self.side_background_img = 'assets/' + str(TILE_SIZE) + '/side_background.png'
self.side_background_border_img = 'assets/' + str(TILE_SIZE) + '/side_background_border.png'
self.scores = {str(datetime.datetime.now()):0}
if os.path.getsize('scores') > 0:
with open ('scores', 'rb') as fp:
self.scores = pickle.load(fp)
self.high_score = max(self.scores.values())
pg.mixer.music.load('music/edm_theme.wav')
self.shadow_imgs = {
'blue' : pg.image.load('assets/' + str(TILE_SIZE) + '/blue_shadow.png'),
'red' : pg.image.load('assets/' + str(TILE_SIZE) + '/red_shadow.png'),
'yellow' : pg.image.load('assets/' + str(TILE_SIZE) + '/yellow_shadow.png'),
'orange' : pg.image.load('assets/' + str(TILE_SIZE) + '/orange_shadow.png'),
'cyan' : pg.image.load('assets/' + str(TILE_SIZE) + '/cyan_shadow.png'),
'purple' : pg.image.load('assets/' + str(TILE_SIZE) + '/purple_shadow.png'),
}
self.tetromino_imgs = {
'blue' : pg.image.load('assets/' + str(TILE_SIZE) + '/blue_tile.png'),
'red' : pg.image.load('assets/' + str(TILE_SIZE) + '/red_tile.png'),
'yellow' : pg.image.load('assets/' + str(TILE_SIZE) + '/yellow_tile.png'),
'orange' : pg.image.load('assets/' + str(TILE_SIZE) + '/orange_tile.png'),
'cyan' : pg.image.load('assets/' + str(TILE_SIZE) + '/cyan_tile.png'),
'purple' : pg.image.load('assets/' + str(TILE_SIZE) + '/purple_tile.png'),
}
self.background = pg.image.load(self.background_img)
self.background_border = pg.image.load(self.background_border_img)
self.side_background = pg.image.load(self.side_background_img)
self.side_background_border = pg.image.load(self.side_background_border_img)
self.cover = pg.Surface((LEFT, TOP))
self.cover.fill(BACKGROUND_COLOR)
self.font_name = pg.font.match_font('arial', 1)
self.held_font = pg.font.Font(self.font_name, FONT_SIZE).render('HOLD', True, WHITE)
self.next_font = pg.font.Font(self.font_name, FONT_SIZE).render('NEXT', True, WHITE)
self.level_font = pg.font.Font(self.font_name, FONT_SIZE).render('LEVEL', True, WHITE)
self.score_font = pg.font.Font(self.font_name, FONT_SIZE).render('SCORE', True, WHITE)
self.high_score_font = pg.font.Font(self.font_name, FONT_SIZE).render('HIGH SCORE', True, WHITE)
self.game_over_font = pg.font.Font(self.font_name, GAME_OVER_FONT_SIZE).render('GAME OVER', True, RED)
def debug_print(self):
"""Print Tetris pieces and relevant information to console."""
os.system('cls' if os.name == 'nt' else 'clear')
print('\nPosition')
print(self.tetromino.position())
print('\nBlock coordinates')
print(self.tetromino.block_coordinates())
print('\nBoard')
print(self.board)
print('\nBoard heights')
print(self.board.get_height())
if self.pause:
print('\nPaused')
def start(self):
"""Start the game."""
pg.display.set_caption('Tetris')
self.display = pg.display.set_mode((DisplaySettings.width, DisplaySettings.height))
self.MOVE_DOWN = pg.USEREVENT + 1
pg.time.set_timer(self.MOVE_DOWN, self.speed)
pg.key.set_repeat(KeyboardSettings.delay, KeyboardSettings.interval)
self.clock = pg.time.Clock()
pg.mixer.music.play(-1)
self.play()
def blit_shadow(self):
coords = self.board.shadow.block_coordinates()
for x, y in zip(coords[1], coords[0]):
self.display.blit(self.shadow_imgs[self.tetromino.color],
(BACKGROUND_LOC[0] + (x - 3)*TILE_SIZE,
BACKGROUND_LOC[1] + (y - 3)*TILE_SIZE))
def blit_held_tetromino(self):
if self.board.held_tetromino is not None:
pos= self.board.held_tetromino.position()
coords = self.board.held_tetromino.block_coordinates()
for x, y in zip(coords[1], coords[0]):
self.display.blit(self.tetromino_imgs[self.board.held_tetromino.color],
(SIDE_TET_LOC[self.board.held_tetromino.shape][0] + (x - self.board.held_tetromino.col)*TILE_SIZE,
SIDE_TET_LOC[self.board.held_tetromino.shape][1] + (y - 3)*TILE_SIZE + TILE_SIZE*8))
def blit_next_tetromino(self):
coords = self.tetromino.next_block_coordinates()
for x, y in zip(coords[1], coords[0]):
self.display.blit(self.tetromino_imgs[self.tetromino.next_color],
(SIDE_TET_LOC[self.tetromino.next_shape][0] + x*TILE_SIZE,
SIDE_TET_LOC[self.tetromino.next_shape][1] + y*TILE_SIZE))
def blit_tetromino(self):
coords = self.tetromino.block_coordinates()
for x, y in zip(coords[1], coords[0]):
self.display.blit(self.tetromino_imgs[self.tetromino.color],
(BACKGROUND_LOC[0] + (x - 3)*TILE_SIZE,
BACKGROUND_LOC[1] + (y - 3)*TILE_SIZE))
def get_new_background(self):
self.background = self.display.copy().subsurface((BACKGROUND_LOC), (TILE_SIZE*10, TILE_SIZE*20))
def render_text(self):
score = pg.font.Font(self.font_name, FONT_SIZE).render(str(self.score.score), True, WHITE)
level = pg.font.Font(self.font_name, FONT_SIZE).render(str(self.score.level), True, WHITE)
high_score = pg.font.Font(self.font_name, FONT_SIZE).render(str(max(self.high_score, self.score.score)), True, WHITE)
self.display.blit(self.held_font, HELD_FONT_LOC)
self.display.blit(self.next_font, SIDE_FONT_LOC)
self.display.blit(self.level_font, LEVEL_FONT_LOC)
self.display.blit(self.score_font, SCORE_FONT_LOC)
self.display.blit(self.high_score_font, HIGH_SCORE_FONT_LOC)
self.display.blit(score, SCORE_NUM_LOC)
self.display.blit(high_score, HIGH_SCORE_NUM_LOC)
self.display.blit(level, LEVEL_NUM_LOC)
def render_frame(self):
self.display.fill(BACKGROUND_COLOR)
self.display.blit(self.background, BACKGROUND_LOC)
self.blit_shadow()
self.blit_tetromino()
self.display.blit(self.side_background, (SIDE_BACKGROUND_LOC[0], SIDE_BACKGROUND_LOC[1] + TILE_SIZE*8))
self.display.blit(self.side_background, SIDE_BACKGROUND_LOC)
self.blit_held_tetromino()
self.blit_next_tetromino()
self.display.blit(self.cover, (LEFT, 0))
self.display.blit(self.background_border, BACKGROUND_BORDER_LOC)
self.display.blit(self.side_background_border, (SIDE_BACKGROUND_BORDER_LOC[0], SIDE_BACKGROUND_BORDER_LOC[1] + TILE_SIZE*8))
self.display.blit(self.side_background_border, SIDE_BACKGROUND_BORDER_LOC)
self.render_text()
def clear_line(self):
chop = pg.transform.chop(self.background,
(0, TILE_SIZE*np.min(self.board.filled_rows - 3),
0, TILE_SIZE*self.board.filled_rows.size))
self.display.blit(chop,
(BACKGROUND_LOC[0], BACKGROUND_LOC[1] +
TILE_SIZE*self.board.filled_rows.size))
self.get_new_background()
self.board.filled_rows = np.array([])
def reset(self):
self.score.reset()
self.board.reset()
self.tetromino.reset()
self.board.start_game(self.tetromino)
self.background = pg.image.load(self.background_img)
def play(self):
"""Begin game and check for keyboard inputs."""
self.board.start_game(self.tetromino)
self.render_text()
while True:
if self.board.top_out:
self.game_over()
if self.board.filled_rows.size != 0:
if self.score.add_score(self.board.filled_rows.size):
self.speed -= 75
pg.time.set_timer(self.MOVE_DOWN, self.speed)
self.clear_line()
self.render_text()
self.clock.tick(DisplaySettings.fps)
self.render_frame()
pg.display.update()
for event in pg.event.get():
if self.debug:
self.debug_print()
if event.type == self.MOVE_DOWN and not self.board.top_out:
if self.board.soft_drop(self.tetromino):
self.get_new_background()
if event.type == pg.QUIT:
self.quit()
if event.type == pg.KEYDOWN:
if event.key == pg.K_DOWN:
if self.board.soft_drop(self.tetromino):
self.get_new_background()
pg.time.set_timer(self.MOVE_DOWN, self.speed)
elif event.key == pg.K_LEFT:
self.board.move_left(self.tetromino)
elif event.key == pg.K_RIGHT:
self.board.move_right(self.tetromino)
elif event.key == pg.K_ESCAPE:
self.pause()
elif event.key == pg.K_x:
self.board.hold(self.tetromino)
elif event.key == pg.K_z:
self.board.rotate_left(self.tetromino)
pg.key.set_repeat(KeyboardSettings.delay, KeyboardSettings.interval)
elif event.key == pg.K_UP:
self.board.rotate_right(self.tetromino)
elif event.key == pg.K_SPACE:
self.board.hard_drop(self.tetromino)
self.display.blit(self.background, BACKGROUND_LOC)
self.blit_tetromino()
self.board.soft_drop(self.tetromino)
self.get_new_background()
pg.quit()
def pause(self):
"""Pause gameplay."""
self.paused = True
while self.paused:
pg.display.update()
self.clock.tick(DisplaySettings.fps)
for event in pg.event.get():
if self.debug:
self.debug_print()
if event.type == pg.QUIT:
self.quit()
if event.type == pg.KEYDOWN:
if event.key == pg.K_ESCAPE:
self.paused = False
def game_over(self):
darken = pg.Surface((DisplaySettings.width, DisplaySettings.height))
darken.set_alpha(175)
darken.fill(BLACK)
self.display.blit(darken, (0,0))
self.display.blit(self.game_over_font, GAME_OVER_FONT_LOC)
self.scores[str(datetime.datetime.now())] = self.score.score
with open('scores', 'wb') as fp:
pickle.dump(self.scores, fp)
while True:
pg.display.update()
self.clock.tick(DisplaySettings.fps)
for event in pg.event.get():
if self.debug:
self.debug_print()
if event.type == pg.QUIT:
self.quit()
def quit(self):
"""Quit the program."""
pg.quit()
quit()
| [
"oscarolazabal@gmail.com"
] | oscarolazabal@gmail.com |
f6e1a494ebaedee5837926e1f68f62a8843f3630 | 55cfc146ac938442789bdf18dffc4ceead99c0d7 | /HttpServerCode.py | b7d350541231b3bdd77826c123ba3356b85452ef | [] | no_license | kansalk/CN-Project | 0879b3e351bfd3039f50f739c8b5f3326f7684e0 | 4f34b2f7801e253c67282784c58623ae2ecc2615 | refs/heads/master | 2023-03-08T03:35:40.492734 | 2019-10-25T13:53:05 | 2019-10-25T13:53:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,300 | py | import os
import sys
import gzip
from socket import *
import threading
import functools
rootDirectory="/home/keshavk/Documents/CN Project/Templates1"
serverPort = 12000
serverSocket = socket(AF_INET,SOCK_STREAM)
serverSocket.bind(("",serverPort))
serverSocket.listen(1)
supportedFileTypes=['html','css','eot','svg','ttf','woff','js']
supportedImageFiles=['png','jpeg','jpg']
print("The server is ready to receive at Port",serverPort)
print("The root Root Directory is",rootDirectory)
def send_file(connectionSocket,filename):
print("opening -",filename)
fp = open(rootDirectory +filename,"r")
lines = fp.readlines()
for i in lines:
connectionSocket.send(i.encode())
def serve_request(connectionSocket,requestheader):
if(requestheader[0]=="GET"):
filepath=functools.reduce(lambda x,y:x+ '/' + y,requestheader[1].split('/')[:-1])
if(len(filepath)==0):
filepath='/'
requestedfile=requestheader[1].split('/')[-1]
if(len(requestedfile)==1):
requestedfile="index.html"
if(requestedfile not in os.listdir(rootDirectory + filepath)):
connectionSocket.send('HTTP/1.0 404 FILE NOT FOUND\n'.encode())
print("File Not found - ",requestedfile)
elif(requestheader[1].split('.')[-1] in supportedFileTypes):
connectionSocket.send('HTTP/1.0 200 OK\n'.encode())
connectionSocket.send(('Content-Type: text/' + requestheader[1].split('.')[-1] + '\n').encode())
connectionSocket.send('\n'.encode())
send_file(connectionSocket,filepath+ "/"+ requestedfile)
elif(requestheader[1].split('.')[-1] in supportedImageFiles):
try:
#compress image
if not (requestedfile+'.gz' in os.listdir(rootDirectory + filepath)):
# if not (requestheader[1] + '.gz' in os.listdir()):
with open(rootDirectory +filepath + '/'+ requestedfile,"rb") as img:
data = img.read()
bindata = bytearray(data)
with gzip.open(rootDirectory + filepath + '/' + requestedfile +'.gz', "wb") as f:
f.write(bindata)
#read compressed image
with gzip.open(rootDirectory + filepath + '/' + requestedfile + '.gz', "rb") as imageFile:
# with gzip.open(requestheader[1][1:]+'.gz', "rb") as imageFile:
s=imageFile.read()
connectionSocket.send('HTTP/1.0 200 OK\n'.encode())
connectionSocket.send(('Content-Type: image/' + requestheader[1].split('.')[-1] + '\n').encode())
connectionSocket.send('\n'.encode())
connectionSocket.send(s)
except:
print("File Read Error",rootDirectory + "/"+ filepath)
else:
print("File not found")
connectionSocket.send("HTTP/1.0 500 Server Error- The server is capable of handling GET request".encode())
#todo Server Error
try:
while True :
connectionSocket,ipaddr = serverSocket.accept()
print("Connection accepted from- ",ipaddr,'\n')
sentence = connectionSocket.recv(1024)
httprequest = sentence.decode()
print(httprequest)
httprequest=httprequest.split('\r\n')
for i in range(len(httprequest)):
httprequest[i]=httprequest[i].split()
t1=threading.Thread(target=serve_request,args=(connectionSocket,httprequest[0],))
t1.start()
t1.join()
connectionSocket.shutdown(SHUT_WR)
connectionSocket.close()
print("Connection Closed")
except KeyboardInterrupt:
print("Server Error")
serverSocket.shutdown(SHUT_RDWR)
serverSocket.close()
| [
"kansalkeshav@gmail.com"
] | kansalkeshav@gmail.com |
ba880850e2f70193fe11de6eb05519cbb4b2a638 | 9bf1fc29fd4ace556da64c5da46b60d15ffb5b9f | /EJERCICIO2.py | 81a2c9712e1db71801d5e4fe8d769a090fb6cbae | [] | no_license | Jelowis/Ejercicios-Python-U | 876e612aafef6a5c73c817b26b9e37388ae0d8f5 | 0dce56740b38811b3f5707da9ba5f2c63acff463 | refs/heads/main | 2023-06-07T23:02:42.506347 | 2021-06-28T05:06:31 | 2021-06-28T05:06:31 | 380,867,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | # for range(v) – range(vi,vf) - range(vi,vf,inc)
frase = input("Ingrese frase: ")
for indice in range(len(frase)):
print(indice,'=',frase[indice]) | [
"noreply@github.com"
] | Jelowis.noreply@github.com |
e6f15f434682f566a3a5fc6b95ea0645234f5d45 | 8e24e8bba2dd476f9fe612226d24891ef81429b7 | /geeksforgeeks/python/hard/8_20.py | 9b5691a40626bea7dee38b7648365443f5a95f4f | [] | no_license | qmnguyenw/python_py4e | fb56c6dc91c49149031a11ca52c9037dc80d5dcf | 84f37412bd43a3b357a17df9ff8811eba16bba6e | refs/heads/master | 2023-06-01T07:58:13.996965 | 2021-06-15T08:39:26 | 2021-06-15T08:39:26 | 349,059,725 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,192 | py | Private Variables in Python
**Prerequisite:**Underscore in Python
In Python, there is no existence of “Private” instance variables that cannot
be accessed except inside an object. However, a convention is being followed
by most Python code and coders i.e., a name prefixed with an underscore, For
e.g. **_geek** should be treated as a non-public part of the API or any Python
code, whether it is a function, a method, or a data member. While going
through this we would also try to understand the concept of various forms of
trailing underscores, for e.g., for _ in range(10), __init__(self).
**Mangling and how it works**
In Python, there is something called name mangling, which means that there is
a limited support for a valid use-case for class-private members basically to
avoid name clashes of names with names defined by subclasses. Any identifier
of the form __geek (at least two leading underscores or at most one trailing
underscore) is replaced with _classname__geek, where classname is the current
class name with a leading underscore(s) stripped. As long as it occurs within
the definition of the class, this mangling is done. This is helpful for
letting subclasses override methods without breaking intraclass method calls.
Let’s look at this example and try to find out how this underscore works:
## Python
__
__
__
__
__
__
__
# Python code to illustrate how mangling works
class Map:
def __init__(self, iterate):
self.list = []
self.__geek(iterate)
def geek(self, iterate):
for item in iterate:
self.list.append(item)
# private copy of original geek() method
__geek = geek
class MapSubclass(Map):
# provides new signature for geek() but
# does not break __init__()
def geek(self, key, value):
for i in zip(keys, value):
self.list.append(i)
---
__
__
The mangling rules are designed mostly to avoid accidents but it is still
possible to access or modify a variable that is considered private. This can
even be useful in special circumstances, such as in the debugger.
**_Single Leading Underscores**
So basically one underline in the beginning of a method, function, or data
member means you shouldn’t access this method because it’s not part of the
API. Let’s look at this snippet of code:
## Python
__
__
__
__
__
__
__
# Python code to illustrate
# how single underscore works
def _get_errors(self):
if self._errors is None:
self.full_clean()
return self._errors
errors = property(_get_errors)
---
__
__
The snippet is taken from the Django source code (django/forms/forms.py). This
suggests that errors is a property, and it’s also a part of the API, but the
method, _get_errors, is “private”, so one shouldn’t access it.
**__Double Leading Underscores**
Two underlines, in the beginning, cause a lot of confusion. This is about
syntax rather than a convention. double underscore will mangle the attribute
names of a class to avoid conflicts of attribute names between classes. For
example:
## Python
__
__
__
__
__
__
__
# Python code to illustrate how double
# underscore at the beginning works
class Geek:
def _single_method(self):
pass
def __double_method(self): # for mangling
pass
class Pyth(Geek):
def __double_method(self): # for mangling
pass
---
__
__
**__Double leading and Double trailing underscores__**
There’s another case of double leading and trailing underscores. We follow
this while using special variables or methods (called “magic method”) such
as__len__, __init__. These methods provide special syntactic features to the
names. For example, __file__ indicates the location of the Python file, __eq__
is executed when a == b expression is executed.
**Example:**
## Python
__
__
__
__
__
__
__
# Python code to illustrate double leading and
# double trailing underscore works
class Geek:
# '__init__' for initializing, this is a
# special method
def __init__(self, ab):
self.ab = ab
# custom special method. try not to use it
def __custom__(self):
pass
---
__
__
This article is contributed by **Chinmoy Lenka**. If you like GeeksforGeeks
and would like to contribute, you can also write an article using
contribute.geeksforgeeks.org or mail your article to
contribute@geeksforgeeks.org. See your article appearing on the GeeksforGeeks
main page and help other Geeks.
I have referred Python Docs, hackernoon.com and igorsobreira.com
Please write comments if you find anything incorrect, or you want to share
more information about the topic discussed above.
Attention geek! Strengthen your foundations with the **Python Programming
Foundation** Course and learn the basics.
To begin with, your interview preparations Enhance your Data Structures
concepts with the **Python DS** Course.
My Personal Notes _arrow_drop_up_
Save
| [
"qmnguyenw@gmail.com"
] | qmnguyenw@gmail.com |
4737c16d94228d9c689c9fe4e6c7d3e559caff04 | a03c7e9a7c3b6b2c21929d184873107c66031ece | /chapter_01/pub_package/pyApp/__init__.py | 0db340a62606ca4a764965ba44c599282c56e7db | [] | no_license | maguichang/pyNote | bd8549d0a0665bd7fe40ef863f3d568859186f16 | b2bf748872cdfb87b381a9a948f7baa5f3fdd87f | refs/heads/master | 2020-09-05T19:28:31.010671 | 2019-11-07T09:06:11 | 2019-11-07T09:06:11 | 220,193,015 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | # -*- coding:utf-8 -*-
# @Time :2019/11/7 16:26
# Author :MaGuichang
"""
此 __init__.py 写下你的 name 和导入你的代码模块
"""
from __future__ import absolute_import
from .pyApp import *
name = "python 项目的正确打开方式" | [
"mgc5320@163.com"
] | mgc5320@163.com |
d24f4da6658241c60450ba6aba1669e8dc08157a | 4da58eeffd61b22e276dc6d56db31c7440544d2e | /basics/Calculator.py | 03dd5e64999f882d50046506051662b6056fc3ac | [] | no_license | Ro7nak/python | 604abfb42ccc2c7cd40588d4e36449b30f03a570 | 60d61f68ad438f355c6b9b8b8a3fb524e02cb0de | refs/heads/master | 2020-03-28T04:26:10.865955 | 2019-12-15T08:01:54 | 2019-12-15T08:01:54 | 147,714,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | # from math import *
num1 = input("enter a number: ")
num2 = input("enter another number: ")
#add1 = int(num1) + int(num2)
# input from user is always a string so int can convert in into number
# int will wor for only whole number
add2 = float(num1) + float(num2)
# float will work for decimal numbers
#print("addition of " + num1 + " + " + num2 + " = " + str(add1))
print("addition of " + num1 + " + " + num2 + " = " + str(add2))
| [
"rounak077@gmail.com"
] | rounak077@gmail.com |
3d0c0555f2535a6bc80ec1bd9ae8801309a8b061 | bcd5a15b00c918dcdc220f229489ed298ee0d99c | /src/products.py | d01292700991ae7193b4bf1976da60b1507552b9 | [] | no_license | stepp1/Landslide_EarlyDetection | 48c766d969b803d7f3eec0ffb2fb91516a3116ad | 934fda3b81c1ce21970c15fa2829c83bcd859c3c | refs/heads/main | 2023-08-08T23:55:40.471564 | 2020-12-01T05:06:55 | 2020-12-01T05:06:55 | 321,784,420 | 0 | 0 | null | 2023-07-25T17:03:49 | 2020-12-15T20:43:24 | null | UTF-8 | Python | false | false | 2,089 | py | weather_prods = {
'goes' : {
'id' : 'goes17:fulldisk:v1',
'short_name' : 'goes',
'freq' : '15 min',
'res' : 'multiple',
'bands' : [
'derived:evi',
'derived:ndvi',
'derived:ndwi',
'derived:ndwi1',
'derived:ndwi2'
]
},
'gsod' : {
'name' : 'GSOD Daily Interpolation Weather Product',
'short_name' : 'gsod',
'id' : 'daily-weather:gsod-interpolated:v0',
'res' : '10km',
'deg_res': 0.10,
'bands' : ['tavg', 'tmax', 'tmin', 'rh', 'prec'],
'descrip' : 'interpolated raster from 1980-01-01 for geographical area from -180 deg to 180 deg longitude, and from -60 to 60 deg latitude.'
},
'chirps' : {
'name' : 'CHIRPS Daily Precipitation Weather',
'short_name' : 'chirps',
'id' : 'chirps:daily:v1',
'res' : '5km',
'deg_res': 0.05,
'freq': 'daily',
'bands' : ['daily_precipitation']
},
'cfs' : {
'name' : 'CFS Daily Weather',
'short_name' : 'cfs',
'id' : 'ncep:cfsr-v2:daily:v1',
'res' : '20km',
'deg_res': 0.20,
'freq': 'daily',
'bands' : ['albedo', 'prec', 'snow_cover', 'snow_depth', 'snow_water_equivalent',
'soilmoist1', 'soilmoist2', 'soilmoist3', 'tavg', 'tmax', 'tmin', 'water_runoff']
}
}
soil_moist = {
'smap' : {
'id' : 'smap:SMPL3SM_E',
'short_name': 'smap',
'res' : '9km',
'deg_res': 0.10,
'freq' : 'daily',
'bands': ['am_soil_moisture', 'pm_soil_moisture']
}
}
elevation = {
'aster' : {
'id' : 'aster:gdem3:v0',
'short_name': 'aster',
'res': '30m',
'deg_res': 0.001,
'bands': ['alpha', 'height', 'number_images']
}
}
population = {
'population' : {
'id' : 'd15c019579fa0985f7006094bba7c7288f830e1f:GPW_Population_Density_V4_0',
'name': 'pop',
'res' : '1km',
'deg_res': None,
'bands': ['population']
}
} | [
"sfaragg@gmail.com"
] | sfaragg@gmail.com |
bf24f5fef311dba540023427219ffb4327c635f1 | c0fae5c72bf74f9577eba38fa319c6dff92a46af | /extension_2_ensemble.py | 7dab6f67c78b21a0aee885df26b8fbae7b9aa405 | [] | no_license | theriaultr/CISC_874_Project_Final_Code | 047a120b04a97aec28c29545aadcf302774686d7 | a5f695c43e120ff4037c7f53423d80ee2c0494af | refs/heads/main | 2023-04-04T07:55:25.516410 | 2021-04-15T22:44:42 | 2021-04-15T22:44:42 | 358,392,061 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,519 | py | '''
The purpose of this section is to extend ResNet in an ensemble approach using the final layer before model output.
It designed for combining 2 models with the same number of nodes on last fully connected layer before output layer
The optional methods set at the parameter OPTION are
1. Summing fully connected portion (SUM)
2. Multiplying fully connected portions (MULT)
3. Concatenating fully connected portions (CONC)
4. RNN (RNN)
'''
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array, array_to_img
from PIL import Image
import matplotlib.pyplot as plt
import os
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import seaborn as sn
from sklearn import metrics
from keras import backend as K
from tensorflow.keras.layers import Flatten, Dense, BatchNormalization, Input, Concatenate, Add, Average, SimpleRNN, Embedding, Dropout
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.models import Sequential
from get_results import *
def normalize_data(augmented_image):
    """Rescale an image's pixel values from [0, 255] into [-1, 1].

    Args:
        augmented_image: array holding the image data (assumed 0-255 range).

    Returns:
        The image rescaled sample-wise into the [-1, 1] range.
    """
    # Map [0, 255] -> [0, 2] -> [-1, 1] in one expression.
    return augmented_image / 127.5 - 1
'''
SET THE PARAMETERS
'''
# OPTION selects how the two base networks are combined: "SUM", "AVG", "CONCAT", or "RNN".
OPTION = "SUM"
NUM_EPOCHS = 50
BATCH_SIZE = 32
TYPE_NAME = "With_Ensemble_DR_LR1e4" #name of the test being performed
# Dropout rate applied in the newly added ensemble layers.
ENSEMBLE_DR_RATE = 0.3
'''
LOAD THE MODELS FROM THE PATH NAME SAVED
'''
# NOTE(review): these two path variables appear unused -- the models below are
# loaded from the 'Saved_Models/*_DR_Final/' directories instead. Confirm before removing.
model1_path = "Saved_Models/Train1_50_No_Dropout/saved_model.pb"
model2_path = "Saved_Models/Train1_100_No_Dropout/saved_model.pb"
# Number of nodes on the last fully connected layer shared by both base models.
final_layer_size = 2048
# Learning rate used when training the ensemble extension.
lr_extension = 0.0001
print("------------------Loading the models------------------------")
# Load the two pre-trained base models from disk (saved Keras models).
model1 = load_model('Saved_Models/Train1_50_DR_Final/')
model2 = load_model('Saved_Models/Train1_100_DR_Final/')
# Give the sub-models distinct names so both can be placed in one graph
# without layer-name clashes.
model1._name = "Train1"
model2._name= "Train2"
#remove the last layer of the model
# (pop() drops each model's final output layer so its last fully connected
# layer becomes the model's output.)
model1.pop()
model2.pop()
#don't re-train the models
model1.trainable=False
model2.trainable=False
# Sanity check: print the trainable flag of the first few layers of model 1.
print("model 1 layers trainable:")
print(model1.layers[0].trainable)
print(model1.layers[1].trainable)
print(model1.layers[2].trainable)
#re-train from scratch option
# #model 1******************************************************
# model_resnet50 = tf.keras.applications.ResNet50V2(
# include_top=False,
# weights="imagenet",
# input_tensor=None,
# input_shape=(50,50,3)
# )
# model1 = Sequential(
# [
# Input(shape=(50,50,3)),
# model_resnet50,
# Flatten(),
# Dense(2048, activation='tanh'), #relu because that is the activation function used by ResNet
# ]
# )
# print("Model 1 layer 0:", model1.layers[0])
# print("Model 1 layer 1:", model1.layers[1])
# model_resnet101 = tf.keras.applications.ResNet101V2(
# include_top=False,
# weights="imagenet",
# input_tensor=None,
# input_shape=(50,50,3)
# )
# model2 = Sequential(
# [
# Input(shape=(50,50,3)),
# model_resnet101,
# Flatten(),
# Dense(2048, activation='tanh'), #relu because that is the activation function used by ResNet
# ]
# )
'''
Load the training data
'''
#load train data
print("------------------------Loading the data--------------------------")
# Patient-wise train/test split saved as numpy arrays (images X, labels y).
# NOTE(review): assumes X arrays are 50x50x3 image patches and y is
# two-class one-hot -- confirm against the script that produced these files.
X_train = np.load("../Data/IDC_Data/Split/X_train_patient.npy")
y_train = np.load("../Data/IDC_Data/Split/y_train_patient.npy")
X_test = np.load("../Data/IDC_Data/Split/X_test_patient.npy")
y_test = np.load("../Data/IDC_Data/Split/y_test_patient.npy")
#Create model for each option and train the model
if OPTION == "SUM":
    print("Using summation option")
    #develop a new model that will sum the outputs of each model before prediction
    inputs = Input(shape=(50,50,3))
    # Run the same input through both frozen base models.
    model1_layer = model1(inputs)
    model2_layer = model2(inputs)
    addition_layer = Add()([model1_layer, model2_layer]) #element-wise addition of the two feature vectors
    dropout1 = Dropout(ENSEMBLE_DR_RATE)(addition_layer)
    dense1 = Dense(512, activation='relu')(dropout1)
    dropout2 = Dropout(ENSEMBLE_DR_RATE)(dense1)
    # dense1 = Dense(final_layer_size, activation='relu')(addition_layer)
    # dense2 = Dense(final_layer_size/2, activation='relu')(dense1)
    # Two-class softmax output layer.
    out = Dense(2, activation='softmax')(dropout2)
    model = Model(inputs = inputs, outputs = out)
    print("Summation model summary:")
    model.summary()
    #create the augmenting data object
    # Random rotations/flips/brightness changes plus sample-wise rescaling to [-1, 1].
    aug = ImageDataGenerator(
        rotation_range=30,
        brightness_range = (0.5, 1.5),
        horizontal_flip=True,
        vertical_flip = True,
        fill_mode="reflect",
        preprocessing_function=normalize_data)
    #compile the model
    model.compile(
        optimizer=tf.keras.optimizers.SGD(learning_rate = lr_extension), #function used to minimize the loss (back propogation and gradient descent)
        loss=tf.keras.losses.BinaryCrossentropy(), #defines how far off a prediction is from correct answer - loss is high if model predicts small prob classified correctly
        metrics=[tf.keras.metrics.MeanSquaredError(), tf.keras.metrics.CategoricalAccuracy()]
    )
    #sending the same augmented input through both models, fit the model to the data
    # Early stopping: halt when training loss fails to improve by 0.001 for 3 epochs.
    history1 = model.fit(
        x = aug.flow(X_train, y_train, batch_size = BATCH_SIZE),
        shuffle = True,
        epochs = NUM_EPOCHS,
        callbacks = [tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3, min_delta = 0.001, restore_best_weights=True)])
elif OPTION == "AVG":
print("Using average option")
#develop a new model that will average the outputs of each model before prediction
inputs = Input(shape=(50,50,3))
model1_layer = model1(inputs)
model2_layer = model2(inputs)
avg_layer = Average()([model1_layer, model2_layer]) #average layer
dropout1 = Dropout(ENSEMBLE_DR_RATE)(avg_layer)
layer1 = Dense(512, activation = 'relu')(dropout1)
dropout2 = Dropout(ENSEMBLE_DR_RATE)(layer1)
#additional layers used in testing
# layer2 = Dense(128, activation = 'tanh')(dropout2)
# dropout3 = Dropout(ENSEMBLE_DR_RATE)(layer2)
# layer3 = Dense(32, activation = 'tanh')(dropout3)
# dropout4 = Dropout(ENSEMBLE_DR_RATE)(layer3)
# # dense1 = Dense(final_layer_size, activation='relu')(addition_layer)
# dense2 = Dense(final_layer_size/2, activation='relu')(dense1)
out = Dense(2, activation='softmax')(dropout2)
model = Model(inputs = inputs, outputs = out)
print("Summary of Avergae Model:")
model.summary()
#create the augmenting data object
aug = ImageDataGenerator(
rotation_range=30,
brightness_range = (0.5, 1.5),
horizontal_flip=True,
vertical_flip = True,
fill_mode="reflect",
preprocessing_function=normalize_data)
#compile the model
model.compile(
optimizer=tf.keras.optimizers.SGD(learning_rate = lr_extension), #function used to minimize the loss (back propogation and gradient descent)
loss=tf.keras.losses.BinaryCrossentropy(), #defines how far off a prediction is from correct answer - loss is high if model predicts small prob classified correctly
metrics=[tf.keras.metrics.MeanSquaredError(), tf.keras.metrics.CategoricalAccuracy()]
)
model.summary()
#sending the same augmented input through both models and fit the model
history1 = model.fit(
x = aug.flow(X_train, y_train, batch_size = BATCH_SIZE),
shuffle = True,
epochs = NUM_EPOCHS,
callbacks = [tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3, min_delta = 0.001, restore_best_weights=True)])
elif OPTION == "CONCAT":
print("Using conatenation option")
#develop a new model that will concatenate the outputs of each model before prediction
inputs = Input(shape=(50,50,3))
model1_layer = model1(inputs)
model2_layer = model2(inputs)
concat_layer = Concatenate()([model1_layer, model2_layer]) #concatenation layer
dropout1 = Dropout(ENSEMBLE_DR_RATE)(concat_layer)
layer1 = Dense(final_layer_size, activation='relu')(dropout1)
dropout2 = Dropout(ENSEMBLE_DR_RATE)(layer1)
layer2 = Dense(512, activation='relu')(dropout2)
dropout3 = Dropout(ENSEMBLE_DR_RATE)(layer2)
out = Dense(2, activation='softmax')(dropout3)
model = Model(inputs = inputs, outputs = out)
model.summary()
#create the augmenting data object
aug = ImageDataGenerator(
rotation_range=30,
brightness_range = (0.5, 1.5),
horizontal_flip=True,
vertical_flip = True,
fill_mode="reflect",
preprocessing_function=normalize_data)
#compile the model
model.compile(
optimizer=tf.keras.optimizers.SGD(learning_rate = lr_extension), #function used to minimize the loss (back propogation and gradient descent)
loss=tf.keras.losses.BinaryCrossentropy(), #defines how far off a prediction is from correct answer - loss is high if model predicts small prob classified correctly
metrics=[tf.keras.metrics.MeanSquaredError(), tf.keras.metrics.CategoricalAccuracy()]
)
#sending the same augmented input through both models and fit the model
history1 = model.fit(
x = aug.flow(X_train, y_train, batch_size = 32),
shuffle = True,
epochs = NUM_EPOCHS,
callbacks = [tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3, min_delta = 0.001, restore_best_weights=True)])
elif OPTION == "RNN":
    print("Using RNN option")
    #develop a new model that will concatenate the outputs of each model before prediction
    inputs = Input(shape=(50,50,3))
    # Run the same input through both frozen base models.
    model1_layer = model1(inputs)
    model2_layer = model2(inputs)
    # Stack the two feature vectors so each base model's output becomes one
    # "timestep" for the RNN.
    new_input = tf.convert_to_tensor([model1_layer, model2_layer])
    new_input = tf.transpose(new_input, [1,0,2]) #shape (None, timestep(2), features(2048))
    simple_rnn = SimpleRNN(2048, activation='relu', dropout=0.3)(new_input) # Shape = (None, 2048)
    dense1 = Dense(512, activation='relu')(simple_rnn)
    dropout1 = Dropout(ENSEMBLE_DR_RATE)(dense1)
    # Two-class softmax output layer.
    out = Dense(2, activation='softmax')(dropout1)
    model = Model(inputs = inputs, outputs = out)
    model.summary()
    #create the augmenting data object
    # Random rotations/flips/brightness changes plus sample-wise rescaling to [-1, 1].
    aug = ImageDataGenerator(
        rotation_range=30,
        brightness_range = (0.5, 1.5),
        horizontal_flip=True,
        vertical_flip = True,
        fill_mode="reflect",
        preprocessing_function=normalize_data)
    model.compile(
        optimizer=tf.keras.optimizers.SGD(learning_rate = lr_extension), #function used to minimize the loss (back propogation and gradient descent)
        loss=tf.keras.losses.BinaryCrossentropy(), #defines how far off a prediction is from correct answer - loss is high if model predicts small prob classified correctly
        metrics=[tf.keras.metrics.MeanSquaredError(), tf.keras.metrics.CategoricalAccuracy()])
    # NOTE(review): re-freezes layer 0 after compile; the base models were
    # already set non-trainable above -- confirm whether this line is needed.
    model.layers[0].trainable = False
    #sending the same augmented input through both models
    # Early stopping: halt when training loss fails to improve by 0.001 for 3 epochs.
    history1 = model.fit(
        x = aug.flow(X_train, y_train, batch_size = BATCH_SIZE),
        shuffle = True,
        epochs = NUM_EPOCHS,
        callbacks = [tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3, min_delta = 0.001, restore_best_weights=True)])
else:
print("Option selected is not defined. Please select from SUM, MULT, CONC, or RNN")
"""Plot the results of the training session 1"""
print("---------------------------------- Getting Metrics -----------------------------")
plt.figure(figsize=(10,10))
plt.plot(history1.history['mean_squared_error'])
plt.title('MSE')
plt.ylabel('MSE')
plt.xlabel('Epoch')
plt.savefig(OPTION+'_Train_MSE_'+TYPE_NAME)
plt.close()
plt.figure(figsize=(10,10))
plt.plot(history1.history['categorical_accuracy'])
plt.title('Categorical Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.savefig(OPTION+'_Train_Categorical_'+TYPE_NAME)
plt.close()
plt.figure(figsize=(10,10))
plt.plot(history1.history['loss'])
plt.title('Binary Cross Entropy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.savefig(OPTION+'_Train_Loss_'+TYPE_NAME)
plt.close()
#Get training and testing results
#normalize all samples
X_train_normalized = np.zeros(X_train.shape)
for idx, sample in enumerate(X_train):
X_train_normalized[idx] = normalize_data(sample)
if idx==0:
print("size of a sample normalizing:", sample.shape)
X_test_normalized = np.zeros(X_test.shape)
for idx, sample in enumerate(X_test):
X_test_normalized[idx] = normalize_data(sample)
#Perform final prediction of the model using non-augmented train and test data
final_prediction_train = model.predict(X_train_normalized.astype(np.float64))
final_prediction_test = model.predict(X_test_normalized.astype(np.float64))
#get the final metrics (from file get_results)
get_metrics(final_prediction_train, y_train, "Train Metrics***************", "Train", OPTION+"_Ensemble_Confusion_Train_Ensemble_"+TYPE_NAME)
get_metrics(final_prediction_test, y_test, "Test Metrics****************", "Test", OPTION+"_Ensemble_Confusion_Test_Ensemble_"+TYPE_NAME) | [
"noreply@github.com"
] | theriaultr.noreply@github.com |
027213b7f05cac7207d402a79bd8a74355de05f4 | 11ef4bbb8086ba3b9678a2037d0c28baaf8c010e | /Source Code/server/binaries/chromium/pyproto/components/feed/core/proto/content_storage_pb2.py | ee9d322f417b3184236037d8f6e46f0940829c38 | [] | no_license | lineCode/wasmview.github.io | 8f845ec6ba8a1ec85272d734efc80d2416a6e15b | eac4c69ea1cf0e9af9da5a500219236470541f9b | refs/heads/master | 2020-09-22T21:05:53.766548 | 2019-08-24T05:34:04 | 2019-08-24T05:34:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 2,486 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: content_storage.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='content_storage.proto',
package='feed',
syntax='proto2',
serialized_options=_b('H\003'),
serialized_pb=_b('\n\x15\x63ontent_storage.proto\x12\x04\x66\x65\x65\x64\"8\n\x13\x43ontentStorageProto\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x14\n\x0c\x63ontent_data\x18\x02 \x01(\x0c\x42\x02H\x03')
)
_CONTENTSTORAGEPROTO = _descriptor.Descriptor(
name='ContentStorageProto',
full_name='feed.ContentStorageProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='feed.ContentStorageProto.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='content_data', full_name='feed.ContentStorageProto.content_data', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=31,
serialized_end=87,
)
DESCRIPTOR.message_types_by_name['ContentStorageProto'] = _CONTENTSTORAGEPROTO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ContentStorageProto = _reflection.GeneratedProtocolMessageType('ContentStorageProto', (_message.Message,), dict(
DESCRIPTOR = _CONTENTSTORAGEPROTO,
__module__ = 'content_storage_pb2'
# @@protoc_insertion_point(class_scope:feed.ContentStorageProto)
))
_sym_db.RegisterMessage(ContentStorageProto)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"wasmview@gmail.com"
] | wasmview@gmail.com |
1d2597e62867a48b42e627be8c5761ed4af60f31 | 443d16e224d7b8b9014746c4995ea48090b1e41d | /liction1/robot-tasks-master/task_27.py | 099ab024a84d86f74ee552426f51d9ae45035e46 | [] | no_license | motomax990/lictions | fae3ef891c92b7fbe1b5a74561a7c39d45f0c76f | 1a329b1b07f4da2fc7b0bbcf134c1912b87ae10a | refs/heads/master | 2021-05-21T01:04:57.940074 | 2020-04-03T07:44:32 | 2020-04-03T07:44:32 | 252,479,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | #!/usr/bin/python3
from pyrob.api import *
@task
def task_7_5():
move_right()
fill_cell()
n = 0
nn = n
while not wall_is_on_the_right():
if n < nn:
n += 1
move_right()
else:
n = 0
nn += 1
move_right()
if not wall_is_on_the_right():
fill_cell()
if __name__ == '__main__':
run_tasks()
| [
"motomax990@gmail.com"
] | motomax990@gmail.com |
d969b7adc6bd3bbefcc49987e3756a2ac25fa5f6 | 528c66f55202c1c68fbfb727ecf6a9fc1e0caa0c | /katran/stamps/plugins/stamplink/dbgettext_registration.py | f95a06ac7bea58183db4f58a6b2bb467df2ed5a5 | [] | no_license | samjacoby/katran | 445d9e972b463e75c18f5deb2a6f34837c65b62f | 61b3d024e9209c925664c5c64e8b7aaa1f9d52a7 | refs/heads/master | 2020-03-31T07:41:46.138648 | 2011-08-19T21:50:38 | 2011-08-19T21:50:38 | 1,395,824 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | from dbgettext.registry import registry, Options
from models import StampLink
class LinkOptions(Options):
attributes = ('name', 'url', 'mailto')
parent = 'page'
registry.register(StampLink, LinkOptions)
| [
"sam@shackmanpress.com"
] | sam@shackmanpress.com |
514a8ee2bbd1c46788a905786a44bd30fe126160 | 3fbf8a41253b008083a4ddb92955514f5f277df6 | /temp.py | 206341102a944e0da7051b9a17a9529320ec65f4 | [] | no_license | ashish0707/pagerank | f28a2c12ba93bd9ff001294e72e798c993ad9ba4 | f0450115a93ade9906d7f44106450dbe157a03f5 | refs/heads/master | 2021-01-11T01:09:38.256259 | 2016-10-16T22:44:34 | 2016-10-16T22:44:34 | 71,052,008 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,004 | py | import os
import sys
import fileinput
from collections import defaultdict
class Node:
inlinks = []
title = ""
numberOfOutlinks = 0
pageRank = 0.0;
newPageRank = 0.0;
def __init__(self, depth, url):
self.depth = depth
self.url = url
def __eq__(self, node):
if self.url == node.url:
return True
return False
def addInlink(self,node):
self.inlinks.append(node)
def setTitle(self, title):
self.title = title
def setNoOfOutlinks(self,count):
self.numberOfOutlinks = count
def setPageRank(self,rank):
self.pageRank = rank
def setNewPageRank(self, rank):
self.pageRank = rank
def __hash__(self):
return hash(self.url)
tempList = [Node(1 , "abc"),Node(1 , "abcd"),Node(1 , "abce")]
print Node(2, "abc") in tempList
print tempList.index(Node(2, "abce"))
temp = defaultdict(int)
temp = {'ash' : 1 , "bul" : 2}
if 'bul' in temp:
print "ash is present"
| [
"saashaa0707@gmail.com"
] | saashaa0707@gmail.com |
4a0fee6d173f9ed06c26a63cd716de5b07c251f6 | 813eebfeadbe8d77b02d307eb64dfa9a94aef553 | /slidenup.py | 52e608ed01a2f38c33c845746de5f8e7041dc45d | [
"MIT"
] | permissive | lacop/slidenup | efbf07fd84e071b6925e5d265214d45f71250539 | 50a7824a1019c058cde47d73c654c49e4bcc9167 | refs/heads/master | 2020-05-17T16:01:12.000548 | 2015-05-31T11:53:17 | 2015-05-31T11:53:17 | 36,600,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,777 | py | #!/usr/bin/env python3
import sys
import os
import subprocess
from termcolor import colored
import tempfile
NUPARGS = ['--nup', '3x2', '--landscape']
MERGEARGS = ['--landscape']
TEXPREAMBLE = [
r'\documentclass[12pt, a4paper]{article}',
r'\usepackage{multido}',
r'\usepackage[landscape]{geometry}',
r'\voffset = -2cm',
r'\topmargin = 0cm',
r'\headheight = 1cm',
r'\headsep = 0cm',
r'\textheight = 18cm',
r'\footskip = 0cm',
r'\usepackage{fancyhdr}',
r'\pagestyle{fancy}',
r'\renewcommand{\headrulewidth}{0pt}',
]
TEXDOCUMENT = [
r'\begin{document}',
r'\multido{}{\pagecnt}{\vphantom{x}\newpage}',
r'\end{document}'
]
def TEXLABELS(header, label, pagecnt):
ret = []
ret.append(r'\newcommand{\pagecnt}{' + str(pagecnt) + r'}')
if header:
ret.append(r'\chead{' + label + r'}')
ret.append(r'\rhead{\thepage ~/~ ' + str(pagecnt) + r'}')
ret.append(r'\cfoot{}')
else:
ret.append(r'\cfoot{' + label + r'}')
ret.append(r'\rfoot{\thepage ~/~ ' + str(pagecnt) + r'}')
return ret
if len(sys.argv) < 3:
print('Usage:', sys.argv[0], 'output-file', 'input-files')
sys.exit(1)
FNULL = open(os.devnull, 'w')
def call(args):
print(colored('Executing', 'green'), colored(args[0], 'blue'), ' '.join(args[1:]))
#res = subprocess.call(args)
res = subprocess.call(args, stdout=FNULL, stderr=FNULL)
if res != 0:
print('\t', colored('FAILED', 'yellow'))
return False
return True
def check_output(args):
print(colored('Executing', 'green'), colored(args[0], 'blue'), ' '.join(args[1:]))
try:
res = subprocess.check_output(args, stderr=FNULL)
except subprocess.CalledProcessError:
print('\t', colored('FAILED', 'yellow'))
return None
return res
tempdir = tempfile.TemporaryDirectory()
print(colored('Working in temp directory', 'blue'), tempdir.name)
# First nup individual input files
print(colored('N-up-ing individual files together', 'blue'))
infiles = sys.argv[2:]
outfiles = []
for i,infile in enumerate(infiles):
outfile = os.path.join(tempdir.name, 'nup-{}.pdf'.format(i))
if not call(['pdfjam'] + NUPARGS + [infile, '--outfile', outfile]):
print(colored('N-up failed, exiting', 'red'))
sys.exit(1)
outfiles.append(outfile)
def number_and_label(infile, outfile, label, header = True):
print(colored('Numbering and labeling', 'green'), infile)
# Use pdfinfo to count number of pages
out = check_output(['pdfinfo', infile])
if out is None:
print(colored('Calling pdfinfo failed', 'yellow'))
return False
pagecnt = None
for line in out.decode('utf-8').split('\n'):
if line.startswith('Pages:'):
pagecnt = int(line.split()[1])
break
if pagecnt is None:
print(colored('Weird pdfinfo output', 'yellow'))
return False
print(pagecnt)
# Generate tex file for numbering
texfile = os.path.join(tempdir.name, 'numbering.tex')
with open(texfile, 'w') as texf:
texf.writelines(TEXPREAMBLE)
texf.writelines(TEXLABELS(header, label, pagecnt))
texf.writelines(TEXDOCUMENT)
# Compile using pdflatex
if not call(['pdflatex', '-aux_directory='+tempdir.name, '-output-directory='+tempdir.name, texfile]):
print(colored('Running pdflatex failed', 'yellow'))
return False
numfile = os.path.join(tempdir.name, 'numbering.pdf')
# Merge with input file
if not call(['pdftk', infile, 'multistamp', numfile, 'output', outfile]):
print(colored('Combining file with labels failed', 'yellow'))
return False
return True
# Number and label each nuped file first
print(colored('Numbering the N-up-ed files individually', 'blue'))
infiles = outfiles
outfiles = []
for i,infile in enumerate(infiles):
outfile = os.path.join(tempdir.name, 'nup-numbered-{}.pdf'.format(i))
if not number_and_label(infile, outfile, os.path.basename(sys.argv[2+i]), True):
print(colored('Numbering and labeling failed, exiting', 'red'))
sys.exit(1)
outfiles.append(outfile)
# Merge numbered nuped files together
print(colored('Merging together to form single file', 'blue'))
mergefile = os.path.join(tempdir.name, 'merged.pdf')
if not call(['pdfjam'] + MERGEARGS + outfiles + ['--outfile', mergefile]):
print(colored('Merge failed, exiting', 'red'))
sys.exit(1)
# Number and label merged file
print(colored('Numbering the final merged file', 'blue'))
if not number_and_label(mergefile, sys.argv[1], '', False):
print(colored('Final numbering failed, exiting', 'red'))
sys.exit(1)
print(colored('Finished, output written to', 'green'), colored(sys.argv[1], 'blue'))
| [
"lacop@lacop.net"
] | lacop@lacop.net |
cbcb4e26a72746f3f0c792281fd754aa831d4222 | 94e336b295eeee74862f4d88996bc6d98c10ad2c | /python/S14-Day12-学员管理系统/modules/db_conn.py | 691fdff3844e997580eb0df5c6d5500ca1ca7003 | [] | no_license | colinmaomao/python | f753b1bbe1ba3808af951bf8a2be2713568ebcd6 | 3c4375c91c9131c8a157146fd5aee832f75a0443 | refs/heads/master | 2020-03-13T03:19:12.678278 | 2018-04-25T03:13:53 | 2018-04-25T03:13:53 | 85,037,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Time:2017/12/19 20:37
__Author__ = 'Sean Yao'
from sqlalchemy import create_engine, Table
from sqlalchemy.orm import sessionmaker
from conf import settings
engine = create_engine(settings.ConnParams)
SessionCls = sessionmaker(bind=engine)
session = SessionCls() | [
"colinmaomao@live.com"
] | colinmaomao@live.com |
1ff3689cef9ea51c11ce8e0a2538fdcf94209214 | 9beb3189a58f8b212903bfb37d90866cff54c868 | /build/hokuyo_node/catkin_generated/pkg.develspace.context.pc.py | a73fb6e05bef682eb7bc658d162fc09b1822b1c5 | [] | no_license | HORIOJAPAN2017/GTUNE_catkin_ws | e6f20bde8f432e853c0e1f320d0f86844e7d4d16 | b88c7c0e90a6ea84ba8aa9087f0300bde67eb224 | refs/heads/master | 2020-03-22T21:22:47.965710 | 2018-07-12T08:03:57 | 2018-07-12T08:03:57 | 140,681,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/user/catkin_ws/devel/include;/home/user/catkin_ws/src/hokuyo_node/include".split(';') if "/home/user/catkin_ws/devel/include;/home/user/catkin_ws/src/hokuyo_node/include" != "" else []
PROJECT_CATKIN_DEPENDS = "rosconsole".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-llibhokuyo".split(';') if "-llibhokuyo" != "" else []
PROJECT_NAME = "hokuyo_node"
PROJECT_SPACE_DIR = "/home/user/catkin_ws/devel"
PROJECT_VERSION = "1.7.8"
| [
"horiojapan2017@gmail.com"
] | horiojapan2017@gmail.com |
4fa8d0b64882d101c46fc76024cc58d44c6c15db | 5d0f5c079132dd477810d0d39b4fc365a9670791 | /Tools/ServerAutoTester/protocols/CLHeartBeat_pb2.py | dc0f00cb7979439aacca402a439c335e3332ec38 | [] | no_license | JoeChen999/scut | 457563734e33873c9c8781b6aa63b1e96e09f03c | eaac6e8fcc10263f126bae473b0777f36d09022d | refs/heads/master | 2021-01-19T11:48:44.886011 | 2016-09-22T03:48:50 | 2016-09-22T03:48:50 | 68,881,856 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | true | 1,630 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: 1001_CLHeartBeat.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
import PBInt64_pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='1001_CLHeartBeat.proto',
package='',
serialized_pb='\n\x16\x31\x30\x30\x31_CLHeartBeat.proto\x1a\rPBInt64.proto\"+\n\x0b\x43LHeartBeat\x12\x1c\n\nClientTime\x18\x01 \x02(\x0b\x32\x08.PBInt64')
_CLHEARTBEAT = _descriptor.Descriptor(
name='CLHeartBeat',
full_name='CLHeartBeat',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ClientTime', full_name='CLHeartBeat.ClientTime', index=0,
number=1, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=41,
serialized_end=84,
)
_CLHEARTBEAT.fields_by_name['ClientTime'].message_type = PBInt64_pb2._PBINT64
DESCRIPTOR.message_types_by_name['CLHeartBeat'] = _CLHEARTBEAT
class CLHeartBeat(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CLHEARTBEAT
# @@protoc_insertion_point(class_scope:CLHeartBeat)
# @@protoc_insertion_point(module_scope)
| [
"chenbiao@elex-tech.com"
] | chenbiao@elex-tech.com |
6ec73f5f5b1094fb1c39ec1e25616a5f5a012826 | 4c63edb34dbc6e7d3a07da10f25f23469824d36c | /exses2/printwithoutnextln.py | 38f7a0a6a882d619d23b6d0651cbdac7feefe712 | [] | no_license | KKAiser97/trantrungkien-fundamentals-c4e26 | 59a3c22433dfec85a778a8fc231f429080a6abc3 | 0812ad0061e6ff1aeb7928dfc3464f7b7f2cb193 | refs/heads/master | 2020-04-19T16:12:49.364161 | 2019-03-17T09:47:59 | 2019-03-17T09:47:59 | 168,297,639 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 77 | py | print("Hello,", end=" ")
print("my name is", end=" ")
print("B-max", end=" ") | [
"dragonkien17@gmail.com"
] | dragonkien17@gmail.com |
a7b403a88927d37debd20ab3b7b266166a6a0bec | 23633f61451aa00dc8adf2446340621d7c22e6da | /venv/bin/chardetect | d6d9b28e12be62b00ec77c458cadd6979796f4b7 | [] | no_license | Vishula/twitter-bot | 8ee2b82d4f67bec4e06a96cd67cc5e6ad6281d4b | 3b24d7ad63e56218ae255a32372ca9c0fb52740a | refs/heads/master | 2022-04-24T12:14:44.330146 | 2020-04-27T23:44:55 | 2020-04-27T23:44:55 | 259,480,940 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | #!/Users/Vishula/Desktop/tweepy-bots/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from chardet.cli.chardetect import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"vishula.gamaetige@my.jcu.edu.au"
] | vishula.gamaetige@my.jcu.edu.au | |
7eaebce784acf19905cbf7c16b1af0eb28636a84 | bb3da11cc4b4fb442ab0bebaad38fd6cc10d83e0 | /Day_11_Problem_H-Index.py | 2ac4f5c77067642bac25baa5b70af1ccf98000fa | [] | no_license | Harsha4517/Leetcode_August_challenge | 772e05553c683d76ec740bf9bc133fa4e489d752 | eaa7c22f1fdaa979694cfba48292dfcaa0c60b34 | refs/heads/master | 2022-12-01T11:43:42.309452 | 2020-08-19T16:42:10 | 2020-08-19T16:42:10 | 284,933,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | class Solution:
def hIndex(self, citations: List[int]) -> int:
citations.sort()
r=len(citations)
for i in citations:
if r<=i:
return r
r-=1
return 0
| [
"noreply@github.com"
] | Harsha4517.noreply@github.com |
7b18838931145b1988e35860e46e2a8cade9689f | 7748e6279f48de7aceb18057e8faede9c272ce58 | /0x03-python-data_structures/8-multiple_returns.py | 7145f64e85ada5618383e003e99d7fae161107f6 | [] | no_license | pichu185/holbertonschool-higher_level_programming | 3eb2cfb692e7e405210fa69772b53a1bba7f7b07 | 1e70a39d39ad6b02ff1169a957a0459d7c42f9fc | refs/heads/main | 2023-07-31T13:22:22.241319 | 2021-09-22T22:07:50 | 2021-09-22T22:07:50 | 361,893,228 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | #!/usr/bin/python3
def multiple_returns(sentence):
if sentence == "":
return (0, None)
return (len(sentence), sentence[0])
| [
"pichu_otegui@hotmail.com"
] | pichu_otegui@hotmail.com |
5f0073cc2c699ab320785b7b4f05018258e7458e | f40110c2e0d4f2653017a3c5d4cd959cda0b543f | /tsai/data/unwindowed.py | 5d7c8c5da0e656140bb4784fb61b11a7a7f9b92e | [
"Apache-2.0"
] | permissive | jingmouren/timeseriesAI | 161ea09cc601cac6c61fc6d86696517215cd3ff6 | df8eb53c22701e633b796f5f61b5197d6a2a0872 | refs/heads/master | 2022-06-18T13:41:28.735669 | 2022-05-29T06:54:31 | 2022-05-29T06:54:31 | 212,359,225 | 0 | 0 | null | 2019-10-02T14:14:58 | 2019-10-02T14:14:57 | null | UTF-8 | Python | false | false | 3,382 | py | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/014_data.unwindowed.ipynb (unless otherwise specified).
__all__ = ['TSUnwindowedDataset', 'TSUnwindowedDatasets']
# Cell
from ..imports import *
from ..utils import *
from .validation import *
from .core import *
# Cell
class TSUnwindowedDataset():
_types = TSTensor, TSLabelTensor
def __init__(self, X=None, y=None, y_func=None, window_size=1, stride=1, drop_start=0, drop_end=0, seq_first=True, **kwargs):
store_attr()
if X is not None:
if X.ndim == 1: X = np.expand_dims(X, 1)
shape = X.shape
assert len(shape) == 2
if seq_first:
seq_len = shape[0]
else:
seq_len = shape[-1]
max_time = seq_len - window_size + 1 - drop_end
assert max_time > 0, 'you need to modify either window_size or drop_end as they are larger than seq_len'
self.all_idxs = np.expand_dims(np.arange(drop_start, max_time, step=stride), 0).T
self.window_idxs = np.expand_dims(np.arange(window_size), 0)
if 'split' in kwargs: self.split = kwargs['split']
else: self.split = None
self.n_inp = 1
if y is None:
self.loss_func = MSELossFlat()
else:
if (is_listy(y[0]) and isinstance(y[0][0], Integral)) or isinstance(y[0], Integral):
self.loss_func = CrossEntropyLossFlat()
else:
self.loss_func = MSELossFlat()
def __len__(self):
if not hasattr(self, "split"): return 0
elif self.split is not None:
return len(self.split)
else:
return len(self.all_idxs)
def __getitem__(self, idxs):
if self.split is not None:
idxs = self.split[idxs]
widxs = self.all_idxs[idxs] + self.window_idxs
if self.seq_first:
xb = self.X[widxs]
if xb.ndim == 3: xb = xb.transpose(0,2,1)
else: xb = np.expand_dims(xb, 1)
else:
xb = self.X[:, widxs].transpose(1,0,2)
if self.y is None:
return (self._types[0](xb),)
else:
yb = self.y[widxs]
if self.y_func is not None:
yb = self.y_func(yb)
return (self._types[0](xb), self._types[1](yb))
def new_empty(self):
return type(self)(X=None, y=None)
@property
def vars(self):
s = self[0][0] if not isinstance(self[0][0], tuple) else self[0][0][0]
return s.shape[-2]
@property
def len(self):
s = self[0][0] if not isinstance(self[0][0], tuple) else self[0][0][0]
return s.shape[-1]
class TSUnwindowedDatasets(FilteredBase):
def __init__(self, dataset, splits):
store_attr()
def subset(self, i):
return type(self.dataset)(self.dataset.X, y=self.dataset.y, y_func=self.dataset.y_func, window_size=self.dataset.window_size,
stride=self.dataset.stride, drop_start=self.dataset.drop_start, drop_end=self.dataset.drop_end,
seq_first=self.dataset.seq_first, split=self.splits[i])
@property
def train(self):
return self.subset(0)
@property
def valid(self):
return self.subset(1)
def __getitem__(self, i): return self.subset(i) | [
"“oguiza@gmail.com”"
] | “oguiza@gmail.com” |
cd5a0f8fe81207a66be3012c91827b4cacbf97bb | 1cd10945363de8c3c555fd002ee16bf078b3a4dc | /todobackend/todoapp/views.py | c66ce3f50bb0a5fc4e0decd0a586447a8ac89601 | [] | no_license | windlessStorm/todo-app | 63bcde89f123cf14fb12725a108b700ed4840f47 | 0e26a158437460c72bc7b078bd2f6f7def70a03c | refs/heads/master | 2022-12-24T02:39:45.500683 | 2018-10-25T16:17:51 | 2018-10-25T16:17:51 | 153,991,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16 | py | # using tastypie | [
"vikrant.biswas@gmail.com"
] | vikrant.biswas@gmail.com |
d104f81110379bd99dfed54230e5d9a2199b294e | ae544c4e0033e96a8d7cab802804f53f479a2ca6 | /Lecture_9-10/model_playground/main/admin.py | 005944d94a2f670d5c82691ec924a1ae772a71b6 | [] | no_license | jatinkatyal13/Python-Basics-Aug-2018 | e7c523ce23e7f9f5d7a823670509f5c160004901 | 6a224d21c4a49874ee844488b878b0e06a394532 | refs/heads/master | 2021-06-03T00:04:16.264794 | 2018-10-06T07:29:35 | 2018-10-06T07:29:35 | 146,196,887 | 6 | 11 | null | 2018-10-02T13:05:26 | 2018-08-26T16:24:06 | Jupyter Notebook | UTF-8 | Python | false | false | 193 | py | from django.contrib import admin
from main import models
# Register your models here.
admin.site.register(models.Student)
admin.site.register(models.Marks)
admin.site.register(models.Branch) | [
"jatin.katyal13@gmail.com"
] | jatin.katyal13@gmail.com |
6b281f7775ecf07282769814943ba415a7720747 | 9b2385171894da51bb4fd29fa3938d33ce15beb3 | /flask/project1/app.py | 685a13dadfad032cad0507e15a652f0102c76054 | [] | no_license | ArjunPadaliya/COMP840-Machine-Learning | 57a4a686ab8fbc0f76e7f9c6b099042fbd0a00e5 | 32d77e0944f27c0c458fb5328f563c1e6645e7c9 | refs/heads/master | 2020-03-27T23:03:32.616228 | 2018-11-08T21:25:50 | 2018-11-08T21:25:50 | 147,285,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 327 | py | from flask import Flask, request, render_template
app = Flask(__name__)
@app.route("/")
def hello():
return render_template('echo.html')
@app.route("/echo", methods=['POST'])
def echo():
return render_template('echo.html', text=request.form['text'])
if __name__ == "__main__":
app.run(debug=True)
| [
"arjunpadaliya@DESKTOP-DA4KHRT.localdomain"
] | arjunpadaliya@DESKTOP-DA4KHRT.localdomain |
3ee8e2adcf9f76f16ab706f3cf5d0974060e6e36 | 5ea9589b899ff843e548f317460f1574786a303d | /bi_lstm_crf/app/train.py | 5a26f07358703634beedb6e066deb5ce05d0f203 | [] | no_license | DarkPr0digy/Conditonal-Random-Fields-For-Morphological-Segmentation | 962f28e0cbd6777c0fdd8b9ec22f01d3fc0675fc | 8b31a3126579a9999c193b25428cd0a3dc714f29 | refs/heads/master | 2023-04-04T05:19:22.856013 | 2021-04-17T16:17:11 | 2021-04-17T16:17:11 | 284,056,945 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,249 | py | from os import mkdir
import numpy as np
import pandas as pd
import torch.optim as optim
from torch.utils.data import TensorDataset, DataLoader
from tqdm import tqdm
from bi_lstm_crf.app.preprocessing import *
from bi_lstm_crf.app.utils import *
def __eval_model(model, device, dataloader, desc):
model.eval()
with torch.no_grad():
# eval
losses, nums = zip(*[
(model.loss(xb.to(device), yb.to(device)), len(xb))
for xb, yb in tqdm(dataloader, desc=desc)])
return np.sum(np.multiply(losses, nums)) / np.sum(nums)
def __save_loss(losses, file_path):
pd.DataFrame(data=losses, columns=["epoch", "batch", "train_loss", "val_loss"]).to_csv(file_path, index=False)
def __save_model(model_dir, model):
model_path = model_filepath(model_dir)
torch.save(model.state_dict(), model_path)
print("save model => {}".format(model_path))
def train(args):
model_dir = args.model_dir
if not exists(model_dir):
mkdir(model_dir)
save_json_file(vars(args), arguments_filepath(model_dir))
preprocessor = Preprocessor(config_dir=args.corpus_dir, save_config_dir=args.model_dir, verbose=True)
model = build_model(args, preprocessor, load=args.recovery, verbose=True)
# loss
loss_path = join(args.model_dir, "loss.csv")
losses = pd.read_csv(loss_path).values.tolist() if args.recovery and exists(loss_path) else []
# datasets
(x_train, y_train), (x_val, y_val), (x_test, y_test) = preprocessor.load_dataset(
args.corpus_dir, args.val_split, args.test_split, max_seq_len=args.max_seq_len)
train_dl = DataLoader(TensorDataset(x_train, y_train), batch_size=args.batch_size, shuffle=True)
valid_dl = DataLoader(TensorDataset(x_val, y_val), batch_size=args.batch_size * 2)
test_dl = DataLoader(TensorDataset(x_test, y_test), batch_size=args.batch_size * 2)
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
device = running_device(args.device)
model.to(device)
val_loss = 0
best_val_loss = 1e4
for epoch in range(args.num_epoch):
# train
model.train()
bar = tqdm(train_dl)
for bi, (xb, yb) in enumerate(bar):
model.zero_grad()
loss = model.loss(xb.to(device), yb.to(device))
loss.backward()
optimizer.step()
bar.set_description("{:2d}/{} loss: {:5.2f}, val_loss: {:5.2f}".format(
epoch + 1, args.num_epoch, loss, val_loss))
losses.append([epoch, bi, loss.item(), np.nan])
# evaluation
val_loss = __eval_model(model, device, dataloader=valid_dl, desc="eval").item()
# save losses
losses[-1][-1] = val_loss
__save_loss(losses, loss_path)
# save model
if not args.save_best_val_model or val_loss < best_val_loss:
best_val_loss = val_loss
__save_model(args.model_dir, model)
print("save model(epoch: {}) => {}".format(epoch, loss_path))
# test
test_loss = __eval_model(model, device, dataloader=test_dl, desc="test").item()
last_loss = losses[-1][:]
last_loss[-1] = test_loss
losses.append(last_loss)
__save_loss(losses, loss_path)
print("training completed. test loss: {:.2f}".format(test_loss))
'''
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('corpus_dir', type=str, help="the corpus directory")
parser.add_argument('--model_dir', type=str, default="model_dir", help="the output directory for model files")
parser.add_argument('--num_epoch', type=int, default=20, help="number of epoch to train")
parser.add_argument('--lr', type=float, default=1e-3, help='learning rate')
parser.add_argument('--weight_decay', type=float, default=0., help='the L2 normalization parameter')
parser.add_argument('--batch_size', type=int, default=1000, help='batch size for training')
parser.add_argument('--device', type=str, default=None,
help='the training device: "cuda:0", "cpu:0". It will be auto-detected by default')
parser.add_argument('--max_seq_len', type=int, default=100, help='max sequence length within training')
parser.add_argument('--val_split', type=float, default=0.2, help='the split for the validation dataset')
parser.add_argument('--test_split', type=float, default=0.2, help='the split for the testing dataset')
parser.add_argument('--recovery', action="store_true", help="continue to train from the saved model in model_dir")
parser.add_argument('--save_best_val_model', action="store_true",
help="save the model whose validation score is smallest")
parser.add_argument('--embedding_dim', type=int, default=100, help='the dimension of the embedding layer')
parser.add_argument('--hidden_dim', type=int, default=128, help='the dimension of the RNN hidden state')
parser.add_argument('--num_rnn_layers', type=int, default=1, help='the number of RNN layers')
parser.add_argument('--rnn_type', type=str, default="lstm", help='RNN type, choice: "lstm", "gru"')
args = parser.parse_args()
train(args)
if __name__ == "__main__":
main()
'''
| [
"DarkPr0digy@users.noreply.github.com"
] | DarkPr0digy@users.noreply.github.com |
00a5456629eade115c10504c5c51ee715246da94 | 78efa54b2b253f99ea7e073f783e6121c20cdb52 | /Codechef/Make a Permutation.py | 0d93d423602cd522191dfdbd1c22c35da99cf806 | [] | no_license | NishchaySharma/Competitve-Programming | 32a93581ab17f05d20129471f7450f34ec68cc53 | 1ec44324d64c116098eb0beb74baac7f1c3395bb | refs/heads/master | 2020-04-08T04:02:46.599398 | 2020-01-01T15:51:39 | 2020-01-01T15:51:39 | 159,000,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | t=int(input())
# For each of the t test cases (t is read on the preceding line): count how
# many elements must be replaced -- every duplicate occurrence, plus every
# distinct value greater than n.  Presumably the goal is turning the list
# into a permutation of 1..n (values < 1 are not checked -- TODO confirm).
for _ in range(t):
    n=int(input())  # target length
    a=list(map(int,input().split()))
    s=list(set(a))  # distinct values
    # each repeated occurrence needs replacing
    cnt=len(a)-len(s)
    # each distinct value above n also needs replacing
    for i in s:
        if i>n:
            cnt+=1
    print(cnt)
| [
"noreply@github.com"
] | NishchaySharma.noreply@github.com |
95c23c7e8223d508534b84b542974fe593930fcf | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2581/58585/318647.py | a775edb61e5b740e8766d7ab194e117bcb0c8eb4 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | a=int(input())
b=input()
if a==2 and b=='1,1':
print('False')
elif a==1 and b=='2,0':
print('False')
elif a==2 and b=='1,0':
print('True')
elif a==1 and b=='2,2':
print('False')
elif a==1 and b=='1,0':
print('False')
else:
print(a)
print(b) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
06639ce4d5c194b1663a7ed119fa779a310b4efd | ef8f61459130e4864779b45bb4bb8d61ff793458 | /BinarySearchLastTarget.py | a0968ec0fcc8a95f3939b3e591be7d8f6eebf6b3 | [] | no_license | invokerkael918/BinarySearch | 64a38c88a20da679f7466fb122f3d57c9f876d6e | 8f303f013193cb5a09c4263562839e92c8915ccc | refs/heads/master | 2020-08-09T16:25:18.325564 | 2020-01-21T03:23:59 | 2020-01-21T03:23:59 | 214,123,123 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 748 | py | class Solution:
# @param {int[]} A an integer array sorted in ascending order
# @param {int} target an integer
# @return {int} an integer
def lastPosition(self, nums, target):
if not nums:
return -1
start, end = 0, len(nums) - 1
while start + 1 < end:
mid = (start + end) // 2
if nums[mid] < target:
start = mid
elif nums[mid] > target:
end = mid
else:
start = mid
if nums[end] == target:
return end
if nums[start] == target:
return start
else:
return -1
if __name__ == '__main__':
    # Quick manual check: last index of 1 in [1, 1] is 1.
    result = Solution().lastPosition([1, 1], 1)
    print(result)
"sam.cao918@gmail.com"
] | sam.cao918@gmail.com |
f4b60b1c6d8f00273517672a8ab1d2cab1416d77 | 442927aa09e6d9b0eded769a61944c87008ad8a2 | /rds_config.py | 2c77dc279c90b83628f1794deb100e279dec4f9c | [
"Apache-2.0"
] | permissive | AldebaranSouza/autorizador-debito-cartao | d28e27857da2cd0f940096fc7cb9a5fa0e140510 | ccef14eacd58f7f0d352726808013769865543fe | refs/heads/main | 2023-01-24T02:39:21.399682 | 2020-12-15T09:06:01 | 2020-12-15T09:06:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | import os
# Database connection settings; the endpoint comes from the environment.
db_host = os.environ['RDS_ENDPOINT']
# NOTE(review): username/password/name are committed to source control --
# consider loading these from the environment as well.
db_username = 'autorizador'
db_password = 'autorizador'
db_name = 'autorizador'
| [
"evandro@evandropires.com.br"
] | evandro@evandropires.com.br |
4b2b21f1afc3615ff25275755ccd49076db6a53c | b05d73a48ac0289eddd673f956c50c00d26faaf5 | /src/kugou/__init__.py | 11ffa566e6b57b33ae6e0317f97fc3c8d4ca86ea | [] | no_license | JUNEAPTX/Reptile_kugou | 60210b93a9b5b1998cbc79e565319da28c3db796 | f2d22d20a5b8c0d81f0edc8c18b2494e244cf882 | refs/heads/master | 2020-07-27T23:00:48.171354 | 2019-09-18T07:00:22 | 2019-09-18T07:00:22 | 209,239,211 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | import requests
from bs4 import BeautifulSoup
import time
headers ={
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36 QIHU 360EE'
}
def get_info(url):
    """Scrape one KuGou ranking page and print a dict per song.

    Each dict holds the chart rank, singer, song title and duration in that
    key order; the page lists titles as "singer - song".
    """
    response = requests.get(url, headers=headers)
    soup = BeautifulSoup(response.text, 'lxml')
    rank_tags = soup.select('span.pc_temp_num')
    title_tags = soup.select('#rankWrap > div.pc_temp_songlist > ul > li > a')
    time_tags = soup.select('span.pc_temp_tips_r > span')
    for rank_tag, title_tag, time_tag in zip(rank_tags, title_tags, time_tags):
        title_text = title_tag.get_text()
        entry = {
            'rank': rank_tag.get_text().strip(),
            'singer': title_text.split('-')[0],
            'song': title_text.split('-')[1],
            'time': time_tag.get_text().strip(),
        }
        print(entry)
if __name__ == '__main__':
    # Chart pages 1..23, fetched one second apart to stay polite.
    for page in range(1, 24):
        page_url = 'https://www.kugou.com/yy/rank/home/{}-8888.html?from=rank'.format(page)
        get_info(page_url)
        time.sleep(1)
| [
"976123690@qq.com"
] | 976123690@qq.com |
76e7e5751a4f31fe956d9efd8d148850907a1efb | 937fb8880ab8d45b9f8761f4064e4be3bef1ccdc | /habit_tracker/settings.py | 6364962b6c2403aa632067534f1122b64265821e | [] | no_license | zyjswiadomielife/habits | 2d6df7d0014d8882383c6293dac9bcefed54209d | 639af25a5bee04da8b55a107e9f8bc397dc3a0b9 | refs/heads/master | 2023-05-27T02:39:21.437807 | 2020-07-07T14:39:58 | 2020-07-07T14:39:58 | 277,565,084 | 0 | 0 | null | 2021-06-10T23:08:04 | 2020-07-06T14:31:09 | Python | UTF-8 | Python | false | false | 3,479 | py | """
Django settings for habit_tracker project.
Generated by 'django-admin startproject' using Django 3.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this secret key is committed to source control; rotate it and
# load it from the environment before any production deploy.
SECRET_KEY = 'qb(aqv+nzw(0!ab*xq-tge78!575)fkz=jm%zm6%_n10felipx'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Only the local development hostname is allowed to serve this site.
ALLOWED_HOSTS = ['habit.local']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'tinymce',
'Goal',
'SimpleHabitTracker',
'goal_realisation',
'rest_framework',
]
CSRF_COOKIE_NAME = "XSRF-TOKEN"
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'habit_tracker.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'habit_tracker.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# PostgreSQL connection; credentials come from the environment so they are
# not committed with the code.  NOTE(review): if DATA_DB_USER/PASS/HOST are
# unset these resolve to None -- presumably the deploy environment sets them.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'gonano',
        'USER': os.environ.get('DATA_DB_USER'),
        'PASSWORD': os.environ.get('DATA_DB_PASS'),
        'HOST': os.environ.get('DATA_DB_HOST'),
        'PORT': '',  # empty string -> driver default port
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
LOGIN_URL= 'SimpleHabitTracker:login'
| [
"maciejurmanski@gmail.com"
] | maciejurmanski@gmail.com |
3997a3c17986e696b21f385627d3ee4ee7e639a3 | 8b3ca44ee3d990233e74655b7131d616094f70c2 | /experiments/sparsity/drug_sensitivity_gdsc/gaussian_gaussian_volumeprior.py | ead1aea38e08f04599f394bcbe3113b61c9f5bb6 | [] | no_license | zshwuhan/BMF_Priors | 8b8c54271285a72d2085a56a9475c0756f375e67 | 6a600da1c41f1ccde2f2ba99298b40e68fb9910a | refs/heads/master | 2021-05-13T19:10:07.203215 | 2017-12-01T13:30:21 | 2017-12-01T13:30:21 | 116,883,181 | 1 | 0 | null | 2018-01-09T23:36:13 | 2018-01-09T23:36:13 | null | UTF-8 | Python | false | false | 1,396 | py | '''
Measure sparsity experiment on the GDSC drug sensitivity dataset, with
the Gaussian + Gaussian + Volume Prior model.
'''
project_location = "/Users/thomasbrouwer/Documents/Projects/libraries/"
import sys
sys.path.append(project_location)
from BMF_Priors.code.models.bmf_gaussian_gaussian_volumeprior import BMF_Gaussian_Gaussian_VolumePrior
from BMF_Priors.data.drug_sensitivity.load_data import load_gdsc_ic50_integer
from BMF_Priors.experiments.sparsity.sparsity_experiment import sparsity_experiment
import matplotlib.pyplot as plt
''' Run the experiment. '''
# R and M come from the GDSC IC50 loader -- presumably the data matrix and
# its observation mask (TODO confirm against load_gdsc_ic50_integer).
R, M = load_gdsc_ic50_integer()
model_class = BMF_Gaussian_Gaussian_VolumePrior
n_repeats = 10
stratify_rows = False
# Fractions of entries withheld at each sparsity level.
fractions_unknown = [0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
settings = {
    'R': R,
    'M': M,
    'K': 10,
    'hyperparameters': { 'alpha':1., 'beta':1., 'lamb':0.1, 'gamma':10**-30 },
    'init': 'random',
    'iterations': 120,
    'burn_in': 100,
    'thinning': 1,
}
# Per-fraction performances are also appended to this file by the experiment.
fout = './results/performances_gaussian_gaussian_volumeprior.txt'
average_performances, all_performances = sparsity_experiment(
    n_repeats=n_repeats, fractions_unknown=fractions_unknown, stratify_rows=stratify_rows,
    model_class=model_class, settings=settings, fout=fout)
''' Plot the performance. '''
# NOTE(review): the figure is neither shown nor saved here -- presumably the
# script is run interactively; confirm.
plt.figure()
plt.title("Sparsity performances")
plt.plot(fractions_unknown, average_performances['MSE'])
plt.ylim(0,1000)
"tab43@cam.ac.uk"
] | tab43@cam.ac.uk |
f921ef84ef18f1e2cee7397d038613961a2c9258 | da421607c1b19ed660fcaee743fdb405aa4f4b7b | /route/general.py | 14a9f922face36d9b4f3a8bda346338f031755bc | [
"MIT"
] | permissive | chxv/myweb | be947d6f73c4b33a2e9713903857ef2deff1d8cd | ae69682d9cad1776604c2aa2804905e60b18090a | refs/heads/master | 2022-04-20T08:40:01.231113 | 2020-03-31T05:56:14 | 2020-03-31T05:56:14 | 146,612,834 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,342 | py | from flask import Blueprint, redirect, render_template, url_for, request, flash, g
from flask_login import current_user, login_required
mod = Blueprint('index', __name__) # root-directory blueprint, endpoint namespace "index"
@mod.route('/')
def index():
    """Render the landing page."""
    return render_template('index.html')
@mod.route('/index')
def index2():
    """Redirect /index to the landing page."""
    # 'index.index' = blueprint name (first Blueprint argument) + endpoint name
    return redirect(url_for('index.index'))
@mod.route('/home')
def home2():
    """Redirect /home to the user blueprint's home endpoint."""
    return redirect(url_for('u.home'))
@mod.route('/search')
def search():
    """Render search results for the ?q= query-string parameter."""
    # Pass the raw query term straight through to the template.
    return render_template('search.html', q=request.args.get('q'))
@mod.route('/explore')
def explore():
    """Render the explore page."""
    return render_template('explore.html')
@mod.route('/d')
@login_required
def delete():
    """Delete an article (via GET, ?article=<id>).

    Returns "success" on deletion, or ("failed", 403) when the id is
    missing, unknown, or the article belongs to another user.
    """
    article_id = request.args.get('article')
    # No 'article' query parameter supplied.
    if not article_id:
        return "failed", 403
    from ..models import Article
    essay = Article.query.filter_by(id=article_id).first()
    # No article with that id.
    if not essay:
        return "failed", 403
    # Only the author may delete.  (Previously a non-author request fell off
    # the end of the function and returned None, which Flask turns into a
    # 500; now it gets an explicit 403.)
    if current_user.id != essay.user_id:
        return "failed", 403
    from .. import db
    db.session.delete(essay)
    db.session.commit()
    return "success"
@mod.route('/changeArticlePermission', methods=['POST'])
@login_required
def changeArticlePermission():
    """Set an article's secrecy flag (POST fields: article_id, secrecy)."""
    from ..models import Article
    from .. import db
    # Check the HTTP method.
    if request.method == 'POST':
        form = request.form
        # Check that both parameters are present.
        if 'article_id' in form and 'secrecy' in form:
            essay = Article.query.filter_by(id=form['article_id']).first()
            # Only the author of an existing article may change it.
            if essay and current_user.id == essay.user_id:
                essay.secrecy = form['secrecy']
                db.session.commit()
                return 'success'
    # Any failed check above falls through to a 403.
    return 'Forbidden', 403
def cannot_get_article():
    """Fallback when an article cannot be shown: flash an error (the Chinese
    message reads "article does not exist or permission error") and redirect
    back to the index page."""
    flash('文章不存在或权限错误')  # user-facing message, kept as-is
    return redirect(url_for('index.index'))
@mod.route('/article/<article_id>')
def reading(article_id):
    """Render an article if it exists and the current viewer may read it.

    'public' articles are visible to everyone; 'secret' articles only to
    their author or an Admin.  Any other secrecy value is treated as
    inaccessible (previously it fell through and returned None, which Flask
    turns into a 500).
    """
    from ..models import Article
    from ..func import get_article
    essay = Article.query.filter_by(id=article_id).first()
    # Article does not exist.
    if not essay:
        return cannot_get_article()
    if essay.secrecy == 'secret':
        allowed = current_user.is_authenticated and (
            current_user.id == essay.user_id or current_user.role.name == 'Admin')
        # Permission error.
        if not allowed:
            return cannot_get_article()
    elif essay.secrecy != 'public':
        # Unknown secrecy value: deny rather than crash.
        return cannot_get_article()
    return render_template('essay.html', content=get_article(essay), title=essay.title)
# @mod.before_request
# def before_request():
# g.db = connect_db()
#
#
# @mod.teardown_request
# def teardown_request(exception):
# g.db.close()
#
#
# def connect_db():
# from flask_sqlalchemy import SQLAlchemy
# db = SQLAlchemy()
| [
"1149277941@qq.com"
] | 1149277941@qq.com |
8f5d2addd86aa90c831638a21a8d551a429fe95a | 95bad0e363fd299357666671d2008e5e12830ac9 | /model/__init__.py | 246f0b9081328a2c1a538c9966d08891eeb3d810 | [
"MIT"
] | permissive | flywind2/blend-mask | 7c0929d29f9ba50c18387d96baa776c82390bef3 | 3e1eaa6749ea8a09c9903d4654683090df01368f | refs/heads/main | 2023-04-08T17:18:30.412081 | 2021-04-20T06:52:02 | 2021-04-20T06:52:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51 | py | from .blend_mask import *
from .criterion import *
| [
"cbjg395@gmail.com"
] | cbjg395@gmail.com |
f8f82bec7431db41e4e71819c59e5aeac5d30794 | 30d4dee3f452ac3524cf8d9ab3261e8e4ad62606 | /loop_detection/test_linkedlist.py | 13bf80ef15429d11edb147c1a26f76764ed76014 | [] | no_license | pvanirudh23192/Challenges | 136fc27179c7e941342a9ed0a72d8996831e053f | 73b4dfa4a6cda84a8726e3fa5bbe3e71346cf416 | refs/heads/master | 2020-06-15T08:47:56.607464 | 2019-07-04T13:58:56 | 2019-07-04T13:58:56 | 195,252,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 723 | py | def iscircular(linked_list):
"""
Determine wether the Linked List is circular or not
Args:
linked_list(obj): Linked List to be checked
Returns:
bool: Return True if the linked list is circular, return False otherwise
"""
# TODO: Write function to check if linked list is circular
pass
# Test Cases
# NOTE(review): this module never defines or imports `LinkedList`, and
# `list_with_loop` (used in the first print) is never assigned anywhere --
# as written the first check raises NameError.  Presumably both come from an
# accompanying exercise module; confirm before running.
small_loop = LinkedList([0])
small_loop.head.next = small_loop.head  # single node pointing at itself
print ("Pass" if iscircular(list_with_loop) else "Fail")
print ("Pass" if not iscircular(LinkedList([-4, 7, 2, 5, -1])) else "Fail")
print ("Pass" if not iscircular(LinkedList([1])) else "Fail")
print ("Pass" if iscircular(small_loop) else "Fail")
print ("Pass" if not iscircular(LinkedList([])) else "Fail")
| [
"venkataanirudhpochiraju@Venkatas-MacBook-Pro.local"
] | venkataanirudhpochiraju@Venkatas-MacBook-Pro.local |
c978b9dfec34de8c806dc69eb913c40e76ae07b8 | a531771a4f2812898a5b54044d6d8c3ca4a249f6 | /Make-Sense-of-Census/code.py | 201996ccf612b417d5cdfadb3c040b849af30bb5 | [
"MIT"
] | permissive | manjunathvihaan5/ga-learner-dst-repo | 57f44e8dacc3bf4f242f94748f56852d22acf3f5 | 5ff5649f604eb89a7f4cab4b0315a3dadea2ba95 | refs/heads/main | 2023-08-25T07:01:40.920224 | 2021-10-19T20:25:05 | 2021-10-19T20:25:05 | 355,458,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,293 | py | # --------------
# Importing header files
import numpy as np
import warnings
warnings.filterwarnings('ignore')
#New record
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
#Reading file
data = np.genfromtxt(path, delimiter=",", skip_header=1)
#Code starts here
census= np.concatenate((data,new_record),axis=0)
print(census)
data.shape
census.shape
age=census[:,0]
max_age= age.max()
print(max_age)
min_age=age.min()
print(min_age)
age_mean=age.mean()
print(age_mean)
age_std=age.std()
print(age_std)
race_0=census[census[:,2]==0]
race_1=census[census[:,2]==1]
race_2=census[census[:,2]==2]
race_3=census[census[:,2]==3]
race_4=census[census[:,2]==4]
len_0=len(race_0)
len_1=len(race_1)
len_2=len(race_2)
len_3=len(race_3)
len_4=len(race_4)
race_list=[len_0,len_1,len_2,len_3,len_4]
minority_race=race_list.index(min(race_list))
senior_citizens= census[census[:,0]>60]
working_hours_sum=senior_citizens.sum(axis=0)[6]
senior_citizens_len= len(senior_citizens)
print(senior_citizens_len)
avg_working_hours= working_hours_sum/senior_citizens_len
print(avg_working_hours)
high=census[census[:,1]>10]
avg_pay_high=high[:,7].mean()
print(avg_pay_high)
low=census[census[:,1]<=10]
avg_pay_low=low[:,7].mean()
print(avg_pay_low)
| [
"79388801+manjunathvihaan5@users.noreply.github.com"
] | 79388801+manjunathvihaan5@users.noreply.github.com |
95f5369cf13a4cc86db0d5c04bb83509514c7992 | 737c9fe51d6b369ef7951ae63c89e3fae5a5fdc1 | /sublimious.py | 75b0d16326ec46f0b1f520c9f241b78078a6b545 | [] | no_license | gitter-badger/sublimious | 8870161eca4a87d774ce7946f4009608f9c5ac26 | af3934663637de934093d30584b206166e31cda0 | refs/heads/master | 2021-01-18T07:36:20.434821 | 2015-09-18T03:53:38 | 2015-09-18T03:53:38 | 42,700,918 | 0 | 0 | null | 2015-09-18T05:10:47 | 2015-09-18T05:10:46 | null | UTF-8 | Python | false | false | 3,443 | py | import sys
import os
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
import sublime
import json
from .lib.io import write_sublimious_file
from .lib.collector import Collector
def plugin_loaded():
    """Sublime Text entry point: regenerate every Sublimious-managed file.

    Deletes all .sublime-settings files in this package directory and the
    User directory, then rebuilds Package Control settings, the keymap,
    per-syntax settings, per-package settings and Preferences from the
    collected layer configuration.  Exits early unless the user config sets
    nuke_everything, since the deletion step is destructive.
    """
    current_path = os.path.dirname(os.path.realpath(__file__))
    sublime_dir = os.path.dirname(sublime.packages_path())
    packages_dir = os.path.join(sublime_dir, 'Packages')
    user_dir = os.path.join(packages_dir, 'User')
    # Output panel used for progress messages throughout.
    status_panel = sublime.active_window().create_output_panel("sublimious_status_panel")
    sublime.active_window().run_command("show_panel", {"panel": "output.sublimious_status_panel", "toggle": False})
    pcontrol_settings = os.path.join(user_dir, 'Package Control.sublime-settings')
    settings_file = os.path.join(user_dir, 'Preferences.sublime-settings')
    collector = Collector(current_path)
    # Refuse to run until the user has explicitly opted in.
    if not collector.get_user_config().nuke_everything:
        status_panel.run_command("status", {"text": "Sublimious is currently off."})
        status_panel.run_command("status", {"text": "Since this might be your first start, I created a ~/.sublimious file"})
        status_panel.run_command("status", {"text": "Open that file and change 'nuke_everything' to True to proceed\n"})
        sys.exit()
    status_panel.run_command("status", {"text": "Welcome to Sublimious."})
    # Nuke everything
    settings_current = [os.path.join(current_path, f) for f in os.listdir(current_path) if f.endswith(".sublime-settings")]
    settings_user = [os.path.join(user_dir, f) for f in os.listdir(user_dir) if f.endswith(".sublime-settings")]
    filelist = settings_current + settings_user
    for f in filelist:
        os.remove(f)
    # Second iteration to initialise all layers with config
    collected_config = collector.get_collected_config()
    for layer in collector.get_layers():
        layer.init(collected_config)
        status_panel.run_command("status", {"text": "'%s' layer loaded..." % layer.name})
    # Collect all packages
    status_panel.run_command("status", {"text": "Collecting all packages..."})
    all_packages = collector.collect_key("required_packages") + collector.get_user_config().additional_packages
    write_sublimious_file(pcontrol_settings, json.dumps({'installed_packages': all_packages}))
    # Get all keybinding definitions and save to keymapfile
    status_panel.run_command("status", {"text": "Building keymap..."})
    write_sublimious_file("%s/Default.sublime-keymap" % current_path, json.dumps(collector.collect_key("sublime_keymap")))
    # Generate a bunch of syntax files depending on layer config
    syntax_definitions = collector.collect_syntax_specific_settings()
    for syntax, value in syntax_definitions.items():
        write_sublimious_file("%s/%s.sublime-settings" % (current_path, syntax), json.dumps(value))
        status_panel.run_command("status", {"text": "Collected %s syntax definition..." % syntax})
    # Generate package specific settings
    for package, setting in collector.get_collected_config()["package_settings"].items():
        write_sublimious_file("%s/%s.sublime-settings" % (user_dir, package), json.dumps(setting))
    # Take control over sublime settings file
    status_panel.run_command("status", {"text": "Taking control over Preferences.sublime-settings..."})
    write_sublimious_file(settings_file, json.dumps(collected_config))
    status_panel.run_command("status", {"text": "ALL DONE!"})
| [
"me@dave.cx"
] | me@dave.cx |
a1d8afbd254a100e9cada4b2b37a471d5d076aae | 73726c82254ba59ddb7f85ec7d2e7d53f48e7061 | /pivoteer/writer/search.py | 01fec902af2fca8782834ed1137ad2d95c99d689 | [
"MIT"
] | permissive | brewst001/RAPID | bd65aeb22d6932582c3b67a014bdc2d1a5615716 | f7aa702be1136d5d51db1b9bb2f11729216c6445 | refs/heads/master | 2021-04-04T07:51:47.961931 | 2021-01-01T01:23:31 | 2021-01-01T01:23:31 | 248,439,142 | 0 | 0 | MIT | 2020-03-19T07:35:23 | 2020-03-19T07:35:23 | null | UTF-8 | Python | false | false | 1,110 | py | """
Classes and functions for writing Search Records.
A Search Record is an IndicatorRecord with a record type of "SR."
"""
from core.google import SearchResult
from pivoteer.writer.core import CsvWriter
class SearchCsvWriter(CsvWriter):
    """
    A CsvWriter for IndicatorRecord objects with a record type of "SR" (Search Record)
    """

    def __init__(self, writer):
        """
        Create a new SearchCsvWriter that emits rows through `writer`.
        """
        super(SearchCsvWriter, self).__init__(writer)

    def create_title_rows(self, indicator, records):
        """One title row naming the indicator the results belong to."""
        return [["Top Google Search Results for Indicator '%s'" % indicator]]

    def create_header(self):
        """Column headers for search-result rows."""
        return ["Title", "URL", "Content"]

    def create_rows(self, record):
        """Yield one [title, url, content] row per search result in `record`."""
        for raw_result in record["info"]["results"]:
            parsed = SearchResult.from_dict(raw_result)
            yield [parsed.title, parsed.url, parsed.content]
| [
"choman@eitccorp.com"
] | choman@eitccorp.com |
11196eb3c3c119871e932d3d88c4e57141ead2de | 9b004e9978a8c4579fb4ebec3d6e50acd272cd14 | /utils.py | 0b9175a7bc58b61ef9cd79d6df0ce7cf9b5f6d1d | [
"MIT"
] | permissive | TwelveYC/Continuous-Time-Network-Modeling | c8184272d82bbbf95aa13abc590f0563efeccbe6 | 72a9b858367a20c0a0b6b4a10050f3e33b034949 | refs/heads/master | 2022-08-04T11:35:57.181342 | 2020-05-19T08:48:28 | 2020-05-19T08:48:28 | 264,342,383 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 806 | py | time_window = [1085980961, 1086000000]
def network_filter(infos):
    """Return the rows of `infos` bracketed by the module-level `time_window`.

    `infos` is a 2-D array whose last column is an ascending timestamp.  The
    slice runs from the row whose timestamp straddles time_window[0] up to
    (exclusive) the row straddling time_window[1].

    Returns (data, start, end) where data == infos[start:end].
    Raises ValueError when the window does not fall inside the timestamp
    range (previously this surfaced as an UnboundLocalError).
    """
    # Normalise the window so time_window[0] <= time_window[1].
    if time_window[0] >= time_window[1]:
        time_window[0], time_window[1] = time_window[1], time_window[0]
    start = end = None
    infos_length = infos.shape[0]
    for index in range(infos_length):
        current_value = infos[index][-1]
        try:
            next_value = infos[index+1][-1]
        except IndexError:
            # Final row: no "next" timestamp left to bracket with.
            break
        # A window edge lies between this row's timestamp and the next one.
        if current_value <= time_window[0] <= next_value:
            start = index
        if current_value <= time_window[1] <= next_value:
            end = index
            break
    if start is None or end is None:
        raise ValueError("time_window is outside the timestamp range of infos")
    data = infos[start:end]
    return data, start, end
def get_weight(delta):
    """Map each (node_a, node_b) pair taken from the first two columns of
    `delta` to the value stored in column 3 of the same row."""
    weight = {}
    for row_number, edge in enumerate(delta):
        weight[(edge[0], edge[1])] = delta[row_number, 3]
    return weight
| [
"18039329128@163.com"
] | 18039329128@163.com |
a350ed78ef7c78122fb66062ee6daeb65dcbd048 | 525f94d72b86eea43b9a85d6f2c0bdb99b23cb64 | /manage.py | f5fe93bbc1b606b07baf8e4284a6c87d634ce370 | [] | no_license | zmz/vnfdeploy | 19abbf5cbf5f0bce2d507daba1782596b4ac3f4e | 74e8fa2480aabdc6daac7a90c2cf4d27f2955328 | refs/heads/master | 2020-03-09T06:47:20.526451 | 2018-04-08T14:37:26 | 2018-04-08T14:37:26 | 128,648,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 807 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "vnfdeploy.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| [
"zhaimanzhao@example.com"
] | zhaimanzhao@example.com |
655fddc823928f91492aad610835384ab03e8aae | 37f98d27bf8555beae35dfd0de6fd8c311b7f4a1 | /Formulario/apps.py | ab4b462e5b71f769619a3e614d83b458f75ca09a | [] | no_license | XikoCat/LES | 952cd6e5bbad6ccb208f24bf39e430e47e100e98 | 5bb526c2bbfdcc504b67cfc2e0f46c00335691b2 | refs/heads/main | 2023-06-24T01:19:29.828182 | 2021-07-07T10:36:39 | 2021-07-07T10:36:39 | 360,229,255 | 0 | 7 | null | 2021-07-05T23:20:32 | 2021-04-21T16:03:11 | JavaScript | UTF-8 | Python | false | false | 158 | py | from django.apps import AppConfig
class FormularioConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'Formulario'
| [
"guigasreis99@gmail.com"
] | guigasreis99@gmail.com |
278051ec6cbcf629f1cebf9e7ec8534cfb5c0706 | 03b9d9cf6393b67f473d5db5d8e3aff593c1578c | /tictactoe.py | 91c365dc98aa6bfad666da2f4b25969a83300798 | [] | no_license | pulkitagrawal34/tictactoe | d6b9d4984569bfdd17735e7ee78d81954930fcd3 | 80620b772e1a85b7496504abf6c98e44721da8bd | refs/heads/master | 2020-05-30T12:05:24.370192 | 2019-04-04T20:10:56 | 2019-04-04T20:10:56 | 189,723,775 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,022 | py | symbol1 = str(input("Hi, Welcome to Tic Tac Toe, Player1 which symbol do you wish to choose X or O :")).upper()
symbol2= ""
if symbol1=="X" or symbol1=="O":
if symbol1 =="X":
symbol2 = "O"
else:
symbol2= "X"
values= [" "," "," "," "," "," "," "," "," "]
def print_grid():
    """Draw the current 3x3 board from the module-level `values` list."""
    print(" ")
    for base in (0, 3, 6):
        print("| {} | {} | {} |".format(values[base], values[base + 1], values[base + 2]))
        if base != 6:
            print("----------------")
    print(" ")
def check_winner(player1):
    """Return True (and print a congratulation naming `player1`) when any
    line of three identical non-blank symbols exists on the board; return
    False otherwise.

    Fixes: the original had an unreachable `break` after `return True` and
    fell off the end (returning None) when no line was complete.  Callers
    compare the result with `== True`, so an explicit False is compatible.
    """
    # All eight winning lines: three rows, three columns, two diagonals.
    poss = [(0,1,2), (3,4,5), (6,7,8), (0,3,6), (1,4,7), (2,5,8), (0,4,8), (2,4,6)]
    for line in poss:
        if values[line[0]] == values[line[1]] == values[line[2]] != " ":
            print("congratulations {}, you won!!!".format(player1))
            return True
    return False
print(" Player 1 symbol: {}".format(symbol1))
print(" Player 2 symbol: {}".format(symbol2))
print_grid()
# At most 9 moves fit on the board; both players move inside one iteration.
# NOTE(review): choosing an occupied cell ends the whole game via `break`
# instead of re-prompting -- confirm that is intended.
for i in range(9):
    value1 = int(input("Player1, please enter your choice: "))  # 1-based cell number
    # The X/O comparisons are redundant: == " " already excludes both marks.
    if values[value1-1] == " " and values[value1-1] != "X" and values[value1-1] != "O":
        values[value1-1] = symbol1
    else:
        print(" The place is already assigned ")
        break
    print_grid()
    result = check_winner("player1")
    if result== True:
        break
    value2 = int(input("Player2, please enter your choice: "))
    if values[value2-1] == " " and values[value2-1] != "X" and values[value2-1] != "O":
        values[value2-1] = symbol2
    else:
        print(" The place is already assigned ")
        break
    print_grid()
    result = check_winner("player2")
    if result== True:
        break
# for-else: runs only when all 9 moves complete without a break (a draw).
# NOTE(review): this message reads like the else of the symbol-validation
# check at the top of the file -- it looks misplaced here; confirm intent.
else:
    print(" ")
    print("please enter correct value")
| [
"agrawal.pulkit34@gmail.com"
] | agrawal.pulkit34@gmail.com |
5a7d6605786fc5fcc99855f8443cc353f88d41e1 | d7cfe98faeb0fe1b4ce02d54d8bbedaca82764f7 | /0805_list2/0805hw.py | 9b81c5c1d7ad7827934492d71fde55202a2de6b8 | [] | no_license | Anseik/algorithm | 27cb5c8ec9692cf705a8cea1d60e079a7d78ef72 | 925404006b84178682206fbbb3b989dcf4c3dee9 | refs/heads/master | 2023-02-26T00:02:01.696624 | 2021-02-03T14:10:28 | 2021-02-03T14:10:28 | 301,753,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,538 | py | import sys
sys.stdin = open('sum.txt')  # feed the input file through stdin
T = 10
for tc in range(1, T+1):
    # NOTE(review): the loop counter `tc` is immediately overwritten by the
    # test-case number read from the file.
    tc = int(input())
    arr = [list(map(int, input().split())) for _ in range(100)]  # 100x100 grid
    N = len(arr) # number of rows
    M = len(arr[0]) # number of columns
    max_num = 0 # running maximum over all row/column/diagonal sums
    total_dia1 = 0
    total_dia2 = 0
    # rows (both diagonals are accumulated during the same sweep)
    for row in range(N):
        total_row = 0 # reset at the start of every row
        for rowin in range(M):
            total_row += arr[row][rowin]
            if row == rowin: # main (top-left to bottom-right) diagonal
                total_dia1 += arr[row][rowin]
            elif row + rowin == len(arr)-1: # anti (top-right to bottom-left) diagonal
                total_dia2 += arr[row][rowin] # indexed as [row][column]
        if total_row > max_num: # keep the largest row sum seen so far
            max_num = total_row
    if total_dia1 > max_num: # diagonal totals are only complete after the full sweep
        max_num = total_dia1
    if total_dia2 > max_num:
        max_num = total_dia2
    for col in range(M): # column-major traversal
        total_col = 0
        for colin in range(N):
            total_col += arr[colin][col]
        if total_col > max_num: # keep the largest column sum as well
            max_num = total_col
    print('#%d' % tc, max_num)
"kma9271@naver.com"
] | kma9271@naver.com |
3aee7e0ff5c37853dae067588888023b40a8e4a7 | 9edaf93c833ba90ae9a903aa3c44c407a7e55198 | /travelport/models/additional_element.py | 72953cad961237f92dd486be93a95629a91844ba | [] | no_license | tefra/xsdata-samples | c50aab4828b8c7c4448dbdab9c67d1ebc519e292 | ef027fe02e6a075d8ed676c86a80e9647d944571 | refs/heads/main | 2023-08-14T10:31:12.152696 | 2023-07-25T18:01:22 | 2023-07-25T18:01:22 | 222,543,692 | 6 | 1 | null | 2023-06-25T07:21:04 | 2019-11-18T21:00:37 | Python | UTF-8 | Python | false | false | 898 | py | from __future__ import annotations
from dataclasses import dataclass, field
__NAMESPACE__ = "http://www.travelport.com/schema/util_v52_0"
# Schema-binding dataclass; the `None | str` annotations rely on
# `from __future__ import annotations` at the top of this module.
@dataclass
class AdditionalElement:
    """
    To add or update reference data master records.

    Parameters
    ----------
    name
        Please provide other column names. This should match with exact
        database column name
    value
        Please provide corresponding value of the Name field
    """
    class Meta:
        # Target XML namespace -- presumably consumed by an external binding
        # library (e.g. xsdata-style serializers); confirm.
        namespace = "http://www.travelport.com/schema/util_v52_0"
    # Bound to the required XML attribute "Name".
    name: None | str = field(
        default=None,
        metadata={
            "name": "Name",
            "type": "Attribute",
            "required": True,
        }
    )
    # Bound to the required XML attribute "Value".
    value: None | str = field(
        default=None,
        metadata={
            "name": "Value",
            "type": "Attribute",
            "required": True,
        }
    )
| [
"chris@komposta.net"
] | chris@komposta.net |
15d4f3165a9bed215e27830574c790cbb0a85adf | ebe5a2fad13f8675c19aad889a51d6d149f3ba9a | /utils/dataloaders/data_loader_train_det.py | be61c4dfdfffbcecb8c716bfdbbecaa0f7cd6c23 | [
"MIT"
] | permissive | bentocg/seebirds | ca8244cf8daa438411f5d8651e76584031dcd274 | 8d606ecf5e8568397e54d453846810504c64330d | refs/heads/master | 2020-09-10T23:41:29.935948 | 2019-11-15T07:30:21 | 2019-11-15T07:30:21 | 221,868,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,381 | py | # modified from torchvision/datasets/folder.py
import torch.utils.data as data
from PIL import Image
import os
import os.path
import pandas as pd
import numpy as np
def has_file_allowed_extension(filename, extensions):
    """Return True when *filename* ends with one of *extensions*.

    The comparison is case-insensitive on the filename side; the extensions
    themselves are expected to be lowercase (see IMG_EXTENSIONS).
    """
    lowered = filename.lower()
    # str.endswith accepts a tuple of suffixes and tries each one.
    return lowered.endswith(tuple(extensions))
def make_dataset(dir, extensions, training_set, shuffle=True, labels=('AdultBird', 'Chick', 'Egg')):
    """Index every image under *dir* and collect its point annotations.

    Args:
        dir (string): root directory, walked recursively for image files.
        extensions (list[string]): allowed filename suffixes.
        training_set (string): training-set name; its ``centroids.csv`` holds
            per-image centroid strings of underscore-separated integers.
        shuffle (bool): randomly permute the sample order once after indexing.
        labels (sequence[string]): annotation classes, i.e. columns of the CSV.
            Changed from a mutable list default to a tuple so the default
            object can never be mutated across calls.

    Returns:
        [images, locations]: list of image paths, plus a dict mapping each
        label to a per-image list of (N, 2) integer arrays (an empty list when
        the image has no annotation for that label).
    """
    images = []
    locations = {label: [] for label in labels}
    det_df = pd.read_csv('./training_sets/{}/centroids.csv'.format(training_set), index_col=0)
    # Hoisted loop invariant: the original tested `fname in list(det_df.Image)`
    # inside the walk, rebuilding the list (O(n)) for every single file.
    annotated_images = set(det_df.Image)
    for root, _, fnames in sorted(os.walk(dir)):
        for fname in sorted(fnames):
            if has_file_allowed_extension(fname, extensions):
                path = os.path.join(root, fname)
                images.append(path)
                # get locations
                if fname not in annotated_images:
                    for label in labels:
                        locations[label].append([])
                else:
                    for label in labels:
                        loc_str = det_df.loc[det_df.Image == fname][label].values[0]
                        if type(loc_str) == str:
                            locs = [int(ele) for ele in loc_str.split("_")]
                            # Pair consecutive integers, swapping each pair's
                            # order -- presumably the CSV stores (col, row) and
                            # we want (row, col); confirm against centroids.csv.
                            locs = np.array([(locs[i+1], locs[i]) for i in range(0, len(locs)-1, 2)]).reshape(-1, 2)
                        else:
                            # Non-string cell (NaN): no annotations for this label.
                            locs = []
                        locations[label].append(locs)
    # random permutation, applied consistently to images and all label lists
    if shuffle:
        idcs = np.random.permutation([idx for idx in range(len(images))])
        images = [images[idx] for idx in idcs]
        for label in labels:
            locations[label] = [locations[label][idx] for idx in idcs]
    return [images, locations]
class DatasetFolder(data.Dataset):
    """Point-annotation detection dataset over a directory of images.

    Indexes every allowed image under *root* (via ``make_dataset``) together
    with its per-label point annotations, and renders those points into a
    per-class hit map alongside each image.

    Args:
        root (string): Root directory path scanned for images.
        loader (callable): A function to load a sample given its path.
        extensions (list[string]): A list of allowed extensions.
        training_set (string): Name used to locate the ``centroids.csv``
            annotation file.
        shuffle (bool, optional): Shuffle sample order once at load time.
        shape_transform (callable, optional): Joint transform applied to the
            image and its location map (expected to also return a count).
        int_transform (callable, optional): Intensity transform applied to
            the image only.
        img_dim (sequence, optional): (height, width) of the full-frame hit
            map.  Changed from a mutable list default to a tuple so the
            default object can never be mutated across instances.

    Attributes:
        samples (list): List of image paths.
        locations (dict): Per-label lists of annotated point coordinates.
    """
    def __init__(self, root, loader, extensions, training_set, shuffle=True, shape_transform=None, int_transform=None,
                 img_dim=(1080, 1920)):
        samples, locations = make_dataset(root, extensions, training_set, shuffle)
        if len(samples) == 0:
            raise RuntimeError("Found 0 files in subfolders of: " + root + "\n"
                               "Supported extensions are: " + ",".join(extensions))
        self.root = root
        self.loader = loader
        self.extensions = extensions
        self.img_dim = img_dim
        self.samples = samples
        self.locations = locations
        self.n_classes = len(locations)
        self.shape_transform = shape_transform
        self.int_transform = int_transform
    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (sample, count, locations) -- the transformed image, the
            per-crop annotation count produced by shape_transform (None when
            no shape_transform is set), and the location hit-map image.
        """
        path = self.samples[index]
        locs = [self.locations[label][index] for label in self.locations]
        sample = self.loader(path)
        hit_value = 255
        # One uint8 channel per class; annotated pixels are set to hit_value.
        locations = np.zeros([self.img_dim[0], self.img_dim[1], self.n_classes], dtype=np.uint8)
        for idx, loc in enumerate(locs):
            for pnt in loc:
                # Skip annotations that fall outside the frame.
                if pnt[0] < self.img_dim[0] and pnt[1] < self.img_dim[1]:
                    locations[pnt[0], pnt[1], idx] = hit_value
        locations = Image.fromarray(locations.astype('uint8'))
        # Bug fix: `count` used to be assigned only inside the shape_transform
        # branch, so returning it raised NameError whenever shape_transform
        # was None.  Default it explicitly.
        count = None
        if self.shape_transform is not None:
            sample, locations, count = self.shape_transform(sample, locations)
        if self.int_transform is not None:
            sample = self.int_transform(sample)
        return sample, count, locations
    def __len__(self):
        """Number of indexed images."""
        return len(self.samples)
    def __repr__(self):
        """Multi-line summary of the dataset and its transforms."""
        fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
        fmt_str += '    Number of datapoints: {}\n'.format(self.__len__())
        fmt_str += '    Root Location: {}\n'.format(self.root)
        tmp = '    Shape Transforms (if any): '
        fmt_str += '{0}{1}\n'.format(tmp, self.shape_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
        tmp = '    Intensity Transforms (if any): '
        fmt_str += '{0}{1}'.format(tmp, self.int_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
        return fmt_str
# Filename suffixes accepted as images by make_dataset / the dataset classes.
IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif']
def pil_loader(path):
    """Load the image at *path* with PIL and return it converted to RGB."""
    # Open through an explicit file handle so the descriptor is closed
    # promptly (https://github.com/python-pillow/Pillow/issues/835).
    with open(path, 'rb') as handle:
        image = Image.open(handle)
        return image.convert('RGB')
def accimage_loader(path):
    """Load *path* with accimage, falling back to PIL when decoding fails."""
    import accimage
    try:
        return accimage.Image(path)
    except IOError:
        # Potentially a decoding problem, fall back to PIL.Image
        return pil_loader(path)
def default_loader(path):
    """Dispatch image loading to torchvision's currently active backend."""
    from torchvision import get_image_backend
    backend = get_image_backend()
    if backend == 'accimage':
        return accimage_loader(path)
    return pil_loader(path)
class ImageFolderTrainDet(DatasetFolder):
    """Detection dataset over a directory of annotated training images.

    Thin convenience wrapper around :class:`DatasetFolder` that fixes the
    loader and the accepted image extensions, so callers only supply the
    image directory and the training-set name used to locate its
    ``centroids.csv`` annotations.

    Args:
        root (string): Directory containing the training images.
        training_set (string): Training-set name whose ``centroids.csv``
            provides the per-image point annotations.
        loader (callable, optional): Function loading an image from a path.
        extensions (list[string], optional): Allowed filename suffixes.
        shuffle (bool, optional): Shuffle the sample order once at load time.
        shape_transform (callable, optional): Joint transform applied to the
            image and its location map.
        int_transform (callable, optional): Intensity transform applied to
            the image only.

    Attributes:
        samples (list): List of image paths.
        locations (dict): Per-label lists of annotated point coordinates.
    """
    def __init__(self, root, training_set, loader=default_loader, extensions=IMG_EXTENSIONS, shuffle=True, shape_transform=None, int_transform=None):
        super(ImageFolderTrainDet, self).__init__(root=root, training_set=training_set, loader=loader, extensions=extensions,
                                                  shuffle=shuffle, shape_transform=shape_transform,
                                                  int_transform=int_transform)
| [
"bentocollares@gmail.com"
] | bentocollares@gmail.com |
1aa01a9696faa0f4c4ffb063e80ba12a999512ee | 80aec89e42bf21d8beb3b42bebd794a805f295eb | /Version Control Using Git/BootstrapCI.py | ca69fed12cdad8bea852547e99cdbae9c7790bbe | [] | no_license | hammadshaikhha/Tools-and-Documents-for-Researchers | 2b8c946455fdeefb9a517317ab7f95b6775397b6 | 0fa0d004a01148529db83c9998a1038bad325d65 | refs/heads/master | 2020-05-01T07:46:01.463696 | 2019-11-22T23:55:03 | 2019-11-22T23:55:03 | 177,359,427 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,472 | py | '''
Purpose: Compare Classical vs Bootstrap confidence intervals when sample size
is small and population is right skewed.
'''
# Load packages
import numpy as np
import seaborn
from scipy import stats
import matplotlib.pyplot as plt
from random import choices
# Initialize parameters of the right-skewed Beta(a, b) population and the
# simulation: alpha is the nominal CI miss rate, n_sim the Monte-Carlo count.
shape_a = 1.5
shape_b = 5
alpha = 0.05
n_sim = 1000
# Analytic mean of a Beta(a, b) distribution: a / (a + b).
pop_mean = shape_a/(shape_a+shape_b)
# Scratch lists reused inside the bootstrap loop, plus one coverage
# probability per sample size for the final scatter plot.
boot_means_list = []
boot_tstats_list = []
boot_coverage_prob = []
t_coverage_prob = []
# Iterate over sample sizes
for n_sample in range(20, 100, 20):
    # Bug fix: these tallies were previously created once at module level, so
    # the coverage probability recorded for each sample size was a running
    # average over all smaller sample sizes too.  Reset them per size.
    t_coverage = []
    boot_coverage = []
    # Degrees of freedom
    df = n_sample-1
    # Repeatedly sample from population
    for sim in range(n_sim):
        # Draw a sample from Population
        data = np.random.beta(shape_a, shape_b, n_sample)
        ## Bootstrap 95% CI
        for boot_sim in range(n_sim):
            # Resample with replacement from the sample
            boot_sample = choices(data, k = n_sample)
            boot_mean = np.mean(boot_sample)
            # Standard error of bootstrap sample
            boot_sample_se = np.std(boot_sample)/np.sqrt(n_sample)
            # Bootstrap t-stats
            boot_tstat = (boot_mean - np.mean(data))/boot_sample_se
            boot_tstats_list.append(boot_tstat)
            # Store sample mean in Lists
            boot_means_list.append(boot_mean)
        # Bootstrap standard error from the spread of the resampled means.
        boot_se = np.std(boot_means_list)
        # NOTE(review): the textbook bootstrap-t interval subtracts the upper
        # t-quantile for the lower bound and vice versa; here the quantiles
        # are added with the same orientation -- confirm this is intentional.
        margin_error_boot_low = np.quantile(boot_tstats_list, alpha/2)*boot_se
        margin_error_boot_up = np.quantile(boot_tstats_list, 1-alpha/2)*boot_se
        # Reset bootstrap lists
        boot_means_list = []
        boot_tstats_list = []
        ## Standard 95% CI
        # CI components
        sample_mean = np.mean(data)
        sample_std = np.std(data)
        margin_error = stats.t.ppf(1-alpha/2, df)*(sample_std/np.sqrt(n_sample))
        # Lower and upper bounds for student t
        CI_lower = sample_mean - margin_error
        CI_upper = sample_mean + margin_error
        # Lower and upper bounds for boot strap
        boot_CI_lower = sample_mean + margin_error_boot_low
        boot_CI_upper = sample_mean + margin_error_boot_up
        # Check whether CI covers true population mean
        t_coverage.append(pop_mean <= CI_upper and pop_mean >= CI_lower)
        boot_coverage.append(pop_mean <= boot_CI_upper and pop_mean >= boot_CI_lower)
    # Store one coverage probability per sample size (matches the x-axis below).
    t_coverage_prob.append(np.mean(t_coverage))
    boot_coverage_prob.append(np.mean(boot_coverage))
# Scatter plot of sample size and coverage probability
boot_scatter = plt.scatter(range(20, 100, 20), boot_coverage_prob)
t_scatter = plt.scatter(range(20, 100, 20), t_coverage_prob)
plt.legend([boot_scatter, t_scatter], ["Bootstrap CI", "Student t CI"])
plt.title("Coverage Probability and Sample Size")
plt.xlabel("Sample Size")
plt.ylabel("Probability of Covering True Mean")
plt.show()
| [
"hammy.shaikh@mail.utoronto.ca"
] | hammy.shaikh@mail.utoronto.ca |
5343cb1815652c455db8a17c89c33d74c34cfc61 | e5e4f644f3c5bd69adef71ddabfc0ad986755604 | /morningprayers_project/todo/migrations/0001_initial.py | 8dbc03dcf973d43ca62b214c3645cbb9308f2b4d | [] | no_license | shobberlyridge/morningprayers | f54ba7f35f690e0748a140d42c4dc1bdb10006f8 | f4b10b223cbae9a7c435394676c47c376d7fb644 | refs/heads/master | 2020-05-03T13:59:27.604006 | 2019-03-31T11:02:14 | 2019-03-31T11:02:14 | 178,665,805 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | # Generated by Django 2.1.7 on 2019-03-31 10:33
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the todo `Item` table."""
    # First migration in this app's history.
    initial = True
    # No prior migrations (here or in other apps) are required.
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Item',
            fields=[
                # Auto-created integer primary key.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=25)),
                ('description', models.CharField(max_length=100)),
            ],
        ),
    ]
| [
"edward@albany.me.uk"
] | edward@albany.me.uk |
5f8a3085e4ae101fad5f30a62fcd92d3ab2fbada | ba331fd3ccedb9fa438ca371e333251fa6bce15b | /bttest.py | 01716cafa6bca92c1c5294c8db86e2f0f5b5e9a5 | [] | no_license | wei-coder/tree | b1b08eecfe26b86da3db13144eabc0f8fccfa402 | 06e4e4b1f2395f9e2c0253acaf2b0b62c8c11163 | refs/heads/master | 2021-10-27T13:41:07.195377 | 2019-04-17T12:10:02 | 2019-04-17T12:10:02 | 113,540,319 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,859 | py | #!/usr/local/bin/python3.8
# encoding: utf-8
import pexpect
import sys
import random
import time
import os
# Single-letter commands understood by the bin_tree binary under test.
q_order = 'q'
p_order = 'p'
d_order = 'd'
i_order = 'i'
# pexpect pattern lists: expect() returns 0 when the prompt regex matches,
# 1 on EOF and 2 on timeout.
common = [".*quit]:", pexpect.EOF, pexpect.TIMEOUT]
add = [".*insert:", pexpect.EOF, pexpect.TIMEOUT]
remove = [".*delete:", pexpect.EOF, pexpect.TIMEOUT]
prompt = [common,add, remove, pexpect.EOF, pexpect.TIMEOUT]  # NOTE(review): unused
def insert(child, value):
    """Drive the insert dialogue: main prompt -> 'i' -> value prompt -> value."""
    matched = child.expect(common)
    if matched != 0:
        # Main prompt never appeared: dump the tree state and report failure.
        show(child)
        print("can not insert node!")
        return
    child.sendline(i_order)
    if child.expect(add) == 0:
        child.sendline(str(value))
    else:
        print("timeout!")
def delete(child, value):
    """Drive the delete dialogue: main prompt -> 'd' -> value prompt -> value."""
    matched = child.expect(common)
    if matched != 0:
        print("can not delete node!")
        return
    child.sendline(d_order)
    if child.expect(remove) == 0:
        child.sendline(str(value))
    else:
        print("timeout!")
def show(child):
    """Ask the tree process to print its current contents ('p' command)."""
    if child.expect(common) == 0:
        child.sendline(p_order)
    else:
        print("can not show!")
def quit_proc(child):
    """Hand the spawned process's terminal over to the interactive user."""
    child.interact()
def start_proc(cmd, logfd):
    """Spawn *cmd* under pexpect, teeing the session transcript into *logfd*."""
    proc = pexpect.spawn(cmd)
    proc.logfile = logfd
    return proc
def main_test(child):
    """Print the tree once, then insert 20 random keys with a short pause."""
    show(child)
    for _ in range(20):
        insert(child, random.randint(1, 1000))
        time.sleep(0.001)
if __name__=='__main__':
    # Clean artifacts from any previous run and rebuild the binary under test.
    os.system('rm -rf core.*')
    os.system('rm -rf log.txt')
    os.system('./gcc.sh')
    # NOTE(review): "+wb" relies on CPython accepting mode characters in any
    # order; "wb+" is the conventional spelling.
    fout = open("log.txt", "+wb")
    # Follow the transcript in the background while the test runs.
    os.system("tailf log.txt &")
    cmd = './bin_tree'
    child = start_proc(cmd,fout)
    main_test(child)
    # Kill the background tailf before handing the console to the user.
    os.system("ps -ef | grep tailf | grep -v grep | cut -c 9-15 | xargs kill -9")
    quit_proc(child)
| [
"mazhiwei@didichuxing.com"
] | mazhiwei@didichuxing.com |
e9f47196e5378c2e19942da38c4cbf8c59c79085 | 669c206d30ea65a059c1e87876159a9b3c19458e | /Part 2 - Regression/Section 4 - Simple Linear Regression/simple_linear_regression.py | 27853618c33f4288170a7e1b5e5747a95a1c4e70 | [] | no_license | AtmiyaVaghela/udemy-machinelearning | 798e4ad35b368830c825a30dbc6af581efd4dd22 | f9e65a6f6263927d3e909b544e53b5e694d52510 | refs/heads/master | 2020-03-28T20:44:05.187517 | 2018-12-21T11:35:15 | 2018-12-21T11:35:15 | 149,096,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,441 | py | # Simple Linear Regression
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Importing the dataset: last column (Salary) is the target, the rest features.
dataset = pd.read_csv('Salary_Data.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 1].values

# Splitting the dataset into the Training set and Test set.
# Fix: sklearn.cross_validation was deprecated in scikit-learn 0.18 and
# removed in 0.20; train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size =1/3, random_state = 0)

# Feature Scaling (not needed here -- LinearRegression handles raw scales)
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
sc_y = StandardScaler()
y_train = sc_y.fit_transform(y_train)"""

# Fitting Simple Linear Regression to the Training set
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)

# Predicting the Test set results
y_pred = regressor.predict(X_test)

# Visualising the Training set results
plt.scatter(X_train, y_train, color = 'red')
plt.plot(X_train, regressor.predict(X_train), color = 'blue')
plt.title('Salary vs Experience (Training set)')
plt.xlabel('Years of Experience')
plt.ylabel('Salary')
plt.show()

# Visualising the Test set results (regression line is fit on training data)
plt.scatter(X_test, y_test, color = 'red')
plt.plot(X_train, regressor.predict(X_train), color = 'blue')
plt.title('Salary vs Experience (Test set)')
plt.xlabel('Years of Experience')
plt.ylabel('Salary')
plt.show()
"mailtoatmiya@gmail.com"
] | mailtoatmiya@gmail.com |
f870175d0e929eb6eecad68d33abcfe80ce9b210 | 258de533d72a25a454259c58950eed5ba8cbaf86 | /game.py | a891aaee71eca0e0264bdfad0dae8016d3809b1e | [] | no_license | leonardopiresousa/Game.Python | 734173dc0b8b123efd004c4ad22eba90eda6feb5 | a23894850fe4b2eac9838bba0b4bf9d43f4b8959 | refs/heads/master | 2023-06-01T21:37:14.616863 | 2021-06-15T00:32:19 | 2021-06-15T00:32:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 773 | py | from models.calcular import Calcular
def main() -> None:
    """Entry point: start a fresh game with a zeroed score."""
    jogar(0)
def jogar(pontos: int) -> None:
    """Play one round: ask a Calcular question, score it, and offer a replay.

    Args:
        pontos: points accumulated so far; threaded through the recursion
            so the score survives across rounds.
    """
    dificuldade: int = int(input('Informe o nível de dificuldade desejado [1, 2, 3, 4]: '))
    calc: Calcular = Calcular(dificuldade)
    print('Informe o resultado para a seguinte operação: ')
    calc.mostrar_operacao()
    resultado: int = int(input())
    if calc.verificar_resultado(resultado):
        pontos += 1
    print(f'Tu tens {pontos} ponto(s).')
    continuar: int = int(input('Desejas continuar a jogar? [1 = Sim, 0 = Não]'))
    # Any non-zero answer replays; recursion carries the score forward.
    if continuar:
        jogar(pontos)
    else:
        print(f'Finalizaste o jogo com {pontos} ponto(s).')
        print('Até à próxima!')
# Run the game only when executed directly, not when imported.
if __name__ == '__main__':
    main()
| [
"leo17sn@gmail.com"
] | leo17sn@gmail.com |
ed6eabdc094e547631ddb4cc5e3a10d4b7a37158 | 07c61596c1fba2e2a7034fe5af9707794ea2e2c1 | /Kattis/addingwords.py3 | 8018221692c99ffee9041938edba3de0df57e4b2 | [] | no_license | H-Shen/Collection_of_my_coding_practice | 2fcb2f8fef9451ad4a3a9c063bbf6a34ea5966b4 | 6415552d38a756c9c89de0c774799654c73073a6 | refs/heads/master | 2023-08-24T21:19:08.886667 | 2023-08-22T03:47:39 | 2023-08-22T03:47:39 | 180,731,825 | 8 | 1 | null | 2021-08-13T18:25:25 | 2019-04-11T06:48:09 | C++ | UTF-8 | Python | false | false | 1,285 | py3 | # -*- coding: utf-8 -*-
# https://open.kattis.com/problems/addingwords
#
# Symbol table mapping variable names to their integer values.
variableTable = dict()
try:
    while True:
        s = input().split()
        if s[0] == 'clear':
            variableTable.clear()
        elif s[0] == 'def':
            variableTable[s[1]] = int(s[2])
        else:
            # 'calc' query: echo the expression, then the result's name.
            expression = ' '.join(s[1:]) + ' '
            expressionWithValueReplaced = ""
            hasAns = True
            # Substitute each operand by its value; operators pass through.
            for i in s[1:-1]:
                if (i == "+" or i == "-"):
                    expressionWithValueReplaced += i
                elif (i not in variableTable):
                    hasAns = False
                    break
                else:
                    expressionWithValueReplaced += str(variableTable[i])
            if hasAns:
                resultInDict = False
                # Safe use of eval: the string contains only digits and +/-
                # by construction (every operand was replaced by an int).
                result = eval(expressionWithValueReplaced)
                # Reverse lookup: report the first name bound to the result.
                for key, value in variableTable.items():
                    if value == result:
                        resultInDict = True
                        print(expression + key)
                        break
                if not resultInDict:
                    print(expression + "unknown")
            else:
                print(expression + "unknown")
# Narrowed from a bare `except:`: EOFError is the normal end-of-input signal,
# and IndexError covers trailing blank lines (s[0] on an empty split).  The
# bare form also swallowed genuine bugs such as malformed `def` lines.
except (EOFError, IndexError):
    pass
| [
"haohu.shen@ucalgary.ca"
] | haohu.shen@ucalgary.ca |
fcba717859670bfbb4c11b5323f0f8fe247c0b43 | 387bd1c179e9f0ce5ed626d408d78f95c1c84704 | /deltapv/spline.py | adc2fa87635d8780b24a3ed08bf66b03a59d55df | [
"MIT"
] | permissive | WenchenLi/deltapv | bf401d71a145be441fad5d17839a8b1a7e41241d | ffe1f9d46818f33fe51479fc00dc3bf826c6cf64 | refs/heads/master | 2023-07-01T05:28:40.986059 | 2021-07-28T15:20:00 | 2021-07-28T15:20:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,133 | py | from jax import numpy as jnp, grad, vmap, jit
from functools import partial
import matplotlib.pyplot as plt
def quadratic(x, coef):
    """Evaluate a*x**2 + b*x + c at x for coefficients coef = (a, b, c).

    Works elementwise when x and the coefficients are arrays.
    """
    a2, a1, a0 = coef
    return a2 * x**2 + a1 * x + a0
def qspline(x, y):
    """Fit an interpolating quadratic spline through the knots (x, y).

    Builds and solves a dense 3(n-1) x 3(n-1) linear system for the
    per-segment coefficients of a*x**2 + b*x + c.

    Returns:
        (a, b, c): arrays of length n-1, one coefficient triple per segment.
    """
    n = x.size
    M = jnp.zeros((3 * (n - 1), 3 * (n - 1)))
    z = jnp.zeros(3 * (n - 1))
    # Row 0 pins a_0 = 0 (its RHS z[0] stays 0), making the first segment linear.
    M = M.at[0, 0].set(1)
    # NOTE(review): this write is repeated (with the same value) by the i = 0
    # iteration below; it looks redundant -- confirm before removing.
    z = z.at[1].set(y[0])
    for i in range(n - 1):
        # Segment i interpolates its left knot ...
        M = M.at[3 * i + 1, 3 * i:3 * i + 3].set(jnp.array([x[i]**2, x[i], 1]))
        z = z.at[3 * i + 1].set(y[i])
        # ... and its right knot.
        M = M.at[3 * i + 2, 3 * i:3 * i + 3].set([x[i + 1]**2, x[i + 1], 1])
        z = z.at[3 * i + 2].set(y[i + 1])
    for i in range(n - 2):
        # First-derivative continuity between segments i and i + 1 at x[i+1].
        M = M.at[3 * i + 3, 3 * i:3 * i + 6].set(
            jnp.array([2 * x[i + 1], 1, 0, -2 * x[i + 1], -1, 0]))
    coef = jnp.linalg.solve(M, z)
    # De-interleave the flat solution vector into per-segment coefficients.
    a = coef[::3]
    b = coef[1::3]
    c = coef[2::3]
    return a, b, c
@jit
def predict(x, xp, coef):
    """Evaluate the piecewise-quadratic spline at x given knots xp.

    Each query point uses the coefficients of the segment it falls in;
    points left of the first knot are clamped onto segment 0.
    """
    a, b, c = coef
    segment = jnp.clip(jnp.searchsorted(xp, x) - 1, 0)
    return a[segment] * x**2 + b[segment] * x + c[segment]
def ascent(df, x0=0., lr=1., tol=1e-6, niter=100):
    """Scalar gradient ascent on a derivative function.

    Args:
        df: callable returning the derivative at a point.
        x0: starting point.
        lr: step size.
        tol: stop once |df(x)| < tol.  Bug fix: the threshold was previously
            hard-coded to 1e-6, silently ignoring this parameter (the default
            preserves the old behaviour).
        niter: maximum number of steps.

    Returns:
        The final iterate x.
    """
    x = x0
    for _ in range(niter):
        deriv = df(x)
        x = x + lr * deriv
        if jnp.abs(deriv) < tol:
            break
    return x
def findmax(x, coef):
    """Return (ymax, xmax) of the quadratic spline over the knot range.

    Candidates are every segment endpoint plus each parabola's vertex,
    clipped into its own segment.
    """
    a, b, _ = coef
    xl = x[:-1]
    xu = x[1:]
    filla = jnp.where(a != 0, a, 1)  # avoid divide-by-zero
    # Vertex -b / (2a) of each segment's parabola, clipped into [xl, xu].
    # For linear segments (a == 0) the placeholder 1 is harmless: the true
    # maximum lies at an endpoint, which is already among the candidates.
    xm = jnp.clip(-b / (2 * filla), xl, xu)
    yl = quadratic(xl, coef)
    yu = quadratic(xu, coef)
    ym = quadratic(xm, coef)
    xall = jnp.concatenate([xl, xu, xm])
    yall = jnp.concatenate([yl, yu, ym])
    idxmax = jnp.argmax(yall)
    ymax = yall[idxmax]
    xmax = xall[idxmax]
    return ymax, xmax
def calcPmax_gd(v, j):
    """Maximum power from a (V, J) curve via gradient ascent on the spline fit."""
    p = v * j
    coef = qspline(v, p)
    fun = partial(predict, xp=v, coef=coef)
    dfun = grad(fun)
    # Start the ascent from the sampled voltage with the highest power.
    vbest = ascent(dfun, x0=v[jnp.argmax(p)])
    pmax = fun(vbest)
    return pmax
def calcPmax(v, j):
    """Maximum power point (pmax, vmax) via closed-form per-segment maximisation."""
    p = v * j
    coef = qspline(v, p)
    pmax, vmax = findmax(v, coef)
    return pmax, vmax
def qinterp(x, xp, yp):
    """Quadratic-spline interpolation of the samples (xp, yp) evaluated at x."""
    coef = qspline(xp, yp)
    y = predict(x, xp, coef)
    return y
| [
"sean.mann.hk@gmail.com"
] | sean.mann.hk@gmail.com |
398bf5ce1b274b17e833f77259595b3afe67ee9a | 2d5cccc0d41a81b505c8c3b18e09c6eb3dd30018 | /battle_simulator/units/unit.py | b496e7513a175b0cb5c6678847d622b9fc53c3c1 | [] | no_license | NedelkoA/light-it | 5e4c55024f2a2eec48400610c6b4f28ddbbbeb42 | 12499b7bde4ca665e5ad6bed14a96ace39aa8fc8 | refs/heads/master | 2020-03-14T11:49:37.262999 | 2018-06-01T14:02:13 | 2018-06-01T14:02:13 | 131,597,877 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | from abc import ABCMeta, abstractmethod
class Unit(metaclass=ABCMeta):
    """Abstract base for battle units.

    Concrete subclasses implement the attack behaviour, damage intake and
    the read-only stats below.
    """
    @abstractmethod
    def attack(self, target, clock):
        """Attack *target*; *clock* is presumably the battle timer used to
        pace attacks -- confirm in concrete subclasses."""
        pass
    @abstractmethod
    def take_damage(self, dmg):
        """Apply *dmg* points of incoming damage to this unit."""
        pass
    @property
    @abstractmethod
    def alive(self):
        """Whether the unit can still fight."""
        pass
    @property
    @abstractmethod
    def health(self):
        """Current health points."""
        pass
    @property
    @abstractmethod
    def attack_power(self):
        """Damage dealt per attack."""
        pass
| [
"nedartem@mail.ru"
] | nedartem@mail.ru |
363e2952b9826010aa3228020f24e7c391f24110 | e4e93391fd4792109eeee83a2e022f694577f545 | /clean2.py | 5c943d3f5f6966561c61fc99633ef3a3691b1083 | [] | no_license | madinabah2016/DataCleaning | 113022dd2f705ffcdf0f96890670f19a2727ab5f | 7bd6f34996812c23e6d8b79ee3ae4eb0caf965f7 | refs/heads/master | 2022-12-19T01:56:32.222649 | 2020-09-24T08:28:23 | 2020-09-24T08:28:23 | 294,628,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,146 | py | #!/usr/bin/env python3
import pandas as pd
from pandas import ExcelWriter
from pandas import ExcelFile
import math
def update_job_function(df):
    """Translate the numeric `job_function` codes into `outcome` labels, in place.

    Codes are 1-based indices into the label list below.  Mutates *df*.
    """
    job_labels = ['Artist/performer','Consultant','Designer','Engineer','Financial analyst or advisor','Manager or administrator','Military service','Paralegal or legal aide', 'Policy analyst or political aide',
                  'Researcher', 'Scientific/lab technician', 'Software developer or programmer','Teacher (or teacher-in-training)', 'Writer/journalist/editor', 'Other', 'Sales or marketing associate', 'Business or market analyst', 'Data analyst or data scientist', 'Nurse', 'Organizer, activist, or politician']
    for code, label in enumerate(job_labels, start=1):
        df.loc[df['job_function'] == code, 'outcome'] = label
    # TODO: fold the free-text "Other" answers (jobdesc1_16_TEXT) into the
    # outcome before discarding the column.
    # Bug fix: `df = df.drop(...)` rebound a local name, so the column was
    # never removed from the caller's frame; drop in place instead.
    df.drop(['jobdesc1_16_TEXT'], axis=1, inplace=True)
def update_outcome_with_emp_classes(df):
    """Overwrite `outcome` for fellowship / part-time employment classes, in place."""
    df.loc[df['emp_class_1'] == 1, 'outcome'] = 'Fellowship'
    # Bug fix: the original tested emp_class_1 twice, so every fellowship row
    # was immediately relabelled part-time.  Class 2 is the internship flag
    # (see update_employment_category_with_emp_classes).
    df.loc[df['emp_class_2'] == 1, 'outcome'] = 'Working (Part-Time/Internship)'
def update_employment_category_with_emp_classes(df):
    """Set `employment_category` from the one-hot emp_class flags, in place."""
    df.loc[df['emp_class_1'] == 1, 'employment_category'] = 'Fellowship'
    df.loc[df['emp_class_2'] == 1, 'employment_category'] = 'Internship'
    df.loc[df['emp_class_3'] == 1, 'employment_category'] = 'Freelancer'
    df.loc[df['emp_class_4'] == 1, 'employment_category'] = 'Temporary/Contract Work Assignment'
    # Bug fix: `df = df.drop(...)` rebound a local name, so the flag columns
    # were never removed from the caller's frame; drop in place instead.
    df.drop(['emp_class_1', 'emp_class_2', 'emp_class_3', 'emp_class_4', 'emp_class_5'], axis=1, inplace=True)
def update_employer_name_with_Intern(row):
    """Row-wise: prefer the free-text Intern_text entry when it is real text.

    isfloat() is False only for genuine strings; a float-parsable value here
    is presumably a stringified NaN from an empty cell -- confirm against the
    raw export -- in which case the existing employer_name is kept.
    """
    if isfloat(row['Intern_text']) == False:
        return row['Intern_text']
    return row['employer_name']
def isfloat(value):
    """Return True when *value* parses as a float, False on ValueError."""
    try:
        float(value)
    except ValueError:
        return False
    return True
def update_employer_name_with_volunteer(df):
    """Translate the numeric `ngo_pick` codes into organisation names, in place.

    Codes are 1-based indices into the organisation list below.
    """
    organisations = ['Peace Corps','Teach for America','City Year','AmeriCorps','Citizen Schools','Alliance for Catholic Education','other','Teaching Assistant Program in France']
    for code, name in enumerate(organisations, start=1):
        df.loc[df['ngo_pick'] == code, 'employer_name'] = name
    # Free-text "other" entries live in ngo_pick_7_Text and are not merged yet.
def combine_fall_rows(row):
    """Collapse a row's one-hot fall_* survey flags into a single outcome label.

    Flags are checked in a fixed priority order; the first flag set to 1 wins.
    """
    # Special case: working full-time while also enrolled counts as education.
    if row['fall_1'] == 1 and row['fall_4'] == 1:
        return 'Continuing Education'
    priority = [
        ('fall_1', 'Working (Full-Time)'),
        ('fall_2', 'Working (Part-Time/Internship)'),
        ('fall_3', 'Working (Full-Time)'),
        ('fall_4', 'Continuing Education'),
        ('fall_5', 'Continuing Education'),
        ('fall_6', 'Continuing Education'),
        ('fall_7', 'Other'),
        ('fall_10', 'Other'),
        ('fall_11', 'Other'),
        ('fall_12', 'Other'),
    ]
    for column, label in priority:
        if row[column] == 1:
            return label
    return "None"
def getOutcome(df):
    """Derive one outcome label per row from its fall_* one-hot columns."""
    lockedRows = df[['fall_1', 'fall_2','fall_3', 'fall_4','fall_5', 'fall_6','fall_7', 'fall_10','fall_11', 'fall_12' ]]
    return lockedRows.apply(combine_fall_rows, axis=1)
def main():
    """Load the raw survey export, normalise its columns, and print a preview.

    NOTE(review): several steps assume the Qualtrics coding of the raw columns
    (e.g. that `military != 1` marks military service); confirm against the
    survey codebook.
    """
    print("Data Cleaning")
    df = pd.read_excel('data.xlsx', sheet_name='Sheet1', skiprows = [1])
    df['Finished'] = df['Finished'].replace({1:'true', 0:'false'})
    df = df.drop(['RecordedDate'], axis=1)
    df['rescind'] = df['rescind'].replace({1:'true', 2:'false'})
    # Rename raw export columns to snake_case analysis names.
    df = df.rename(columns={'ResponseId': 'response_id', 'rescind':'had_rescinded_offer', 'fellow_text': 'fellowship_name', 'firmname':'employer_name', 'jobtitle':'job_title', 'emptype':'employment_category', 'liveus':'state', 'livenonus':'country', 'jobdesc1':'job_function', 'jobsect':'employer_industry'})
    df['outcome'] = getOutcome(df)
    df = df.drop(['fall_12_TEXT', 'fall_1', 'fall_2', 'fall_3', 'fall_4', 'fall_5', 'fall_6', 'fall_7', 'fall_10', 'fall_11', 'fall_12'], axis=1)
    df.loc[df['military'] != 1, 'outcome'] = 'Military'
    df.loc[df['ngo'] != 1, 'outcome'] = 'Volunteering'
    update_employer_name_with_volunteer(df)
    df = df.drop(['ngo_pick_7_TEXT', 'ngo_pick', 'nxact_text', 'primary', 'primary_6_TEXT', 'ngo', 'military'], axis=1)
    df['employer_name'] = df[['employer_name', 'Intern_text']].apply(update_employer_name_with_Intern, axis=1)
    df.loc[(df['jobplans'] == 3 ) | (df['jobplans'] == 4 ) | (df['jobplans'] == 5 ) , 'country'] = 'Still Looking (Employment)'
    df = df.drop(['jobplans'], axis=1)
    # Bug fix: Series.map returns a new Series; the original discarded it, so
    # the numeric employment_category codes were never relabelled.
    df['employment_category'] = df['employment_category'].map({ 1:'Freelancer', 2:'Organization', 3:'Organization', 4:'Organization' })
    update_outcome_with_emp_classes(df)
    update_employment_category_with_emp_classes(df)
    df.loc[df['liveinout'] == 1, 'country'] = 'United States'
    df = df.drop(['liveinout', 'workdesc', 'jobsect_32_TEXT', 'Rescind2'], axis = 1)
    update_job_function(df)
    df.loc[df['edplans'] == 2, 'outcome'] = 'Still Looking (Continuing Education)'
    print(df.head(4))
# Run the cleaning pipeline only when executed as a script.
if __name__ == "__main__":
    main()
| [
"bahmadina19@gmail.com"
] | bahmadina19@gmail.com |
7e577ecfa6bbf25dc171e66407f494492d5c7c76 | f0fe412430e15777ccde263810f058789500b46e | /dl/experiments/exp_def.py | debac5842044eb8a943d5e292dbd184119f1a7af | [
"MIT"
] | permissive | so-hyeun/BRET | 0230be498afd847e73f851348fcc403a326e5ff0 | cd59b02991944d5418a70b1c4c42e150eb6b1cca | refs/heads/master | 2023-02-20T05:00:58.411131 | 2021-01-15T12:48:08 | 2021-01-15T12:48:08 | 293,409,008 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,152 | py | # coding=utf-8
# Copyright (c) Microsoft. All rights reserved.
import yaml
from data_utils.vocab import Vocabulary
from data_utils.task_def import TaskType, DataFormat, EncoderModelType
from data_utils.metrics import Metric
from mt_dnn.loss import LossCriterion
class TaskDef(dict):
    """Configuration for one task (label vocab, losses, metrics, ...).

    Subclasses dict so the definition can be JSON-serialised: the dict
    portion stores repr() strings of every constructor argument, while the
    instance attributes keep the live objects used at runtime.
    """
    def __init__(self, label_vocab, n_class, data_type, task_type, metric_meta, split_names, enable_san, dropout_p, loss, kd_loss, adv_loss,
                 task_weight): ####
        """
        :param label_vocab: map string label to numbers.
                            only valid for Classification task or ranking task.
                            For ranking task, better label should have large number
        """
        # NOTE(review): locals() at this point also includes `self` (and the
        # implicit `__class__` cell), so their repr() strings land in the dict.
        super().__init__(**{k: repr(v) for k, v in locals().items()})  # ensure the class is JSON serializable
        self.label_vocab = label_vocab
        self.n_class = n_class
        self.data_type = data_type
        self.task_type = task_type
        self.metric_meta = metric_meta
        self.split_names = split_names
        self.enable_san = enable_san
        self.dropout_p = dropout_p
        self.loss = loss
        self.kd_loss = kd_loss
        self.adv_loss = adv_loss
        # #### marks fields added on top of the upstream MT-DNN implementation.
        self.task_weight = task_weight ####
    @classmethod
    def from_dict(cls, dict_rep):
        """Rebuild a TaskDef from a plain dict of constructor arguments."""
        return cls(**dict_rep)
class TaskDefs:
    """Parses a task-definition YAML file into per-task lookup maps."""

    def __init__(self, task_def_path):
        """Load and index the YAML found at ``dl/<task_def_path>``.

        :param task_def_path: YAML path relative to the ``dl/`` directory.
        """
        # Fix: the file handle returned by open() was never closed; a with
        # block releases it deterministically instead of leaking it.
        with open("dl/" + task_def_path) as task_def_file:
            self._task_def_dic = yaml.safe_load(task_def_file)
        global_map = {}
        n_class_map = {}
        data_type_map = {}
        task_type_map = {}
        metric_meta_map = {}
        split_names_map = {}
        enable_san_map = {}
        dropout_p_map = {}
        loss_map = {}
        kd_loss_map = {}
        adv_loss_map = {}
        task_weight_map = {}  ####
        for task, task_def in self._task_def_dic.items():
            assert "_" not in task, "task name should not contain '_', current task name: %s" % task
            n_class_map[task] = task_def["n_class"]
            data_format = DataFormat[task_def["data_format"]]
            data_type_map[task] = data_format
            task_type_map[task] = TaskType[task_def["task_type"]]
            metric_meta_map[task] = tuple(Metric[metric_name] for metric_name in task_def["metric_meta"])
            split_names_map[task] = task_def.get("split_names", ["train", "dev", "test"])
            enable_san_map[task] = task_def["enable_san"]
            # Optional explicit label vocabulary (classification / ranking).
            if "labels" in task_def:
                labels = task_def["labels"]
                label_mapper = Vocabulary(True)
                for label in labels:
                    label_mapper.add(label)
                global_map[task] = label_mapper
            if "dropout_p" in task_def:
                dropout_p_map[task] = task_def["dropout_p"]
            # Loss entries name members of the LossCriterion enum; missing
            # entries map to None.
            if "loss" in task_def:
                t_loss = task_def["loss"]
                loss_crt = LossCriterion[t_loss]
                loss_map[task] = loss_crt
            else:
                loss_map[task] = None
            if "kd_loss" in task_def:
                t_loss = task_def["kd_loss"]
                loss_crt = LossCriterion[t_loss]
                kd_loss_map[task] = loss_crt
            else:
                kd_loss_map[task] = None
            if "adv_loss" in task_def:
                t_loss = task_def["adv_loss"]
                loss_crt = LossCriterion[t_loss]
                adv_loss_map[task] = loss_crt
            else:
                adv_loss_map[task] = None
            if "task_weight" in task_def:  ####
                task_weight_map[task] = task_def["task_weight"]
        self._global_map = global_map
        self._n_class_map = n_class_map
        self._data_type_map = data_type_map
        self._task_type_map = task_type_map
        self._metric_meta_map = metric_meta_map
        self._split_names_map = split_names_map
        self._enable_san_map = enable_san_map
        self._dropout_p_map = dropout_p_map
        self._loss_map = loss_map
        self._kd_loss_map = kd_loss_map
        self._adv_loss_map = adv_loss_map
        # Repurposed from the raw YAML dict to a cache of built TaskDef
        # objects (see get_task_def).
        self._task_def_dic = {}
        self._task_weight_map = task_weight_map  ####

    def get_task_names(self):
        """Return the list of task names defined in the YAML file."""
        return list(self._task_type_map.keys())

    def get_task_def(self, task_name):
        """Build (and cache) the TaskDef for *task_name*."""
        if task_name not in self._task_def_dic:
            assert task_name in self._task_type_map
            self._task_def_dic[task_name] = TaskDef(
                self._global_map.get(task_name, None),
                self._n_class_map[task_name],
                self._data_type_map[task_name],
                self._task_type_map[task_name],
                self._metric_meta_map[task_name],
                self._split_names_map[task_name],
                self._enable_san_map[task_name],
                self._dropout_p_map.get(task_name, None),
                self._loss_map[task_name],
                self._kd_loss_map[task_name],
                self._adv_loss_map[task_name],
                self._task_weight_map.get(task_name, 1)  ####
            )
        return self._task_def_dic[task_name]
| [
"s3267s@naver.com"
] | s3267s@naver.com |
9d7ed97c4e32b6d1240534ee386bd2f6ecc0a59f | dc5d9c301c62ce629fd26854599455bc946b0e34 | /generate_datasets.py | 0dadd88f175dbc2f8fd69ba2449a18c83fd20f91 | [] | no_license | quinlan-alexander/SNPFinderProject | 87572d70f1bb4a293a7396957812fd6ec92d6a40 | 4b91425bc4b8e44018d46f084cfad8772179a852 | refs/heads/master | 2021-04-06T10:06:57.156320 | 2018-03-28T02:11:47 | 2018-03-28T02:11:47 | 124,350,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,380 | py | #!/usr/bin/env python3
# Name: Quinlan Alexander (qalexand)
# Group Members: None
import os
import arv
import pandas as pd
from find_snp import SNPFinder
from spinner import Spinner
########################################################################
# CommandLine
########################################################################
class CommandLine():
    '''
    Handle the command line, usage and help requests.
    '''
    def __init__(self, inOpts=None):
        '''
        Implement a parser to interpret the command line argv string using argparse.

        :param inOpts: optional list of argument strings (used by tests);
                       when None, sys.argv is parsed.
        '''
        import argparse
        self.parser = argparse.ArgumentParser(
            description='Generate population datasets from 23andme raw text files',
            epilog='',
            add_help=True,  # default is True
            prefix_chars='-',
            usage='%(prog)s [options] -option1[default] <input >output'
        )
        self.parser.add_argument('inFileDir', action='store', help='23ndMe raw data file directory')
        self.parser.add_argument('outFile', action='store', help='output file name of CSV dataset containing all results')
        # Bug fix: the default must be a comma-separated string, not a list --
        # main() calls .split(',') on this value, which a list does not support.
        self.parser.add_argument('-g', '--genes', action='store', default='MTHFR', help='comma-separated genes to query for SNPs')
        self.parser.add_argument('-v', '--version', action='version', version='%(prog)s 0.1')
        if inOpts is None:
            self.args = self.parser.parse_args()
        else:
            self.args = self.parser.parse_args(inOpts)
########################################################################
# Main
# Here is the main program
#
#
########################################################################
def main(inCL=None):
    '''
    Walk the input directory of 23andMe raw text files, collect SNP hits for
    the requested genes, and write the combined results to a CSV.

    :param inCL: optional list of argument strings; when None, sys.argv is used.
    '''
    if inCL is None:
        cmd_line = CommandLine()
    else:
        cmd_line = CommandLine(inCL)
    print(cmd_line.args)
    directory = os.fsencode(cmd_line.args.inFileDir)
    all_snps_results_df = pd.DataFrame()
    spinner = Spinner()
    spinner.start()
    for file in os.listdir(directory):
        filename = os.fsdecode(file)
        if filename.endswith(".txt"):
            filepath = os.path.join(str(directory.decode()), filename)
            genome = arv.load(filepath)
            genes = list(cmd_line.args.genes.split(','))
            snpFinder = SNPFinder(filename, genome, genes)
            snpFinder.find_snp_by_genes()
            all_snps_results_df = all_snps_results_df.append(snpFinder.relevant_SNPs_df)
    all_snps_results_df.to_csv(cmd_line.args.outFile, index=False)
    spinner.stop()
    # NOTE(review): hard-coded absolute path -- parameterise before reuse.
    samples_df = pd.read_csv('/Users/ba25714/PycharmProjects/final_project/sample_results.csv')
    score_counts_by_gene_df = pd.DataFrame(columns=['gene', 'warning', 'caution', 'ok'])
    for gene in samples_df['gene'].unique():
        # Bug fixes relative to the original: compare against the loop
        # variable `gene` (not the literal string 'gene'), parenthesise every
        # comparison so `&` does not bind before `<=`, and keep the frame
        # returned by DataFrame.append (it does not mutate in place).
        gene_rows = samples_df['gene'] == gene
        warn = samples_df.loc[gene_rows & (samples_df['score'] <= .05)].agg(['count'])['score']
        caution = samples_df.loc[gene_rows & (samples_df['score'] > .05) & (samples_df['score'] <= 0.5)].agg(['count'])['score']
        ok = samples_df.loc[gene_rows & (samples_df['score'] > .50) & (samples_df['score'] <= 1.0)].agg(['count'])['score']
        score_counts_by_gene_df = score_counts_by_gene_df.append({'gene': gene, 'warning': warn, 'caution': caution, 'ok': ok}, ignore_index=True)
# Standard script entry point.
if __name__ == "__main__":
    main()
"quinlan.alexander@gmail.com"
] | quinlan.alexander@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.