blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5be3582b173c7125710ef92297f6d65604214c6d | cd26139f73a1810e1ea19614cce09835a299d0ca | /kmeans_clustering/lemonade_code.py | 519a319c193b70451684c1c18e493ec16d5081ed | [] | no_license | eubr-bigsea/Lemonade_apps | de8df4e764ba3c796296158b1bb679cc923e579c | 193f0c6248c4cc74b1c53f6d05697c26b5617051 | refs/heads/master | 2021-01-12T06:14:48.372820 | 2017-06-25T21:40:14 | 2017-06-25T21:40:14 | 77,331,294 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,490 | py | # -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Auto-generated Spark code from Lemonade Workflow
# (c) Speed Labs - Departamento de Ciência da Computação
# Universidade Federal de Minas Gerais
# More information about Lemonade to be provided
#
import os
import json
import string
import sys
import time
import unicodedata
from pyspark.ml import Pipeline
from pyspark.ml.classification import *
from pyspark.ml.clustering import *
from pyspark.ml.evaluation import *
from pyspark.ml.feature import *
from pyspark.ml.tuning import *
from pyspark.sql import SparkSession
from pyspark.sql.window import Window
from pyspark.sql.functions import *
from pyspark.sql.types import *
from timeit import default_timer as timer
reload(sys)
sys.setdefaultencoding('utf8')
# Global utilities functions definitions
# Spark UDF removing diacritics: decompose to NFD and drop combining marks ('Mn').
strip_accents = udf(
    lambda text: ''.join(c for c in unicodedata.normalize('NFD', text)
                         if unicodedata.category(c) != 'Mn'), StringType())
# Spark UDF deleting every ASCII punctuation character via a translate table.
strip_punctuation = udf(lambda text:
                        text.translate(
                            dict((ord(char), None)
                                 for char in string.punctuation)),
                        StringType())
def juicer_debug(name, variable, df):
    """Print a banner for the given operation/variable, then dump the
    dataframe contents and its schema (name, type, nullability, metadata).

    Side effects only (stdout); Python 2 print statements.
    """
    print '#' * 20
    print '|| {} ||'.format(name)
    print '== {} =='.format(variable)
    df.show()
    schema = df.schema
    for attr in schema:
        print attr.name, attr.dataType, attr.nullable, attr.metadata
def k_means_clustering_gen_df1(spark_session):
    """Create an (unfitted) KMeans estimator: k=10, k-means|| init,
    up to 10000 iterations.

    Returns the estimator and the elapsed wall time in seconds.
    NOTE: spark_session is unused; kept for the generated-code calling
    convention shared by every task function.
    """
    start = timer()
    df1 = KMeans()
    df1.setMaxIter(10000)
    df1.setK(10)
    df1.setInitMode("k-means||")
    time_elapsed = timer() - start
    return df1, time_elapsed
def data_reader_gen_df3(spark_session):
    """Read the diabetic_data CSV from HDFS into a cached dataframe.

    Empty strings are treated as nulls, the schema is inferred, and
    malformed rows are dropped (mode='DROPMALFORMED'). Returns the
    dataframe and the elapsed wall time in seconds.
    """
    start = timer()
    schema_df3 = None  # no explicit schema: Spark infers it (inferSchema=True)
    url_df3 = 'hdfs://spark01.ctweb.inweb.org.br:9000/lemonade/samples/diabetic_data.csv'
    df3 = spark_session.read\
        .option('nullValue', '')\
        .option('treatEmptyValuesAsNulls',
                'true')\
        .csv(url_df3, schema=schema_df3,
             header=True, sep=',',
             inferSchema=True, mode='DROPMALFORMED')
    # Cache: this dataframe feeds several downstream tasks.
    df3.cache()
    time_elapsed = timer() - start
    return df3, time_elapsed
def feature_indexer_gen_df4_models(spark_session, df3):
    """Index the categorical columns of ``df3`` with StringIndexer.

    Returns the transformed dataframe ``df4`` (one ``*_indexed`` column per
    input column), a dict mapping each input column name to its fitted
    StringIndexerModel, and the elapsed wall time in seconds.
    """
    start = timer()
    # input column -> output (indexed) column
    col_alias = dict(
        [
            [
                "gender", "gender_indexed"], [
                "race", "race_indexed"], [
                "age", "age_indexed"], [
                "weight", "weight_indexed"], [
                "readmitted", "readmitted_indexed"], [
                "diag1", "diag1_indexed"], [
                "diag2", "diag2_indexed"], [
                "diag3", "diag3_indexed"]])
    indexers = [StringIndexer(inputCol=col, outputCol=alias,
                              handleInvalid='skip')
                for col, alias in col_alias.iteritems()]
    # Use Pipeline to process all attributes once
    pipeline = Pipeline(stages=indexers)
    # BUG FIX: the original keyed this dict on col[0] -- the first *character*
    # of the column name -- so e.g. 'diag1'/'diag2'/'diag3' all collided on
    # 'd' and 'race'/'readmitted' on 'r'. Key on the full column name.
    models = dict([(col, indexers[i].fit(df3)) for i, col in
                   enumerate(col_alias)])
    labels = [model.labels for model in models.itervalues()]
    # Spark ML 2.0.1 do not deal with null in indexer.
    # See SPARK-11569
    df3_without_null = df3.na.fill('NA', subset=col_alias.keys())
    df4 = pipeline.fit(df3_without_null).transform(df3_without_null)
    time_elapsed = timer() - start
    return df4, models, time_elapsed
def feature_assembler_gen_df0(spark_session, df4):
    """Assemble the numeric and indexed columns into one 'features' vector.

    Rows with a null in any feature column are dropped first, because
    VectorAssembler cannot handle nulls. Returns the dataframe with the
    added 'features' column and the elapsed wall time in seconds.
    """
    start = timer()
    # Single source of truth for the feature list: the original duplicated
    # this 19-column list verbatim for both the assembler and the null drop.
    feature_columns = [
        "admission_type_id",
        "discharge_disposition_id",
        "admission_source_id",
        "time_in_hospital",
        "num_lab_procedures",
        "num_procedures",
        "num_medications",
        "number_outpatient",
        "number_emergency",
        "number_inpatient",
        "number_diagnoses",
        "gender_indexed",
        "race_indexed",
        "age_indexed",
        "weight_indexed",
        "readmitted_indexed",
        "diag1_indexed",
        "diag2_indexed",
        "diag3_indexed"]
    assembler = VectorAssembler(inputCols=feature_columns,
                                outputCol="features")
    df4_without_null = df4.na.drop(subset=feature_columns)
    df0 = assembler.transform(df4_without_null)
    time_elapsed = timer() - start
    return df0, time_elapsed
def clustering_model_gen_df2_df2_model(spark_session, df0, df1):
    """Fit the KMeans estimator ``df1`` on ``df0`` using the 'features' column.

    Returns the clustered dataframe (with a 'prediction' column added by
    transform), the fitted model, and the elapsed wall time in seconds.
    """
    start = timer()
    df1.setFeaturesCol('features')
    df2_model = df1.fit(df0)
    # There is no way to pass which attribute was used in clustering, so
    # this information will be stored in uid (hack).
    df2_model.uid += '|features'
    df2 = df2_model.transform(df0)
    time_elapsed = timer() - start
    return df2, df2_model, time_elapsed
def projection_gen_df2_tmp_5(spark_session, df2):
    """Project the clustering result down to patient id + assigned cluster,
    dumping the result via juicer_debug. Returns the projection and the
    elapsed wall time in seconds."""
    start = timer()
    df2_tmp_5 = df2.select("patient_nbr", "prediction")
    juicer_debug('juicer.spark.etl_operation.Select', 'df2_tmp_5', df2_tmp_5)
    time_elapsed = timer() - start
    return df2_tmp_5, time_elapsed
def main():
    """Execute the generated Lemonade workflow end to end.

    Builds the Spark session, chains the task functions
    (reader -> indexer -> assembler -> KMeans -> projection), prints
    "total elapsed \\t elapsed since session start", and returns each
    task's outputs keyed by its workflow task UUID.
    """
    start = time.time()
    app_name = u'## Experimento K-Means - Variação de Núcleos ##'
    spark_options = {
        "driver-library-path": '{}/lib/native/'.format(
            os.environ.get('HADOOP_HOME')),
    }
    builder = SparkSession.builder.appName(app_name)
    spark_session = builder.getOrCreate()
    # Apply extra options on the already-created session (Python 2 dict API).
    for option, value in spark_options.iteritems():
        spark_session.conf.set(option, value)
    session_start_time = time.time()
    # spark_session.sparkContext.addPyFile('/tmp/dist.zip')
    # Declares and initializes variables in order to not generate NameError.
    # Some tasks may not generate code, but at least one of their outputs is
    # connected to a valid input in a task generating code. This happens when
    # task has a port with multiplicity MANY
    df1, ts_df1 = k_means_clustering_gen_df1(spark_session)
    df3, ts_df3 = data_reader_gen_df3(spark_session)
    df4, models, ts_df4 = feature_indexer_gen_df4_models(spark_session, df3)
    df0, ts_df0 = feature_assembler_gen_df0(spark_session, df4)
    df2, df2_model, ts_df2 = clustering_model_gen_df2_df2_model(
        spark_session, df0, df1)
    df2_tmp_5, ts_df2_tmp_5 = projection_gen_df2_tmp_5(spark_session, df2)
    end = time.time()
    print "{}\t{}".format(end - start, end - session_start_time)
    return {
        '114c207c-5371-4037-9525-9f470a9e15af': (df1, ts_df1),
        '1e5b2f54-cd43-480b-9735-587e0584b283': (df3, ts_df3),
        '0ee7bbbe-c786-4529-9a67-0c1bd8830fc8': (df4, models, ts_df4),
        'dc773e75-f591-4f17-a3ba-6c23fb0324ab': (df0, ts_df0),
        '42380b12-1ddc-40c3-a2a6-c4fdd9e65194': (df2, df2_model, ts_df2),
        'c69dbcc6-71af-4270-bdd3-8f80d14a0b8b': (df2_tmp_5, ts_df2_tmp_5),
    }
| [
"noreply@github.com"
] | noreply@github.com |
814f03eadcaab69f52eefd4a56dbfdb84e3b45c7 | 298d6d0d1ccf5e376fb001a3f00a30a5e2dd82cf | /run.py | 3aba6a757d44e46e7d4d1dc06d063b2c2c1dbf8e | [
"Apache-2.0"
] | permissive | srivastavaanuj11/Feedback-using-Pose-Estimation | 4650b0ba1e7387e7bef5870c65285f646f4d3171 | b6e019b6d2e1bd6aea72de6fa4bea649d6d8aef3 | refs/heads/master | 2022-09-13T07:28:26.939174 | 2020-05-28T19:04:07 | 2020-05-28T19:04:07 | 267,564,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,648 | py | import argparse
import logging
import sys
import time
from tf_pose import common
import cv2
import numpy as np
from tf_pose.estimator import TfPoseEstimator
from tf_pose.networks import get_graph_path, model_wh
import tensorflow as tf
# TF1-style session config: let GPU memory grow on demand instead of
# grabbing the whole device up front.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)

# Module logger: DEBUG and above to the console with a timestamped format.
logger = logging.getLogger('TfPoseEstimatorRun')
logger.handlers.clear()
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='tf-pose-estimation run')
    parser.add_argument('--image', type=str, default='./images/p1.jpg')
    parser.add_argument('--model', type=str, default='cmu',
                        help='cmu / mobilenet_thin / mobilenet_v2_large / mobilenet_v2_small')
    parser.add_argument('--resize', type=str, default='0x0',
                        help='if provided, resize images before they are processed. '
                             'default=0x0, Recommends : 432x368 or 656x368 or 1312x736 ')
    parser.add_argument('--resize-out-ratio', type=float, default=4.0,
                        help='if provided, resize heatmaps before they are post-processed. default=1.0')

    args = parser.parse_args()

    # Build the pose estimator; fall back to 432x368 when --resize was 0x0.
    w, h = model_wh(args.resize)
    if w == 0 or h == 0:
        e = TfPoseEstimator(get_graph_path(args.model), target_size=(432, 368))
    else:
        e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))

    # estimate human poses from a single image !
    image = common.read_imgfile(args.image, None, None)
    if image is None:
        logger.error('Image can not be read, path=%s' % args.image)
        sys.exit(-1)

    t = time.time()
    humans = e.inference(image, resize_to_default=(w > 0 and h > 0), upsample_size=args.resize_out_ratio)
    elapsed = time.time() - t

    logger.info('inference image: %s in %.4f seconds.' % (args.image, elapsed))

    # Draw the detected skeletons onto the image in place.
    image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)

    try:
        import matplotlib.pyplot as plt

        fig = plt.figure()
        a = fig.add_subplot(2, 2, 1)
        a.set_title('Result')
        plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))

        bgimg = cv2.cvtColor(image.astype(np.uint8), cv2.COLOR_BGR2RGB)
        bgimg = cv2.resize(bgimg, (e.heatMat.shape[1], e.heatMat.shape[0]), interpolation=cv2.INTER_AREA)

        # show network output
        a = fig.add_subplot(2, 2, 2)
        plt.imshow(bgimg, alpha=0.5)
        tmp = np.amax(e.heatMat[:, :, :-1], axis=2)
        plt.imshow(tmp, cmap=plt.cm.gray, alpha=0.5)
        plt.colorbar()

        # Part-affinity fields: even channels are x components, odd are y.
        tmp2 = e.pafMat.transpose((2, 0, 1))
        tmp2_odd = np.amax(np.absolute(tmp2[::2, :, :]), axis=0)
        tmp2_even = np.amax(np.absolute(tmp2[1::2, :, :]), axis=0)

        a = fig.add_subplot(2, 2, 3)
        a.set_title('Vectormap-x')
        # plt.imshow(CocoPose.get_bgimg(inp, target_size=(vectmap.shape[1], vectmap.shape[0])), alpha=0.5)
        plt.imshow(tmp2_odd, cmap=plt.cm.gray, alpha=0.5)
        plt.colorbar()

        a = fig.add_subplot(2, 2, 4)
        a.set_title('Vectormap-y')
        # plt.imshow(CocoPose.get_bgimg(inp, target_size=(vectmap.shape[1], vectmap.shape[0])), alpha=0.5)
        plt.imshow(tmp2_even, cmap=plt.cm.gray, alpha=0.5)
        plt.colorbar()
        plt.savefig("mygraph.png")
    except Exception as exc:
        # BUG FIX: the handler previously bound the exception to `e`,
        # clobbering (and, in Python 3, deleting) the estimator variable;
        # also fixed the 'matplitlib' typo in the log message.
        logger.warning('matplotlib error, %s' % exc)

    cv2.imshow('result', image)
    cv2.waitKey()
| [
"noreply@github.com"
] | noreply@github.com |
28256be21b3e08c5125a0dc883ce7faab4d88b3e | 0731fefd8d3fec0a3ba76dcb69ebecf8c378a3a6 | /GenerateArchimedesSpiralPoints.py | 4848c6b3ef417cb0bf8b426d5807c6d95ec5fcfc | [
"MIT"
] | permissive | MrHuman22/Fusion360-Scripts | 38ad46ad49dfdd419e53a8e18ec7932f30c87c6a | 42dd8bd96fee5e395eba95dc27ca66ea3b374e46 | refs/heads/master | 2022-11-07T18:14:59.917355 | 2020-07-01T05:49:07 | 2020-07-01T05:49:07 | 276,012,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,238 | py | #Author-Peter Newman
#Description-Generates an archemedes spiral
import adsk.core, adsk.fusion, adsk.cam, traceback
from math import cos, sin, pi
def run(context):
    """Fusion 360 entry point: sketch an Archimedean spiral on the XY plane.

    Generates ``numTurns * pointsPerTurn + 1`` points along
    r = offset + distanceBetweenTurns * theta and fits one spline through
    them. Failures are reported in a message box with the traceback.
    """
    ui = None
    try:
        app = adsk.core.Application.get()
        ui = app.userInterface
        des = adsk.fusion.Design.cast(app.activeProduct)
        root = des.rootComponent
        sk = root.sketches.add(root.xYConstructionPlane)
        points = adsk.core.ObjectCollection.create()

        # Spiral parameters.
        # NOTE(review): the message below says 'mm' but Fusion's internal
        # length unit is cm -- confirm the intended unit.
        numTurns = 2
        pointsPerTurn = 40
        distanceBetweenTurns = 1
        theta = 0
        offset = 0

        ui.messageBox(f'Creating archimedes spiral with {numTurns} turns, offset from (0,0,0) by {offset}, {pointsPerTurn} points per turn and {distanceBetweenTurns}mm between turns')

        for i in range(pointsPerTurn * numTurns + 1):
            # Polar to Cartesian: radius grows linearly with the angle.
            r = offset + distanceBetweenTurns * theta
            x = r * cos(theta)
            y = r * sin(theta)
            points.add(adsk.core.Point3D.create(x, y, 0))
            theta += pi * 2 / pointsPerTurn  # iterate theta

        sk.sketchCurves.sketchFittedSplines.add(points)
        ui.messageBox('Complete!')
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt; catch Exception and show the traceback instead.
        if ui:
            ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
| [
"pwn394@newcastle.edu.au"
] | pwn394@newcastle.edu.au |
8d0af377fef87addec5a8ec9aa2e107cf6ad9be6 | af1c44a718c589ce40efd488adff9466f8d840f8 | /Todos.py | 28f9b5d4348ae828116504fc6f6b218e84f56333 | [] | no_license | ErickdeMauleon/Mineria-en-Python | f5bef0720fc9039febca2df24e65f3755f916aee | e242a545f9a08463bee0817ea5517ede97700a98 | refs/heads/master | 2020-03-30T21:55:03.837349 | 2018-10-04T23:13:29 | 2018-10-04T23:13:29 | 151,646,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,672 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 4 17:37:24 2018
@author: Santillan
"""
import numpy as np
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.tree import DecisionTreeClassifier

# Load the iris measurements into a dataframe and shorten the column names.
iris = load_iris()
datos = pd.DataFrame(iris.data, columns=iris.feature_names)
datos.head()
datos.columns
short_names = {
    "sepal length (cm)": "sepal_l",
    "sepal width (cm)": "sepal_w",
    "petal length (cm)": "petal_l",
    "petal width (cm)": "petal_w",
}
datos = datos.rename(index=str, columns=short_names)
datos['Target'] = iris.target

# Hold out 35% of the rows for validation (fixed seed for reproducibility).
X_train, X_test, y_train, y_test = train_test_split(
    datos.drop('Target', axis=1),   # X
    datos.Target,                   # y
    test_size=0.35,
    random_state=42,
)

modelo = ['Tree', 'Bagging', 'RandForest', 'Naive_bayes', 'NN']
train = []
test = []

# Hyper-parameter grid explored exhaustively by GridSearchCV.
grid_parametros = {
    'n_estimators': [50, 100, 150],
    'min_samples_split': [15, 20, 30],
    'class_weight': ['balanced', None],
}

# Fit a random forest under the grid search, scoring on accuracy.
rf = RandomForestClassifier()
rf_cv = GridSearchCV(rf, grid_parametros, verbose=10, scoring='accuracy')
rf_cv.fit(X_train, y_train)

# Best cross-validated score, hold-out accuracy, and the confusion matrix.
rf_cv.best_score_
np.sum(rf_cv.predict(X_test) == y_test) / len(y_test)
confusion_matrix(y_test, rf_cv.predict(X_test))
| [
"noreply@github.com"
] | noreply@github.com |
88a9307b7a34109f21a0c235a7127e3fb3b294b4 | ddd190a024211ffaa42094ffa6ead4cfec6637c8 | /spiders/some/test.py | f3610ebac21b227015bad71c309c4d7b1285f04a | [
"MIT"
] | permissive | goodking-bq/scrapy_workstation | 82d0bb2996ddf23d915b5f58283be52fc63c0c3f | 76a0897cbde417a7d07bbac9f42958f56109d0d1 | refs/heads/master | 2021-01-23T22:30:04.400939 | 2017-09-09T07:33:36 | 2017-09-09T07:33:36 | 102,935,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 584 | py | import requests
print 'start'
res = requests.post('http://www2.j32048downhostup9s.info/freeone/down.php',
data={'type': 'torrent',
'id': 'OK18BZy',
'name': 'OK18BZy'
}, headers={
"Content-Disposition": 'attachment; filename="OK18BZy.torrent"',
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36",
"Content-Type": "application/x-www-form-urlencoded",
})
print '==='
print res.content
| [
"120225883@qq.com"
] | 120225883@qq.com |
bb75ababe070a106b7be22792dee5606e5db1fff | 129b47345fc9d0036cf0a3fd6bf9ed0d579d864b | /DataScience/Analytics/sandbox/addressLinkingML.py | 32010a1f5382f23f07eee2f42caae4d9e272f437 | [
"LicenseRef-scancode-proprietary-license",
"MIT"
] | permissive | ONSdigital/address-index-data | 75055c1bbb9427308bfda7506e1e5986e52d55e0 | f14f8a22ed93e23436f43d47a897b895d81f82b0 | refs/heads/develop | 2023-08-17T23:24:40.017213 | 2023-08-15T09:49:00 | 2023-08-15T09:49:00 | 71,133,628 | 15 | 8 | MIT | 2023-08-15T09:49:02 | 2016-10-17T12:00:25 | Python | UTF-8 | Python | false | false | 8,537 | py | #!/usr/bin/env python
"""
ONS Address Index - Address Linking
===================================
This script can be used to test which string distance metrics are more important for
solving identifying correct matches.
Running
-------
After all requirements are satisfied, the script can be invoked using CPython interpreter::
python addressLinkingML.py
Requirements
------------
:requires: pandas (0.19.1)
:requires: numpy (1.11.2)
:requires: scikit-learn (0.18.1)
:requires: matplotlib (1.5.3)
Author
------
:author: Sami Niemi (sami.niemi@valtech.co.uk)
Version
-------
:version: 0.2
:date: 12-Dec-2016
"""
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier)
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve, auc
import matplotlib
from sklearn.externals import joblib
matplotlib.use('Agg') # to prevent Tkinter crashing on cdhut-d03
import matplotlib.pyplot as plt
def load_data(filepath='/Users/saminiemi/Projects/ONS/AddressIndex/linkedData/training_data.csv',
              verbose=False):
    """
    Reads in string distance metrics that can be used to train a supervised classification model.
    Returns the data in a dataframe with features and target.

    Rows where either UPRN is missing are dropped; remaining NaNs are filled
    with zero. The binary target is 1 when UPRN_old == UPRN, else 0.

    :param filepath: location and filename to read in
    :type filepath: str
    :param verbose: whether or not to show additional information
    :type verbose: bool

    :return: data containing string distance metrics and target (match=1, non-match=0)
    :rtype: pandas.DataFrame
    """
    # Expected columns and their dtypes; other columns in the file are ignored.
    columns = {'TestData_Index': np.int64, 'flat_dl': np.float64, 'pao_dl': np.float64, 'building_name_dl': np.float64,
               'building_number_dl': np.float64, 'pao_number_dl': np.float64, 'AddressBase_Index': np.int64,
               'street_dl': np.float64, 'town_dl': np.float64, 'locality_dl': np.float64, 'pao_suffix_dl': np.float64,
               'flatw_dl': np.float64, 'sao_number_dl': np.float64, 'organisation_dl': np.float64,
               'department_dl': np.float64, 'street_desc_dl': np.float64, 'similarity_sum': np.float64,
               'block_mode': np.int32, 'UPRN_old': np.float64, 'UPRN': np.float64}

    data = pd.read_csv(filepath, dtype=columns, low_memory=False, na_values=None, usecols=columns.keys())

    # The target cannot be defined without both UPRNs present.
    msk = data['UPRN'].isnull() | data['UPRN_old'].isnull()
    data = data.loc[~msk]

    examples = len(data.index)
    print('Found {} observations'.format(examples))

    # BUG FIX: the original called fillna(value=0., axis=1); a scalar fill is
    # axis-independent and the axis argument is deprecated/rejected by newer
    # pandas, so it is dropped here.
    data = data.fillna(value=0.)

    # Binary target: 1 when the historical UPRN equals the current one.
    msk = data['UPRN_old'] == data['UPRN']
    data['target'] = 0
    data.loc[msk, 'target'] = 1
    data.drop(['UPRN', 'UPRN_old', 'block_mode'], axis=1, inplace=True)

    positives = data['target'].sum()
    negatives = examples - positives

    if verbose:
        print(data.info())
        print('Found {} positives'.format(positives))
        print('Found {} negatives'.format(negatives))

    return data
def check_performance(y_test, y_pred, td_test, output='Logistic'):
    """
    Calculate AUC and plot ROC.

    :param y_test: actual results (binary target, 1 = correct match)
    :param y_pred: predicted probabilities for the positive class
    :param td_test: test-data index of each candidate pair; used to keep
                    only the highest-probability candidate per address
    :param output: stem of the saved ROC figure's file name

    :return: None
    """
    # Keep only the best-scoring candidate per TestData_Index before
    # counting correctly predicted addresses.
    combined_data = pd.DataFrame({'target': y_test, 'probability': y_pred, 'TestData_Index': td_test})
    combined_data.sort_values(by='probability', ascending=False, inplace=True)
    combined_data.drop_duplicates('TestData_Index', keep='first', inplace=True)

    print('Correctly Predicted = {} addresses'.format(combined_data['target'].sum()))
    # NOTE(review): the AUC below is computed over all candidate pairs,
    # not the de-duplicated set above.
    print('AUC={}'.format(roc_auc_score(y_test, y_pred)))

    fpr, tpr, thresholds = roc_curve(y_test, y_pred)
    roc_auc = auc(fpr, tpr)

    # ROC curve vs the random-classifier diagonal; saved to a hard-coded
    # output directory.
    plt.plot(fpr, tpr, lw=2, color='b', label='AUC = %0.2f' % roc_auc)
    plt.plot([0, 1], [0, 1], linestyle='--', lw=1.5, color='k', label='Random')
    plt.xlim([-0.01, 1.01])
    plt.ylim([-0.01, 1.01])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic example')
    plt.legend(loc="lower right")
    plt.savefig('/Users/saminiemi/Projects/ONS/AddressIndex/linkedData/' + output + 'ROC.png')
    plt.close()
def build_model(data):
    """
    Train a simple logistic regression model on 70 per cent of the data and test the performance on 30
    per cent of the data.

    The logistic regression uses L2-regularisation with intercept being fitted. The function outputs
    coefficient weights which can be interpreted as the importance of features. Computes the probabilities
    manually and asserts that they are the same as returned by scikit-learn. This is simply to confirm
    the mechanics of computing probabilities from scikit-learn intercept and coefficients.

    Also fits RandomForest and ExtraTrees classifiers for comparison; each
    fitted model is pickled to disk and its ROC/feature-importance plots
    are written to a hard-coded output directory.

    :param data: input data with features and target
    :type data: pandas.DataFrame

    :return: None
    """
    y = data['target'].values

    # Baseline score: the similarity-metric sum, normalised to [0, 1].
    similarity_sum = data['similarity_sum'].values
    similarity_sum /= similarity_sum.max()

    TestData_Index = data['TestData_Index']

    # Features: every distance metric (ids and the baseline removed);
    # pretty-print the column names for the plots.
    tmp = data.drop(['target', 'similarity_sum', 'TestData_Index', 'AddressBase_Index'], axis=1)
    columns = np.asarray([x.replace('_dl', '').replace('_', ' ') for x in tmp.columns.values])
    X = tmp.values

    X_train, X_test, y_train, y_test, ss_train, ss_test, td_train, td_test = \
        train_test_split(X, y, similarity_sum, TestData_Index, test_size=0.3, random_state=42)
    print('{} matches in test data'.format(np.sum(y_test)))

    # Baseline performance of the raw similarity sum.
    print('similarity sum:')
    check_performance(y_test, ss_test, td_test, output='SimilaritySum')

    # Three classifiers compared against the baseline.
    lg = LogisticRegression(class_weight='balanced', max_iter=100000, solver='sag', verbose=True, n_jobs=-1)
    rf = RandomForestClassifier(n_estimators=1000, n_jobs=-1, verbose=True)
    et = ExtraTreesClassifier(n_estimators=1000, n_jobs=-1, verbose=True)

    for clf, name in zip((lg, rf, et), ('LogisticRegression', 'RandomForest', 'ExtraTrees')):
        print('\n', name)
        # build model and store on disk
        clf.fit(X_train, y_train)
        joblib.dump(clf, '/Users/saminiemi/Projects/ONS/AddressIndex/linkedData/' + name + '.pkl')

        # predict probabilities and check performance
        y_pred = clf.predict_proba(X_test)
        check_performance(y_test, y_pred[:, 1], td_test, output=name)

        if 'Logistic' in name:
            # Linear model: coefficients are directly interpretable weights.
            print('\nFeature Importance:')
            print('Intercept = ', clf.intercept_[0])
            for column, coefficient in zip(columns, clf.coef_[0]):
                print('{0} = {1}'.format(column, coefficient))

            n_features_generator = range(len(clf.coef_[0]))
            indices = np.argsort(clf.coef_[0])[::-1]

            plt.figure(figsize=(16, 12))
            plt.title("Feature Importance")
            plt.bar(n_features_generator, clf.coef_[0][indices], color="r", align="center")
            plt.xticks(n_features_generator, columns, rotation=45)
            plt.xlim([-1, X.shape[1]])
            plt.savefig('/Users/saminiemi/Projects/ONS/AddressIndex/linkedData/' + name + 'FeatureImportance.png')
            plt.tight_layout()
            plt.close()

            # Sanity check: sigmoid(intercept + X.w) must equal predict_proba.
            manual_probs = 1. / (1 + np.exp(-(clf.intercept_[0] + np.sum(clf.coef_[0] * X_test, axis=1))))
            np.testing.assert_almost_equal(y_pred[:, 1], manual_probs)
        else:
            # Tree ensembles: impurity-based importances +/- std over trees.
            importances = clf.feature_importances_
            std = np.std([tree.feature_importances_ for tree in clf.estimators_], axis=0)
            indices = np.argsort(importances)[::-1]
            # Note: reorders `columns` in place for this and later iterations.
            columns = columns[indices]
            n_features_generator = range(X.shape[1])

            # Print the feature ranking
            print("Feature ranking:")
            for column, feature in zip(columns, n_features_generator):
                print("%d. feature %s = %.5f" % (feature + 1, column, importances[indices[feature]]))

            # Plot the feature importances of the forest
            plt.figure(figsize=(16, 12))
            plt.title("Feature Importance")
            plt.bar(n_features_generator, importances[indices], color="r", yerr=std[indices], align="center")
            plt.xticks(n_features_generator, columns, rotation=45)
            plt.xlim([-1, X.shape[1]])
            plt.savefig('/Users/saminiemi/Projects/ONS/AddressIndex/linkedData/' + name + 'FeatureImportance.png')
            plt.tight_layout()
            plt.close()
if __name__ == "__main__":
data = load_data()
build_model(data)
| [
"Sami.Niemi@valtech.co.uk"
] | Sami.Niemi@valtech.co.uk |
a2b54ee2f3628be86e3954e5c93eea54b2a128c6 | 4f66c3ee8707e8c85c8bf8ba15bb7ceb3fe00608 | /literature/PlanckSZ2015.py | 4e8cf69a704e36beff7220224e431d8f89bf1db1 | [] | no_license | edoaltamura/xl-zooms | bed433b5c272ffc90261be3d65ab109861c056de | fa4634cb999c5867d0349567412f0df2e3926dbb | refs/heads/master | 2023-06-24T23:41:27.419153 | 2021-07-27T18:12:08 | 2021-07-27T18:12:08 | 281,772,466 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,428 | py | import re
import os
import unyt
import numpy as np
from typing import List
from matplotlib import pyplot as plt
import itertools
from .cosmology import Article, repository_dir
# Free-text description passed to the Article base class.
comment = (
    "Planck 2015 results. XXVII. The second Planck catalogue of Sunyaev-Zeldovich sources"
)


class PlanckSZ2015(Article):
    """Planck 2015 SZ2 cluster catalogue (Y_SZ vs M_SZ) with binning,
    KDE and quick-look plotting helpers."""

    # Column names of planck2015_sz2.dat, in file order.
    field_names = ('source_number name y5r500 y5r500_error validation_status redshift redshift_source_name '
                   'mass_sz mass_sz_pos_err mass_sz_neg_err').split()

    def __init__(self, **cosmo_kwargs):
        super().__init__(
            citation="Planck Collaboration (2015)",
            comment=comment,
            bibcode="2016A&A...594A..27P",
            hyperlink="https://ui.adsabs.harvard.edu/abs/2016A%26A...594A..27P/abstract",
            **cosmo_kwargs
        )
        # Conversion from the catalogue's assumed h=0.70 to the adopted h.
        self.hconv = 0.70 / self.h
        self.process_data()
        self.bin_data()

    def process_data(self):
        """Parse the pipe-delimited catalogue file and attach one numpy
        array attribute per column (unit-converted where applicable)."""
        data = []
        with open(f'{repository_dir}/planck2015_sz2.dat') as f:
            lines = f.readlines()
        for line in lines:
            if not line.startswith('#') and not line.isspace():
                line_data = line.split('|')[1:-1]
                for i, element_data in enumerate(line_data):
                    if element_data.isspace():
                        # If no data, replace with Nan
                        line_data[i] = np.nan
                    elif re.search('[a-df-zA-Z]', element_data):
                        # If contains letters, remove white spaces
                        line_data[i] = element_data.strip()
                    else:
                        line_data[i] = float(element_data.strip())
                data.append(line_data)

        # Transpose rows -> columns, padding ragged rows with None.
        data = list(map(list, itertools.zip_longest(*data, fillvalue=None)))
        for i, field in enumerate(data):
            data[i] = np.array(field)

        # Redshift columns: data[5]
        ez = self.ez_function(data[5])
        luminosity_distance = self.luminosity_distance(data[5]) / ((data[5] + 1) ** 2)

        # Per-column conversion (None = keep as raw strings).
        # NOTE(review): only 9 factors for 10 field_names -- zip() silently
        # drops the last column, so 'mass_sz_neg_err' is never attached.
        # Confirm whether a tenth 1e14*hconv*Msun factor was intended.
        conversion_factors = [
            1,
            None,
            ez ** (-2 / 3) * (luminosity_distance.value * self.hconv * (np.pi / 10800.0) * unyt.arcmin) ** 2.0,
            ez ** (-2 / 3) * (luminosity_distance.value * self.hconv * (np.pi / 10800.0) * unyt.arcmin) ** 2.0,
            1,
            1,
            None,
            1.e14 * self.hconv * unyt.Solar_Mass,
            1.e14 * self.hconv * unyt.Solar_Mass,
        ]
        for i, (field, conversion) in enumerate(zip(self.field_names, conversion_factors)):
            if isinstance(data[i][0], str):
                setattr(self, field, data[i])
            else:
                setattr(self, field, data[i] * conversion)

    def bin_data(self, nbins: int = 10):
        """Median-bin y5r500 against mass_sz in log-spaced mass bins and
        attach binned_* attributes; also returns the binned arrays."""
        bins = np.logspace(
            min(np.log10(self.mass_sz.value)),
            max(np.log10(self.mass_sz.value)),
            num=nbins
        )
        # Geometric bin centres.
        bin_centres = 10. ** (0.5 * (np.log10(bins[1:]) + np.log10(bins[:-1])))
        digitized = np.digitize(self.mass_sz.value, bins)
        bin_median = [np.median(self.y5r500.value[digitized == i]) for i in range(1, len(bins))]
        bin_perc16 = [np.percentile(self.y5r500.value[digitized == i], 16) for i in range(1, len(bins))]
        bin_perc84 = [np.percentile(self.y5r500.value[digitized == i], 84) for i in range(1, len(bins))]
        setattr(self, 'binned_mass_sz', np.asarray(bin_centres) * unyt.Solar_Mass)
        setattr(self, 'binned_y5r500_median', np.asarray(bin_median) * unyt.arcmin ** 2)
        setattr(self, 'binned_y5r500_perc16', np.asarray(bin_perc16) * unyt.arcmin ** 2)
        setattr(self, 'binned_y5r500_perc84', np.asarray(bin_perc84) * unyt.arcmin ** 2)
        return bin_centres, bin_median, bin_perc16, bin_perc84

    def generate_kde(self):
        """2-D Gaussian KDE of (log10 mass_sz, log10 y5r500); returns
        (x grid, y grid, density), with grids in linear units."""
        # Perform the kernel density estimate
        import scipy.stats as st
        x = np.log10(self.mass_sz.value[~np.isnan(self.mass_sz.value)])
        y = np.log10(self.y5r500.value[~np.isnan(self.y5r500.value)])
        xmin = min(np.log10(self.mass_sz.value))
        xmax = max(np.log10(self.mass_sz.value))
        ymin = min(np.log10(self.y5r500.value))
        ymax = max(np.log10(self.y5r500.value))
        xx, yy = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
        positions = np.vstack([xx.ravel(), yy.ravel()])
        values = np.vstack([x, y])
        kernel = st.gaussian_kde(values)
        f = np.reshape(kernel(positions).T, xx.shape)
        return 10 ** xx, 10 ** yy, f

    def quick_display(self):
        """Scatter the catalogue with the binned median/percentile band
        overlaid (log-log axes) and show the figure interactively."""
        # Display the catalogue data
        plt.scatter(self.mass_sz.value, self.y5r500.value, c='orange', s=2)
        # kde = self.generate_kde()
        # plt.contour(kde[0], kde[1], kde[2], colors='k')

        # Overlay binned data
        plt.fill_between(
            self.binned_mass_sz,
            self.binned_y5r500_perc16,
            self.binned_y5r500_perc84,
            color='aqua', alpha=0.85, linewidth=0
        )
        plt.plot(self.binned_mass_sz, self.binned_y5r500_median, c='k')

        plt.ylabel(r'$Y_{SZ}\ (5 \times R_{500})$ [arcmin$^2$]')
        plt.xlabel(r'$M_{SZ}$ [M$_\odot$]')
        plt.title('Planck 2015 SZ2 catalogue')
        plt.xlim([5e13, 5e15])
        plt.ylim([1e-6, 2e-3])
        plt.xscale('log')
        plt.yscale('log')
        plt.show()
plt.close() | [
"38359901+edoaltamura@users.noreply.github.com"
] | 38359901+edoaltamura@users.noreply.github.com |
e8a9cbc0dc675c291596b8da02d4e99220ff8c24 | 70745034a4e117b643ee65db59f827ad1174cf53 | /TensorFlow/convolutional_NN.py | c47d4eab5fe27915c522846b4a32a3557edec441 | [] | no_license | mhichen/HandsOnML | 45a33b7bffc2460006f3e1ff0a22df3663402fe4 | bbdd242a546f1409a98737fc1150858963a73052 | refs/heads/master | 2020-03-15T14:51:43.966731 | 2018-05-24T00:43:35 | 2018-05-24T00:43:35 | 132,198,745 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,092 | py | #!/usr/bin/python3
import time
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn.datasets import load_sample_images, load_sample_image
if __name__ == "__main__":
china = load_sample_image("china.jpg")
flower = load_sample_image("flower.jpg")
dataset = np.array([china, flower], dtype = np.float32)
batch_size, height, width, channels = dataset.shape
filters = np.zeros(shape = (7, 7, channels, 2), dtype = np.float32)
filters[:, 3, :, 0] = 1
filters[3, :, :, 1] = 1
X = tf.placeholder(shape = (None, height, width, channels), dtype = tf.float32)
# s_h and s_w are both 2
#convolution = tf.nn.conv2d(X, filters, strides = [1, 2, 2, 1], padding = "SAME")
conv = tf.layers.conv2d(X, filters = 2, kernel_size = 7, strides = [2, 2], padding = "SAME")
init = tf.global_variables_initializer()
with tf.Session() as sess:
init.run()
output = sess.run(conv, feed_dict = {X: dataset})
plt.imshow(output[1, :, :, 1], cmap = "gray")
plt.show()
| [
"mhichen@umich.edu"
] | mhichen@umich.edu |
c4c6e0af2a87a16415a3f0575945f66d748ea0f4 | 2ed1cccb49ee1549f09747061a2513fb053c707d | /20181004/DProposed_gpu3.py | 91281bf826beb09c8480f4a1812fba4e8869a002 | [] | no_license | hhjung1202/Prob_network | 1c766ef5191727a63a38654622e21f0d986b923e | dedd4e525c9393f15452709dda377ceee9849c15 | refs/heads/master | 2020-03-22T11:42:27.705442 | 2018-11-11T14:29:39 | 2018-11-11T14:29:39 | 139,990,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,940 | py | import torch
from torch.autograd import Variable
import torch.optim as optim
from torchvision import datasets, transforms
from DPmodel import *
import os
import torch.backends.cudnn as cudnn
import time
import utils
os.environ["CUDA_VISIBLE_DEVICES"] = '3'
def main(model_dir, model, dataset, batch_size=128):
    """Train `model` on CIFAR for 300 epochs, alternating a normal pass and a
    'gate' pass each epoch, checkpointing every 5 epochs into `model_dir`.

    The learning rate is stepped 0.1 -> 0.01 -> 0.001 at epochs 150/225.
    """
    utils.default_model_dir = model_dir
    utils.c = None
    utils.str_w = ''
    # model = model
    lr = 0.1
    start_time = time.time()
    # NOTE(review): `batch_size is 128` relies on CPython small-int caching;
    # `==` would be the safe comparison here.
    if dataset == 'cifar10':
        if batch_size is 128:
            train_loader, test_loader = utils.cifar10_loader()
        elif batch_size is 64:
            train_loader, test_loader = utils.cifar10_loader_64()
    elif dataset == 'cifar100':
        train_loader, test_loader = utils.cifar100_loader()
    if torch.cuda.is_available():
        # os.environ["CUDA_VISIBLE_DEVICES"] = '0'
        print("USE", torch.cuda.device_count(), "GPUs!")
        model = nn.DataParallel(model).cuda()
        cudnn.benchmark = True
    else:
        print("NO GPU -_-;")
    optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9, weight_decay=1e-4, nesterov=True)
    criterion = nn.CrossEntropyLoss().cuda()
    start_epoch = 0
    # Resume from the latest checkpoint in model_dir if one exists.
    checkpoint = utils.load_checkpoint(model_dir)
    if not checkpoint:
        pass
    else:
        start_epoch = checkpoint['epoch'] + 1
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
    utils.init_learning(model.module)
    for epoch in range(start_epoch, 300):
        # Step-decay learning-rate schedule.
        if epoch < 150:
            learning_rate = lr
        elif epoch < 225:
            learning_rate = lr * 0.1
        else:
            learning_rate = lr * 0.01
        for param_group in optimizer.param_groups:
            param_group['lr'] = learning_rate
        # Phase 1: train the main weights.
        train(model, optimizer, criterion, train_loader, epoch, True)
        test(model, criterion, test_loader, epoch, True)
        # Phase 2: toggle to gate parameters, train them, toggle back.
        utils.switching_learning(model.module)
        print('switching_learning to Gate')
        train(model, optimizer, criterion, train_loader, epoch, False)
        test(model, criterion, test_loader, epoch, False)
        utils.switching_learning(model.module)
        print('switching_learning to Gate')
        if epoch % 5 == 0:
            model_filename = 'checkpoint_%03d.pth.tar' % epoch
            utils.save_checkpoint({
                'epoch': epoch,
                'model': model,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
            }, model_filename, model_dir)
    now = time.gmtime(time.time() - start_time)
    weight_extract(model, optimizer, criterion, train_loader, epoch)
    utils.conv_weight_L1_printing(model.module)
    print('{} hours {} mins {} secs for training'.format(now.tm_hour, now.tm_min, now.tm_sec))
def train(model, optimizer, criterion, train_loader, epoch, is_main):
    """Run one training epoch.  `is_main` only changes the log prefix
    ('Epoch' for the main phase, 'SWICH' for the gate phase)."""
    model.train()
    train_loss = 0
    total = 0
    correct = 0
    for batch_idx, (data, target) in enumerate(train_loader):
        if torch.cuda.is_available():
            data, target = Variable(data.cuda()), Variable(target.cuda())
        else:
            data, target = Variable(data), Variable(target)
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        # Legacy (pre-0.4) PyTorch scalar access; would need loss.item() on modern torch.
        train_loss += loss.data[0]
        _, predicted = torch.max(output.data, 1)
        total += target.size(0)
        correct += predicted.eq(target.data).cpu().sum()
        if batch_idx % 10 == 0 and is_main is True:
            utils.print_log('Epoch: {} | Batch: {} | Loss: ({:.4f}) | Acc: ({:.2f}%) ({}/{})'
                .format(epoch, batch_idx, train_loss / (batch_idx + 1), 100. * correct / total, correct, total))
            print('Epoch: {} | Batch: {} | Loss: ({:.4f}) | Acc: ({:.2f}%) ({}/{})'
                .format(epoch, batch_idx, train_loss / (batch_idx + 1), 100. * correct / total, correct, total))
        elif batch_idx % 10 == 0 and is_main is False:
            utils.print_log('SWICH: {} | Batch: {} | Loss: ({:.4f}) | Acc: ({:.2f}%) ({}/{})'
                .format(epoch, batch_idx, train_loss / (batch_idx + 1), 100. * correct / total, correct, total))
            print('SWICH: {} | Batch: {} | Loss: ({:.4f}) | Acc: ({:.2f}%) ({}/{})'
                .format(epoch, batch_idx, train_loss / (batch_idx + 1), 100. * correct / total, correct, total))
def weight_extract(model, optimizer, criterion, train_loader, epoch):
    """Run the training set through the model once and dump per-sample labels
    plus gate weights (collected by utils.weight_extract_densenet) to CSV."""
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        if torch.cuda.is_available():
            data, target = Variable(data.cuda()), Variable(target.cuda())
        else:
            data, target = Variable(data), Variable(target)
        optimizer.zero_grad()
        output = model(data)
        # NOTE(review): loss is computed but never back-propagated here —
        # this pass only extracts weights, it does not update the model.
        loss = criterion(output, target)
        utils.c = target.view(-1,1) # batch array torch.tensor[128]
        utils.c = utils.c.type(torch.cuda.FloatTensor)
        utils.weight_extract_densenet(model.module)
        # Serialise the accumulated rows (one line per sample) into utils.str_w.
        for i in utils.c:
            for j in i:
                utils.str_w = utils.str_w + str(j.tolist()) + ','
            utils.str_w += '\n'
        utils.save_to_csv()
        utils.str_w = ''
        if batch_idx % 100 == 0:
            print('Epoch: {}'.format(epoch))
def test(model, criterion, test_loader, epoch, is_main):
    """Evaluate on the test set and log loss/accuracy.  Appends the running
    `correct` count to the module-global `max_result` every batch, so
    max(max_result) is the best correct-count ever seen across epochs."""
    model.eval()
    test_loss = 0
    correct = 0
    total = 0
    for batch_idx, (data, target) in enumerate(test_loader):
        if torch.cuda.is_available():
            data, target = Variable(data.cuda()), Variable(target.cuda())
        else:
            data, target = Variable(data), Variable(target)
        outputs = model(data)
        loss = criterion(outputs, target)
        # Legacy (pre-0.4) PyTorch scalar access.
        test_loss += loss.data[0]
        _, predicted = torch.max(outputs.data, 1)
        total += target.size(0)
        correct += predicted.eq(target.data).cpu().sum()
        max_result.append(correct)
    if is_main is True:
        utils.print_log('# TEST : Epoch : {} | Loss: ({:.4f}) | Acc: ({:.2f}%) ({}/{}) | Err: ({:.2f}%) | Max: ({})'
            .format(epoch, test_loss/(batch_idx+1), 100.*correct/total, correct, total, 100-100.*correct/total, max(max_result)))
        print('# TEST : Epoch : {} | Loss: ({:.4f}) | Acc: ({:.2f}%) ({}/{}) | Err: ({:.2f}% | Max: ({}))'
            .format(epoch, test_loss/(batch_idx+1), 100.*correct/total, correct, total, 100-100.*correct/total, max(max_result)))
    elif is_main is False:
        utils.print_log('$ TEST_S : Epoch : {} | Loss: ({:.4f}) | Acc: ({:.2f}%) ({}/{}) | Err: ({:.2f}%) | Max: ({})'
            .format(epoch, test_loss/(batch_idx+1), 100.*correct/total, correct, total, 100-100.*correct/total, max(max_result)))
        print('$ TEST_S : Epoch : {} | Loss: ({:.4f}) | Acc: ({:.2f}%) ({}/{}) | Err: ({:.2f}% | Max: ({}))'
            .format(epoch, test_loss/(batch_idx+1), 100.*correct/total, correct, total, 100-100.*correct/total, max(max_result)))
# Classic ResNet depth options; only indexed (layer_set[5] == 110) by __main__.
layer_set = [14, 20, 32, 44, 56, 110]

def do_learning(model_dir, db, layer, num_gate=0, batch_s=128, block_config=(6,6,6), is_bottleneck=True):
    """Build a DenseNet and run the full training loop.

    :param model_dir: checkpoint/output directory
    :param db: 10 or 100 -> 'cifar10'/'cifar100'
    :param layer: NOTE(review): currently unused inside this function
    :param num_gate: forwarded to DenseNet
    :param batch_s: batch size (128 or 64)
    NOTE(review): num_classes is hard-coded to 10 even when db == 100 — confirm.
    """
    global max_result
    max_result = []
    model_selection = DenseNet(num_classes=10, num_gate=num_gate
        , block_config=block_config, is_bottleneck=is_bottleneck)
    dataset = 'cifar' + str(db)
    main(model_dir, model_selection, dataset, batch_s)
if __name__ == '__main__':
    # Ten runs, alternating a plain DenseNet (12,12,12 blocks) with a
    # bottlenecked DenseNet (6,6,6 blocks); each run checkpoints to its own dir.
    for i in range(10):
        if i % 2 == 0:
            block_config = (12, 12, 12)
            is_bottleneck = False
        else:
            block_config = (6,6,6)
            is_bottleneck = True
        model_dir = '../hhjung/Dense_Prop/cifar10/DenseNet40/' + str(i)
        do_learning(model_dir, 10, layer_set[5], num_gate=0
            , batch_s=64, block_config=block_config, is_bottleneck=is_bottleneck)
"hhjung1202@naver.com"
] | hhjung1202@naver.com |
6f91a39f20fe34c6fdf4363fd8d49ff102161a40 | 7b6da2978e072a6658687eb7a1ac89fb9b87a83b | /ProjetoFinal _Jogo_Gafanhoto_Vermelho/main.py | 741164b60eb4d9f9caa688b705f558b846384399 | [] | no_license | ribeirolivia/Projetos | 52fc6cb44a2a0e1e794acb84caccecb52c346a34 | 08b8c260374036b22305a23586d14d88e7a0ffb3 | refs/heads/main | 2023-06-23T08:07:38.431082 | 2021-07-20T22:28:39 | 2021-07-20T22:28:39 | 386,445,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | # coding: utf-8
import Introducao

# Entry point: show the game's intro screen, then its start menu.
Introducao.introducao()
Introducao.menu_intro()
| [
"noreply@github.com"
] | noreply@github.com |
43c082f25a6e6e30257d8c14911f7426fd6de2e1 | 89144239f73e73cde5da4537b13df699d680382d | /themes/pidginicons/pidgin_control.py | aaaf6252399b0c9437256fe570216f22b2a814be | [] | no_license | pspeter3/elementary-pidgin | 60c5b9e929defb72815b320a71c5f239a056c966 | 099eba81034726a916642d7d788aafc5e93846aa | refs/heads/master | 2020-05-18T16:22:20.121439 | 2012-04-09T19:46:48 | 2012-04-09T19:46:48 | 3,856,756 | 0 | 4 | null | null | null | null | UTF-8 | Python | false | false | 6,763 | py | #!/usr/bin/env python
#
# Copyright (C) 2009-2010 Jason Smith, Rico Tzschichholz
# 2010 Lukasz Piepiora, Robert Dyer
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import atexit
import gobject
import dbus
import dbus.glib
import glib
import sys
import os
# The applet is useless without the DockManager helper library; exit quietly
# if it (or the signal/exit helpers) cannot be imported.
try:
    from dockmanager.dockmanager import DockManagerItem, DockManagerSink, DOCKITEM_IFACE
    from signal import signal, SIGTERM
    from sys import exit
except ImportError, e:
    exit()

# D-Bus coordinates of the libpurple (Pidgin) service.
pidginbus = "im.pidgin.purple.PurpleService"
pidginpath = "/im/pidgin/purple/PurpleObject"
pidginitem = "im.pidgin.purple.PurpleInterface"
class PidginDBus():
    """Thin wrapper over the libpurple D-Bus interface for reading/setting
    the saved status.  Status-type codes used below:
    1=offline, 2=available, 3=busy, 4=invisible, 5=away."""
    def __init__(self):
        bus = dbus.SessionBus()
        obj = bus.get_object (pidginbus, pidginpath)
        self.iface = dbus.Interface (obj, pidginitem)

    def IsConnected(self):
        # Connected == current saved status is anything but offline (1).
        status = self.iface.PurpleSavedstatusGetCurrent()
        return not self.iface.PurpleSavedstatusGetType(status) == 1

    def IsAway(self):
        # NOTE(review): despite the name, this returns True when the status
        # is NOT away (5).  The commented-out menu code in PidginItem appears
        # to rely on this inverted meaning — confirm before "fixing".
        status = self.iface.PurpleSavedstatusGetCurrent()
        return not self.iface.PurpleSavedstatusGetType(status) == 5

    def getStatus(self):
        # Raw status-type code of the current saved status.
        status = self.iface.PurpleSavedstatusGetCurrent()
        return self.iface.PurpleSavedstatusGetType(status)

    def Available(self):
        new_status = self.iface.PurpleSavedstatusNew("", 2)
        self.iface.PurpleSavedstatusActivate(new_status)

    def Disconnect(self):
        new_status = self.iface.PurpleSavedstatusNew("", 1)
        self.iface.PurpleSavedstatusActivate(new_status)

    def Away(self):
        new_status = self.iface.PurpleSavedstatusNew("", 5)
        self.iface.PurpleSavedstatusActivate(new_status)

    def Busy(self):
        new_status = self.iface.PurpleSavedstatusNew("", 3)
        self.iface.PurpleSavedstatusActivate(new_status)

    def Invisible(self):
        new_status = self.iface.PurpleSavedstatusNew("", 4)
        self.iface.PurpleSavedstatusActivate(new_status)
class PidginItem(DockManagerItem):
    """Dock item for Pidgin: provides status menu entries, keeps the dock
    icon in sync with the current status, and shows the unread-message
    count as a badge."""
    def __init__(self, sink, path):
        DockManagerItem.__init__(self, sink, path)
        self.pidgin = None
        #Menu Items
        self.add_menu_item ("Available", "/usr/share/pixmaps/pidgin/status/16/available.png","Status")
        self.add_menu_item ("Away", "/usr/share/pixmaps/pidgin/status/16/away.png","Status")
        self.add_menu_item ("Busy", "/usr/share/pixmaps/pidgin/status/16/busy.png","Status")
        self.add_menu_item ("Invisible", "/usr/share/pixmaps/pidgin/status/16/invisible.png","Status")
        self.add_menu_item ("Disconnect", "/usr/share/pixmaps/pidgin/status/16/offline.png","Status")
        # Watch the session bus so we notice Pidgin starting/stopping.
        self.bus.add_signal_receiver(self.name_owner_changed_cb,
                dbus_interface='org.freedesktop.DBus',
                signal_name='NameOwnerChanged')
        obj = self.bus.get_object ("org.freedesktop.DBus", "/org/freedesktop/DBus")
        self.bus_interface = dbus.Interface(obj, "org.freedesktop.DBus")
        # Async check whether Pidgin is already on the bus.
        self.bus_interface.ListNames (reply_handler=self.list_names_handler, error_handler=self.list_names_error_handler)
        self.bus.add_signal_receiver(self.status_changed, "AccountStatusChanged", pidginitem, pidginbus, pidginpath)
        self.bus.add_signal_receiver(self.conversation_updated, "ConversationUpdated", pidginitem, pidginbus, pidginpath)

    def list_names_handler(self, names):
        # Pidgin was already running when the applet started.
        if pidginbus in names:
            self.init_pidgin_objects()
            # self.set_menu_buttons()
            self.update_badge()

    def list_names_error_handler(self, error):
        print "error getting bus names - %s" % str(error)

    def name_owner_changed_cb(self, name, old_owner, new_owner):
        # Pidgin appeared on (or vanished from) the session bus.
        if name == pidginbus:
            if new_owner:
                self.init_pidgin_objects()
            else:
                self.pidgin = None
            # self.set_menu_buttons()
            self.update_badge()

    def init_pidgin_objects(self):
        self.pidgin = PidginDBus()
        self.update_icon()

    def status_changed(self, account, old, new):
        # self.set_menu_buttons()
        self.update_icon()
        self.update_badge()

    def update_icon(self):
        """Swap the dock icon for the one matching the current status."""
        status = self.pidgin.getStatus()
        # NOTE(review): status_image is unbound (NameError) for any status
        # code other than 1-5 — confirm libpurple never reports others here.
        if status == 2:
            status_image = '/usr/share/pixmaps/pidgin/status/48/available.svg'
        elif status == 5:
            status_image = '/usr/share/pixmaps/pidgin/status/48/away.svg'
        elif status == 3:
            status_image = '/usr/share/pixmaps/pidgin/status/48/busy.svg'
        elif status == 4:
            status_image = '/usr/share/pixmaps/pidgin/status/48/invisible.svg'
        elif status == 1:
            status_image = '/usr/share/pixmaps/pidgin/status/48/offline.svg'
        self.set_icon(status_image)
        return True

    def conversation_updated(self, conv, type):
        self.update_badge()

    # def clear_menu_buttons(self):
    #    for k in self.id_map.keys():
    #        self.remove_menu_item(k)

    # def set_menu_buttons(self):
    #    self.clear_menu_buttons()
    #
    #    if not self.pidgin or not self.iface:
    #        return
    #
    #    if self.pidgin.IsConnected():
    #        if self.pidgin.IsAway():
    #            self.add_menu_item ("Set Away", "/usr/share/pixmaps/pidgin/status/16/away.png")
    #        else:
    #            self.add_menu_item ("Set Available", "/usr/share/pixmaps/pidgin/status/16/available.png")
    #        self.add_menu_item ("Disconnect", "/usr/share/pixmaps/pidgin/status/16/offline.png")
    #    else:
    #        self.add_menu_item ("Connect", "/usr/share/pixmaps/pidgin/status/16/available.png")

    def update_badge(self):
        """Show the total unseen-message count across all conversations."""
        if not self.pidgin:
            self.reset_badge()
            return False
        convs = self.pidgin.iface.PurpleGetConversations()
        count = 0
        for conv in convs:
            count = count + self.pidgin.iface.PurpleConversationGetData(conv, "unseen-count")
        if count:
            self.set_badge("%s" % count)
        else:
            self.reset_badge()
        return True

    def menu_pressed(self, menu_id):
        # Map the dock's numeric menu id back to our label, then dispatch.
        menu_id = self.id_map[menu_id]
        if menu_id == "Disconnect":
            self.pidgin.Disconnect()
        elif menu_id == "Away":
            self.pidgin.Away()
        elif menu_id == "Invisible":
            self.pidgin.Invisible()
        elif menu_id == "Busy":
            self.pidgin.Busy()
        else:
            self.pidgin.Available()
class PidginSink(DockManagerSink):
    """Attaches a PidginItem to every dock item backed by pidgin.desktop."""
    def item_path_found(self, pathtoitem, item):
        if item.Get(DOCKITEM_IFACE, "DesktopFile", dbus_interface="org.freedesktop.DBus.Properties").endswith ("pidgin.desktop"):
            self.items[pathtoitem] = PidginItem(self, pathtoitem)
pidginsink = PidginSink()

def cleanup ():
    # Release dock items / signal receivers on interpreter exit.
    pidginsink.dispose ()

if __name__ == "__main__":
    mainloop = gobject.MainLoop(is_running=True)
    atexit.register (cleanup)
    # Convert SIGTERM into a normal exit so the atexit cleanup runs.
    signal(SIGTERM, lambda signum, stack_frame: exit(1))
    mainloop.run()
| [
"pspeter333@gmail.com"
] | pspeter333@gmail.com |
69cd60edbe6f2e2089ecca8b242444c3acff3313 | cb935e728ae61c7e8cf367886e99ab568211d580 | /pong.py | 7a19eec8c0554e72fbbce3ac4c8597ad00812fa1 | [] | no_license | andrewhuyha/Pong | 65856f415310c69ce9cf0443faffd71b6a7cce78 | f7ef82c4666956d7b3de30bc657ee47c9a7854ca | refs/heads/master | 2021-06-24T13:53:20.276847 | 2021-04-27T05:12:05 | 2021-04-27T05:12:05 | 223,002,290 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,848 | py | #First Python Game!
#By: Andrew Ha
#Date Started: 10/3/19
import turtle

# Game window: 800x600, black background; frames are drawn manually
# (see tracer(0) below plus gameScreen.update() in the main loop).
gameScreen = turtle.Screen()
gameScreen.title("Pong")
gameScreen.bgcolor("black")
gameScreen.setup(width = 800, height = 600)

#Score
playerOneScore = 0
playerTwoScore = 0

#Stops window from updating
gameScreen.tracer(0)

#Paddle 1(Left Side)
paddleOne = turtle.Turtle()
paddleOne.speed(0)
paddleOne.shape("square")
paddleOne.color("magenta")
paddleOne.shapesize(stretch_wid = 5, stretch_len = 1)
paddleOne.penup()
paddleOne.goto(-350, 0)

#Paddle 2(Right Side)
paddleTwo = turtle.Turtle()
paddleTwo.speed(0)
paddleTwo.shape("square")
paddleTwo.color("magenta")
paddleTwo.shapesize(stretch_wid = 5, stretch_len = 1)
paddleTwo.penup()
paddleTwo.goto(350, 0)

#Ball - dx/dy are the per-frame velocity components.
ball = turtle.Turtle()
ball.speed(0)
ball.shape("circle")
ball.color("white")
ball.penup()
ball.dx = 5
ball.dy = 5

#Draw - hidden turtle used only to write the scoreboard text.
draw = turtle.Turtle()
draw.speed(0)
draw.color("white")
draw.penup()
draw.hideturtle()
draw.goto(0 , 260)
draw.write("Player 1 0 Player 2 0" , align = "center", font= ("Courier", 24, "normal"))

#Movements
def paddleOneUp():
    """Move the left paddle 40px up, clamped to the top edge (y=250)."""
    paddleOne.sety(min(paddleOne.ycor() + 40, 250))
def paddleOneDown():
    """Move the left paddle 40px down, clamped to the bottom edge (y=-250)."""
    paddleOne.sety(max(paddleOne.ycor() - 40, -250))
def paddleTwoUp():
    """Move the right paddle 30px up, clamped to the top edge (y=250)."""
    paddleTwo.sety(min(paddleTwo.ycor() + 30, 250))
def paddleTwoDown():
    """Move the right paddle 30px down, clamped to the bottom edge (y=-250)."""
    paddleTwo.sety(max(paddleTwo.ycor() - 30, -250))
#Keyboard Binding: W/S for the left paddle, arrow keys for the right.
gameScreen.listen()
gameScreen.onkeypress(paddleOneUp, "w")
gameScreen.onkeypress(paddleOneDown, "s")
gameScreen.onkeypress(paddleTwoUp, "Up")
gameScreen.onkeypress(paddleTwoDown, "Down")

#Main game loop
while True:
    gameScreen.update()
    #Ball Movement
    ball.setx(ball.xcor() + ball.dx)
    ball.sety(ball.ycor() + ball.dy)
    #Check to see if the ball hits the Upper and Lower Borders (bounce)
    if ball.ycor() > 290:
        ball.sety(290)
        ball.dy *= -1
    if ball.ycor() < -280:
        ball.sety(-280)
        ball.dy *= -1
    #Check to see if the ball hits the Right and Left Borders (score + reset)
    if ball.xcor() > 390:
        ball.goto(0,0)
        ball.dx *= -1
        playerOneScore += 1
        draw.clear()
        draw.write("Player 1: {} Player 2: {}".format(playerOneScore, playerTwoScore), align = "center", font= ("Courier", 24, "normal"))
    if ball.xcor() < -390:
        ball.goto(0,0)
        ball.dx *= -1
        playerTwoScore += 1
        draw.clear()
        draw.write("Player 1: {} Player 2: {}".format(playerOneScore, playerTwoScore), align = "center", font= ("Courier", 24, "normal"))
    #Collision with ball and paddle: reflect horizontally off the paddle face.
    if (ball.xcor() > 340 and ball.xcor() < 350) and (ball.ycor() < paddleTwo.ycor() + 40 and ball.ycor() > paddleTwo.ycor() - 40):
        ball.setx(340)
        ball.dx *= -1
    if (ball.xcor() < -340 and ball.xcor() > -350) and (ball.ycor() < paddleOne.ycor() + 40 and ball.ycor() > paddleOne.ycor() - 40):
        ball.setx(-340)
        ball.dx *= -1
| [
"noreply@github.com"
] | noreply@github.com |
7ba35e17eca92a7be3abb0ee3e77b2ce98eda3bd | c4165e3e4da2c9981a9483fd8bda0721fd4f2b84 | /src/main/google/LRU.py | 164df7594bf5c62bafd8dc5c459d523b77f1ebbd | [] | no_license | tczhaodachuan/LeetCode | 6cd1704651d6d25306a50c583a93202f27e487d2 | df178fa4f97f163aa5890ec28094e6d655bfbc9a | refs/heads/master | 2021-05-24T03:29:48.555117 | 2020-07-22T03:40:50 | 2020-07-22T03:40:50 | 73,576,097 | 1 | 0 | null | 2020-07-22T03:40:51 | 2016-11-12T20:16:58 | Python | UTF-8 | Python | false | false | 1,392 | py | class LRU(object):
def __init__(self, capacity):
self.capacity = capacity
self.cache = dict()
self.head = None
self.end = None
def get(self, key):
if self.cache.has_key(key):
node = self.cache.get(key)
self.remove(node)
self.setHead(node)
return node
return -1
def remove(self, node):
if node.pre_p != None:
node.pre_p = node.next_p
else:
self.head = node.next_p
if node.next_p != None:
node.next_p.pre_p = node.pre_p
else:
self.end = node.pre_p
def setHead(self, node):
node.next_p = self.head
node.pre_p = None
if self.head != None:
self.head.pre_p = node
self.head = node
if self.end == None:
self.end = self.head
def setKey(self, key, value):
if self.cache.has_key(key):
node = self.cache.get(key)
node.value = value
self.remove(node)
self.setHead(node)
return node
else:
node = Node(key, value)
self.cache.setdefault(key, node)
self.setHead(node)
class Node(object):
    """A doubly linked list node holding one LRU cache entry."""
    def __init__(self, key, value):
        self.key = key
        self.value = value
        # Neighbour links in the recency list; set when inserted via setHead.
        self.pre_p = None
        self.next_p = None
| [
"tczhaodachuan@gmail.com"
] | tczhaodachuan@gmail.com |
8ef9af340d5e228e081e4752208ca6f0fc86e61c | 45284836ae85685226b1f1e3b83e207e184aee0e | /05_ProbabilityAndStatistics/01_ProbAndStatsInPython_Beginner/01_IntroductionToStatistics/11_MeasuresOfCentralTendency.py | f822187cd0dd31bf9867f58f1fd34ff63b9187d8 | [] | no_license | gaurab123/DataQuest | 5060efc3d3449e6e098cb77d7fed913516aabdbd | a9da9a90fab639d239340edfc7d0b2010edf2b35 | refs/heads/master | 2021-09-14T15:10:13.047034 | 2018-05-02T19:11:23 | 2018-05-02T19:11:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,400 | py | print("this mission cannot be run locally as the data used is loaded \"behind the scenes\" and I really don't have access to it")
import matplotlib.pyplot as plt
# NOTE(review): test_scores_normal / test_scores_negative / test_scores_positive
# are numpy arrays loaded by the course environment; running this file
# stand-alone raises NameError (as the print above warns).

# Let's put a line over our plot that shows the mean.
# This is the same histogram we plotted for skew a few screens ago.
plt.hist(test_scores_normal)
# We can use the .mean() method of a numpy array to compute the mean.
mean_test_score = test_scores_normal.mean()
# The axvline function will plot a vertical line over an existing plot.
plt.axvline(mean_test_score)

# Now we can show the plot and clear the figure.
plt.show()

# When we plot test_scores_negative, which is a very negatively skewed distribution, we see that the small values on the left pull the mean in that direction.
# Very large and very small values can easily skew the mean.
# Very skewed distributions can make the mean misleading.
plt.hist(test_scores_negative)
plt.axvline(test_scores_negative.mean())
plt.show()

# We can do the same with the positive side.
# Notice how the very high values pull the mean to the right more than we would expect.
plt.hist(test_scores_positive)
plt.axvline(test_scores_positive.mean())
plt.show()

mean_normal = test_scores_normal.mean()
mean_negative = test_scores_negative.mean()
mean_positive = test_scores_positive.mean()
print(mean_normal)
print(mean_negative)
print(mean_positive)
"kenneth.kite@gmail.com"
] | kenneth.kite@gmail.com |
9c7efaaa782f42236b3ee163464ef9d613bc033c | 0a5c103662e2ccea7698480bca28fb5c285aeafb | /info/dicom.py | 033a6a5fb0ed810ef20d7d4d98a1b7d9b7f8d109 | [] | no_license | joanshen0508/image_preprocessing | a8b9dc90e92552ca11af8b220a2ce235a558aef1 | 478e63593884d572a049590588df158c59447bab | refs/heads/master | 2022-04-02T17:08:56.559871 | 2019-10-29T15:24:48 | 2019-10-29T15:24:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,477 | py | from __future__ import division, print_function
import os
from os.path import join
from pandas import DataFrame
import re
import dicom
from inout.io_common import get_dicom_files_in_folder
class DicomDataSummary():
    """
    Generates a per-case CSV summary of DICOM series found under an input
    folder: image size, voxel spacing/pixel volume and selected header fields.
    """
    def __init__(self, **kwargs):
        # Defaults; __setattr__ below stores these as _input_folder/_output_folder.
        self.input_folder = 'input'
        self.output_folder = 'output'
        # All the arguments that are passed to the constructor of the class MUST have its name on it.
        for arg_name, arg_value in kwargs.items():
            self.__dict__["_" + arg_name] = arg_value

    def __getattr__(self, attr):
        '''Generic getter for all the properties of the class'''
        return self.__dict__["_" + attr]

    def __setattr__(self, attr, value):
        '''Generic setter for all the properties of the class'''
        self.__dict__["_" + attr] = value

    def generate_data_summary(self, folder_name_regex, file_name='data_summary'):
        """It generates a small summary from the data_sum as a CSV file (shape and voxel size)

        :param folder_name_regex: regex used to select the series sub-folder inside each case folder
        :param file_name: name of the CSV written into the output folder
        :return: None (writes <output_folder>/<file_name>)
        """
        # One row per case = per sub-directory of the input folder.
        cases = [x for x in os.listdir(self._input_folder) if os.path.isdir(join(self._input_folder, x))]
        cases.sort()
        # CSV column name -> DICOM header attribute to copy.
        colums_dic = {'Date':'AcquisitionDate',
                      'EchoTime':'EchoTime',
                      'EchoTrainLength':'EchoTrainLength',
                      'Manufacturer':'Manufacturer',
                      'Model':'ManufacturerModelName',
                      'Modality':'Modality',
                      'RepetitionTime': 'RepetitionTime',
                      'Orientation': 'ImageOrientationPatient'}
        extra_columns = ['Size', 'Spacing', 'PixelSize']
        all_columns = extra_columns + list(colums_dic.keys())
        data_sum = DataFrame(index=cases, columns=all_columns)
        # In this case we look for folders inside each case
        for c_case in cases:
            print(F"---------- {c_case}----------")
            try:
                matched_folders = [x for x in os.listdir(join(self._input_folder, c_case)) if not (re.search(folder_name_regex, x) is None)]
                if len(matched_folders) > 1:
                    print(F'Warning: more than one folder matched: {matched_folders}')
                if len(matched_folders) == 0:
                    print(F'Warning: folder not matched for {c_case}')
                    continue
                else:
                    # Only the first matching folder is summarised; header
                    # fields are read from that folder's first DICOM file.
                    final_folder_name = join(self._input_folder, c_case, matched_folders[0])
                    all_dicom_files = get_dicom_files_in_folder(final_folder_name)
                    ds = dicom.read_file(all_dicom_files[0]) # Reads dataset
                    for c_name, c_key in colums_dic.items():
                        data_sum.loc[c_case][c_name] = eval(F'ds.{c_key}')
                    # Volume size: in-plane rows x columns x number of slices (files).
                    data_sum.loc[c_case]['Size'] = F'{ds.Rows} x {ds.Columns} x {len(all_dicom_files)}'
                    spacing = ds.PixelSpacing
                    data_sum.loc[c_case]['Spacing'] = F'{spacing[0]} x {spacing[1]} x {ds.SliceThickness}'
                    # Voxel volume in (spacing units)^3.
                    data_sum.loc[c_case]['PixelSize'] = F'{spacing[0]*spacing[1]*ds.SliceThickness:.2f}'
            except Exception as e:
                # Best-effort: a bad case is reported and skipped, not fatal.
                print(F'Failed for folder {c_case}: {e}')
                continue
        data_sum.to_csv(join(self._output_folder, file_name))
| [
"olmozavala@gmail.com"
] | olmozavala@gmail.com |
e69f0f7583c1022af9442415e61c2769e37c4122 | dbf770eef8233f7da1850309cc4b7145bd8d67f1 | /PYTHON-ADVANCED-SEPT-2020/PYTHON ADVANCED/03_MULTYDIMENSINAL LISTS/EXERCISE/06_chess.py | 654a9eb872ce75844a3566d42fa88934b8ec214a | [] | no_license | vasil-panoff/PYTHON-ADVANCED-SEPT-2020_repo | 610a37d1681ce9d0aa86628523620e1571b438dd | c63434f91de42d2f1241b6d76a96c7c63711c1d0 | refs/heads/master | 2023-03-22T07:44:53.620221 | 2021-03-15T20:42:14 | 2021-03-15T20:42:14 | 309,829,800 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,217 | py | possible_moves = (
(-1, -2),
(-1, 2),
(1, -2),
(1, 2),
(2, -1),
(2, 1),
(-2, 1),
(-2, -1),
)
board_size = int(input())
matrix = [['0'] * board_size for i in range(board_size)]
def is_valid(i, j):
if i < 0 or j < 0 or i >= board_size or j >= board_size:
return False
return matrix[i][j] == "K"
knights_dict = {}
def update_knights(i1, j1, i2, j2):
if not is_valid(i2, j2):
return
if (i2, j2) not in knights_dict:
knights_dict[(i2, j2)] = []
knights_dict[(i2, j2)].append((i1, j1))
if (i1, j1) not in knights_dict:
knights_dict[i1, j1] = []
knights_dict[(i1, j1)].append((i2, j2))
for i in range(board_size):
row = list(input())
for j in range(board_size):
if row[j] == "K":
matrix[i][j] = "K"
for move_i, move_j in possible_moves:
i1 = i
j1 = j
i2 = i + move_i
j2 = j + move_j
update_knights(i1, j1, i2, j2)
num_removed = 0
max_knight = get_max_knight(knights_dict)
while len(max_knight) > 0:
remove_knight(matrix, max_knight)
knights_dict
num_removed += 1
print(num_removed)
| [
"vasil.panov@gmail.com"
] | vasil.panov@gmail.com |
f6462e692332d8b61c7908f67535fbd65c5926db | 1fb230e5382ccbf79898fad3a025f8ffeb957f70 | /raster_development/ee-density-upload-iso3.py | 2f44c821321c11ce0668be27e6661cb8e16c791b | [
"Apache-2.0"
] | permissive | GlobalFishingWatch/psychic-guide-squirrel | fcae6999248357280cc747455468b3fc54f8199c | 7d503abeddce80df7d42cdea5e982131e466305c | refs/heads/master | 2021-01-17T06:12:20.876972 | 2017-02-07T17:16:02 | 2017-02-07T17:16:02 | 55,070,984 | 1 | 1 | null | 2016-12-05T19:44:20 | 2016-03-30T14:42:18 | Jupyter Notebook | UTF-8 | Python | false | false | 12,328 | py | #!/usr/bin/env python
from __future__ import division
import copy
import datetime
import itertools as it
import json
import math
import multiprocessing as mp
import multiprocessing.dummy
import posixpath as pp
import random
import threading
import time
try:
from urlparse import urlsplit
except ImportError:
from urllib.parse import urlsplit
import click
import ee
from googleapiclient import discovery
import oauth2client.client
_GCS_API = None
_CREDENTIALS = None
def rate_limit(iterator, number, seconds):
    """Given an iterator, ensure that only N items are yielded within a given
    time window. Used to prevent swamping the API.

    Parameters
    ----------
    iterator : iter
        Produce items from this iterator.
    number : int
        Don't emit more than ``number`` of items per ``seconds``.
    seconds : int
        Don't emit more than ``number`` of items per ``seconds``.

    Yields
    ------
    object
        From ``iterator``.
    """
    # (An unused threading.Lock() was removed here — the generator is not
    # shared across threads and the lock was never acquired.)
    end = time.time() + seconds
    n_yielded = 0

    for item in iterator:
        yield item
        n_yielded += 1
        if n_yielded >= number or time.time() >= end:
            # Sleep out whatever is left of the window, plus a little jitter
            # so concurrent callers don't wake in lockstep.  The remainder is
            # clamped at zero: previously a window that had already elapsed
            # produced a negative value, and time.sleep() raises ValueError
            # for negative arguments.
            wait = max(0.0, end - time.time()) + random.uniform(0.001, 0.01)
            time.sleep(wait)
            end = time.time() + seconds
            n_yielded = 0
def _gcs_api():
    """Lazily ensure the GCS API has been discovered.

    Builds (once) and returns the module-level Storage v1 service client,
    authenticated with application-default credentials.
    """
    global _GCS_API
    if _GCS_API is None:
        _GCS_API = discovery.build('storage', 'v1', credentials=_credentials())
    return _GCS_API
def _credentials():
    """Lazily ensure we are authenticated.

    Fetches (once) and caches Google application-default credentials in the
    module-level _CREDENTIALS.
    """
    global _CREDENTIALS
    if _CREDENTIALS is None:
        _CREDENTIALS = oauth2client.client.GoogleCredentials.get_application_default()
    return _CREDENTIALS
def gcs_listdir(path):
    """Recursively list a GCS directory.

    `path` must be a gs://bucket/prefix URL; yields full gs:// URLs for every
    object under the prefix, following the API's result pagination.
    """
    url = urlsplit(path)
    assert url.scheme == 'gs', "Need a GCS URL, not: {}".format(path)
    bucket = url.netloc
    prefix = url.path[1:]
    request = _gcs_api().objects().list(bucket=bucket, prefix=prefix)
    response = request.execute()
    while response is not None:
        for item in response.get('items', []):
            yield 'gs://' + pp.join(item['bucket'], item['name'])
        # list_next returns None once all pages have been consumed.
        request = _gcs_api().objects().list_next(request, response)
        if request is None:
            break
        response = request.execute()
def _upload_params(paths, nodata, dst):
"""Given a bunch of file paths, construct Earth Engine ingestion requests.
The EE docs are lacking and the source is stupid hard to read. Here's
an example request that this thing builds:
{
'missingData': {
'value': -9999
},
'pyramidingPolicy': 'MODE',
'tilesets': [
{
'sources': [
{
'primaryPath': 'gs://new-benthos-pipeline/scratch/inital-daily-density/allvessels_2013-05-01.tif'
}
]
}
],
'id': 'users/kwurster/2013-05-01-trash',
'properties': {
'system:time_end': 1367366400000,
'system:time_start': 1367366400000,
'gfw': 'density'
}
}
Parameters
----------
paths : iter
Iterable producing GCS URLs.
nodata : str or int or float or None
Set nodata value or policy for all input images.
dst : str
Target image collection.
"""
for p in paths:
name = pp.basename(p)
start_time = datetime.date(
year=int(name[4:8]),
month=int(name[9:11]),
day=int(name[12:14]))
end_time = start_time + datetime.timedelta(days=1)
props = {
'system:time_end': 1000*time.mktime(start_time.timetuple()),
'system:time_start': 1000*time.mktime(end_time.timetuple()),
'country':name[:3],
# 'geartype':'trawler'
}
request = {
'pyramidingPolicy': 'MODE',
'tilesets': [
{
'sources': [
{
'primaryPath': p
}
]
}
],
'id': pp.splitext(pp.join(dst, name))[0],
'properties': props
}
if nodata is not None:
request['missingData'] = {
'value': nodata
}
yield request
def _ls_collection(path):
    """Recursively yield the ids of every Image in an Earth Engine
    ImageCollection (descending into nested collections)."""
    path = path.rstrip('/')
    for item in ee.data.getList({'id': path}):
        kind = item['type']
        if kind == 'Image':
            yield item['id']
        elif kind == 'ImageCollection':
            for sub_id in _ls_collection(item['id']):
                yield sub_id
        else:
            raise ValueError(
                "Unrecognized asset type: '{}'".format(kind))
def _upload(kwargs):
    """Send an ingestion request to Earth Engine. For use with
    `multiprocessing.dummy.Pool.imap_unordered()`.

    `kwargs` must carry: 'request' (ingestion dict), 'retries', 'retry_wait'
    (seconds between attempts) and 'task_id'.  Returns a result dict with the
    request, the API response (if any), a 'completed' flag and the string
    form of every exception encountered.
    """
    request = kwargs['request']
    retries = kwargs['retries']
    retry_wait = kwargs['retry_wait']
    task_id = kwargs['task_id']

    out = {
        'completed': False,
        'request': request,
        'response': None,
        'exceptions': []
    }

    attempt = 1
    while attempt <= retries:
        try:
            out['response'] = ee.data.startIngestion(task_id, request)
            out['completed'] = True
            break
        except ee.EEException as e:
            attempt += 1
            out['exceptions'].append(str(e))
            # Jittered back-off between attempts.
            time.sleep(retry_wait + random.uniform(0.1, 0.01))
    else:
        # while-else: every attempt failed without a break.
        out['completed'] = False

    return out
@click.command()
@click.argument('indir')
@click.argument('collection')
@click.option(
'--nodata', type=click.FLOAT, default=None, show_default=True,
help="Set nodata value for all images.")
@click.option(
'--qps', default=3, type=click.FLOAT, show_default=True,
help="Queries per second.")
@click.option(
'--retries', default=5, show_default=True,
help="Number of retries for any given API call.")
@click.option(
'--retry-wait', metavar='SECONDS', type=click.FLOAT, default=1.1,
show_default=True,
help="Amount of time to wait between retry attempts.")
@click.option(
'--threads', default=3, show_default=True,
help="Execute queries across N threads. Can't be more than --qps.")
@click.option(
'--wait / --no-wait', default=False, show_default=True,
help="Wait for all ingestion jobs to complete before exiting.")
@click.option(
'--wait-sleep', default=3, show_default=True,
help="When waiting for ingestion jobs to complete, sleep for N seconds "
"between each check.")
@click.pass_context
def cli(
ctx, indir, collection, nodata, qps, retries, threads, retry_wait, wait,
wait_sleep):
"""Upload density rasters to a single Earth Engine image collection.
Default behavior is to schedule the ingest requests and exit. Use the
--wait flag to poll until all tasks are complete.
If an input image already exists in the target collection it is ignored.
"""
collection = collection.rstrip('/')
print collection
if threads > qps:
raise click.BadParameter(
"'--threads' cannot be larger than '--qps': {} >= {}.".format(
threads, qps))
ee.Initialize()
ee.data.create_assets([collection], ee.data.ASSET_TYPE_IMAGE_COLL, False)
completed = set(map(pp.basename, _ls_collection(collection)))
# When checking to see if any of the requested uploads are running we can
# also grab their task ID's in case we need to --wait for everything to
# finish
discovered_running = set()
# Filter out images that have already been ingested or are currently running
for t in ee.data.getTaskList():
if t['task_type'] == 'INGEST':
o_path = t['description'].lower().split('asset ingestion: ')[1]
# Image was already successfully ingested
if t['state'] == 'COMPLETED':
url = urlsplit(t['output_url'][0])
o_path = url.query.split('asset=')[1]
if pp.dirname(o_path).rstrip('/') == collection:
completed.add(pp.basename(o_path))
# Image is currently being ingested
elif t['state'] == 'RUNNING':
if pp.dirname(o_path).rstrip('/') == collection:
completed.add(pp.basename(o_path))
discovered_running.add(t['id'])
inpaths = [
p for p in gcs_listdir(indir) if pp.basename(p)[:10] not in completed and pp.basename(p) != '']
task_ids = []
# All input paths have been ingested
if not inpaths and not discovered_running:
click.echo("All input images have already been ingested.")
ctx.exit()
elif inpaths:
id_batch_size = 1000
if len(inpaths) <= id_batch_size:
id_batches = [len(inpaths)]
else:
_n_full = int(math.floor(len(inpaths) / id_batch_size))
id_batches = ([id_batch_size] * _n_full) + [len(inpaths) % 1000]
task_ids = list(
it.chain.from_iterable(map(ee.data.newTaskId, id_batches)))
tasks = _upload_params(
paths=inpaths,
nodata=nodata,
dst=collection)
tasks = ({
'request': r,
'retries': retries,
'retry_wait': retry_wait,
'task_id': tid
} for tid, r in it.izip_longest(task_ids, tasks))
tasks = rate_limit(tasks, qps, 1)
label = 'Loading {} images'.format(len(inpaths))
progressbar = click.progressbar(tasks, length=len(inpaths), label=label)
with progressbar as tasks:
pool = mp.dummy.Pool(threads)
if threads == 1:
results = (_upload(t) for t in tasks)
else:
results = pool.imap_unordered(_upload, tasks)
for res in results:
if not res['completed']:
raise click.ClickException(
"Request failed: {}".format(json.dumps(res)))
if wait:
click.echo("Waiting for ingestion to finish ...")
check_ids = copy.deepcopy(set(task_ids)) | discovered_running
failed_tasks = []
n_failed = 0
n_completed = 0
while check_ids:
time.sleep(wait_sleep)
# Keep track of failed tasks so we can update the user each cycle
_n_failed = 0
_n_completed = 0
for task in ee.data.getTaskList():
if task['id'] in check_ids:
state = task['state']
if state == 'FAILED':
# Task is marked as failed, but only because the target
# asset already exists
if 'cannot overwrite asset' in \
task['error_message'].lower():
_n_completed += 1
# Actual failure
else:
_n_failed += 1
failed_tasks.append(task)
check_ids.discard(task['id'])
elif state == 'COMPLETED':
_n_completed += 1
check_ids.discard(task['id'])
# Update the user
n_failed += _n_failed
n_completed += _n_completed
if _n_failed > 0 or _n_completed > 0:
click.echo(
"Found {} failed and {} completed tasks".format(
_n_failed, _n_completed))
# Final status report
click.echo("Failed: {}".format(n_failed))
click.echo("Completed: {}".format(n_completed))
if failed_tasks:
click.echo("Failed tasks:")
for task in failed_tasks:
click.echo(json.dumps(task, sort_keys=True))
if __name__ == '__main__':
cli()
| [
"davidkroodsma@gmail.com"
] | davidkroodsma@gmail.com |
c0ab8764c55a83565f75e6d16b589701ec260dab | 2aa4feaa6eb3adf74d5e570be27a6ccf0d37d43b | /hello-worldj91.py | f6c4b7b0ad5c0afbbd688dc52588085087a3b541 | [] | no_license | applepie405/Marcel-s-Python-GUI-Window | 483f06c7703144e57c85c2d7058becedd8021c99 | 5386d5a03ad21d2cbbbd26615362c3c8e3cbc225 | refs/heads/main | 2023-03-23T23:25:21.744354 | 2021-03-24T02:02:09 | 2021-03-24T02:02:09 | 337,589,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,288 | py | from tkinter import *
root = Tk()
root.title("Goal Tracker")
# Create and set the message text variable
message_text = StringVar()
message_text.set("Welcome! You can deposit or withdraw money and see your progress towards your goals.")
# Create and pack the message label
message_label = Label(root, textvariable=message_text, wraplength=250)
message_label.pack()
# Create the PhotoImage and label to hold it
neutral_image = PhotoImage(file="/images/python/neutral.png")
image_label = Label(root, image=neutral_image)
image_label.pack()
# Create and set the account details variable
account_details = StringVar()
account_details.set("Savings: $0 \nTotal balance: $0")
# Create the details label and pack it into the GUI
details_label = Label(root, textvariable=account_details)
details_label.pack()
# Create a label for the amount field and pack it into the GUI
amount_label = Label(root, text="Amount:")
amount_label.pack()
# Create a variable to store the amount
amount = DoubleVar()
amount.set("")
# Create an entry to type in amount
amount_entry = Entry(root, textvariable=amount)
amount_entry.pack()
# Create a submit button
submit_button = Button(root, text="Submit")
submit_button.pack()
# Run the mainloop
root.mainloop()
| [
"noreply@github.com"
] | noreply@github.com |
9804f8564de4716be8abab8b427521211de74d90 | 50f938fe67c3c65c5a95fc8454e1a1817f8ef3d0 | /search insert position.py | f2531843935e0d7c7f0e89580b584ad07c903e25 | [] | no_license | guoziqingbupt/Lintcode-Answer | 44e4d5bab46fd1d3400c2de0b810269eb5606f20 | 4b92bf4ad6f170160550a052011e63c07a068106 | refs/heads/master | 2021-04-06T13:55:35.080919 | 2018-07-02T03:24:27 | 2018-07-02T03:24:27 | 125,335,286 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 507 | py | class Solution:
"""
@param A: an integer sorted array
@param target: an integer to be inserted
@return: An integer
"""
def searchInsert(self, A, target):
left, right = 0, len(A) - 1
while left <= right:
mid = (left + right) // 2
if A[mid] == target:
return mid
elif A[mid] < target:
left = mid + 1
else:
right = mid - 1
return left
# write your code here
| [
"guoziqing@bupt.edu.cn"
] | guoziqing@bupt.edu.cn |
e3d855b27a0237708ac51b2eaa0fc2658ab2ed91 | 21616c71bae82cbda2727788e049d163d1586884 | /vfb_rest/vfb_rest/wsgi.py | d7dd5ee2d00e1fb9eb37f26d38b1d1804a4d025d | [] | no_license | VirtualFlyBrain/vfb_rest | 25d263099596dd7281e8edfacd8b1eb6075c6e41 | 06959ca042c357bfe0082e4701cf024a8ac4b9f3 | refs/heads/master | 2023-07-06T14:29:23.381265 | 2021-03-24T15:56:59 | 2021-03-24T15:56:59 | 146,908,474 | 0 | 0 | null | 2023-09-06T02:23:00 | 2018-08-31T15:16:07 | HTML | UTF-8 | Python | false | false | 393 | py | """
WSGI config for vfb_rest project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'vfb_rest.settings')
application = get_wsgi_application()
| [
"nicolas.matentzoglu@gmail.com"
] | nicolas.matentzoglu@gmail.com |
7331e23f58cbfdb1cc393fe29479040421c5b479 | f87c2674bad47cf76d3c05c12ad2f9c307702668 | /psupr_ca2_JT_7496.py | f6ec3afbdfaf4dae6988359d4bd40d5b5d5605d9 | [] | no_license | gengliangyu2008/PRMLS-CA2-project | 5d401a56f230011a03d6d1a4306978b675b77939 | 3706fa7c3b7d53e186e71f78e799938e5064c796 | refs/heads/master | 2022-02-21T18:24:13.220553 | 2019-09-29T15:59:59 | 2019-09-29T15:59:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,842 | py | # -*- coding: utf-8 -*-
"""PSUPR_CA2.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1KOPmeiyA7s63G7nArAYTy7XikXryYSGj
"""
import numpy as np
import sklearn.metrics as metrics
import matplotlib.pyplot as plt
import h5py
from tensorflow.keras.callbacks import ModelCheckpoint,CSVLogger,LearningRateScheduler
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import AveragePooling2D
from tensorflow.keras.layers import add
from tensorflow.keras.regularizers import l2
from tensorflow.keras.utils import to_categorical
from tensorflow.keras import optimizers
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from google.colab import files
uploaded = files.upload()
def implt(img):
plt.figure()
plt.imshow(img)
plt.axis('off')
# Set up 'ggplot' style
plt.style.use('ggplot') # if want to use the default style, set 'classic'
plt.rcParams['ytick.right'] = True
plt.rcParams['ytick.labelright']= True
plt.rcParams['ytick.left'] = False
plt.rcParams['ytick.labelleft'] = False
plt.rcParams['font.family'] = 'Arial'
def read_data_set(h5_file='out.h5'):
with h5py.File(h5_file, 'r') as hf:
X_train = hf['X_train'].value
print('Read X_train: ', X_train.shape)
y_train = hf['y_train'].value
print('Read y_train: ', y_train.shape)
X_test = hf['X_test'].value
print('Read X_test: ', X_test.shape)
y_test = hf['y_test'].value
print('Read y_test: ', y_test.shape)
return (X_train, y_train, X_test, y_test)
X_train_data, y_train_data, X_test_data, y_test_data = read_data_set(h5_file='ca2data.h5' )
#data = cifar10.load_data()
(trDat, trLbl) = X_train_data, y_train_data
(tsDat, tsLbl) = X_test_data, y_test_data
# Convert the data into 'float32'
# Rescale the values from 0~255 to 0~1
trDat = trDat.astype('float32')/255
tsDat = tsDat.astype('float32')/255
# Retrieve the row size of each image
# Retrieve the column size of each image
imgrows = trDat.shape[1]
imgclms = trDat.shape[2]
channel = trDat.shape[3]
# Perform one hot encoding on the labels
# Retrieve the number of classes in this problem
trLbl = to_categorical(trLbl)
tsLbl = to_categorical(tsLbl)
num_classes = tsLbl.shape[1]
# fix random seed for reproducibility
seed = 42
np.random.seed(seed)
optmz = optimizers.Adam(lr=0.001)
modelname = 'cifar10ResV1Cfg5'
# define the deep learning model
def resLyr(inputs,
numFilters=16,
kernelSz=3,
strides=1,
activation='relu',
batchNorm=True,
convFirst=True,
lyrName=None):
convLyr = Conv2D(numFilters,
kernel_size=kernelSz,
strides=strides,
padding='same',
kernel_initializer='he_normal',
kernel_regularizer=l2(1e-4),
name=lyrName+'_conv' if lyrName else None)
x = inputs
if convFirst:
x = convLyr(x)
if batchNorm:
x = BatchNormalization(name=lyrName+'_bn' if lyrName else None)(x)
if activation is not None:
x = Activation(activation,name=lyrName+'_'+activation if lyrName else None)(x)
else:
if batchNorm:
x = BatchNormalization(name=lyrName+'_bn' if lyrName else None)(x)
if activation is not None:
x = Activation(activation,name=lyrName+'_'+activation if lyrName else None)(x)
x = convLyr(x)
return x
def resBlkV1(inputs,
numFilters=16,
numBlocks=5,
downsampleOnFirst=True,
names=None):
x = inputs
for run in range(0,numBlocks):
strides = 1
blkStr = str(run+1)
if downsampleOnFirst and run == 0:
strides = 2
y = resLyr(inputs=x,
numFilters=numFilters,
strides=strides,
lyrName=names+'_Blk'+blkStr+'_Res1' if names else None)
y = resLyr(inputs=y,
numFilters=numFilters,
activation=None,
lyrName=names+'_Blk'+blkStr+'_Res2' if names else None)
if downsampleOnFirst and run == 0:
x = resLyr(inputs=x,
numFilters = numFilters,
kernelSz=1,
strides=strides,
activation=None,
batchNorm=False,
lyrName=names+'_Blk'+blkStr+'_lin' if names else None)
x = add([x,y],
name=names+'_Blk'+blkStr+'_add' if names else None)
x = Activation('relu',
name=names+'_Blk'+blkStr+'_relu' if names else None)(x)
return x
def createResNetV1(inputShape=(128,128,3),
numClasses=3):
inputs = Input(shape=inputShape)
v = resLyr(inputs,
lyrName='Input')
v = resBlkV1(inputs=v,
numFilters=16,
numBlocks=5,
downsampleOnFirst=False,
names='Stg1')
v = resBlkV1(inputs=v,
numFilters=32,
numBlocks=5,
downsampleOnFirst=True,
names='Stg2')
v = resBlkV1(inputs=v,
numFilters=64,
numBlocks=5,
downsampleOnFirst=True,
names='Stg3')
v = resBlkV1(inputs=v,
numFilters=128,
numBlocks=5,
downsampleOnFirst=True,
names='Stg4')
v = AveragePooling2D(pool_size=8,
name='AvgPool')(v)
v = Flatten()(v)
outputs = Dense(numClasses,
activation='softmax',
kernel_initializer='he_normal')(v)
model = Model(inputs=inputs,outputs=outputs)
model.compile(loss='categorical_crossentropy',
optimizer=optmz,
metrics=['accuracy'])
return model
# Setup the models
model = createResNetV1() # This is meant for training
modelGo = createResNetV1() # This is used for final testing
model.summary()
def lrSchedule(epoch):
lr = 1e-3
if epoch > 160:
lr *= 0.5e-3
elif epoch > 140:
lr *= 1e-3
elif epoch > 120:
lr *= 1e-2
elif epoch > 80:
lr *= 1e-1
print('Learning rate: ', lr)
return lr
LRScheduler = LearningRateScheduler(lrSchedule)
# Create checkpoint for the training
# This checkpoint performs model saving when
# an epoch gives highest testing accuracy
filepath = modelname + ".hdf5"
checkpoint = ModelCheckpoint(filepath,
monitor='val_acc',
verbose=0,
save_best_only=True,
mode='max')
# Log the epoch detail into csv
csv_logger = CSVLogger(modelname +'.csv')
callbacks_list = [checkpoint,csv_logger,LRScheduler]
# Fit the model
datagen = ImageDataGenerator(width_shift_range=0.1,
height_shift_range=0.1,
rotation_range=20,
horizontal_flip=True,
vertical_flip=False)
model.fit_generator(datagen.flow(trDat, trLbl, batch_size=32),
validation_data=(tsDat, tsLbl),
epochs=200, #originally 200
verbose=1,
steps_per_epoch=len(trDat)/32,
callbacks=callbacks_list)
# Now the training is complete, we get
# another object to load the weights
# compile it, so that we can do
# final evaluation on it
modelGo.load_weights(filepath)
modelGo.compile(loss='categorical_crossentropy',
optimizer=optmz,
metrics=['accuracy'])
# Make classification on the test dataset
predicts = modelGo.predict(tsDat)
# Prepare the classification output
# for the classification report
predout = np.argmax(predicts,axis=1)
testout = np.argmax(tsLbl,axis=1)
labelname = ['cat',
'bird',
'dog']
# the labels for the classfication report
testScores = metrics.accuracy_score(testout,predout)
confusion = metrics.confusion_matrix(testout,predout)
print("Best accuracy (on testing dataset): %.2f%%" % (testScores*100))
print(metrics.classification_report(testout,predout,target_names=labelname,digits=4))
print(confusion)
import pandas as pd
records = pd.read_csv(modelname +'.csv')
plt.figure()
plt.subplot(211)
plt.plot(records['val_loss'])
plt.plot(records['loss'])
plt.yticks([0,0.20,0.40,0.60,0.80,1.00])
plt.title('Loss value',fontsize=12)
ax = plt.gca()
ax.set_xticklabels([])
plt.subplot(212)
plt.plot(records['val_acc'])
plt.plot(records['acc'])
plt.yticks([0.6,0.7,0.8,0.9,1.0])
plt.title('Accuracy',fontsize=12)
plt.show()
from tensorflow.keras.utils import plot_model
plot_model(model,
to_file=modelname+'_model.pdf',
show_shapes=True,
show_layer_names=False,
rankdir='TB') | [
"noreply@github.com"
] | noreply@github.com |
049ab8dd8552a31c314ba77c550026c4ad2f3698 | f7820f347356ec97d54b4dc81fd4b133371fe364 | /tests/test_events.py | ce6c5fc097e73c13a43ad5d94887f207a1c9853e | [] | no_license | geospatial-jeff/cognition-pipeline | a029c8009dbceb5605bad5cf0dd418b7091c2235 | 7343e7d18e01a3ac2a1646012e4e67f9d1698bec | refs/heads/master | 2022-12-14T15:46:38.086800 | 2019-01-06T22:36:31 | 2019-01-06T22:36:31 | 161,524,087 | 4 | 0 | null | 2022-12-08T01:31:18 | 2018-12-12T17:43:08 | Python | UTF-8 | Python | false | false | 4,010 | py | import unittest
import boto3
import os
import json
from handler import PipelineUnittests
lambda_client = boto3.client("lambda")
class MyPipelineTestCases(unittest.TestCase):
def setUp(self):
self.pipeline = PipelineUnittests()
def test_invoke(self):
message = {"hello": "world"}
response = self.pipeline.functions["invoke"].invoke(message)
self.assertEqual(message, json.loads(response["body"]))
def test_http_get(self):
id = "test-id"
response = self.pipeline.functions["http_get"].invoke(id)
self.assertEqual(response, id)
def test_http_post(self):
payload = {"hello": "world", "testing": "123"}
response = self.pipeline.functions["http_post"].invoke(json.dumps(payload))
self.assertEqual(payload, json.loads(response))
def test_sns(self):
response = self.pipeline.functions["sns"].invoke("testing")
idx = 0
for message in self.pipeline.resources["LoggingQueue"].listen():
if message.message_attributes["id"]["StringValue"] == "sns":
self.assertEqual(message.body[1:-1], "testing")
message.delete()
idx += 1
self.assertGreater(idx, 0)
def test_sns_bucket_notification(self):
outfile = "data/bucket_notification.txt"
key = "sns/notification.txt"
response = self.pipeline.functions["sns_bucket_notification"].invoke(
outfile, key=key
)
self.assertEqual(response["bucket"], "CognitionPipelineUnittestBucket")
self.assertEqual(response["key"], key)
idx = 0
for message in self.pipeline.resources["LoggingQueue"].listen():
if (
message.message_attributes["id"]["StringValue"]
== "sns_bucket_notification"
):
with open(outfile, "r") as f:
contents = f.read()
# Strip quotes from message
self.assertEqual(message.body[1:-1], contents)
message.delete()
idx += 1
self.assertGreater(idx, 0)
def test_sqs_bucket_notification(self):
outfile = "data/bucket_notification.txt"
key = "sqs/notification.txt"
response = self.pipeline.functions["sqs_bucket_notification"].invoke(
outfile, key=key
)
self.assertEqual(response["bucket"], "CognitionPipelineUnittestBucket")
self.assertEqual(response["key"], key)
idx = 0
for message in self.pipeline.resources["LoggingQueue"].listen():
if (
message.message_attributes["id"]["StringValue"]
== "sqs_bucket_notification"
):
with open(outfile, "r") as f:
contents = f.read()
# Strip quotes from message
self.assertEqual(message.body[1:-1], contents)
message.delete()
idx += 1
self.assertGreater(idx, 0)
def test_sqs(self):
self.pipeline.functions["sqs"].invoke("testing")
idx = 0
for message in self.pipeline.resources["LoggingQueue"].listen():
if message.message_attributes["id"]["StringValue"] == "sqs":
self.assertEqual(message.body[1:-1], "testing")
message.delete()
idx += 1
self.assertGreater(idx, 0)
def test_sqs_aggregate(self):
seq = list(range(10))
self.pipeline.functions["sqs_aggregate"].invoke({"sequence": seq})
values = []
idx = 0
for message in self.pipeline.resources["LoggingQueue"].listen():
if message.message_attributes["id"]["StringValue"] == "sqs_aggregate":
value = int(message.body[1:-1])
values.append(value)
message.delete()
idx += 1
self.assertEqual(sum(seq), sum(values))
self.assertGreater(idx, 0)
| [
"jeff.albrecht@slingshotaerospace.com"
] | jeff.albrecht@slingshotaerospace.com |
d68fceb4204ad57c994d5118e3cce37187d0db18 | 15828ab5229fa53505abdb53e853ea40c6af3083 | /main/urls.py | f467920aad2405a64cb23fe6f5f1503394d97438 | [] | no_license | kudep/conventusdemo | 33cce9d87b379bd46dd6caccb42880d0b8b1d889 | f65563b355ae41a8f0a961ee5a2ae7f6dbd80486 | refs/heads/master | 2021-01-22T23:16:32.562699 | 2017-03-21T21:19:33 | 2017-03-21T21:19:33 | 85,621,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 891 | py | """main URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from story.views import *
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', init),
url(r'^storytest/', include('story.urls')), ##Go to story app urls
]
| [
"kuznetsov.den.p@gmail.com"
] | kuznetsov.den.p@gmail.com |
00784a17b99b4077db9e72d37bf5cb26749d3043 | 64bf39b96a014b5d3f69b3311430185c64a7ff0e | /intro-ansible/venv3/lib/python3.8/site-packages/ansible_test/_data/sanity/code-smell/changelog.py | 710b10f6c08ec6f6580b2837b46f9a06e6302fd6 | [
"MIT"
] | permissive | SimonFangCisco/dne-dna-code | 7072eba7da0389e37507b7a2aa5f7d0c0735a220 | 2ea7d4f00212f502bc684ac257371ada73da1ca9 | refs/heads/master | 2023-03-10T23:10:31.392558 | 2021-02-25T15:04:36 | 2021-02-25T15:04:36 | 342,274,373 | 0 | 0 | MIT | 2021-02-25T14:39:22 | 2021-02-25T14:39:22 | null | UTF-8 | Python | false | false | 1,420 | py | #!/usr/bin/env python
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import sys
import subprocess
def main():
paths = sys.argv[1:] or sys.stdin.read().splitlines()
allowed_extensions = ('.yml', '.yaml')
config_path = 'changelogs/config.yaml'
# config must be detected independent of the file list since the file list only contains files under test (changed)
has_config = os.path.exists(config_path)
paths_to_check = []
for path in paths:
if path == config_path:
continue
if path.startswith('changelogs/fragments/.'):
if path in ('changelogs/fragments/.keep', 'changelogs/fragments/.gitkeep'):
continue
print('%s:%d:%d: file must not be a dotfile' % (path, 0, 0))
continue
ext = os.path.splitext(path)[1]
if ext not in allowed_extensions:
print('%s:%d:%d: extension must be one of: %s' % (path, 0, 0, ', '.join(allowed_extensions)))
paths_to_check.append(path)
if not has_config:
print('changelogs/config.yaml:0:0: config file does not exist')
return
if not paths_to_check:
return
cmd = [sys.executable, '-m', 'antsibull_changelog', 'lint'] + paths_to_check
subprocess.call(cmd) # ignore the return code, rely on the output instead
if __name__ == '__main__':
main()
| [
"sifang@cisco.com"
] | sifang@cisco.com |
34d8d879fd1dc1d98ab6c49550b0c42bdac6e2f7 | f90022212ee26a5d5ecb08999a015439f2e01e48 | /ProTwo/AppTwo/forms.py | 0213efeb7eb4ac29e8c2d8b9b380f7afdcd1e505 | [] | no_license | miguelmatosduarte/django-deplyment-example | c389b0ffe348836395338bf5c0fa8152612c0f40 | 73aa51bd0d20ad1500028a0945f14086538c2842 | refs/heads/master | 2020-04-01T21:09:44.843492 | 2018-10-18T15:27:33 | 2018-10-18T15:27:33 | 153,640,131 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | from django import forms
from AppTwo.models import User
class Sign_Up_Form(forms.ModelForm):
class Meta:
model = User
fields = "__all__"
| [
"miguelmatosduarte@gmail.com"
] | miguelmatosduarte@gmail.com |
1aec4e2930ae7e12ace2baddd555e7260ec9e136 | 1eae467d92ed934a8aad94feff2abf1ba693dae0 | /1/codiceFiscale.py | 40fcad03e411aebd93a45e66a6f3a5596e3305a5 | [] | no_license | marcocolognese/Analysis-of-information-systems-LABHomeworks | 78267dae86bc900ea495395ad2dd3c0c97ec79f8 | effa8d432fda107eb0701d4818adb3198f783537 | refs/heads/master | 2022-06-30T07:40:30.461032 | 2022-06-16T06:52:11 | 2022-06-16T06:52:11 | 139,051,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,265 | py | #Moduli
import sys
import string
#Strutture dati Globali
vocali = ('a','e','i','o','u')
mesi = ('a','b','c','d','e','h','l','m','p','r','s','t')
comuni = {'udine':'l483', 'verona':'l781', 'legnago': 'e512'}
#CODICI DI CONTROLLO
regole_pari = {}
alfabeto = string.ascii_lowercase
for i in xrange (0,10):
regole_pari[str(i)] = i
for i in xrange (0,26):
regole_pari[alfabeto[i]] = i
regole_dispari = {}
temp_tuple = (1,0,5,7,9,13,15,17,19,21)
for i in xrange(0,10):
regole_dispari[str(i)] = temp_tuple[i]
regole_dispari[alfabeto[i]] = temp_tuple[i]
temp_tuple2 = (2,4,18,20,11,3,6,8,12,14,16,10,22,25,24,23)
index = 0
for i in xrange(10,26):
regole_dispari[alfabeto[i]] = temp_tuple2[index]
index += 1
regole_resto = [alfabeto[i] for i in xrange(0,26)]
#------------------------------
def estrai_nome(aString):
temp_string = ''
for aChar in aString:
if not aChar in vocali:
temp_string += aChar
if len(temp_string) >= 4:
temp_string = temp_string[0] + temp_string[2] + temp_string[3]
break
index = 0
while len(temp_string) < 3 and index < len(aString):
if (not aString[index] in temp_string) or (aString[index] in vocali) :
temp_string += aString[index]
index += 1
for i in range(0, 3-len(temp_string)):
temp_string = temp_string + 'x'
return temp_string
def estrai_cognome(aString):
temp_string = ''
for aChar in aString:
if not aChar in vocali:
temp_string += aChar
if len(temp_string) >= 3:
break
index = 0
while len(temp_string) < 3 and index < len(aString):
if (not aString[index] in temp_string) or (aString[index] in vocali) :
temp_string += aString[index]
index += 1
for i in range(0, 3-len(temp_string)):
temp_string = temp_string + 'x'
return temp_string
def genera_mese(unMese):
return mesi[int(unMese)-1]
def codice_comune(comune):
return comuni[comune]
def genera_giorno(unGiorno, unSesso):
if unSesso == 'm':
return unGiorno
else:
return str(int(unGiorno)+40)
def genera_codice_controllo(aCodiceFiscale):
parita = 1
temp_dispari = 0
temp_pari = 0
for aChar in aCodiceFiscale:
if parita:
temp_dispari += int(regole_dispari.get(aChar))
parita = 0
else:
temp_pari += int(regole_pari.get(aChar))
parita = 1
return regole_resto[(temp_dispari+temp_pari) % 26]
def main(nome, cognome, data_nascita, comune, sesso): #pragma: no cover
nomeCF = estrai_nome(nome)
cognomeCF = estrai_cognome(cognome)
data_nascitaCF = data_nascita.split("/")
anno_nascitaCF = data_nascitaCF[2][2:]
mese_nascitaCF = genera_mese(data_nascitaCF[1])
giorno_nascitaCF = genera_giorno(data_nascitaCF[0], sesso)
codice_fiscale = cognomeCF + nomeCF + anno_nascitaCF + mese_nascitaCF + giorno_nascitaCF + codice_comune(comune.lower())
codiceCF = genera_codice_controllo(codice_fiscale)
codice_fiscale += codiceCF
print codice_fiscale
if __name__ == '__main__': #pragma: no cover
if len(sys.argv) > 5:
main(string.lower(sys.argv[1]).replace(' ', ''), string.lower(sys.argv[2]).replace(' ', ''), string.lower(sys.argv[3]), string.lower(sys.argv[4]).replace(' ', ''), string.lower(sys.argv[5]))
else:
print("Missing argument")
| [
"marco.colognese1995@gmail.com"
] | marco.colognese1995@gmail.com |
de2a869e4886495403457448183507387e5318e5 | f5713ebaee8445d5ce7d64f50a7fc2ece362a22c | /apps/principles/forms.py | bfb6988c7267b54a1b96e11e7c75707e84a984e3 | [
"CC-BY-3.0"
] | permissive | gerlad/greenline | bfacaa15e7481ee9b9d7171072724930396facda | 219082c2d10d815c9fb21ac339349a1cb8b9bd04 | refs/heads/master | 2020-05-19T17:55:22.510522 | 2011-03-21T15:06:44 | 2011-03-21T15:06:44 | 1,227,024 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | from django import forms
from principles.models import Entry
from principles.models import Principle
class PrincipleForm(forms.Form):
choices = forms.MultipleChoiceField(choices=(('1', 'Choice One'), ('2', 'Choice Two'),), initial=['1'], required=False,)
'''
class PrincipleForm(forms.ModelForm):
class Meta:
model = Entry
fields = ('title', 'body')
''' | [
"gmccollam@gmail.com"
] | gmccollam@gmail.com |
ed5de0c4b11242dea29bd7aa8862a1c803564de6 | d72547d817698b73949bf2ea79a0a1f1de180a33 | /horse_racing/views.py | 824607e4db80e1ee12329708967fc129c16280f1 | [] | no_license | iuvei/horse-racing | 383c19525069c0fb362026e91e12e7709ffc2da5 | 2cdfbdfe4608728f1efd0ab2d931ba41c6723d05 | refs/heads/main | 2023-06-16T08:33:40.685724 | 2021-07-09T13:06:21 | 2021-07-09T13:06:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,317 | py | import string
import datetime
import random
from django.conf import settings
from django.http import JsonResponse
from django.contrib import auth, messages
from django.contrib.auth.models import User
from django.shortcuts import render, redirect
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.decorators import login_required
from django.contrib.sites.shortcuts import get_current_site
from .paytm_checksum import generate_checksum, verify_checksum
from .models import Registration, Transaction, TransactionDetail, Player, HorseRacing, GamePointHistory, GamePlayHistory, \
Subscriber, Referral, CommissionHistory
time_left = datetime.datetime.now() + datetime.timedelta(minutes=4)
# Create your views here.
def index(request):
return render(request, "index.html", {'home': 'active'})
def notfound(request):
return render(request, "404.html")
def about(request):
return render(request, "about.html", {'other': 'active'})
def affiliate(request):
return render(request, "affiliate.html", {'other': 'active'})
def contact(request):
return render(request, "contact.html", {'contact': 'active'})
def faq(request):
return render(request, "faq.html", {'other': 'active'})
def hiw(request):
return render(request, "how-it-work.html", {'other': 'active'})
def lottery(request):
return render(request, "lottery.html", {'lottery': 'active'})
def play(request):
return render(request, "play.html", {'play': 'active'})
def tc(request):
return render(request, "terms-conditions.html", {'other': 'active'})
def tcd(request):
return render(request, "terms-conditions-details.html", {'other': 'active'})
@login_required(login_url='/login')
def tournaments(request):
global time_left
now = datetime.datetime.now()
time_diff = str(time_left - now)
# print("global: ",time_left)
# print("now: ",time_diff)
if time_diff[0:5] == '-1 da':
time_left = datetime.datetime.now() + datetime.timedelta(minutes=4)
current_second = 240
milli_sec = 240000
else:
current_minute = time_diff[2:4]
current_second = time_diff[5:7]
if '0' in current_second:
current_second = current_second[1]
elif '0' in current_minute:
current_minute = current_minute[1]
current_second = (int(current_minute) * 60) + int(current_second)
current_second = int(current_second)
milli_sec = current_second * 1000
# print("minute ",current_minute)
# print("second ",current_second)
user = request.user
g_no = HorseRacing.objects.latest('timestamp')
if g_no.open:
g_no = g_no.game_no
else:
g_no = g_no.game_no
g_no = str(g_no)
g_no = g_no[8:]
g_no = int(g_no)
g_no += 1
d = datetime.date.today().strftime("%Y%m%d")
h = str(g_no)
d += h
g_no = int(d)
h_race = HorseRacing(game_no=g_no)
h_race.save()
if Registration.objects.filter(user=user).exists():
bal = Registration.objects.get(user=request.user)
win_bal = bal.win_balance
bal = bal.balance
d = str(g_no)
cond = ['-time']
all_bets = GamePlayHistory.objects.all().order_by(*cond)
my_bets = GamePlayHistory.objects.filter(player=user).order_by(*cond)
context = {
'play': 'active',
"race_no": d,
"bal": bal,
"second_left": current_second,
"milli_sec_left": milli_sec,
"win_bal": win_bal,
"all_bets": all_bets,
"my_bets": my_bets,
}
return render(request, "tournaments.html", context)
def signup(request):
if request.method == 'POST':
name = request.POST.get('name', None)
mob = request.POST.get('mob', None)
password = request.POST.get('password', None)
referral = request.POST.get('referral', None)
try:
user = User.objects.get(username=mob)
messages.error(request, "Username has already been taken. Please try another")
return render(request, 'signup.html')
except User.DoesNotExist:
new_user = User.objects.create_user(username=mob, password=password, first_name=name)
new_user.save()
reg = Registration(referral=referral, phone_number=mob, full_name=name, user=new_user)
reg.save()
if referral == "DEFAULT000":
pass
else:
if Referral.objects.filter(referral=referral).exists():
ref = Referral.objects.get(referral=referral)
ref.assign_to = new_user
ref.save()
# extra bonus
reg.balance = '30'
reg.save()
auth.login(request, new_user)
return redirect("tournaments")
else:
return render(request, "signup.html", {"referral": "DEFAULT000"})
def join(request, referral):
return render(request, "signup.html", {"referral": referral})
@login_required(login_url="/login")
def referral(request):
user = request.user
if Referral.objects.filter(created_by=user).exists():
referral = Referral.objects.get(created_by=user).referral
else:
lower_alphabet = list(string.ascii_lowercase)
upper_alphabet = list(string.ascii_uppercase)
hex_digits = list(string.hexdigits)
digits = list(string.digits)
characters = lower_alphabet + upper_alphabet + hex_digits + digits
referral = ''
for i in range(10):
referral += random.choice(characters)
ref = Referral(referral=referral, created_by=user)
ref.save()
data = 'Hey, Join with my referral link to get instant 30 rupees to Play Horse Racing and earn real money Here is ' \
'the direct link to get the real money '
referral_url = 'http://' + get_current_site(request).domain + '/join' + referral
data += referral_url
return JsonResponse({"result": referral_url, "referral": referral, "data": data}, status=200)
def login(request):
if request.method == 'POST':
mob = request.POST.get('mob', None)
password = request.POST.get('password', None)
user = auth.authenticate(username=mob, password=password)
if user is not None:
auth.login(request, user)
return redirect("tournaments")
messages.error(request, "Account Does not Exists!")
return render(request, "login.html")
def logout(request):
auth.logout(request)
return redirect("/")
def initiate_payment(request):
if request.method == "GET":
return redirect('tournaments')
amount = int(request.POST['amount'])
user = request.user
transaction = Transaction.objects.create(made_by=user, amount=amount)
transaction.save()
merchant_key = settings.PAYTM_SECRET_KEY
params = {
'MID': settings.PAYTM_MERCHANT_ID,
'ORDER_ID': str(transaction.order_id),
'CUST_ID': 'fe-iron9091@gmail.com', # str(transaction.made_by.email),
'TXN_AMOUNT': str(transaction.amount),
'CHANNEL_ID': settings.PAYTM_CHANNEL_ID,
'WEBSITE': settings.PAYTM_WEBSITE,
# ('EMAIL': request.user.email),
# ('MOBILE_N0': '9911223388'),
'INDUSTRY_TYPE_ID': settings.PAYTM_INDUSTRY_TYPE_ID,
'CALLBACK_URL': 'http://'+get_current_site(request).domain+'/callback',
# ('PAYMENT_MODE_ONLY', 'NO'),
}
checksum = generate_checksum(params, merchant_key)
transaction.checksum = checksum
transaction.save()
params['CHECKSUMHASH'] = checksum
return render(request, 'payments/redirect.html', context=params)
@csrf_exempt
def callback(request):
if request.method == 'POST':
received_data = dict(request.POST)
paytm_params = {}
paytm_checksum = received_data['CHECKSUMHASH'][0]
for key, value in received_data.items():
if key == 'CHECKSUMHASH':
paytm_checksum = value[0]
else:
paytm_params[key] = str(value[0])
# Verify checksum
is_valid_checksum = verify_checksum(paytm_params, settings.PAYTM_SECRET_KEY, str(paytm_checksum))
if is_valid_checksum:
received_data['message'] = "Checksum Matched"
else:
received_data['message'] = "Checksum Mismatched"
return render(request, 'payments/callback.html', context=received_data)
txn = Transaction.objects.get(order_id=paytm_params['ORDERID'])
if paytm_params['STATUS'] == 'TXN_SUCCESS':
txn_detail = TransactionDetail(made_by=txn, transaction_id=paytm_params['TXNID'],
bank_txn_id=paytm_params['BANKTXNID'], currency=paytm_params['CURRENCY'],
status=paytm_params['STATUS'], gateway_name=paytm_params['GATEWAYNAME'],
bank_name=paytm_params['BANKNAME'], payment_mode=paytm_params['PAYMENTMODE'],
user=txn.made_by)
txn_detail.save()
messages.info(request, "Recharged Successfully! Wallet Updated")
user = txn.made_by
reg = Registration.objects.get(user=user)
bal = float(reg.balance)
bal += float(paytm_params['TXNAMOUNT'])
reg.balance = str(bal)
reg.save()
else:
txn_detail = TransactionDetail(made_by=txn, transaction_id=paytm_params['TXNID'],
bank_txn_id=paytm_params['BANKTXNID'], currency=paytm_params['CURRENCY'],
status=paytm_params['STATUS'], gateway_name=paytm_params['GATEWAYNAME'],
bank_name=paytm_params['BANKNAME'], payment_mode=paytm_params['PAYMENTMODE'],
user=txn.made_by)
txn_detail.save()
messages.info(request, "Recharge Unsuccessfull! Try Again")
return redirect("tournaments")
@csrf_exempt
def join_game(request):
if request.is_ajax and request.method == "POST":
user = request.user
horse_name = request.POST.get('horse_name')
amount = int(request.POST.get('amount'))
# setting values for horse race and player model
# for horse racing model
horse_race = HorseRacing.objects.latest('timestamp')
betted_horse_name = ''
if horse_race.open:
if horse_name == "horse1":
total_amount = horse_race.horse1
total_amount += amount
betted_horse_name = 'Brick Red'
horse_race.horse1 = total_amount
elif horse_name == "horse2":
total_amount = horse_race.horse2
total_amount += amount
horse_race.horse2 = total_amount
betted_horse_name = 'Violet'
else:
total_amount = horse_race.horse3
total_amount += amount
horse_race.horse3 = total_amount
betted_horse_name = 'Red'
horse_race.save()
else:
return JsonResponse({"msg": "Wait for the game to over then try again!"}, status=200)
# for player model
player = Player(player=user, game=horse_race, bet_on=betted_horse_name, amount=amount)
player.save()
# deducting balance from users account
reg_user = Registration.objects.get(user=user)
reg_user_bal = float(reg_user.balance)
reg_user_bal -= amount
reg_user.balance = str(reg_user_bal)
reg_user.save()
# making history
gph = GamePointHistory(amount=amount, made_by=user, balance=int(reg_user_bal))
gph.save()
return JsonResponse({"msg": True, "bal": reg_user_bal}, status=200)
return JsonResponse({"msg": False}, status=200)
def start_race(request):
if request.is_ajax and request.method == 'GET':
horse_race = HorseRacing.objects.latest('timestamp')
winner = None
horse = ''
if horse_race.open:
winner = '30'
h1 = horse_race.horse1
h2 = horse_race.horse2
h3 = horse_race.horse3
if h3 <= h1 and h3 <= h2:
horse = 'horse3'
horse_race.winner = 'horse3'
elif h2 <= h1 and h2 <= h3:
horse = 'horse2'
horse_race.winner = 'horse2'
else:
horse = 'horse1'
horse_race.winner = 'horse1'
horse_race.save()
return JsonResponse({"winner": winner, "horse": horse}, status=200)
return JsonResponse({"winner": False}, status=200)
@csrf_exempt
def set_result(request):
if request.is_ajax and request.method == "POST":
selected = request.POST.get('selected', None)
winner = request.POST.get('winner', None)
horse_race = HorseRacing.objects.latest('timestamp')
user = request.user
try:
player = Player.objects.filter(player=user, game=horse_race)[0]
except IndexError:
return JsonResponse({'result': False}, status=200)
reg_user = Registration.objects.filter(user=user)[0]
amount = player.amount
if winner == 'horse1':
horse_race.winner = 'horse1'
elif winner == 'horse2':
horse_race.winner = 'horse2'
elif winner == 'horse3':
horse_race.winner = 'horse3'
horse_race.open = False
horse_race.save()
total_betting = 0
total_betting += horse_race.horse1
total_betting += horse_race.horse2
total_betting += horse_race.horse3
if winner == selected:
player.result = 'Win'
reg_user.win_balance = str(amount * 2)
reg_user.save()
else:
player.result = 'Lose'
player.save()
game_history = GamePlayHistory(amount=player.amount, which_horse=selected, result=player.result, game=horse_race,
player=user, total_bet=total_betting)
game_history.save()
# setting referral bonus
if player.result == 'Win':
reff = reg_user.referral
if reff == 'DDEFAULT000':
pass
else:
if Referral.objects.filter(assign_to=user).exists():
reff = Referral.objects.get(assign_to=user)
awarde_user = reff.created_by
awarde_user = User.objects.filter(username=user)[0]
awarde_user = Registration.objects.filter(user=awarde_user)[0]
commission = float(100) * (10.0 / float(amount)) # user's commission
w_bal = float(awarde_user.win_balance)
w_bal += commission
commission = str(w_bal)
awarde_user.win_balance = commission
awarde_user.save()
# setting history
com_history = CommissionHistory(amount=player.amount, you_got=commission, which_horse=selected,
referred_user=user, user=awarde_user)
com_history.save()
return JsonResponse({"result": True}, status=200)
return JsonResponse({"result": False}, status=200)
@login_required(login_url='/login')
def profile(request):
user = request.user
game_his = GamePlayHistory.objects.filter(player=user)
transaction = TransactionDetail.objects.filter(user=user)
wal = GamePointHistory.objects.filter(made_by=user).order_by('-date', '-time')
user = Registration.objects.filter(user=user)
ref = CommissionHistory.objects.filter(user=user[0]).order_by('-timestamp')
param = {
'this_user': user,
"game_his": game_his,
"wallet": wal,
"transaction": transaction,
"referred": ref,
}
return render(request, "profile.html", param)
def subscribe(request):
email = request.POST['email']
sub = Subscriber(contact=email)
sub.save()
return redirect("/") | [
"elahifaiz00@gmail.com"
] | elahifaiz00@gmail.com |
a250c52c942542c5047617e59af057be46c29e3a | 36cae759a3d67da9b2787327b2a99c2c69525c56 | /B2BSWE/Patterns/Prog_Character_Desc.py | d141d88ca48ed7705e0a7c941df0f0847a6b11de | [] | no_license | solouniverse/Python | a64515695c359e3a6e30288742d0bc432adf20fd | c072c681ae29fce28676de1108d41ca9fc9cbb27 | refs/heads/master | 2022-11-30T20:17:43.715018 | 2022-11-25T13:54:49 | 2022-11-25T13:54:49 | 239,029,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 525 | py | try:
import sys, os
sys.path.append(os.getcwd())
from Utils import Prog_Utils as u
except Exception as e:
print("Exception: ", e)
utils = u.Utils()
n = utils.getNoOfRuns()
while n > 0:
rows, columns = utils.getMatrixSize()
ucAlphaIdx = 65 # A
lcAlphaIdx = 97 # a
for row in range(rows + ucAlphaIdx - 1, ucAlphaIdx-1, -1):
for col in range(columns + ucAlphaIdx - 1, ucAlphaIdx-1, -1):
alphabet = chr(row)
print(alphabet, end=" ")
print()
n = n - 1 | [
"solouniverse19@gmail.com"
] | solouniverse19@gmail.com |
9544265ec76e9e6bce081dee8ea03c2ea278a212 | c7e0c86a24521a13c3b06c73244e9f5854f47284 | /smarts/env/tests/test_metrics.py | a9d43943678df687bf3dc894e97b9aeab7c2cbb1 | [
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"CC-BY-NC-4.0",
"GPL-1.0-or-later",
"LicenseRef-scancode-generic-exception",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.0-or-later",
"GPL-3.0-or-later",
"BSD-3-Clause",
"MIT",
"LGPL-... | permissive | huawei-noah/SMARTS | 243d1f1fa4d3afe52a1dd8f7c6c500054d4a1a97 | 2ae8bd76a0b6e4da5699629cec0fefa5aa47ce67 | refs/heads/master | 2023-08-31T05:06:29.064270 | 2023-08-28T23:11:31 | 2023-08-28T23:11:31 | 301,903,883 | 824 | 212 | MIT | 2023-08-08T14:52:00 | 2020-10-07T02:11:23 | Python | UTF-8 | Python | false | false | 10,884 | py | # MIT License
#
# Copyright (C) 2022. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import dataclasses
from unittest import mock
import gymnasium as gym
import numpy as np
import pytest
from smarts.core.agent_interface import AgentInterface, DoneCriteria
from smarts.core.controllers import ActionSpaceType
from smarts.core.coordinates import Heading, Point
from smarts.core.plan import EndlessGoal, Goal, Mission, PositionalGoal, Start
from smarts.env.gymnasium.wrappers.metric.metrics import Metrics, MetricsError
from smarts.zoo.agent_spec import AgentSpec
def _intrfc_improper():
return [
{"accelerometer": False},
{"max_episode_steps": None},
{"neighborhood_vehicle_states": False},
{"waypoint_paths": False},
{
"done_criteria": DoneCriteria(
collision=False,
off_road=True,
)
},
{
"done_criteria": DoneCriteria(
collision=True,
off_road=False,
)
},
]
@pytest.fixture
def get_agent_spec(request):
base_intrfc = AgentInterface(
action=ActionSpaceType.TargetPose,
accelerometer=True,
done_criteria=DoneCriteria(
collision=True,
off_road=True,
off_route=False,
on_shoulder=False,
wrong_way=False,
not_moving=False,
agents_alive=None,
),
max_episode_steps=5,
neighborhood_vehicle_states=True,
waypoint_paths=True,
)
return AgentSpec(interface=dataclasses.replace(base_intrfc, **request.param))
@pytest.fixture(scope="module")
def get_scenario(request):
from pathlib import Path
from smarts.sstudio.scenario_construction import build_scenario
if request.param == "single_agent_intersection":
scenario = str(
Path(__file__).resolve().parents[3]
/ "scenarios"
/ "sumo"
/ "intersections"
/ "1_to_1lane_left_turn_c_agents_1"
)
num_agents = 1
elif request.param == "multi_agent_merge":
scenario = str(
Path(__file__).resolve().parents[3]
/ "scenarios"
/ "sumo"
/ "merge"
/ "3lane_agents_2"
)
num_agents = 2
build_scenario(scenario=scenario)
return (scenario, num_agents)
@pytest.fixture
def make_env(get_agent_spec, get_scenario):
env = gym.make(
"smarts.env:hiway-v1",
scenarios=[get_scenario[0]],
agent_interfaces={
f"AGENT_{agent_id}": get_agent_spec.interface
for agent_id in range(get_scenario[1])
},
headless=True,
)
yield env
env.close()
@pytest.mark.parametrize("get_agent_spec", _intrfc_improper(), indirect=True)
@pytest.mark.parametrize("get_scenario", ["single_agent_intersection"], indirect=True)
def test_improper_interface(make_env):
# Verify proper agent interface enabled.
with pytest.raises(AttributeError):
env = Metrics(env=make_env)
@pytest.mark.parametrize("get_agent_spec", [{}], indirect=True)
@pytest.mark.parametrize("get_scenario", ["single_agent_intersection"], indirect=True)
def test_init(make_env):
# Verify instantiation of Metrics wrapper.
env = Metrics(env=make_env)
# Verify blocked access to underlying private variables.
for elem in ["_scen", "_road_map", "_records", "smarts"]:
with pytest.raises(AttributeError):
getattr(env, elem)
def _mock_mission(start: Start, goal: Goal):
def func(scenario_root, agents_to_be_briefed):
return [Mission(start=start, goal=goal)]
return func
@pytest.mark.parametrize("get_agent_spec", [{}], indirect=True)
@pytest.mark.parametrize("get_scenario", ["single_agent_intersection"], indirect=True)
def test_reset(make_env):
# Verify a scenario without PositionalGoal fails suitability check.
with mock.patch(
"smarts.core.scenario.Scenario.discover_agent_missions",
side_effect=_mock_mission(
start=Start(position=np.array([0, 0, 0]), heading=Heading(0)),
goal=EndlessGoal(),
),
):
with pytest.raises(MetricsError):
env = Metrics(env=make_env)
env.reset()
return
@pytest.mark.parametrize("get_agent_spec", [{}], indirect=True)
@pytest.mark.parametrize("get_scenario", ["single_agent_intersection"], indirect=True)
def test_end_in_off_road(make_env):
# Verify that env.score() can be computed when vehicle goes off road.
env = Metrics(env=make_env)
obs, _ = env.reset()
agent_name = next(iter(env.agent_interfaces.keys()))
dones = {"__all__": False}
while not dones["__all__"]:
actions = {
agent_name: np.append(
obs[agent_name]["ego_vehicle_state"]["position"][:2]
+ np.array([0.5, -0.8]),
[obs[agent_name]["ego_vehicle_state"]["heading"], 0.1],
)
}
obs, _, dones, _, _ = env.step(actions)
assert obs[agent_name]["events"]["off_road"], (
"Expected vehicle to go off road, but it did not. "
f"Events: {obs[agent_name]['events']}."
)
env.score()
# Verify that Count values increase with episode.
records = env.records()
scen_name = next(iter(records.keys()))
counts = records[scen_name][agent_name].counts
assert counts.goals == 0
assert counts.episodes == 1
assert counts.steps == 3
@pytest.mark.parametrize(
"get_agent_spec",
[{"max_episode_steps": 27, "done_criteria": DoneCriteria(off_route=True)}],
indirect=True,
)
@pytest.mark.parametrize("get_scenario", ["single_agent_intersection"], indirect=True)
def test_end_in_off_route(make_env):
# Verify that env.score() can be computed when vehicle ends in off route.
# Note:
# Point(-12, -1.6, 0) lies on edge-west-WE_0, i.e., to the left of the junction.
# Point( 12, -1.6, 0) lies on edge-east-WE_0, i.e., to the right of the junction.
# Point(1.5, 30.5, 0) lies on edge-north-SN_0, i.e., to the top of the junction.
with mock.patch(
"smarts.core.scenario.Scenario.discover_agent_missions",
side_effect=_mock_mission(
start=Start(position=np.array([-12, -1.6, 0]), heading=Heading(-1.57)),
goal=PositionalGoal(position=Point(x=1.5, y=30.5, z=0), radius=3),
),
):
env = Metrics(env=make_env)
obs, _ = env.reset()
agent_name = next(iter(env.agent_interfaces.keys()))
dones = {"__all__": False}
while not dones["__all__"]:
actions = {
agent_name: np.append(
obs[agent_name]["ego_vehicle_state"]["position"][:2]
+ np.array([1, 0]),
[obs[agent_name]["ego_vehicle_state"]["heading"], 0.1],
)
}
obs, _, dones, _, _ = env.step(actions)
assert (
obs[agent_name]["ego_vehicle_state"]["lane_id"].rstrip() == "edge-east-WE_0"
), (
"Expected vehicle to drive off route, but it is at lane: "
f"{obs[agent_name]['ego_vehicle_state']['lane_id']}."
)
assert obs[agent_name]["events"]["off_route"], (
"Expected vehicle to go off route, but it did not. "
f"Events: {obs[agent_name]['events']}."
)
env.score()
@pytest.mark.parametrize("get_agent_spec", [{"max_episode_steps": 1}], indirect=True)
@pytest.mark.parametrize("get_scenario", ["single_agent_intersection"], indirect=True)
def test_end_in_junction(make_env):
# Verify that env.score() can be computed when vehicle ends in a junction.
# Note:
# Point(-1.76, 2.05, 0) lies on :junction-intersection_1_0, i.e., inside the junction.
with mock.patch(
"smarts.core.scenario.Scenario.discover_agent_missions",
side_effect=_mock_mission(
start=Start(position=np.array([-1.86, 1.95, 0]), heading=Heading(-1.00)),
goal=PositionalGoal(position=Point(x=1.5, y=30.5, z=0), radius=3),
),
):
env = Metrics(env=make_env)
obs, _ = env.reset()
agent_name = next(iter(obs.keys()))
actions = {
agent_id: np.array([-1.76, 2.05, -0.91, 0.1]) for agent_id in obs.keys()
}
obs, _, dones, _, _ = env.step(actions)
assert (
obs[agent_name]["ego_vehicle_state"]["lane_id"].rstrip()
== ":junction-intersection_1_0"
), (
"Expected vehicle to be inside junction, but it is at lane: "
f"{obs[agent_name]['ego_vehicle_state']['lane_id']}."
)
assert (
obs[agent_name]["events"]["reached_max_episode_steps"] and dones["__all__"]
), (
"Expected vehicle to reach max episode steps and become done, but "
f"it did not. Dones: {dones}. Events: {obs[agent_name]['events']}."
)
env.score()
@pytest.mark.parametrize("get_agent_spec", [{}], indirect=True)
@pytest.mark.parametrize("get_scenario", ["multi_agent_merge"], indirect=True)
def test_records_and_scores(make_env):
# Verify that records and scores are functional in multi-agent environment.
# Note:
# env.score() is only callable after >=1 episode. Hence step through 1 episode.
env = Metrics(env=make_env)
obs, _ = env.reset()
terminated = {"__all__": False}
while not terminated["__all__"]:
actions = {
agent_name: np.append(
agent_obs["ego_vehicle_state"]["position"][:2], [0, 0.1]
)
for agent_name, agent_obs in obs.items()
}
obs, _, terminated, _, _ = env.step(actions)
env.records()
env.score()
| [
"noreply@github.com"
] | noreply@github.com |
ffe86aff360fe66f589944e6f4e66d44eefc999a | ddfb17730fd7b9388bf6edc3b2414db6cdd2d6c9 | /zzz_old_stuff/GetVariables.py | e209e191db80a068022d4268b26072cef1bd48eb | [
"MIT"
] | permissive | JaakkoAhola/LES-scripting | eb09258a9acb7ab14b76f00073c8b6c921bdc300 | 1ebe99ce4292e58581bf50615cb8e0aa3d0c0af2 | refs/heads/master | 2020-09-03T14:24:26.332554 | 2020-04-02T12:15:19 | 2020-04-02T12:15:19 | 219,483,540 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,076 | py | def get_ts_variable(fname,var_name,target_time):
# Function for extracting scalar data from NetCDF file based on the given time value.
#
#import numpy
import netCDF4 as netcdf
# Open the target NetCDF file
ncid = netcdf.Dataset(fname,'r+')
# Variable
var = ncid.variables[var_name]
# Time
times = ncid.variables['time']
#
# Find the correct time value
i=0
out=-999
for tt in times:
if tt==target_time:
out=var[i]
break
i=i+1
# Close file
ncid.close()
return out
#
fname='/cygdrive/k/LES/UCLALES-SALSA/bin/ascos_30ccn_2D.ts.nc'
target_time=3600 # Time (s)
var_name='cfrac' # Cloud fraction
cf=get_ts_variable(fname,var_name,target_time)
print("Cloud fraction=",cf)
var_name='zc' # Cloud top height
dt=600 # Time step
zc1=get_ts_variable(fname,var_name,target_time)
zc2=get_ts_variable(fname,var_name,target_time+dt)
dzdt=(zc2-zc1)/600
print("Cloud top height tendency=",(zc2-zc1)/600,"m/s")
# Entrainment rate
div=1.5e-6 # Divergency (from NAMELIST)
E=dzdt-div*zc1
print("Entrainment rate",E,"m/s")
| [
"jaakko.ahola@fmi.fi"
] | jaakko.ahola@fmi.fi |
1d272705faf2bbdc1fdbd6b49ad2bb71b1a70d85 | 810a8ed334a29b81ddd5a4364c06d0272c3aae39 | /clash-of-code/shortest/hexagon.py | 4fd47cf99b5eb0c26472bb476aeafcc46280b64a | [
"MIT"
] | permissive | charlesfranciscodev/codingame | d7bdfc244cb58fa356dec73fb2f2ec5470755dbc | 37b8e269e40f1b4e9807c305874f3b97f7c03a03 | refs/heads/master | 2023-08-16T23:26:20.818882 | 2023-08-16T17:12:06 | 2023-08-16T17:12:06 | 179,561,845 | 53 | 26 | MIT | 2021-05-19T19:36:46 | 2019-04-04T19:16:12 | Python | UTF-8 | Python | false | false | 24 | py | print(6*int(input())**2) | [
"charlesfranciscodev@gmail.com"
] | charlesfranciscodev@gmail.com |
5cff6f6902a20d16ffe5d07ddf2fbc4b5f81a60c | 96c1852886840edfa4aa61a2d9c4f80d4d868015 | /mainCode.py | 10afbd0c42deb2889ebec03721014c1a927f4870 | [] | no_license | rishitaggarwal/Teacher-Helper | 305c9b687320dfe1a95310eed02080c5ef9df47e | bc8213760f27bcfeea7f2f1b24cc8961a605e895 | refs/heads/main | 2022-12-30T06:05:43.768814 | 2020-10-22T11:07:38 | 2020-10-22T11:07:38 | 306,310,100 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | import pandas as pd
import csv
import plotly.graph_objects as px
df = pd.read_csv("data.csv")
studentdf = df.loc[df["student_id"]=="TRL_rst"]
print(studentdf.groupby("level")["attempt"].mean())
fig = px.Figure(px.Bar(x = studentdf.groupby("level")["attempt"].mean(),
y = ["Level 1","Level 2","Level 3", "Level 4"],
orientation = "h"))
fig.show() | [
"noreply@github.com"
] | noreply@github.com |
9d1e8ffeefbf7cee1e32d4c38a282759cf4dd220 | 577ba42cbf0a3230966ac66ef60fd401486e4c06 | /website/apps/core/migrations/0021_transfer_year.py | 96778623b064dec78ae6724511bdcd803f81ac46 | [
"Apache-2.0"
] | permissive | shh-dlce/pulotu | 984ca86de3ffe03e83bbb15b0d497f1ebf190ecd | 82acbb8a3b7f3ec3acc76baffd4047265a77f7d3 | refs/heads/master | 2021-01-10T03:51:13.337840 | 2015-12-09T09:46:55 | 2015-12-09T09:46:55 | 46,917,922 | 2 | 0 | Apache-2.0 | 2021-11-16T11:51:48 | 2015-11-26T09:48:42 | Python | UTF-8 | Python | false | false | 8,072 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
# Note: Don't use "from appname.models import ModelName".
# Use orm.ModelName to refer to models in this application,
# and orm['appname.ModelName'] for models in other applications.
for source in orm.Source.objects.all():
source.year_new = source.year
source.save()
def backwards(self, orm):
"Write your backwards methods here."
raise RuntimeError("Cannot reverse this migration!")
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'core.culture': {
'Meta': {'ordering': "['culture']", 'object_name': 'Culture', 'db_table': "'cultures'"},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'coder': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'culture': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'editor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'fact': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['core.Language']", 'symmetrical': 'False', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128'})
},
u'core.language': {
'Meta': {'ordering': "['language']", 'unique_together': "(('isocode', 'language'),)", 'object_name': 'Language', 'db_table': "'languages'"},
'abvdcode': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'classification': ('django.db.models.fields.TextField', [], {}),
'editor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isocode': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '3', 'db_index': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
u'core.section': {
'Meta': {'ordering': "['id']", 'object_name': 'Section', 'db_table': "'sections'"},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'editor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'section': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'})
},
u'core.source': {
'Meta': {'ordering': "['author', 'year']", 'unique_together': "(['author', 'year'],)", 'object_name': 'Source', 'db_table': "'sources'", 'index_together': "[['author', 'year']]"},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'bibtex': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'editor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reference': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '1000'}),
'year': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'year_new': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['core']
symmetrical = True
| [
"xrotwang@googlemail.com"
] | xrotwang@googlemail.com |
3a648159b1c2aea371c11614bfded14f7873bb3a | 136512dbc6a4352fbdf60b9324ea1a21aed87569 | /ex31.py | a4dbaaca4c02de91d83da15ae8d07ab51838d61c | [] | no_license | wxlsally1980/For-PY104 | f6c5c3e37db3ac2891cbf5b3e33a7677afffaadb | 15992692733207e054f3947332964b9e5e49d13b | refs/heads/master | 2021-01-02T08:31:06.219634 | 2017-08-02T07:40:36 | 2017-08-02T07:40:36 | 99,016,637 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,042 | py | print "You enter a dark room with two doors. Do you go through door #1 or door #2?"
door = raw_input(">")
if door == "1":
print "There's a giant hear here eating a cheese cake. What do you do?"
print "1. Take the cake."
print "2. Scream at the bear."
bear = raw_input(" > ")
if bear == "1":
print "The bear eats your face off. Good job!"
elif bear == "2":
print "The bear eats your legs off. Good job!"
else:
print "Well, doing %s is probably better. Bear runs away." % bear
elif door == "2":
print "You stare into the endless abyss at Cthulu's retina."
print "1. Blueberrues."
print "2. Yellow jacket clothspins."
print "3. Understanding revolvers yelling melodies."
insanity = raw_input(">")
if insanity == "1" or insanity == "2":
print "Your body survives powered by a mind of jello. Good job!"
else:
print "The insanity rots your eyesinto a pool of muck. Good job!"
else:
print "You stumble around and fall on a knife and die. Good job!" | [
"noreply@github.com"
] | noreply@github.com |
11e59e419b0c3729fbdc1c49c14476a7cdcae54a | 2feb2bdda21fb6a077fc19459484599390884ea3 | /t_laplace.py | 4e07761f9117cbe556c32e85dfc0c81f5a373fe9 | [] | no_license | mateussiil/analise-sistemas-lineares | f40082214d8ab3ba689b874beb332a293a17ad96 | 1c39945d07ff58bdbaa14aa40d9e9afb8f9fe0c4 | refs/heads/main | 2023-01-12T03:55:52.397907 | 2020-11-23T12:05:56 | 2020-11-23T12:05:56 | 314,095,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,078 | py | import sympy as sym
from sympy.abc import s,t,x,y,z
import numpy as np
from sympy.integrals import inverse_laplace_transform
import matplotlib.pyplot as plt
from sympy import symbols, fraction, UnevaluatedExpr
import conta as ct
s, w = symbols('s w')
def mod(num, den):
    """Magnitude of a ratio: sqrt(num) divided by sqrt(den)."""
    numerator = np.sqrt(num)
    denominator = np.sqrt(den)
    return numerator / denominator
def fase(num, den):
    """Phase difference: arctan(num) minus arctan(den), in radians."""
    lead = np.arctan(num)
    lag = np.arctan(den)
    return lead - lag
def questao1(R, C):
    """Symbolic transfer function H(s) = 1 / (s*R*C + 1) for the given R and C.

    Uses the module-level sympy symbol ``s``.
    """
    denominator = s * R * C + 1
    return 1 / denominator
# def questao2():
# ws = [10, 1000, 10000]
# C = 100 * 10**-6
# R = 10
# for s in ws:
# w = s
# v = w / (w*w + s*s)
# h = questao1(R, C) * v
# print(h)
# n, d = fraction(h)
# amp = mod(n, d)
# print(amp)
# Circuit parameters: C = 100 microfarad, R = 10 ohm.
C = 100 * 10**-6
R = 10
# Transfer function
w = 100
# NOTE(review): this rebuilds the same expression questao1(R, C) returns.
H = 1 / ( s*R*C + 1)
# Split H into numerator and denominator sympy expressions.
n, d = fraction(H)
# NOTE(review): the return value of this call is discarded -- confirm intent.
mod(1, 1)
print(n, d)
# NOTE(review): result discarded here as well; n and d are sympy expressions.
fase(n,d)
# NOTE(review): mod(1, 1) is always 1.0, so the printed amplitude is constant,
# and fase(1, w) uses the numeric w = 100 set above -- verify the formula.
print('{}*cos({}t + {})'.format(round(mod(1,1),3), w, round(np.degrees(fase(1, w)), 3 )))
tm = np.linspace(0,8,100)
ys = np.zeros(len(tm))
# plt.figure()
# plt.plot(tm, ys, label='y(t)')
# plt.legend()
# plt.xlabel('Time')
# plt.show()
"mateus_silva97@hotmail.com"
] | mateus_silva97@hotmail.com |
13ffabca7f530817e54a8039cdf0711514574bef | 2f05e458d1516a4fa13a1f177a2eeacff4780248 | /django/AIST_survey/migrations/0008_auto_20190218_2100.py | 615cd12ffc7e8e7ad1a9d7c1e4259cb1fc8bd56b | [
"MIT"
] | permissive | aistairc/voteclustering_aist | ecaee45dd126acf7b66c333145bd51098ec119d4 | f1ee7409698a05a99ce40cdccbe4c2b1f8f81b4c | refs/heads/master | 2023-02-19T00:32:50.433211 | 2021-01-25T05:42:09 | 2021-01-25T05:42:09 | 281,014,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | # Generated by Django 2.1.3 on 2019-02-18 21:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('AIST_survey', '0007_auto_20190214_1152'),
]
operations = [
migrations.AddField(
model_name='enquete',
name='has_password',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='enquete',
name='password',
field=models.CharField(blank=True, max_length=128, null=True),
),
]
| [
"watashun@gmail.com"
] | watashun@gmail.com |
fdb64336cf73e67d472705398ef382a50c520887 | 9b9a02657812ea0cb47db0ae411196f0e81c5152 | /repoData/kushaldas-retask/allPythonContent.py | d19c699f3ba9cd5827f25619f2fb80dfe0dc8340 | [] | no_license | aCoffeeYin/pyreco | cb42db94a3a5fc134356c9a2a738a063d0898572 | 0ac6653219c2701c13c508c5c4fc9bc3437eea06 | refs/heads/master | 2020-12-14T14:10:05.763693 | 2016-06-27T05:15:15 | 2016-06-27T05:15:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,401 | py | __FILENAME__ = conf
# -*- coding: utf-8 -*-
#
# retask documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 3 14:56:38 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(os.path.abspath('_themes'))
html_theme_path = ['_themes']
html_theme = 'kr'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.intersphinx', 'sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.doctest', 'sphinx.ext.coverage']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'retask'
copyright = u'2012, Kushal Das'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.4'
# The full version, including alpha/beta/rc tags.
release = '0.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
#pygments_style = 'flask_theme_support.FlaskyStyle'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
html_sidebars = {
'index': ['sidebarintro.html', 'sourcelink.html', 'searchbox.html'],
'**': ['sidebarlogo.html', 'localtoc.html', 'relations.html',
'sourcelink.html', 'searchbox.html']
}
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'retaskdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'retask.tex', u'retask Documentation',
u'Kushal Das', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'retask', u'retask Documentation',
[u'Kushal Das'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'retask', u'retask Documentation',
u'Kushal Das', 'retask', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
########NEW FILE########
__FILENAME__ = flask_theme_support
# flasky extensions. flasky pygments style based on tango style
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
class FlaskyStyle(Style):
background_color = "#f8f8f8"
default_style = ""
styles = {
# No corresponding class for the following:
#Text: "", # class: ''
Whitespace: "underline #f8f8f8", # class: 'w'
Error: "#a40000 border:#ef2929", # class: 'err'
Other: "#000000", # class 'x'
Comment: "italic #8f5902", # class: 'c'
Comment.Preproc: "noitalic", # class: 'cp'
Keyword: "bold #004461", # class: 'k'
Keyword.Constant: "bold #004461", # class: 'kc'
Keyword.Declaration: "bold #004461", # class: 'kd'
Keyword.Namespace: "bold #004461", # class: 'kn'
Keyword.Pseudo: "bold #004461", # class: 'kp'
Keyword.Reserved: "bold #004461", # class: 'kr'
Keyword.Type: "bold #004461", # class: 'kt'
Operator: "#582800", # class: 'o'
Operator.Word: "bold #004461", # class: 'ow' - like keywords
Punctuation: "bold #000000", # class: 'p'
# because special names such as Name.Class, Name.Function, etc.
# are not recognized as such later in the parsing, we choose them
# to look the same as ordinary variables.
Name: "#000000", # class: 'n'
Name.Attribute: "#c4a000", # class: 'na' - to be revised
Name.Builtin: "#004461", # class: 'nb'
Name.Builtin.Pseudo: "#3465a4", # class: 'bp'
Name.Class: "#000000", # class: 'nc' - to be revised
Name.Constant: "#000000", # class: 'no' - to be revised
Name.Decorator: "#888", # class: 'nd' - to be revised
Name.Entity: "#ce5c00", # class: 'ni'
Name.Exception: "bold #cc0000", # class: 'ne'
Name.Function: "#000000", # class: 'nf'
Name.Property: "#000000", # class: 'py'
Name.Label: "#f57900", # class: 'nl'
Name.Namespace: "#000000", # class: 'nn' - to be revised
Name.Other: "#000000", # class: 'nx'
Name.Tag: "bold #004461", # class: 'nt' - like a keyword
Name.Variable: "#000000", # class: 'nv' - to be revised
Name.Variable.Class: "#000000", # class: 'vc' - to be revised
Name.Variable.Global: "#000000", # class: 'vg' - to be revised
Name.Variable.Instance: "#000000", # class: 'vi' - to be revised
Number: "#990000", # class: 'm'
Literal: "#000000", # class: 'l'
Literal.Date: "#000000", # class: 'ld'
String: "#4e9a06", # class: 's'
String.Backtick: "#4e9a06", # class: 'sb'
String.Char: "#4e9a06", # class: 'sc'
String.Doc: "italic #8f5902", # class: 'sd' - like a comment
String.Double: "#4e9a06", # class: 's2'
String.Escape: "#4e9a06", # class: 'se'
String.Heredoc: "#4e9a06", # class: 'sh'
String.Interpol: "#4e9a06", # class: 'si'
String.Other: "#4e9a06", # class: 'sx'
String.Regex: "#4e9a06", # class: 'sr'
String.Single: "#4e9a06", # class: 's1'
String.Symbol: "#4e9a06", # class: 'ss'
Generic: "#000000", # class: 'g'
Generic.Deleted: "#a40000", # class: 'gd'
Generic.Emph: "italic #000000", # class: 'ge'
Generic.Error: "#ef2929", # class: 'gr'
Generic.Heading: "bold #000080", # class: 'gh'
Generic.Inserted: "#00A000", # class: 'gi'
Generic.Output: "#888", # class: 'go'
Generic.Prompt: "#745334", # class: 'gp'
Generic.Strong: "bold #000000", # class: 'gs'
Generic.Subheading: "bold #800080", # class: 'gu'
Generic.Traceback: "bold #a40000", # class: 'gt'
}
########NEW FILE########
__FILENAME__ = async_producer
from retask import Task
from retask import Queue
import time
# Example: enqueue a task and poll for the worker's reply asynchronously.
queue = Queue('example')
info1 = {'user': 'Fedora planet', 'url': 'http://planet.fedoraproject.org'}
task1 = Task(info1)
queue.connect()
job = queue.enqueue(task1)
# No worker has replied yet at this point, so this prints None (Python 2).
print job.result
time.sleep(30)
# By now the async_worker example should have sent its reply.
print job.result
########NEW FILE########
__FILENAME__ = async_worker
from retask import Queue
import time
# Example worker: take one task and reply to the producer asynchronously.
queue = Queue('example')
queue.connect()
# Blocks until a task arrives on the 'example' queue.
task = queue.wait()
print task.data
time.sleep(15)
# Reply is delivered back to the producer via the task's URN key.
queue.send(task, "We received your information dear %s" % task.data['user'])
########NEW FILE########
__FILENAME__ = consumer
from retask import Queue
# Example consumer: drain every task currently on the queue.
queue = Queue('example')
queue.connect()
# NOTE(review): with a competing consumer the queue could empty between the
# length check and dequeue(), making `task` None -- fine for a single reader.
while queue.length != 0:
    task = queue.dequeue()
    print task.data
########NEW FILE########
__FILENAME__ = producer
from retask import Task
from retask import Queue
# Example producer: push two tasks for the consumer example to read.
queue = Queue('example')
info1 = {'user':'kushal', 'url':'http://kushaldas.in'}
info2 = {'user':'fedora planet', 'url':'http://planet.fedoraproject.org'}
task1 = Task(info1)
task2 = Task(info2)
queue.connect()
queue.enqueue(task1)
queue.enqueue(task2)
########NEW FILE########
__FILENAME__ = sync_producer
from retask import Task
from retask import Queue
# Example: enqueue a task, then block until the worker replies.
queue = Queue('example')
info1 = {'user': 'Fedora planet', 'url': 'http://planet.fedoraproject.org'}
task1 = Task(info1)
queue.connect()
job = queue.enqueue(task1)
# wait() blocks (infinite timeout here); afterwards job.result is available.
job.wait()
print job.result
########NEW FILE########
__FILENAME__ = exceptions
# -*- coding: utf-8 -*-
"""
retask.exceptions
~~~~~~~~~~~~~~~~~~~
This module contains the set of Retask's exceptions.
"""
class RetaskException(RuntimeError):
    """Root of the retask exception hierarchy, raised for ambiguous failures."""
class ConnectionError(RetaskException):
    """Raised when communicating with the Redis server fails.

    NOTE(review): this name shadows Python 3's builtin ``ConnectionError``;
    import it explicitly from ``retask.exceptions`` to avoid confusion.
    """
########NEW FILE########
__FILENAME__ = queue
#Copyright (C) 2012, Kushal Das <kushaldas@gmail.com>
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
#of the Software, and to permit persons to whom the Software is furnished to do
#so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
__author__ = 'Kushal Das <kushaldas@gmail.com>'
__copyright__ = 'Copyright (c) 2012-2013 Kushal Das'
__license__ = 'MIT'
__status__ = 'Development'
__version__ = '0.3'
"""
retask Queue implementation
"""
import json
import redis
import uuid
import six
from .task import Task
from .exceptions import ConnectionError
class Queue(object):
    """
    Returns the ``Queue`` object with the given name. If the user
    passes optional config dictionary with details for Redis
    server, it will connect to that instance. By default it connects
    to the localhost.
    """

    def __init__(self, name, config=None):
        """
        :arg name: Name of the queue.
        :arg config: Optional dict overriding any of the default Redis
            connection settings ('host', 'port', 'db', 'password').
        """
        specified_config = config or {}
        self.name = name
        # Namespace the Redis key so queue data cannot collide with other keys.
        self._name = 'retaskqueue-' + name
        self.config = {
            'host': 'localhost',
            'port': 6379,
            'db': 0,
            'password': None,
        }
        self.config.update(specified_config)
        self.rdb = None
        self.connected = False

    @property
    def length(self):
        """
        Gives the length of the queue.

        If the queue is not connected then it will raise
        :class:`retask.ConnectionError`.
        """
        if not self.connected:
            raise ConnectionError('Queue is not connected')

        try:
            length = self.rdb.llen(self._name)
        except redis.exceptions.ConnectionError as err:
            # Re-raise as the retask-level error so callers only need to
            # catch one exception type.
            raise ConnectionError(str(err))

        return length

    def connect(self):
        """
        Creates the connection with the redis server.
        Return ``True`` if the connection works, else returns
        ``False``. It does not take any arguments.

        :return: ``Boolean`` value

        .. note::

           After creating the ``Queue`` object the user should call
           the ``connect`` method to create the connection.

        .. doctest::

           >>> from retask import Queue
           >>> q = Queue('test')
           >>> q.connect()
           True

        """
        config = self.config
        self.rdb = redis.Redis(config['host'], config['port'], config['db'],
                               config['password'])
        try:
            # INFO is used purely as a cheap round-trip to validate the
            # connection; its return value is not needed.
            self.rdb.info()
            self.connected = True
        except redis.ConnectionError:
            return False

        return True

    def wait(self, wait_time=0):
        """
        Returns a :class:`~retask.task.Task` object from the queue. Returns ``False`` if it timeouts.

        :arg wait_time: Time in seconds to wait, default is infinite.

        :return: :class:`~retask.task.Task` object from the queue or False if it timeouts.

        .. doctest::

           >>> from retask import Queue
           >>> q = Queue('test')
           >>> q.connect()
           True
           >>> task = q.wait()
           >>> print task.data
           {u'name': u'kushal'}

        .. note::

           This is a blocking call, you can specify wait_time argument for timeout.

        """
        if not self.connected:
            raise ConnectionError('Queue is not connected')

        # brpop blocks up to wait_time seconds; it returns a (key, value)
        # tuple on success, or None on timeout.
        data = self.rdb.brpop(self._name, wait_time)
        if data:
            task = Task()
            task.__dict__ = json.loads(data[1])
            return task
        else:
            return False

    def dequeue(self):
        """
        Returns a :class:`~retask.task.Task` object from the queue. Returns ``None`` if the
        queue is empty.

        :return: :class:`~retask.task.Task` object from the queue

        If the queue is not connected then it will raise
        :class:`retask.ConnectionError`

        .. doctest::

           >>> from retask import Queue
           >>> q = Queue('test')
           >>> q.connect()
           True
           >>> t = q.dequeue()
           >>> print t.data
           {u'name': u'kushal'}

        """
        if not self.connected:
            raise ConnectionError('Queue is not connected')

        # rpop returns None when the queue is empty, so no separate llen
        # pre-check is needed (the old one was also racy with concurrent
        # consumers).
        data = self.rdb.rpop(self._name)
        if not data:
            return None
        # redis-py returns bytes under Python 3; decode before json parsing.
        if isinstance(data, six.binary_type):
            data = six.text_type(data, 'utf-8', errors='replace')
        task = Task()
        task.__dict__ = json.loads(data)
        return task

    def enqueue(self, task):
        """
        Enqueues the given :class:`~retask.task.Task` object to the queue and returns
        a :class:`~retask.queue.Job` object.

        :arg task: :class:`~retask.task.Task` object
        :return: :class:`~retask.queue.Job` object

        If the queue is not connected then it will raise
        :class:`retask.ConnectionError`.

        .. doctest::

           >>> from retask import Queue
           >>> q = Queue('test')
           >>> q.connect()
           True
           >>> from retask.task import Task
           >>> task = Task({'name':'kushal'})
           >>> job = q.enqueue(task)

        """
        if not self.connected:
            raise ConnectionError('Queue is not connected')

        try:
            # Allocate the Job first so the task carries the URN on which the
            # worker will send back the result (see send()).
            job = Job(self.rdb)
            task.urn = job.urn
            text = json.dumps(task.__dict__)
            self.rdb.lpush(self._name, text)
        except Exception:
            # Preserve the historical API: any failure yields False rather
            # than propagating an exception.
            return False
        return job

    def send(self, task, result, expire=60):
        """
        Sends the result back to the producer. This should be called if only you
        want to return the result in async manner.

        :arg task: :class:`~retask.task.Task` object
        :arg result: Result data to be send back. Should be in JSON serializable.
        :arg expire: Time in seconds after the key expires. Default is 60 seconds.
        """
        # The reply is pushed on the task's URN key; expiry keeps abandoned
        # replies from leaking keys in Redis.
        self.rdb.lpush(task.urn, json.dumps(result))
        self.rdb.expire(task.urn, expire)

    def __repr__(self):
        # The old `if not self:` branch was unreachable: Queue defines
        # neither __bool__ nor __len__, so instances are always truthy.
        return '%s(%r)' % (self.__class__.__name__, self.name)

    def find(self, obj):
        """Returns the index of the given object in the queue, it might be string
        which will be searched inside each task.

        :arg obj: object we are looking

        :return: -1 if the object is not found or else the location of the task
        """
        if not self.connected:
            raise ConnectionError('Queue is not connected')

        data = self.rdb.lrange(self._name, 0, -1)
        for i, datum in enumerate(data):
            # Bug fix: redis-py returns bytes under Python 3, and
            # bytes.find(str) raises TypeError; decode the same way
            # dequeue() does before the substring search.
            if isinstance(datum, six.binary_type):
                datum = six.text_type(datum, 'utf-8', errors='replace')
            if datum.find(str(obj)) != -1:
                return i
        return -1
class Job(object):
    """
    Job object containing the result from the workers.

    :arg rdb: The underlying redis connection.
    """
    def __init__(self, rdb):
        self.rdb = rdb
        # The URN doubles as the Redis key on which the worker's reply
        # (sent via Queue.send) is delivered back to this producer.
        self.urn = uuid.uuid4().urn
        # Name-mangled cache of the decoded result once it has arrived.
        self.__result = None
    @property
    def result(self):
        """
        Returns the result from the worker for this job. This is used to pass
        result in async way.
        """
        # NOTE(review): a falsy result (0, '', [], None) never satisfies this
        # truthiness test, so it is refetched on every access -- confirm intent.
        if self.__result:
            return self.__result
        data = self.rdb.rpop(self.urn)
        if data:
            # Reply consumed: drop the key and cache the decoded payload.
            self.rdb.delete(self.urn)
            data = json.loads(data)
            self.__result = data
            return data
        else:
            return None
    def wait(self, wait_time=0):
        """
        Blocking call to check if the worker returns the result. One can use
        job.result after this call returns ``True``.

        :arg wait_time: Time in seconds to wait, default is infinite.

        :return: `True` or `False`.

        .. note::

           This is a blocking call, you can specify wait_time argument for timeout.
        """
        if self.__result:
            return True
        # brpop blocks for up to wait_time seconds and returns a
        # (key, value) tuple on success, or None on timeout.
        data = self.rdb.brpop(self.urn, wait_time)
        if data:
            self.rdb.delete(self.urn)
            data = json.loads(data[1])
            self.__result = data
            return True
        else:
            return False
########NEW FILE########
__FILENAME__ = release
# Packaging metadata for the retask distribution (consumed by setup.py).
NAME = 'retask'
VERSION = '0.4'
DESCRIPTION = 'Task Queue implementation in python'
LONG_DESCRIPTION = '''Retask is a simple task queue implementation written for human beings. It provides generic solution to create and manage task queues.'''
AUTHOR = 'Kushal Das'
EMAIL = 'kushaldas@gmail.com'
COPYRIGHT = '2012-13 Kushal Das'
URL = 'https://github.com/kushaldas/retask'
LICENSE = 'MIT'
__all__ = ('NAME', 'VERSION', 'DESCRIPTION', 'LONG_DESCRIPTION', 'AUTHOR', 'EMAIL', 'COPYRIGHT', 'URL', 'LICENSE')
########NEW FILE########
__FILENAME__ = task
#Copyright (C) 2012, Kushal Das <kushaldas@gmail.com>
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
#of the Software, and to permit persons to whom the Software is furnished to do
#so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
__author__ = 'Kushal Das <kushaldas@gmail.com>'
__copyright__ = 'Copyright (c) 2012-2013 Kushal Das'
__license__ = 'MIT'
__status__ = 'Development'
__version__ = '0.3'
"""
Task Class
"""
import json
class Task(object):
    """
    A unit of work for a retask queue.

    The payload passed as ``data`` must be JSON serializable; it is kept
    internally as its JSON text representation.

    :kwarg data: Python object which contains information for the task.
    :kwarg raw: When ``True``, ``data`` is already a JSON string and is
        stored untouched.
    :kwarg urn: Optional URN naming the reply channel for this task.
    """

    def __init__(self, data=None, raw=False, urn=None):
        self._data = data if raw else json.dumps(data)
        self.urn = urn

    @property
    def data(self):
        """
        The python object containing information for the current task
        """
        return json.loads(self._data)

    @property
    def rawdata(self):
        """
        The string representation of the actual python objects for the task

        .. note::

           This should not be used directly by the users. This is for internal use
           only.
        """
        return self._data

    def __repr__(self):
        return '{0}({1!r})'.format(type(self).__name__, self.data)
########NEW FILE########
__FILENAME__ = tests
import unittest
import redis
from mock import patch
from retask import Task
from retask import Queue
class ConnectTest(unittest.TestCase):
    """
    Test the connect method
    """
    # NOTE(review): needs a real Redis server on localhost:6379; connect()
    # returns False (failing this test) when none is running.
    def runTest(self):
        queue = Queue('testqueue')
        self.assertTrue(queue.connect())
class LengthTest(unittest.TestCase):
    """
    Tests the length method of the Queue
    """
    # redis.Redis is patched, so no real server is required for this case.
    @patch('redis.Redis')
    def runTest(self, mock_redis):
        m = mock_redis.return_value
        # Any llen() call on the mocked client reports two queued items.
        m.llen.return_value = 2
        queue = Queue('testqueue')
        queue.connect()
        self.assertEqual(queue.length, 2)
class SetTest(unittest.TestCase):
    """
    Sets a task in the Queue
    """
    def runTest(self):
        queue = Queue('testqueue')
        queue.connect()
        t = Task({'name':'kushal'})
        # enqueue() returns a truthy Job on success and False on failure.
        self.assertTrue(queue.enqueue(t))
    def tearDown(self):
        # Delete the raw Redis key backing the queue so runs stay isolated.
        rdb = redis.Redis()
        rdb.delete('retaskqueue-testqueue')
class GetTest(unittest.TestCase):
    """
    Gets a task in the Queue
    """
    def setUp(self):
        # Seed the queue with one known task for runTest to dequeue.
        queue = Queue('testqueue')
        queue.connect()
        t = Task({'name':'kushal'})
        queue.enqueue(t)
    def runTest(self):
        queue = Queue('testqueue')
        queue.connect()
        task = queue.dequeue()
        # NOTE(review): `i` is never used afterwards -- dead binding.
        i = task.data
        self.assertEqual(task.data['name'], 'kushal')
# Run the whole suite when executed directly (most cases need local Redis).
if __name__ == '__main__':
    unittest.main()
########NEW FILE########
| [
"dyangUCI@github.com"
] | dyangUCI@github.com |
c94d59da1ef5d4f9417d826c016759856c5d32d8 | 1d26bc5d5c086fca8fa8bea30bd4b5779e23d25b | /h/services/mark.py | 74743ff47a80c1de8e94381e05b721a3d5952454 | [
"BSD-2-Clause",
"BSD-3-Clause",
"BSD-2-Clause-Views"
] | permissive | maticanzic/h | 65b34e538261f6682efb8b5bc4539b8cc370c6b1 | dc4890e8a29d187315d4f3de5964c9917f2c47c6 | refs/heads/master | 2020-07-10T18:37:44.845423 | 2020-03-01T21:41:24 | 2020-03-01T21:41:24 | 204,337,322 | 0 | 0 | null | 2019-08-25T18:59:30 | 2019-08-25T18:59:29 | null | UTF-8 | Python | false | false | 2,441 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from h import models
class MarkService(object):
    """Service for querying and creating marks on annotations."""

    def __init__(self, session):
        # SQLAlchemy database session used for every query and insert.
        self.session = session

    def marked(self, user, annotation):
        """
        Check if a given user has marked a given annotation.

        :param user: The user to check for a mark.
        :type user: h.models.User

        :param annotation: The annotation to check for a mark.
        :type annotation: h.models.Annotation

        :returns: True/False depending on the existence of a mark.
        :rtype: bool
        """
        query = self.session.query(models.Mark).filter_by(
            user=user, annotation=annotation
        )
        return query.count() > 0

    def all_marked(self, user, annotation_ids):
        """
        Check which of the given annotation IDs the given user has marked.

        :param user: The user to check for a mark.
        :type user: h.models.User

        :param annotation_ids: The IDs of the annotations to check.
        :type annotation_ids: sequence of unicode

        :returns: The subset of the IDs that the given user has marked.
        :rtype: set of unicode
        """
        # SQLAlchemy doesn't behave in the way we might expect when handed an
        # `in_` condition with an empty sequence
        if not annotation_ids:
            return set()

        query = self.session.query(models.Mark.annotation_id).filter(
            models.Mark.annotation_id.in_(annotation_ids), models.Mark.user == user
        )
        # Set comprehension (instead of set([...])) avoids an intermediate list.
        return {f.annotation_id for f in query}

    def mark(self, user, annotation):
        """
        Create a mark for the given user and annotation.

        We enforce the uniqueness of the mark, meaning one user (admin) can only
        mark one annotation once. This method first checks if the annotation
        is already marked by the admin, if that is the case, then this
        is a no-op.

        :param user: The user marking the annotation.
        :type user: h.models.User

        :param annotation: The annotation to be marked.
        :type annotation: h.models.Annotation

        :returns: None
        :rtype: NoneType
        """
        if self.marked(user, annotation):
            return

        mark = models.Mark(user=user, annotation=annotation)
        self.session.add(mark)
def mark_service_factory(context, request):
    """Pyramid service factory returning a MarkService bound to the request's DB session."""
    session = request.db
    return MarkService(session)
| [
"matic.anzic@gmail.com"
] | matic.anzic@gmail.com |
a6cc55e7e446cdc95f81488f5cd85d5dbad8bb7b | 55290a5cb22a87b3affe480c1eaa4653af5394d1 | /apps/storage/migrations/0001_initial.py | 53cd60f40824f7dea0b6724c6500103362b45a1e | [] | no_license | NatanaelMorales/ActividadFinal | 97fd235de51166dde33f89891f1de32d3bf47aac | 3727236b382e5677023f48e4d592c7046cef3840 | refs/heads/master | 2023-08-03T16:07:58.934166 | 2021-09-17T07:50:46 | 2021-09-17T07:50:46 | 407,407,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,123 | py | # Generated by Django 2.0.6 on 2021-09-17 03:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Cita',
fields=[
('pk_cita', models.AutoField(primary_key=True, serialize=False)),
('fecha', models.DateField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='cliente',
fields=[
('pk_cliente', models.AutoField(primary_key=True, serialize=False)),
('nombre_cliente', models.CharField(max_length=20)),
('apellido_cliente', models.CharField(max_length=20)),
('telefono', models.CharField(max_length=8)),
('direccion', models.CharField(max_length=50)),
],
options={
'verbose_name': 'cliente',
'verbose_name_plural': 'clientes',
'ordering': ['nombre_cliente'],
},
),
migrations.CreateModel(
name='mascota',
fields=[
('pk_mascota', models.AutoField(primary_key=True, serialize=False)),
('nombre', models.CharField(max_length=40)),
('descripcion', models.TextField()),
('raza', models.TextField()),
('estado', models.CharField(max_length=50)),
],
options={
'verbose_name': 'mascota',
'verbose_name_plural': 'mascota',
'ordering': ['nombre'],
},
),
migrations.AddField(
model_name='cita',
name='fk_cliente',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='storage.cliente'),
),
migrations.AddField(
model_name='cita',
name='fk_mascota',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='storage.mascota'),
),
]
| [
"rodnymorales739@gmail.com"
] | rodnymorales739@gmail.com |
0cb269eb77b00fc282b0b7a98450a744901f9bee | af4abf0a22db1cebae466c56b45da2f36f02f323 | /parser/fase2/team08/Tytus_SQLPARSER_G8/optimizacion/Instrucciones/C3D/LlamadaC3D.py | dc3d1a8494a7f8ac134381a1aa4f9d6d7c4e705b | [
"MIT",
"BSD-3-Clause"
] | permissive | joorgej/tytus | 0c29408c09a021781bd3087f419420a62194d726 | 004efe1d73b58b4b8168f32e01b17d7d8a333a69 | refs/heads/main | 2023-02-17T14:00:00.571200 | 2021-01-09T00:48:47 | 2021-01-09T00:48:47 | 322,429,634 | 3 | 0 | MIT | 2021-01-09T00:40:50 | 2020-12-17T22:40:05 | Python | UTF-8 | Python | false | false | 594 | py | from optimizacion.Instrucciones.TablaSimbolos.InstruccionC3D import InstruccionC3D
class LlamadaC3D(InstruccionC3D):
    """Three-address-code (C3D) call expression for a named procedure.

    Stores the identifier of the procedure being called; ``ejecutar``
    renders it as a call string of the form ``<id>()``.
    """

    def __init__(self, id, linea, columna):
        # linea/columna are kept by the base class for error reporting.
        InstruccionC3D.__init__(self, linea, columna)
        self.id = id
        print("ENTRO A expresiones")

    def ejecutar(self, tabla, arbol):
        """Return the rendered call ``<id>()``; None when no id was given."""
        super().ejecutar(tabla, arbol)
        print(" linea: " + str(self.linea) + " columna: " + str(self.columna))
        # Fix: the original special-cased self.id == "main" but both
        # branches returned exactly the same string, so the nested
        # conditional collapses to a single return.
        if self.id is not None:
            return self.id + "()"
| [
"michikatrins@gmail.com"
] | michikatrins@gmail.com |
0c8781f6951eaa7ebc1a3b119337df5890b6014c | 2ff2f6634e3da26c4a69374d0ffb7fc4d9485baf | /player.py | c6ccfc153615e91fdc50f609a5a7cde96ed31ca6 | [] | no_license | nsaenzz/Rock-Paper-Scissors | db59edac3c3e25c3c154eb223f471ae4052177f3 | 49b7b84bc70ac0bb722deba049a1196959b166e5 | refs/heads/master | 2022-12-07T20:36:13.884606 | 2020-09-07T15:55:46 | 2020-09-07T15:55:46 | 293,344,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 766 | py | import random
"""Base class shared by every Player in the rock/paper/scissors game."""


class Player:
    """Parent of all player types; concrete players override move()."""

    def __init__(self):
        # Most recent moves played by this player and by the opponent.
        self.my_move = ""
        self.their_move = ""
        # Every legal move in the game.
        self.moves = ['rock', 'paper', 'scissors']

    def move(self):
        """Choose this round's move; each player type supplies its own."""
        pass

    def learn(self, my_move, their_move):
        """Remember both players' moves from the round just played."""
        self.my_move, self.their_move = my_move, their_move

    def randomChoice(self):
        """Return a uniformly random legal move."""
        return random.choice(self.moves)

    def typeOfPlayer(self):
        """Describe the player type; subclasses provide the answer."""
        pass
| [
"neil_saenz@yahoo.com"
] | neil_saenz@yahoo.com |
16badd2fbe280d1152ca490718023aef3087552f | 78873050d2335f6d6df45813e2ac9d974d7dce47 | /dictionary_data_structure1.py | 275c7d2b216d2eb849cd246343c4fdca403b1bb7 | [] | no_license | harshu1470/dictionary | 07af495c7f4f2642ba86936de769d70c50cf43e9 | a041d37370a53eec1414d9e270baf42885cb8aec | refs/heads/master | 2022-11-16T19:27:29.076444 | 2020-07-17T08:57:27 | 2020-07-17T08:57:27 | 280,376,391 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,182 | py | #dictionary
"""Dictionary basics demo: lookup, update, merge and membership tests."""

# Fix: the original bound this dictionary to the name ``dict``, shadowing
# the built-in type for the rest of the module; renamed to ``profile``.
profile = {"name": "harshit", "education": "B.Tech", "steam": "IT"}

# Show the whole dictionary, then its keys and its (key, value) pairs.
print(profile)
print(profile.keys())
print(profile.items())

# Reverse lookup: print every key whose value is "B.Tech".
for key, value in profile.items():
    if value == "B.Tech":
        print(key)

# Forward lookup of the value stored under a key.
print(profile.get("name"))

# Adding a new key/value pair.
profile['class'] = "8th"
print(profile)

# Merge two dictionaries into a fresh one (later values win on clashes).
extra = {"name": "kapil", "colege": "JECRC"}
merged = {}
for source in (profile, extra):
    merged.update(source)
print(merged)

# Membership check written as an explicit scan (prints nothing here,
# since "name1" is not a key).
for key in profile:
    if key == "name1":
        print("it;s already present")


def key_present(a):
    """Report whether key *a* exists in the demo dictionary."""
    if a in profile:
        print("it's already present")
    else:
        print("it's not present")


# Function call demonstrating the membership helper.
key_present("name")

# Print every key alongside its value.
for dict_keys, dict_values in profile.items():
    print(dict_keys, "->", dict_values)
"noreply@github.com"
] | noreply@github.com |
b93260df15ec3b7ec598572a2cee1d41b1db0c22 | 41a672c9505b5b53c58a01d5455acc410949aa24 | /tests/aoutgoing/negative/group/C_39.py | 2f601a227919b3259ac2e7c4da1ce6d2ad77009c | [] | no_license | Alexsorgo/mobile_iOS | b045a0ea058726841c88158be8407b7ae45e893e | 7e298f890b408cedad9db9d0aefeccd9c10d6002 | refs/heads/master | 2022-12-12T17:26:14.039876 | 2020-03-18T06:34:56 | 2020-03-18T06:34:56 | 248,154,882 | 0 | 0 | null | 2021-06-02T01:13:05 | 2020-03-18T06:25:17 | Python | UTF-8 | Python | false | false | 956 | py | from configs import config
from enums import error_enums
from screens.group.group_screen import GroupScreen
from controls.menu import Menu
from tests.aoutgoing.base_test import BaseTest
from utils.logs import log
from utils.verify import Verify
class TestC39(BaseTest):
    """User has the ability to create group chat with 1 more user."""

    # An empty name should be rejected by the group-name validator.
    EMPTY_NAME = ''
    FRIEND = config.AMERICA_FIRSTNAME + ' ' + config.AMERICA_LASTNAME

    def test_c39(self):
        log.info("Create group with empty group name")
        main_menu = Menu(self.driver)
        group_screen = GroupScreen(self.driver)
        # Open the "new group" flow and add exactly one friend.
        main_menu.go_to(main_menu.wenums.GROUPS, [main_menu.wenums.NEW_GROUP])
        group_screen.add_user(self.FRIEND)
        group_screen.tap_done()
        # Attempt to save the group with an empty name.
        group_screen.tap_group_name()
        group_screen.set_group_name(self.EMPTY_NAME)
        group_screen.tap_save()
        log.info("Verify group doesn't create")
        # The minimum-length error must be shown, i.e. no group was made.
        Verify.true(group_screen.error_verify(error_enums.GROUP_NAME_MIN), "Group created")
| [
"oleksii_mishchenko@epam.com"
] | oleksii_mishchenko@epam.com |
255564aec5fbd2217b5838adab632433649e48fb | 4a5f3b26fca176a80ca8eca796bc646bb225b017 | /LSTM/lstm.py | fd4fa6f6352b46b189efd111d6f07ccf1a6d69d2 | [] | no_license | musyoku/NLP | 9a63dc882b07b017f7cfc72d863c4d9e5cbeff5e | 9b040bb960b65fb2a1c330adafa6c52e3284a0c1 | refs/heads/master | 2021-01-21T04:53:57.029200 | 2016-07-10T17:08:03 | 2016-07-10T17:08:03 | 55,848,677 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,963 | py | # -*- coding: utf-8 -*-
import os, time
import numpy as np
import chainer
from chainer import cuda, Variable, optimizers, serializers, function, link
from chainer.utils import type_check
from chainer import functions as F
from chainer import links as L
from bnlstm import BNLSTM
from embed_id import EmbedID
# Map from configuration name to the corresponding chainer activation
# function; looked up by FullyConnectedNetwork.forward_one_step via
# conf.fc_activation_function.
activations = {
    "sigmoid": F.sigmoid,
    "tanh": F.tanh,
    "softplus": F.softplus,
    "relu": F.relu,
    "leaky_relu": F.leaky_relu,
    "elu": F.elu
}
class Conf:
    """Hyper-parameter container for the LSTM language model."""

    def __init__(self):
        self.use_gpu = True
        # Vocabulary size; must be set by the caller before build().
        self.n_vocab = -1
        # Dimensionality of the character embedding vectors.
        self.embed_size = 200
        # Hidden-unit counts of the stacked LSTM layers, listed from the
        # input side towards the output side.  Input and output sizes
        # are not included here, e.g. 500(input) -> 250 -> 100(output)
        # would be written as [250].
        self.lstm_hidden_units = [1000]
        # When True, batch-normalized BNLSTM layers are used instead of
        # plain chainer LSTM layers.
        self.lstm_apply_batchnorm = False
        self.lstm_apply_dropout = False
        # Fully-connected head that maps the LSTM output to either a
        # label distribution or an embedding vector (sizes again exclude
        # the input/output layers).
        self.fc_hidden_units = [500]
        self.fc_apply_batchnorm = False
        self.fc_apply_dropout = False
        self.fc_activation_function = "tanh"
        # OUTPUT_TYPE_EMBED_VECTOR: emit an embedding and invert it with
        # EmbedID.reverse(); OUTPUT_TYPE_SOFTMAX: emit a softmax
        # distribution over label ids.
        self.fc_output_type = LSTM.OUTPUT_TYPE_SOFTMAX
        self.learning_rate = 0.0025
        self.gradient_momentum = 0.95

    def check(self):
        """Validate the layer lists; raise when either network is empty."""
        if not self.lstm_hidden_units:
            raise Exception("You need to add one or more hidden layers to LSTM network.")
        if not self.fc_hidden_units:
            raise Exception("You need to add one or more hidden layers to fully-connected network.")
class LSTMNetwork(chainer.Chain):
    """Stack of (BN)LSTM layers applied in sequence.

    Layers are registered as attributes ``layer_0`` .. ``layer_{n-1}``;
    ``n_layers`` and ``apply_dropout`` are filled in by the builder.
    """

    def __init__(self, **layers):
        super(LSTMNetwork, self).__init__(**layers)
        self.n_layers = 0
        self.apply_dropout = False

    def forward_one_step(self, x, test):
        # Feed the input through every stacked LSTM layer, optionally
        # applying dropout (training-mode only, i.e. when test is False).
        h = x
        for index in range(self.n_layers):
            h = getattr(self, "layer_%i" % index)(h)
            if self.apply_dropout:
                h = F.dropout(h, train=not test)
        return h

    def reset_state(self):
        """Clear the recurrent state of every LSTM layer."""
        for index in range(self.n_layers):
            getattr(self, "layer_%i" % index).reset_state()

    def __call__(self, x, test=False):
        return self.forward_one_step(x, test=test)
class FullyConnectedNetwork(chainer.Chain):
    """Fully-connected head with optional batch-norm and dropout.

    Layers live in attributes ``layer_i`` (and ``batchnorm_i``);
    ``activation_function`` names an entry of the ``activations`` map.
    """

    def __init__(self, **layers):
        super(FullyConnectedNetwork, self).__init__(**layers)
        self.n_layers = 0
        self.activation_function = "tanh"
        self.apply_dropout = False
        self.apply_batchnorm = False

    def forward_one_step(self, x, test):
        f = activations[self.activation_function]
        h = x
        for index in range(self.n_layers):
            # Batch normalization (when enabled) runs on the layer input.
            if self.apply_batchnorm:
                h = getattr(self, "batchnorm_%i" % index)(h, test=test)
            h = f(getattr(self, "layer_%i" % index)(h))
            # Dropout is skipped after the final (output) layer.
            if self.apply_dropout and index != self.n_layers - 1:
                h = F.dropout(h, train=not test)
        return h

    def __call__(self, x, test=False):
        return self.forward_one_step(x, test=test)
class LSTM:
    """LSTM language model wrapper (Python 2 / chainer).

    Bundles the EmbedID input layer, the stacked LSTMNetwork and the
    fully-connected head, each with its own Adam optimizer, and offers
    train/predict/save/load helpers.
    """

    # The FC head emits a softmax distribution over the vocabulary.
    OUTPUT_TYPE_SOFTMAX = 1
    # The FC head emits an embedding vector; ids are recovered with
    # EmbedID.reverse().
    OUTPUT_TYPE_EMBED_VECTOR = 2

    def __init__(self, conf, name="lstm"):
        self.output_type = conf.fc_output_type
        self.embed_id, self.lstm, self.fc = self.build(conf)
        self.name = name

        # One Adam optimizer (with gradient clipping at 10) per sub-model.
        self.optimizer_lstm = optimizers.Adam(alpha=conf.learning_rate, beta1=conf.gradient_momentum)
        self.optimizer_lstm.setup(self.lstm)
        self.optimizer_lstm.add_hook(chainer.optimizer.GradientClipping(10.0))

        self.optimizer_fc = optimizers.Adam(alpha=conf.learning_rate, beta1=conf.gradient_momentum)
        self.optimizer_fc.setup(self.fc)
        self.optimizer_fc.add_hook(chainer.optimizer.GradientClipping(10.0))

        self.optimizer_embed_id = optimizers.Adam(alpha=conf.learning_rate, beta1=conf.gradient_momentum)
        self.optimizer_embed_id.setup(self.embed_id)
        self.optimizer_embed_id.add_hook(chainer.optimizer.GradientClipping(10.0))

    def build(self, conf):
        """Construct and return the (embed_id, lstm, fc) sub-models."""
        conf.check()
        wscale = 1.0

        # Input embedding; ignore_label=-1 skips padding ids.
        embed_id = EmbedID(conf.n_vocab, conf.embed_size, ignore_label=-1)
        if conf.use_gpu:
            embed_id.to_gpu()

        # Stacked (BN)LSTM layer sizes: embed_size -> hidden sizes.
        lstm_attributes = {}
        lstm_units = [(conf.embed_size, conf.lstm_hidden_units[0])]
        lstm_units += zip(conf.lstm_hidden_units[:-1], conf.lstm_hidden_units[1:])

        for i, (n_in, n_out) in enumerate(lstm_units):
            if conf.lstm_apply_batchnorm:
                lstm_attributes["layer_%i" % i] = BNLSTM(n_in, n_out)
            else:
                lstm_attributes["layer_%i" % i] = L.LSTM(n_in, n_out)

        lstm = LSTMNetwork(**lstm_attributes)
        lstm.n_layers = len(lstm_units)
        lstm.apply_dropout = conf.lstm_apply_dropout
        if conf.use_gpu:
            lstm.to_gpu()

        # Fully-connected head; the output width depends on whether it
        # emits an embedding vector or a softmax over the vocabulary.
        fc_attributes = {}
        fc_units = [(conf.lstm_hidden_units[-1], conf.fc_hidden_units[0])]
        fc_units += zip(conf.fc_hidden_units[:-1], conf.fc_hidden_units[1:])
        if conf.fc_output_type == self.OUTPUT_TYPE_EMBED_VECTOR:
            fc_units += [(conf.fc_hidden_units[-1], conf.embed_size)]
        elif conf.fc_output_type == self.OUTPUT_TYPE_SOFTMAX:
            fc_units += [(conf.fc_hidden_units[-1], conf.n_vocab)]
        else:
            raise Exception()

        for i, (n_in, n_out) in enumerate(fc_units):
            fc_attributes["layer_%i" % i] = L.Linear(n_in, n_out, wscale=wscale)
            fc_attributes["batchnorm_%i" % i] = L.BatchNormalization(n_in)
        fc = FullyConnectedNetwork(**fc_attributes)
        fc.n_layers = len(fc_units)
        fc.activation_function = conf.fc_activation_function
        fc.apply_batchnorm = conf.fc_apply_batchnorm
        fc.apply_dropout = conf.fc_apply_dropout
        if conf.use_gpu:
            fc.to_gpu()

        return embed_id, lstm, fc

    def __call__(self, x, test=False, softmax=True):
        # embed -> recurrent stack -> head (optionally softmax-ed).
        output = self.embed_id(x)
        output = self.lstm(output, test=test)
        output = self.fc(output, test=test)
        if softmax and self.output_type == self.OUTPUT_TYPE_SOFTMAX:
            output = F.softmax(output)
        return output

    @property
    def xp(self):
        # numpy on CPU, cupy when the first LSTM layer lives on the GPU.
        return np if self.lstm.layer_0._cpu else cuda.cupy

    @property
    def gpu(self):
        return True if self.xp is cuda.cupy else False

    def reset_state(self):
        self.lstm.reset_state()

    def predict(self, word, test=True, argmax=False):
        """Predict the id following *word*; sample unless argmax is True."""
        xp = self.xp
        c0 = Variable(xp.asarray([word], dtype=np.int32))
        if self.output_type == self.OUTPUT_TYPE_SOFTMAX:
            output = self(c0, test=test, softmax=True)
            if xp is cuda.cupy:
                output.to_cpu()
            if argmax:
                ids = np.argmax(output.data, axis=1)
            else:
                # Sample an id from the predicted distribution.
                ids = [np.random.choice(np.arange(output.data.shape[1]), p=output.data[0])]
        elif self.output_type == self.OUTPUT_TYPE_EMBED_VECTOR:
            output = self(c0, test=test, softmax=False)
            if argmax:
                ids = self.embed_id.reverse(output.data, to_cpu=True, sample=False)
            else:
                ids = self.embed_id.reverse(output.data, to_cpu=True, sample=True)
        return ids[0]

    def distribution(self, word, test=True):
        """Return the softmax distribution over ids following *word*."""
        xp = self.xp
        c0 = Variable(xp.asarray([word], dtype=np.int32))
        output = self(c0, test=test, softmax=True)
        if xp is cuda.cupy:
            output.to_cpu()
        return output.data

    def train(self, seq_batch, test=False):
        """Run one pass over *seq_batch* and return the summed loss.

        The batch is transposed so each step feeds one time-slice of the
        whole batch; consecutive slices form (input, target) pairs.
        """
        self.reset_state()
        xp = self.xp
        sum_loss = 0
        seq_batch = seq_batch.T
        for c0, c1 in zip(seq_batch[:-1], seq_batch[1:]):
            c0 = Variable(xp.asanyarray(c0, dtype=np.int32))
            c1 = Variable(xp.asanyarray(c1, dtype=np.int32))
            output = self(c0, test=test, softmax=False)
            if self.output_type == self.OUTPUT_TYPE_SOFTMAX:
                loss = F.softmax_cross_entropy(output, c1)
            elif self.output_type == self.OUTPUT_TYPE_EMBED_VECTOR:
                # Regress onto the (detached) target embedding.
                target = Variable(self.embed_id(c1).data)
                loss = F.mean_squared_error(output, target)
            else:
                raise Exception()
            sum_loss += loss
        self.zero_grads()
        sum_loss.backward()
        self.update()
        if self.gpu:
            sum_loss.to_cpu()
        return sum_loss.data

    def zero_grads(self):
        self.optimizer_lstm.zero_grads()
        self.optimizer_fc.zero_grads()
        self.optimizer_embed_id.zero_grads()

    def update(self):
        self.optimizer_lstm.update()
        self.optimizer_fc.update()
        self.optimizer_embed_id.update()

    def should_save(self, prop):
        # Persist chainer models, optimizers and the embedding layer only.
        if isinstance(prop, chainer.Chain) or isinstance(prop, chainer.optimizer.GradientMethod) or isinstance(prop, EmbedID):
            return True
        return False

    def load(self, dir=None):
        """Load every saveable attribute from HDF5 files under *dir*."""
        if dir is None:
            raise Exception()
        for attr in vars(self):
            prop = getattr(self, attr)
            if self.should_save(prop):
                filename = dir + "/%s_%s.hdf5" % (self.name, attr)
                if os.path.isfile(filename):
                    print "loading", filename
                    serializers.load_hdf5(filename, prop)
                else:
                    print filename, "missing."
        print "model loaded."

    def save(self, dir=None):
        """Save every saveable attribute as HDF5 under *dir* (created if needed)."""
        if dir is None:
            raise Exception()
        try:
            os.mkdir(dir)
        except:
            pass
        for attr in vars(self):
            prop = getattr(self, attr)
            if self.should_save(prop):
                serializers.save_hdf5(dir + "/%s_%s.hdf5" % (self.name, attr), prop)
        print "model saved."
| [
"musyoku@users.noreply.github.com"
] | musyoku@users.noreply.github.com |
f9ce80fc7c86f46273b2cd1af74496ed58daa089 | 2fb77cffd832f9d80824913acaba89be286c21de | /polls/urls.py | 9b8b1297de4fcd612de92ff4b9fb16aa5ecfb900 | [] | no_license | eolhc/first_tango_w_django | d353612606511f739ba09f635f407f65bf013508 | cafa1a2228f5ddacc506395edf61cfd073d6aa27 | refs/heads/master | 2021-01-13T12:36:03.286231 | 2017-01-09T12:40:10 | 2017-01-09T12:40:10 | 78,404,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 770 | py | from django.conf.urls import url
from . import views
# urlpatterns = [
# # ex: /polls/
# url(r'^$', views.index, name='index'),
# # ex: /polls/5/
# url(r'^(?P<question_id>[0-9]+)/$', views.detail, name='detail'),
# # # ex: /polls/5/results/
# url(r'^(?P<question_id>[0-9]+)/results/$', views.results, name='results'),
# # # ex: /polls/5/vote/
# url(r'^(?P<question_id>[0-9]+)/vote/$', views.vote, name='vote')
# ]
app_name = 'polls'
urlpatterns = [
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^(?P<pk>[0-9]+)/$', views.DetailView.as_view(), name='detail'),
url(r'^(?P<pk>[0-9]+)/results/$', views.ResultsView.as_view(), name='results'),
url(r'^(?P<question_id>[0-9]+)/vote/$', views.vote, name='vote'),
]
| [
"cqphua@gmail.com"
] | cqphua@gmail.com |
b339fef273a720af6207908c8f0f7f502bff6272 | 920fb0402ccff659f76a6ad8bf5ea9c9fa6e3b48 | /python-oq/openquake/settings.py | 5f3767bb7c7f7b6c9d9e21615473d0eed1bc0832 | [] | no_license | arbeit/openquake-packages | 347eaab58fc5446640bd852ffae1a47eae2fea06 | d253f09d7848e6cf32e8c7756551436da413176b | refs/heads/master | 2020-12-25T17:37:55.058808 | 2012-10-23T08:21:20 | 2012-10-23T08:21:20 | 4,167,935 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,874 | py | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010-2012, GEM Foundation.
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""Django settings for OpenQuake."""
from openquake.utils import config
# DEBUG = True
DB_SECTION = config.get_section('database')
def _db_cfg(db_name):
    """
    Helper method to create db config items for the various roles and schemas.

    :param db_name: The name of the database configuration. Configurations for
        this name will be loaded from the site specific config file. If an item
        doesn't exist in the config file, a default value will be used instead.

    :returns: Configuration dict, structured like so::
        {'ENGINE': 'django.contrib.gis.db.backends.postgis',
         'NAME': 'openquake',
         'USER': 'openquake',
         'PASSWORD': 'secret',
         'HOST': 'localhost',
         'PORT': '5432',
        }
    """
    # Per-role credentials come from the [database] config section;
    # the host/port/name settings are shared by every role.
    return dict(
        ENGINE='django.contrib.gis.db.backends.postgis',
        NAME=DB_SECTION.get('name', 'openquake'),
        USER=DB_SECTION.get('%s_user' % db_name, 'openquake'),
        PASSWORD=DB_SECTION.get('%s_password' % db_name, ''),
        HOST=DB_SECTION.get('host', ''),
        PORT=DB_SECTION.get('port', ''),
    )
# Database roles for which a configuration entry is generated below.
_DB_NAMES = (
    'admin',
    'job_init',
    'job_superv',
    'reslt_writer',
)

# One Django database alias per role, all built by _db_cfg().
DATABASES = dict((db, _db_cfg(db)) for db in _DB_NAMES)

DEFAULT_USER = 'admin'

# We need a 'default' database to make Django happy:
DATABASES['default'] = {
    'ENGINE': 'django.db.backends.postgresql_psycopg2',
    'NAME': 'openquake',
    'USER': DB_SECTION.get('%s_user' % DEFAULT_USER, 'oq_admin'),
    'PASSWORD': DB_SECTION.get('%s_password' % DEFAULT_USER, 'openquake'),
    'HOST': '',
    'PORT': '',
}

# Route ORM queries to the role-specific aliases defined above.
DATABASE_ROUTERS = ['openquake.db.routers.OQRouter']

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
TIME_ZONE = 'Europe/Zurich'

LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# Make this unique, and don't share it with anybody.
SECRET_KEY = 'change-me-in-production'
| [
"mh@foldr3.com"
] | mh@foldr3.com |
0afb5da3c5bf377521020e90704fbd297b46c016 | e5b778a273e3888ad0575a9dada39d458158127a | /students/migrations/0009_lesson_icon.py | a4f54c33999a2c365ea2cd47c5cf66dca551542c | [] | no_license | SevenLines/django-tealeaf | 896784baead7b9514e83edad8c3c2defdcdd060b | 959dbcbdd37a4e8f45de400e71710c5e746a97da | refs/heads/master | 2021-01-23T00:01:43.793383 | 2015-05-15T15:58:52 | 2015-05-15T15:58:52 | 17,891,988 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 550 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import filer.fields.image
class Migration(migrations.Migration):
    """Add a nullable FilerImageField ``icon`` to the Lesson model."""

    dependencies = [
        ('filer', '__first__'),
        ('students', '0008_auto_20141128_1807'),
    ]

    operations = [
        migrations.AddField(
            model_name='lesson',
            name='icon',
            # Optional image stored via django-filer; defaults to None.
            field=filer.fields.image.FilerImageField(default=None, blank=True, to='filer.Image', null=True),
            preserve_default=True,
        ),
    ]
| [
"mmailm@mail.ru"
] | mmailm@mail.ru |
b2a23f1789cef25f1bf20ca22704a1b29e2ca619 | 607a1413172b9101165efea7eb8e6cdd7231ef21 | /BMI.py | 727c6c1d4ea0deed3f3b41ff86bc4000a9fe4293 | [] | no_license | zhangxiaofeng9971zxf/moocPython | 822479a83290f49e583260a41bcb7b633012ed95 | a35428842a17e051e8e6b2e8cf6cb8e945342276 | refs/heads/master | 2020-04-17T13:20:55.426988 | 2019-01-24T02:10:35 | 2019-01-24T02:10:35 | 166,611,841 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | height = eval(input('请输入身高m:'))
weight = eval(input('请输入体重kg:'))
def BMI(weight,height):
bmih = height*height
bmi = weight/bmih
if bmi <18.5:
print('国际标准偏瘦')
elif 18.5<bmi<25:
print('国际标准正常')
elif 25<bmi<30:
print('国际标准偏胖')
else:
print('国际肥胖')
print('{:.2f}'.format(bmi))
# Run once with the interactively entered measurements.
BMI(weight,height)
| [
"zhangxiaofeng9971@126.com"
] | zhangxiaofeng9971@126.com |
d7ba5806812a2679ca922d44651cc1c3964fc84e | 7d6373ddb734eb8a8b14c694e2b68d391972d7e0 | /talipp/indicators/OBV.py | 5d25726edea04de42c85c5d6d0feb54b385fc650 | [
"MIT"
] | permissive | b1nhm1nh/talipp | eaeda7c35afda62a81cc19b8ea856d6ab7b06422 | 9300aa8b632bcfe7b48ed63c06fb06793a4a5674 | refs/heads/master | 2023-07-20T19:34:25.272972 | 2021-08-31T15:03:58 | 2021-08-31T15:03:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 845 | py | from typing import List, Any
from talipp.indicators.Indicator import Indicator
from talipp.ohlcv import OHLCV
class OBV(Indicator):
    """
    On Balance Volume

    Output: a list of floats
    """

    def __init__(self, input_values: List[OHLCV] = None):
        super().__init__()
        self.initialize(input_values)

    def _calculate_new_value(self) -> Any:
        # The very first bar seeds the running total with its own volume.
        if len(self.input_values) == 1:
            return self.input_values[0].volume

        current = self.input_values[-1]
        previous = self.input_values[-2]
        running = self.output_values[-1]

        # Volume is added on up-closes, subtracted on down-closes and
        # carried over unchanged when the close is flat.
        if current.close > previous.close:
            return running + current.volume
        if current.close < previous.close:
            return running - current.volume
        return running
"28791551+nardew@users.noreply.github.com"
] | 28791551+nardew@users.noreply.github.com |
366b8ceb78b71f9ddc9827b05f1fed1a6846ef14 | 6e723d5620c6320d3fae1ca7f5d6bbd3413f8953 | /chap13/demo6.py | c017aef0574fdbc001548aa9a8152d49545f486c | [] | no_license | ericprogram/python | 73d55375e931fb0b421d70e85e5dc7ecdf4ab6cd | e992153838a8b7d37e11b225027f394b4fae127f | refs/heads/main | 2023-04-02T21:37:20.350760 | 2021-04-07T13:10:55 | 2021-04-07T13:10:55 | 332,361,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | # --*--coding:utf-8--*--
# author : hmchen
# email : hmchen@cenboomh.com
# date : 2021/1/26 16:49
"""
Object 类
"""
class Student():
    """Simple demo model holding a student's name and age."""

    def __init__(self, name, age):
        self.name = name
        self.age = age

    def __str__(self):
        # Same message as before, rendered with an f-string.
        return f"我的名字是:{self.name},我今年{self.age}岁了"
# Exercise the demo object: dir() lists the attributes inherited from
# object, print() goes through __str__, type() reports the class.
stu = Student("张三", 22)
print(dir(stu))
print(stu)
print(type(stu))
"hmchen@cenboomh.com"
] | hmchen@cenboomh.com |
b4b91b5d66a9d54dce6d0b77cee372aaf37467af | 76effcf4ae0971e3ea0e69b743c21f98485d1d33 | /Python/spidey.py | cb76d83968ddd0982159b4a7bf1401591d94cf23 | [] | no_license | Vutov/SideProjects | 289684b58fef14d63f83954bb68baeb54f37e3fc | ba0e48825e3f90f9da0e7506c89354622198c4a5 | refs/heads/master | 2021-01-23T07:33:53.818539 | 2018-01-19T10:13:06 | 2018-01-19T10:13:06 | 26,778,882 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 883 | py | import requests
from urllib.parse import urljoin
from bs4 import BeautifulSoup
"""Tiny same-site breadth-first crawler for http://bg04b.eu/."""

url = 'http://bg04b.eu/'
# Frontier of pages still to fetch, and every page already discovered.
urls = [url]
visited = [url]

# Gets page's HTML
while len(urls) > 0:
    # Fix: take the page off the frontier BEFORE fetching.  The original
    # popped only after parsing, so a persistently failing URL stayed at
    # urls[0] forever (infinite loop).
    current = urls.pop(0)
    try:
        htmlText = requests.get(current).text
    except requests.RequestException:
        # Fix: the original used a bare except and then fell through to
        # parse htmlText, which was unbound on the first failure (NameError)
        # or stale HTML from the previous page afterwards.  Report and skip.
        print(current)
        continue
    # print(htmlText)
    soup = BeautifulSoup(htmlText)
    # Find all <a> tags with value of href
    for link in soup.findAll('a', href=True):
        # print (link['href'])
        # Normalize urls
        normalizedUrl = urljoin(url, link['href'])
        # print(normalizedUrl)
        # Check if already visited and is proper part of the site
        if url in normalizedUrl and normalizedUrl not in visited:
            urls.append(normalizedUrl)
            visited.append(normalizedUrl)
    # Just to have some info
    print(len(urls))
print(visited)
"VutovSpas@gmail.com"
] | VutovSpas@gmail.com |
4b6f4ab0d4309902059bfe4311720cbd57cae1ae | 19b9bc401df28ede4b4a5f6fa00ee17042299719 | /archive/To09-14/09-10/seongbin_09-10.py | 6914e1c9bc761a600e9f8594b7b6879b289c387e | [] | no_license | kibitzing/FluentPython | fb0c21c164e0bf7a543e8c3c0d92c6f6c303ebb6 | 75c743ce4c5e65c2d891babd23f0e4b873924000 | refs/heads/master | 2021-07-10T23:42:58.224235 | 2019-02-12T14:24:06 | 2019-02-12T14:24:06 | 147,219,835 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 571 | py | Contributors = [(1, 'kbitzing', 'Inha University', 'Electronic', 'Male'),
(3, 'kdhht2334', 'Inha University', 'Electronic', 'Male'),
(2, 'khahn0213', 'Seoul University', 'Mathematics', 'Male'),
(4, 'zelabean', 'Inha University', 'Electronic', 'Male'),
(5, '114569', 'Inha University', 'Electronic', 'Female'),
(6, 'sseung0703', 'Inha University', 'Electronic', 'Male')]
sorted_Contributors = sorted(Contributors)
for idx, id, campus, major, gender in sorted_Contributors:
print(id)
| [
"noreply@github.com"
] | noreply@github.com |
20669507584ec8d470940c096ab228e4dc37fb86 | 1dbb1f10e2c63997478aa96cf0de252de3e7048a | /leetcode-medium-collection/python3/addTwoNumbers.py | 27a32ea0f436cd2faf565052dbb7f815e956d9a2 | [] | no_license | Zchappie/Garage | 1f2e5e39d5143bb2a37c46696873e52f35d990b9 | db739652737b98818a63a6f8e8b7326e1673ec96 | refs/heads/master | 2020-12-26T17:53:41.586483 | 2020-08-30T08:06:31 | 2020-08-30T08:06:31 | 237,585,800 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,788 | py | # Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
# if not l1 or not l2:
# return l1 or l2
# power1 = 0
# num1 = [l1.val]
# head1 = l1
# while head1.next:
# power1 += 1
# head1 = head1.next
# if type(head1) is int:
# num1 += [head1]
# else:
# num1 += [head1.val]
# power2 = 0
# num2 = [l2.val]
# head2 = l2
# while head2.next:
# power2 += 1
# head2 = head2.next
# if type(head2) is int:
# num2 += [head2]
# else:
# num2 += [head2.val]
# digit1 = [pow(10,k) for k in range(power1,-1,-1)]
# integer1 = sum([a*b for a,b in zip(digit1, num1)])
# digit2 = [pow(10,k) for k in range(power2,-1,-1)]
# integer2 = sum([a*b for a,b in zip(digit2, num2)])
"""
IMPORTANT: the next can't be an integer in the linked list.
It could only be None or another ListNode.
"""
carry = 0
sumList = ListNode(0)
sumListTail = sumList
while l1 or l2 or carry:
adder1 = (l1.val if l1 else 0)
adder2 = (l2.val if l2 else 0)
carry, res = divmod(adder1 + adder2 + carry, 10)
sumListTail.next = ListNode(res)
sumListTail = sumListTail.next
l1 = (l1.next if l1 else None)
l2 = (l2.next if l2 else None)
return sumList.next
# Smoke test: 1 + 2 produces a single-node list; print() shows the
# node's default repr (ListNode defines no __str__/__repr__ here).
l1 = ListNode(1)
l2 = ListNode(2)
s = Solution()
print(s.addTwoNumbers(l1, l2))
"zeek.uni@outlook.com"
] | zeek.uni@outlook.com |
95f0a6b1dea85668266643f4da2a8db0c7fd0953 | e6b4a7169daec3b0bef0b17698efd7e3d296aa01 | /primerproyecto/manage.py | f2ce52c8085440235e840bb9f173f223f0781571 | [] | no_license | Billgeorge/Django | 3b46a9132726e99efe14def2514987d9aea11f13 | 893497c226a1090e53fc67b0793d79d831b1520d | refs/heads/master | 2022-12-20T09:39:36.881601 | 2016-01-21T01:48:06 | 2016-01-21T01:48:06 | 50,071,448 | 0 | 1 | null | 2022-12-10T20:19:36 | 2016-01-21T00:52:37 | Python | UTF-8 | Python | false | false | 257 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django entry point: point at this project's settings
    # module and hand the CLI arguments to Django's command dispatcher.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "primerproyecto.settings")

    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)
| [
"jorjek4@hotmail.com"
] | jorjek4@hotmail.com |
97593d01e20dce08a5a4070ab12a31941668ffee | 2dc1e4e9ee004d55fb2ae77f912a826b5689fd39 | /src/domarIRCbot/db.py | 9ccb0b506fbed01a55329299e63866105a9391d8 | [] | no_license | lenzls/eRepCrawler | e8003ee2df23ba9f48be7c2c838b0b2fb7024f4c | e5d44201412ffa65848ef3fe6a3b8a1f1de40107 | refs/heads/master | 2021-05-28T15:34:51.358263 | 2011-04-10T22:55:14 | 2011-04-10T22:55:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,063 | py | #
# Copyright (C) 2010 by Filip Brcic <brcha@gna.org>
#
# Persistent object database support, based on shelve
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import shelve
import multiprocessing
import UserDict
class DB(UserDict.DictMixin):
    """ Wrapper over shelve that does auto-sync on every write
    and avoids throwing errors when keys don't exist, instead returning None.
    Made to be thread-safe.
    """
    def __init__(self, path):
        self.db = shelve.open(path)
        # Re-entrant locks guarding reads and writes on self.db.
        self.readLock = multiprocessing.RLock()
        self.writeLock = multiprocessing.RLock()

    def keys(self):
        self.readLock.acquire()
        keys = self.db.keys()
        self.readLock.release()
        return keys

    def __len__(self):
        self.readLock.acquire()
        dblen = len(self.db)
        self.readLock.release()
        return dblen

    def has_key(self, key):
        self.readLock.acquire()
        result = key in self.db
        self.readLock.release()
        return result

    def __contains__(self, key):
        return self.has_key(key)

    def get(self, key, default=None):
        self.readLock.acquire()
        value = self.db.get(key, default)
        self.readLock.release()
        return value

    def __getitem__(self, key):
        # Fix: delegate to get() so item access takes readLock like every
        # other read path (the original read self.db here without locking).
        return self.get(key)

    def __setitem__(self, key, value):
        self.readLock.acquire()
        self.writeLock.acquire()
        self.db[key] = value
        self.writeLock.release()
        self.readLock.release()
        # Auto-sync: persist every write immediately.
        self.db.sync()

    def __delitem__(self, key):
        # Deleting a missing key is a silent no-op (no KeyError).
        self.readLock.acquire()
        if key in self.db:
            self.writeLock.acquire()
            del self.db[key]
            self.writeLock.release()
        self.readLock.release()

    def close(self):
        self.readLock.acquire()
        self.writeLock.acquire()
        self.db.close()
        self.writeLock.release()
        self.readLock.release()

    def __del__(self):
        self.readLock.acquire()
        self.writeLock.acquire()
        del self.db
        self.writeLock.release()
        self.readLock.release()

    def sync(self):
        self.readLock.acquire()
        self.writeLock.acquire()
        self.db.sync()
        self.writeLock.release()
        self.readLock.release()
class _PutData(object):
    """Queue message: store *value* under *key*."""

    def __init__(self, key, value):
        super(_PutData, self).__init__()
        self.key = key
        self.value = value


class _GetData(object):
    """Queue message: request the value stored under *key*."""

    def __init__(self, key):
        super(_GetData, self).__init__()
        self.key = key


class _ReturnData(object):
    """Queue message: carry a reply *value* back to the caller."""

    def __init__(self, value):
        super(_ReturnData, self).__init__()
        self.value = value


class _Keys(object):
    """Queue message: request the list of keys."""

    def __init__(self):
        super(_Keys, self).__init__()


class _Len(object):
    """Queue message: request the number of stored entries."""

    def __init__(self):
        super(_Len, self).__init__()


class _DelData(object):
    """Queue message: delete the entry stored under *key*."""

    def __init__(self, key):
        super(_DelData, self).__init__()
        self.key = key


class _Die(object):
    """Queue message: tell the manager process to exit its run() loop."""

    def __init__(self):
        super(_Die, self).__init__()
class DBManagerProcess(multiprocessing.Process, UserDict.DictMixin):
    """Dict-like proxy that funnels every access to *db* through one process.

    The public dict methods enqueue request messages (_PutData, _GetData,
    ...); the run() loop is the only code that touches the wrapped DB and
    answers through per-request-type reply queues, each serialized by its
    own lock so concurrent callers cannot consume each other's replies.
    """

    def __init__(self, db):
        super(DBManagerProcess, self).__init__()
        self._messages = multiprocessing.Queue()
        self._db = db
        # One reply queue + lock per read request type.
        self._retKey = multiprocessing.Queue()
        self._retLen = multiprocessing.Queue()
        self._retGet = multiprocessing.Queue()
        self._lckKey = multiprocessing.RLock()
        self._lckLen = multiprocessing.RLock()
        self._lckGet = multiprocessing.RLock()

    def keys(self):
        # Round-trip: enqueue the request, block until run() replies.
        self._lckKey.acquire()
        self._messages.put(_Keys())
        val = self._retKey.get()
        self._lckKey.release()
        return val.value

    def __len__(self):
        self._lckLen.acquire()
        self._messages.put(_Len())
        val = self._retLen.get()
        self._lckLen.release()
        return val.value

    def has_key(self, key):
        return key in self.keys()

    def __contains__(self, key):
        return self.has_key(key)

    def get(self, key, default=None):
        self._lckGet.acquire()
        self._messages.put(_GetData(key))
        val = self._retGet.get()
        self._lckGet.release()
        # Falls back to *default* whenever the returned value is falsy.
        return val.value or default

    def __getitem__(self, key):
        return self.get(key)

    def put(self, key, value):
        # Fire-and-forget write; applied by run() in queue order.
        self._messages.put(_PutData(key, value))

    def __setitem__(self, key, value):
        self.put(key, value)

    def __delitem__(self, key):
        self._messages.put(_DelData(key))

    def __del__(self):
        # Hard-stop the worker if it is still running when collected.
        if self.is_alive():
            self.terminate()

    def die(self):
        # Ask the worker loop to exit cleanly (see _Die handling in run()).
        self._messages.put(_Die())

    def run(self):
        # Worker loop: sole owner of self._db; dispatch on message type.
        while True:
            job = self._messages.get()
            if type(job) is _PutData:
                self._db[job.key] = job.value
            elif type(job) is _GetData:
                self._retGet.put(
                    _ReturnData(self._db[job.key])
                )
            elif type(job) is _Keys:
                self._retKey.put(
                    _ReturnData(self._db.keys())
                )
            elif type(job) is _Len:
                self._retLen.put(
                    _ReturnData(len(self._db))
                )
            elif type(job) is _DelData:
                key = job.key
                if key in self._db:
                    del self._db[key]
            elif type(job) is _Die:
                return
| [
"lenz.simon@googlemail.com"
] | lenz.simon@googlemail.com |
c42d708638a67507a6e3940aec6bc82380a39f72 | 95e792a30d152bcb31f2d19035416349a0bbe1e5 | /galaxyCodeBase/tools/exaremeTools/HEATMAP_HIGHCHART/HEATMAP_HIGHCHART.py | 59a032cfe4821a1b3c7c6135d281cce6a9bae489 | [
"CC-BY-2.5",
"AFL-2.1",
"AFL-3.0",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | madgik/galaxy | b965f5f3680213b21db811f9fafcddb131df312e | 15f9388666fb8f5faa1d3f0fccbb9bf1c4a178ba | refs/heads/master | 2023-04-19T03:30:58.407359 | 2023-04-11T16:05:11 | 2023-04-11T16:05:11 | 174,144,956 | 0 | 0 | NOASSERTION | 2020-11-12T13:13:34 | 2019-03-06T12:53:27 | Python | UTF-8 | Python | false | false | 3,983 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import json
import sys
def getopts(argv):
    """Collect ``-flag value`` pairs from an argv-style token list.

    Tokens that do not start with ``-`` are skipped; each ``-flag`` consumes
    the following token as its value.

    Bug fixes: a trailing flag with no following value is now ignored
    (previously ``argv[1]`` raised IndexError), and an empty-string token no
    longer crashes (``argv[0][0]`` raised IndexError; ``startswith`` is safe).

    :param argv: list of command-line tokens, e.g. ``sys.argv[1:]``
    :return: dict mapping flag strings to their value strings
    """
    opts = {}
    while argv:
        if argv[0].startswith('-'):
            if len(argv) < 2:
                break  # dangling flag without a value: stop instead of crashing
            opts[argv[0]] = argv[1]
            argv = argv[2:]
        else:
            argv = argv[1:]
    return opts
def heatmap(
    inputJson,
    title,
    xtitle,
    ytitle,
    ):
    # Convert a list of three-key records into a Highcharts heatmap config
    # dict (https://www.highcharts.com/demo/heatmap).  The first two keys of
    # each record supply the x / y category labels, the third the cell value.
    # NOTE(review): *title* is accepted but unused -- the chart title is
    # hard-coded to 'Confusion Matrix'; confirm whether it should use title.
    # NOTE(review): key order comes from dict iteration (iteritems -- this is
    # Python 2 code), which is arbitrary on old interpreters; confirm the
    # input preserves the intended column order.
    xcategories = []
    ycategories = []
    mydata = []
    init = True
    for x in inputJson:
        # print 'x',x
        if init is True:
            # Capture the record's key names once, from the first record.
            variablenames = []
            for (key, value) in x.iteritems():
                variablenames.append(str(key))
            init = False
        # print 'variablenames:', str(variablenames)
        # print 'inputdata:', str(x[variablenames[0]]), str(x[variablenames[1]]), str(x[variablenames[2]])
        # Register unseen category labels, then store the cell as
        # [x-index, y-index, value] as the Highcharts series format expects.
        if str(x[variablenames[0]]) not in xcategories:
            xcategories.append(str(x[variablenames[0]]))
        if str(x[variablenames[1]]) not in ycategories:
            ycategories.append(str(x[variablenames[1]]))
        mydata.append([xcategories.index(str(x[variablenames[0]])),
                       ycategories.index(str(x[variablenames[1]])),
                       float(x[variablenames[2]])])
    # Assemble the Highcharts configuration dict.
    consufion_matrix = {
        'chart': {
            'type': 'heatmap',
            'marginTop': 40,
            'marginBottom': 80,
            'plotBorderWidth': 1,
        },
        'title': {'text': 'Confusion Matrix'},
        'xAxis': {'categories': xcategories,
                  'title': {'enabled': True,
                            'style': {'fontWeight': 'normal'},
                            'text': str(xtitle)}},
        'yAxis': {'categories': ycategories,
                  'title': {'enabled': True,
                            'style': {'fontWeight': 'normal'},
                            'text': str(ytitle)}},
        'colorAxis': {'min': 0, 'minColor': '#FFFFFF',
                      'maxColor': '#6699ff'},
        'legend': {
            'align': 'right',
            'layout': 'vertical',
            'margin': 0,
            'verticalAlign': 'top',
            'y': 25,
            'symbolHeight': 280,
        },
        'tooltip': {'enabled': False},
        'series': [{'borderWidth': 1, 'data': mydata,
                    'dataLabels': {'enabled': True, 'color': '#000000'
                                   }}],
    }
    return consufion_matrix
def main():
    """Command-line entry point: read a result-JSON file, convert each entry
    to a Highcharts heatmap config and write the combined JSON to the output
    file.

    Bug fixes over the original:
    - ``outputFile.close`` was referenced but never called; it is now
      ``close()``.
    - after a ValueError while reading/parsing the input the function kept
      going and crashed with NameError on ``inputJson``; it now returns.
    - a ValueError while building one heatmap left ``result`` undefined and
      crashed on ``results.append(result)``; it now skips that entry.
    - ``print`` is used in the parenthesised single-argument form throughout,
      matching the style already used elsewhere in this (Python 2) file.
    """
    args = sys.argv[1:]
    opts = getopts(args)
    if not opts or len(opts) < 5:
        print('Usage:')
        print(' -in Input')
        print(' -t Title')
        print(' -xt xTitle')
        print(' -yt yTitle')
        print(' -o Output')
        return 0

    title = opts.get('-t')
    xtitle = opts.get('-xt')
    ytitle = opts.get('-yt')

    try:
        inputFile = open(opts.get('-in'), 'r')
        inputData = inputFile.read()
        inputFile.close()
        inputJson = json.loads(inputData)
    except ValueError:
        _print_expected_format()
        return 1

    results = []
    for i in xrange(len(inputJson)):
        try:
            result = {
                'data': heatmap(inputJson[i]['result'][0]['data'], title,
                                xtitle, ytitle),
                'type': 'application/vnd.highcharts+json',
            }
        except (ValueError, KeyError, IndexError, TypeError):
            # Malformed entry: report the expected structure and skip it.
            _print_expected_format()
            continue
        results.append(result)

    finalResult = {'result': results}
    outputFile = open(opts.get('-o'), 'w')
    outputFile.write(json.dumps(finalResult))
    outputFile.close()


def _print_expected_format():
    # Helper: describe the JSON structure the tool expects in its input file
    # (this message was duplicated twice in the original main()).
    print("Input file should be:")
    print('[{ "result" : [{')
    print(' "data": [...],')
    print(' "type": "application/json"')
    print(' } , ... ')
    print('] }')
if __name__ == '__main__':
main()
| [
"tkarabatsis@hotmail.com"
] | tkarabatsis@hotmail.com |
af2f6fbc88738658ff7601a7778fe71e41a634e5 | 8959b1441d205e1e875859358e435263e0b1c19f | /Python/debug/td.py | 921eb44824757efc5e7cacce06f443fdcd6b5f23 | [] | no_license | jamesregis/my-bomberman | 01b4a3f065d51f1ec738ffdd18234aa02935c60a | 981c73469c8df127aa1980ac0a683343a4dd58ea | refs/heads/main | 2023-04-18T12:36:20.977097 | 2021-04-22T20:47:05 | 2021-04-22T20:47:05 | 360,639,182 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | import threading
def hello():
    # Timer callback: fired once by the 5-second threading.Timer created
    # below (Python 2 print statement).
    print "hello, world"
def hello2():
    # Timer callback: fired once by the 2-second threading.Timer created
    # below (Python 2 print statement).
    print "hello, world 2!"
# Schedule both callbacks on one-shot timers: hello2 fires after 2 seconds,
# hello after 5 seconds.  start() launches each timer's background thread.
t = threading.Timer(5, hello)
t1 = threading.Timer(2, hello2)
t.start()
t1.start()
| [
"james.regis@bara.ca"
] | james.regis@bara.ca |
b724034fa2c2040a7580228001de6c6eafbd8d43 | 10d33e1e6eb2280720937e239f8733d968b85eac | /myplot-new.py | dc880069f339021ab15860f00fd9eae9ab39e426 | [] | no_license | rdangovs/animation | 8259999dcf8af38eff4783fcc6c26c276e609489 | 390a984399541668e145ddc5f60bd8889d493a88 | refs/heads/master | 2021-01-25T10:56:56.258615 | 2018-03-01T03:38:24 | 2018-03-01T03:38:24 | 123,377,709 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,283 | py | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
ITER = 20000
fig = plt.figure()
ax = fig.add_subplot(111, autoscale_on=False, xlim=(-2.0, 2.0), ylim=(-2.0, 2.0))
ax.grid()
line_lstm, = ax.plot([], [], 'o-', lw=2, color = "blue")
line_eunn, = ax.plot([], [], 's-', lw=2, color = "red")
line_rum, = ax.plot([], [], 'o-', lw=2, color = "green")
line_goru, = ax.plot([], [], 'o-', lw=2, color = "k")
time_template = 'Hid. states: LSTM (blue), EUNN (red), GORU (black), RUM (green); \n Train. iter. = 500, Time step = %d, Procedure: %s.'
time_text = ax.text(0.05, 0.9, '', transform=ax.transAxes)
# create a time array from 0..100 sampled at 0.05 second steps
lstm_data = np.load("./data_LSTM_" + str(ITER) + ".npy")
eunn_data = np.load("./data_EUNN_" + str(ITER) + ".npy")
goru_data = np.load("./data_GORU_" + str(ITER) + ".npy")
rum_data = np.load("./data_RUM_" + str(ITER) + ".npy")
xlstm = lstm_data[:,0]
ylstm = lstm_data[:,1]
xeunn = eunn_data[:,0]
yeunn = eunn_data[:,1]
xgoru = goru_data[:,0]
ygoru = goru_data[:,1]
xrum = rum_data[:,0]
yrum = rum_data[:,1]
def init():
    """Blank all four model traces and the caption before animation starts."""
    for artist in (line_lstm, line_eunn, line_goru, line_rum):
        artist.set_data([], [])
    time_text.set_text('')
    return (line_lstm, line_eunn, line_rum, line_goru, time_text)
def animate(i):
    """Draw frame *i*: one origin-anchored segment per model plus the caption."""
    for artist, xs, ys in (
        (line_lstm, xlstm, ylstm),
        (line_eunn, xeunn, yeunn),
        (line_goru, xgoru, ygoru),
        (line_rum, xrum, yrum),
    ):
        artist.set_data([0, xs[i]], [0, ys[i]])

    # Label the phase of the copying-task procedure by time step.
    if i <= 9:
        phase = "reading"
    elif i <= 110:
        phase = "waiting"
    else:
        phase = "writing"
    time_text.set_text(time_template % (i, phase))

    return (line_lstm, line_eunn, line_rum, line_goru, time_text)
# Animate frames 1..len(xlstm)-1, save the result as an mp4, then show it.
ani = animation.FuncAnimation(fig, animate, np.arange(1, len(xlstm)),
                              interval=50, blit=True, init_func=init)
ani.save('time-series-iter-' + str(ITER) + '.mp4', fps=5)
plt.show()
| [
"Darumen@dhcp-18-111-97-46.dyn.MIT.EDU"
] | Darumen@dhcp-18-111-97-46.dyn.MIT.EDU |
e7a9408e49112ddd9f5aafdb874c3377f4ad2d1c | 767745e9c6207db9f6a9cf4f0be1af4732e7a111 | /raiden/tests/integration/transfer/test_directransfer_invalid.py | d88e7c9f8a32e7f40677ba8b9fc4c75d7c1a3340 | [
"MIT"
] | permissive | gcarq/raiden | ecc91860b99447028baea7fd171c19996644a5ef | 82241c6da9188c4e029aef3bb42f0ab9f055c0e4 | refs/heads/master | 2020-03-10T03:31:55.174762 | 2018-04-11T19:18:21 | 2018-04-11T19:18:21 | 129,167,527 | 0 | 0 | MIT | 2018-04-11T23:52:12 | 2018-04-11T23:52:12 | null | UTF-8 | Python | false | false | 6,738 | py | # -*- coding: utf-8 -*-
import pytest
from raiden.api.python import RaidenAPI
from raiden.messages import DirectTransfer
from raiden.transfer import channel
from raiden.transfer.state import EMPTY_MERKLE_ROOT
from raiden.tests.utils.blockchain import wait_until_block
from raiden.tests.utils.factories import (
UNIT_HASHLOCK,
make_address,
make_privkey_address,
)
from raiden.tests.utils.transfer import (
assert_synched_channel_state,
get_channelstate,
sign_and_inject,
)
@pytest.mark.skip(reason='direct_transfer_async doesnt return AsyncResult anymore')
@pytest.mark.parametrize('channels_per_node', [1])
@pytest.mark.parametrize('number_of_nodes', [2])
def test_failsfast_directtransfer_exceeding_distributable(
        raiden_network,
        token_addresses,
        deposit
):
    # A direct transfer for twice the channel deposit exceeds the channel's
    # distributable amount, so the async call must report failure (falsy
    # result) immediately instead of blocking.
    alice_app, bob_app = raiden_network
    token_address = token_addresses[0]
    async_result = alice_app.raiden.direct_transfer_async(
        token_address,
        deposit * 2,
        bob_app.raiden.address,
        identifier=1,
    )
    assert not async_result.get_nowait()
@pytest.mark.parametrize('number_of_nodes', [2])
@pytest.mark.parametrize('channels_per_node', [1])
def test_receive_directtransfer_invalidtoken(raiden_network, deposit, token_addresses):
    """A direct transfer naming an unknown token must be rejected, leaving
    both channel balances untouched."""
    app0, app1 = raiden_network
    token_address = token_addresses[0]
    channel0 = get_channelstate(app0, app1, token_address)

    # Forge a transfer that references a random, unregistered token address.
    bogus_token = make_address()
    transfer_message = DirectTransfer(
        identifier=1,
        nonce=1,
        token=bogus_token,
        channel=channel0.identifier,
        transferred_amount=0,
        recipient=app1.raiden.address,
        locksroot=EMPTY_MERKLE_ROOT,
    )
    sign_and_inject(
        transfer_message,
        app0.raiden.private_key,
        app0.raiden.address,
        app1,
    )

    # Neither side's balance may have changed.
    assert_synched_channel_state(
        token_address,
        app0, deposit, [],
        app1, deposit, [],
    )
@pytest.mark.parametrize('number_of_nodes', [2])
@pytest.mark.parametrize('channels_per_node', [1])
def test_receive_directtransfer_invalidlocksroot(raiden_network, token_addresses):
    # A direct transfer whose locksroot does not match the receiver's view of
    # the merkle tree must be rejected, leaving both balances untouched.
    app0, app1 = raiden_network
    token_address = token_addresses[0]
    channel0 = get_channelstate(app0, app1, token_address)
    balance0 = channel.get_balance(channel0.our_state, channel0.partner_state)
    balance1 = channel.get_balance(channel0.partner_state, channel0.our_state)
    identifier = 1
    # Any value different from the real (empty) locksroot works here.
    invalid_locksroot = UNIT_HASHLOCK
    channel_identifier = channel0.identifier
    direct_transfer_message = DirectTransfer(
        identifier=identifier,
        nonce=1,
        token=token_address,
        channel=channel_identifier,
        transferred_amount=0,
        recipient=app1.raiden.address,
        locksroot=invalid_locksroot,
    )
    sign_and_inject(
        direct_transfer_message,
        app0.raiden.private_key,
        app0.raiden.address,
        app1,
    )
    # Balances must be exactly as they were before the invalid message.
    assert_synched_channel_state(
        token_address,
        app0, balance0, [],
        app1, balance1, []
    )
@pytest.mark.parametrize('number_of_nodes', [2])
@pytest.mark.parametrize('channels_per_node', [1])
def test_receive_directtransfer_invalidsender(raiden_network, deposit, token_addresses):
    """A direct transfer signed by a key that is not a channel participant
    must be ignored, leaving both balances untouched."""
    app0, app1 = raiden_network
    token_address = token_addresses[0]

    # A third party that has no channel with app0.
    attacker_key, attacker_address = make_privkey_address()
    channel0 = get_channelstate(app0, app1, token_address)

    forged_transfer = DirectTransfer(
        identifier=1,
        nonce=1,
        token=token_address,
        channel=channel0.identifier,
        transferred_amount=10,
        recipient=app0.raiden.address,
        locksroot=EMPTY_MERKLE_ROOT,
    )
    sign_and_inject(
        forged_transfer,
        attacker_key,
        attacker_address,
        app0,
    )

    assert_synched_channel_state(
        token_address,
        app0, deposit, [],
        app1, deposit, []
    )
@pytest.mark.parametrize('number_of_nodes', [2])
@pytest.mark.parametrize('channels_per_node', [1])
def test_receive_directtransfer_invalidnonce(raiden_network, deposit, token_addresses):
    # A second direct transfer reusing an already-consumed nonce must be
    # ignored: afterwards the balances reflect only the first transfer.
    app0, app1 = raiden_network
    token_address = token_addresses[0]
    channel0 = get_channelstate(app0, app1, token_address)
    # First, a valid transfer that consumes nonce 1.
    transferred_amount = 10
    same_identifier = 1
    event = channel.send_directtransfer(
        channel0,
        transferred_amount,
        same_identifier,
    )
    direct_transfer_message = DirectTransfer.from_event(event)
    sign_and_inject(
        direct_transfer_message,
        app0.raiden.private_key,
        app0.raiden.address,
        app1,
    )
    # Send a *different* direct transfer with the *same nonce*
    invalid_transferred_amount = transferred_amount // 2
    invalid_direct_transfer_message = DirectTransfer(
        identifier=same_identifier,
        nonce=1,
        token=token_address,
        channel=channel0.identifier,
        transferred_amount=invalid_transferred_amount,
        recipient=app1.raiden.address,
        locksroot=EMPTY_MERKLE_ROOT,
    )
    sign_and_inject(
        invalid_direct_transfer_message,
        app0.raiden.private_key,
        app0.raiden.address,
        app1,
    )
    # Only the first transfer may have been applied.
    assert_synched_channel_state(
        token_address,
        app0, deposit - transferred_amount, [],
        app1, deposit + transferred_amount, [],
    )
@pytest.mark.parametrize('number_of_nodes', [2])
@pytest.mark.parametrize('channels_per_node', [1])
@pytest.mark.parametrize('settle_timeout', [30])
def test_received_directtransfer_closedchannel(raiden_network, token_addresses, deposit):
    # A direct transfer received after the channel was closed on-chain must
    # not change the local channel state.
    app0, app1 = raiden_network
    token_address = token_addresses[0]
    channel0 = get_channelstate(app0, app1, token_address)
    RaidenAPI(app1.raiden).channel_close(
        token_address,
        app0.raiden.address,
    )
    # Wait for the close transaction to be mined.
    wait_until_block(
        app0.raiden.chain,
        app0.raiden.chain.block_number() + 1,
    )
    # Now receive one direct transfer for the closed channel
    direct_transfer_message = DirectTransfer(
        identifier=1,
        nonce=1,
        token=token_address,
        channel=channel0.identifier,
        transferred_amount=10,
        recipient=app0.raiden.address,
        locksroot=EMPTY_MERKLE_ROOT,
    )
    sign_and_inject(
        direct_transfer_message,
        app0.raiden.private_key,
        app0.raiden.address,
        app1,
    )
    # The local state must not change since the channel is already closed
    assert_synched_channel_state(
        token_address,
        app0, deposit, [],
        app1, deposit, [],
    )
| [
"hack.augusto@gmail.com"
] | hack.augusto@gmail.com |
c60b5a503cfaa9e6f2b04e43b863f1aa2380a421 | ef73a39feb698bf2b8c5d84ca0f05e7d88b3a646 | /Testcase/test_serviceManage.py | 223348b1ddab9a20babadfb3bd92656bfcff5e9f | [] | no_license | hedanhe/ApiTest | ac9d569bce769af76c56ef1b656dc7a5a5266d71 | 0b9aa2e7eff90bfa5f7c98270325fa9dc71bbeaa | refs/heads/master | 2022-11-29T18:35:50.205741 | 2020-08-07T03:58:58 | 2020-08-07T03:58:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,648 | py | #!/usr/bin/env python
# encoding: utf-8
'''
@author: yanghong
@file: test_serviceManage.py
@time: 2020/5/25 11:19
@desc:增值服务管理-服务项管理
'''
import json
import warnings
import allure
import pytest
from Common.Assert import Assertions
from Common.DB import DB_config
from Common.Login import loginAdmin
from Common.logger import Log
from Conf.Config import Config
from Params.param import GetData
warnings.simplefilter("ignore", ResourceWarning)
@allure.epic('增值服务管理')
@allure.feature("服务项管理")
class TestServiceManage:
    """Admin-API tests for the value-added-service "service item management"
    endpoints: account balance query, item detail query, and item save.
    """
    log = Log().getlog()
    db = DB_config()
    # Test-case rows pulled from the Excel data sheet, one list per endpoint.
    allData = GetData(excelFileName='admin_api.xlsx', sheetName='Sheet1')
    test = Assertions()
    accountInfoDate = allData.getTestCaseData(menuName='服务项管理', belongs='accountInfo')
    accountDetailDate = allData.getTestCaseData(menuName='服务项管理', belongs='accountDetail')
    accountSaveDate = allData.getTestCaseData(menuName='服务项管理', belongs='accountSave')

    def setup_class(self):
        # Reuse one successfully logged-in admin session for every test.
        self.base = loginAdmin(usr=Config().adminuser, pwd=Config().adminpwd)

    @allure.severity("normal")
    @allure.title("查询机构的服务项目账户余额")
    @pytest.mark.parametrize("data", accountInfoDate)
    def test_accountInfo(self, data):
        # Query an organisation's service-item account balance and verify the
        # response against the expected JSON from the data sheet.
        apiUrl = data['ApiUrl']
        requestsMethod = data['Method']
        sendData = json.loads(data['Data'])
        expected = json.loads(data['expected'])
        self.log.info('本次使用参数:%s' % sendData)
        r = self.base.sendRequest(apiUrl, requestsMethod, sendData)
        self.log.info('接口返回值:%s' % r.json())
        print('接口返回值:%s' % r.json())
        self.test.verifyExpected(r.json(), expected)

    @pytest.fixture(scope='class')
    def accountDetail(self, request):
        # Indirect fixture: performs the service-item-detail request and
        # returns (response-json, verification-result) for dependent tests.
        apiUrl = request.param['ApiUrl']
        requestsMethod = request.param['Method']
        sendData = json.loads(request.param['Data'])
        expected = json.loads(request.param['expected'])
        self.log.info('本次使用参数:%s' % sendData)
        r = self.base.sendRequest(apiUrl, requestsMethod, sendData)
        self.log.info('接口返回值:%s' % r.json())
        print('接口返回值:%s' % r.json())
        return r.json(), self.test.verifyExpected(r.json(), expected)

    @allure.severity("normal")
    @allure.title("查询机构的服务项目详情")
    @pytest.mark.dependency(name='detail')
    @pytest.mark.parametrize("accountDetail", accountDetailDate, indirect=True)
    def test_accountDetail(self, accountDetail):
        # The fixture's second element is the verification result.
        assert accountDetail[1]

    @allure.severity("normal")
    @allure.title("服务项目保存接口")
    # @pytest.mark.dependency(depends=["detail"])  # (translated) depends on the
    # previous endpoint: skip when it failed; the dependency also needs the
    # earlier endpoint's response value as input.
    @pytest.mark.parametrize("accountDetail", accountDetailDate, indirect=True)
    @pytest.mark.parametrize("data", accountSaveDate, indirect=False)
    def test_serviceUpdata(self, accountDetail, data):
        # Save the detail payload back unchanged and verify the response.
        apiUrl = data['ApiUrl']
        requestsMethod = data['Method']
        sendData = json.loads(data['Data'])
        sendData['accountDetail'] = json.dumps(accountDetail[0]['data'])  # save as-is, without modifying the data
        expected = json.loads(data['expected'])
        self.log.info('本次使用参数:%s' % sendData)
        r = self.base.sendRequest(apiUrl, requestsMethod, sendData)
        self.log.info('接口返回值:%s' % r.json())
        print('接口返回值:%s' % r.json())
        self.test.verifyExpected(r.json(), expected)
| [
"hong.yang@qizhixinxi.com"
] | hong.yang@qizhixinxi.com |
89eea659b56e7e721f9943ae5b3106bc2d905972 | b8a373f6ef648da7eb50b193723a17e1cb793504 | /djangoapp/frecord.py | 3bfb8665c2edd53c1829a8dc70f3269cd0e5d869 | [] | no_license | strongqli/visualstock | 37627e7fe3c8ae21cf2fff646bebd764a97f02da | 18662a34db3734604f59a330babfed6cf7300787 | refs/heads/master | 2016-09-06T01:50:41.566194 | 2014-03-20T09:21:39 | 2014-03-20T09:21:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,878 | py | # This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# Feel free to rename the models, but don't rename db_table values or field names.
#
# Also note: You'll have to insert the output of 'django-admin.py sqlcustom [appname]'
# into your database.
from __future__ import unicode_literals
from django.db import models
class FinanceRecord(models.Model):
    # Quarterly financial-report record for one stock, auto-generated from an
    # existing database table (note the Python 2 long literals ``6L`` etc.).
    stock_no = models.CharField(max_length=6L)
    stock_name = models.CharField(max_length=10L, blank=True)
    link = models.CharField(max_length=128L, blank=True)
    eps = models.DecimalField(null=True, max_digits=12, decimal_places=4, blank=True)
    revenue = models.DecimalField(null=True, max_digits=22, decimal_places=2, blank=True)
    revenue_growth_byyear = models.DecimalField(null=True, max_digits=12, decimal_places=2, blank=True)
    revenue_growth_byquar = models.DecimalField(null=True, max_digits=12, decimal_places=2, blank=True)
    earning = models.DecimalField(null=True, max_digits=22, decimal_places=2, blank=True)
    earning_growth_byyear = models.DecimalField(null=True, max_digits=12, decimal_places=2, blank=True)
    earning_growth_byquar = models.DecimalField(null=True, max_digits=12, decimal_places=2, blank=True)
    net_assets = models.DecimalField(null=True, max_digits=12, decimal_places=4, blank=True)
    roe = models.DecimalField(null=True, max_digits=12, decimal_places=2, blank=True)
    cash_flow_pershare = models.DecimalField(null=True, max_digits=12, decimal_places=4, blank=True)
    margin = models.DecimalField(null=True, max_digits=12, decimal_places=2, blank=True)
    issuedate = models.DateField(null=True, blank=True)
    class Meta:
        # Map onto the pre-existing table rather than a Django-named one.
        db_table = 'finance_record'
"strongqli@gmail.com"
] | strongqli@gmail.com |
cc48085dc7ade65b50e1a12660038561569afbf8 | 9032a759b29da1c5f6a3aeb710e06c2aa6080a54 | /03_zhihu/zhihu/items.py | 2f44ad005e7a4166258e10bfcb1b47949f0accef | [
"MIT"
] | permissive | GongkunJiang/MySpider | 5e5aa89348a8a94cb1b3b807dcbfe1662e534d14 | 8c088f696679b13568843af521279f9f25f40314 | refs/heads/master | 2020-03-27T10:32:14.768792 | 2018-12-07T11:35:18 | 2018-12-07T11:35:18 | 146,427,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 517 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
from scrapy.item import Item, Field
class ZhihuItem(Item):
    # Scrapy item describing one scraped Zhihu question.
    # define the fields for your item here like:
    # name = scrapy.Field()
    url = Field()  # URL of the scraped question
    title = Field()  # title of the scraped question
    description = Field()  # description of the scraped question
    answer = Field()  # answer to the scraped question
    name = Field()  # display name of the individual user
| [
"jgk19950903@gmail.com"
] | jgk19950903@gmail.com |
0d9da472ddc42d26ecd7df9af9a4c9d5fff890c6 | c638d7f179f12bd6fb8f62bb00467e5b5fd97e8e | /python/0015.py | 13e11a869843d9125ea5f619b86c0c7c77cbffb0 | [] | no_license | colblitz/projectEulerTeamCarrot | 32044477fcda892e338e628f4d44db2760029b12 | 23284f4defb8ddab33feefea8f5ee2fa162cfd67 | refs/heads/master | 2021-01-18T20:20:22.758054 | 2014-06-14T08:13:04 | 2014-06-14T08:13:04 | 8,715,736 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 979 | py | from math import *
from util import *
#######################
# Project Euler #0015 #
# Team Carrot: #
# Joseph Lee #
# Benjamin Lee #
# Stephanie Yu #
#######################
'''
Starting in the top left corner of a 2×2 grid, and only being able to move to the right and down, there are exactly 6 routes to the bottom right corner.
How many such routes are there through a 20×20 grid?
'''
# Version 01
@profile
def main01():
    """Count monotone lattice paths through a 20x20 grid (Project Euler 15).

    Dynamic programming on a 21x21 grid of lattice points: each point's path
    count is the sum of the counts of its upper and left neighbours, so the
    bottom-right corner ends up holding C(40, 20).
    """
    size = 21
    paths = [[0] * size for _ in range(size)]
    paths[0][0] = 1
    for row in range(size):
        for col in range(size):
            if row == 0 and col == 0:
                continue  # the origin keeps its seed value of 1
            total = 0
            if row > 0:
                total += paths[row - 1][col]
            if col > 0:
                total += paths[row][col - 1]
            paths[row][col] = total
    printAnswer(paths[size - 1][size - 1])
main01()
| [
"z.joseph.lee.z@gmail.com"
] | z.joseph.lee.z@gmail.com |
737b5c13e51941c403388b2ef31e09078d6855db | f565655ca13bc13053f6809031bfaab88041a5fb | /analyzer/views.py | 3e59543bfdbeccae60bc408141a1d0e70c2796de | [
"Apache-2.0"
] | permissive | multipathtester/mptcpbench-apps | 7d34fa116295ee50a0d397d08e7a5858c1c37959 | 12a7ff1fbb95041e7e5adf551377c8cb97f62b7c | refs/heads/master | 2020-06-04T06:27:08.342550 | 2019-06-14T08:19:36 | 2019-06-14T08:19:36 | 191,904,136 | 0 | 0 | null | 2019-06-14T08:19:39 | 2019-06-14T08:17:36 | Python | UTF-8 | Python | false | false | 1,720 | py | from django.shortcuts import render
from django.http import Http404
from django.contrib.auth.decorators import login_required
from .mobility_helper import get_mobile_stats
from .benchmark_helper import get_benchmark_stats
from .multipath_helper import get_multipath_stats
from .mobility_detail import get_mobility_detail, get_quic_mobility_detail
@login_required
def mobility_info(request):
    """Render aggregate mobility statistics; superusers only (404 otherwise)."""
    if not request.user.is_superuser:
        raise Http404

    # Optional GET filters; only_mobility is a presence flag.
    params = request.GET
    context = get_mobile_stats(
        version_operator=params.get("version_operator", None),
        version=params.get("version", None),
        only_mobility=params.get("only_mobility", None) is not None,
    )
    return render(request, 'analyzer/mobility.html', context)
@login_required
def benchmark_info(request):
    """Render benchmark statistics; superusers only (404 otherwise)."""
    if request.user.is_superuser:
        context = get_benchmark_stats()
        return render(request, 'analyzer/benchmark.html', context)
    raise Http404
@login_required
def multipath_info(request):
    # Superuser-only page showing aggregate multipath statistics.
    if not request.user.is_superuser:
        raise Http404
    data_dict = get_multipath_stats()
    return render(request, 'analyzer/multipath.html', data_dict)
@login_required
def mobility_detail(request):
    # Superuser-only mobility detail page.
    # NOTE(review): data_dict is computed but never passed to the template --
    # the page renders with an empty context.  Confirm whether render() should
    # receive data_dict instead of {} (or the computation should be dropped).
    if not request.user.is_superuser:
        raise Http404
    data_dict = get_mobility_detail()
    return render(request, 'analyzer/mobility.html', {})
@login_required
def quic_mobility_detail(request):
    # Superuser-only QUIC mobility detail page.
    # NOTE(review): like mobility_detail above, data_dict is computed but the
    # template is rendered with an empty context -- confirm intent.
    if not request.user.is_superuser:
        raise Http404
    data_dict = get_quic_mobility_detail()
    return render(request, 'analyzer/mobility.html', {})
| [
"quentin.deconinck@uclouvain.be"
] | quentin.deconinck@uclouvain.be |
764623587649698f98cff8508d79386a867a299d | 5b7f989b51e3fcc626f118c01aae6bc8d1c48144 | /finance/finance/yafinance 10.34.31 PM/migrations/0001_initial.py | d73aca49462fb6547c345c22cf4f4ee2912b4a0a | [] | no_license | Lucysaiheng/Yafinance | 3b2cbf10cfe9ce5d310cbe69aec3459ba3306ba6 | 88bc5c16458aa291580336eb36d7fc73d6926b91 | refs/heads/master | 2020-03-14T18:19:00.262241 | 2018-05-03T02:34:51 | 2018-05-03T02:34:51 | 131,666,594 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,046 | py | # Generated by Django 2.0 on 2018-04-30 09:21
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Company',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('cid', models.CharField(max_length=100)),
('url', models.CharField(blank=True, max_length=100, null=True)),
('region', models.CharField(blank=True, max_length=100, null=True)),
('latitude', models.DecimalField(decimal_places=2, max_digits=10, null=True)),
('longtitude', models.DecimalField(decimal_places=2, max_digits=10, null=True)),
('previous_close', models.DecimalField(blank=True, decimal_places=10, max_digits=20, null=True)),
('copen', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
('bid', models.CharField(blank=True, max_length=50, null=True)),
('ask', models.CharField(blank=True, max_length=50, null=True)),
('volume', models.IntegerField(blank=True, null=True)),
('avgvolume', models.IntegerField(blank=True, null=True)),
('market_cap', models.CharField(blank=True, max_length=50, null=True)),
('EPS', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
('day_min', models.DecimalField(blank=True, decimal_places=2, max_digits=20, null=True)),
('day_max', models.DecimalField(blank=True, decimal_places=2, max_digits=20, null=True)),
('week_min', models.DecimalField(blank=True, decimal_places=2, max_digits=20, null=True)),
('week_max', models.DecimalField(blank=True, decimal_places=2, max_digits=20, null=True)),
],
),
]
| [
"asaiheng@gmail.com"
] | asaiheng@gmail.com |
3747c58466d61b4927db56413de77336dc18c23c | a29a73de4df917da642adec96286d7ed3b2a0a42 | /myDPPO30/utils.py | 517d56784cc6eb43201f5f64de6650e156de4859 | [
"MIT"
] | permissive | tankche1/Learn-To-Run | 9f0546f2d2c74cf18879579a3ccb2aeb3bea2765 | 27a48c8e1ec5864ab58caa9df4098a1089641cc0 | refs/heads/master | 2021-03-24T11:07:15.949621 | 2017-10-18T14:43:41 | 2017-10-18T14:43:41 | 101,266,609 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,021 | py | import math
import numpy as np
import torch
import torch.multiprocessing as mp
class TrafficLight:
    """Process-shared boolean flag: the chief flips it to allow or pause the
    workers.

    NOTE(review): the *val* constructor argument is ignored -- the flag
    always starts as False, exactly as in the original implementation.
    """
    def __init__(self, val=True):
        self.val = mp.Value("b", False)
        self.lock = mp.Lock()

    def get(self):
        # Read the flag under the lock so readers never observe a torn update.
        self.lock.acquire()
        try:
            return self.val.value
        finally:
            self.lock.release()

    def switch(self):
        # Atomically invert the flag.
        self.lock.acquire()
        try:
            self.val.value = not self.val.value
        finally:
            self.lock.release()
class Counter:
    """Process-shared update counter: workers increment it, the chief reads
    and resets it.

    NOTE(review): the *val* constructor argument is ignored -- the count
    always starts at zero, exactly as in the original implementation.
    """
    def __init__(self, val=True):
        self.val = mp.Value("i", 0)
        self.lock = mp.Lock()

    def get(self):
        # Chief side: read the current count under the lock.
        self.lock.acquire()
        try:
            return self.val.value
        finally:
            self.lock.release()

    def increment(self):
        # Worker side: record one completed update.
        self.lock.acquire()
        try:
            self.val.value = self.val.value + 1
        finally:
            self.lock.release()

    def reset(self):
        # Chief side: zero the count after consuming it.
        self.lock.acquire()
        try:
            self.val.value = 0
        finally:
            self.lock.release()
def normal_entropy(std):
    """Entropy of diagonal Gaussians with standard deviation *std*, summed
    over dimension 1 (closed form: 0.5 + 0.5 * log(2 * pi * sigma^2))."""
    variance = std.pow(2)
    per_dim = 0.5 * torch.log(2 * variance * math.pi) + 0.5
    return per_dim.sum(1)
def normal_log_density(x, mean, log_std, std):
    """Log-density of *x* under diagonal Gaussians (mean, std given per
    dimension), summed over dimension 1."""
    variance = std.pow(2)
    quadratic = (x - mean).pow(2) / (2 * variance)
    per_dim = -quadratic - 0.5 * math.log(2 * math.pi) - log_std
    return per_dim.sum(1)
def get_flat_params_from(model):
    """Concatenate every parameter tensor of *model* into one flat vector,
    in model.parameters() order."""
    return torch.cat([param.data.view(-1) for param in model.parameters()])
def set_flat_params_to(model, flat_params):
    """Copy consecutive slices of the flat vector *flat_params* back into
    *model*'s parameters, in model.parameters() order."""
    offset = 0
    for param in model.parameters():
        count = param.numel()
        chunk = flat_params[offset:offset + count]
        param.data.copy_(chunk.view(param.size()))
        offset += count
def get_flat_grad_from(net, grad_grad=False):
    """Flatten and concatenate the gradient (or the gradient-of-gradient
    when *grad_grad* is True) of every parameter of *net*."""
    pieces = []
    for param in net.parameters():
        grad = param.grad.grad if grad_grad else param.grad
        pieces.append(grad.view(-1))
    return torch.cat(pieces)
| [
"15307130191@fudan.edu.cn"
] | 15307130191@fudan.edu.cn |
776377c3da71f0b0d361f79fd983ddb6ef38ab0b | 5df19cecae55fd4852d2d89620b059a5d6d76ad0 | /Koo/Fields/Calendar/__terp__.py | 8bcfacca93c271a5be0bcca132ea10ceccebe719 | [] | no_license | gisce/openerp-client-kde | 5f8acbad46f83b9b493029b2f5518dc8576782f7 | b17aabb2e9c6cece0c855f665047d342701ca78f | refs/heads/master | 2023-03-03T01:03:59.272851 | 2023-02-15T11:02:44 | 2023-02-15T11:02:44 | 114,909,552 | 2 | 3 | null | 2023-02-15T11:00:38 | 2017-12-20T16:41:43 | Python | UTF-8 | Python | false | false | 2,057 | py | ##############################################################################
#
# Copyright (c) 2008 Albert Cervera i Areny <albert@nan-tic.com>
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
# Registry consumed by Koo's plugin loader: one entry per (field type, role)
# pair, mapping calendar-style field types to the form-widget class or the
# list-view delegate class that renders them.
[
	{ 'name': 'date', 'type': 'widget', 'class': 'CalendarFieldWidget.DateFormWidget' },
	{ 'name': 'time', 'type': 'widget', 'class': 'CalendarFieldWidget.TimeFormWidget' },
	{ 'name': 'datetime', 'type': 'widget', 'class': 'CalendarFieldWidget.DateTimeFormWidget' },
	{ 'name': 'float_time', 'type': 'widget', 'class': 'CalendarFieldWidget.FloatTimeFormWidget' },
	{ 'name': 'date', 'type': 'delegate', 'class': 'CalendarFieldWidget.DateFieldDelegate' },
	{ 'name': 'time', 'type': 'delegate', 'class': 'CalendarFieldWidget.TimeFieldDelegate' },
	{ 'name': 'datetime', 'type': 'delegate', 'class': 'CalendarFieldWidget.DateTimeFieldDelegate' },
	{ 'name': 'float_time', 'type': 'delegate', 'class': 'CalendarFieldWidget.FloatTimeFieldDelegate' }
]
| [
"albert@nan-tic.com"
] | albert@nan-tic.com |
9ef06f88374271a107c201be2ffdddafe32e9977 | 369f2d06856d70a6f68b8f81bca203ebce8e3a89 | /wholesale_data.py | 11925ef848f12aea4489e1a25360e244ecb966c3 | [] | no_license | Enantiodromis/Data_Mining_UCI_Machine_Learning | ffec3a22690dcbc2f30a7a08e1ddef1a12141c0e | 79b23b3eeae02befbbbc3a7ed03331122d5d911e | refs/heads/master | 2023-09-01T05:08:27.897012 | 2021-10-23T12:01:04 | 2021-10-23T12:01:04 | 337,441,437 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,576 | py | # AUTHOR: GEORGE BRADLEY
# LAST EDIT: 18/02/2021
# TITLE: CW_PART_2.PY
import numpy as np
import pandas as pd
from prettytable import PrettyTable
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics.pairwise import euclidean_distances
import warnings
import matplotlib.cbook
warnings.filterwarnings("ignore",category=matplotlib.cbook.mplDeprecation)
def read_data(data_path, data_cols, excluded_columns = ''):
    """Read the columns *data_cols* from the CSV at *data_path*.

    Returns the resulting DataFrame.  When *excluded_columns* names a column,
    returns a pair ``(full_df, df_without_that_column)`` instead.
    """
    frame = pd.read_csv(data_path, usecols=data_cols)
    if len(excluded_columns):
        # Also produce a copy with the named column dropped.
        trimmed = frame.drop([excluded_columns], axis=1)
        return frame, trimmed
    return frame
# The mean_and_ranges_df() function is used to retrieve the means and ranges
# for every column of a specified dataframe
def mean_and_ranges_df(data_frame):
    # Print a PrettyTable with one row per column: mean, min and max.
    # (Assumes every column is numeric enough for mean() -- TODO confirm.)
    table = PrettyTable() # Creating a table using prettyTable
    table.field_names = ["COLUMN","MEAN","MIN","MAX"] # Defining column names
    table.align["COLUMN"] = "l" #
    table.align["MEAN"] = "c" # Formatting the position
    table.align["MIN"] = "c" # of column names
    table.align["MAX"] = "c" #
    for col in data_frame.columns: # Iterating over all the columns within a specified
        col_mean = data_frame[col].mean() # Calculating the mean for the column
        col_max = data_frame[col].max() # Calculating the max for the column
        col_min = data_frame[col].min() # Calculating the min for the column
        table.add_row([col, col_mean, col_min, col_max]) # Adding all the above calculated data as a row in the table
    print(table) # Displaying the table
# The bc_distance_calc() function is used to calculate the between cluster score.
def bc_distance_calc(centroids):
    """Return the between-cluster (BC) score for a set of cluster centres.

    The score is the sum of squared pairwise centroid distances; distances
    are first rounded to 4 decimals and de-duplicated (which also drops all
    but one of the zero self-distances on the matrix diagonal).
    """
    pairwise = euclidean_distances(centroids)
    distinct = np.unique(np.around(pairwise, decimals=4))
    return np.sum(np.square(distinct))
# The display_cluster_data() function prints the within cluster and between cluster distances and scores
def display_cluster_data(wc, bc, clusters):
    """Print WC, BC and the BC/WC ratio for one k or a list of k values.

    ``wc`` and ``bc`` are scalars when ``clusters`` is a single k, or
    parallel lists of the same length when ``clusters`` is a list.
    """
    if isinstance(clusters, list):
        for k, w, b in zip(clusters, wc, bc):
            print("K:" + str(k) + " WC:" + str(w) + " BC:" + str(b) + " BC/WC:" + str(b/w))
    else:
        print("K:" + str(clusters) + " WC:" + str(wc) + " BC:" + str(bc) + " BC/WC:" + str(bc/wc))
# The scatter_plotting() function is used to plot the scatterplots when triggered.
def scatter_plotting(data_frame, labels, centroids, clusters):
    """Draw one scatter subplot per attribute pair, coloured by cluster.

    Args:
        data_frame: the clustered data (one column per attribute).
        labels: per-row cluster assignment from the fitted KMeans model.
        centroids: cluster centres, shape (clusters, n_attributes).
        clusters: number of clusters (also scales the figure size).
    """
    nrows = 5   # subplot grid rows
    ncols = 3   # subplot grid columns
    cols = data_frame.columns
    num_columns = len(cols)
    scaler = clusters * 2  # grow the figure with the cluster count so it stays legible
    # One distinct colour per cluster (supports up to 13 clusters).
    colors = ["blue","orange","purple","yellow","pink","magenta","beige","brown","gray","cyan","black","red","green",]
    # Creating the figure
    fig = plt.figure(figsize=(scaler*ncols, scaler*nrows))
    fig.subplots_adjust(wspace=0.4)  # horizontal spacing between subplots
    fig.subplots_adjust(hspace=0.4)  # vertical spacing between subplots
    sub_idx = 1  # 1-based subplot index
    for col_1 in range(num_columns):               # first attribute of the pair
        for col_2 in range(col_1+1, num_columns):  # second attribute (unique pairs only)
            for cluster_num in range(clusters):
                fig.add_subplot(nrows, ncols, sub_idx)
                # Rows belonging to this cluster, projected on the two attributes.
                Label_col_1 = data_frame[labels == cluster_num].iloc[:,col_1]
                Label_col_2 = data_frame[labels == cluster_num].iloc[:,col_2]
                plt.scatter(x=Label_col_1, y=Label_col_2, c=colors[cluster_num],s=30,linewidths=0,alpha=0.5, label="Cluster " + str(cluster_num))
            plt.scatter(centroids[:,col_1],centroids[:,col_2], marker='s', s=30, color='k')  # centroids as black squares
            plt.legend(fontsize='8',)
            plt.title(str(cols[col_1]) + " & " + str(str(cols[col_2])) + " pair", fontsize = 10)
            plt.xlabel(str(cols[col_1]), fontsize = 10)
            plt.ylabel(str(cols[col_2]), fontsize = 10)
            sub_idx += 1
    # NOTE(review): cluster_num here is whatever the last loop iteration left
    # behind (clusters - 1), so exactly one PNG is written -- confirm intended.
    fig.savefig('scatter'+str(cluster_num)+'.png', dpi=250)
# The k_means_pairs_run() function is used run the kmeans algorithm with every possible pair of attributes from the dataframe
def k_means_pairs_run(data_frame, clusters, plotting = False, cluster_data = False):
    """Fit KMeans on the DataFrame and optionally plot / report statistics.

    Args:
        data_frame: data to cluster.
        clusters: a single k (int) or a list of k values to run in sequence.
        plotting: when True, draw the per-pair scatter plots for each run.
        cluster_data: when True, print WC/BC statistics for each run.

    random_state=0 is fixed throughout so runs are reproducible.
    """
    if not isinstance(clusters, list):  # single-k path
        kmeans = KMeans(n_clusters=clusters, random_state=0)
        model = kmeans.fit(data_frame)
        labels = kmeans.labels_            # per-row cluster assignment
        centroids = model.cluster_centers_
        wc_value = model.inertia_          # within-cluster score (sum of squared distances)
        bc_distance = bc_distance_calc(centroids)  # between-cluster score
        if plotting == True:
            scatter_plotting(data_frame, labels, centroids, clusters)
        if cluster_data == True:
            display_cluster_data(wc_value, bc_distance, clusters)
    else:  # multi-k path: same steps per k, accumulating the WC/BC scores
        wc_list = []
        bc_store = []
        for k_number in clusters:
            kmeans = KMeans(n_clusters=k_number, random_state=0)
            model = kmeans.fit(data_frame)
            labels = model.labels_
            centroids = model.cluster_centers_
            wc_values = model.inertia_
            bc_distance = bc_distance_calc(centroids)
            wc_list.append(wc_values)
            bc_store.append(bc_distance)
            if plotting == True:
                scatter_plotting(data_frame, labels, centroids, k_number)
        if cluster_data == True:
            display_cluster_data(wc_list, bc_store, clusters)
# # # # # # # # # # # # #
#  CALLING THE FUNCTIONS #
# # # # # # # # # # # # #
# Reading in the required csv file and creating the dataframes used for the rest of the solutions.
# NOTE(review): "wholsesale_df" is misspelled but used consistently below, so it
# is left untouched here.
data_cols_1 = ['Fresh','Milk','Grocery','Frozen','Detergents_Paper','Delicassen']
wholsesale_df = read_data("data/wholesale_customers.csv",data_cols_1)
# Question 2.1:
# Create a table in the report with the mean and range (min and max) for each attribute.
mean_and_ranges_df(wholsesale_df)
# Question 2.2:
# Run k-means with k = 3 and construct a scatterplot for each pair of attributes using Pyplot.
# Therefore, 15 scatter plots should be constructed in total. Different clusters should appear with different colors in the scatter plot.
k_means_pairs_run(wholsesale_df, 3, True, False)
# Question 2.3:
# Run k-means for each possible value of k in the set {3,5,10}.
# Create a table with the between cluster distance BC, within cluster distance WC and ratio BC/WC
# For each K value.
k_list = [3,5,10]
k_means_pairs_run(wholsesale_df, k_list, False, True)
| [
"georgerebradley@gmail.com"
] | georgerebradley@gmail.com |
a0f25305534ecd8df21add4bcac177aedf095136 | 57d370a3b4d1255b881787bd89c1ed0ace77714e | /MainApplication/Aggregator/CombineResults.py | 4f3bc09dd8bca1b612efddf816f144c517304e88 | [
"Apache-2.0"
] | permissive | kiliczsh/infinity-search | 908ba715440f262335f44480b77428a66caceb84 | 1c08cbf38157d46a3395127fa258a485e1f8123c | refs/heads/master | 2022-11-28T01:05:22.580663 | 2020-04-01T21:01:47 | 2020-04-01T21:01:47 | 285,981,195 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,765 | py | import SearchEngines.Bing.BingSearch as Bing
import SearchEngines.Mojeek.MojeekSearch as Mojeek
import SearchEngines.WikiMedia.WikiSearches as WikiMedia
formatted_query = ''
# Just making the query in the form of a query string by replacing the spaces with a '+'
def format_query(query):
    """Cache *query* at module level with every space encoded as '+'."""
    global formatted_query
    formatted_query = '+'.join(query.split(' '))
# Coming later
def rank_all(results):
    """Placeholder ranking step: currently a pass-through of the results."""
    return results
def combine_results(current, new):
    """Append *new* (as one entry) to *current* and return it.

    Note: this mutates and returns the very list object that was passed in;
    *new* is added as a single nested element, not element-by-element.
    """
    current.append(new)
    return current
def format_bing(data):
    """Append a 'view more' footer row linking to Bing for the cached query.

    Relies on the module-level ``formatted_query`` set by format_query().
    """
    more_link = 'https://www.bing.com/search?q=' + formatted_query
    data.append(['View More Results From Bing', more_link, '', 'https://www.bing.com/favicon.ico'])
    return data
def format_mojeek(data):
    """Append a 'view more' footer row linking to Mojeek for the cached query.

    Relies on the module-level ``formatted_query`` set by format_query().
    """
    more_link = 'https://www.mojeek.com/search?&q=' + formatted_query
    data.append(['View More Results From Mojeek', more_link, '', 'https://www.mojeek.com/favicon.ico'])
    return data
# really just Bing for now
def search_all(query, offset=0):
    """Query Bing (currently the only live engine) and return ranked results.

    On any fetch error the result list degrades to [[]] so callers still get
    a list shape. The commented block is a retired Mojeek code path.
    """
    format_query(query)
    # try:
    #     mojeek1 = Mojeek.get_results(formatted_query)
    #     mojeek2 = Mojeek.get_results(formatted_query, s=11)
    #
    #     # print(mojeek1)
    #     # print(mojeek2)
    #
    #     mojeek = mojeek1
    #     for page in mojeek2:
    #         mojeek.append(page)
    #
    #     mojeek = format_mojeek(mojeek)
    #
    #     all_results = []
    #     all_results = combine_results(all_results, mojeek)
    #     ranked_results = rank_all(all_results)
    #     return ranked_results
    #
    # except Exception as e:
    #     print(e)
    #     mojeek = [[]]
    try:
        bing = Bing.get_all(query, count=25, offset=offset)
    except Exception as e:
        # print(e)
        bing = [[]]  # placeholder so formatting/combining below still works
    bing = format_bing(bing)
    all_results = []
    all_results = combine_results(all_results, bing)
    ranked_results = rank_all(all_results)
    return ranked_results
def search_mojeek(query):
    """Fetch two pages of Mojeek results for *query* and return them ranked.

    Returns [[]] if any fetch fails. The trailing commented lines after the
    except block are dead code kept from an earlier revision.
    """
    format_query(query)
    try:
        mojeek1 = Mojeek.get_results(formatted_query)
        mojeek2 = Mojeek.get_results(formatted_query, s=11)  # s=11: second page offset
        # print(mojeek1)
        # print(mojeek2)
        mojeek = mojeek1
        for page in mojeek2:
            mojeek.append(page)
        mojeek = format_mojeek(mojeek)
        all_results = []
        all_results = combine_results(all_results, mojeek)
        ranked_results = rank_all(all_results)
        return ranked_results
    except Exception as e:
        print(e)
        # mojeek = [[]]
        return [[]]
    # mojeek = format_bing(mojeek)
    # all_results = []
    # all_results = combine_results(all_results, mojeek)
    # ranked_results = rank_all(all_results)
    # return ranked_results
def search_bing_images(query, offset=1):
    """Fetch one page of Bing image results and return them ranked.

    Returns [[]] if the fetch fails.
    """
    try:
        bing = Bing.get_images(query, count=10, offset=offset)
    except Exception as e:
        print(e)
        return [[]]
        # bing = []
    # bing = format_bing(bing)
    all_results = []
    all_results = combine_results(all_results, bing)
    ranked_results = rank_all(all_results)
    return ranked_results
def search_bing_mojeek(query):
    """Experimental: print 'F' for every URL present in both Bing and Mojeek.

    NOTE(review): this function looks unfinished --
      * the *query* parameter is unused; it reads the module-level
        ``formatted_query`` without calling format_query(query) first;
      * ``results`` is assigned in the except branch but never used;
      * nothing is returned (implicitly None).
    """
    try:
        mojeek = Mojeek.get_results(formatted_query)
        bing = Bing.get_all(formatted_query)
        for result in bing:
            count = 0
            for res in mojeek:
                count += 1
                if result[1] == res[1]:  # index 1 holds the result URL
                    print('F')
    except Exception as e:
        print(e)
        results =[[]]
    # print('')
def second_page_results(query): # From Mojeek
    """Fetch the third Mojeek page (s=21) for *query* and return it ranked.

    On failure, the same formatting/combining pipeline runs on a [[]]
    placeholder so the return shape matches the success path.
    """
    format_query(query)
    try:
        mojeek = Mojeek.get_results(formatted_query, s=21)
        mojeek = format_mojeek(mojeek)
        all_results = []
        all_results = combine_results(all_results, mojeek)
        ranked_results = rank_all(all_results)
        return ranked_results
    except Exception as e:
        print(e)
        mojeek = [[]]
        mojeek = format_mojeek(mojeek)
        all_results = []
        all_results = combine_results(all_results, mojeek)
        ranked_results = rank_all(all_results)
        return ranked_results
def search_wiki_results(query):
    """Return per-project WikiMedia results keyed by project name.

    An empty 'Wikidata' entry is always present; on failure a dict of empty
    lists for every known project is returned so templates can still render.
    """
    format_query(query)
    try:
        results = WikiMedia.get_all_results_open_search(query)
        results['Wikidata'] = []  # not provided by open search; keep the key present
    except Exception as e:
        print(e)
        results = {'Wikipedia': [], 'Wiktionary': [], 'Wikibooks': [], 'Wikiquote': [], 'Wikivoyage': [],
                   'Wikisource': [], 'Wikispecies': [], 'Wikinews': [], 'Wikiversity': [], 'Wikidata': [],
                   'Metawiki': [], 'Wikicommons': []
                   }
    return results
| [
"N/A"
] | N/A |
250d64a708bded929b8db3d0633e5156cedaeb5c | 5c008dad5881eaefe36501628d8d3c16e122b9c0 | /Week_01/week_01_problems/w01_problem_01.py | d303b8e283b07f7443eaaf687b58a2a77d70d43a | [] | no_license | xeusteerapat/MIT-6.00.1x-EDX | 846787898b06cd5223fa825cf9757f2e92449b02 | cae706564efdc41e435b309493484ea9348c908d | refs/heads/master | 2020-07-18T22:30:43.502441 | 2019-11-04T16:16:51 | 2019-11-04T16:16:51 | 206,325,175 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | s = 'azcbobobegghakl'
# Count the vowels in s: a single membership test replaces the five-way
# chained comparison (same result, easier to read and extend).
num_vowels = 0
for vowel in s:
    if vowel in 'aeiou':
        num_vowels += 1
print("Number of vowels: " + str(num_vowels))
| [
"xeus085@gmail.com"
] | xeus085@gmail.com |
7b193febd0816faa15a5993d96e7648bf84d756e | acbf7d7e61c90786c9535599d4ecb0f378ac44e8 | /Leetcode-100/first_last_in_sorted.py | b27460afdf171ccd0b14e993088c2f1e51bfc1f5 | [] | no_license | kumawat0008/Data-Structures-Algorithms | 61f02e4126de87b8c0a929acb00ea94dce48738b | 5bab125578b4a662c864fd9d5e929861fdbf33ec | refs/heads/master | 2022-06-26T19:45:26.193765 | 2022-06-09T10:50:32 | 2022-06-09T10:50:32 | 239,322,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,126 | py | class Solution(object):
def first(self, arr, low, high, x, n):
if (high >= low):
mid = (low + high) // 2
if ((mid == 0 or x > arr[mid - 1]) and arr[mid] == x):
return mid
elif (x > arr[mid]):
return self.first(arr, (mid + 1), high, x, n)
else:
return self.first(arr, low, (mid - 1), x, n)
return -1
def last(self, arr, low, high, x, n):
if (high >= low):
mid = (low + high) // 2
if ((mid == n - 1 or x < arr[mid + 1]) and arr[mid] == x):
return mid
elif (x < arr[mid]):
return self.last(arr, low, (mid - 1), x, n)
else:
return self.last(arr, (mid + 1), high, x, n)
return -1
def searchRange(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
first = self.first(nums, 0, len(nums) - 1, target, len(nums))
last = self.last(nums, 0, len(nums) - 1, target, len(nums))
return [first, last]
| [
"dkumawat@ciena.com"
] | dkumawat@ciena.com |
8ef1e4cdfe95348f881050bf452ff59aa6fd624a | 5e3816e1dd190a8e8b90c3a82409c6d910e8c699 | /test.py | 8e711cfe9419d301620ec137f02da35687e401a4 | [
"MIT"
] | permissive | Vignesh-Kumar-07/Twitter-tweets | 8ea90b11b77196e57f7af5a20511c7b09fa9f576 | 1d81ac3e4503d15f8bdf0e67c3559b14c0ecf1f6 | refs/heads/main | 2023-08-17T17:23:15.411209 | 2021-10-11T07:36:14 | 2021-10-11T07:36:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,234 | py | import re
# To Read Tweets directly from Program:
tweet = """Most employees fulfilled with used hardware based on device turns four years old.
Some job categories (hardware or software developer, technical sales, etc.) are eligible at three years.
Recognizing that employee job roles or device request qualities and inventory allows , the CIO considers requests for early refresh.
When submitting, provide a detailed justification as to requirements can change at any time why a new device is needed.
Devices reviews an early refresh request the request if the, Devices honors the request.
Early refresh requests are eligible for refresh when their primary computing may be funding levels and available"""
# To Read Tweets from File:
# with open("tweets.txt", "r") as f:
#     tweet = f.read()
tot_no_of_words = len(tweet.split())
half_of_words = tot_no_of_words // 2
list_of_abusive_words = ["device", "refresh"]
# Every (case-sensitive) match of a flagged word counts double.
abusive_words = sum(len(re.findall(word, tweet)) * 2 for word in list_of_abusive_words)
if abusive_words == 0:
    print("Pure Content.")
elif tot_no_of_words > abusive_words:
    print("Less Abusive.")
else:
    print("More Abusive.")
| [
"noreply@github.com"
] | noreply@github.com |
a2e56cf287af531ae6f21480a03d26a1ab96e9e5 | cb476c5fdf7e7da1abb4229635a79ea88f465399 | /lianxi/qqmusic.py | 22408b320c1ed8e8d9b7428bfa9ed5970774f9df | [] | no_license | ZHAISHENKING/spiderapp | cb66f68f3ec40f027d8cdbdd1ec357f25c668cda | 51b09dd98e64e6d0612a313ff6c90f6fbd0f63dc | refs/heads/master | 2022-12-15T00:30:44.168143 | 2018-07-17T13:42:57 | 2018-07-17T13:42:57 | 136,556,921 | 0 | 0 | null | 2022-07-06T19:49:39 | 2018-06-08T02:42:16 | Python | UTF-8 | Python | false | false | 2,852 | py | import requests
import json, time
class qqMusic(object):
    """Scraper for the QQ Music top-list JSONP API and per-song stream URLs."""
    def __init__(self):
        # Today's date as YYYY-MM-DD.
        # NOTE(review): self.date is never used -- get_json() hardcodes
        # 'date': '2018-05-29' below; confirm which one is intended.
        self.date = time.strftime('%Y-%m-%d', time.localtime(time.time()))
    def get_json(self):
        """Fetch the top-list (topid=4) and return the parsed JSON payload."""
        header = {
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36',
            'referer': 'https://y.qq.com/n/yqq/toplist/4.html',
        }
        params = {
            'tpl': '3',
            'page': 'detail',
            'date': '2018-05-29',
            'topid': '4',
            'type': 'top',
            'song_begin': '0',
            'song_num': '30',
            'g_tk': '5381',
            'jsonpCallback': 'MusicJsonCallbacktoplist',
            'loginUin': '0',
            'hostUin': '0',
            'format': 'jsonp',
            'inCharset': 'utf8',
            'outCharset': 'utf-8',
            'notice': '0',
            'platform': 'yqq',
            'needNewCode': '0',
        }
        url = 'http://c.y.qq.com/v8/fcg-bin/fcg_v8_toplist_cp.fcg'
        r = requests.get(url, headers=header, params=params).text
        # Strip the JSONP wrapper "MusicJsonCallbacktoplist(...)" to get raw JSON.
        init_data = r.replace('MusicJsonCallbacktoplist(', '').rstrip(')')
        """
        到这知识获取到歌曲相关信息 但没有歌曲的链接
        接下来取出songmid, 获取歌曲链接
        """
        # (The note above says: at this point we only have the song metadata,
        # not the stream link; next, extract songmid to fetch the song URL.)
        json_data = json.loads(init_data)
        return json_data
    def get_link(self, songmid):
        """Resolve a song's stream URL (via its vkey) and print it."""
        url = 'http://c.y.qq.com/base/fcgi-bin/fcg_music_express_mobile3.fcg'
        header = {
            'user-agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36',
            'referer':'https://y.qq.com/portal/player.html'
        }
        """
        songmid,filename是变量
        """
        # (The note above says: songmid and filename are the variable parts.)
        params = {
            'g_tk': '5381',
            'jsonpCallback': 'MusicJsonCallback2584168335259782',
            'loginUin': 0,
            'hostUin': 0,
            'format': 'json',
            'inCharset': 'utf8',
            'outCharset': 'utf-8',
            'notice': 0,
            'platform': 'yqq',
            'needNewCode': 0,
            'cid': '205361747',
            'callback': 'MusicJsonCallback2584168335259782',
            'uin': 0,
            'songmid': songmid,
            'filename': 'C400'+songmid+'.m4a',
            'guid': '9384442260'
        }
        r = requests.get(url, headers=header, params=params).text
        # Response is "callback({...})": keep only the JSON between the parens.
        init_data = r.split('(')[1].rstrip(')')
        data = json.loads(init_data)
        data = data['data']['items'][0]
        url = 'http://dl.stream.qqmusic.qq.com/'+data['filename']+'?vkey='+data['vkey']+'&guid=9384442260&uin=0&fromtag=66'
        print(url)
# Fetch the top list once, then resolve and print a stream URL per song.
q = qqMusic()
paihang = q.get_json()
for i in paihang['songlist']:
    q.get_link(i['data']['songmid'])
| [
"18700790825@163.com"
] | 18700790825@163.com |
6d41fea80601f45b88584c7c63c5d46b02129c99 | e828768c2812f1248f5eac46e22482f1095b23f7 | /custom/paceToTotalTime.py | f6d237e9f983a385bdbe3716dd9c1f7f4b43f64c | [] | no_license | derek-watson14/HackerRank | 0ff33d19210350e400b3b8bdd2589e2957e0c4e5 | 957e9c0868d1b82e9f040bd7700c134b80abdf18 | refs/heads/master | 2023-07-16T15:47:24.147976 | 2021-09-03T00:52:33 | 2021-09-03T00:52:33 | 318,676,447 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,237 | py | # Convert a pace + distance to total time or time + distance to pace
class UserInput:
    """Bundle of the four values collected during one prompt session.

    unit/mode are two-element lists of [display string, canonical name]
    exactly as built by get_user_input(), e.g. [">> Miles", "mile"].
    """

    def __init__(self, unit, mode, time, distance):
        self.unit = unit          # [display string, unit name]
        self.mode = mode          # [display string, "Pace" or "Total time"]
        self.time = time          # "mm:ss" string as typed by the user
        self.distance = distance  # distance as a string, e.g. "3.1"

    def __repr__(self):
        # Debug-friendly representation; added for easier troubleshooting.
        return (f"{type(self).__name__}(unit={self.unit!r}, mode={self.mode!r}, "
                f"time={self.time!r}, distance={self.distance!r})")
class Time:
    """A minutes/seconds pair of display values built by seconds_to_formatted."""

    def __init__(self, minutes, seconds):
        self.minutes = minutes  # minutes component, ready for display
        self.seconds = seconds  # seconds component, zero-padded for display

    def __repr__(self):
        # Debug-friendly representation; added for easier troubleshooting.
        return f"{type(self).__name__}(minutes={self.minutes!r}, seconds={self.seconds!r})"
def time_to_seconds(time):
    """Convert an "mm:ss" string into the total number of seconds."""
    minutes, seconds = time.split(":")
    return int(minutes) * 60 + int(seconds)
def seconds_to_formatted(seconds):
    """Convert a number of seconds into a Time of display strings.

    Replaces the previous float/str round-tripping (str(round(sec/60, 4)) and
    decimal-part parsing) with integer divmod, which gives the same results
    without float-rounding edge cases; seconds are zero-padded to two digits.
    """
    minutes, secs = divmod(round(seconds), 60)
    return Time(str(minutes), f"{secs:02d}")
def convert_time(time, distance, mode):
    """Convert between pace and total time, returning a formatted Time.

    mode[1] == "Total time" means the user entered a total time, so divide
    by the distance to obtain the pace; otherwise multiply the entered pace
    by the distance to obtain the total activity time.
    """
    total_seconds = time_to_seconds(time)
    if mode[1] == "Total time":
        converted = round(total_seconds / float(distance))
    else:
        converted = round(float(distance) * total_seconds)
    return seconds_to_formatted(converted)
def result_string(unit, mode, converted):
    """Build the final message shown to the user for the converted Time."""
    if mode[1] == "Total time":
        # A total-time input was converted into a per-unit pace.
        return f"Pace per {unit[1]}: {converted.minutes} minutes {converted.seconds} seconds"
    # A pace input was converted into a total activity time.
    return f"Total activity time: {converted.minutes} minutes, {converted.seconds} seconds"
def get_user_input():
    """Interactively prompt for mode, unit, time and distance.

    Re-prompts until the user picks "1" or "2" for both the calculation
    type and the unit, then returns the collected values as a UserInput.
    """
    mode, unit = [None, None]
    while mode not in ["1", "2"]:  # loop until a valid menu choice
        mode = input("\n~> Choose calculation type:\n1) Pace to total time\n2) Total time to pace\nInput choice: ")
    # [display string, canonical name] -- the name drives convert_time/result_string.
    mode = [">> Pace to total time", "Pace"] if mode == "1" else [">> Total time to pace", "Total time"]
    print(mode[0])
    while unit not in ["1", "2"]:  # loop until a valid menu choice
        unit = input("\n~> Choose unit:\n1) Miles\n2) Kilometers\nInput choice: ")
    unit = [">> Miles", "mile"] if unit == "1" else [">> Kilometers", "kilometer"]
    print(unit[0])
    print(f"\n~> Input time and distance:\nFormat: {mode[1]}: mm:ss | Distance: x.x")
    time = input(f"{mode[1]}: ")
    distance = input(f"Distance ({unit[1]}s): ")
    return UserInput(unit, mode, time, distance)
def main():
    """Prompt the user, perform the conversion and print the result."""
    u_input = get_user_input()
    converted = convert_time(u_input.time, u_input.distance, u_input.mode)
    print(result_string(u_input.unit, u_input.mode, converted))

if __name__ == "__main__":
    main()
"derek.watson92@gmail.com"
] | derek.watson92@gmail.com |
55eb160926cb77920b63568d4be18c54eeebdb2d | 41b59a9c8381fa3a92f5d2c37c91261afb9c82c4 | /QCDEventShape/2017/MC/test/crab_bin_py8_3200_inf.py | ad911d60a95de92ad286c8ea8f0a46bafbafeab1 | [] | no_license | Sumankkundu/ChargedParticle | c6d4f90b55df49321df2ecd758bb1f39db896f8c | eb5bada24b37a58ded186d6e5d2d7bd00898fefe | refs/heads/master | 2023-07-15T03:34:33.377203 | 2021-08-31T05:01:32 | 2021-08-31T05:01:32 | 231,091,587 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,495 | py | #from CRABClient.UserUtilities import config, getUsernameFromSiteDB
# CRAB3 job configuration for the QCD Pt 3200-to-inf (TuneCP5, pythia8)
# UL2017 MiniAOD sample.
from CRABClient.UserUtilities import config
config = config()
# --- General job bookkeeping ---
config.General.requestName ='ESVQCD_UL_Ptbinned_3200toinf_tuneCP5_bin'
#config.General.workArea = 'crab_projects_1'
config.General.workArea = 'crab_projects'
config.General.transferOutputs = True
config.General.transferLogs = True
# --- Job payload: CMSSW config plus the JER/JEC text files it reads ---
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'Run_QCD_test_miaod_v2_106x_mc_cfg.py'
#config.JobType.maxMemoryMB = 9000 # Default is 2500 : Max I have used is 13000
#config.JobType.maxJobRuntimeMin = 2750 #Default is 1315; 2750 minutes guaranteed to be available; Max I have used is 9000
#config.JobType.numCores = 4
config.JobType.inputFiles= [
"/afs/cern.ch/work/s/sukundu/private/ESV_charge_CMSSW/Uncertainty2017/AK4PFCHS_Summer19UL/Summer19UL17_JRV2_MC_PtResolution_AK4PFchs.txt",
"/afs/cern.ch/work/s/sukundu/private/ESV_charge_CMSSW/Uncertainty2017/AK4PFCHS_Summer19UL/Summer19UL17_JRV2_MC_SF_AK4PFchs.txt",
"/afs/cern.ch/work/s/sukundu/private/ESV_charge_CMSSW/Uncertainty2017/AK4PFCHS_Summer19UL/Summer19UL17_RunB_V5_DATA_UncertaintySources_AK4PFchs.txt",
"/afs/cern.ch/work/s/sukundu/private/ESV_charge_CMSSW/Uncertainty2017/AK4PFCHS_Summer19UL/Summer19UL17_RunC_V5_DATA_UncertaintySources_AK4PFchs.txt",
"/afs/cern.ch/work/s/sukundu/private/ESV_charge_CMSSW/Uncertainty2017/AK4PFCHS_Summer19UL/Summer19UL17_RunD_V5_DATA_UncertaintySources_AK4PFchs.txt",
"/afs/cern.ch/work/s/sukundu/private/ESV_charge_CMSSW/Uncertainty2017/AK4PFCHS_Summer19UL/Summer19UL17_RunE_V5_DATA_UncertaintySources_AK4PFchs.txt",
"/afs/cern.ch/work/s/sukundu/private/ESV_charge_CMSSW/Uncertainty2017/AK4PFCHS_Summer19UL/Summer19UL17_RunF_V5_DATA_UncertaintySources_AK4PFchs.txt"
]
# --- Input dataset and job splitting (one file per job) ---
config.Data.inputDataset ='/QCD_Pt_3200toInf_TuneCP5_13TeV_pythia8/RunIISummer19UL17MiniAOD-106X_mc2017_realistic_v6-v2/MINIAODSIM'
config.Data.inputDBS = 'global'
#config.Data.splitting = 'EventBased'
#config.Data.splitting = 'LumiBased'
config.Data.splitting = 'FileBased'
#config.Data.splitting = 'Automatic'
#config.Data.unitsPerJob = 10 # for Automatic must be 180-2700 range
config.Data.unitsPerJob = 1 #For Filebased or Lumibased
#config.Data.outLFNDirBase = '/store/user/%s/' % (getUsernameFromSiteDB())
#config.Data.outLFNDirBase = '/store/user/%s/' % (sukundu)
# --- Output publication and storage site ---
config.Data.publication = True
config.Data.outputDatasetTag = 'MC_PY82017UL_Bin'
config.JobType.allowUndistributedCMSSW = True
config.Site.storageSite ='T2_IN_TIFR'
| [
"skundu91phys@gmail.com"
] | skundu91phys@gmail.com |
b1bd4b4e18caf300b155f876311c7836c1d9b4d3 | dfca57356cfabc01b42fdbca21c9692eb552ceb0 | /core_algo.py | 006dd340abcd335d95a0174e31d49bc5e864b381 | [] | no_license | simonZhou86/image-denoising | 8b634c42bd9bb93372335da34f0d64bb192722f4 | 4a57f8a6308256acce329fd528ca9df1740986ef | refs/heads/main | 2023-06-24T16:14:55.519620 | 2021-08-05T07:40:18 | 2021-08-05T07:40:18 | 377,167,696 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,934 | py | import sys, os, math
import numpy as np
from PIL import Image
from OpenGL.GLUT import *
from OpenGL.GL import *
from OpenGL.GLU import *
def ft1D( signal ):
    """1-D discrete Fourier transform (thin wrapper around numpy's FFT)."""
    spectrum = np.fft.fft(signal)
    return spectrum
# Input is a 2D numpy array of complex values.
# Output is the same.
def forwardFT( image ):
    """2-D forward DFT computed as two separable passes of 1-D FFTs.

    First every row is transformed, then every column of the row-transformed
    data; together the two passes give the full 2-D transform F(u,v).
    """
    transformed = np.array(image, dtype="complex")
    for r in range(transformed.shape[0]):
        transformed[r, :] = np.fft.fft(transformed[r, :])
    for c in range(transformed.shape[1]):
        transformed[:, c] = np.fft.fft(transformed[:, c])
    return transformed
def inverseFT( image ):
    """2-D inverse DFT via the conjugation trick: ifft(x) = conj(fft(conj(x)))/N.

    The conjugate of the input is forward-transformed (rows then columns,
    normalising by the row and column counts respectively, for a total
    factor of 1/N), then conjugated back to give the inverse transform.
    """
    work = np.array(np.conjugate(image))
    rows = work.shape[0]
    cols = work.shape[1]
    for r in range(rows):
        work[r, :] = np.fft.fft(work[r, :]) / rows
    for c in range(cols):
        work[:, c] = np.fft.fft(work[:, c]) / cols
    return np.conjugate(work)
def multiplyFTs( image, filter ):
    """Multiply an FT image by a filter whose quadrants are shifted to centre.

    Negating every other filter sample -- the factor (-1)**(x+y), i.e.
    e^{i*pi*(x+y)} by Euler's formula -- translates the filter's spectrum by
    half the image extent in each direction before the product is taken.
    Multiplication in the frequency domain equals convolution in space.
    """
    shifted = filter.copy()
    for r, c in np.ndindex(shifted.shape):
        # Even coordinate sums keep their sign ((-1)**even == 1).
        if (r + c) % 2:
            shifted[r, c] = -shifted[r, c]
    return shifted * image
def modulatePixels( image, x, y, isFT ):
    """Scale pixels in a Gaussian-weighted disc around (x, y), in place.

    Depends on module-level globals: ``radius`` (brush radius) and, via the
    editmode() helper, ``editMode`` ("s" = subtractive, otherwise additive).
    In FT mode the edit is applied in log-amplitude space and mirrored to
    the symmetric frequency so the spectrum stays conjugate-symmetric.
    """
    # YOUR CODE HERE
    print ('current pointed at', x, y)
    gaussian_mean = 0  # NOTE(review): unused -- the Gaussian below is centred implicitly
    gaussian_std = float(radius) / 2.0  # brush std-dev: half the radius
    #copy_image = image.copy()
    xdim = image.shape[1] # x values
    ydim = image.shape[0] # y values
    xmin = x - radius # bounding box of the brush neighbourhood
    xmax = x + radius
    ymin = y - radius
    ymax = y + radius
    #imagec = image.copy()
    for i in range(xmin, xmax):
        for j in range(ymin, ymax):
            distance = np.sqrt(((i-x)**2 + (j-y)**2)) # euclidean distance from the click point
            if distance <= (radius):  # restrict the square box to a circle
                #exp_part = np.exp(-0.5 * ((distance / gaussian_std) ** 2))
                gaussian_factor = np.exp(-0.5 * ((distance / gaussian_std) ** 2)) # unnormalised Gaussian weight
                #gaussian_factor = (1/(gaussian_std * np.sqrt(2 * np.pi))) * exp_part
                mode = editmode(gaussian_factor) # multiplicative factor for additive/subtractive mode
                if isFT: # the image is a Fourier transform
                    ftx = wrap(i, xdim) # wrap out-of-range coordinates (spectrum is periodic)
                    fty = wrap(j, ydim)
                    # Recover amplitude/phase of this frequency from the complex
                    # sample (real-FT convention: a_k = 2*Re, b_k = 2*Im).
                    ak = 2*np.real(image[fty, ftx])
                    bk = 2*np.imag(image[fty, ftx])
                    A = np.sqrt((ak ** 2 + bk ** 2))
                    theta = np.arctan2(bk, ak)
                    # Scale the log-amplitude, keeping the phase unchanged.
                    factor = np.log(A + 1)
                    newA = np.exp(factor * mode) - 1
                    pixel_value = newA * np.cos(theta) + 1j * newA * np.sin(theta)
                    image[fty, ftx] = pixel_value # update this frequency and its conjugate mirror
                    image[ydim - 1 - fty, xdim - 1 - ftx] = pixel_value
                else:
                    if (i >= 0 and i < xdim and j >= 0 and j < ydim): # spatial image: skip out-of-bounds pixels
                        image[j, i] *= mode
    # #pass
def editmode(gaussian): # helper function to detect which mode we want
    """Map a Gaussian weight to the multiplicative factor for the edit mode.

    Subtractive mode (global editMode == "s") scales pixels down toward
    zero; any other mode scales them up slightly (10% of the weight).
    """
    return 1 - gaussian if editMode == "s" else 1 + 0.1 * gaussian
# For an image coordinate, if it's < 0 or >= max, wrap the coordinate
# around so that it's in the range [0,max-1]. This is useful in the
# modulatePixels() function when dealing with FT images.
def wrap( val, max ):
    """Wrap an image coordinate into [0, max-1] (toroidal indexing).

    The previous if/elif version only handled values within one period of
    the valid range; Python's modulo (non-negative for positive divisors)
    gives identical answers there and also wraps values further out, e.g.
    when the brush radius exceeds the image size.
    """
    return val % max
"noreply@github.com"
] | noreply@github.com |
4937c31eb53c8d0279fb30488f96365137beb3f6 | 136cb5450842633f86d1d806b1d7e93bc8672ea3 | /tests/test_utils.py | cc9036eb40a0f0b3cdd631ad635c085793b52e8e | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | jimr/Showcase | 2f7c647cab31953f0beb08b35bbcac9fded9e2e2 | 1685680954e3df825c1b1102a2140e3395533376 | refs/heads/master | 2023-08-31T01:36:34.360692 | 2021-06-11T10:34:40 | 2021-06-11T10:34:40 | 84,255,929 | 0 | 0 | MIT | 2023-08-21T22:59:31 | 2017-03-07T23:17:53 | Python | UTF-8 | Python | false | false | 967 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest
from showcase import utils
class TestProbablyText(unittest.TestCase):
    """Exercise utils.is_probably_text against fixture files in tests/data.

    Each test_* method checks one fixture; text-like formats should be
    detected as text, a PNG should not.
    """
    def path_to(self, fname):
        """Absolute path of a fixture file under this test module's data dir."""
        return os.path.join(os.path.dirname(__file__), 'data', fname)
    def check_file(self, fname, expected=True):
        """Assert that is_probably_text(fixture) matches *expected*."""
        self.assertEqual(
            utils.is_probably_text(self.path_to(fname)), expected,
        )
    def test_text(self):
        self.check_file('test.txt')
    def test_text_no_ext(self):
        self.check_file('test')
    def test_unicode(self):
        self.check_file('test_unicode')
    def test_md(self):
        self.check_file('test.md')
    def test_rst(self):
        self.check_file('test.rst')
    def test_json(self):
        self.check_file('test.json')
    def test_csv(self):
        self.check_file('test.csv')
    def test_tsv(self):
        self.check_file('test.tsv')
    def test_png(self):
        # Binary image data must not be classified as text.
        self.check_file('test.png', False)
| [
"jim@jimr.org"
] | jim@jimr.org |
5c4659b8640f73ab91097d2497532feefa68587b | 586927f7eb1e6f07bc169353b06dc51520bb11a1 | /src/valdomain.py | bec7e3eeda2c797605916c662226cc8bf6de7586 | [] | no_license | davidvictoria/xibalba | fa71477b6667017dd34e470083d5ddce7633fd4e | a7c21b343d3a78d60db8bf5019eb4b3dae901d22 | refs/heads/main | 2023-08-21T21:58:40.467668 | 2021-09-30T20:25:47 | 2021-09-30T20:25:47 | 410,717,457 | 0 | 0 | null | 2021-09-30T20:25:48 | 2021-09-27T02:30:04 | null | UTF-8 | Python | false | false | 11,303 | py | import socket
from ssl import PROTOCOL_TLSv1
from OpenSSL import SSL
import pendulum
import requests
import json
import time
def get_domains(cert):
    """Return the certificate's subjectAltName extension text ('' if absent)."""
    san = ''
    for index in range(cert.get_extension_count()):
        extension = cert.get_extension(index)
        # Keep the text of the (last) subjectAltName extension found.
        if 'subjectAltName' in str(extension.get_short_name()):
            san = extension.__str__()
    return san
def get_cert_info(cert):
    """Collect issuer, subject and validity details of an X.509 cert.

    Returns a dict of display-ready values, including the number of days
    remaining before expiry (negative once the certificate has lapsed).
    """
    cert_subject = cert.get_subject()
    cert_issuer = cert.get_issuer()
    starts = pendulum.parse(cert.get_notBefore().decode('utf-8'), strict=False)
    ends = pendulum.parse(cert.get_notAfter().decode('utf-8'), strict=False)
    return {
        'subject': cert_subject.CN,
        'subject_org': cert_subject.O,
        'issuer_country': cert_issuer.countryName,
        'issuer_organization_name': cert_issuer.organizationName,
        'issuer_organization_unit_name': cert_issuer.organizationalUnitName,
        'issuer_common_name': cert_issuer.commonName,
        'serial': str(cert.get_serial_number()),
        'cert_algo': cert.get_signature_algorithm().decode(),
        # get_version() is zero-based; certificates report version N+1.
        'cert_version': cert.get_version() + 1,
        'cert_domains': get_domains(cert),
        'cert_expired': cert.has_expired(),
        'valid_from': starts.to_datetime_string(),
        'valid_to': ends.to_datetime_string(),
        'remaining_days': (ends - pendulum.now()).days,
    }
def get_cert(host):
    """Fetch the peer certificate for *host* via a TLS handshake on port 443.

    Returns the pyOpenSSL certificate object for use with get_cert_info()
    and get_domains().
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # NOTE(review): PROTOCOL_TLSv1 is imported from the stdlib `ssl` module but
    # passed to pyOpenSSL's SSL.Context, which expects its own *_METHOD
    # constants -- confirm the two values are actually compatible.
    osobj = SSL.Context(PROTOCOL_TLSv1)
    sock.connect((host, 443))
    oscon = SSL.Connection(osobj, sock)
    oscon.set_tlsext_host_name(host.encode())  # SNI, needed for virtual-hosted sites
    oscon.set_connect_state()
    oscon.do_handshake()
    cert = oscon.get_peer_certificate()
    # NOTE(review): the socket leaks if connect()/do_handshake() raises --
    # consider try/finally if this is hardened later.
    sock.close()
    return cert
def check_sslabs(host):
    """Run a Qualys SSL Labs (API v3) analysis for *host* and summarise it.

    Polls the ``analyze`` endpoint every 10 seconds until the first
    endpoint's assessment is ready, then fetches that endpoint's details
    and returns them as a flat dict (grade, trust info, allowed protocols
    and per-vulnerability verdict strings in Spanish).  If the analysis
    ends in an error, the API's status message string is returned instead.
    """
    while True:
        r = requests.get('https://api.ssllabs.com/api/v3/analyze?host='+host)
        response = json.loads(r.text)
        if response['status'] == 'ERROR':
            # Analysis failed -- surface the API's own message to the caller.
            # (The original code had an unreachable `break` after this return.)
            return response['statusMessage']
        if response['status'] in ['IN_PROGRESS', 'READY']:
            try:
                status = response['endpoints'][0]['statusMessage']
                ip = response['endpoints'][0]['ipAddress']
            except (KeyError, IndexError, TypeError):
                # Endpoint data not published yet; keep polling.
                status = None
                ip = None
            if status == 'Ready':
                break
            else:
                time.sleep(10)
    r = requests.get('https://api.ssllabs.com/api/v3/getEndpointData?host='+host+'&s='+ip)
    response = json.loads(r.text)
    grade = response['grade']
    is_exceptional = response['isExceptional']
    is_trusted = response['details']['certChains'][0]['trustPaths'][0]['trust'][0]['isTrusted']
    try:
        trust_error = response['details']['certChains'][0]['trustPaths'][0]['trust'][0]['trustErrorMessage']
    except (KeyError, IndexError, TypeError):
        # No error message is published when the chain is trusted.
        trust_error = ''
    has_sct = response['details']['hasSct']
    try:
        server_signature = response['details']['serverSignature']
    except KeyError:
        server_signature = ''
    sni_required = 'Requerido' if response['details']['sniRequired'] == True else 'No requerido'
    hsts_policy = response['details']['hstsPolicy']['status']
    hpkp_policy = response['details']['hpkpPolicy']['status']
    protocols = response['details']['protocols']
    # Boolean-flag vulnerabilities reported directly by the API.
    vuln_poodle = '¡Vulnerable!' if response['details']['poodle'] == True else 'No vulnerable'
    vuln_heartbleed = '¡Vulnerable!' if response['details']['heartbleed'] == True else 'No vulnerable'
    vuln_heartbeat = '¡Vulnerable!' if response['details']['heartbeat'] == True else 'No vulnerable'
    vuln_freak = '¡Vulnerable!' if response['details']['freak'] == True else 'No vulnerable'
    vuln_logjam = '¡Vulnerable!' if response['details']['logjam'] == True else 'No vulnerable'
    vuln_drownVulnerable = '¡Vulnerable!' if response['details']['drownVulnerable'] == True else 'No vulnerable'
    vuln_vulnBeast = '¡Vulnerable!' if response['details']['vulnBeast'] == True else 'No vulnerable'
    # Integer-coded results: the magic numbers below are the "vulnerable"
    # result codes from the SSL Labs API v3 documentation -- verify there.
    vuln_zombiePoodle = '¡Vulnerable!' if response['details']['zombiePoodle'] == 3 else 'No vulnerable'
    vuln_openSslCcs = '¡Vulnerable!' if response['details']['openSslCcs'] == 3 else 'No vulnerable'
    vuln_openSSLLuckyMinus20 = '¡Vulnerable!' if response['details']['openSSLLuckyMinus20'] == 2 else 'No vulnerable'
    vuln_ticketbleed = '¡Vulnerable!' if response['details']['ticketbleed'] == 2 else 'No vulnerable'
    vuln_bleichenbacher = '¡Vulnerable!' if response['details']['bleichenbacher'] in (2, 3) else 'No vulnerable'
    vuln_goldenDoodle = '¡Vulnerable!' if response['details']['goldenDoodle'] in (4, 5) else 'No vulnerable'
    vuln_zeroLengthPaddingOracle = '¡Vulnerable!' if response['details']['zeroLengthPaddingOracle'] in (6, 7) else 'No vulnerable'
    vuln_sleepingPoodle = '¡Vulnerable!' if response['details']['sleepingPoodle'] in (10, 11) else 'No vulnerable'
    vuln_poodleTls = '¡Vulnerable!' if response['details']['poodleTls'] == 2 else 'No vulnerable'
    # NOTE: the original recomputed vuln_freak here and repeated the
    # 'vuln_freak' key in the dict below; both duplicates were removed
    # (the value was identical, so behaviour is unchanged).
    return {'grade': grade, 'is_exceptional': is_exceptional, 'is_trusted': is_trusted, 'trust_error': trust_error, 'has_sct': has_sct, 'server_signature': server_signature, 'sni_required': sni_required, 'hsts_policy': hsts_policy, 'hpkp_policy': hpkp_policy, 'protocols': protocols, 'vuln_poodle': vuln_poodle, 'vuln_heartbleed': vuln_heartbleed, 'vuln_heartbeat': vuln_heartbeat, 'vuln_freak': vuln_freak, 'vuln_logjam': vuln_logjam, 'vuln_drownVulnerable': vuln_drownVulnerable, 'vuln_vulnBeast': vuln_vulnBeast, 'vuln_zombiePoodle': vuln_zombiePoodle, 'vuln_openSslCcs': vuln_openSslCcs, 'vuln_openSSLLuckyMinus20': vuln_openSSLLuckyMinus20, 'vuln_ticketbleed': vuln_ticketbleed, 'vuln_bleichenbacher': vuln_bleichenbacher, 'vuln_goldenDoodle': vuln_goldenDoodle, 'vuln_zeroLengthPaddingOracle': vuln_zeroLengthPaddingOracle, 'vuln_sleepingPoodle': vuln_sleepingPoodle, 'vuln_poodleTls': vuln_poodleTls}
def create_txt(host, cert_info, sslabs_result):
    """Render a Spanish-language certificate/SSL Labs report for *host*.

    Builds one big report string from the dicts produced by
    get_cert_info() and check_sslabs(), writes it to
    ``status_<host>.txt`` in the current directory, and returns it.
    """
    var = '[------------- '+host+' -------------]\n'
    # --- certificate subject / issuer identity ---
    var += '[Sujeto] Dominio: '+cert_info['subject']+'\n'
    if cert_info['subject_org'] != None:
        var += '[Sujeto] Organización: '+cert_info['subject_org']+'\n'
    if cert_info['issuer_organization_name'] != None and cert_info['issuer_organization_unit_name'] != None:
        var += '[Emisor] Organización: '+cert_info['issuer_organization_name']+' ('+cert_info['issuer_organization_unit_name']+')\n'
    if cert_info['issuer_organization_name'] != None and cert_info['issuer_organization_unit_name'] == None:
        var += '[Emisor] Organización: '+cert_info['issuer_organization_name']+'\n'
    var += 'No. de serie: '+cert_info['serial']+'\n'
    var += 'Algoritmo de hash: '+cert_info['cert_algo']+'\n'
    var += 'Versión: v'+str(cert_info['cert_version'])+'\n'
    # --- validity window ---
    var += 'Inicio de vigencia: '+cert_info['valid_from']+'\n'
    var += 'Fin de vigencia: '+cert_info['valid_to']+'\n'
    if cert_info['cert_expired'] == True:
        var += 'Vigencia: CERTIFICADO VENCIDO\n'
    else:
        var += 'Vigencia: Vigente ('+str(cert_info['remaining_days'])+' días restantes)\n'
    # cert_domains is the raw subjectAltName text, comma-separated.
    var += 'Dominios asociados:\n'
    for domain in cert_info['cert_domains'].split(', '):
        var += '\t\\_ '+domain+'\n'
    # --- SSL Labs report section ---
    var += '\n[--- Informe de SSLabs ---]\n'
    if sslabs_result['is_exceptional'] == True:
        var += 'CERTIFICADO IMPLEMENTADO CORRECTAMENTE. CALIFICACIÓN EXCEPCIONAL.\n'
    var += 'Calificación: '+sslabs_result['grade']+'\n'
    if sslabs_result['is_trusted'] == False:
        var += 'Confiabilidad: NO confiable\n'
        if 'invalid certificate' in sslabs_result['trust_error']:
            var += '\t\\_ Motivo: Certificado INVÁLIDO\n'
        elif 'certificate revoked' in sslabs_result['trust_error']:
            var += '\t\\_ Motivo: Certificado REVOCADO\n'
        else:
            var += '\t\\_ Motivo: '+sslabs_result['trust_error']+'\n'
    else:
        var += 'Confiabilidad: Confiable\n'
    if sslabs_result['server_signature'] != None:
        var += 'Firma del servidor: '+sslabs_result['server_signature']+'\n'
    var += 'Navegador con soporte SNI requerido: '+sslabs_result['sni_required']+'\n'
    if sslabs_result['has_sct'] == 1:
        var += 'Disponible en Certificate Transparency Logs: Sí\n'
    else:
        var += 'Disponible en Certificate Transparency Logs: NO\n'
    if sslabs_result['hsts_policy'] == 'present':
        var += 'Cabecera HSTS implementada correctamente: Sí\n'
    else:
        var += 'Cabecera HSTS implementada correctamente: NO\n'
    if sslabs_result['hpkp_policy'] == 'present':
        var += 'Cabecera HPKP implementada correctamente: Sí\n'
    else:
        var += 'Cabecera HPKP implementada correctamente: NO\n'
    var += 'Protocolos permitidos:\n'
    for protocol in sslabs_result['protocols']:
        var += '\t\\_ '+protocol['name']+' '+protocol['version']+'\n'
    # --- per-vulnerability verdicts (strings computed by check_sslabs) ---
    var += '\n-- Vulnerabilidades --\n'
    var += 'Poodle: '+sslabs_result['vuln_poodle']+'\n'
    var += 'Heartbleed: '+sslabs_result['vuln_heartbleed']+'\n'
    var += 'Heartbeat: '+sslabs_result['vuln_heartbeat']+'\n'
    var += 'Freak: '+sslabs_result['vuln_freak']+'\n'
    var += 'Logjam: '+sslabs_result['vuln_logjam']+'\n'
    var += 'DrownVulnerable: '+sslabs_result['vuln_drownVulnerable']+'\n'
    var += 'VulnBeast: '+sslabs_result['vuln_vulnBeast']+'\n'
    var += 'ZombiePoodle: '+sslabs_result['vuln_zombiePoodle']+'\n'
    var += 'OpenSslCcs: '+sslabs_result['vuln_openSslCcs']+'\n'
    var += 'OpenSSLLuckyMinus20: '+sslabs_result['vuln_openSSLLuckyMinus20']+'\n'
    var += 'Ticketbleed: '+sslabs_result['vuln_ticketbleed']+'\n'
    var += 'Bleichenbacher: '+sslabs_result['vuln_bleichenbacher']+'\n'
    var += 'GoldenDoodle: '+sslabs_result['vuln_goldenDoodle']+'\n'
    var += 'ZeroLengthPaddingOracle: '+sslabs_result['vuln_zeroLengthPaddingOracle']+'\n'
    var += 'SleepingPoodle: '+sslabs_result['vuln_sleepingPoodle']+'\n'
    var += 'PoodleTls: '+sslabs_result['vuln_poodleTls']+'\n'
    # Persist the report next to the working directory.
    print('\tCreando archivo status_'+host+'.txt')
    with open('status_'+host+'.txt', 'w') as f:
        f.write(var)
return var | [
"devteam@bpm.consulting"
] | devteam@bpm.consulting |
b272becc0f407ed87dd639981bfb2e547cf74bd2 | b0295fc59e46a71c3235f0f19c2631d92678275d | /1-50/016.py | 1383b1a10202a4cc83da8030163d3d04c66bf9e3 | [] | no_license | Marmot93/Project-Euler | 67db19e67d29d9291bdfb155961d11bf027d5635 | a23ba931a56443b92c74a5bb048bfdbe881ad303 | refs/heads/master | 2021-01-12T02:13:51.395321 | 2018-02-08T09:02:45 | 2018-02-08T09:02:45 | 82,549,491 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 154 | py | """2的1000次方各位数之和"""
# Project Euler 16: sum of the decimal digits of 2**1000 (a one-liner).
num = sum(map(int, str(2 ** 1000)))
print('答案是: ' + str(num))
| [
"654332905@qq.com"
] | 654332905@qq.com |
835f35f32d97ac1b55d4dda8b712add353ad0796 | 66052f5ba08ddac0a56ee140af17cf78b1ff1174 | /PLURALSIGHT_BEGINNERS/lib/python3.9/site-packages/holoviews/tests/plotting/matplotlib/testpointplot.py | ad2dbfe315b9cd12e93a78996c55f6e2f0f001f8 | [] | no_license | enriquefariasrdz/Python | 34704ceed001bbe8a23471eebefbe536b00031a5 | b9191f7ad87b709a1b83c5cb3797a866b56aaa0d | refs/heads/master | 2022-12-26T03:06:26.481456 | 2022-04-20T14:09:57 | 2022-04-20T14:09:57 | 27,020,899 | 1 | 1 | null | 2022-12-18T21:02:43 | 2014-11-23T03:33:52 | Python | UTF-8 | Python | false | false | 14,958 | py | import numpy as np
from holoviews.core.overlay import NdOverlay
from holoviews.core.spaces import HoloMap
from holoviews.element import Points
from .testplot import TestMPLPlot, mpl_renderer
from ..utils import ParamLogStream
try:
from matplotlib import pyplot
except:
pass
class TestPointPlot(TestMPLPlot):
    """Matplotlib-backend rendering tests for the Points element.

    Covers warning emission, colorbar extend arrows, rc-param handling,
    axis padding (linear, log, datetime), and per-dimension style
    mapping (color/size/linewidth ops) including HoloMap updates.
    """
    def test_points_non_numeric_size_warning(self):
        # Mapping a string dimension to size must warn, not crash.
        data = (np.arange(10), np.arange(10), list(map(chr, range(94,104))))
        points = Points(data, vdims=['z']).opts(plot=dict(size_index=2))
        with ParamLogStream() as log:
            mpl_renderer.get_plot(points)
        log_msg = log.stream.read()
        warning = ('z dimension is not numeric, '
                   'cannot use to scale Points size.\n')
        self.assertEqual(log_msg, warning)
    # --- colorbar extend arrows follow the declared value range ---
    def test_points_cbar_extend_both(self):
        img = Points(([0, 1], [0, 3])).redim(y=dict(range=(1,2)))
        plot = mpl_renderer.get_plot(img.opts(colorbar=True, color_index=1))
        self.assertEqual(plot.handles['cbar'].extend, 'both')
    def test_points_cbar_extend_min(self):
        img = Points(([0, 1], [0, 3])).redim(y=dict(range=(1, None)))
        plot = mpl_renderer.get_plot(img.opts(colorbar=True, color_index=1))
        self.assertEqual(plot.handles['cbar'].extend, 'min')
    def test_points_cbar_extend_max(self):
        img = Points(([0, 1], [0, 3])).redim(y=dict(range=(None, 2)))
        plot = mpl_renderer.get_plot(img.opts(colorbar=True, color_index=1))
        self.assertEqual(plot.handles['cbar'].extend, 'max')
    def test_points_cbar_extend_clime(self):
        img = Points(([0, 1], [0, 3])).opts(style=dict(clim=(None, None)))
        plot = mpl_renderer.get_plot(img.opts(colorbar=True, color_index=1))
        self.assertEqual(plot.handles['cbar'].extend, 'neither')
    # --- fig_rcparams must apply during rendering without leaking globally ---
    def test_points_rcparams_do_not_persist(self):
        opts = dict(fig_rcparams={'text.usetex': True})
        points = Points(([0, 1], [0, 3])).opts(plot=opts)
        mpl_renderer.get_plot(points)
        self.assertFalse(pyplot.rcParams['text.usetex'])
    def test_points_rcparams_used(self):
        opts = dict(fig_rcparams={'grid.color': 'red'})
        points = Points(([0, 1], [0, 3])).opts(plot=opts)
        plot = mpl_renderer.get_plot(points)
        ax = plot.state.axes[0]
        lines = ax.get_xgridlines()
        self.assertEqual(lines[0].get_color(), 'red')
    # --- axis padding: expected limits are precomputed for data [1, 2, 3] ---
    def test_points_padding_square(self):
        points = Points([1, 2, 3]).options(padding=0.1)
        plot = mpl_renderer.get_plot(points)
        x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
        self.assertEqual(x_range[0], -0.2)
        self.assertEqual(x_range[1], 2.2)
        self.assertEqual(y_range[0], 0.8)
        self.assertEqual(y_range[1], 3.2)
    def test_curve_padding_square_per_axis(self):
        curve = Points([1, 2, 3]).options(padding=((0, 0.1), (0.1, 0.2)))
        plot = mpl_renderer.get_plot(curve)
        x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
        self.assertEqual(x_range[0], 0)
        self.assertEqual(x_range[1], 2.2)
        self.assertEqual(y_range[0], 0.8)
        self.assertEqual(y_range[1], 3.4)
    def test_points_padding_hard_xrange(self):
        # An explicit dimension range must not be padded further.
        points = Points([1, 2, 3]).redim.range(x=(0, 3)).options(padding=0.1)
        plot = mpl_renderer.get_plot(points)
        x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
        self.assertEqual(x_range[0], 0)
        self.assertEqual(x_range[1], 3)
        self.assertEqual(y_range[0], 0.8)
        self.assertEqual(y_range[1], 3.2)
    def test_points_padding_soft_xrange(self):
        points = Points([1, 2, 3]).redim.soft_range(x=(0, 3)).options(padding=0.1)
        plot = mpl_renderer.get_plot(points)
        x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
        self.assertEqual(x_range[0], 0)
        self.assertEqual(x_range[1], 3)
        self.assertEqual(y_range[0], 0.8)
        self.assertEqual(y_range[1], 3.2)
    def test_points_padding_unequal(self):
        points = Points([1, 2, 3]).options(padding=(0.05, 0.1))
        plot = mpl_renderer.get_plot(points)
        x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
        self.assertEqual(x_range[0], -0.1)
        self.assertEqual(x_range[1], 2.1)
        self.assertEqual(y_range[0], 0.8)
        self.assertEqual(y_range[1], 3.2)
    def test_points_padding_nonsquare(self):
        points = Points([1, 2, 3]).options(padding=0.1, aspect=2)
        plot = mpl_renderer.get_plot(points)
        x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
        self.assertEqual(x_range[0], -0.1)
        self.assertEqual(x_range[1], 2.1)
        self.assertEqual(y_range[0], 0.8)
        self.assertEqual(y_range[1], 3.2)
    def test_points_padding_logx(self):
        points = Points([(1, 1), (2, 2), (3,3)]).options(padding=0.1, logx=True)
        plot = mpl_renderer.get_plot(points)
        x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
        self.assertEqual(x_range[0], 0.89595845984076228)
        self.assertEqual(x_range[1], 3.3483695221017129)
        self.assertEqual(y_range[0], 0.8)
        self.assertEqual(y_range[1], 3.2)
    def test_points_padding_logy(self):
        points = Points([1, 2, 3]).options(padding=0.1, logy=True)
        plot = mpl_renderer.get_plot(points)
        x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
        self.assertEqual(x_range[0], -0.2)
        self.assertEqual(x_range[1], 2.2)
        self.assertEqual(y_range[0], 0.89595845984076228)
        self.assertEqual(y_range[1], 3.3483695221017129)
    def test_points_padding_datetime_square(self):
        # Datetime x-limits are expressed in matplotlib date units.
        points = Points([(np.datetime64('2016-04-0%d' % i), i) for i in range(1, 4)]).options(
            padding=0.1
        )
        plot = mpl_renderer.get_plot(points)
        x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
        self.assertEqual(x_range[0], 16891.8)
        self.assertEqual(x_range[1], 16894.2)
        self.assertEqual(y_range[0], 0.8)
        self.assertEqual(y_range[1], 3.2)
    def test_points_padding_datetime_nonsquare(self):
        points = Points([(np.datetime64('2016-04-0%d' % i), i) for i in range(1, 4)]).options(
            padding=0.1, aspect=2
        )
        plot = mpl_renderer.get_plot(points)
        x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
        self.assertEqual(x_range[0], 16891.9)
        self.assertEqual(x_range[1], 16894.1)
        self.assertEqual(y_range[0], 0.8)
        self.assertEqual(y_range[1], 3.2)
    def test_points_sizes_scalar_update(self):
        # Scalar style options must update when the HoloMap key changes.
        hmap = HoloMap({i: Points([1, 2, 3]).opts(s=i*10) for i in range(1, 3)})
        plot = mpl_renderer.get_plot(hmap)
        artist = plot.handles['artist']
        plot.update((1,))
        self.assertEqual(artist.get_sizes(), np.array([10]))
        plot.update((2,))
        self.assertEqual(artist.get_sizes(), np.array([20]))
    ###########################
    #    Styling mapping      #
    ###########################
    def test_point_color_op(self):
        points = Points([(0, 0, '#000000'), (0, 1, '#FF0000'), (0, 2, '#00FF00')],
                        vdims='color').options(color='color')
        plot = mpl_renderer.get_plot(points)
        artist = plot.handles['artist']
        self.assertEqual(artist.get_facecolors(),
                         np.array([[0, 0, 0, 1], [1, 0, 0, 1], [0, 1, 0, 1]]))
    def test_point_color_op_update(self):
        points = HoloMap({0: Points([(0, 0, '#000000'), (0, 1, '#FF0000'), (0, 2, '#00FF00')],
                                    vdims='color'),
                          1: Points([(0, 0, '#0000FF'), (0, 1, '#00FF00'), (0, 2, '#FF0000')],
                                    vdims='color')}).options(color='color')
        plot = mpl_renderer.get_plot(points)
        artist = plot.handles['artist']
        plot.update((1,))
        self.assertEqual(artist.get_facecolors(),
                         np.array([[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 0, 1]]))
    def test_point_line_color_op(self):
        points = Points([(0, 0, '#000000'), (0, 1, '#FF0000'), (0, 2, '#00FF00')],
                        vdims='color').options(edgecolors='color')
        plot = mpl_renderer.get_plot(points)
        artist = plot.handles['artist']
        self.assertEqual(artist.get_edgecolors(),
                         np.array([[0, 0, 0, 1], [1, 0, 0, 1], [0, 1, 0, 1]]))
    def test_point_line_color_op_update(self):
        points = HoloMap({0: Points([(0, 0, '#000000'), (0, 1, '#FF0000'), (0, 2, '#00FF00')],
                                    vdims='color'),
                          1: Points([(0, 0, '#0000FF'), (0, 1, '#00FF00'), (0, 2, '#FF0000')],
                                    vdims='color')}).options(edgecolors='color')
        plot = mpl_renderer.get_plot(points)
        artist = plot.handles['artist']
        plot.update((1,))
        self.assertEqual(artist.get_edgecolors(),
                         np.array([[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 0, 1]]))
    def test_point_fill_color_op(self):
        points = Points([(0, 0, '#000000'), (0, 1, '#FF0000'), (0, 2, '#00FF00')],
                        vdims='color').options(facecolors='color')
        plot = mpl_renderer.get_plot(points)
        artist = plot.handles['artist']
        self.assertEqual(artist.get_facecolors(),
                         np.array([[0, 0, 0, 1], [1, 0, 0, 1], [0, 1, 0, 1]]))
    def test_point_linear_color_op(self):
        # Numeric color dims map through the colormap array + clim.
        points = Points([(0, 0, 0), (0, 1, 1), (0, 2, 2)],
                        vdims='color').options(color='color')
        plot = mpl_renderer.get_plot(points)
        artist = plot.handles['artist']
        self.assertEqual(np.asarray(artist.get_array()), np.array([0, 1, 2]))
        self.assertEqual(artist.get_clim(), (0, 2))
    def test_point_linear_color_op_update(self):
        points = HoloMap({0: Points([(0, 0, 0), (0, 1, 1), (0, 2, 2)],
                                    vdims='color'),
                          1: Points([(0, 0, 2.5), (0, 1, 3), (0, 2, 1.2)],
                                    vdims='color')}).options(color='color', framewise=True)
        plot = mpl_renderer.get_plot(points)
        artist = plot.handles['artist']
        self.assertEqual(artist.get_clim(), (0, 2))
        plot.update((1,))
        self.assertEqual(np.asarray(artist.get_array()), np.array([2.5, 3, 1.2]))
        self.assertEqual(artist.get_clim(), (1.2, 3))
    def test_point_categorical_color_op(self):
        # Categorical values are factorized into integer codes.
        points = Points([(0, 0, 'A'), (0, 1, 'B'), (0, 2, 'A')],
                        vdims='color').options(color='color')
        plot = mpl_renderer.get_plot(points)
        artist = plot.handles['artist']
        self.assertEqual(np.asarray(artist.get_array()), np.array([0, 1, 0]))
        self.assertEqual(artist.get_clim(), (0, 1))
    def test_point_size_op(self):
        points = Points([(0, 0, 1), (0, 1, 4), (0, 2, 8)],
                        vdims='size').options(s='size')
        plot = mpl_renderer.get_plot(points)
        artist = plot.handles['artist']
        self.assertEqual(artist.get_sizes(), np.array([1, 4, 8]))
    def test_point_size_op_update(self):
        points = HoloMap({0: Points([(0, 0, 3), (0, 1, 1), (0, 2, 2)],
                                    vdims='size'),
                          1: Points([(0, 0, 2.5), (0, 1, 3), (0, 2, 1.2)],
                                    vdims='size')}).options(s='size')
        plot = mpl_renderer.get_plot(points)
        artist = plot.handles['artist']
        self.assertEqual(artist.get_sizes(), np.array([3, 1, 2]))
        plot.update((1,))
        self.assertEqual(artist.get_sizes(), np.array([2.5, 3, 1.2]))
    def test_point_line_width_op(self):
        points = Points([(0, 0, 1), (0, 1, 4), (0, 2, 8)],
                        vdims='line_width').options(linewidth='line_width')
        plot = mpl_renderer.get_plot(points)
        artist = plot.handles['artist']
        self.assertEqual(artist.get_linewidths(), [1, 4, 8])
    def test_point_line_width_op_update(self):
        points = HoloMap({0: Points([(0, 0, 3), (0, 1, 1), (0, 2, 2)],
                                    vdims='line_width'),
                          1: Points([(0, 0, 2.5), (0, 1, 3), (0, 2, 1.2)],
                                    vdims='line_width')}).options(linewidth='line_width')
        plot = mpl_renderer.get_plot(points)
        artist = plot.handles['artist']
        self.assertEqual(artist.get_linewidths(), [3, 1, 2])
        plot.update((1,))
        self.assertEqual(artist.get_linewidths(), [2.5, 3, 1.2])
    # Marker and alpha cannot be vectorized on the mpl backend -> error.
    def test_point_marker_op(self):
        points = Points([(0, 0, 'circle'), (0, 1, 'triangle'), (0, 2, 'square')],
                        vdims='marker').options(marker='marker')
        with self.assertRaises(Exception):
            mpl_renderer.get_plot(points)
    def test_point_alpha_op(self):
        points = Points([(0, 0, 0), (0, 1, 0.2), (0, 2, 0.7)],
                        vdims='alpha').options(alpha='alpha')
        with self.assertRaises(Exception):
            mpl_renderer.get_plot(points)
    def test_op_ndoverlay_value(self):
        markers = ['d', 's']
        overlay = NdOverlay({marker: Points(np.arange(i))
                             for i, marker in enumerate(markers)},
                            'Marker').options('Points', marker='Marker')
        plot = mpl_renderer.get_plot(overlay)
        for subplot, marker in zip(plot.subplots.values(), markers):
            style = dict(subplot.style[subplot.cyclic_index])
            style = subplot._apply_transforms(subplot.current_frame, {}, style)
            self.assertEqual(style['marker'], marker)
    # Declaring both a style mapping and the legacy *_index option warns.
    def test_point_color_index_color_clash(self):
        points = Points([(0, 0, 0), (0, 1, 1), (0, 2, 2)],
                        vdims='color').options(color='color', color_index='color')
        with ParamLogStream() as log:
            mpl_renderer.get_plot(points)
        log_msg = log.stream.read()
        warning = ("Cannot declare style mapping for 'color' option "
                   "and declare a color_index; ignoring the color_index.\n")
        self.assertEqual(log_msg, warning)
    def test_point_size_index_size_clash(self):
        points = Points([(0, 0, 0), (0, 1, 1), (0, 2, 2)],
                        vdims='size').options(s='size', size_index='size')
        with ParamLogStream() as log:
            mpl_renderer.get_plot(points)
        log_msg = log.stream.read()
        warning = ("Cannot declare style mapping for 's' option "
                   "and declare a size_index; ignoring the size_index.\n")
        self.assertEqual(log_msg, warning)
| [
"enriquefariasrdz@gmail.com"
] | enriquefariasrdz@gmail.com |
31af510cc88211d647550bb4b15a5f9689c61f99 | 0a6a908b2c098613bdcad4221fcb0e4e1c1f7d25 | /data Collection.py | 485179c4b6fd9e7ddc74fb4372fc212382195885 | [] | no_license | DanishKumar/Masters-Thesis | e9f1a14bdc28ad3adb4430e741fd50f44649b39b | c1d97dda56692391bb0b83f1b143738e068425ff | refs/heads/master | 2020-06-23T23:31:08.346458 | 2019-07-25T10:37:15 | 2019-07-25T10:37:15 | 198,785,203 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,713 | py | import tweepy
import csv
import os
import time  # BUGFIX: time.sleep() is used below but time was never imported
# SECURITY(review): real Twitter API credentials are hard-coded here; they
# should be revoked and loaded from the environment or a config file instead.
ckey = "lTtJFQw7VcSmGuq2rXEPPtiRM"
csecret = "bbEpVT3VLptOw2k8SoVn4HgFP6B0REAxbXwQrxmuGacYb15I5K"
atoken = "2217386142-N9Zg6ySIWmLZm2mHFsBGIf9A32OsKp9jOQZ24gf"
asecret = "cqq2HCyJAzPkbGcWL0ySWqvHuwWYqF7TF4O4czI1Dqqoa"
OAUTH_KEYS = {'consumer_key':ckey, 'consumer_secret':csecret,
   'access_token_key':atoken, 'access_token_secret':asecret}
auth = tweepy.OAuthHandler(OAUTH_KEYS['consumer_key'], OAUTH_KEYS['consumer_secret'])
api = tweepy.API(auth)
# Append tweets to americanidol.csv, writing the header only on first run.
file_exists = os.path.isfile('C:\Users\Danish Kumar\Desktop\dataset\americanidol.csv')
csvFile = open('americanidol.csv', 'ab')
fields = ('Tweet_Id', 'Tweet_Text','Tweet_authorscreen_name','Tweet_author_id','Tweet_created_at','Tweet_coordinates','Tweet_source','Tweet_user_verified','Tweet_retweet_count','Tweet_lang','Tweet_favcount','Tweet_username','Tweet_userid','Tweet_location') #field names
csvWriter = csv.DictWriter(csvFile, fieldnames=fields)
if not file_exists:
    csvWriter.writeheader()
c = tweepy.Cursor(api.search, q="#americanidol", since="2019-04-14", until="2019-04-19", lang="en", tweet_mode="extended").items()
count=0;
while True:
    try:
        tweet = c.next()
        # NOTE(review): this inner cursor searches "#psl" while the outer
        # cursor searches "#americanidol" -- almost certainly a copy/paste
        # leftover; confirm which hashtag is intended.
        for tweet in tweepy.Cursor(api.search, q="#psl", since="2019-04-14", until="2019-04-19", tweet_mode="extended").items():
            print (tweet.id_str, (tweet.full_text.encode('utf-8').replace('\n', '').replace('\r', ' ').decode('unicode_escape').encode('ascii','ignore').strip()), tweet.author.screen_name, tweet.author.id, tweet.created_at,tweet.coordinates,tweet.source,tweet.user.verified,tweet.retweet_count,tweet.lang,tweet.user.favourites_count,tweet.user.name,tweet.user.id_str,tweet.user.location)
            csvWriter.writerow({'Tweet_Id': tweet.id_str, 'Tweet_Text': (tweet.full_text.encode('utf-8').replace('\n', '').replace('\r', ' ').decode('unicode_escape').encode('ascii','ignore').strip()),'Tweet_authorscreen_name':tweet.author.screen_name.encode('utf-8').strip(),'Tweet_author_id':tweet.author.id,'Tweet_created_at':tweet.created_at,'Tweet_coordinates':tweet.coordinates,'Tweet_source':tweet.source.encode('utf-8').strip(),'Tweet_user_verified':tweet.user.verified,'Tweet_retweet_count':tweet.retweet_count,'Tweet_lang':tweet.lang.encode('utf-8').strip(),'Tweet_favcount':tweet.user.favourites_count,'Tweet_username':tweet.user.name.encode('utf8').strip(),'Tweet_userid':tweet.user.id_str,'Tweet_location':tweet.user.location.encode('utf-8').strip()})
            count +=1
    except tweepy.TweepError:
        # Rate limited: back off for 15 minutes, then resume.
        print("Whoops, could not fetch more! just wait for 15 minutes :")
        time.sleep(900)
        continue
    except StopIteration:
        break
csvFile.close()
print(count)
| [
"danish.kumar93@gmail.com"
] | danish.kumar93@gmail.com |
31b6e3eaebde03b0a8cc8012f96bd4403011b0ee | e9aef5affd441b13a443dc2e0ae1613db28870e7 | /Test_sum_two_lowest_numbers.py | d2f714b1457d17ee5f7655d1e73cc3d2944827b1 | [] | no_license | glicodin21/Bundyusoft_Test | 06327b051ee7453bfd3bd3037c8e67a1dc86a6ec | 551cd640fc95f03f9b4c8682fc5f4bd28839da6c | refs/heads/master | 2020-07-10T13:38:11.268176 | 2019-08-30T14:29:31 | 2019-08-30T14:29:31 | 204,274,807 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,212 | py | import unittest
from Sum_two_lowest_numbers import sum_two_lowest_numbers
class Test(unittest.TestCase):
    """Unit tests for sum_two_lowest_numbers (sum of the two smallest ints)."""
    def test_sum_two_lowest_numbers(self):
        # Basic positive and mixed-sign inputs of exactly three elements.
        self.assertEqual(sum_two_lowest_numbers(
            [1, 2, 3]), 3, "Should be 3")
        self.assertEqual(sum_two_lowest_numbers(
            [1000, 2000, 3000]), 3000, "Should be 3000")
        self.assertEqual(sum_two_lowest_numbers(
            [-7234123, 857322, 115]), -7234008, "Should be -7234008")
    def test_pick_two_lowest_numbers_with_len_validation(self):
        # Longer lists, negatives and all-zero input.
        self.assertEqual(sum_two_lowest_numbers(
            [10, 1, 9, 2]), 3, "Should be 3")
        self.assertEqual(sum_two_lowest_numbers(
            [-10, 1, 9, -2]), -12, "Should be -12")
        self.assertEqual(sum_two_lowest_numbers(
            [0, 0, 0]), 0, "Should be 0")
    def test_raises_value_error_if_list_is_empty(self):
        # Fewer than two elements is a contract violation.
        self.assertRaises(ValueError,
                          sum_two_lowest_numbers, [])
    def test_raises_value_error_if_list_is_contain_one_element(self):
        self.assertRaises(ValueError,
                          sum_two_lowest_numbers, [1])
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| [
"noreply@github.com"
] | noreply@github.com |
a849badaacd47915c426af336330853080f870d0 | 19e9e246100603b507005dec0c82cfd3d0d08e75 | /settings.py | decd4426e821649c4f3b4c02a4ef00c8b42451bb | [
"MIT"
] | permissive | embrace-inpe/swds-api-downloader | e0ab31860c2d5870486ebaf99a71a26f7b954700 | f4a4f40fca6de713d6eb1d26dc29e36c094ecb32 | refs/heads/master | 2020-04-12T15:20:27.686621 | 2019-04-02T13:08:40 | 2019-04-02T13:08:40 | 162,577,734 | 5 | 4 | MIT | 2019-04-02T13:08:40 | 2018-12-20T12:38:00 | Python | UTF-8 | Python | false | false | 458 | py | # Search filters
# Search filters.
# You must set these values if you are not using the command-line args.
SEARCH = {
    'application': 2,
    'start_date': '2019-03-01',
    'end_date': '2019-03-01',
    'resolution': None,
    'station': 2,
    'swfilter': 7,
    'swtype': None,
    'network': None,
    'equipment': None
}
# Directory where downloaded files are saved.
PATH_TO_SAVE = './tmp/imager/'
# Credentials (fill in before use; keep them out of version control).
# The username key can be your username or e-mail.
USERNAME = ''
PASSWORD = ''
| [
"silviolleite@gmail.com"
] | silviolleite@gmail.com |
b020ce1d7374b7195c3545ce178c7b9387f9ddd1 | 72b8e2d69cca8b5ecd28e61ef61fef85f9dd0489 | /q190.py | 3bf0f6319123cdb7f2dd25ae44e6f074a9eafef1 | [] | no_license | maples1993/LeetCode | f975bc8570729d998481b097ee04effe5a7c5977 | 032016724564d0bee85f9e1b9d9d6c769d0eb667 | refs/heads/master | 2020-03-27T22:05:07.397746 | 2018-11-07T06:13:56 | 2018-11-07T06:13:56 | 147,203,152 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | """
Date: 2018/9/6
"""
class Solution:
    """LeetCode 190: reverse the bits of a 32-bit unsigned integer."""
    # @param n, an integer
    # @return an integer
    def reverseBits(self, n):
        """Return *n* with its 32 bits in reversed order.

        ``n`` is masked to 32 bits first so oversized Python ints behave
        like unsigned 32-bit values.  (A leftover debug ``print(bin(n))``
        was removed from the original.)
        """
        n &= 0xFFFFFFFF
        res = 0
        for _ in range(32):
            # Shift the result left and move n's lowest bit into it.
            res = (res << 1) | (n & 1)
            n >>= 1
        return res
print(Solution().reverseBits(43261596)) | [
"panchao1993@126.com"
] | panchao1993@126.com |
8e5759f7185b426930f438446f26ae2be4b62f17 | 87800bce1a4829402f1f37932e0f01b3feb87fa1 | /viminidb/videbugging.py | ca32c92bee1baf28ba396517ab65e8e468614e23 | [] | no_license | sixkey/py-scripts | f5ca51434bf45faa79692933d5c4824b8cfe59ae | 7166515ba1dd30b7a48bb82c5d39710774200366 | refs/heads/main | 2023-03-15T08:27:20.363569 | 2021-03-02T18:04:56 | 2021-03-02T18:04:56 | 333,176,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 750 | py | def create_loud_function(function):
    # Wrapper: run the call first, then print "name(args) = result".
    def loud_function(*args, **kwargs):
        res = function(*args, **kwargs)
        # NOTE(review): kwargs are rendered as key names only (values are
        # dropped) and concatenated to args without a separator -- confirm
        # whether that formatting is intentional.
        print(f"{function.__name__}(" +
              f"{', '.join([str(x) for x in args])}" +
              (f"{', '.join([str(x) for x in kwargs])}" if kwargs else "") +
              f") = {str(res)}")
        return res
    return loud_function
def create_louder_function(function):
    """Wrap *function* so each call announces itself before running.

    The wrapper first prints a blank line plus ``name(args)`` (keyword
    argument names, if any, are appended without a separator), then
    invokes *function*, prints its result, and returns it unchanged.
    """
    def louder_function(*args, **kwargs):
        positional_text = ', '.join(str(value) for value in args)
        keyword_text = ', '.join(str(key) for key in kwargs) if kwargs else ""
        print(f"\n{function.__name__}({positional_text}{keyword_text})")
        result = function(*args, **kwargs)
        print(str(result))
        return result
    return louder_function
| [
"filipku4a@gmail.com"
] | filipku4a@gmail.com |
e1fc43f35600eb1ab30bcb687acd093d5345c74f | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_veritable.py | c4260f18bbed72556a78374e7679857fe6dc69a3 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py |
#calss header
class _VERITABLE():
def __init__(self,):
self.name = "VERITABLE"
self.definitions = [u'used to describe something as another, more exciting, interesting, or unusual thing, as a way of emphasizing its character: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
8917e8c28c35145f26b8c83dc4634b8657f48236 | 26317cbbf23c87a8596f490e15741c4a297ed176 | /constr.py | 81807d407aacb5cbd98127b57dab82bfee7db94f | [] | no_license | vishnuhs/mypython | 694a829298ccb28e5a2f5d1969ffee92f3c90c9d | 8069fa24b1cac76392a29881a868325ffede0c32 | refs/heads/master | 2020-04-21T00:15:44.713817 | 2020-04-02T08:02:08 | 2020-04-02T08:02:08 | 169,191,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | def convert(text):
words=text.split(";")
for word in words:
strwords=word.split("=")
words[words.index(word)]=(strwords[0],strwords[1])
return words
| [
"vishnu.hs1998@gmail.com"
] | vishnu.hs1998@gmail.com |
5208906c09939f76f644bef4f999ef65b8a1cfae | 37438771565238194ea997fa65619bd32c823706 | /catkin_ws/17-11-16/LPH/build/catkin_generated/order_packages.py | 24ce42469160b8cc3411cbaef6a5190b3592e0f2 | [] | no_license | Aaron9477/restore | b040b8be695c513946c0243c4acb735f427d8bba | 8dc13ed7cf0c4e5cde911169d11e330d826f40bd | refs/heads/master | 2021-09-15T10:50:59.969952 | 2018-05-31T03:11:55 | 2018-05-31T03:11:55 | 110,834,815 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | # generated from catkin/cmake/template/order_packages.context.py.in
# Root of the catkin workspace's source space.
source_root_dir = "/home/zq610/LPH/src"
# White/black lists are empty here; the template splits on ';' when set.
whitelisted_packages = "".split(';') if "" != "" else []
blacklisted_packages = "".split(';') if "" != "" else []
# Underlay workspaces this workspace extends (the base ROS install).
underlay_workspaces = "/opt/ros/kinetic".split(';') if "/opt/ros/kinetic" != "" else []
| [
"869788668@qq.com"
] | 869788668@qq.com |
59e0938b9889a36090c117f11970b5d8d271260e | 24eb991aa1a5bad9d89e7c7fdafc0db0f65f820e | /Janken/JankenOutput.py | 9dd10476bbadfd3429305c27345446e4e81f2710 | [] | no_license | yemarn510/YM_Python | 1e4ba16775b920f9ab84991d6d61c98d437ccfde | 72181672d800ec59bac06978cab08a59e734933e | refs/heads/master | 2022-04-15T07:02:11.085185 | 2020-03-15T16:09:00 | 2020-03-15T16:09:00 | 247,501,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 705 | py | """
This module prints the result message and shows who won.
"""
from random import randrange
def show_winner(player_one_count, player_two_count):
    """
    Print the final score and announce the winner.

    args : player_one_count : the number of games player 1 won
         : player_two_count : the number of games player 2 won
    """
    # Print the score without a trailing newline so the verdict lands on
    # the same output line.
    print("The Score is {}-{}. ".format(str(player_one_count),
                                        str(player_two_count)),
          end='')
    if player_one_count > player_two_count:
        print("Player 1 wins.")
    elif player_one_count < player_two_count:
        # Capitalization fixed to match the "Player 1 wins." message above.
        print("Player 2 wins.")
    else:
        print("The Game is Draw")
| [
"yemarn.510@gmail.com"
] | yemarn.510@gmail.com |
7bef991e0b0cc4a0ccbb6365b1ad0fae7bce83cf | 7ca46f6b2f115712600b052bab8da6e16f90aad1 | /PythonProjects/fibonacciCalc.py | ead0942aa916e6739346a5663abd717f51bffe96 | [] | no_license | KristinCovert/Bootcamp | 73f83e5e1ce0b03fa1242f355bfbc13a099e9b6c | e4af8a3999bc8e4b9708e35268c619151d77f581 | refs/heads/master | 2021-05-28T01:44:06.211008 | 2014-11-26T23:16:20 | 2014-11-26T23:16:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | __author__ = 'Kristin'
def range_top():
    """Ask the user for the upper bound of the Fibonacci calculation.

    Python 2 semantics: input() evaluates the typed expression, so a number
    entered at the prompt comes back as an int.
    """
    return input('Up to what number would you like to calculate the Fibonacci: ')
def fibonacci(rt):
    """Return the first *rt* Fibonacci terms as produced by this script:
    1, 2, 3, 5, 8, ... (the second '1' of the classical series is skipped,
    matching the original behaviour).
    """
    previous, current = 0, 1
    sequence = []
    for _ in range(rt):
        # Advance the pair and record the freshly computed term.
        previous, current = current, previous + current
        sequence.append(current)
    return sequence
# Script entry point.  Python 2 only: ``print`` statements below, and
# range_top() relies on Python 2's evaluating input() to return an int.
if __name__ == '__main__':
    x = range_top()
    result = fibonacci(x)
    print result
    # NOTE(review): 'l' is an easily-misread variable name (looks like '1').
    l = len(result)
    print l
| [
"kristin.bratland@gmail.com"
] | kristin.bratland@gmail.com |
988e2af58befb5de29f610f0a755afa3463f72dd | 6d64861c997a710b95c3378a2a5283ec85d110d5 | /setup.py | 8d7f4e8b5a1e5b1fa6d67cfe991169be68cd3a4c | [
"Apache-2.0"
] | permissive | badmutex/openstack-client-shell | 1f761b9af161d5275e4e0dbce9b67bf9e7432014 | 10c1b4ad58a2562a0d23be4f6d34a36237cbc6f2 | refs/heads/master | 2021-05-29T01:54:49.849567 | 2015-06-12T19:06:53 | 2015-06-12T19:06:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 862 | py | from setuptools import setup, find_packages
import os.path
# IMPORTANT: use semantic versioning
# http://semver.org
VERSION = '0.4.4'
NAME = 'openstack-client-shell'

# #################################################################### version
module_dir = 'openstack'
version_file = os.path.join(module_dir, 'version.py')

# Template for the generated openstack/version.py.  {version!r} renders the
# value as a quoted Python literal; the previous bare {version} produced
# ``version = 0.4.4``, which is a SyntaxError when the module is imported.
version_module_contents = """\
# WARNING
# This file is automatically generated by setup.py
# Do not modify by hand
version = {version!r}
""".format(version=VERSION)

# Regenerate the version module on every setup.py run.
with open(version_file, 'w') as fd:
    fd.write(version_module_contents)

# #################################################################### setup
setup(name=NAME,
      version=VERSION,
      description="OpenStack API using shell commands",
      author="Badi' Abdul-Wahid",
      author_email='abdulwahidc@gmail.com',
      packages=find_packages(),
      )
| [
"abdulwahidc@gmail.com"
] | abdulwahidc@gmail.com |
9ab8c1cfef72c9b54df1a43e0a919da8d13a725c | 9c81c170f03ba925bf3d0682526245c202e384a7 | /superset/cli/test.py | f175acec470cd59f06f6d1ad8de07765a2520901 | [
"Apache-2.0",
"OFL-1.1"
] | permissive | zcong1993/incubator-superset | 2a08177641eff178dee9db852887ad2d19d70d54 | 269c99293f42089958dc98b5d6e5899509fc3111 | refs/heads/master | 2023-08-17T12:24:59.438120 | 2023-08-17T10:50:24 | 2023-08-17T10:50:24 | 209,522,299 | 0 | 0 | Apache-2.0 | 2023-03-06T08:10:31 | 2019-09-19T10:09:21 | TypeScript | UTF-8 | Python | false | false | 2,860 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import click
from colorama import Fore
from flask.cli import with_appcontext
import superset.utils.database as database_utils
from superset import app, security_manager
logger = logging.getLogger(__name__)
@click.command()
@with_appcontext
def load_test_users() -> None:
    """
    Loads admin, alpha, and gamma user for testing purposes

    Syncs permissions for those users/roles
    """
    print(Fore.GREEN + "Loading a set of users for unit tests")
    # Delegate to the undecorated helper so non-CLI callers can reuse it.
    load_test_users_run()
def load_test_users_run() -> None:
    """
    Loads admin, alpha, and gamma user for testing purposes

    Syncs permissions for those users/roles
    """
    if not app.config["TESTING"]:
        # Guard: these fixture accounts must never be created outside tests.
        return

    security = security_manager
    examples_db = database_utils.get_example_database()
    examples_pv = security.add_permission_view_menu("database_access", examples_db.perm)

    security.sync_role_definitions()

    # Two extra roles: Gamma+sql_lab access, with and without CSV export.
    gamma_sqllab_role = security.add_role("gamma_sqllab")
    security.add_permission_role(gamma_sqllab_role, examples_pv)
    gamma_no_csv_role = security.add_role("gamma_no_csv")
    security.add_permission_role(gamma_no_csv_role, examples_pv)

    for role_name in ("Gamma", "sql_lab"):
        for permission in security.find_role(role_name).permissions:
            security.add_permission_role(gamma_sqllab_role, permission)
            if str(permission) != "can csv on Superset":
                security.add_permission_role(gamma_no_csv_role, permission)

    # Well-known test accounts; created only when missing so reruns are safe.
    for username, role_name in (
        ("admin", "Admin"),
        ("gamma", "Gamma"),
        ("gamma2", "Gamma"),
        ("gamma_sqllab", "gamma_sqllab"),
        ("alpha", "Alpha"),
        ("gamma_no_csv", "gamma_no_csv"),
    ):
        if not security.find_user(username):
            security.add_user(
                username,
                username,
                "user",
                username + "@fab.org",
                security.find_role(role_name),
                password="general",
            )
    security.get_session.commit()
| [
"noreply@github.com"
] | noreply@github.com |
64592d3ee4f2219d3ea1f98f687bdb1984f866da | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02780/s702903623.py | ef5c9f02ab956fe90728da489ecd4bc87f90841f | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | n,k = map(int,input().split())
# Competitive-programming solution: n dice, pick k adjacent ones maximizing
# the expected sum (n and k are parsed on the preceding line).
# Expected value of one p-faced die roll is (p + 1) / 2.
P = list(map(int,input().split()))
# Convert P in place into a prefix sum of per-die expected values.
P[0] = (P[0]+1.)/2
for i in range(1,len(P)):
    P[i] = (P[i]+1.)/2
    P[i] = P[i-1]+P[i]
ans = 0.
if n==1:
    ans = P[0]
elif len(P)-k==0:
    # The window covers every die: answer is the full prefix sum.
    ans = P[k-1]
else:
    # The loop below only covers windows starting at index 1..len(P)-k, so
    # seed ans with the first window's sum P[k-1]; previously that window
    # was silently skipped, giving a wrong answer whenever the first k dice
    # were the best choice.
    ans = P[k-1]
    for i in range(len(P)-k):
        ans = max(ans,(P[i+k]-P[i]))
print(ans)
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
d6b0539a2cd34a3318a634029493799c8d1029ff | 2aec9c5e8c72b731d3abf22f2a407fe09c1cde09 | /ZQZ510/ZQZ510/spiders/zqz.py | 3a4ae2a8fc42615dd7eaaf1a56965897c452c5d3 | [] | no_license | jiangyg/ZWFproject | 8b24cc34970ae0a9c2a2b0039dc527c83a5862b5 | aa35bc59566d92721f23d2dd00b0febd268ac2dd | refs/heads/master | 2020-09-26T17:01:00.229380 | 2019-11-15T13:16:21 | 2019-11-15T13:16:21 | 226,297,631 | 0 | 1 | null | 2019-12-06T09:55:37 | 2019-12-06T09:55:36 | null | UTF-8 | Python | false | false | 5,422 | py | # -*- coding: utf-8 -*-
import scrapy
import time
import json
from ZQZ510.items import Zqz510Item
empty_word = 'null'
class ZqzSpider(scrapy.Spider):
    """Crawl trademark judgment documents from the zqz510.com JSON API.

    Pipeline: parse (probe query, sets session cookies) -> parse_first
    (reads the total hit count, schedules one request per 10-result page)
    -> parse_list (copies summary fields, requests the detail endpoint)
    -> parse_detail (merges detail fields, yields the finished item).

    Every endpoint answers JSONP of the form ``_jqjsp(<json>)``; the JSON
    payload is recovered by slicing off the wrapper with ``text[7:-1]``.
    """
    name = 'zqz'
    allowed_domains = ['zqz510.com']
    start_urls = ['http://login.zqz510.com/judgmentDoc']

    # Summary-record fields copied verbatim (missing keys become empty_word).
    LIST_FIELDS = ('agS', 'agidS', 'an', 'anDest', 'apS', 'apidS', 'cid',
                   'docid', 'law', 'link', 'litem', 'ltid', 'pd', 'psty',
                   'rid', 'ti', 'ty')
    # Summary fields passed through str() because the API nests lists in them.
    LIST_STR_FIELDS = ('anList',)
    # Detail-record fields: stringified and copied verbatim, respectively.
    DETAIL_STR_FIELDS = ('dtls', 'judg', 'judgList', 'links', 'ltidAll', 'pdCn')
    DETAIL_PLAIN_FIELDS = ('ftxt',)

    @staticmethod
    def _payload(response):
        """Parse a JSONP response, stripping the ``_jqjsp(`` / ``)`` wrapper."""
        # The original passed encoding='utf-8' to json.loads; that keyword
        # was ignored on Python 3 and is rejected outright since Python 3.9.
        return json.loads(response.text[7:-1])

    @staticmethod
    def _copy_fields(item, data, fields, stringify=False):
        """Copy *fields* from *data* into *item*, defaulting missing keys to
        empty_word and optionally coercing present values with str()."""
        for key in fields:
            if key in data:
                item[key] = str(data[key]) if stringify else data[key]
            else:
                item[key] = empty_word

    def parse(self, response):
        """Issue the initial query (page 1) just to learn the result count."""
        url = 'http://api.zqz510.com//tmof/query?ftxt=&ti=&apS=&pdStart=&pdEnd=&ty=&psty=&law=&litem=&pageNum=1' \
              '&apS=&apD=&ag=&judgd=&tid=&cid=&callback=_jqjsp&_{}='.format(str(int(time.time() * 1000)))
        # Session/identity cookies captured from a logged-in browser session.
        self.cookie = {
            'uid': '213facea-5ac7-4069-ae4a-97168d559ebc',
            'oid': 'UAGAP00003919',
            'JSESSIONID': '9867C3C37D24634CB9D44D1AA5C6188F',
            'c': '82f5dd5f-f8ae-459b-9907-fd0bb01d97cb',
        }
        yield scrapy.Request(url=url, callback=self.parse_first, cookies=self.cookie)

    def parse_first(self, response):
        """Schedule one list request per page (10 results per page)."""
        total = int(self._payload(response)['total'])
        for page in range(total // 10 + 1):
            url = 'http://api.zqz510.com//tmof/query?ftxt=&ti=&apS=&pdStart=&pdEnd=&ty=&psty=&law=&litem=&pageNum={}' \
                  '&apS=&apD=&ag=&judgd=&tid=&cid=&callback=_jqjsp&_{}='.format(str(page + 1), str(int(time.time() * 1000)))
            yield scrapy.Request(url=url, callback=self.parse_list, cookies=self.cookie)

    def parse_list(self, response):
        """Copy each summary record into an item and request its detail page."""
        for data in self._payload(response)['data']:
            item = Zqz510Item()
            self._copy_fields(item, data, self.LIST_FIELDS)
            self._copy_fields(item, data, self.LIST_STR_FIELDS, stringify=True)
            detail_url = 'http://api.zqz510.com/tmof/detail?docid={}&callback=_jqjsp&_{}='.format(item['docid'], str(int(time.time() * 1000)))
            yield scrapy.Request(url=detail_url, callback=self.parse_detail, meta={'item': item}, cookies=self.cookie)

    def parse_detail(self, response):
        """Merge the detail fields into the partially-filled item and emit it."""
        payload = self._payload(response)
        item = response.meta['item']
        self._copy_fields(item, payload, self.DETAIL_STR_FIELDS, stringify=True)
        self._copy_fields(item, payload, self.DETAIL_PLAIN_FIELDS)
        yield item
"34021500@qq.com"
] | 34021500@qq.com |
d72f0e6e1d8aaabc1a02b10a8fbc864b8f6d0b65 | 29345337bf86edc938f3b5652702d551bfc3f11a | /python/src/main/python/pyalink/alink/tests/examples/from_docs/test_totensorstreamop.py | 78c1de91112c783148b8652120fe7425e975fcf9 | [
"Apache-2.0"
] | permissive | vacaly/Alink | 32b71ac4572ae3509d343e3d1ff31a4da2321b6d | edb543ee05260a1dd314b11384d918fa1622d9c1 | refs/heads/master | 2023-07-21T03:29:07.612507 | 2023-07-12T12:41:31 | 2023-07-12T12:41:31 | 283,079,072 | 0 | 0 | Apache-2.0 | 2020-07-28T02:46:14 | 2020-07-28T02:46:13 | null | UTF-8 | Python | false | false | 553 | py | import unittest
from pyalink.alink import *
import numpy as np
import pandas as pd
class TestToTensorStreamOp(unittest.TestCase):
    """Smoke-test ToTensorStreamOp on a one-row stream of serialized vectors."""

    def test_totensorstreamop(self):
        # One row whose string column encodes a FLOAT tensor of 6 values.
        frame = pd.DataFrame(["FLOAT#6#0.0 0.1 1.0 1.1 2.0 2.1 "])
        stream = StreamOperator.fromDataframe(frame, schemaStr='vec string')
        to_tensor = (ToTensorStreamOp()
                     .setSelectedCol("vec")
                     .setTensorShape([2, 3])
                     .setTensorDataType("float"))
        stream.link(to_tensor).print()
        StreamOperator.execute()
"shaomeng.wang.w@gmail.com"
] | shaomeng.wang.w@gmail.com |
f4771bd090478972d022ce9b450d530bb2408052 | 6c3ab38e350734f1bc4f0c746ea55a12838ce5ee | /pcserver/mainapp/handlers.py | 93a7d32aa090f9a76b8f6ab1bca16d7d2eda3868 | [] | no_license | joelsemar/Programming-Challenge | 1dd4fb487d02e05ed494e66da99a627970832988 | b8bf8e115dc3c242d62bf696d3268a4b31019592 | refs/heads/master | 2020-05-17T15:16:45.892328 | 2011-08-31T19:17:15 | 2011-08-31T19:17:15 | 2,298,598 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,662 | py | from webservice_tools.utils import BaseHandler, AutoListHandler
from webservice_tools.decorators import login_required
from mainapp.models import * #@UnusedWildImport
#Create your handlers here
class PhotosHandler(AutoListHandler):
    # Declarative config consumed by AutoListHandler: expose Photo rows,
    # include the computed image_url field and hide the raw image field.
    model = Photo
    allowed_methods = ('GET',)
    extra_fields = ('image_url',)
    exclude = ('image', )
    # Authentication is enforced here; the listing itself is inherited.
    @login_required
    def read(self, request, response):
        """
        Returns a list of Photo objects.
        API Handler: GET /photos
        Params:
           @key [string] your api key
        Returns:
          @photos [Photo] list of photos, see Photo docs for details
        """
        return super(PhotosHandler, self).read(request, response)
class PhotoHandler(BaseHandler):
    # Single-object variant of PhotosHandler: same field configuration,
    # lookup by primary key.
    model = Photo
    allowed_methods = ('GET',)
    extra_fields = ('image_url',)
    exclude = ('image', )
    # Authentication is enforced here; the fetch itself is inherited.
    @login_required
    def read(self, request, id, response):
        """
        Fetch the details of a photo by id
        API Handler: GET /photo/{id}
        Params:
           @id [id] id of the photo (in the url)
           @key [string] your api key
        Returns:
           @title [string] title
           @description [string] a short description
           @image_url [url] a url to the corresponding image
        """
        return super(PhotoHandler, self).read(request, id, response)
#ALL DEFINITION EOF
# Build handlers._all_ (note: single underscores, not the conventional
# __all__) listing every class in this module's namespace that shares
# BaseHandler's metaclass.
module_name = globals().get('__name__')
handlers = sys.modules[module_name]
handlers._all_ = []
for handler_name in dir():
    m = getattr(handlers, handler_name)
    # type(m) == type(BaseHandler) is true for any class object built by the
    # same metaclass — presumably intended to pick up the handler classes
    # above.  NOTE(review): it also matches unrelated classes pulled in by
    # the wildcard imports, and 'sys' itself must come from one of those
    # wildcards (it is not imported explicitly here) — confirm.
    if type(m) == type(BaseHandler):
        handlers._all_.append(handler_name)
| [
"semarjt@gmail.com"
] | semarjt@gmail.com |
9243f350125d286dd52323819b4b4de309f7bcd2 | e09439c9dd7ff01caae74e08ea333a8a11eb8f57 | /src/spectrosegment/CNN_train/10_train.py | 50068b546710be323ad3afd786fc910d18a6b58c | [] | no_license | muachilin/Freesound-General-Purpose-Audio-Tagging-Challenge | f57f81e08bfc7b2d3ca4fe03dc0aa17d9ea812b0 | 18fdd2d9e5dacccce3eef60c0a454e52777bd453 | refs/heads/master | 2020-09-17T01:10:19.274061 | 2019-11-25T13:34:24 | 2019-11-25T13:34:24 | 223,942,714 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,930 | py | import numpy as np
import pickle
from sys import argv
import os
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers import PReLU
from keras.layers import Conv2D, MaxPooling2D, Flatten, AveragePooling2D, GlobalAveragePooling2D
from keras.layers.normalization import BatchNormalization
from keras.optimizers import SGD, Adam
from keras.utils import np_utils, plot_model
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import EarlyStopping, ModelCheckpoint
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
import xgboost as xgb
#os.environ["CUDA_VISIBLE_DEVICES"]="0"
#gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.4)
#sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
#set_session(sess)
def cnn_model():
    """Build the 41-class CNN for (128, 10, 1) spectrogram segments.

    Input is presumably a mel-spectrogram patch of 128 bins x 10 frames
    (TODO confirm against the feature-extraction step).  Five Conv2D stages
    with tall (10, 3) kernels, PReLU activations and heavy dropout, followed
    by two dense layers and a softmax over the 41 classes.
    """
    model = Sequential()
    model.add(Conv2D(100, (10, 3), input_shape = (128, 10, 1), padding = 'same') )
    model.add(PReLU())
    model.add(BatchNormalization())
    model.add(MaxPooling2D((2,2))) # 64 * 5
    model.add(Dropout(0.35))
    model.add(Conv2D(150, (10, 3), padding = 'same'))
    model.add(PReLU())
    model.add(BatchNormalization())
    model.add(MaxPooling2D((2,2))) # 32 * 2
    model.add(Dropout(0.35))
    model.add(Conv2D(200, (10, 3), padding = 'same'))
    model.add(PReLU())
    model.add(BatchNormalization())
    model.add(MaxPooling2D((2,2))) # 16 * 1
    model.add(Dropout(0.35))
    model.add(Conv2D(300, (10, 3), padding = 'same'))
    model.add(PReLU())
    model.add(BatchNormalization(axis = -1))
    #model.add(MaxPooling2D((2,2))) # 16 * 1
    model.add(Dropout(0.35))
    model.add(Conv2D(400, (10, 3), padding = 'same'))
    model.add(PReLU())
    #model.add(MaxPooling2D((2,2)))
    model.add(Dropout(0.35))
    model.add(Flatten())
    # NOTE(review): Dense(activation='relu') immediately followed by PReLU
    # applies ReLU and then PReLU; probably only one activation was intended.
    model.add(Dense(units = 200, activation = 'relu'))
    model.add(PReLU(alpha_initializer='zeros'))
    model.add(BatchNormalization())
    model.add(Dropout(0.3))
    model.add(Dense(units = 100, activation = 'relu'))
    model.add(PReLU(alpha_initializer='zeros'))
    model.add(BatchNormalization())
    model.add(Dropout(0.3))
    model.add(Dense(units = 41, activation = 'softmax'))
    model.summary()
    return model
def train(modelname):
    """Train the CNN from scratch on the manually verified examples only.

    Saves the best checkpoint (by val_loss) to models/<modelname>.
    """
    features = np.load('train_X_verified.npy')
    labels = np.load('train_Y_verified.npy')
    features = features.reshape(features.shape[0], 128, 10, 1)
    labels = labels.reshape(labels.shape[0], labels.shape[1])

    # Deterministic shuffle before carving off the validation split.
    np.random.seed(1200)
    order = np.random.permutation(len(features))
    features = features[order]
    labels = labels[order]

    # First 13000 shuffled samples are held out for validation.
    val_X, val_Y = features[:13000], labels[:13000]
    train_X, train_Y = features[13000:], labels[13000:]

    model = cnn_model()
    callbacks = [
        ModelCheckpoint('models/' + modelname,   # model filename
                        monitor='val_loss',      # quantity to monitor
                        verbose=1,
                        save_best_only=True,     # keep only the best weights
                        mode='auto'),
        EarlyStopping(monitor='val_loss', patience=20, verbose=0),
    ]
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.fit(train_X, train_Y, epochs=150, batch_size=128,
              validation_data=(val_X, val_Y), verbose=1, callbacks=callbacks)
def retrain(modelname):
    """Train on the verified set plus the confidence-filtered unverified set."""
    ver_X = np.load('train_X_verified.npy')
    ver_Y = np.load('train_Y_verified.npy')
    ver_X = ver_X.reshape(ver_X.shape[0], 128, 10, 1)
    ver_Y = ver_Y.reshape(ver_Y.shape[0], ver_Y.shape[1])

    extra_X = np.load('X_nonveri_filtered.npy')
    extra_Y = np.load('Y_nonveri_filtered.npy')
    extra_X = extra_X.reshape(extra_X.shape[0], 128, 10, 1)
    extra_Y = extra_Y.reshape(extra_Y.shape[0], extra_Y.shape[1])

    total_X = np.concatenate((ver_X, extra_X), axis=0)
    total_Y = np.concatenate((ver_Y, extra_Y), axis=0)
    total_len = total_X.shape[0]
    print(total_Y.shape)
    print(total_X.shape)
    print(total_len)

    model = cnn_model()
    callbacks = [
        ModelCheckpoint('models/' + modelname,   # model filename
                        monitor='val_loss',      # quantity to monitor
                        verbose=0,
                        save_best_only=True,     # keep only the best weights
                        mode='auto'),
        EarlyStopping(monitor='val_loss', patience=20, verbose=0),
    ]
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

    # NOTE(review): unlike train(), no shuffle happens before the first 10%
    # is taken as validation, so the split is not randomized — confirm intended.
    split = int(np.floor(total_len * 0.1))
    model.fit(total_X[split:], total_Y[split:], epochs=100, batch_size=128,
              validation_data=(total_X[:split], total_Y[:split]),
              verbose=1, callbacks=callbacks)
def main():
    """CLI dispatch: ``<script> <model-name> train|retrain``."""
    modelname = argv[1]
    mode = argv[2]
    if mode == 'train':
        train(modelname)
    elif mode == 'retrain':
        retrain(modelname)
# Script entry point.
if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | noreply@github.com |
dc10e73e97008209d65dd1570fac21edd864e507 | ce7776a87f57454a70e1d2bde351fda20b43d7ef | /postgres-appliance/scripts/spilo_commons.py | 7742e8c2657ce5997bfff42fc04608f923549a45 | [
"Apache-2.0"
] | permissive | munjalpatel/spilo | 6387640dfe46b3b80076acd7dd3cf59fae2f57eb | f8179d9a5de5e9a78e5a447130f759f97811879b | refs/heads/master | 2023-09-04T19:06:47.323964 | 2021-11-11T16:04:17 | 2021-11-11T16:04:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,646 | py | import logging
import os
import subprocess
import re
import yaml
# Module-level logger.  Fixed: the original called getLogger('__name__')
# (a quoted string literal), creating a logger literally named "__name__"
# instead of one named after this module.
logger = logging.getLogger(__name__)
# Runtime-writable directory, overridable via the RW_DIR env variable.
RW_DIR = os.environ.get('RW_DIR', '/run')
# Generated Patroni configuration lives under the runtime directory.
PATRONI_CONFIG_FILE = os.path.join(RW_DIR, 'postgres.yml')
# Root under which each PostgreSQL major version keeps its binaries.
LIB_DIR = '/usr/lib/postgresql'
# (min_version, max_version, shared_preload_libraries, extwlist.extensions)
extensions = {
    'timescaledb': (9.6, 13, True, True),
    'pg_cron': (9.5, 14, True, False),
    'pg_stat_kcache': (9.4, 14, True, False),
    'pg_partman': (9.4, 14, False, True)
}
if os.environ.get('ENABLE_PG_MON') == 'true':
    extensions['pg_mon'] = (11, 14, True, False)


def adjust_extensions(old, version, extwlist=False):
    """Filter a comma-separated extension list for a PostgreSQL *version*.

    Known extensions are kept only when *version* falls inside their
    supported range (and, with *extwlist*, only when they are whitelisted
    for extwlist.extensions); unknown names are kept as-is.  Duplicates
    are dropped.
    """
    ret = []
    for name in old.split(','):
        name = name.strip()
        value = extensions.get(name)
        # Parenthesized so the de-duplication check applies to both branches
        # (mirrors append_extentions below); the original's missing
        # parentheses made `name not in ret` apply only to unknown names,
        # letting duplicates of known extensions through.
        if name not in ret and (value is None or
                                value[0] <= version <= value[1] and (not extwlist or value[3])):
            ret.append(name)
    return ','.join(ret)
def append_extentions(old, version, extwlist=False):
    """Merge *old* (comma-separated) with every known extension supported by
    *version*, checking the shared_preload_libraries flag by default or the
    extwlist.extensions flag when *extwlist* is set.  Order: names from
    *old* first, then remaining known extensions; duplicates are skipped.
    """
    flag_index = 3 if extwlist else 2
    ret = []

    def _try_add(name):
        if name in ret:
            return
        info = extensions.get(name)
        if info is None or (info[0] <= version <= info[1] and info[flag_index]):
            ret.append(name)

    for raw in old.split(','):
        _try_add(raw.strip())
    for known in extensions.keys():
        _try_add(known)
    return ','.join(ret)
def get_binary_version(bin_dir):
    """Run ``postgres --version`` from *bin_dir* and return the major version.

    Pre-10 servers yield e.g. '9.6'; 10+ servers yield e.g. '12'.
    """
    postgres_bin = os.path.join(bin_dir or '', 'postgres')
    output = subprocess.check_output([postgres_bin, '--version']).decode()
    match = re.match(r'^[^\s]+ [^\s]+ (\d+)(\.(\d+))?', output)
    if int(match.group(1)) < 10:
        return match.group(1) + '.' + match.group(3)
    return match.group(1)
def get_bin_dir(version):
    """Path of the PostgreSQL bin directory for the given major *version*."""
    return '%s/%s/bin' % (LIB_DIR, version)
def is_valid_pg_version(version):
    """True when an executable ``postgres`` binary exists for *version*."""
    candidate = os.path.join(get_bin_dir(version), 'postgres')
    return os.path.isfile(candidate) and os.access(candidate, os.X_OK)
def write_file(config, filename, overwrite):
    """Write *config* to *filename*, keeping an existing file unless
    *overwrite* is truthy (in which case the file is replaced)."""
    if not overwrite and os.path.exists(filename):
        logger.warning('File %s already exists, not overwriting. (Use option --force if necessary)', filename)
        return
    with open(filename, 'w') as f:
        logger.info('Writing to file %s', filename)
        f.write(config)
def get_patroni_config():
    """Load and return the current Patroni configuration from disk."""
    with open(PATRONI_CONFIG_FILE) as stream:
        return yaml.safe_load(stream)
def write_patroni_config(config, force):
    """Serialize *config* as YAML into the Patroni config file.

    Existing files are only replaced when *force* is set (see write_file).
    """
    rendered = yaml.dump(config, default_flow_style=False, width=120)
    write_file(rendered, PATRONI_CONFIG_FILE, force)
| [
"noreply@github.com"
] | noreply@github.com |
f375b00087150e1f82d65a9e60ddf607f3a4fdbd | 3845fdd6010cfd86e0cc8dded06c964a81fdf6ac | /registration/views.py | b1b62c20f6649595bc5f6a5473c0b66a0df90ab9 | [
"MIT"
] | permissive | Yash1256/Django-Intern | 72511e9bc3f262cd6509a7353ec8fc64fe9ade0b | c1d42ff344324b56d462ae8c3d5b6682a2b255b6 | refs/heads/master | 2021-09-25T11:45:17.994744 | 2020-10-03T21:38:15 | 2020-10-03T21:38:15 | 249,540,783 | 1 | 1 | MIT | 2021-09-22T18:47:32 | 2020-03-23T20:48:25 | Python | UTF-8 | Python | false | false | 3,741 | py | import math
from django.core.exceptions import ValidationError
from django.shortcuts import render, redirect
from django.urls import reverse, reverse_lazy
from django.views import View
from .forms import AuthorForm
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseNotFound
from .models import Author
from post.models import Post
class Landing(View):
    """Public landing page."""

    def get(self, request):
        """Render the static index template."""
        template = 'registration/index.html'
        return render(request, template)
class Register(View):
    """Author sign-up form."""

    def get(self, request):
        """Render the empty registration form."""
        return render(request, 'registration/register.html')

    def post(self, request):
        """Validate the submitted form; create the author or redisplay with errors."""
        form = AuthorForm(request.POST)
        try:
            if not form.is_valid():
                return render(request, 'registration/register.html', context={'error': form.errors})
            form.save()
            return redirect('login')
        except ValidationError as err:
            # Model-level validation errors raised during validation/save.
            return render(request, 'registration/register.html', context={'error': err.message_dict})
class Login(View):
    """Email/password login form."""

    def get(self, request):
        """Show the login form."""
        return render(request, 'registration/login.html')

    def post(self, request):
        """Authenticate and redirect; re-render the form with an error otherwise."""
        email = request.POST.get('email', None)
        password = request.POST.get('password', None)
        if email is None or password is None:
            return render(request, 'registration/login.html', context={'error': 'Fields are required.'})
        user = authenticate(request, email=email, password=password)
        if user is None:
            return render(request, 'registration/login.html', context={'error': 'Email or Password is not valid.'})
        login(request, user)
        # Honor ?next=<url-name> when present, otherwise go to the
        # logged-in author's own profile page.
        next_view = request.GET.get('next', None)
        if next_view:
            return redirect(reverse(next_view))
        profile = Author.objects.get(email=user.email)
        return redirect('author', profile.pk)
@login_required(redirect_field_name='next', login_url=reverse_lazy('login'))
def author(request, aid):
    """Show one author's profile page; 404 when the id is unknown."""
    try:
        person = Author.objects.get(pk=aid)
    except Author.DoesNotExist:
        return HttpResponseNotFound()
    context = {
        'count': Post.objects.filter(author_id=person.pk).count(),
        'bdate': person.birthdate,
        'name': person.name,
    }
    return render(request, 'registration/author.html', context=context)
@login_required(redirect_field_name='next', login_url=reverse_lazy('login'))
def authors_table(request):
    """Paginated author listing (10 rows per page) with prev/next page links."""
    # Parse ?page=, falling back to page 1 on anything non-numeric.
    try:
        page = request.GET.get('page', '1')
        p = int(page)
    except ValueError:
        p = 1
    total = Author.objects.count()
    # Total number of pages; note with zero authors count == 0, so any
    # request (including page 1) falls into the 404 branch below.
    count = math.ceil(total/10)
    context = dict()
    if count < p:
        return HttpResponseNotFound()
    else:
        # Immediate neighbours for the pager.
        if p > 1:
            context['prev'] = p - 1
        if count > p:
            context['next'] = p + 1
        # Up to two page numbers before the previous page (descending),
        # plus an ellipsis flag when more were cut off.
        if p > 2:
            context['prev_pg'] = []
            for i in range(p-2, 0, -1):
                context['prev_pg'].append(i)
            if len(context['prev_pg']) > 2:
                context['prev_pg'] = context['prev_pg'][:2]
                context['ellipsisP'] = True
        # Up to two page numbers after the next page; built ascending,
        # truncated, then reversed — presumably the template iterates this
        # list right-to-left (TODO confirm against authors.html).
        if count > p + 1:
            context['next_pg'] = []
            for i in range(p + 2, count + 1, 1):
                context['next_pg'].append(i)
            if len(context['next_pg']) > 2:
                context['next_pg'] = context['next_pg'][:2]
                context['ellipsisN'] = True
            context['next_pg'].reverse()
    # Slice out the 10 authors belonging to the requested page.
    st = (p-1)*10
    ed = min(p*10, total)
    context['page'] = p
    context['authors'] = Author.objects.all()[st:ed]
    return render(request, 'registration/authors.html', context=context)
| [
"shuklayash1256@gmail.com"
] | shuklayash1256@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.