Dataset schema (one row per source file; "nullable" marks columns that contain nulls):

| column | dtype | range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |
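The schema describes a per-file code corpus: one row per Python source file, with repo provenance for the most-starred, most-issued, and most-forked mirrors of the file, plus simple quality signals (the `score_*` columns). A minimal sketch of filtering such a dataset with the Hugging Face `datasets` library; the dataset identifier here is a placeholder, not the actual source of this dump:

```python
from datasets import load_dataset

# Placeholder dataset id; substitute the real one for this dump.
ds = load_dataset("org/python-files-with-scores", split="train", streaming=True)

# Keep well-documented files from starred repos, mirroring the
# score_documentation and max_stars_count columns in the schema above.
kept = ds.filter(
    lambda row: row["score_documentation"] > 0.2
    and (row["max_stars_count"] or 0) > 5  # max_stars_count is nullable
)

for row in kept.take(3):
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
```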
**Record 1**: hexsha `cbfb410cacd5080693f012f125e877edd266870a`; size 172; ext py; lang Python
path: features/environment.py; head_hexsha: `efb7189c13c398dbd5d4301ca496a2e583b0f5b7`; licenses: ["MIT"]

| event | repo_name | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|
| stars | geeksforsocialchange/imok | 6 | 2021-05-12T08:40:36.000Z | 2022-01-25T08:31:06.000Z |
| issues | geeksforsocialchange/imok | 14 | 2021-05-12T09:03:08.000Z | 2021-06-10T13:18:52.000Z |
| forks | geeksforsocialchange/imok | 1 | 2021-05-14T20:54:15.000Z | 2021-05-14T20:54:15.000Z |

content:
```python
from django.conf import settings

settings.NOTIFY_EMAIL = 'root@localhost'
settings.DEBUG = True


def before_all(context):
    context.users = {}
    context.members = {}
```
stats: avg_line_length 17.2; max_line_length 40; alphanum_fraction 0.72093; count/score: classes 0/0, generators 0/0, decorators 0/0, async_functions 0/0, documentation 16/0.093023
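Record 1 is a behave `environment.py` hook file for a Django project: `before_all` runs once before the whole test run and seeds shared state on `context`. A hypothetical step definition showing how that state would be consumed; the step text is illustrative, not taken from the imok repo:

```python
# features/steps/members_steps.py (hypothetical)
from behave import given


@given('a member named "{name}" is registered')
def step_register_member(context, name):
    # context.members was initialized to {} in before_all, so steps
    # can accumulate shared fixtures across the run.
    context.members[name] = {"name": name}
    assert name in context.members
```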
**Record 2**: hexsha `cbfc891317c3347008f8eaea66169ec8996add82`; size 2,546; ext py; lang Python
path: h2o-py/tests/testdir_jira/pyunit_pubdev_7353_reset_threshold.py; head_hexsha: `9322fb0f4c0e2358449e339a434f607d524c69fa`; licenses: ["Apache-2.0"]

| event | repo_name | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|
| stars | vishalbelsare/h2o-3 | 6,098 | 2015-05-22T02:46:12.000Z | 2022-03-31T16:54:51.000Z |
| issues | vishalbelsare/h2o-3 | 2,517 | 2015-05-23T02:10:54.000Z | 2022-03-30T17:03:39.000Z |
| forks | vishalbelsare/h2o-3 | 2,199 | 2015-05-22T04:09:55.000Z | 2022-03-28T22:20:45.000Z |

content:
```python
import sys
sys.path.insert(1, "../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.gbm import H2OGradientBoostingEstimator
from h2o.utils.model_utils import reset_model_threshold


def test_reset_threshold():
    """
    Test the model threshold can be reset.
    Performance metric should be recalculated and also predictions should be changed based on the new threshold.
    """
    # import data
    airlines = h2o.import_file(path=pyunit_utils.locate("smalldata/airlines/modified_airlines.csv"))
    # convert columns to factors
    airlines["Year"] = airlines["Year"].asfactor()
    airlines["Month"] = airlines["Month"].asfactor()
    airlines["DayOfWeek"] = airlines["DayOfWeek"].asfactor()
    airlines["Cancelled"] = airlines["Cancelled"].asfactor()
    airlines['FlightNum'] = airlines['FlightNum'].asfactor()
    # set the predictor names and the response column name
    predictors = ["Origin", "Dest", "Year", "UniqueCarrier", "DayOfWeek", "Month", "Distance", "FlightNum"]
    response = "IsDepDelayed"
    # split into train and validation sets
    train, valid = airlines.split_frame(ratios = [.8], seed = 1234)
    # initialize the estimator
    model = H2OGradientBoostingEstimator(seed = 1234, ntrees=5)
    # train the model
    model.train(x=predictors, y=response, training_frame=train)
    old_threshold = model._model_json['output']['default_threshold']
    # predict
    preds = model.predict(airlines)
    # reset the threshold and get the old one
    new_threshold = 0.6917189903082518
    old_returned = reset_model_threshold(model, new_threshold)
    reset_model = h2o.get_model(model.model_id)
    reset_threshold = reset_model._model_json['output']['default_threshold']
    # predict with reset model
    preds_reset = reset_model.predict(airlines)
    # compare thresholds
    assert old_threshold == old_returned
    assert new_threshold == reset_threshold
    assert reset_threshold != old_threshold
    # compare predictions
    preds_local = preds.as_data_frame()
    preds_reset_local = preds_reset.as_data_frame()
    print("old threshold:", old_threshold, "new_threshold:", new_threshold)
    for i in range(airlines.nrow):
        if old_threshold <= preds_local.iloc[i, 2] < new_threshold:
            assert preds_local.iloc[i, 0] != preds_reset_local.iloc[i, 0]
        else:
            assert preds_local.iloc[i, 0] == preds_reset_local.iloc[i, 0]


if __name__ == "__main__":
    pyunit_utils.standalone_test(test_reset_threshold)
else:
    test_reset_threshold()
```
stats: avg_line_length 35.361111; max_line_length 112; alphanum_fraction 0.714061; count/score: classes 0/0, generators 0/0, decorators 0/0, async_functions 0/0, documentation 788/0.309505
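The row-by-row assertions in record 2 rest on how a binary classifier maps a positive-class probability to a label: raising the decision threshold from `old` to `new` flips exactly those predictions whose probability lies in the half-open interval `[old, new)`. A pure-Python sketch of that rule, independent of h2o:

```python
def label(p, threshold):
    # Positive class iff the predicted probability reaches the threshold.
    return 1 if p >= threshold else 0

old_t, new_t = 0.5, 0.6917189903082518
for p in (0.40, 0.55, 0.69, 0.70):
    flipped = label(p, old_t) != label(p, new_t)
    # Only probabilities in [old_t, new_t) change label when the
    # threshold is raised, which is what the test asserts per row.
    assert flipped == (old_t <= p < new_t)
```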
**Record 3**: hexsha `cbfd7282e7bf8367942a36811a4c23c2043f6215`; size 2,324; ext py; lang Python
path: tests/datasets/TestV1/csv2sql.py; head_hexsha: `0280428bfc916530f9de26336631f6a6602c6804`; licenses: ["MIT"]

| event | repo_name | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|
| stars | pvanderknyff/alibabacloud-adb-tableau-connector | 1 | 2019-08-21T17:53:50.000Z | 2019-08-21T17:53:50.000Z |
| issues | aliyun/aliyun-adb-tableau-connector | 1 | 2020-06-29T08:38:54.000Z | 2020-06-29T08:38:54.000Z |
| forks | aliyun/alibabacloud-adb-tableau-connector | null | null | null |

content:
```python
#!/usr/bin/python
import argparse
import csv
import sys

'''
This script takes a CSV file with a mandatory header and a sql tablename and converts the data in the csv file into
an SQL INSERT statement.
'''


def parse_arguments():
    # initialize argumentparser and arguments
    parser = argparse.ArgumentParser(description='Takes a csv file and a tablename and creates an SQL insert statement')
    parser.add_argument('csvFile', type=argparse.FileType('r'), help='The CSV file to be read')
    parser.add_argument('-t', '--table', dest='tablename', help='The name of the destination SQL table', required=True)
    parser.add_argument('-d', '--delimiter', dest='delimiter', default=',', help='The delimiter used in the CSV')
    # parse arguments
    args = parser.parse_args()
    return args


def main():
    # parse arguments
    args = parse_arguments()
    # Open CSV and start output
    with args.csvFile as f:
        reader = csv.reader(f, delimiter=args.delimiter, quoting=csv.QUOTE_ALL)
        # Create the header row, since we may have to repeat it
        header_row = 'INSERT INTO `' + args.tablename + '` ('
        first = True
        for item in next(reader):
            if first:
                first = False
            else:
                header_row += ', '
            header_row += item
        header_row += ') VALUES '
        # Set a counter, since there can't be more than 1000 inserts at a time
        counter = 0
        # Loop through the rows...
        for row in reader:
            if counter % 10 == 0:
                if counter != 0:
                    sys.stdout.write(';\n')
                #print(header_row)
                sys.stdout.write(header_row)
            else:
                sys.stdout.write(',')
            sys.stdout.write('(')
            first = True
            # Loop through the items in each row
            for item in row:
                if first:
                    first = False
                else:
                    sys.stdout.write(', ')
                sys.stdout.write('\'' + item.replace('\'', '\'\'').replace('""', 'NULL').replace('&amp;', '&') + '\'')
                #sys.stdout.write(item)
            sys.stdout.write(')')
            # Increase counter
            counter += 1
        sys.stdout.write(';')


if __name__ == "__main__":
    main()
```
stats: avg_line_length 31.835616; max_line_length 120; alphanum_fraction 0.55809; count/score: classes 0/0, generators 0/0, decorators 0/0, async_functions 0/0, documentation 845/0.363597
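The inner loop of `csv2sql.py` doubles embedded single quotes (standard SQL escaping) and rewrites a field consisting of two double quotes to `NULL`; because the result is still wrapped in quotes, that emits the string `'NULL'` rather than a bare SQL NULL. A minimal sketch of the expression with made-up values:

```python
def escape(item):
    # Mirrors the inner-loop expression in csv2sql.py.
    return "'" + item.replace("'", "''").replace('""', 'NULL') + "'"

assert escape("O'Brien") == "'O''Brien'"
assert escape('""') == "'NULL'"  # a quoted string, not a bare SQL NULL
```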
**Record 4**: hexsha `cbff48d02931d3f7dcc779f4f74d3a26a84b6bb5`; size 1,043; ext py; lang Python
path: FlaskApp/app.py; head_hexsha: `114ca9fc39f039cbdf0f1ff613fb66e364cea171`; licenses: ["MIT"]

| event | repo_name | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|
| stars | Dec22gln/FlaskBlog | null | null | null |
| issues | Dec22gln/FlaskBlog | null | null | null |
| forks | Dec22gln/FlaskBlog | null | null | null |

content:
```python
from flask import Flask
from flask import render_template

app = Flask(__name__)

@app.route('/')
def hello_world():
    return render_template('index.html')

@app.route('/index')
def index():
    return render_template('index.html')

@app.route('/contact')
def contact():
    return render_template('contact.html')

@app.route('/cv')
def cv():
    return render_template('cv.html')

@app.route('/hire-me')
def hireMe():
    return render_template('hire-me.html')

@app.route('/project-page')
def projectPage():
    return render_template('project-page.html')

@app.route('/projects-compact-grid')
def projects1():
    return render_template('projects-compact-grid.html')

@app.route('/projects-no-images')
def projects2():
    return render_template('projects-no-images.html')

@app.route('/projects-with-sidebar')
def projects3():
    return render_template('projects-with-sidebar.html')

@app.route('/projects-grid-cards')
def projects4():
    return render_template('projects-with-sidebar.html')

if __name__ == '__main__':
    app.run()
```
stats: avg_line_length 21.729167; max_line_length 56; alphanum_fraction 0.708533; count/score: classes 0/0, generators 0/0, decorators 899/0.861937, async_functions 0/0, documentation 341/0.326942
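Since every view in record 4 only renders a template, the route table is the whole behaviour; Flask exposes it as `app.url_map`. A hypothetical inspection script, run alongside `FlaskApp/app.py`, which also makes the reused template in `projects4` easy to spot:

```python
from app import app  # assumes FlaskApp/app.py is importable as `app`

for rule in app.url_map.iter_rules():
    # One rule per decorated view; note that '/projects-grid-cards'
    # and '/projects-with-sidebar' both render projects-with-sidebar.html.
    print(rule.rule, "->", rule.endpoint)
```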
**Record 5**: hexsha `cbffe9c4b5d1ee44110edbd0b422813f50993bf7`; size 1,913; ext py; lang Python
path: azure-servicefabric/azure/servicefabric/models/primary_replicator_status_py3.py; head_hexsha: `f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b`; licenses: ["MIT"]

| event | repo_name | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|
| stars | JonathanGailliez/azure-sdk-for-python | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z |
| issues | JonathanGailliez/azure-sdk-for-python | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z |
| forks | JonathanGailliez/azure-sdk-for-python | 1 | 2018-10-16T13:08:23.000Z | 2018-10-16T13:08:23.000Z |

content:
```python
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .replicator_status_py3 import ReplicatorStatus


class PrimaryReplicatorStatus(ReplicatorStatus):
    """Provides statistics about the Service Fabric Replicator, when it is
    functioning in a Primary role.

    All required parameters must be populated in order to send to Azure.

    :param kind: Required. Constant filled by server.
    :type kind: str
    :param replication_queue_status: Details about the replication queue on
     the primary replicator.
    :type replication_queue_status:
     ~azure.servicefabric.models.ReplicatorQueueStatus
    :param remote_replicators: The status of all the active and idle secondary
     replicators that the primary is aware of.
    :type remote_replicators:
     list[~azure.servicefabric.models.RemoteReplicatorStatus]
    """

    _validation = {
        'kind': {'required': True},
    }

    _attribute_map = {
        'kind': {'key': 'Kind', 'type': 'str'},
        'replication_queue_status': {'key': 'ReplicationQueueStatus', 'type': 'ReplicatorQueueStatus'},
        'remote_replicators': {'key': 'RemoteReplicators', 'type': '[RemoteReplicatorStatus]'},
    }

    def __init__(self, *, replication_queue_status=None, remote_replicators=None, **kwargs) -> None:
        super(PrimaryReplicatorStatus, self).__init__(**kwargs)
        self.replication_queue_status = replication_queue_status
        self.remote_replicators = remote_replicators
        self.kind = 'Primary'
```
stats: avg_line_length 39.854167; max_line_length 103; alphanum_fraction 0.665447; count/score: classes 1,384/0.723471, generators 0/0, decorators 0/0, async_functions 0/0, documentation 1,352/0.706743
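AutoRest-generated models like record 5 are keyword-argument containers: `_attribute_map` tells the serializer how Python attribute names map to the payload's PascalCase keys, and `kind` is the polymorphic discriminator that `__init__` pins to `'Primary'`. A minimal construction sketch with placeholder values; a real status object would come back from the Service Fabric client rather than being built by hand:

```python
status = PrimaryReplicatorStatus(
    replication_queue_status=None,  # would be a ReplicatorQueueStatus
    remote_replicators=[],          # list of RemoteReplicatorStatus
)
assert status.kind == 'Primary'  # set by __init__, not by the caller
```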
**Record 6**: hexsha `0200db1441c66699ac789aeb7d02549ecd867f2a`; size 448; ext py; lang Python
path: example/example/models.py; head_hexsha: `1179a11358734e5e472e5eee703e8d34fa49e9bf`; licenses: ["MIT"]

| event | repo_name | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|
| stars | KnightConan/sspdatatables | 4 | 2018-11-23T16:17:38.000Z | 2018-11-26T16:08:49.000Z |
| issues | zhiwei2017/sspdatatables | 8 | 2018-11-26T16:38:55.000Z | 2019-01-18T15:13:12.000Z |
| forks | KnightConan/sspdatatables | null | null | null |

content:
```python
from django.db import models
from django_countries.fields import CountryField
from django.db.models.deletion import CASCADE


class Author(models.Model):
    name = models.CharField(max_length=60)
    nationality = CountryField()


class Book(models.Model):
    name = models.CharField(max_length=60)
    description = models.TextField()
    author = models.ForeignKey(Author, on_delete=CASCADE)
    published_at = models.DateField(auto_now=True)
```
stats: avg_line_length 26.352941; max_line_length 57; alphanum_fraction 0.765625; count/score: classes 317/0.707589, generators 0/0, decorators 0/0, async_functions 0/0, documentation 0/0
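In record 6, `on_delete=CASCADE` means deleting an `Author` also deletes their `Book` rows, and `published_at` uses `auto_now=True`, so it is rewritten on every save (for a publication date, `auto_now_add=True` would be the more usual choice). A minimal ORM sketch with invented data, assuming migrations have been applied:

```python
author = Author.objects.create(name="Ursula K. Le Guin", nationality="US")
book = Book.objects.create(
    name="The Dispossessed",
    description="An ambiguous utopia.",
    author=author,
)
assert book.author_id == author.pk
author.delete()  # CASCADE removes the book as well
assert not Book.objects.filter(pk=book.pk).exists()
```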
**Record 7**: hexsha `020213a818c2a038dbd07a3442e4a8ae253739be`; size 4,805; ext py; lang Python
path: workspace/baseline/midi_generator.py; head_hexsha: `0afb93a91c9226949d617894d6aa2d67c4de4eb6`; licenses: ["MIT"]

| event | repo_name | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|
| stars | SeungHeonDoh/EMOPIA | 69 | 2021-07-12T03:17:17.000Z | 2022-03-27T06:16:35.000Z |
| issues | SeungHeonDoh/EMOPIA | 7 | 2021-07-27T09:10:15.000Z | 2022-02-07T05:15:56.000Z |
| forks | SeungHeonDoh/EMOPIA | 7 | 2021-07-12T10:41:14.000Z | 2022-02-04T10:28:08.000Z |

content:
```python
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "4"
import json
import argparse
import numpy as np
import tensorflow as tf
import midi_encoder as me

from train_generative import build_generative_model
from train_classifier import preprocess_sentence

GENERATED_DIR = './generated'


def override_neurons(model, layer_idx, override):
    h_state, c_state = model.get_layer(index=layer_idx).states
    c_state = c_state.numpy()
    for neuron, value in override.items():
        c_state[:, int(neuron)] = int(value)
    model.get_layer(index=layer_idx).states = (h_state, tf.Variable(c_state))


def sample_next(predictions, k):
    # Sample using a categorical distribution over the top k midi chars
    top_k = tf.math.top_k(predictions, k)
    top_k_choices = top_k[1].numpy().squeeze()
    top_k_values = top_k[0].numpy().squeeze()
    if np.random.uniform(0, 1) < .5:
        predicted_id = top_k_choices[0]
    else:
        p_choices = tf.math.softmax(top_k_values[1:]).numpy()
        predicted_id = np.random.choice(top_k_choices[1:], 1, p=p_choices)[0]
    return predicted_id


def process_init_text(model, init_text, char2idx, layer_idx, override):
    model.reset_states()
    for c in init_text.split(" "):
        # Run a forward pass
        try:
            input_eval = tf.expand_dims([char2idx[c]], 0)
            # override sentiment neurons
            override_neurons(model, layer_idx, override)
            predictions = model(input_eval)
        except KeyError:
            if c != "":
                print("Can't process char", c)
    return predictions


def generate_midi(model, char2idx, idx2char, init_text="", seq_len=256, k=3, layer_idx=-2, override={}):
    # Add front and end pad to the initial text
    init_text = preprocess_sentence(init_text)
    # Empty midi to store our results
    midi_generated = []
    # Process initial text
    predictions = process_init_text(model, init_text, char2idx, layer_idx, override)
    # Here batch size == 1
    model.reset_states()
    for i in range(seq_len):
        # remove the batch dimension
        predictions = tf.squeeze(predictions, 0).numpy()
        # Sample using a categorical distribution over the top k midi chars
        predicted_id = sample_next(predictions, k)
        # Append it to generated midi
        midi_generated.append(idx2char[predicted_id])
        # override sentiment neurons
        override_neurons(model, layer_idx, override)
        # Run a new forward pass
        input_eval = tf.expand_dims([predicted_id], 0)
        predictions = model(input_eval)
    return init_text + " " + " ".join(midi_generated)


if __name__ == "__main__":
    # Parse arguments
    parser = argparse.ArgumentParser(description='midi_generator.py')
    parser.add_argument('--model', type=str, default='./trained', help="Checkpoint dir.")
    parser.add_argument('--ch2ix', type=str, default='./trained/char2idx.json', help="JSON file with char2idx encoding.")
    parser.add_argument('--embed', type=int, default=256, help="Embedding size.")
    parser.add_argument('--units', type=int, default=512, help="LSTM units.")
    parser.add_argument('--layers', type=int, default=4, help="LSTM layers.")
    parser.add_argument('--seqinit', type=str, default="\n", help="Sequence init.")
    parser.add_argument('--seqlen', type=int, default=512, help="Sequence length.")
    parser.add_argument('--cellix', type=int, default=4, help="LSTM layer to use as encoder.")
    parser.add_argument('--override', type=str, default="./trained/neurons_Q1.json", help="JSON file with neuron values to override.")
    opt = parser.parse_args()
    # Load char2idx dict from json file
    with open(opt.ch2ix) as f:
        char2idx = json.load(f)
    # Load override dict from json file
    override = {}
    try:
        with open(opt.override) as f:
            override = json.load(f)
    except FileNotFoundError:
        print("Override JSON file not provided.")
    # Create idx2char from char2idx dict
    idx2char = {idx: char for char, idx in char2idx.items()}
    # Calculate vocab_size from char2idx dict
    vocab_size = len(char2idx)
    # Rebuild model from checkpoint
    model = build_generative_model(vocab_size, opt.embed, opt.units, opt.layers, batch_size=1)
    model.load_weights(tf.train.latest_checkpoint(opt.model))
    model.build(tf.TensorShape([1, None]))
    if not os.path.exists(GENERATED_DIR):
        os.makedirs(GENERATED_DIR)
    # Generate 100 midis
    for i in range(100):
        # Generate a midi as text
        print("Generate midi {}".format(i))
        midi_txt = generate_midi(model, char2idx, idx2char, opt.seqinit, opt.seqlen, layer_idx=opt.cellix, override=override)
        me.write(midi_txt, os.path.join(GENERATED_DIR, "generated_Q1_{}.mid".format(i)))
```
stats: avg_line_length 35.330882; max_line_length 134; alphanum_fraction 0.678044; count/score: classes 0/0, generators 0/0, decorators 0/0, async_functions 0/0, documentation 1,181/0.245786
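`sample_next` in record 7 is a coin-flip variant of top-k sampling: half the time it takes the argmax outright, otherwise it samples from the remaining k-1 candidates with softmax-renormalised weights. A numpy-only sketch of the same rule, decoupled from TensorFlow:

```python
import numpy as np

def sample_next_np(logits, k, rng):
    # Indices of the top-k logits, highest first (like tf.math.top_k).
    top_idx = np.argsort(logits)[::-1][:k]
    top_val = logits[top_idx]
    if rng.uniform(0, 1) < 0.5:
        return top_idx[0]  # greedy: the single most likely midi char
    # Softmax over the k-1 runners-up only, as in the original.
    p = np.exp(top_val[1:] - top_val[1:].max())
    p /= p.sum()
    return rng.choice(top_idx[1:], p=p)

rng = np.random.default_rng(0)
print(sample_next_np(np.array([0.1, 2.0, 0.5, 1.5]), k=3, rng=rng))
```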
**Record 8**: hexsha `020563bca2febded13ab705cf7257f5af323ab0d`; size 1,616; ext py; lang Python
path: holobot/sdk/chrono/interval_parser.py; head_hexsha: `89b7b416403d13ccfeee117ef942426b08d3651d`; licenses: ["MIT"]

| event | repo_name | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|
| stars | rexor12/holobot | 1 | 2021-05-24T00:17:46.000Z | 2021-05-24T00:17:46.000Z |
| issues | rexor12/holobot | 41 | 2021-03-24T22:50:09.000Z | 2021-12-17T12:15:13.000Z |
| forks | rexor12/holobot | null | null | null |

content:
```python
from ..utils import pad_left, try_parse_int
from datetime import timedelta
from typing import Dict, List

TIME_PARTS: List[str] = [ "D", "H", "M", "S" ]
FIXED_INTERVALS: Dict[str, timedelta] = {
    "WEEK": timedelta(weeks=1),
    "DAY": timedelta(days=1),
    "HOUR": timedelta(hours=1)
}


def parse_interval(value: str) -> timedelta:
    args: Dict[str, int] = { part: 0 for part in TIME_PARTS }
    value = value.upper()
    if (fixed_interval := FIXED_INTERVALS.get(value, None)) is not None:
        return fixed_interval
    if ":" in value:
        __parse_delimited_into(value, args)
    else: __parse_denoted_into(value, args)
    return timedelta(days=args["D"], hours=args["H"], minutes=args["M"], seconds=args["S"])


def __parse_delimited_into(value: str, args: Dict[str, int]) -> None:
    split_values = value.split(":")
    padded_values = pad_left(split_values, "0", len(TIME_PARTS))
    for index in range(0, len(TIME_PARTS)):
        part_value = try_parse_int(padded_values[index])
        args[TIME_PARTS[index]] = part_value if part_value is not None else 0
    if len(split_values) == 2:
        args["H"] = args["M"]
        args["M"] = args["S"]
        args["S"] = 0


def __parse_denoted_into(value: str, args: Dict[str, int]) -> None:
    for time_part in args.keys():
        split_values = value.split(time_part, 1)
        if len(split_values) == 2:
            part_value = try_parse_int(split_values[0])
            args[time_part] = part_value if part_value is not None else 0
            value = split_values[1]
            continue
        value = split_values[0]
```
stats: avg_line_length 36.727273; max_line_length 91; alphanum_fraction 0.633045; count/score: classes 0/0, generators 0/0, decorators 0/0, async_functions 0/0, documentation 65/0.040223
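Record 8's parser accepts three spellings: a fixed keyword, a colon-delimited clock value, and a letter-denoted value. Assuming `pad_left` left-pads the list with `"0"` entries and `try_parse_int` returns `None` on failure, as their names suggest, the expected behaviour is:

```python
from datetime import timedelta

# Doctest-style expectations for parse_interval.
assert parse_interval("day") == timedelta(days=1)                 # fixed keyword
assert parse_interval("1:30") == timedelta(hours=1, minutes=30)   # H:M clock form
assert parse_interval("2h30m") == timedelta(hours=2, minutes=30)  # denoted form
```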
**Record 9**: hexsha `02063c864e384d1ba7ec730d4d03b03f063ebc1f`; size 80,245; ext py; lang Python
path: pirates/ai/PiratesMagicWordManager.py; head_hexsha: `6ca1e7d571c670b0d976f65e608235707b5737e3`; licenses: ["BSD-3-Clause"]

| event | repo_name | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|
| stars | itsyaboyrocket/pirates | 3 | 2021-02-25T06:38:13.000Z | 2022-03-22T07:00:15.000Z |
| issues | itsyaboyrocket/pirates | null | null | null |
| forks | itsyaboyrocket/pirates | 1 | 2021-02-25T06:38:17.000Z | 2021-02-25T06:38:17.000Z |

content (decompiled Python 2 source; indentation not preserved in this dump):

```python
# uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.ai.PiratesMagicWordManager
from direct.showbase.ShowBaseGlobal import *
from direct.distributed import DistributedObject
from direct.directnotify import DirectNotifyGlobal
from direct.task import Task
from otp.avatar import Avatar
from otp.chat import ChatManager
import string
from direct.showbase import PythonUtil
from otp.otpbase import OTPGlobals
from direct.distributed.ClockDelta import *
from otp.ai import MagicWordManager
from pirates.pirate import DistributedPlayerPirate
from pirates.npc import DistributedNPCTownfolk
from direct.distributed import DistributedCartesianGrid
from pirates.piratesbase import PiratesGlobals
from pirates.piratesgui.RadarUtil import RadarUtil
from pirates.cutscene import Cutscene, CutsceneData
from pirates.effects.Fireflies import Fireflies
from pirates.effects.GroundFog import GroundFog
from pirates.effects.Bonfire import Bonfire
from pirates.effects.CeilingDust import CeilingDust
from pirates.effects.CeilingDebris import CeilingDebris
from pirates.effects.CameraShaker import CameraShaker
from pirates.effects.DarkWaterFog import DarkWaterFog
from pirates.ship import DistributedSimpleShip
from pirates.world import WorldGlobals
from pirates.effects.FireworkGlobals import *
from pirates.effects.FireworkShowManager import FireworkShowManager
from pirates.piratesbase import PLocalizer
class PiratesMagicWordManager(MagicWordManager.MagicWordManager):
__module__ = __name__
notify = DirectNotifyGlobal.directNotify.newCategory('PiratesMagicWordManager')
neverDisable = 1
GameAvatarClass = DistributedPlayerPirate.DistributedPlayerPirate
def __init__(self, cr):
MagicWordManager.MagicWordManager.__init__(self, cr)
self.pendingCameraReparent = None
self.originalLocation = None
self.groundFog = None
self.fireflies = None
self.rainDrops = None
self.rainMist = None
self.rainSplashes = None
self.rainSplashes2 = None
self.stormEye = None
self.stormRing = None
self.fishCamEnabled = False
return
def generate(self):
MagicWordManager.MagicWordManager.generate(self)
self.accept('magicWord', self.b_setMagicWord)
def doLoginMagicWords(self):
MagicWordManager.MagicWordManager.doLoginMagicWords(self)
if base.config.GetBool('want-chat', 0):
self.d_setMagicWord('~chat', localAvatar.doId, 0)
if base.config.GetBool('want-run', 0) or base.config.GetBool('want-pirates-run', 0):
self.toggleRun()
if base.config.GetBool('immortal-mode', 0):
self.d_setMagicWord('~immortal', localAvatar.doId, 0)
def disable(self):
self.ignore('magicWord')
MagicWordManager.MagicWordManager.disable(self)
if self.pendingCameraReparent:
base.cr.relatedObjectMgr.abortRequest(self.pendingCameraReparent)
self.pendingCameraReparent = None
return
def doMagicWord(self, word, avId, zoneId):
def wordIs(w, word=word):
return word[:len(w) + 1] == '%s ' % w or word == w
if word == '~rio':
self.doMagicWord('~run', avId, zoneId)
if MagicWordManager.MagicWordManager.doMagicWord(self, word, avId, zoneId) == 1:
pass
if word == '~walk':
localAvatar.b_setGameState('LandRoam')
localAvatar.motionFSM.on()
if word == '~players':
players = base.cr.doFindAll('DistributedPlayerPirate')
for player in players:
playerText = '%s %s' % (player.getName(), player.doId)
base.talkAssistant.receiveGameMessage(playerText)
if word == '~rocketman':
if localAvatar.rocketOn == 0:
localAvatar.startRocketJumpMode()
base.talkAssistant.receiveGameMessage('Zero hour nine a.m. (Bill Shattner Version)')
else:
localAvatar.endRocketJumpMode()
base.talkAssistant.receiveGameMessage("And I think it's gonna be a long long time")
if word == '~shipUpgrade':
localAvatar.guiMgr.toggleShipUpgrades()
if word == '~shipCam':
if base.shipLookAhead:
base.talkAssistant.receiveGameMessage('Ship Look ahead camera off!')
base.setShipLookAhead(0)
else:
base.talkAssistant.receiveGameMessage('Ship Look ahead camera on!')
base.setShipLookAhead(1)
if word == '~time':
base.talkAssistant.receiveGameMessage('The time is %s' % base.cr.timeOfDayManager.getCurrentIngameTime())
if word == '~todDebug':
base.cr.timeOfDayManager.toggleDebugMode()
if word == '~vismask':
base.talkAssistant.receiveGameMessage('Vis Mask %s' % localAvatar.invisibleMask)
if word == '~target':
localAvatar.setAvatarViewTarget()
if word == '~collisions_on':
pass
if word == '~collisions_off':
pass
if word == '~topten':
base.cr.guildManager.requestLeaderboardTopTen()
if word == '~airender':
pass
if __dev__ and wordIs('~shiphat'):
args = word.split()
if hasattr(localAvatar, 'shipHat'):
localAvatar.shipHat.modelRoot.detachNode()
localAvatar.shipHat = None
if len(args) == 1:
ship = base.shipFactory.getShip(23)
else:
shipClass = args[1]
ship = base.shipFactory.getShip(int(shipClass))
ship.startSailing()
ship.modelRoot.reparentTo(localAvatar.headNode)
ship.modelRoot.setR(90)
ship.modelRoot.setP(-90)
ship.modelRoot.setX(0.8)
ship.modelRoot.setScale(0.004)
ship.modelRoot.setZ(-0.2)
ship.forceLOD(2)
ship.modelCollisions.detachNode()
localAvatar.shipHat = ship
if __dev__ and wordIs('~cr'):
pass
if __dev__ and wordIs('~watch'):
if taskMgr.hasTaskNamed('lookAtDude'):
taskMgr.remove('lookAtDude')
localAvatar.guiMgr.setIgnoreAllKeys(False)
localAvatar.guiMgr.combatTray.initCombatTray()
localAvatar.unstash()
else:
args = word.split()
if len(args) >= 2:
tgtDoId = int(args[1])
def doHeadsUp(task=None):
targetObj = self.cr.doId2do.get(tgtDoId)
if targetObj:
localAvatar.lookAt(targetObj)
return Task.cont
taskMgr.add(doHeadsUp, 'lookAtDude')
localAvatar.guiMgr.setIgnoreAllKeys(True)
localAvatar.guiMgr.combatTray.skillMapping.clear()
localAvatar.stash()
else:
print 'need a target object doId to watch'
if __dev__ and (wordIs('~ccNPC') or wordIs('~ccShip')):
pass
if wordIs('~bonfire'):
bf = Bonfire()
bf.reparentTo(render)
bf.setPos(localAvatar, 0, 0, 0)
bf.startLoop()
print 'bonfire at %s, %s' % (localAvatar.getPos(), localAvatar.getHpr())
if __dev__ and wordIs('~mario'):
localAvatar.toggleMario()
if wordIs('~islandShips'):
args = word.split()
try:
if args[1] == '1':
localAvatar.getParentObj().setOceanVisEnabled(1)
localAvatar.getParentObj().setFlatShips(0)
else:
localAvatar.getParentObj().setOceanVisEnabled(0)
except:
pass
if wordIs('~swamp'):
if self.fireflies:
self.fireflies.destroy()
self.fireflies = None
self.groundFog.destroy()
self.groundFog = None
else:
self.fireflies = Fireflies()
if self.fireflies:
self.fireflies.reparentTo(localAvatar)
self.fireflies.startLoop()
self.groundFog = GroundFog()
if self.groundFog:
self.groundFog.reparentTo(localAvatar)
self.groundFog.startLoop()
if wordIs('~darkfog'):
if self.groundFog:
self.groundFog.destroy()
self.groundFog = None
else:
self.groundFog = DarkWaterFog()
if self.groundFog:
self.groundFog.reparentTo(localAvatar)
self.groundFog.startLoop()
if wordIs('~dust'):
effect = CeilingDust.getEffect()
if effect:
effect.reparentTo(localAvatar)
effect.setPos(0, 0, 10)
effect.play()
effect = CeilingDebris.getEffect()
if effect:
effect.reparentTo(localAvatar)
effect.setPos(0, 0, 20)
effect.play()
cameraShakerEffect = CameraShaker()
cameraShakerEffect.reparentTo(localAvatar)
cameraShakerEffect.setPos(0, 0, 0)
cameraShakerEffect.shakeSpeed = 0.05
cameraShakerEffect.shakePower = 4.5
cameraShakerEffect.numShakes = 2
cameraShakerEffect.scalePower = 1
cameraShakerEffect.play(80.0)
if wordIs('~rain'):
if self.rainDrops:
self.rainDrops.stopLoop()
self.rainDrops = None
if self.rainMist:
self.rainMist.stopLoop()
self.rainMist = None
if self.rainSplashes:
self.rainSplashes.stopLoop()
self.rainSplashes = None
if self.rainSplashes2:
self.rainSplashes2.stopLoop()
self.rainSplashes2 = None
else:
from pirates.effects.RainDrops import RainDrops
self.rainDrops = RainDrops(base.camera)
self.rainDrops.reparentTo(render)
self.rainDrops.startLoop()
from pirates.effects.RainMist import RainMist
self.rainMist = RainMist(base.camera)
self.rainMist.reparentTo(render)
self.rainMist.startLoop()
from pirates.effects.RainSplashes import RainSplashes
self.rainSplashes = RainSplashes(base.camera)
self.rainSplashes.reparentTo(render)
self.rainSplashes.startLoop()
from pirates.effects.RainSplashes2 import RainSplashes2
self.rainSplashes2 = RainSplashes2(base.camera)
self.rainSplashes2.reparentTo(render)
self.rainSplashes2.startLoop()
if wordIs('~clouds'):
args = word.split()
if len(args) >= 2:
level = int(args[1])
base.cr.timeOfDayManager.skyGroup.transitionClouds(level).start()
if wordIs('~storm'):
if self.stormEye:
self.stormEye.stopLoop()
self.stormEye = None
if self.stormRing:
self.stormRing.stopLoop()
self.stormRing = None
else:
args = word.split()
grid = 0
if len(args) > 1:
grid = int(args[1])
pos = Vec3(base.cr.doId2do[201100017].getZoneCellOrigin(grid)[0], base.cr.doId2do[201100017].getZoneCellOrigin(grid)[1], base.cr.doId2do[201100017].getZoneCellOrigin(grid)[2])
from pirates.effects.StormEye import StormEye
self.stormEye = StormEye()
self.stormEye.reparentTo(render)
self.stormEye.startLoop()
from pirates.effects.StormRing import StormRing
self.stormRing = StormRing()
self.stormRing.reparentTo(render)
self.stormRing.setZ(100)
self.stormRing.startLoop()
if wordIs('~alight'):
args = word.split()
if len(args) > 3:
color = Vec4(float(args[1]), float(args[2]), float(args[3]), 1)
base.cr.timeOfDayManager.alight.node().setColor(color)
if wordIs('~dlight'):
args = word.split()
if len(args) > 3:
color = Vec4(float(args[1]), float(args[2]), float(args[3]), 1)
base.cr.timeOfDayManager.dlight.node().setColor(color)
if wordIs('~fog'):
args = word.split()
if len(args) > 3:
color = Vec4(float(args[1]), float(args[2]), float(args[3]), 1)
base.cr.timeOfDayManager.fog.setColor(color)
if len(args) > 4:
base.cr.timeOfDayManager.fog.setExpDensity(float(args[4]))
if len(args) == 2:
base.cr.timeOfDayManager.fog.setExpDensity(float(args[1]))
if __dev__ and wordIs('~turbo'):
localAvatar.toggleTurbo()
if __dev__ and wordIs('~joincrew'):
base.cr.crewManager.requestNewCrew()
if wordIs('~tm'):
self.cr.teleportMgr.initiateTeleport(PiratesGlobals.INSTANCE_TM, 'treasureMapCove')
if wordIs('~tml'):
self.cr.teleportMgr.initiateTeleport(PiratesGlobals.INSTANCE_MAIN, WorldGlobals.PiratesWorldSceneFileBase)
if wordIs('~pg'):
self.cr.teleportMgr.initiateTeleport(PiratesGlobals.INSTANCE_PG, 'ParlorWorld')
if wordIs('~pgvip'):
self.cr.teleportMgr.initiateTeleport(PiratesGlobals.INSTANCE_PG, 'ParlorVIPWorld')
if wordIs('~pgl'):
self.cr.teleportMgr.initiateTeleport(PiratesGlobals.INSTANCE_MAIN, WorldGlobals.PiratesWorldSceneFileBase)
if wordIs('~tutorial'):
self.cr.teleportMgr.initiateTeleport(PiratesGlobals.INSTANCE_TUTORIAL, 'RambleshackWorld', self.cr.playGame.handleTutorialGeneration)
if wordIs('~tutoriall'):
self.cr.teleportMgr.initiateTeleport(PiratesGlobals.INSTANCE_MAIN, WorldGlobals.PiratesWorldSceneFileBase)
if wordIs('~pvp'):
self.cr.teleportMgr.initiateTeleport(PiratesGlobals.INSTANCE_PVP, 'pvp_mayhemWorld1')
if wordIs('~pirateer'):
self.cr.teleportMgr.initiateTeleport(PiratesGlobals.INSTANCE_PVP, 'pirateerMap')
if wordIs('~pvpl'):
self.cr.teleportMgr.initiateTeleport(PiratesGlobals.INSTANCE_MAIN, WorldGlobals.PiratesWorldSceneFileBase)
if wordIs('~tortuga'):
self.cr.teleportMgr.initiateTeleport(PiratesGlobals.INSTANCE_GENERIC, 'TortugaWorld')
if wordIs('~portRoyal'):
self.cr.teleportMgr.initiateTeleport(PiratesGlobals.INSTANCE_GENERIC, 'PortRoyalWorld')
if wordIs('~delFuego'):
self.cr.teleportMgr.initiateTeleport(PiratesGlobals.INSTANCE_GENERIC, 'DelFuegoWorld')
if wordIs('~bilgewater'):
self.cr.teleportMgr.initiateTeleport(PiratesGlobals.INSTANCE_GENERIC, 'BilgewaterWorld')
if wordIs('~kingshead'):
self.cr.teleportMgr.initiateTeleport(PiratesGlobals.INSTANCE_GENERIC, 'KingsheadWorld')
if wordIs('~cuba'):
self.cr.teleportMgr.initiateTeleport(PiratesGlobals.INSTANCE_GENERIC, 'CubaWorld')
if wordIs('~rumrunner'):
self.cr.teleportMgr.initiateTeleport(PiratesGlobals.INSTANCE_GENERIC, 'RumrunnerWorld')
if wordIs('~wildisland'):
self.cr.teleportMgr.initiateTeleport(PiratesGlobals.INSTANCE_GENERIC, 'WildIslandWorld')
if wordIs('~caveA'):
self.cr.teleportMgr.initiateTeleport(PiratesGlobals.INSTANCE_GENERIC, 'CaveAWorld')
if wordIs('~caveB'):
self.cr.teleportMgr.initiateTeleport(PiratesGlobals.INSTANCE_GENERIC, 'CaveBWorld')
if wordIs('~caveC'):
self.cr.teleportMgr.initiateTeleport(PiratesGlobals.INSTANCE_GENERIC, 'CaveCWorld')
if wordIs('~caveD'):
self.cr.teleportMgr.initiateTeleport(PiratesGlobals.INSTANCE_GENERIC, 'CaveDWorld')
if wordIs('~caveE'):
self.cr.teleportMgr.initiateTeleport(PiratesGlobals.INSTANCE_GENERIC, 'CaveEWorld')
if wordIs('~jungleA'):
self.cr.teleportMgr.initiateTeleport(PiratesGlobals.INSTANCE_GENERIC, 'JungleTestWorldA')
if wordIs('~jungleB'):
self.cr.teleportMgr.initiateTeleport(PiratesGlobals.INSTANCE_GENERIC, 'JungleTestWorld')
if wordIs('~jungleC'):
self.cr.teleportMgr.initiateTeleport(PiratesGlobals.INSTANCE_GENERIC, 'JungleTestWorldC')
if wordIs('~swampA'):
self.cr.teleportMgr.initiateTeleport(PiratesGlobals.INSTANCE_GENERIC, 'SwampTestWorld')
if wordIs('~mainWorld'):
self.cr.teleportMgr.initiateTeleport(PiratesGlobals.INSTANCE_MAIN, WorldGlobals.PiratesWorldSceneFileBase)
if wordIs('~gameArea'):
self.cr.teleportMgr.initiateTeleport(PiratesGlobals.INSTANCE_GENERIC, 'GameAreaSandbox')
if wordIs('~blackpearl') or wordIs('~bp'):
args = word.split()
if len(args) == 1:
self.cr.teleportMgr.initiateTeleport(PiratesGlobals.INSTANCE_TM, 'BlackpearlWorld')
if wordIs('~scrimmage'):
self.cr.teleportMgr.initiateTeleport(PiratesGlobals.INSTANCE_SCRIMMAGE, 'ScrimmageTestWorld')
if wordIs('~fireworks') or wordIs('~fw'):
args = word.split()
if len(args) >= 2 and args[1] in ['show', 's']:
if len(args) >= 3:
showType = args[2]
timestamp = 0.0
if len(args) >= 4:
timestamp = args[3]
if base.cr.activeWorld:
localAvatar.getParentObj().fireworkShowType = int(showType)
localAvatar.getParentObj().beginFireworkShow(timeStamp=timestamp)
else:
if len(args) >= 2 and args[1] in ['type', 't']:
fireworkType = 0
if len(args) >= 3:
fireworkType = int(args[2])
from pirates.effects.Firework import Firework
firework = Firework(fireworkType)
firework.reparentTo(render)
firework.setPos(Point3(10525, 19000, 245))
firework.play()
else:
if len(args) >= 2 and args[1] in ['effect', 'e']:
trailType = 0
burstType = 0
if len(args) >= 3:
burstType = int(args[2])
if len(args) >= 4:
trailType = int(args[3])
from pirates.effects.FireworkEffect import FireworkEffect
firework = FireworkEffect(burstType, trailType)
firework.reparentTo(render)
firework.setPos(Point3(10525, 19000, 245))
firework.play()
if wordIs('~te'):
if localAvatar.gameFSM.getCurrentOrNextState() == 'LandRoam':
localAvatar.b_setGameState('TeleportOut')
else:
if localAvatar.gameFSM.getCurrentOrNextState() == 'TeleportOut':
localAvatar.b_setGameState('LandRoam')
if wordIs('~lfa'):
args = word.split()
activityName = None
if len(args) >= 2:
activityName = args[1]
if activityName == 'blackjack':
localAvatar.requestActivity(PiratesGlobals.GAME_STYLE_BLACKJACK)
else:
if activityName == 'poker':
localAvatar.requestActivity(PiratesGlobals.GAME_STYLE_POKER)
else:
if activityName == 'pvp':
localAvatar.requestActivity(PiratesGlobals.GAME_TYPE_PVP)
else:
if activityName == 'tm':
localAvatar.requestActivity(PiratesGlobals.GAME_TYPE_TM)
else:
if activityName == 'hsa':
localAvatar.requestActivity(PiratesGlobals.GAME_TYPE_HSA)
else:
if activityName == 'mmp':
self.cr.teleportMgr.initiateTeleport(PiratesGlobals.INSTANCE_MAIN, WorldGlobals.PiratesWorldSceneFileBase)
if wordIs('~term') or wordIs('terminator'):
localAvatar.setEquippedWeapons([10103, 10106, 10115])
localAvatar.d_requestEquipWeapons([10103, 10106, 10115])
if wordIs('~battleRandom'):
args = word.split()
if len(args) >= 2:
command = args[1]
if command == 'resync':
localAvatar.battleRandom.resync()
self.notify.info('Client Battle random resynced, counter=0')
else:
response = 'Client Battle random attack counter=%s main counter=%s' % (localAvatar.battleRandom.attackCounter, localAvatar.battleRandom.mainCounter)
self.setMagicWordResponse(response)
if wordIs('~cutscene'):
args = word.split()
name = None
if len(args) >= 2:
csId = args[1]
else:
csId = base.config.GetString('default-cutscene', '0')
if int(csId) >= len(CutsceneData.CutsceneNames):
return
name = CutsceneData.CutsceneNames[int(csId)]
cs = PythonUtil.ScratchPad()
def destroyCutscene(cs=cs):
cs.cutscene.destroy()
c = Cutscene.Cutscene(self.cr, name, PythonUtil.DelayedFunctor(destroyCutscene, '~cutscene-destroy'))
cs.cutscene = c
c.play()
destroyCutscene = None
if wordIs('~forceLod'):
for n in render.findAllMatches('**/+LODNode'):
n.node().forceSwitch(n.node().getHighestSwitch())
if wordIs('~wave'):
args = word.split()
patch = base.cr.doFind('OceanGrid').water.patch
if len(args) < 4:
response = '~wave num amplitude wavelength speed'
numWaves = patch.getNumWaves()
num = 0
while numWaves > 0:
if patch.isWaveEnabled(num):
numWaves -= 1
if patch.getWaveTarget(num) != SeaPatchRoot.WTZ or patch.getWaveFunc(num) != SeaPatchRoot.WFSin:
response = '%s\n%s NON-SINE-WAVE' % (response, num)
else:
response = '%s\n%s amp=%s len=%s spd=%s' % (response, num, patch.getWaveAmplitude(num), patch.getWaveLength(num), patch.getWaveSpeed(num))
num += 1
else:
num = int(args[1])
amplitude = float(args[2])
wavelength = float(args[3])
speed = float(args[4])
patch.enableWave(num)
patch.setWaveTarget(num, SeaPatchRoot.WTZ)
patch.setWaveFunc(num, SeaPatchRoot.WFSin)
patch.setChoppyK(num, 0)
patch.setWaveAmplitude(num, amplitude)
patch.setWaveLength(num, wavelength)
patch.setWaveSpeed(num, speed)
response = 'wave %s modified' % num
self.setMagicWordResponse(response)
if wordIs('~roll'):
args = word.split()
if len(args) < 2:
response = '~roll angle [fakeMass]'
else:
if localAvatar.ship is None:
response = 'not on a ship'
else:
if len(args) > 2:
localAvatar.ship._rocker.setFakeMass(float(args[2]))
localAvatar.ship.addRoll(float(args[1]))
response = 'rolling!'
self.setMagicWordResponse(response)
if wordIs('~ru'):
if hasattr(self, 'radarUtil') and self.radarUtil and not self.radarUtil.isDestroyed():
self.radarUtil.destroy()
else:
self.radarUtil = RadarUtil()
if __dev__ and wordIs('~todpanel'):
tod = base.cr.timeOfDayManager
from pirates.leveleditor import TimeOfDayPanel
p = TimeOfDayPanel.TimeOfDayPanel(tod)
if __dev__ and wordIs('~kraken'):
args = word.split()[1:]
if args and args[0]:
if not hasattr(base, 'oobeMode') or not base.oobeMode:
base.oobe()
base.oobeCamera.wrtReparentTo(render)
if wordIs('~pvpmoney') or wordIs('~pvpinfamy'):
if localAvatar.ship and localAvatar.ship.renownDisplay:
taskMgr.doMethodLater(2.0, localAvatar.ship.renownDisplay.loadRank, 'pvp-infamy-display', [])
if localAvatar.guiMgr and localAvatar.guiMgr.pvpPanel and hasattr(localAvatar.guiMgr.pvpPanel, 'renownDisplay') and localAvatar.guiMgr.pvpPanel.renownDisplay:
taskMgr.doMethodLater(2.0, localAvatar.guiMgr.pvpPanel.renownDisplay.loadRank, 'pvp-infamy-display', [])
if localAvatar.guiMgr and localAvatar.guiMgr.titlesPage:
taskMgr.doMethodLater(2.0, localAvatar.guiMgr.titlesPage.refresh, 'titles-refresh', [])
if wordIs('~profileCard'):
args = word.split()
if len(args) >= 2:
profileId = int(args[1])
else:
profileId = localAvatar.getDoId()
localAvatar.guiMgr.handleAvatarDetails(profileId)
if wordIs('~gmNameTag'):
args = word.split()
if len(args) < 2 and localAvatar.isGM():
response = PLocalizer.MAGICWORD_GMNAMETAG
self.setMagicWordResponse(response)
if len(args) >= 2 and localAvatar.isGM():
if args[1] == 'enable':
localAvatar.setGMNameTagState(1)
else:
if args[1] == 'disable':
localAvatar.setGMNameTagState(0)
else:
if args[1] == 'setString':
xCount = 0
stringToSet = ''
for i in args:
if xCount < 2:
pass
else:
stringToSet = '%s %s' % (stringToSet, args[xCount])
xCount += 1
localAvatar.setGMNameTagString(stringToSet)
else:
if args[1] == 'setColor':
localAvatar.setGMNameTagColor(args[2])
else:
if wordIs('~liveCam'):
LiveCamTransforms = {'1': [Vec3(-385.776, -2369.64, 52.4644), Vec3(-18.0412, -3.24766, 0), 39.3076, 0], '2': [Vec3(79.1195, -2521.26, 52.4644), Vec3(-18.0412, -3.24766, 0), 39.3076, 0], '3': [Vec3(2858.35, 931.111, 37.9564), Vec3(-29.8904, -7.12525, 0), 39.3076, 1], '4': [Vec3(3551.93, 532.437, 37.9564), Vec3(-29.8904, -7.12525, 0), 39.3076, 1], '5': [Vec3(4245.52, 133.763, 37.9564), Vec3(-29.8904, -7.12525, 0), 39.3076, 1], '6': [Vec3(4939.1, -264.911, 37.9564), Vec3(-29.8904, -7.12525, 0), 39.3076, 1]}
lodNodes = render.findAllMatches('**/+LODNode')
for i in xrange(0, lodNodes.getNumPaths()):
lodNodes[i].node().forceSwitch(lodNodes[i].node().getHighestSwitch())
localAvatar.clearInterestNamed(None, ['liveCam'])
localAvatar.getParentObj().setOceanVisEnabled(0)
args = word.split()
if len(args) > 1:
camNum = args[1]
camData = LiveCamTransforms[camNum]
localAvatar.cameraFSM.request('Control')
if camData[3]:
camParent = render
else:
camParent = localAvatar.getParentObj()
base.cam.reparentTo(camParent)
base.cam.setPos(camData[0])
base.cam.setHpr(camData[1])
base.camLens.setFov(camData[2])
if camData[3] == 0:
localAvatar.setInterest(localAvatar.getParentObj().doId, [
11622, 11621, 11443, 11442, 11620, 11619, 11441, 11086, 11085, 11263, 11264, 11265, 11444, 11266, 11267, 11445, 11446, 11268, 11269, 11447, 11449, 11270, 11448, 11271, 11272, 11450, 11451, 11273, 11095, 11093, 11094, 11092, 11091, 11090, 11089, 11088, 11087, 11623, 11624, 11625, 11626, 11627, 11628, 11629, 11807, 11630, 11452, 11274, 11096, 11275, 11277, 11276, 11099, 11098, 11097, 11455, 11454, 11453, 11631, 11632, 11633, 11100, 11278, 11456, 11634, 11990, 11812, 11811, 11989, 11988, 11987, 11809, 11810, 11808, 11986, 11985, 12164, 12163, 12162, 11984, 11806, 11805, 11983, 12161, 12160, 11982, 11804, 11803, 11981, 11980, 12159, 11802, 11801, 11979, 12158, 12157, 12156, 11978, 11799, 11800, 11977, 11798, 11976, 11975, 11797, 11796, 11974, 11084, 11262, 11440, 11618, 11795, 11617, 11439, 11261, 11083, 11082, 11260, 11438, 11616, 11794, 11793, 11615, 11437, 11081, 11259, 11080, 11258, 11436, 11614, 11435, 11257, 11079, 11973, 11972, 12155, 12154, 12153], [
'liveCam'])
else:
localAvatar.getParentObj().setOceanVisEnabled(1)
localAvatar.getParentObj().setFlatShips(0)
else:
localAvatar.cameraFSM.request('FPS')
base.cam.reparentTo(camera)
base.cam.setPos(0, 0, 0)
base.cam.setHpr(0, 0, 0)
base.camLens.setFov(63.742)
else:
if wordIs('~showCams'):
render.findAllMatches('**/liveCamParent*').detach()
LiveCamTransforms = {'1': [Vec3(-385.776, -2369.64, 52.4644), Vec3(-18.0412, -3.24766, 0), 39.3076, 0], '2': [Vec3(79.1195, -2521.26, 52.4644), Vec3(-18.0412, -3.24766, 0), 39.3076, 0], '3': [Vec3(2858.35, 931.111, 37.9564), Vec3(-29.8904, -7.12525, 0), 39.3076, 1], '4': [Vec3(3551.93, 532.437, 37.9564), Vec3(-29.8904, -7.12525, 0), 39.3076, 1], '5': [Vec3(4245.52, 133.763, 37.9564), Vec3(-29.8904, -7.12525, 0), 39.3076, 1], '6': [Vec3(4939.1, -264.911, 37.9564), Vec3(-29.8904, -7.12525, 0), 39.3076, 1]}
camModel = NodePath('camera')
lens = PerspectiveLens()
lens.setFov(base.camLens.getFov())
lens.setFov(39.3076)
g = lens.makeGeometry()
gn = GeomNode('frustum')
gn.addGeom(g)
gnp = camModel.attachNewNode(gn)
if not localAvatar.getShip():
for camNum in range(1, 3):
camData = LiveCamTransforms[str(camNum)]
camParent = localAvatar.getParentObj().attachNewNode('liveCamParent-%s' % camNum)
camParent.setPos(camData[0])
camParent.setHpr(camData[1])
camParent.setScale(10)
camModel.instanceTo(camParent)
for camNum in range(3, 7):
camData = LiveCamTransforms[str(camNum)]
camParent = render.attachNewNode('liveCamParent-%s' % camNum)
camParent.setPos(camData[0])
camParent.setHpr(camData[1])
camParent.setScale(10)
camModel.instanceTo(camParent)
else:
if wordIs('~hideCams'):
render.findAllMatches('**/liveCamParent*').detach()
else:
if wordIs('~dropBlockers'):
ga = localAvatar.getParentObj()
blockers = ga.findAllMatches('**/blocker_*')
blockers.stash()
else:
if __dev__ and wordIs('~effects'):
args = word.split()
self.configEffects(args)
else:
if __dev__ and wordIs('~shipsRock'):
configIs = 'ships-rock'
args = word.split()
self.configShipsRock(configIs, args)
else:
if __dev__ and wordIs('~shipsRockWithoutWaves'):
configIs = 'ships-rock-without-waves'
args = word.split()
self.configShipsRock(configIs, args)
else:
if __dev__ and wordIs('~wantCompassTask'):
self.configToggleBool('want-compass-task')
else:
if __dev__ and wordIs('~wantPatchie'):
def turnOffSeapatch():
if hasattr(base.cr.activeWorld.worldGrid, 'cleanupWater'):
base.cr.activeWorld.worldGrid.cleanupWater()
def turnOnSeapatch():
if hasattr(base.cr.activeWorld.worldGrid, 'setupWater'):
base.cr.activeWorld.worldGrid.setupWater()
self.configToggleBool('want-compass-task', offCode=turnOffSeapatch, onCode=turnOnSeapatch)
else:
if __dev__ and wordIs('~wantShipColl'):
if localAvatar.ship and localAvatar.ship.controlManager.controls.has_key('ship'):
if localAvatar.ship.controlManager.controls['ship'].collisionsActive:
localAvatar.ship.controlManager.controls['ship'].setCollisionsActive(0)
self.setMagicWordResponse('ship collisions OFF')
else:
localAvatar.ship.controlManager.controls['ship'].setCollisionsActive()
self.setMagicWordResponse('ship collisions ON')
else:
self.setMagicWordResponse('get on a ship!')
else:
if __dev__ and wordIs('~wantCannonColl'):
if localAvatar.ship:
args = word.split()
if len(args) > 1:
type = int(args[1])
base.cr.cannonballCollisionDebug = type
else:
if base.cr.cannonballCollisionDebug == 0:
base.cr.cannonballCollisionDebug = 1
else:
base.cr.cannonballCollisionDebug = 0
if base.cr.cannonballCollisionDebug == 0:
self.setMagicWordResponse('cannonball collisions set to ALL OFF')
else:
if base.cr.cannonballCollisionDebug == 1:
self.setMagicWordResponse('cannonball collisions set to ALL ON')
else:
if base.cr.cannonballCollisionDebug == 2:
self.setMagicWordResponse('cannonball collisions set to Broadside ONLY ON')
else:
if base.cr.cannonballCollisionDebug == 3:
self.setMagicWordResponse('cannonball collisions set to Deck ONLY ON')
else:
self.setMagicWordResponse('get on a ship!')
else:
if __dev__ and wordIs('~wantEventCollider'):
self.configWantEventCollider()
else:
if __dev__ and wordIs('~wantFloorEventRay'):
self.configWantFloorEventRay()
else:
if __dev__ and wordIs('~optimized1'):
if not localAvatar.ship:
self.setMagicWordResponse('get on a ship FIRST')
self.configWantFloorEventRay()
self.configWantEventCollider()
self.configWantWaterRippleRay()
self.configToggleBool('want-compass-task')
configIs = 'ships-rock'
args = word.split()
self.configShipsRock(configIs, args)
self.configEffects(args)
else:
if __dev__ and wordIs('~optimized2'):
if not localAvatar.ship:
self.setMagicWordResponse('get on a ship FIRST')
self.configWantFloorEventRay()
self.configWantEventCollider()
self.configWantWaterRippleRay()
else:
if wordIs('~setCannonFireVis'):
args = word.split()
type = 'all'
if len(args) > 2:
if args[2] == 'broadside':
type = 'broadside'
else:
if args[2] == 'deck':
type = 'deck'
if len(args) > 1:
dist = int(args[1])
else:
if type == 'broadside':
dist = config.GetInt('cannon-fire-broadside-dist', 3500)
else:
dist = config.GetInt('cannon-fire-dist', 3500)
if type == 'all' or type == 'deck':
DistributedSimpleShip.DistributedSimpleShip.CannonFireDist = dist
self.setMagicWordResponse('setting deck cannon visibility distance to %s' % dist)
if type == 'all' or type == 'broadside':
DistributedSimpleShip.DistributedSimpleShip.CannonFireBroadsideDist = dist
self.setMagicWordResponse('setting broadside cannon visibility distance to %s' % dist)
else:
if wordIs('~setWakeVis'):
args = word.split()
dist = config.GetInt('ship-wake-dist', 3800)
if len(args) > 1:
dist = int(args[1])
DistributedSimpleShip.DistributedSimpleShip.ShipWakeDist = dist
self.setMagicWordResponse('setting wake visibility distance to %s' % dist)
else:
if wordIs('~setRockVis'):
args = word.split()
dist = config.GetInt('ship-rock-dist', 1000)
if len(args) > 1:
dist = int(args[1])
DistributedSimpleShip.DistributedSimpleShip.ShipRockDist = dist
self.setMagicWordResponse('setting rocking visibility distance to %s' % dist)
else:
if __dev__ and wordIs('~wantReducedShipColl'):
shipPilot = localAvatar.ship.controlManager.controls.get('ship')
shipCollNode = shipPilot.cNodePath.node()
if shipCollNode.getNumSolids() > 1:
shipCollNode.removeSolid(2)
shipCollNode.removeSolid(1)
self.setMagicWordResponse('removing mid and stern spheres')
else:
shipCollNode.addSolid(shipPilot.cMidSphere)
shipCollNode.addSolid(shipPilot.cSternSphere)
self.setMagicWordResponse('adding mid and stern spheres')
else:
if __dev__ and wordIs('~wantCollideMasks'):
args = word.split()
force = None
if len(args) > 1:
force = int(args[1])
from pirates.ship import DistributedSimpleShip
clientShips = filter(lambda x: isinstance(x, DistributedSimpleShip.DistributedSimpleShip) and x is not localAvatar.ship, base.cr.doId2do.values())
cleared = False
for currShip in clientShips:
shipCollWall = currShip.hull[0].collisions.find('**/collision_hull')
if not shipCollWall.isEmpty():
if shipCollWall.getCollideMask() & PiratesGlobals.ShipCollideBitmask == BitMask32.allOff():
shipCollWall.setCollideMask(shipCollWall.getCollideMask() | PiratesGlobals.ShipCollideBitmask)
else:
shipCollWall.setCollideMask(shipCollWall.getCollideMask() ^ PiratesGlobals.ShipCollideBitmask)
cleared = True
if cleared:
self.setMagicWordResponse('cleared ship collide bitmasks')
else:
self.setMagicWordResponse('set ship collide bitmasks')
else:
if __dev__ and wordIs('~saveCamera'):
camera = base.cr.doFind('DistributedCamera')
cameraOV = camera.getOV()
args = word.split()[1:]
if args:
id = cameraOV.saveFixture(int(args[0]))
else:
id = cameraOV.saveFixture()
self.setMagicWordResponse('camera saved: %d' % id)
else:
if __dev__ and wordIs('~removeCamera'):
camera = base.cr.doFind('DistributedCamera')
cameraOV = camera.getOV()
args = word.split()[1:]
if args:
cameraOV.removeFixture(int(args[0]))
else:
self.setMagicWordResponse('need camera id to remove')
else:
if __dev__ and wordIs('~standbyCamera'):
camera = base.cr.doFind('DistributedCamera')
cameraOV = camera.getOV()
args = word.split()[1:]
if args:
cameraOV.standbyFixture(int(args[0]))
else:
self.setMagicWordResponse('need camera id to standby')
else:
if __dev__ and wordIs('~blinkCamera'):
camera = base.cr.doFind('DistributedCamera')
cameraOV = camera.getOV()
args = word.split()[1:]
if args:
cameraOV.blinkFixture(int(args[0]))
else:
self.setMagicWordResponse('need camera id to blink')
else:
if __dev__ and wordIs('~testCamera'):
camera = base.cr.doFind('DistributedCamera')
cameraOV = camera.getOV()
args = word.split()[1:]
if args:
cameraOV.testFixture(int(args[0]))
else:
self.setMagicWordResponse('need camera id to test')
else:
if __dev__ and wordIs('~storeCameras'):
camera = base.cr.doFind('DistributedCamera')
cameraOV = camera.getOV()
args = word.split()[1:]
if args:
cameraOV.storeToFile(args[0])
else:
self.setMagicWordResponse('need name to store')
else:
if __dev__ and wordIs('~loadCameras'):
camera = base.cr.doFind('DistributedCamera')
cameraOV = camera.getOV()
args = word.split()[1:]
if args:
cameraOV.loadFromFile(args[0])
else:
self.setMagicWordResponse('need name to load')
else:
if __dev__ and wordIs('~startRecording'):
camera = base.cr.doFind('DistributedCamera')
cameraOV = camera.getOV()
cameraOV.startRecording()
else:
if __dev__ and wordIs('~stopRecording'):
camera = base.cr.doFind('DistributedCamera')
cameraOV = camera.getOV()
cameraOV.stopRecording()
else:
if __dev__ and base.config.GetBool('want-fishing-game', 0) and wordIs('~fishcam'):
self.toggleFishCam()
self.setMagicWordResponse('toggling fish cam')
else:
if wordIs('~fishR'):
self.doRequestFish(word, localAvatar, zoneId, localAvatar.doId)
else:
if wordIs('~leg'):
args = word.split()[1:]
if args:
base.fishingGame.wantLeg = args[0]
else:
base.fishingGame.wantLeg = 1
else:
if wordIs('~legWin'):
if hasattr(base, 'fishingGame'):
if base.fishingGame.fsm.getCurrentOrNextState() == 'LegendaryFish':
base.fishingGame.lfgFsm.request('Win')
else:
self.setMagicWordResponse('Not battling legendary fish! (use ~leg)')
else:
self.setMagicWordResponse('Fishing Game not started.')
else:
if wordIs('~cdunlockall'):
messenger.send('cdUnlockAll')
else:
if wordIs('~camSpin'):
args = word.split()
dist = 40
if len(args) > 1:
dist = float(args[1])
def spin(task=None):
localAvatar.cameraFSM.getCurrentCamera().setH(localAvatar.cameraFSM.getCurrentCamera().getH() + 1)
return Task.cont
if taskMgr.hasTaskNamed('camSpin'):
localAvatar.cameraFSM.getCurrentCamera().setH(0)
localAvatar.cameraFSM.getCurrentCamera()._setCamDistance(14)
localAvatar.cameraFSM.getCurrentCamera().forceMaxDistance = True
localAvatar.cameraFSM.getCurrentCamera()._startCollisionCheck()
taskMgr.remove('camSpin')
else:
localAvatar.cameraFSM.getCurrentCamera()._stopCollisionCheck()
localAvatar.cameraFSM.getCurrentCamera().forceMaxDistance = False
localAvatar.cameraFSM.getCurrentCamera()._setCamDistance(dist)
taskMgr.add(spin, 'camSpin')
else:
if wordIs('~hostilizeNear'):
interactivesNear = base.cr.interactionMgr.sortInteractives()
for currInteractive in interactivesNear:
if isinstance(currInteractive, DistributedNPCTownfolk.DistributedNPCTownfolk):
self.b_setMagicWord('~hostilize ' + str(currInteractive.doId))
return
def configEffects(self, args):
effectCats = args[1:]
def toggleEffects(on=None):
if effectCats:
for currEffectCat in effectCats:
if currEffectCat == 'clearCustom':
base.cr.effectToggles = {}
continue
if currEffectCat == 'listEffectCats':
response = 'known effect types are: \n%s' % base.cr.effectTypes.keys()
self.setMagicWordResponse(response)
continue
effectTypes = base.cr.effectTypes.get(currEffectCat, [currEffectCat])
for currEffectType in effectTypes:
newStatus = not base.cr.effectToggles.get(currEffectType, base.config.GetBool('want-special-effects', 1))
base.cr.effectToggles[currEffectType] = newStatus
response = 'effect %s set to %s' % (currEffectType, choice(newStatus, 'ON', 'OFF'))
self.setMagicWordResponse(response)
base.cr.wantSpecialEffects = base.config.GetBool('want-special-effects', 1)
from pirates.ship import DistributedSimpleShip
clientShips = filter(lambda x: isinstance(x, DistributedSimpleShip.DistributedSimpleShip), base.cr.doId2do.values())
if base.cr.queryShowEffect('BlackSmoke') or base.cr.queryShowEffect('Fire'):
for ship in clientShips:
if base.cr.queryShowEffect('BlackSmoke'):
ship.startSmoke()
if base.cr.queryShowEffect('Fire'):
ship.startFire()
else:
if not base.cr.queryShowEffect('BlackSmoke') or not base.cr.queryShowEffect('Fire'):
for ship in clientShips:
if not base.cr.queryShowEffect('BlackSmoke'):
ship.stopSmoke()
if not base.cr.queryShowEffect('Fire'):
ship.stopFire()
if effectCats:
toggleEffects()
else:
self.configToggleBool('want-special-effects', offCode=lambda p1=False: toggleEffects(p1), onCode=lambda p1=True: toggleEffects(p1))
return
def configWantEventCollider(self):
currControls = localAvatar.controlManager.currentControls
if currControls == None:
return
colliderExists = base.shadowTrav.hasCollider(currControls.cEventSphereNodePath) or currControls.cTrav.hasCollider(currControls.cEventSphereNodePath)
if colliderExists:
currControls.cTrav.removeCollider(currControls.cEventSphereNodePath)
base.shadowTrav.removeCollider(currControls.cEventSphereNodePath)
currControls.pusher.addInPattern('enter%in')
currControls.pusher.addOutPattern('exit%in')
self.setMagicWordResponse('event sphere OFF')
else:
currControls.pusher.clearInPatterns()
currControls.pusher.clearOutPatterns()
avatarRadius = 1.4
base.shadowTrav.addCollider(currControls.cEventSphereNodePath, currControls.event)
self.setMagicWordResponse('event sphere ON')
return
def configWantFloorEventRay(self):
if localAvatar.cTrav.hasCollider(localAvatar.cFloorNodePath):
localAvatar.cTrav.removeCollider(localAvatar.cFloorNodePath)
self.setMagicWordResponse('floor event ray OFF')
else:
localAvatar.cTrav.addCollider(localAvatar.cFloorNodePath, localAvatar.floorEventHandler)
self.setMagicWordResponse('floor event ray ON')
def configWantWaterRippleRay(self):
if localAvatar.cTrav.hasCollider(localAvatar.cWaterNodePath):
localAvatar.cTrav.removeCollider(localAvatar.cWaterNodePath)
self.setMagicWordResponse('water ripple ray OFF')
else:
localAvatar.cTrav.addCollider(localAvatar.cWaterNodePath, localAvatar.waterEventHandler)
self.setMagicWordResponse('water ripple ray ON')
def configWantShadowPlacer(self):
if localAvatar.shadowPlacer.cTrav.hasCollider(localAvatar.shadowPlacer.cRayNodePath):
localAvatar.shadowPlacer.cTrav.removeCollider(localAvatar.shadowPlacer.cRayNodePath)
self.setMagicWordResponse('shadow placer ray OFF')
else:
localAvatar.shadowPlacer.cTrav.addCollider(localAvatar.shadowPlacer.cRayNodePath, localAvatar.shadowPlacer.lifter)
self.setMagicWordResponse('shadow placer ray ON')
def configShipsRock(self, configIs, args):
onlyPlayerRocks = False
if len(args) > 1:
if args[1] == 'playerOnly':
onlyPlayerRocks = True
        if config.GetInt(configIs, 1) == 1 or config.GetInt(configIs, 1) == 2:
            ConfigVariableInt(configIs).setValue(0)
            self.setMagicWordResponse('%s OFF (all ships)' % configIs)
else:
if onlyPlayerRocks:
ConfigVariableInt(configIs).setValue(2)
self.setMagicWordResponse('%s ON (local player ship only)' % configIs)
else:
ConfigVariableInt(configIs).setValue(1)
self.setMagicWordResponse('%s ON (all ships)' % configIs)
def configToggleBool(self, configName, defaultVal=1, offCode=None, onCode=None):
currVal = not config.GetBool(configName, defaultVal)
loadPrcFileData('', '%s %s' % (configName, currVal))
self.setMagicWordResponse('%s %s' % (configName, choice(currVal, 'ON', 'OFF')))
        if currVal:
            if onCode:
                onCode()
        elif offCode:
            offCode()
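    # Editor's note (a usage sketch, not from the source): configToggleBool flips a
    # boolean prc variable and fires the matching callback, e.g.
    #     self.configToggleBool('want-special-effects')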
def cameraFollowTgt(self, target, parentId):
localAvatar.cTrav.removeCollider(localAvatar.cFloorNodePath)
localAvatar.controlManager.use('observer', localAvatar)
localAvatar.controlManager.currentControls.disableAvatarControls()
localAvatar.guiMgr.setIgnoreAllKeys(True)
localAvatar.guiMgr.combatTray.skillMapping.clear()
localAvatar.reparentTo(target)
localAvatar.setScale(1)
parentObj = base.cr.doId2do[parentId]
localAvatar.setPos(0, 0, 0)
localAvatar.setHpr(render, target.getHpr(render))
localAvatar.stash()
if self.pendingCameraReparent:
base.cr.relatedObjectMgr.abortRequest(self.pendingCameraReparent)
self.pendingCameraReparent = None
return
def cameraUnfollowTgt(self, target):
localAvatar.cTrav.addCollider(localAvatar.cFloorNodePath, localAvatar.floorEventHandler)
localAvatar.controlManager.currentControls.enableAvatarControls()
localAvatar.controlManager.use('walk', localAvatar)
localAvatar.guiMgr.setIgnoreAllKeys(False)
localAvatar.guiMgr.combatTray.initCombatTray()
localAvatar.unstash()
if hasattr(localAvatar, 'followTgt'):
del localAvatar.followTgt
def cameraReparent(self, targetId, targetParentId, zoneId):
targetObj = base.cr.doId2do.get(targetParentId)
if targetObj:
if not isinstance(targetObj, NodePath):
return
currParentObj = localAvatar.getParentObj()
            if self.originalLocation is None:
self.originalLocation = [
localAvatar.getLocation(), localAvatar.getPos(currParentObj)]
prevPos = None
if targetId == 0:
            if targetParentId == 0 and zoneId == 0 and self.originalLocation:
targetParentId = self.originalLocation[0][0]
zoneId = self.originalLocation[0][1]
prevPos = self.originalLocation[1]
self.originalLocation = None
targetObj = base.cr.doId2do.get(targetParentId)
            if targetObj is None or not isinstance(targetObj, NodePath):
self.notify.debug('Parent of target object to reparent avatar/camera to does not yet exist, skipping reparent request')
return
            if prevPos:
                newPos = prevPos
            else:
                newPos = Point3(*targetObj.getZoneCellOriginCenter(zoneId))
localAvatar.reparentTo(targetObj)
localAvatar.setPos(newPos)
localAvatar.isGhosting = True
            if base.cr.doId2do.has_key(targetId):
                self.cameraFollowTgt(base.cr.doId2do[targetId], targetParentId)
else:
if targetId:
self.pendingCameraReparent = base.cr.relatedObjectMgr.requestObjects([targetId], eachCallback=lambda param=None, param2=targetParentId: self.cameraFollowTgt(param, param2))
else:
if self.pendingCameraReparent:
base.cr.relatedObjectMgr.abortRequest(self.pendingCameraReparent)
self.pendingCameraReparent = None
self.cameraUnfollowTgt(targetObj)
localAvatar.isGhosting = False
return
def shipCreated(self, shipId):
        # Early return: the debug body below is intentionally unreachable.
        return
print 'shipCreated(%s)' % shipId
ship = base.cr.doId2do.get(shipId)
if ship:
print 'ship created: %s' % ship
ship.localAvatarInstantBoard()
ship.enableOnDeckInteractions()
def toggleFishCam(self):
self.fishCamEnabled = not self.fishCamEnabled
if self.fishCamEnabled:
base.oobe()
base.oobeCamera.setPos(-13.0, 4.0, -6.0)
base.oobeCamera.setHpr(90.0, 0.0, 0.0)
from pandac.PandaModules import CardMaker
from direct.interval.IntervalGlobal import PosInterval, ProjectileInterval, Sequence, Wait
cm = CardMaker('fishBackdrop')
self.fishBackdrop = render.attachNewNode(cm.generate())
tex = loader.loadTexture('maps/underseaBackdrop.jpg')
self.fishBackdrop.setTexture(tex)
self.fishBackdrop.reparentTo(localAvatar)
self.fishBackdrop.setHpr(90, 0, 0)
self.fishBackdrop.setPos(0, -100, -108.7)
self.fishBackdrop.setScale(400, 1, 100)
self.fishBackdrop.setBin('ground', 20)
self.fishBackdrop.setDepthWrite(0)
self.fishCamProjectileInterval = Sequence(Wait(4), ProjectileInterval(base.oobeCamera, startPos=Point3(-13.0, 4.0, -6.0), endPos=Point3(-13.0, 164.0, -36.0), duration=3), ProjectileInterval(base.oobeCamera, startPos=Point3(-13.0, 164.0, -36.0), endPos=Point3(-13.0, 4.0, -24.0), gravityMult=-0.5, duration=5), base.oobeCamera.posInterval(5, Point3(-13.0, 4.0, -6.0)))
self.fishCamProjectileInterval.start()
else:
self.fishCamProjectileInterval.finish()
del self.fishCamProjectileInterval
self.fishBackdrop.reparentTo(hidden)
del self.fishBackdrop
base.oobe()
def doRequestFish(self, word, av, zoneId, senderId):
args = word.split()
doid = args[1]
spot = self.cr.doId2do[int(doid)]
spot.requestInteraction(localAvatar.doId)
| 69.176724 | 993 | 0.408349 | 78,717 | 0.980958 | 0 | 0 | 0 | 0 | 0 | 0 | 5,299 | 0.066035 |
0207e1cd7c3433152b1e340e7f376f8049a8644d | 634 | bzl | Python | layers/bazel/deps.bzl | celentes/bazel-container-ubuntu1804 | 67c12c3f6db785909fa3695c80ebbdec1ff81b61 | ["Apache-2.0"] | null | null | null | layers/bazel/deps.bzl | celentes/bazel-container-ubuntu1804 | 67c12c3f6db785909fa3695c80ebbdec1ff81b61 | ["Apache-2.0"] | null | null | null | layers/bazel/deps.bzl | celentes/bazel-container-ubuntu1804 | 67c12c3f6db785909fa3695c80ebbdec1ff81b61 | ["Apache-2.0"] | null | null | null |
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_file")
def deps():
excludes = native.existing_rules().keys()
if "bazel_installer" not in excludes:
http_file(
name = "bazel_installer",
downloaded_file_path = "bazel-installer.sh",
sha256 = "bd7a3a583a18640f58308c26e654239d412adaa833b6b6a7b57a216ab62fabc2",
urls = [
"https://releases.bazel.build/4.0.0/release/bazel-4.0.0-installer-linux-x86_64.sh",
"https://github.com/bazelbuild/bazel/releases/download/4.0.0/bazel-4.0.0-installer-linux-x86_64.sh",
],
)
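# Editor's sketch (not part of the original file): how deps() might be consumed
# from a WORKSPACE file, assuming this package is vendored at //layers/bazel:
#
# load("//layers/bazel:deps.bzl", "deps")
# deps()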
| 39.625 | 116 | 0.62776 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 358 | 0.564669 |
0208d63efe0cf495f00648e33345a8f7f3c257eb | 4,318 | py | Python | db.py | RecycledMedia/apprenticeship-app | 67eb18300163dedcc4f473883f20d992644af7b2 | ["BSD-3-Clause"] | null | null | null | db.py | RecycledMedia/apprenticeship-app | 67eb18300163dedcc4f473883f20d992644af7b2 | ["BSD-3-Clause"] | null | null | null | db.py | RecycledMedia/apprenticeship-app | 67eb18300163dedcc4f473883f20d992644af7b2 | ["BSD-3-Clause"] | null | null | null |
import os
import sqlite3
class TaskDatabase:
def __init__(self, db_filename):
self.filename = db_filename
self.connection = None
def get_connection(self):
""" Return a connection to the database, creating one if it doesn't exist """
if self.connection is None:
self.connection = sqlite3.connect(self.filename, check_same_thread=False)
return self.connection
def close_connection(self):
""" Close the connection to the database """
if self.connection:
self.get_connection().commit()
self.connection.close()
self.connection = None
def get_cursor(self):
""" Return a database cursor"""
return self.get_connection().cursor()
    def execute(self, cursor, sql, parameters=None):
        """ Execute a SQL statement and commit the change.
        If the cursor is None, one will be automatically created
        """
        if cursor is None:
            cursor = self.get_cursor()
        print(f"Executing SQL: {sql}")
        if parameters:
            print(f"Parameters: {parameters}")
            result = cursor.execute(sql, parameters)
        else:
            result = cursor.execute(sql)
        self.get_connection().commit()
        return result
def create_database(self):
""" Create the tasks database """
cursor = self.get_cursor()
sql = """
CREATE TABLE IF NOT EXISTS tasks (
id integer PRIMARY KEY,
created_date text NOT NULL,
content text NOT NULL,
done boolean DEFAULT false,
completed_date text
);"""
self.execute(cursor, sql)
def delete_database(self):
""" Delete the tasks database file """
self.close_connection()
os.unlink(self.filename)
self.connection = None
def add_task(self, content):
""" Add a task """
# WARNING: This is bad and can lead to SQL Injection attacks!
sql = f"""
INSERT INTO tasks (created_date, content)
VALUES (datetime('now'), '{content.replace("'", "''")}');
"""
cursor = self.get_cursor()
self.execute(cursor, sql)
return cursor.lastrowid
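    # Editor's sketch (not in the original module): the parameterized equivalent of
    # add_task(), which sidesteps SQL injection by binding the value instead of
    # formatting it into the statement.
    def add_task_parameterized(self, content):
        """ Add a task using a bound parameter instead of string formatting """
        sql = "INSERT INTO tasks (created_date, content) VALUES (datetime('now'), ?);"
        cursor = self.get_cursor()
        self.execute(cursor, sql, (content, ))
        return cursor.lastrowid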
def rename_task(self, task_id, content):
""" Rename a task """
sql = "UPDATE tasks SET content = ? WHERE id = ?;"
return self.execute(None, sql, (content, task_id))
def set_task_done(self, task_id, done=True):
""" Update the task to done or undone """
if done:
sql = "UPDATE tasks SET done = TRUE, completed_date = datetime('now') WHERE id = ?;"
else:
sql = "UPDATE tasks SET done = FALSE, completed_date = NULL WHERE id = ?;"
return self.execute(None, sql, (task_id, ))
def delete_task(self, task_id):
""" Delete a task """
sql = "DELETE FROM tasks WHERE id = ?;"
return self.execute(None, sql, (task_id, ))
def get_task(self, task_id):
""" Retrieve a single task by id from the database """
columns = ('id', 'created_date', 'content', 'done', 'completed_date')
sql = f"SELECT {', '.join(columns)} FROM tasks WHERE id = ?;"
cursor = self.get_cursor()
self.execute(cursor, sql, (task_id, ))
return self.make_result(columns, cursor.fetchall())[0]
def get_tasks(self):
""" Retrieve all tasks from the database """
columns = ('id', 'created_date', 'content', 'done', 'completed_date')
sql = f"SELECT {', '.join(columns)} FROM tasks ORDER BY id;"
cursor = self.get_cursor()
self.execute(cursor, sql)
return self.make_result(columns, cursor.fetchall())
def get_undone_tasks(self):
""" Retrieve all tasks from the database """
columns = ('id', 'created_date', 'content', 'done', 'completed_date')
sql = f"SELECT {', '.join(columns)} FROM tasks WHERE done = 0 ORDER BY id;"
cursor = self.get_cursor()
self.execute(cursor, sql)
return self.make_result(columns, cursor.fetchall())
def make_result(self, columns, rows):
""" Helper function to convert lists of (list) results into a list of dicts """
records = []
for row in rows:
records.append(dict(zip(columns, row)))
return records
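# Editor's usage sketch (hypothetical filename, not part of the original module):
# db = TaskDatabase('tasks.db')
# db.create_database()
# task_id = db.add_task('write tests')
# db.set_task_done(task_id)
# print(db.get_task(task_id))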
| 32.712121 | 96 | 0.585456 | 4,290 | 0.993516 | 0 | 0 | 0 | 0 | 0 | 0 | 1,706 | 0.39509 |
02097fb19e8e97c98afe88f64252e859af37785e | 243 | py | Python | python/vars_test.py | runningforlife/CodingExamples | 808b12cdb996390225d40a687bf6215c4b7d1822 | ["Apache-2.0"] | null | null | null | python/vars_test.py | runningforlife/CodingExamples | 808b12cdb996390225d40a687bf6215c4b7d1822 | ["Apache-2.0"] | null | null | null | python/vars_test.py | runningforlife/CodingExamples | 808b12cdb996390225d40a687bf6215c4b7d1822 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/python
def test_vars():
"""test variables in python"""
int_var = 5
string_var = "hah"
assert int_var == 5
assert string_var == 'hah'
print("test vars is done")
if __name__ == "__main__":
test_vars()
| 15.1875 | 34 | 0.596708 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 86 | 0.353909 |
020a172c0d9f7b9606628146aaa062d113a7182b | 7,199 | py | Python | src/data_preparation/tfrecords_and_queues.py | Zhenxingzhang/tiny_imagenet | f44512023ce52df30cdffd80d3cb7cc4e1426354 | ["Apache-2.0"] | null | null | null | src/data_preparation/tfrecords_and_queues.py | Zhenxingzhang/tiny_imagenet | f44512023ce52df30cdffd80d3cb7cc4e1426354 | ["Apache-2.0"] | null | null | null | src/data_preparation/tfrecords_and_queues.py | Zhenxingzhang/tiny_imagenet | f44512023ce52df30cdffd80d3cb7cc4e1426354 | ["Apache-2.0"] | null | null | null |
"""
Up to now we have held all data in memory. This is of course impossible with large datasets.
In this file we explore the use of TFRecords (binary files for quickly loading data from disk) and Queues for
asynchronously loading data.
In this example we use the TinyImageNet-200 dataset, which has 100,000 64x64 images for 200 classes.
We will examine 2 options for reading from TFRecord files:
a) reading from the record directly one example at a time
b) reading from the record into a queue and sampling batches from that queue
For more info, consult the great documentation on this from Tensorflow at
https://www.tensorflow.org/versions/r0.12/how_tos/reading_data/index.html
"""
from tqdm import tqdm
import numpy as np
import tensorflow as tf
import matplotlib
# to remove issue with default matplotlib backend (causing runtime error "python is not installed as a framework")
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from PIL import Image
import os
from src.common.paths import DATA_PATH
def grey_to_rgb(im):
w, h = im.shape
ret = np.empty((w, h, 3), dtype=np.uint8)
ret[:, :, 2] = ret[:, :, 1] = ret[:, :, 0] = im
return ret
def csv_to_record(csv_file, tfrecord_file):
with open(csv_file) as f:
lines = f.readlines()
np.random.shuffle(lines)
writer = tf.python_io.TFRecordWriter(tfrecord_file)
# iterate over each example
# wrap with tqdm for a progress bar
for line in tqdm(lines):
path = line.split(',')[0]
image = np.array(Image.open(path))
image_name = path.split("/")[-1]
if len(image.shape) == 2:
# there are some greyscale image in data, reformat them
image = grey_to_rgb(image)
flat_image = image.flatten().astype("int64")
text_label = line.split(',')[1].lstrip()
label = -1 if (text_label == '' or text_label is None) else int(text_label)
# construct the Example proto object
example = tf.train.Example(
# Example contains a Features proto object
features=tf.train.Features(
# Features contains a map of string to Feature proto objects
# A Feature contains one of either a int64_list,
# float_list, or bytes_list
feature={'label': tf.train.Feature(
int64_list=tf.train.Int64List(value=[label])),
'image': tf.train.Feature(
int64_list=tf.train.Int64List(value=flat_image)),
'filename': tf.train.Feature(
bytes_list=tf.train.BytesList(value=[image_name]))
}
)
)
# use the proto object to serialize the example to a string
serialized = example.SerializeToString()
# write the serialized object to disk
writer.write(serialized)
def read_record_to_queue(tf_record_name, shapes, plot=None):
def read_and_decode_single_example(filename):
# first construct a queue containing a list of filenames.
        # this lets a user split up their dataset into multiple files to keep
        # size down
filename_queue = tf.train.string_input_producer([filename], num_epochs=None)
# Unlike the TFRecordWriter, the TFRecordReader is symbolic
        # this means it creates a function and adds it to our graph that can be evaluated with our session
        # Each time we evaluate it, it will pull the next batch off the queue and return that data
reader = tf.TFRecordReader()
# One can read a single serialized example from a filename
# serialized_example is a Tensor of type string.
_, serialized_example = reader.read(filename_queue)
# The serialized example is converted back to actual values.
# One needs to describe the format of the objects to be returned
features = tf.parse_single_example(
serialized_example,
features={
# We know the length of both fields. If not the
# tf.VarLenFeature could be used
'label': tf.FixedLenFeature([shapes['label']], tf.int64),
'image': tf.FixedLenFeature([np.product(shapes['image'])], tf.int64)
})
# now return the converted data
label = features['label']
image = features['image']
return label, image
# returns symbolic label and image
label, image = read_and_decode_single_example(tf_record_name)
# groups examples into batches randomly
    # min_after_dequeue = size of the buffer that will be randomly sampled
    # capacity = maximum number of examples to prefetch
images_batch, labels_batch = tf.train.shuffle_batch([image, label], batch_size=32,
capacity=2000, min_after_dequeue=1000)
sess = tf.Session()
# Initialize graph
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess=sess)
# grab examples back.
print('Reading random batches of 32')
if plot:
plt.suptitle('Read in batches from queue')
for i in range(plot):
# get ith batch
label_vals, image_vals = sess.run([labels_batch, images_batch])
idx = np.random.randint(0, 32) # sample 1 instance from batch
label_val = np.array(label_vals)[idx]
            if label_val.size > 1:  # one-hot label: reduce to the class index
                label_val = np.argmax(label_val)
image_val = np.array(image_vals)[idx]
plt.subplot(3, plot / 3 + (1 if plot % 3 > 0 else 0), i + 1)
plt.xticks(())
plt.yticks(())
plt.title(label_val)
plt.imshow(image_val.reshape(shapes['image']).astype("uint8"))
plt.show()
else:
for i in range(5):
label_vals, image_vals = sess.run([labels_batch, images_batch])
print('Labels of batch {} : {}'.format(i, label_vals))
if i == 10:
print("That's enough of that!")
break
if __name__ == '__main__':
# create TFRecords from csv files if necessary
for set_name in ['train', 'val', 'test']:
tfrecord_path = os.path.join(DATA_PATH, "{}.tfrecord".format(set_name))
if not os.path.exists(tfrecord_path):
print('Creating TFRecord from csv files for set: {}'.format(set_name))
train_csv = os.path.join(DATA_PATH, "{}.csv".format(set_name))
csv_to_record(train_csv, tfrecord_path)
else:
print('TFRecord for {} exists, nothing to do'.format(set_name))
PLOT = 10 # number of images to plot (set == None to suppress plotting)
# read from record one at time
print('Reading from record one at a time')
val_tfrecord_file = os.path.join(DATA_PATH, "train.tfrecord")
# read_from_record(val_tfrecord_file, shapes={'label': 1, 'image': (64, 64, 3)},
# plot=PLOT)
# read from record into queue, shuffle and batch
print('Reading from record into queue, random sample from queue in batches')
read_record_to_queue(val_tfrecord_file, shapes={'label': 1, 'image': (64, 64, 3)},
plot=PLOT)
| 41.137143 | 114 | 0.633282 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,977 | 0.41353 |
020a1a0bc964b8990c94fa3dbddf6619f8e10b21 | 2,906 | py | Python | relialok/SerialPort.py | jrhosk/relialok | 28d59dfd39296695ebec19387eda9b986ecdd60f | ["MIT"] | null | null | null | relialok/SerialPort.py | jrhosk/relialok | 28d59dfd39296695ebec19387eda9b986ecdd60f | ["MIT"] | null | null | null | relialok/SerialPort.py | jrhosk/relialok | 28d59dfd39296695ebec19387eda9b986ecdd60f | ["MIT"] | null | null | null |
import serial
import serial.tools.list_ports
from PyQt5.QtCore import QObject
import relialok.Logger
class SerialPort(QObject):
@relialok.Logger.function_log
def __init__(self, port, parent = None):
self.port = port
self.resource_free = True
self.connection_active = True
self.port_release = True
super(SerialPort, self).__init__(parent)
self.serial = serial.Serial(self.port, '9600', timeout=5)
@relialok.Logger.function_log
def send(self, progress_callback=None, *args, **kwargs):
'''
Send command to serial port if resource is not currently in use and wait for reply.
        :param command: hardware command, passed via kwargs
:param progress_callback: signal handler (unused currently)
:return:
'''
self.command = kwargs['command']
self.resource_free = False
        while not self.port_release:  # Busy-wait for listen() to release the resource
            pass
try:
print('Reading serial port on {port}'.format(port=self.port))
self.serial.write('{cmd}\n'.format(cmd=self.command).encode())
self.serial.flush()
line = self.serial.readline().decode()
print("Initialization check: {resp}".format(resp=line))
self.resource_free = True
return line
except serial.serialutil.SerialException:
print('Read failed.')
@relialok.Logger.function_log
def listen(self, progress_callback):
'''
Monitors serial port for incoming data and passes it to decoding function via progress_callback signal.
:param progress_callback: Generates a signal to pass data to the decoding function from within the thread.
:return: None
'''
print('Listening on {port}'.format(port=self.port))
while self.connection_active:
try:
if self.serial.inWaiting() and self.resource_free:
self.port_release = False
self.serial.flush()
line = self.serial.readline().decode()
print("Response check: {resp}".format(resp=line))
progress_callback.emit(line)
self.port_release = True
else:
pass
except serial.serialutil.SerialException:
print('Listening error occurred.')
@relialok.Logger.function_log
def _is_open(self):
'''
Passes boolean depending on state of serial connection
:return: serial port connection state *True/False)
'''
return self.serial.is_open
@relialok.Logger.function_log
def disconnect(self):
'''
Close serial port connection.
:return: None
'''
self.resource_free = False
self.connection_active = False
self.serial.close()
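# Editor's usage sketch (hypothetical device path and command, not part of the
# original module):
# port = SerialPort('/dev/ttyUSB0')
# print(port.send(command='STATUS'))  # blocks until a reply line is read
# port.disconnect()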
| 34.188235 | 114 | 0.604955 | 2,804 | 0.9649 | 0 | 0 | 2,749 | 0.945974 | 0 | 0 | 913 | 0.314178 |
020a85d2b9268f0ad8b4e717c76fefae39beb819 | 339 | py | Python | Python/DDUtil.py | dalek7/umbrella | cabf0367940905ca5164d104d7aef6ff719ee166 | ["MIT"] | 1 | 2021-03-09T09:12:02.000Z | 2021-03-09T09:12:02.000Z | Python/DDUtil.py | dalek7/umbrella | cabf0367940905ca5164d104d7aef6ff719ee166 | ["MIT"] | null | null | null | Python/DDUtil.py | dalek7/umbrella | cabf0367940905ca5164d104d7aef6ff719ee166 | ["MIT"] | null | null | null |
import os
import datetime
def exit():
os._exit(0)
def GetTimeString(m = -1):
if m==0:
s1 = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
else:
s1 = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
return s1
def MakeDir(directory):
if not os.path.exists(directory):
os.makedirs(directory)
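# Editor's usage sketch (not part of the original file):
# MakeDir('./logs/' + GetTimeString())  # e.g. ./logs/20240101_120000
# MakeDir no-ops when the directory already exists, so it is safe to rerun.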
| 19.941176 | 62 | 0.60767 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 29 | 0.085546 |
020b56188f2411001ea02312adb3e4b3e9f8fcbc | 4,301 | py | Python | codes/modelTraining.py | jairock282/hatsi | ecb16fb99115c413e980855ae3d06433ced2260c | ["MIT"] | null | null | null | codes/modelTraining.py | jairock282/hatsi | ecb16fb99115c413e980855ae3d06433ced2260c | ["MIT"] | null | null | null | codes/modelTraining.py | jairock282/hatsi | ecb16fb99115c413e980855ae3d06433ced2260c | ["MIT"] | null | null | null |
"""
__| |_____________________________________________________________________________________| |__
(__ _____________________________________________________________________________________ __)
| | | |
| | modelTraining Module | |
| | | |
| | Trains the LSTM model with the sliding windows of 15 frames | |
__| |_____________________________________________________________________________________| |__
(__ _____________________________________________________________________________________ __)
| | | |
"""
import glob
import numpy as np
import pandas as pd
from tensorflow import keras
from keras.layers import LSTM,Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.utils import to_categorical
from sklearn.metrics import multilabel_confusion_matrix, accuracy_score
## ------------------------------------------------ Loading Data ------------------------------------------------------------------
files = glob.glob(r'C:\Users\khmap\depthai-python\Ejemplos_Python\Datos_Completos_L/*.csv') ##Read all the CSV files
tam=len(files) ##Total of files
tTrain=(70*tam)/100 ##Gets 70% of the files to the train process
tTest=tam-tTrain ##Gets 30% of the files to the test process
## -------------- Data matrices --------------
x_train=np.zeros((int(tTrain), 15, 201))
x_test=np.zeros((int(tTest), 15, 201))
y_train=np.zeros(int(tTrain))
y_test=np.zeros(int(tTest))
## ----------------- Phrases -------------------
phrases=np.array(['A','B','C','Diarrea','DolordeCabeza','DolordeCuerpo','D','E','Fatiga','Fiebre','F','G','H','I','J','K','L','M','N','O','P','Q','R','Sin sena','S','Tos','T','U','V','W','X','Y','Z','Ñ']) ##Phrases
label_map = {label:num for num, label in enumerate(phrases)} ##Phrases mapping
cont=0 ##Counter to separate 70% of the data to the training process and 30% to the testing process
contNum=0 ##Counter to assign to ytest and ytrain
cont_x_tra=0 ##Counter of the vector x_train
cont_x_tes=0 ##Counter of the vector x_test
cont_y_tra=0 ##Counter of the vector y_train
cont_y_tes=0 ##Counter of the vector y_test
## Iterate over each CSV file
for i in range(0, tam):
fRead= pd.read_csv(files[i]) ##Read file
res= fRead.values ##Gets all the values
res = res[0:len(res), 1:len(res[1])]
if cont<70: ## Training data
x_train[cont_x_tra]=res
y_train[cont_y_tra]=contNum
cont=cont+1
cont_x_tra=cont_x_tra + 1
cont_y_tra = cont_y_tra + 1
else: ## Testing data
x_test[cont_x_tes] = res
y_test[cont_y_tes] = contNum
cont = cont + 1
cont_x_tes =cont_x_tes + 1
cont_y_tes = cont_y_tes + 1
if cont==100:
cont=0
contNum=contNum+1
##Converts to binary matrix
y_train=to_categorical (y_train).astype(int)
y_test=to_categorical (y_test).astype(int)
print("Datos Guardados")
## -------------------------------------- Model ------------------------------------------------
model=Sequential()
model.add(LSTM(3400,return_sequences=True,activation='relu',input_shape=(15,201))) ##Input layer
model.add(LSTM(400,return_sequences=True,activation='relu')) ##Hidden layers
model.add(LSTM(128,return_sequences=False,activation='relu'))
model.add(Dense(64,activation='relu'))
model.add(Dense(34,activation='softmax')) ##Output layer
model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['categorical_accuracy'])
model.fit(x_train,y_train,epochs=200)
model.summary() ## Summary of the model results
print("Modelo entrenado")
resul=model.predict(x_test) ##Prediction
## ---------------- Model evaluation ------------------------
print("Evaluacion")
ytrue=np.argmax(y_test,axis=1).tolist()
yhat=np.argmax(resul,axis=1).tolist()
matriz=multilabel_confusion_matrix(ytrue,yhat)
ac = accuracy_score(ytrue,yhat)
model.save('Entrenamiento_ABC_Enf_1.h5') ##Saves the model
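## Editor's sketch (not in the original script): decode one test prediction back to
## its phrase label with the same mapping defined above.
## predicted_phrase = phrases[np.argmax(resul[0])]
## print(predicted_phrase)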
| 44.802083 | 215 | 0.601953 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,306 | 0.53603 |
020c16a78df08433f5dc19175781c44bf2dcbb01 | 1,763 | py | Python | datasource/mylaps/tests.py | SphinxNZ/game-on | da10ea9303563cd91ccab13321ba15a927e703e5 | ["Apache-2.0"] | null | null | null | datasource/mylaps/tests.py | SphinxNZ/game-on | da10ea9303563cd91ccab13321ba15a927e703e5 | ["Apache-2.0"] | null | null | null | datasource/mylaps/tests.py | SphinxNZ/game-on | da10ea9303563cd91ccab13321ba15a927e703e5 | ["Apache-2.0"] | null | null | null |
import datetime
from django.utils import timezone
from django.test import TestCase
from sport.models import Sport, Competition, Venue
from compete.models import CompetitionRound
from compete.motorsport.models import Race
from datasource.models import DataSource
from datasource.mylaps.scoreboard import ScoreboardHandler
class ScoreboardTestCase(TestCase):
def setUp(self):
sport = Sport.objects.create(name="test sport")
competition = Competition.objects.create(sport=sport, name="test comp")
round = CompetitionRound.objects.create(competition=competition, name="test round")
venue = Venue.objects.create(real_name="test venue")
datasource = DataSource.objects.create(data_source_type="Client", round=round )
race = Race.objects.create( sport=sport, competition=competition, venue=venue, number=1, name="test race" )
self.handler = ScoreboardHandler(datasource, competition, round, timezone.datetime.now().date())
self.handler.race = race
def test_heartbeat(self):
"""
Make sure the heartbeat works
"""
changes = self.handler.parse('$F,14,"00:12:45","13:34:23","00:09:47","Green "')
self.assertEqual(self.handler.race.status,"Green")
changes = self.handler.parse('$F,14,"00:12:45","13:34:22","00:09:47","Green "')
self.assertEqual(changes,[])
changes = self.handler.parse('$F,14,"00:11:45","13:34:22","00:09:47","Green "')
self.assertEqual(changes,["time to go",])
changes = self.handler.parse('$F,0,"00:00:00","13:34:23","00:09:47","Finish"')
self.assertTrue("status" in changes)
self.assertTrue("finished" in changes)
self.assertTrue("time to go" in changes)
| 43 | 115 | 0.676687 | 1,429 | 0.81055 | 0 | 0 | 0 | 0 | 0 | 0 | 363 | 0.205899 |
020c551868d4325ef446cf93f3e3b90f6e4e9908 | 1,697 | py | Python | scripts/generate_tests.py | alibaba/sionnx | 3f3e18826ddcc26402b4e2af96ca8aac15560456 | ["Apache-2.0"] | 34 | 2019-05-29T03:15:48.000Z | 2022-03-24T03:14:58.000Z | scripts/generate_tests.py | alibaba/sionnx | 3f3e18826ddcc26402b4e2af96ca8aac15560456 | ["Apache-2.0"] | 1 | 2020-05-21T11:44:22.000Z | 2020-05-21T11:44:22.000Z | scripts/generate_tests.py | alibaba/sionnx | 3f3e18826ddcc26402b4e2af96ca8aac15560456 | ["Apache-2.0"] | 4 | 2019-12-16T18:49:42.000Z | 2021-10-11T18:41:54.000Z |
#*
#* Copyright (C) 2017-2019 Alibaba Group Holding Limited
#*
#* Licensed under the Apache License, Version 2.0 (the "License");
#* you may not use this file except in compliance with the License.
#* You may obtain a copy of the License at
#*
#* http://www.apache.org/licenses/LICENSE-2.0
#*
#* Unless required by applicable law or agreed to in writing, software
#* distributed under the License is distributed on an "AS IS" BASIS,
#* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#* See the License for the specific language governing permissions and
#* limitations under the License.
import sys
import os
import glob
import argparse
parser = argparse.ArgumentParser(description='Generate conformance tests')
parser.add_argument("-profile_level", help="Specify the profile level: 0=smoke tests; 1=full tests", type=int)
args = parser.parse_args()
option = "-gen-onnx-smoke-tests"
if args.profile_level:
option = "-gen-onnx-smoke-tests" if args.profile_level==0 else "-gen-onnx-tests"
print("======Generating tests with option " + option + "========")
if not os.path.exists("tests"):
os.makedirs("tests")
os.system("cp ../include/onnx_*.td -r . | cp ../include/*.algorithm -r .")
dir_path = os.path.dirname(os.path.realpath(__file__))
td_files = glob.glob(os.path.join(dir_path, '*.td'))
lens = len(td_files)
for k in range(lens):
base = os.path.basename(td_files[k])
out_file_name = os.path.splitext(base)[0]
os.system("../llvm/build/bin/llvm-tblgen " + option + " " + td_files[k] + " -I ./ -o ./tests/" + out_file_name + ".py")
print(out_file_name + ".py generated.")
os.system("rm onnx_*.td | rm *.algorithm")
| 36.891304 | 124 | 0.703595 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,004 | 0.591632 |
020e09341ffea9ce59519650e80614b26a974b81 | 6,610 | py | Python | tests/mixins.py | jarkkorantala/sqlalchemy-utils | 7cee65f0a3074245b853425e19a732aa274bfa3e | ["BSD-3-Clause"] | 879 | 2015-01-01T12:06:35.000Z | 2022-03-27T16:13:05.000Z | tests/mixins.py | jarkkorantala/sqlalchemy-utils | 7cee65f0a3074245b853425e19a732aa274bfa3e | ["BSD-3-Clause"] | 418 | 2015-01-02T08:43:43.000Z | 2022-03-25T15:49:21.000Z | tests/mixins.py | jarkkorantala/sqlalchemy-utils | 7cee65f0a3074245b853425e19a732aa274bfa3e | ["BSD-3-Clause"] | 295 | 2015-01-06T14:19:33.000Z | 2022-03-26T16:20:50.000Z |
import pytest
import sqlalchemy as sa
class ThreeLevelDeepOneToOne(object):
@pytest.fixture
def Catalog(self, Base, Category):
class Catalog(Base):
__tablename__ = 'catalog'
id = sa.Column('_id', sa.Integer, primary_key=True)
category = sa.orm.relationship(
Category,
uselist=False,
backref='catalog'
)
return Catalog
@pytest.fixture
def Category(self, Base, SubCategory):
class Category(Base):
__tablename__ = 'category'
id = sa.Column('_id', sa.Integer, primary_key=True)
catalog_id = sa.Column(
'_catalog_id',
sa.Integer,
sa.ForeignKey('catalog._id')
)
sub_category = sa.orm.relationship(
SubCategory,
uselist=False,
backref='category'
)
return Category
@pytest.fixture
def SubCategory(self, Base, Product):
class SubCategory(Base):
__tablename__ = 'sub_category'
id = sa.Column('_id', sa.Integer, primary_key=True)
category_id = sa.Column(
'_category_id',
sa.Integer,
sa.ForeignKey('category._id')
)
product = sa.orm.relationship(
Product,
uselist=False,
backref='sub_category'
)
return SubCategory
@pytest.fixture
def Product(self, Base):
class Product(Base):
__tablename__ = 'product'
id = sa.Column('_id', sa.Integer, primary_key=True)
price = sa.Column(sa.Integer)
sub_category_id = sa.Column(
'_sub_category_id',
sa.Integer,
sa.ForeignKey('sub_category._id')
)
return Product
@pytest.fixture
def init_models(self, Catalog, Category, SubCategory, Product):
pass
class ThreeLevelDeepOneToMany(object):
@pytest.fixture
def Catalog(self, Base, Category):
class Catalog(Base):
__tablename__ = 'catalog'
id = sa.Column('_id', sa.Integer, primary_key=True)
categories = sa.orm.relationship(Category, backref='catalog')
return Catalog
@pytest.fixture
def Category(self, Base, SubCategory):
class Category(Base):
__tablename__ = 'category'
id = sa.Column('_id', sa.Integer, primary_key=True)
catalog_id = sa.Column(
'_catalog_id',
sa.Integer,
sa.ForeignKey('catalog._id')
)
sub_categories = sa.orm.relationship(
SubCategory, backref='category'
)
return Category
@pytest.fixture
def SubCategory(self, Base, Product):
class SubCategory(Base):
__tablename__ = 'sub_category'
id = sa.Column('_id', sa.Integer, primary_key=True)
category_id = sa.Column(
'_category_id',
sa.Integer,
sa.ForeignKey('category._id')
)
products = sa.orm.relationship(
Product,
backref='sub_category'
)
return SubCategory
@pytest.fixture
def Product(self, Base):
class Product(Base):
__tablename__ = 'product'
id = sa.Column('_id', sa.Integer, primary_key=True)
price = sa.Column(sa.Numeric)
sub_category_id = sa.Column(
'_sub_category_id',
sa.Integer,
sa.ForeignKey('sub_category._id')
)
def __repr__(self):
return '<Product id=%r>' % self.id
return Product
@pytest.fixture
def init_models(self, Catalog, Category, SubCategory, Product):
pass
class ThreeLevelDeepManyToMany(object):
@pytest.fixture
def Catalog(self, Base, Category):
catalog_category = sa.Table(
'catalog_category',
Base.metadata,
sa.Column('catalog_id', sa.Integer, sa.ForeignKey('catalog._id')),
sa.Column('category_id', sa.Integer, sa.ForeignKey('category._id'))
)
class Catalog(Base):
__tablename__ = 'catalog'
id = sa.Column('_id', sa.Integer, primary_key=True)
categories = sa.orm.relationship(
Category,
backref='catalogs',
secondary=catalog_category
)
return Catalog
@pytest.fixture
def Category(self, Base, SubCategory):
category_subcategory = sa.Table(
'category_subcategory',
Base.metadata,
sa.Column(
'category_id',
sa.Integer,
sa.ForeignKey('category._id')
),
sa.Column(
'subcategory_id',
sa.Integer,
sa.ForeignKey('sub_category._id')
)
)
class Category(Base):
__tablename__ = 'category'
id = sa.Column('_id', sa.Integer, primary_key=True)
sub_categories = sa.orm.relationship(
SubCategory,
backref='categories',
secondary=category_subcategory
)
return Category
@pytest.fixture
def SubCategory(self, Base, Product):
subcategory_product = sa.Table(
'subcategory_product',
Base.metadata,
sa.Column(
'subcategory_id',
sa.Integer,
sa.ForeignKey('sub_category._id')
),
sa.Column(
'product_id',
sa.Integer,
sa.ForeignKey('product._id')
)
)
class SubCategory(Base):
__tablename__ = 'sub_category'
id = sa.Column('_id', sa.Integer, primary_key=True)
products = sa.orm.relationship(
Product,
backref='sub_categories',
secondary=subcategory_product
)
return SubCategory
@pytest.fixture
def Product(self, Base):
class Product(Base):
__tablename__ = 'product'
id = sa.Column('_id', sa.Integer, primary_key=True)
price = sa.Column(sa.Numeric)
return Product
@pytest.fixture
def init_models(self, Catalog, Category, SubCategory, Product):
pass
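# Editor's sketch (a hypothetical consumer, not part of the original file): test
# classes inherit a mixin to receive the whole fixture graph, e.g.
# class TestDeepAggregates(ThreeLevelDeepOneToMany):
#     def test_relationship_wiring(self, Catalog, Category):
#         assert Catalog.categories.property.mapper.class_ is Category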
| 28.864629 | 79 | 0.522542 | 6,563 | 0.99289 | 0 | 0 | 6,359 | 0.962027 | 0 | 0 | 720 | 0.108926 |
020e71ff56d4917b70bf98b950bcfa70c6d8e56c | 6,041 | py | Python | gbpservice/nfp/lib/rest_client_over_unix.py | ashutosh-mishra/my-test | 51c82af293f291b9182204392e7d21bda27786d1 | ["Apache-2.0"] | null | null | null | gbpservice/nfp/lib/rest_client_over_unix.py | ashutosh-mishra/my-test | 51c82af293f291b9182204392e7d21bda27786d1 | ["Apache-2.0"] | null | null | null | gbpservice/nfp/lib/rest_client_over_unix.py | ashutosh-mishra/my-test | 51c82af293f291b9182204392e7d21bda27786d1 | ["Apache-2.0"] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import exceptions
import httplib
import httplib2
import zlib
import six.moves.urllib.parse as urlparse
import socket
from oslo_serialization import jsonutils
from gbpservice.nfp.core import log as nfp_logging
LOG = nfp_logging.getLogger(__name__)
class RestClientException(exceptions.Exception):
""" RestClient Exception """
class UnixHTTPConnection(httplib.HTTPConnection):
"""Connection class for HTTP over UNIX domain socket."""
def __init__(self, host, port=None, strict=None, timeout=None,
proxy_info=None):
httplib.HTTPConnection.__init__(self, host, port, strict)
self.timeout = timeout
self.socket_path = '/var/run/uds_socket'
def connect(self):
"""Method used to connect socket server."""
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
if self.timeout:
self.sock.settimeout(self.timeout)
try:
self.sock.connect(self.socket_path)
except socket.error as exc:
raise RestClientException(
"Caught exception socket.error : %s" % exc)
class UnixRestClient(object):
def _http_request(self, url, method_type, headers=None, body=None):
try:
h = httplib2.Http()
resp, content = h.request(
url,
method=method_type,
headers=headers,
body=body,
connection_type=UnixHTTPConnection)
return resp, content
except httplib2.ServerNotFoundError:
raise RestClientException("Server Not Found")
except exceptions.Exception as e:
raise RestClientException("httplib response error %s" % (e))
def send_request(self, path, method_type, request_method='http',
server_addr='127.0.0.1',
headers=None, body=None):
"""Implementation for common interface for all unix crud requests.
Return:Http Response
"""
# prepares path, body, url for sending unix request.
if method_type.upper() != 'GET':
body = jsonutils.dumps(body)
body = zlib.compress(body)
path = '/v1/nfp/' + path
url = urlparse.urlunsplit((
request_method,
server_addr,
path,
None,
''))
try:
resp, content = self._http_request(url, method_type,
headers=headers, body=body)
if content != '':
content = zlib.decompress(content)
message = "%s:%s" % (resp, content)
LOG.info(message)
except RestClientException as rce:
message = "ERROR : %s" % (rce)
LOG.error(message)
raise rce
success_code = [200, 201, 202, 204]
# Evaluate responses into success and failures.
# Raise exception for failure cases which needs
# to be handled by caller.
        if resp.status in success_code:
return resp, content
elif resp.status == 400:
raise RestClientException("HTTPBadRequest: %s" % resp.reason)
elif resp.status == 401:
raise RestClientException("HTTPUnauthorized: %s" % resp.reason)
elif resp.status == 403:
raise RestClientException("HTTPForbidden: %s" % resp.reason)
elif resp.status == 404:
raise RestClientException("HttpNotFound: %s" % resp.reason)
elif resp.status == 405:
raise RestClientException(
"HTTPMethodNotAllowed: %s" % resp.reason)
elif resp.status == 406:
raise RestClientException("HTTPNotAcceptable: %s" % resp.reason)
elif resp.status == 408:
raise RestClientException("HTTPRequestTimeout: %s" % resp.reason)
elif resp.status == 409:
raise RestClientException("HTTPConflict: %s" % resp.reason)
elif resp.status == 415:
raise RestClientException(
"HTTPUnsupportedMediaType: %s" % resp.reason)
elif resp.status == 417:
raise RestClientException(
"HTTPExpectationFailed: %s" % resp.reason)
elif resp.status == 500:
raise RestClientException("HTTPServerError: %s" % resp.reason)
else:
raise Exception('Unhandled Exception code: %s %s' % (resp.status,
resp.reason))
def get(path):
"""Implements get method for unix restclient
Return:Http Response
"""
return UnixRestClient().send_request(path, 'GET')
def put(path, body):
"""Implements put method for unix restclient
Return:Http Response
"""
headers = {'content-type': 'application/octet-stream'}
return UnixRestClient().send_request(
path, 'PUT', headers=headers, body=body)
def post(path, body, delete=False):
"""Implements post method for unix restclient
Return:Http Response
"""
# Method-Type added here,as DELETE/CREATE
# both case are handled by post as delete also needs
# to send data to the rest-unix-server.
headers = {'content-type': 'application/octet-stream'}
if delete:
headers.update({'method-type': 'DELETE'})
else:
headers.update({'method-type': 'CREATE'})
return UnixRestClient().send_request(
path, 'POST', headers=headers, body=body)
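# Editor's usage sketch (hypothetical resource path, not part of the original
# module): each helper returns the (response, content) pair from send_request().
# resp, content = post('network_function', body={'id': 'nf-1'})
# resp, content = get('network_function')
# resp, content = post('network_function', body={'id': 'nf-1'}, delete=True)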
| 35.327485 | 78 | 0.609833 | 4,256 | 0.704519 | 0 | 0 | 0 | 0 | 0 | 0 | 1,907 | 0.315676 |
020f39177cabbb0de46cc69acb4473e957930343 | 3,916 | py | Python | tk_sim.py | incherre/slam-bot | 8479aff8f595b2d602a83e9e922b64836ae64375 | ["MIT"] | null | null | null | tk_sim.py | incherre/slam-bot | 8479aff8f595b2d602a83e9e922b64836ae64375 | ["MIT"] | null | null | null | tk_sim.py | incherre/slam-bot | 8479aff8f595b2d602a83e9e922b64836ae64375 | ["MIT"] | null | null | null |
'''Robot sim with a nicer display.'''
from sim_framework import *
from math import radians
import tkinter
BACKGROUND_COLOR = 'grey60'
ENTITY_COLOR = 'RoyalBlue1'
OBSTACLE_COLOR = 'black'
ENTITY_TAG = 'entity'
class TKWorld(World):
'''A world that will display via tkinter instead of ascii.'''
def __init__(self, root, x_min, x_max, y_min, y_max, resolution=2, max_dist=10000, collision_delta_theta=1):
super().__init__(resolution=resolution, max_dist=max_dist, collision_delta_theta=collision_delta_theta)
if x_min >= x_max:
raise ValueError('Improperly ordered x boundaries')
self.x_min = x_min
self.x_max = x_max
if y_min >= y_max:
raise ValueError('Improperly ordered y boundaries')
self.y_min = y_min
self.y_max = y_max
self.root = root
self.room_canvas = tkinter.Canvas(self.root, bg=BACKGROUND_COLOR,
                                          height=self.y_max - self.y_min,
width=self.x_max - self.x_min)
self.add_obs(Wall(self.x_min, '-x'))
self.add_obs(Wall(self.x_max, '+x'))
self.add_obs(Wall(self.y_min, '-y'))
self.add_obs(Wall(self.y_max, '+y'))
def add_obs(self, obstacle):
'''Adds the obstacle to tracking and also the TK canvas.'''
super().add_obs(obstacle)
if isinstance(obstacle, Wall):
# In the TK world, walls are only used for the outside of the box.
pass
elif isinstance(obstacle, Box):
box_x1, box_y1 = self.get_canvas_coords(obstacle.x_min, obstacle.y_max)
box_x2, box_y2 = self.get_canvas_coords(obstacle.x_max, obstacle.y_min)
self.room_canvas.create_rectangle(box_x1, box_y1,
box_x2, box_y2,
fill=OBSTACLE_COLOR, outline=OBSTACLE_COLOR)
else:
print('Error: Unknown obstacle type added to sim:', type(obstacle).__name__)
def get_canvas_coords(self, x, y):
'''Converts simulation coordinates to canvas coordinates.'''
disp_x = x - self.x_min
disp_y = (self.y_max - self.y_min) - (y - self.y_min) - 1
return (disp_x, disp_y)
def display(self):
'''Displays the environment, by default just as a character array.'''
try:
self.room_canvas.delete(ENTITY_TAG)
        except tkinter.TclError:
return
for ent in self.entities:
if isinstance(ent, CircleBot):
center_x, center_y = self.get_canvas_coords(ent.x, ent.y)
ent_x1 = center_x - ent.radius
ent_y1 = center_y - ent.radius
ent_x2 = center_x + ent.radius
ent_y2 = center_y + ent.radius
self.room_canvas.create_oval(ent_x1, ent_y1,
ent_x2, ent_y2,
fill=ENTITY_COLOR, outline=ENTITY_COLOR,
tags=(ENTITY_TAG,))
else:
print('Error: Unknown entity type found in sim:', type(ent).__name__)
self.room_canvas.pack()
if __name__ == '__main__':
root = tkinter.Tk()
W = TKWorld(root, -500, 500, -500, 500)
W.add_obs(Box(-500, -250, 250, 500))
W.add_obs(Box(-450, -200, 200, 450))
W.add_obs(Box(-400, -150, 150, 400))
W.add_obs(Box(-350, -100, 100, 350))
bot = CircleBot(100, 0, 0, 0)
W.add_ent(bot)
theta = radians(0)
def update():
root.after(int(1000 / 60), update)
global theta
W.display()
theta -= radians(0.2)
if W.move_ent(bot, 5, theta):
theta -= radians(360 * 1.618)
theta = theta % radians(360)
root.after(int(1000 / 60), update)
root.mainloop()
| 36.943396 | 112 | 0.565884 | 3,052 | 0.779367 | 0 | 0 | 0 | 0 | 0 | 0 | 565 | 0.14428 |
0210ff2439d9da24bc21178720c18eee48ba770a | 1,224 | py | Python | COT/tests/test_doctests.py | morneaup/cot | 3d4dc7079a33aa0c09216ec339b44f84ab69ff4b | ["MIT"] | 81 | 2015-01-18T22:31:42.000Z | 2022-03-14T12:34:33.000Z | COT/tests/test_doctests.py | morneaup/cot | 3d4dc7079a33aa0c09216ec339b44f84ab69ff4b | ["MIT"] | 67 | 2015-01-05T15:24:39.000Z | 2021-08-16T12:44:58.000Z | COT/tests/test_doctests.py | morneaup/cot | 3d4dc7079a33aa0c09216ec339b44f84ab69ff4b | ["MIT"] | 20 | 2015-07-09T14:20:25.000Z | 2021-09-18T17:59:57.000Z |
#!/usr/bin/env python
#
# test_doctests.py - test runner for COT doctests
#
# July 2016, Glenn F. Matthews
# Copyright (c) 2016-2017 the COT project developers.
# See the COPYRIGHT.txt file at the top-level directory of this distribution
# and at https://github.com/glennmatthews/cot/blob/master/COPYRIGHT.txt.
#
# This file is part of the Common OVF Tool (COT) project.
# It is subject to the license terms in the LICENSE.txt file found in the
# top-level directory of this distribution and at
# https://github.com/glennmatthews/cot/blob/master/LICENSE.txt. No part
# of COT, including this file, may be copied, modified, propagated, or
# distributed except according to the terms contained in the LICENSE.txt file.
"""Test runner for COT doctest tests."""
import logging
from logging import NullHandler
from doctest import DocTestSuite
from unittest import TestSuite
logging.getLogger('COT').addHandler(NullHandler())
def load_tests(*_):
"""Load doctests as unittest test suite.
For the parameters, see :mod:`unittest`. The parameters are unused here.
"""
suite = TestSuite()
suite.addTests(DocTestSuite('COT.data_validation'))
suite.addTests(DocTestSuite('COT.utilities'))
return suite
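# Editor's usage sketch (not part of the original file): unittest discovers
# load_tests() automatically, so the suite can be run with
#   python -m unittest COT.tests.test_doctests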
| 33.081081 | 78 | 0.750817 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 909 | 0.742647 |
021113c40a21b05029b6c6708d8e10e3927d9701 | 1,045 | py | Python | aws/etc/packer/tools/python/stardog/cluster/test_program.py | stardog-union/stardog-graviton | 652fa3e3bbb166e92ce165938ef2075831d26c04 | ["Apache-2.0"] | 3 | 2017-03-10T15:00:08.000Z | 2019-10-29T07:46:19.000Z | aws/etc/packer/tools/python/stardog/cluster/test_program.py | stardog-union/stardog-graviton | 652fa3e3bbb166e92ce165938ef2075831d26c04 | ["Apache-2.0"] | 31 | 2017-02-21T16:19:11.000Z | 2021-03-25T21:27:50.000Z | aws/etc/packer/tools/python/stardog/cluster/test_program.py | stardog-union/stardog-graviton | 652fa3e3bbb166e92ce165938ef2075831d26c04 | ["Apache-2.0"] | 6 | 2017-04-26T07:22:25.000Z | 2020-07-29T20:17:55.000Z |
import logging
import subprocess
import sys
import stardog.cluster.utils as utils
def run_program(cmd, tries):
def pgm_func():
try:
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
o, e = p.communicate()
logging.debug("STDOUT: %s", o.decode())
logging.debug("STDERR: %s", e.decode())
rc = p.wait()
if rc == 0:
logging.info("The program %s succeeded", cmd)
return True
else:
logging.warning("The program %s failed %d", cmd, rc)
except Exception as ex:
logging.warning("There was an exception running %s. %s.", cmd, ex)
return False
logging.info("Start the program run loop for the command %s")
return utils.wait_for_func(tries, 30, pgm_func)
def main():
utils.setup_logging()
tries = int(sys.argv[1])
cmd = sys.argv[2:]
rc = run_program(' '.join(cmd), tries)
if not rc:
return 1
return 0
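# Editor's note: main() is defined but never invoked here; a typical entry-point
# guard (assumed, not in the source) would be:
# if __name__ == '__main__':
#     sys.exit(main())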
| 29.857143 | 97 | 0.572249 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 167 | 0.159809 |
0211dbc40a6aa48e66ae666cbc2afb8294c1a296 | 297 | py | Python | apps/core/urls.py | tayyabRazzaq/opl-platform | 37b0efdb9327253a144c50bfd192132fac732619 | ["MIT"] | 2 | 2019-04-03T04:04:53.000Z | 2019-04-28T16:13:56.000Z | apps/core/urls.py | tayyabRazzaq/opl-platform | 37b0efdb9327253a144c50bfd192132fac732619 | ["MIT"] | 8 | 2021-06-04T21:57:30.000Z | 2022-03-11T23:48:38.000Z | apps/core/urls.py | tayyab-razzaq/opl-platform | 37b0efdb9327253a144c50bfd192132fac732619 | ["MIT"] | 7 | 2019-03-12T19:39:08.000Z | 2021-04-15T05:25:59.000Z |
""" Here all the blog's urls routes will be mapped """
from django.urls import path
from django.conf.urls import include, url
from . import views
app_name = 'core'
urlpatterns = [
# path('', views.home, name='home-page'),
url(r'^api/', include('apps.core.api.urls', namespace='api')),
]
| 24.75 | 66 | 0.670034 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 134 | 0.451178 |
021267aeacfe0ae1c6472616df30ce20f8a2d09b | 24,270 | py | Python | picoCTF-web/tests/api/functional/common.py | MongYahHsieh/picoCTF | dd500ad9c59768137b33e2d2b102a089ddf0ad40 | ["MIT"] | null | null | null | picoCTF-web/tests/api/functional/common.py | MongYahHsieh/picoCTF | dd500ad9c59768137b33e2d2b102a089ddf0ad40 | ["MIT"] | null | null | null | picoCTF-web/tests/api/functional/common.py | MongYahHsieh/picoCTF | dd500ad9c59768137b33e2d2b102a089ddf0ad40 | ["MIT"] | null | null | null |
"""Utilities for functional tests."""
import datetime
import json
import re
import pymongo
import pytest
import api
RATE_LIMIT_BYPASS = "test_bypass"
TESTING_DB_NAME = 'ctf_test'
db = None
def decode_response(res):
"""Parse a WebSuccess or WebError response."""
decoded_dict = json.loads(res.data.decode('utf-8'))
return (decoded_dict['status'], decoded_dict['message'],
decoded_dict['data'])
def get_csrf_token(res):
"""Extract the CSRF token from a response."""
for header in res.headers:
m = re.search('token=(.+?);', header[1])
if m:
return m.group(1)
raise RuntimeError('Could not find CSRF token in response headers: ' + str(res.headers))
def get_conn():
"""Get a connection to the testing database."""
global db
if db is None:
client = pymongo.MongoClient(host='127.0.0.1', port=27018)
db = client[TESTING_DB_NAME]
return db
def clear_db():
"""Clear out the testing database."""
db = get_conn()
db.command('dropDatabase')
@pytest.fixture
def client():
"""Create a test client of the Flask app."""
app = api.create_app({
'TESTING': True,
'MONGO_DB_NAME': TESTING_DB_NAME,
'MONGO_PORT': 27018,
'RATE_LIMIT_BYPASS': RATE_LIMIT_BYPASS
})
return app.test_client()
def app():
"""Create an instance of the Flask app for testing."""
app = api.create_app({
'TESTING': True,
'MONGO_DB_NAME': TESTING_DB_NAME,
'MONGO_PORT': 27018
})
return app
def cache(f, *args, **kwargs):
    """Call a memoized function with its cache reset."""
    result = f(reset_cache=True, *args, **kwargs)
    return result
def update_all_scoreboards():
    """Recompute the cached team and group scoreboards."""
api.stats.get_all_team_scores()
api.stats.get_all_team_scores(include_ineligible=True)
for group in api.group.get_all_groups():
api.stats.get_group_scores(gid=group['gid'])
ADMIN_DEMOGRAPHICS = {
'username': 'adminuser',
'password': 'adminpw',
'firstname': 'Admin',
'lastname': 'User',
'email': 'admin@example.com',
'country': 'US',
'affiliation': 'Admin School',
'usertype': 'other',
'demo': {
'parentemail': 'admin@example.com',
'age': '18+'
},
'gid': None,
'rid': None
}
TEACHER_DEMOGRAPHICS = {
'username': 'teacheruser',
'password': 'teacherpw',
'firstname': 'Teacher',
'lastname': 'User',
'email': 'teacher@example.com',
'country': 'US',
'affiliation': 'Sample School',
'usertype': 'teacher',
'demo': {
'parentemail': 'teacher@example.com',
'age': '18+'
},
'gid': None,
'rid': None
}
STUDENT_DEMOGRAPHICS = {
'username': 'studentuser',
'password': 'studentpw',
'firstname': 'Student',
'lastname': 'User',
'email': 'student@example.com',
'country': 'US',
'affiliation': 'Sample School',
'usertype': 'student',
'demo': {
'parentemail': 'student@example.com',
'age': '13-17'
},
'gid': None,
'rid': None
}
STUDENT_2_DEMOGRAPHICS = {
'username': 'studentuser2',
'password': 'studentpw2',
'firstname': 'Student',
'lastname': 'Usertwo',
'email': 'student2@example.com',
'country': 'US',
'affiliation': 'Sample School',
'usertype': 'student',
'demo': {
'parentemail': 'student2@example.com',
'age': '18+'
},
'gid': None,
'rid': None
}
OTHER_USER_DEMOGRAPHICS = {
'username': 'otheruser',
'password': 'otherpw',
'firstname': 'Other',
'lastname': 'User',
'email': 'other@example.com',
'country': 'US',
'affiliation': 'Sample Organization',
'usertype': 'other',
'demo': {
'age': '18+'
},
'gid': None,
'rid': None
}
def register_test_accounts():
"""
Register an admin, teacher, and student account with known demographics.
Intended to be used, if needed, in conjunction with clear_db()
to set up a clean environment for each test.
"""
with app().app_context():
api.user.add_user(ADMIN_DEMOGRAPHICS)
api.user.add_user(TEACHER_DEMOGRAPHICS)
api.user.add_user(STUDENT_DEMOGRAPHICS)
api.user.add_user(STUDENT_2_DEMOGRAPHICS)
api.user.add_user(OTHER_USER_DEMOGRAPHICS)
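# Editor's usage sketch (not part of the original file): a typical per-test reset.
# clear_db()
# register_test_accounts()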
sample_shellserver_publish_output = r'''
{
"problems": [
{
"name": "ECB 1",
"category": "Cryptography",
"description": "There is a crypto service running at {{server}}:{{port}}. We were able to recover the source code, which you can download at {{url_for(\"ecb.py\")}}.",
"hints": [],
"walkthrough": "Let me google that for you.",
"score": 70,
"author": "Tim Becker",
"organization": "ForAllSecure",
"event": "Sample",
"pip_requirements": [
"pycrypto"
],
"pip_python_version": "3",
"unique_name": "ecb-1-b06174a",
"instances": [
{
"user": "ecb-1_0",
"deployment_directory": "/problems/ecb-1_0_73a0108a98d2862a86f4b71534aaf7c3",
"service": "ecb-1_0",
"socket": null,
"server": "192.168.2.3",
"description": "There is a crypto service running at 192.168.2.3:46981. We were able to recover the source code, which you can download at <a href='//192.168.2.3/static/fd59acc6b8d2359d48bd939a08ecb8ab/ecb.py'>ecb.py</a>.",
"flag": "49e56ea9bf2e2b60ba9af034b5b2a5fd",
"flag_sha1": "77cec418714d6eb0dc48afa6d6f38200402a83c0",
"instance_number": 0,
"should_symlink": false,
"files": [
{
"path": "flag",
"permissions": 288,
"user": null,
"group": null
},
{
"path": "key",
"permissions": 288,
"user": null,
"group": null
},
{
"path": "ecb.py",
"permissions": 1517,
"user": null,
"group": null
},
{
"path": "xinet_startup.sh",
"permissions": 1517,
"user": null,
"group": null
}
],
"port": 46981
},
{
"user": "ecb-1_1",
"deployment_directory": "/problems/ecb-1_1_83b2ed9a1806c86219347bc4982a66de",
"service": "ecb-1_1",
"socket": null,
"server": "192.168.2.3",
"description": "There is a crypto service running at 192.168.2.3:21953. We were able to recover the source code, which you can download at <a href='//192.168.2.3/static/beb9874a05a1810fa8c9d79152ace1b3/ecb.py'>ecb.py</a>.",
"flag": "85a32ccd05fa30e0efd8da555c1a101a",
"flag_sha1": "f28581a86561c885152f7622200057585787c063",
"instance_number": 1,
"should_symlink": false,
"files": [
{
"path": "flag",
"permissions": 288,
"user": null,
"group": null
},
{
"path": "key",
"permissions": 288,
"user": null,
"group": null
},
{
"path": "ecb.py",
"permissions": 1517,
"user": null,
"group": null
},
{
"path": "xinet_startup.sh",
"permissions": 1517,
"user": null,
"group": null
}
],
"port": 21953
},
{
"user": "ecb-1_2",
"deployment_directory": "/problems/ecb-1_2_1998c2cc0f0d17ae54170200f5478b7f",
"service": "ecb-1_2",
"socket": null,
"server": "192.168.2.3",
"description": "There is a crypto service running at 192.168.2.3:17648. We were able to recover the source code, which you can download at <a href='//192.168.2.3/static/19e863cba0bf14ad676e4b4799eacc72/ecb.py'>ecb.py</a>.",
"flag": "f76d2f6b885255450ed2f7307d96e28e",
"flag_sha1": "43cf6f1dab026cf2100e2f663509512416112219",
"instance_number": 2,
"should_symlink": false,
"files": [
{
"path": "flag",
"permissions": 288,
"user": null,
"group": null
},
{
"path": "key",
"permissions": 288,
"user": null,
"group": null
},
{
"path": "ecb.py",
"permissions": 1517,
"user": null,
"group": null
},
{
"path": "xinet_startup.sh",
"permissions": 1517,
"user": null,
"group": null
}
],
"port": 17648
}
],
"sanitized_name": "ecb-1"
},
{
"name": "SQL Injection 1",
"category": "Web Exploitation",
"pkg_dependencies": [
"php7.2-sqlite3"
],
"description": "There is a website running at http://{{server}}:{{port}}. Try to see if you can login!",
"score": 40,
"hints": [],
"author": "Tim Becker",
"organization": "ForAllSecure",
"event": "Sample",
"unique_name": "sql-injection-1-0c436d0",
"instances": [
{
"user": "sql-injection-1_0",
"deployment_directory": "/problems/sql-injection-1_0_9e114b246c48eb158b16525f71ae2a00",
"service": "sql-injection-1_0",
"socket": null,
"server": "192.168.2.3",
"description": "There is a website running at http://192.168.2.3:46984. Try to see if you can login!",
"flag": "9ac0a74de6bced3cdce8e7fd466f32d0",
"flag_sha1": "958416d52940e4948eca8d9fb1eca21e4cf7eda1",
"instance_number": 0,
"should_symlink": false,
"files": [
{
"path": "webroot/index.html",
"permissions": 436,
"user": null,
"group": null
},
{
"path": "webroot/login.php",
"permissions": 436,
"user": null,
"group": null
},
{
"path": "webroot/login.phps",
"permissions": 436,
"user": null,
"group": null
},
{
"path": "webroot/config.php",
"permissions": 436,
"user": null,
"group": null
},
{
"path": "users.db",
"permissions": 288,
"user": null,
"group": null
},
{
"path": "xinet_startup.sh",
"permissions": 1517,
"user": null,
"group": null
}
],
"port": 46984
},
{
"user": "sql-injection-1_1",
"deployment_directory": "/problems/sql-injection-1_1_10a4b1cdfd3a0f78d0d8b9759e6d69c5",
"service": "sql-injection-1_1",
"socket": null,
"server": "192.168.2.3",
"description": "There is a website running at http://192.168.2.3:21955. Try to see if you can login!",
"flag": "28054fef0f362256c78025f82e6572c3",
"flag_sha1": "f57fa5d3861c22a657eecafe30a43bd4ad7a4a2a",
"instance_number": 1,
"should_symlink": false,
"files": [
{
"path": "webroot/index.html",
"permissions": 436,
"user": null,
"group": null
},
{
"path": "webroot/login.php",
"permissions": 436,
"user": null,
"group": null
},
{
"path": "webroot/login.phps",
"permissions": 436,
"user": null,
"group": null
},
{
"path": "webroot/config.php",
"permissions": 436,
"user": null,
"group": null
},
{
"path": "users.db",
"permissions": 288,
"user": null,
"group": null
},
{
"path": "xinet_startup.sh",
"permissions": 1517,
"user": null,
"group": null
},
{
"path": "xinet_startup.sh",
"permissions": 1517,
"user": null,
"group": null
}
],
"port": 21955
},
{
"user": "sql-injection-1_2",
"deployment_directory": "/problems/sql-injection-1_2_57a103ad26a005f69b4332e62d611372",
"service": "sql-injection-1_2",
"socket": null,
"server": "192.168.2.3",
"description": "There is a website running at http://192.168.2.3:17649. Try to see if you can login!",
"flag": "6ed19af4c4540d444ae08735aa5664af",
"flag_sha1": "19bbc88ca231ddfde8063acdda75a92b1e6fd993",
"instance_number": 2,
"should_symlink": false,
"files": [
{
"path": "webroot/index.html",
"permissions": 436,
"user": null,
"group": null
},
{
"path": "webroot/login.php",
"permissions": 436,
"user": null,
"group": null
},
{
"path": "webroot/login.phps",
"permissions": 436,
"user": null,
"group": null
},
{
"path": "webroot/config.php",
"permissions": 436,
"user": null,
"group": null
},
{
"path": "users.db",
"permissions": 288,
"user": null,
"group": null
},
{
"path": "xinet_startup.sh",
"permissions": 1517,
"user": null,
"group": null
},
{
"path": "xinet_startup.sh",
"permissions": 1517,
"user": null,
"group": null
},
{
"path": "xinet_startup.sh",
"permissions": 1517,
"user": null,
"group": null
}
],
"port": 17649
}
],
"sanitized_name": "sql-injection-1"
},
{
"name": "Buffer Overflow 1",
"category": "Binary Exploitation",
"description": "Exploit the {{url_for(\"vuln\", display=\"Buffer Overflow\")}} found here: {{directory}}.",
"score": 50,
"hints": [
"This is a classic buffer overflow with no modern protections."
],
"walkthrough": "PROTIP: Find the correct answer to get the points.",
"author": "Tim Becker",
"organization": "ForAllSecure",
"event": "Sample",
"unique_name": "buffer-overflow-1-35e6d9d",
"instances": [
{
"user": "buffer-overflow-1_0",
"deployment_directory": "/problems/buffer-overflow-1_0_bab40cd8ebd7845e1c4c2951c6f82e1f",
"service": null,
"socket": null,
"server": "192.168.2.3",
"description": "Exploit the <a href='//192.168.2.3/static/bd08ee41f495f8bff378c13157d0f511/vuln'>Buffer Overflow</a> found here: /problems/buffer-overflow-1_0_bab40cd8ebd7845e1c4c2951c6f82e1f.",
"flag": "638608c79eca2165e7b241ff365df05b",
"flag_sha1": "4b97abef055a11ec19c14622eb31eb1168d98aca",
"instance_number": 0,
"should_symlink": true,
"files": [
{
"path": "flag.txt",
"permissions": 288,
"user": null,
"group": null
},
{
"path": "vuln",
"permissions": 1517,
"user": null,
"group": null
}
]
},
{
"user": "buffer-overflow-1_1",
"deployment_directory": "/problems/buffer-overflow-1_1_f49b6bd5da29513569bd87f98a934fa6",
"service": null,
"socket": null,
"server": "192.168.2.3",
"description": "Exploit the <a href='//192.168.2.3/static/c95410042007bb17f49b891a2a87afb2/vuln'>Buffer Overflow</a> found here: /problems/buffer-overflow-1_1_f49b6bd5da29513569bd87f98a934fa6.",
"flag": "35013564b97b80d4fd3f2be45e5836ff",
"flag_sha1": "5675d2d5819084d4203c1ef314239527074938a9",
"instance_number": 1,
"should_symlink": true,
"files": [
{
"path": "flag.txt",
"permissions": 288,
"user": null,
"group": null
},
{
"path": "vuln",
"permissions": 1517,
"user": null,
"group": null
}
]
},
{
"user": "buffer-overflow-1_2",
"deployment_directory": "/problems/buffer-overflow-1_2_6c4daed04928f80dd29290060827be61",
"service": null,
"socket": null,
"server": "192.168.2.3",
"description": "Exploit the <a href='//192.168.2.3/static/dbeb4d34945e752ea988dcdb4454f57d/vuln'>Buffer Overflow</a> found here: /problems/buffer-overflow-1_2_6c4daed04928f80dd29290060827be61.",
"flag": "8dfabcb5c4a18d03ad5ecea19eef27a6",
"flag_sha1": "aef4789685665a1bf4994d62ef10941dbce5647a",
"instance_number": 2,
"should_symlink": true,
"files": [
{
"path": "flag.txt",
"permissions": 288,
"user": null,
"group": null
},
{
"path": "vuln",
"permissions": 1517,
"user": null,
"group": null
}
]
}
],
"sanitized_name": "buffer-overflow-1"
}
],
"bundles": [
{
"name": "Challenge Sampler",
"author": "Christopher Ganas",
"description": "Dependency weightmap for the example challenges provided in the picoCTF-Problems repository.",
"dependencies": {
"ecb-1-b06174a": {
"threshold": 1,
"weightmap": {
"buffer-overflow-1-35e6d9d": 1
}
},
"sql-injection-1-0c436d0": {
"threshold": 1,
"weightmap": {
"buffer-overflow-1-35e6d9d": 1,
"ecb-1-b06174a": 1
}
}
}
}
],
"sid": "728f36885f7c4686805593b9e4988c30"
}
'''
problems_endpoint_response = [{'name': 'SQL Injection 1', 'category': 'Web Exploitation', 'description': 'There is a website running at http://192.168.2.3:17648. Try to see if you can login!', 'score': 40, 'hints': [], 'author': 'Tim Becker', 'organization': 'ForAllSecure', 'sanitized_name': 'sql-injection-1', 'disabled': False, 'pid': '4508167aa0b219fd9d131551d10aa58e', 'solves': 0, 'socket': None, 'server': '192.168.2.3', 'port': 17648, 'server_number': 1, 'solved': False, 'unlocked': True}, {'name': 'Buffer Overflow 1', 'category': 'Binary Exploitation', 'description': "Exploit the <a href='//192.168.2.3/static/bd08ee41f495f8bff378c13157d0f511/vuln'>Buffer Overflow</a> found here: /problems/buffer-overflow-1_0_bab40cd8ebd7845e1c4c2951c6f82e1f.", 'score': 50, 'hints': ['This is a classic buffer overflow with no modern protections.'], 'author': 'Tim Becker', 'organization': 'ForAllSecure', 'sanitized_name': 'buffer-overflow-1', 'disabled': False, 'pid': '1bef644c399e10a3f35fecdbf590bd0c', 'solves': 0, 'socket': None, 'server': '192.168.2.3', 'server_number': 1, 'solved': False, 'unlocked': True}, {'name': 'ECB 1', 'category': 'Cryptography', 'description': "There is a crypto service running at 192.168.2.3:21953. We were able to recover the source code, which you can download at <a href='//192.168.2.3/static/beb9874a05a1810fa8c9d79152ace1b3/ecb.py'>ecb.py</a>.", 'hints': [], 'score': 70, 'author': 'Tim Becker', 'organization': 'ForAllSecure', 'sanitized_name': 'ecb-1', 'disabled': False, 'pid': '7afda419da96e8471b49df9c2009e2ef', 'solves': 0, 'socket': None, 'server': '192.168.2.3', 'port': 21953, 'server_number': 1, 'solved': False, 'unlocked': True}]
def load_sample_problems():
"""Load the sample problems and bundle into the DB."""
with app().app_context():
db = get_conn()
db.shell_servers.insert_one({
'sid': '728f36885f7c4686805593b9e4988c30',
'name': 'Test shell server',
'host': 'testing.picoctf.com',
'port': '22',
'username': 'username',
'password': 'password',
'protocol': 'HTTPS',
'server_number': 1
})
api.problem.load_published(
json.loads(sample_shellserver_publish_output)
)
def enable_sample_problems():
"""Enable any sample problems in the DB."""
db = get_conn()
db.problems.update_many({}, {'$set': {'disabled': False}})
def ensure_within_competition():
"""Adjust the competition times so that protected methods are callable."""
db = get_conn()
db.settings.update_one({}, {'$set': {
'start_time': datetime.datetime.utcnow() - datetime.timedelta(1),
'end_time': datetime.datetime.utcnow() + datetime.timedelta(1),
}})
def ensure_before_competition():
"""Adjust the competition times so that @block_before_competition fails."""
db = get_conn()
db.settings.update_one({}, {'$set': {
        'start_time': datetime.datetime.utcnow() + datetime.timedelta(10),
        'end_time': datetime.datetime.utcnow() + datetime.timedelta(11),
}})
def ensure_after_competition():
"""Adjust the competition times so that @block_before_competition fails."""
db = get_conn()
db.settings.update_one({}, {'$set': {
'start_time': datetime.datetime.utcnow() - datetime.timedelta(11),
'end_time': datetime.datetime.utcnow() - datetime.timedelta(10),
}})
def get_problem_key(pid, team_name):
"""Get the flag for a given pid and team name."""
db = get_conn()
assigned_instance_id = db.teams.find_one({
'team_name': team_name
})['instances'][pid]
problem_instances = db.problems.find_one({
'pid': pid
})['instances']
assigned_instance = None
for instance in problem_instances:
if instance['iid'] == assigned_instance_id:
assigned_instance = instance
break
return assigned_instance['flag']
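# A minimal sketch of how the helpers above might be combined in a test; the
# test name is illustrative, and it reuses the same app()/get_conn() pattern
# as load_sample_problems.
def test_sample_problems_load_and_enable():
    load_sample_problems()
    with app().app_context():
        enable_sample_problems()
        ensure_within_competition()
        db = get_conn()
        assert db.problems.count_documents({'disabled': False}) > 0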
| 34.621969
| 1,680
| 0.486279
| 0
| 0
| 0
| 0
| 284
| 0.011702
| 0
| 0
| 18,431
| 0.759415
|
0212be2b426e881f46ce9b5faa0a4d6cd2b0e659
| 11
|
py
|
Python
|
py2codes/py2_exec.py
|
rhabacker/lib2to3import
|
36102fa844bf18234053d96f6b9b90f5c6068e87
|
[
"MIT"
] | null | null | null |
py2codes/py2_exec.py
|
rhabacker/lib2to3import
|
36102fa844bf18234053d96f6b9b90f5c6068e87
|
[
"MIT"
] | 1
|
2020-11-14T01:39:18.000Z
|
2020-11-17T07:54:28.000Z
|
py2codes/py2_exec.py
|
rhabacker/lib2to3import
|
36102fa844bf18234053d96f6b9b90f5c6068e87
|
[
"MIT"
] | 2
|
2019-08-12T09:58:05.000Z
|
2021-03-18T17:13:06.000Z
|
exec "123"
| 5.5
| 10
| 0.636364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5
| 0.454545
|
02154f47c33721ccd238e5aa1dcf948b5ec4704f
| 1,308
|
py
|
Python
|
Tools/RaiseCheck.py
|
17320692835RGF/buptoj
|
3d1e4719d757b4f0199e4451be7c0bee28e7c3ca
|
[
"MIT"
] | null | null | null |
Tools/RaiseCheck.py
|
17320692835RGF/buptoj
|
3d1e4719d757b4f0199e4451be7c0bee28e7c3ca
|
[
"MIT"
] | null | null | null |
Tools/RaiseCheck.py
|
17320692835RGF/buptoj
|
3d1e4719d757b4f0199e4451be7c0bee28e7c3ca
|
[
"MIT"
] | null | null | null |
import MySQLdb
from queue import Queue
import socket
import json
from time import sleep
import threading
import os
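# Scans every submission stored in the judgestatus_judgestatus table and
# records, for each user, the maximum number of "raise" occurrences found in
# any single submission, then writes the ranking to raisenum.txt.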
queue = Queue()  # global judging queue
myjsonfile = open("./setting.json", 'r')
judgerjson = json.loads(myjsonfile.read())
if os.environ.get("DB_USER"):
judgerjson["db_ip"] = os.environ.get("DB_HOST")
judgerjson["db_pass"] = os.environ.get("DB_PASSWORD")
judgerjson["db_user"] = os.environ.get("DB_USER")
judgerjson["db_port"] = os.environ.get("DB_PORT")
try:
db = MySQLdb.connect(judgerjson["db_ip"], judgerjson["db_user"], judgerjson["db_pass"],
judgerjson["db_database"], int(judgerjson["db_port"]), charset='utf8')
except Exception as e:
print(e)
exit(1)
cursor = db.cursor()
cursor.execute("SELECT user, code from judgestatus_judgestatus")
data = cursor.fetchall()
raisenum = {}
for d in data:
id = str(d[0])
code = str(d[1])
raisenum[id] = 0
for d in data:
id = str(d[0])
code = str(d[1])
raisenum[id] = max(raisenum[id], code.count("raise"))
li = sorted(raisenum.items(), key=lambda item: item[1], reverse=True)
file = open("raisenum.txt", "w")
for l in li:
file.write(l[0]+" "+str(l[1])+'\n')
print(l[0]+" "+str(l[1]))
| 22.169492
| 96
| 0.603211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 260
| 0.19697
|
02159d47bc916fdaaa02496845099d898342fd4d
| 909
|
py
|
Python
|
first_steps_in_oop/programmer.py
|
ivan-yosifov88/python_oop_june_2021
|
7ae6126065abbcce7ce97c86d1150ae307360249
|
[
"MIT"
] | 1
|
2021-08-03T19:14:24.000Z
|
2021-08-03T19:14:24.000Z
|
first_steps_in_oop/programmer.py
|
ivan-yosifov88/python_oop_june_2021
|
7ae6126065abbcce7ce97c86d1150ae307360249
|
[
"MIT"
] | null | null | null |
first_steps_in_oop/programmer.py
|
ivan-yosifov88/python_oop_june_2021
|
7ae6126065abbcce7ce97c86d1150ae307360249
|
[
"MIT"
] | null | null | null |
class Programmer:
def __init__(self, name, language, skills):
self.name = name
self.language = language
self.skills = skills
def watch_course(self, course_name, language, skills_earned):
if not self.language == language:
return f"{self.name} does not know {language}"
self.skills += skills_earned
return f"{self.name} watched {course_name}"
def change_language(self, new_language, skills_needed):
if not skills_needed <= self.skills:
needed_skills = skills_needed - self.skills
return f"{self.name} needs {needed_skills} more skills"
if self.language == new_language:
return f"{self.name} already knows {self.language}"
previous_language = self.language
self.language = new_language
return f"{self.name} switched from {previous_language} to {new_language}"
| 37.875
| 81
| 0.651265
| 909
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 233
| 0.256326
|
0216b8ad609381ab0fb91a808c2538b44b5d722d
| 1,557
|
py
|
Python
|
unit_test.py
|
LSTM-Kirigaya/MsnEnvironment
|
29c6e02525c7671f304d0f9d7689942509f12a16
|
[
"MIT"
] | null | null | null |
unit_test.py
|
LSTM-Kirigaya/MsnEnvironment
|
29c6e02525c7671f304d0f9d7689942509f12a16
|
[
"MIT"
] | null | null | null |
unit_test.py
|
LSTM-Kirigaya/MsnEnvironment
|
29c6e02525c7671f304d0f9d7689942509f12a16
|
[
"MIT"
] | null | null | null |
from env import MsnDiscrete, MaplessNaviEnv
from robot_utils import *
from robot_utils.log import msn_debug
from robot_utils.scene import *
from env import *
from collections import Counter
MAX_FORCE = 10.
TARGET_VELOCITY = 5.
MULTIPLY = 2.0
def keyboard_control():
global MAX_FORCE
global TARGET_VELOCITY
global MULTIPLY
cid = p.connect(p.GUI)
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 0)
p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
    # Load the robot and the other objects
_ = p.loadURDF("plane.urdf")
urdf_path = os.path.join(os.path.dirname(__file__), "robot_utils/urdf/miniBox.urdf")
robot_id = p.loadURDF(urdf_path, basePosition=[0., 0., 0.2], baseOrientation=p.getQuaternionFromEuler([0, 0, np.pi / 2.]))
p.setJointMotorControlArray(
bodyUniqueId=robot_id,
jointIndices=[0, 1],
controlMode=p.VELOCITY_CONTROL,
forces=[0., 0.]
)
p.setGravity(0, 0, -9.8)
p.setRealTimeSimulation(1)
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1)
while True:
basePos, baseOrientation = p.getBasePositionAndOrientation(robot_id)
baseEuler = p.getEulerFromQuaternion(baseOrientation)
keyboard_control_miniBox(robot_id)
def u_MsnDiscrete():
env = MsnDiscrete(render=True, laser_num=18)
state = env.reset()
done = False
while not done:
action = env.sample()
state, reward, done, info = env.step(action)
env.render()
# keyboard_control()
u_MsnDiscrete()
| 28.833333
| 126
| 0.705202
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 98
| 0.062065
|
0218bf8ae5e0f91bee4226c0b79fa035f5a60a3c
| 12,358
|
py
|
Python
|
meta-refkit-core/lib/ostree/ostreeupdate.py
|
kraj/intel-iot-refkit
|
04cd5afec0c41deeb5e1a48b43a0a31e708295c1
|
[
"MIT"
] | 36
|
2017-02-20T04:04:28.000Z
|
2022-02-17T05:36:33.000Z
|
meta-refkit-core/lib/ostree/ostreeupdate.py
|
kraj/intel-iot-refkit
|
04cd5afec0c41deeb5e1a48b43a0a31e708295c1
|
[
"MIT"
] | 284
|
2017-02-06T08:51:52.000Z
|
2021-11-03T16:52:16.000Z
|
meta-refkit-core/lib/ostree/ostreeupdate.py
|
kraj/intel-iot-refkit
|
04cd5afec0c41deeb5e1a48b43a0a31e708295c1
|
[
"MIT"
] | 65
|
2017-02-03T12:36:16.000Z
|
2021-02-18T11:00:46.000Z
|
import bb
import oe.path
import glob
import hashlib
import os.path
import shutil
import string
import subprocess
VARIABLES = (
'IMAGE_ROOTFS',
'OSTREE_BRANCHNAME',
'OSTREE_COMMIT_SUBJECT',
'OSTREE_REPO',
'OSTREE_GPGDIR',
'OSTREE_GPGID',
'OSTREE_OS',
'OSTREE_REMOTE',
'OSTREE_BARE',
'OSTREE_ROOTFS',
'OSTREE_SYSROOT',
)
class OSTreeUpdate(string.Formatter):
"""
Create an OSTree-enabled version of an image rootfs, using an intermediate
per-image OSTree bare-user repository. Optionally export the content
of this repository into HTTP-exportable archive-z2 OSTree repository
which clients can use to pull the image in as an OSTree upgrade.
"""
WHITESPACES_ALLOWED = (
'OSTREE_COMMIT_SUBJECT',
)
def __init__(self, d):
for var in VARIABLES:
value = d.getVar(var)
if var not in self.WHITESPACES_ALLOWED:
for c in '\n\t ':
if c in value:
bb.fatal('%s=%s is not allowed to contain whitespace' % (var, value))
setattr(self, var, value)
self.gpg_sign = ''
if self.OSTREE_GPGID:
if self.OSTREE_GPGDIR:
self.gpg_sign += self.format(' --gpg-homedir={OSTREE_GPGDIR}')
self.gpg_sign += self.format(' --gpg-sign={OSTREE_GPGID}')
def get_value(self, key, args, kwargs):
"""
This class inherits string.Formatter and thus has self.format().
We extend the named field lookup so that object attributes and thus
the variables above can be used directly.
"""
if isinstance(key, str) and key not in kwargs:
return getattr(self, key)
else:
return super().get_value(key, args, kwargs)
def run_ostree(self, command, *args, **kwargs):
cmd = 'ostree ' + self.format(command, *args, **kwargs)
bb.debug(1, 'Running: {0}'.format(cmd))
output = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
return output
def copy_sysroot(self):
"""
Seed the OSTree sysroot with the pristine one.
"""
bb.note(self.format('Copying pristine rootfs {IMAGE_ROOTFS} to OSTree sysroot {OSTREE_SYSROOT} ...'))
oe.path.copyhardlinktree(self.IMAGE_ROOTFS, self.OSTREE_SYSROOT)
def copy_kernel(self):
"""
Copy and checksum kernel, initramfs, and the UEFI app in place for OSTree.
TODO: why?
"""
uefidir = os.path.join(self.IMAGE_ROOTFS, 'boot')
uefibootdir = os.path.join(uefidir, 'EFI', 'BOOT')
uefiinternalbootdir = os.path.join(uefidir, 'EFI_internal_storage', 'BOOT')
uefiappname = glob.glob(os.path.join(uefibootdir, 'boot*.efi'))
if len(uefiappname) != 1:
bb.fatal(self.format('Ambiguous UEFI app in {0}: {1}', uefibootdir, uefiappname))
uefiappname = os.path.basename(uefiappname[0])
ostreeboot = os.path.join(self.OSTREE_SYSROOT, 'usr', 'lib', 'ostree-boot')
bb.note(self.format('Copying and checksumming UEFI combo app(s) {0} into OSTree sysroot {1} ...', uefiappname, ostreeboot))
bb.utils.mkdirhier(ostreeboot)
def copy_app(src, dst):
with open(src, 'rb') as f:
data = f.read()
chksum = hashlib.sha256(data).hexdigest()
with open(dst + '-' + chksum, 'wb') as f:
f.write(data)
shutil.copystat(src, dst + '-' + chksum)
return chksum
# OSTree doesn't care too much about the actual checksums on kernel
# and initramfs. We use the same checksum derived from the UEFI combo
# app for all parts related to it.
chksum = copy_app(os.path.join(uefibootdir, uefiappname),
os.path.join(ostreeboot, uefiappname + '.ext'))
copy_app(os.path.join(uefiinternalbootdir, uefiappname),
os.path.join(ostreeboot, uefiappname + '.int'))
# OSTree expects to find kernel and initramfs, so we provide it
# although the files are not used.
# TODO: does it really make sense to put the real content there?
# It's not going to get used.
bb.note('Extracting and checksumming kernel, initramfs for ostree...')
kernel = os.path.join(ostreeboot, 'vmlinuz')
initrd = os.path.join(ostreeboot, 'initramfs')
# TODO: where does objcopy come from?
#subprocess.check_output('objcopy --dump-section .linux=%s --dump-section .initrd=%s %s' %
# (kernel, initrd, os.path.join(uefibootdir, uefiappname)))
# os.rename(kernel, kernel + '-' + chksum)
# os.rename(initrd, initrd + '-' + chksum)
# For now just create dummy files.
open(kernel + '-' + chksum, 'w').close()
open(initrd + '-' + chksum, 'w').close()
def ostreeify_sysroot(self):
"""
Mangle sysroot into an OSTree-compatible layout.
"""
# Note that everything created/shuffled here will end up getting
# relocated under the ostree deployment directory for the image
        # we're building. Everything that needs to get created relative
        # to the final physical rootfs should be done in finalize_sysroot.
bb.note('* Shuffling sysroot to OSTree-compatible layout...')
# The OSTree deployment model requires the following directories
# and symlinks in place:
#
# /sysroot: the real physical rootfs bind-mounted here
# /sysroot/ostree: ostree repo and deployments ('checkouts')
# /ostree: symlinked to /sysroot/ostree for consistent access
#
# Additionally the deployment model suggests setting up deployment
# root symlinks for the following:
#
# /home -> /var/home (further linked -> /sysroot/home)
# /opt -> /var/opt
# /srv -> /var/srv
# /root -> /var/roothome
# /usr/local -> /var/local
# /mnt -> /var/mnt
# /tmp -> /sysroot/tmp
#
# In this model, /var can be a persistent second data partition.
# We just use one partition, so instead we have:
#
# /boot = mount point for persistent /boot directory in the root partition
# /var = mount point for persistent /ostree/deploy/refkit/var
# /home = mount point for persistent /home directory in the root partition
# /mnt = symlink to var/mnt
# /tmp = symlink to sysroot/tmp (persistent)
#
# Additionally,
# /etc is moved to /usr/etc as the default config
sysroot = os.path.join(self.OSTREE_SYSROOT, 'sysroot')
bb.utils.mkdirhier(sysroot)
os.symlink('sysroot/ostree', os.path.join(self.OSTREE_SYSROOT, 'ostree'))
for dir, link in (
('boot', None),
('var', None),
('home', None),
('mnt', 'var/mnt'),
('tmp', 'sysroot/tmp'),
):
path = os.path.join(self.OSTREE_SYSROOT, dir)
if os.path.isdir(path):
shutil.rmtree(path)
if link is None:
bb.utils.mkdirhier(path)
else:
os.symlink(link, path)
# Preserve read-only copy of /etc for OSTree's three-way merge.
os.rename(os.path.join(self.OSTREE_SYSROOT, 'etc'),
os.path.join(self.OSTREE_SYSROOT, 'usr', 'etc'))
def prepare_sysroot(self):
"""
Prepare a rootfs for committing into an OSTree repository.
"""
if os.path.isdir(self.OSTREE_SYSROOT):
bb.note(self.format('OSTree sysroot {OSTREE_SYSROOT} already exists, nuking it...'))
shutil.rmtree(self.OSTREE_SYSROOT)
bb.note(self.format('Preparing OSTree sysroot {OSTREE_SYSROOT} ...'))
self.copy_sysroot()
self.copy_kernel()
self.ostreeify_sysroot()
def populate_repo(self):
"""
Populate primary OSTree repository (bare-user mode) with the given sysroot.
"""
bb.note(self.format('Populating OSTree primary repository {OSTREE_BARE} ...'))
if os.path.isdir(self.OSTREE_BARE):
shutil.rmtree(self.OSTREE_BARE)
bb.utils.mkdirhier(self.OSTREE_BARE)
self.run_ostree('--repo={OSTREE_BARE} init --mode=bare-user')
self.run_ostree('--repo={OSTREE_BARE} commit '
'{gpg_sign} '
'--tree=dir={OSTREE_SYSROOT} '
'--branch={OSTREE_BRANCHNAME} '
'--subject="{OSTREE_COMMIT_SUBJECT}"')
output = self.run_ostree('--repo={OSTREE_BARE} summary -u')
bb.note(self.format('OSTree primary repository {OSTREE_BARE} summary:\n{0}', output))
def checkout_sysroot(self):
"""
Replicate the ostree repository into the OSTree rootfs and make a checkout/deploy.
"""
if os.path.isdir(self.OSTREE_ROOTFS):
shutil.rmtree(self.OSTREE_ROOTFS)
bb.note(self.format('Initializing OSTree rootfs {OSTREE_ROOTFS} ...'))
bb.utils.mkdirhier(self.OSTREE_ROOTFS)
self.run_ostree('admin --sysroot={OSTREE_ROOTFS} init-fs {OSTREE_ROOTFS}')
self.run_ostree('admin --sysroot={OSTREE_ROOTFS} os-init {OSTREE_OS}')
bb.note(self.format('Replicating primary OSTree repository {OSTREE_BARE} branch {OSTREE_BRANCHNAME} into OSTree rootfs {OSTREE_ROOTFS} ...'))
self.run_ostree('--repo={OSTREE_ROOTFS}/ostree/repo pull-local --remote=updates {OSTREE_BARE} {OSTREE_BRANCHNAME}')
bb.note('Deploying sysroot from OSTree sysroot repository...')
self.run_ostree('admin --sysroot={OSTREE_ROOTFS} deploy --os={OSTREE_OS} updates:{OSTREE_BRANCHNAME}')
# OSTree initialized var for our OS, but we want the original rootfs content instead.
src = os.path.join(self.IMAGE_ROOTFS, 'var')
dst = os.path.join(self.OSTREE_ROOTFS, 'ostree', 'deploy', self.OSTREE_OS, 'var')
bb.note(self.format('Copying /var from rootfs to OSTree rootfs as {} ...', dst))
shutil.rmtree(dst)
oe.path.copyhardlinktree(src, dst)
if self.OSTREE_REMOTE:
bb.note(self.format('Setting OSTree remote to {OSTREE_REMOTE} ...'))
self.run_ostree('remote add --repo={OSTREE_ROOTFS}/ostree/repo '
'--gpg-import={OSTREE_GPGDIR}/pubring.gpg '
'updates {OSTREE_REMOTE}')
def finalize_sysroot(self):
"""
Finalize the physical root directory after the ostree checkout.
"""
bb.note(self.format('Creating EFI mount point /boot/efi in OSTree rootfs {OSTREE_ROOTFS} ...'))
bb.utils.mkdirhier(os.path.join(self.OSTREE_ROOTFS, 'boot', 'efi'))
bb.note(self.format('Copying pristine rootfs {IMAGE_ROOTFS}/home to OSTree rootfs {OSTREE_ROOTFS} ...'))
oe.path.copyhardlinktree(os.path.join(self.IMAGE_ROOTFS, 'home'),
os.path.join(self.OSTREE_ROOTFS, 'home'))
def prepare_rootfs(self):
"""
Create the intermediate, bare repo and a fully functional rootfs for the target device
where the current build is deployed.
"""
self.prepare_sysroot()
self.populate_repo()
self.checkout_sysroot()
self.finalize_sysroot()
def export_repo(self):
"""
Export data from a primary OSTree repository to the given (archive-z2) one.
"""
bb.note(self.format('Exporting primary repository {OSTREE_BARE} to export repository {OSTREE_REPO}...'))
if not os.path.isdir(self.OSTREE_REPO):
bb.note("Initializing repository %s for exporting..." % self.OSTREE_REPO)
bb.utils.mkdirhier(self.OSTREE_REPO)
self.run_ostree('--repo={OSTREE_REPO} init --mode=archive-z2')
self.run_ostree('--repo={OSTREE_REPO} pull-local --remote={OSTREE_OS} {OSTREE_BARE} {OSTREE_BRANCHNAME}')
self.run_ostree('--repo={OSTREE_REPO} commit {gpg_sign} --branch={OSTREE_BRANCHNAME} --tree=ref={OSTREE_OS}:{OSTREE_BRANCHNAME}')
self.run_ostree('--repo={OSTREE_REPO} summary {gpg_sign} -u')
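# A minimal usage sketch, assuming a BitBake datastore `d` that defines the
# IMAGE_ROOTFS/OSTREE_* variables listed in VARIABLES above:
#
#   updater = OSTreeUpdate(d)
#   updater.prepare_rootfs()  # sysroot -> bare-user repo -> deployed rootfs
#   updater.export_repo()     # optional archive-z2 export for HTTP pulls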
| 42.177474
| 149
| 0.606732
| 11,991
| 0.970303
| 0
| 0
| 0
| 0
| 0
| 0
| 6,220
| 0.503318
|
021a272ec30f97420b7269bd3ee1d988857ff0cb
| 123
|
py
|
Python
|
returns-the- value-to-the-variable.py
|
fatihwin-yt/a-Python-Tutorial-of-2021
|
7d2110f80efdfa79437bf64f8edcd08ec3d61926
|
[
"MIT"
] | 1
|
2021-03-29T02:29:58.000Z
|
2021-03-29T02:29:58.000Z
|
returns-the- value-to-the-variable.py
|
fatihwin-yt/a-Python-Tutorial-of-2021
|
7d2110f80efdfa79437bf64f8edcd08ec3d61926
|
[
"MIT"
] | null | null | null |
returns-the- value-to-the-variable.py
|
fatihwin-yt/a-Python-Tutorial-of-2021
|
7d2110f80efdfa79437bf64f8edcd08ec3d61926
|
[
"MIT"
] | 1
|
2021-03-27T15:00:06.000Z
|
2021-03-27T15:00:06.000Z
|
# assigns the value to the variable #
x = 900
print(x)
# print takes the argument x and outputs the value stored in the variable #
| 20.5
| 63
| 0.699187
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 100
| 0.813008
|
021a57faf00fc6d4266f3268c12b51f08834cc6c
| 1,453
|
py
|
Python
|
app.py
|
alvaropp/interactive-fantasy-map
|
b75ebc734970790bc5779865ab5e786e50250709
|
[
"MIT"
] | 4
|
2021-02-11T03:23:40.000Z
|
2022-02-13T01:56:58.000Z
|
app.py
|
alvaropp/interactive-fantasy-map
|
b75ebc734970790bc5779865ab5e786e50250709
|
[
"MIT"
] | null | null | null |
app.py
|
alvaropp/interactive-fantasy-map
|
b75ebc734970790bc5779865ab5e786e50250709
|
[
"MIT"
] | null | null | null |
from glob import glob
from flask import flash, Flask, Markup, render_template, redirect, request, send_from_directory
from form import MapForm
from process_new_map import create_map_from_form
app = Flask(__name__)
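# The session secret is read from a local secret.txt file, which must exist
# next to app.py since it is opened with a relative path.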
with open("secret.txt", "r") as secret_f:
app.config["SECRET_KEY"] = secret_f.read()
@app.route("/", methods=["GET", "POST"])
def home():
form = MapForm()
if form.validate_on_submit():
map_website_path = create_map_from_form(form)
map_name = map_website_path.split("/")[-1].split(".")[0]
full_url = f"{request.base_url}maps/" + map_website_path
flash(Markup(f"Map created successfully: <a href={full_url}>{map_name}</a>"))
return render_template("index.html", title="Create a new map", form=form)
@app.route("/examples", methods=["GET", "POST"])
def examples():
examples = [path.split("/")[-1].split("_")[-1].split(".")[0] for path in glob("templates/example_*")]
return render_template("examples.html", examples=examples)
@app.route("/help", methods=["GET", "POST"])
def help():
return render_template("help.html")
@app.route("/maps/<map_uuid>/<map_name>.html")
def show_map(map_uuid, map_name):
return render_template("map_template.html", data=[map_uuid, map_name])
@app.route("/examples/<example_name>.html")
def show_example_map(example_name):
return render_template(f"example_{example_name}.html")
if __name__ == "__main__":
app.run(debug=True)
| 29.653061
| 105
| 0.692361
| 0
| 0
| 0
| 0
| 1,078
| 0.741913
| 0
| 0
| 385
| 0.264969
|
021afdb076c4754aa3ba63a750975318ad4eba13
| 4,121
|
py
|
Python
|
monai/deploy/core/execution_context.py
|
jlvahldiek/monai-deploy-app-sdk
|
050aeabec581067a11566f59a2970b075d36ae7c
|
[
"Apache-2.0"
] | 28
|
2021-09-17T18:16:42.000Z
|
2022-03-31T16:32:36.000Z
|
monai/deploy/core/execution_context.py
|
jlvahldiek/monai-deploy-app-sdk
|
050aeabec581067a11566f59a2970b075d36ae7c
|
[
"Apache-2.0"
] | 109
|
2021-09-17T18:34:31.000Z
|
2022-03-31T21:04:35.000Z
|
monai/deploy/core/execution_context.py
|
jlvahldiek/monai-deploy-app-sdk
|
050aeabec581067a11566f59a2970b075d36ae7c
|
[
"Apache-2.0"
] | 11
|
2021-09-17T20:23:31.000Z
|
2022-03-29T08:55:19.000Z
|
# Copyright 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from monai.deploy.core.domain.datapath import NamedDataPath
# To avoid "Cannot resolve forward reference" error
# : https://github.com/agronholm/sphinx-autodoc-typehints#dealing-with-circular-imports
from . import operator
from .datastores import Datastore, MemoryDatastore
from .io_context import InputContext, OutputContext
from .models import Model
class BaseExecutionContext:
"""A base execution context for the application.
BaseExecutionContext is responsible for storing the input and output data paths,
and the models.
Those pieces of information are used by the Operator (in `compute()` method) to perform the execution.
The input and output data paths from the application's context are available through
`context.input.get()` and `context.output.get()`.
"""
def __init__(
self,
datastore: Optional[Datastore],
input: NamedDataPath,
output: NamedDataPath,
models: Optional[Model] = None,
):
if datastore is None:
self._storage: Datastore = MemoryDatastore()
else:
self._storage = datastore
self._input = input
self._output = output
if models is None:
self._models = Model("") # set a null model
else:
self._models = models
@property
def storage(self) -> Datastore:
return self._storage
@property
def input(self) -> NamedDataPath:
return self._input
@property
def output(self) -> NamedDataPath:
return self._output
@property
def models(self) -> Model:
return self._models
class ExecutionContext(BaseExecutionContext):
"""An execution context for the operator."""
def __init__(self, context: BaseExecutionContext, op: "operator.Operator"):
super().__init__(context.storage, context.input, context.output, context.models)
self._context = context
self._op = op
self._input_context = InputContext(self)
self._output_context = OutputContext(self)
@property
def op(self):
return self._op
def get_execution_index(self):
"""Returns the execution index for the operator.
The execution index is incremented every time before the operator is executed.
For the first time, the execution index is set to 0.
Returns:
The execution index(int) for the operator.
"""
storage = self._context.storage
parent_node = f"/operators/{self.op.uid}"
key = f"{parent_node}/execution_index"
if storage.exists(key):
return storage.get(key)
else:
storage.put(key, 0)
return 0
def increase_execution_index(self):
"""Increases the execution index for the operator.
This index number would be increased once for each call to the operator
so that the operator can be executed multiple times.
"""
storage = self._context.storage
parent_node = f"/operators/{self.op.uid}"
key = f"{parent_node}/execution_index"
new_execution_index = self.get_execution_index() + 1
storage.put(key, new_execution_index)
return new_execution_index
@property
def input_context(self):
"""Returns the input context for the operator."""
return self._input_context
@property
def output_context(self):
"""Returns the output context for the operator."""
return self._output_context
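# A minimal sketch of the execution-index bookkeeping above, assuming an
# ExecutionContext `exec_ctx` already built for some operator:
#
#   exec_ctx.get_execution_index()       # first call stores and returns 0
#   exec_ctx.increase_execution_index()  # returns 1, persisted under
#                                        # /operators/<op uid>/execution_index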
| 32.448819
| 106
| 0.674351
| 3,158
| 0.766319
| 0
| 0
| 608
| 0.147537
| 0
| 0
| 1,902
| 0.461538
|
021b5b2946a725db8a4879a92f48d89c65c21d97
| 11,698
|
py
|
Python
|
LeetCode-All-Solution/Python3/LC-1728-Cat-and-Mouse-II.py
|
YuweiYin/Algorithm_YuweiYin
|
28648fac59c5a4e3c907978cbd1b3e662ba18fd5
|
[
"MIT"
] | null | null | null |
LeetCode-All-Solution/Python3/LC-1728-Cat-and-Mouse-II.py
|
YuweiYin/Algorithm_YuweiYin
|
28648fac59c5a4e3c907978cbd1b3e662ba18fd5
|
[
"MIT"
] | null | null | null |
LeetCode-All-Solution/Python3/LC-1728-Cat-and-Mouse-II.py
|
YuweiYin/Algorithm_YuweiYin
|
28648fac59c5a4e3c907978cbd1b3e662ba18fd5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""=================================================================
@Project : Algorithm_YuweiYin/LeetCode-All-Solution/Python3
@File : LC-1728-Cat-and-Mouse-II.py
@Author : [YuweiYin](https://github.com/YuweiYin)
@Date : 2022-05-10
=================================================================="""
import sys
import time
from typing import List, Tuple
import collections
"""
LeetCode - 1728 - (Hard) - Cat and Mouse II
https://leetcode.com/problems/cat-and-mouse-ii/
Description:
A game is played by a cat and a mouse named Cat and Mouse.
The environment is represented by a grid of size rows x cols,
where each element is a wall, floor, player (Cat, Mouse), or food.
Players are represented by the characters 'C'(Cat),'M'(Mouse).
Floors are represented by the character '.' and can be walked on.
Walls are represented by the character '#' and cannot be walked on.
Food is represented by the character 'F' and can be walked on.
There is only one of each character 'C', 'M', and 'F' in grid.
Mouse and Cat play according to the following rules:
Mouse moves first, then they take turns to move.
During each turn, Cat and Mouse can jump in one of the four directions (left, right, up, down).
They cannot jump over the wall nor outside of the grid.
catJump, mouseJump are the maximum lengths Cat and Mouse can jump at a time, respectively.
Cat and Mouse can jump less than the maximum length.
Staying in the same position is allowed.
Mouse can jump over Cat.
The game can end in 4 ways:
If Cat occupies the same position as Mouse, Cat wins.
If Cat reaches the food first, Cat wins.
If Mouse reaches the food first, Mouse wins.
If Mouse cannot get to the food within 1000 turns, Cat wins.
Given a rows x cols matrix grid and two integers catJump and mouseJump,
return true if Mouse can win the game if both Cat and Mouse play optimally, otherwise return false.
Example 1:
Input: grid = ["####F","#C...","M...."], catJump = 1, mouseJump = 2
Output: true
Explanation: Cat cannot catch Mouse on its turn nor can it get the food before Mouse.
Example 2:
Input: grid = ["M.C...F"], catJump = 1, mouseJump = 4
Output: true
Example 3:
Input: grid = ["M.C...F"], catJump = 1, mouseJump = 3
Output: false
Constraints:
rows == grid.length
cols = grid[i].length
1 <= rows, cols <= 8
grid[i][j] consist only of characters 'C', 'M', 'F', '.', and '#'.
There is only one of each character 'C', 'M', and 'F' in grid.
1 <= catJump, mouseJump <= 8
"""
class Solution:
def __init__(self):
self.MOUSE_TURN = 0
self.CAT_TURN = 1
self.UNKNOWN = 0
self.MOUSE_WIN = 1
self.CAT_WIN = 2
self.MAX_MOVE = 1000
self.DIRECTION = ((-1, 0), (1, 0), (0, -1), (0, 1))
def canMouseWin(self, grid: List[str], catJump: int, mouseJump: int) -> bool:
# exception case
assert isinstance(grid, list) and 1 <= len(grid) and 1 <= len(grid[0])
assert isinstance(catJump, int) and 1 <= catJump
assert isinstance(mouseJump, int) and 1 <= mouseJump
# main method: (Game Theory & Topological Sorting)
return self._canMouseWin(grid, catJump, mouseJump)
def _canMouseWin(self, grid: List[str], catJump: int, mouseJump: int) -> bool:
assert isinstance(grid, list)
max_row = len(grid)
assert max_row >= 1
max_col = len(grid[0])
assert max_col >= 1
total_block = max_row * max_col
def __get_pos(_row: int, _col: int) -> int:
return int(_row * max_col + _col)
# get the initial positions of the mouse, cat, and food
mouse_start_pos = cat_start_pos = food_pos = 0
for row_idx in range(max_row):
for col_idx in range(max_col):
cur_block = grid[row_idx][col_idx]
if cur_block == 'M':
mouse_start_pos = __get_pos(row_idx, col_idx)
elif cur_block == 'C':
cat_start_pos = __get_pos(row_idx, col_idx)
elif cur_block == 'F':
food_pos = __get_pos(row_idx, col_idx)
# calculate the degree of each state
degrees = [[[0, 0] for _ in range(total_block)] for _ in range(total_block)]
for mouse in range(total_block):
row_mouse, col_mouse = divmod(mouse, max_col)
if grid[row_mouse][col_mouse] == '#':
continue
for cat in range(total_block):
row_cat, col_cat = divmod(cat, max_col)
if grid[row_cat][col_cat] == '#':
continue
degrees[mouse][cat][self.MOUSE_TURN] += 1
degrees[mouse][cat][self.CAT_TURN] += 1
for d_row, d_col in self.DIRECTION:
row, col, jump = row_mouse + d_row, col_mouse + d_col, 1
while 0 <= row < max_row and 0 <= col < max_col and grid[row][col] != '#' and jump <= mouseJump:
next_mouse = __get_pos(row, col)
next_cat = __get_pos(row_cat, col_cat)
degrees[next_mouse][next_cat][self.MOUSE_TURN] += 1
row += d_row
col += d_col
jump += 1
row, col, jump = row_cat + d_row, col_cat + d_col, 1
while 0 <= row < max_row and 0 <= col < max_col and grid[row][col] != '#' and jump <= catJump:
next_mouse = __get_pos(row_mouse, col_mouse)
next_cat = __get_pos(row, col)
degrees[next_mouse][next_cat][self.CAT_TURN] += 1
row += d_row
col += d_col
jump += 1
res = [[[[0, 0], [0, 0]] for _ in range(total_block)] for _ in range(total_block)]
queue = collections.deque()
# if the cat and mouse are in the same block, then the cat wins
for pos in range(total_block):
row, col = divmod(pos, max_col)
if grid[row][col] == '#':
continue
res[pos][pos][self.MOUSE_TURN][0] = self.CAT_WIN
res[pos][pos][self.MOUSE_TURN][1] = 0
res[pos][pos][self.CAT_TURN][0] = self.CAT_WIN
res[pos][pos][self.CAT_TURN][1] = 0
queue.append((pos, pos, self.MOUSE_TURN))
queue.append((pos, pos, self.CAT_TURN))
# if the cat and food are in the same block, then the cat wins
for mouse in range(total_block):
row_mouse, col_mouse = divmod(mouse, max_col)
if grid[row_mouse][col_mouse] == '#' or mouse == food_pos:
continue
res[mouse][food_pos][self.MOUSE_TURN][0] = self.CAT_WIN
res[mouse][food_pos][self.MOUSE_TURN][1] = 0
res[mouse][food_pos][self.CAT_TURN][0] = self.CAT_WIN
res[mouse][food_pos][self.CAT_TURN][1] = 0
queue.append((mouse, food_pos, self.MOUSE_TURN))
queue.append((mouse, food_pos, self.CAT_TURN))
# if the mouse and food are in the same block \land cat is somewhere else, then the mouse wins
for cat in range(total_block):
row_cat, col_cat = divmod(cat, max_col)
if grid[row_cat][col_cat] == '#' or cat == food_pos:
continue
res[food_pos][cat][self.MOUSE_TURN][0] = self.MOUSE_WIN
res[food_pos][cat][self.MOUSE_TURN][1] = 0
res[food_pos][cat][self.CAT_TURN][0] = self.MOUSE_WIN
res[food_pos][cat][self.CAT_TURN][1] = 0
queue.append((food_pos, cat, self.MOUSE_TURN))
queue.append((food_pos, cat, self.CAT_TURN))
def __get_prev_state(_mouse: int, _cat: int, _turn: int) -> List[Tuple[int, int, int]]:
r_mouse, c_mouse = divmod(_mouse, max_col)
r_cat, c_cat = divmod(_cat, max_col)
prev_turn = self.CAT_TURN if _turn == self.MOUSE_TURN else self.MOUSE_TURN
max_jump = mouseJump if prev_turn == self.MOUSE_TURN else catJump
r_start = r_mouse if prev_turn == self.MOUSE_TURN else r_cat
c_start = c_mouse if prev_turn == self.MOUSE_TURN else c_cat
prev_state = [(_mouse, _cat, prev_turn)]
for d_r, d_c in self.DIRECTION:
_r, _c, _jump = r_start + d_r, c_start + d_c, 1
                while 0 <= _r < max_row and 0 <= _c < max_col and grid[_r][_c] != '#' and _jump <= max_jump:
prev_r_mouse = _r if prev_turn == self.MOUSE_TURN else r_mouse
prev_c_mouse = _c if prev_turn == self.MOUSE_TURN else c_mouse
prev_mouse_pos = __get_pos(prev_r_mouse, prev_c_mouse)
prev_r_cat = r_cat if prev_turn == self.MOUSE_TURN else _r
prev_c_cat = c_cat if prev_turn == self.MOUSE_TURN else _c
prev_cat_pos = __get_pos(prev_r_cat, prev_c_cat)
prev_state.append((prev_mouse_pos, prev_cat_pos, prev_turn))
_r += d_r
_c += d_c
_jump += 1
return prev_state
# Topological Sorting
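        # Retrograde analysis over the game graph: starting from the terminal
        # states enqueued above, results are propagated backwards. A
        # predecessor is decided as soon as the player to move there can reach
        # a state they win; once all of a state's successors are resolved
        # against that player (its degree drops to 0), it is marked lost.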
while queue:
mouse, cat, turn = queue.popleft()
result = res[mouse][cat][turn][0]
moves = res[mouse][cat][turn][1]
for previous_mouse, previous_cat, previous_turn in __get_prev_state(mouse, cat, turn):
if res[previous_mouse][previous_cat][previous_turn][0] == self.UNKNOWN:
if (result == self.MOUSE_WIN and previous_turn == self.MOUSE_TURN) or \
(result == self.CAT_WIN and previous_turn == self.CAT_TURN):
res[previous_mouse][previous_cat][previous_turn][0] = result
res[previous_mouse][previous_cat][previous_turn][1] = moves + 1
queue.append((previous_mouse, previous_cat, previous_turn))
else:
degrees[previous_mouse][previous_cat][previous_turn] -= 1
if degrees[previous_mouse][previous_cat][previous_turn] == 0:
loseResult = self.CAT_WIN if previous_turn == self.MOUSE_TURN else self.MOUSE_WIN
res[previous_mouse][previous_cat][previous_turn][0] = loseResult
res[previous_mouse][previous_cat][previous_turn][1] = moves + 1
queue.append((previous_mouse, previous_cat, previous_turn))
if res[mouse_start_pos][cat_start_pos][self.MOUSE_TURN][0] == self.MOUSE_WIN and \
res[mouse_start_pos][cat_start_pos][self.MOUSE_TURN][1] <= self.MAX_MOVE:
return True
else:
return False
def main():
# Example 1: Output: true
# grid = ["####F", "#C...", "M...."]
# catJump = 1
# mouseJump = 2
# Example 2: Output: true
# grid = ["M.C...F"]
# catJump = 1
# mouseJump = 4
# Example 3: Output: false
grid = ["M.C...F"]
catJump = 1
mouseJump = 3
# init instance
solution = Solution()
# run & time
start = time.process_time()
ans = solution.canMouseWin(grid, catJump, mouseJump)
end = time.process_time()
# show answer
print('\nAnswer:')
print(ans)
# show time consumption
print('Running Time: %.5f ms' % ((end - start) * 1000))
if __name__ == "__main__":
sys.exit(main())
| 44.310606
| 116
| 0.566422
| 8,292
| 0.708839
| 0
| 0
| 0
| 0
| 0
| 0
| 3,372
| 0.288254
|
021c36744a33f4725dc24d93c0aa09acf81e97bf
| 2,193
|
py
|
Python
|
tictac/tictac/cli.py
|
SteveDMurphy/tic_tac_go
|
7e80dc1ec6fbeceb3c9879cee7fb32b7ecfe37a7
|
[
"MIT"
] | null | null | null |
tictac/tictac/cli.py
|
SteveDMurphy/tic_tac_go
|
7e80dc1ec6fbeceb3c9879cee7fb32b7ecfe37a7
|
[
"MIT"
] | null | null | null |
tictac/tictac/cli.py
|
SteveDMurphy/tic_tac_go
|
7e80dc1ec6fbeceb3c9879cee7fb32b7ecfe37a7
|
[
"MIT"
] | null | null | null |
import click
from random import randrange
from tictac import Tictac
@click.group()
def tictac():
pass
@tictac.command(name="games", help="Returns all started games, order by when they were created")
def view_games():
tictac_class = Tictac()
click.echo(tictac_class.view_games())
@tictac.command(name="gamemoves", help="Returns all moves in a specified game")
def view_game_moves():
game_id = click.prompt("Input a valid game ID", type=int)
tictac_class = Tictac()
game_moves = tictac_class.view_moves(game_id)
click.echo(game_moves)
@tictac.command(name="newgame", help="Creates a new game and walks moves through to completion")
def new_game():
tictac_class = Tictac()
tictac_class.create_new_game()
click.echo(f"playing game id: {tictac_class.game_id}")
game_complete = 0
while game_complete == 0:
available_moves = tictac_class.get_move_options()
if (tictac_class.number_of_moves() % 2) == 0:
# player to move here
click.echo("Possible moves:")
for move in available_moves:
click.echo(f"Position ID: {move[0]}, Position: {move[1]}")
move = click.prompt(
"Please pick a position id number for you next move", type=int
)
# TODO add some validation here
game_complete = tictac_class.take_turn(position_id=move)
else:
# selects a random position ID from the available moves
random_selection_id = randrange(len(available_moves))
computer_move = available_moves[random_selection_id][0]
game_complete = tictac_class.take_turn(position_id=computer_move, player_is_robot=1)
if game_complete == 1:
if tictac_class.winning_player_is_robot == 0:
click.echo("Congratulations! You win!")
else:
click.echo("OOF - sorry, the computer won this time...")
click.echo("Winning combination:")
click.echo(tictac_class.winning_combination)
elif game_complete == -1:
click.echo("oh dang, nobody won... try again?")
if __name__ == "__main__":
tictac()
| 33.227273
| 96
| 0.645691
| 0
| 0
| 0
| 0
| 2,070
| 0.943912
| 0
| 0
| 609
| 0.277702
|
021d46262a81bc3bd29354a1c4c85f1ce3571b25
| 4,230
|
py
|
Python
|
matchId.py
|
terryhahm/ARAM
|
bbaa6446aec6ad7141d492aef174832e627c7b74
|
[
"MIT"
] | null | null | null |
matchId.py
|
terryhahm/ARAM
|
bbaa6446aec6ad7141d492aef174832e627c7b74
|
[
"MIT"
] | null | null | null |
matchId.py
|
terryhahm/ARAM
|
bbaa6446aec6ad7141d492aef174832e627c7b74
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import riotConstant
import time
import requests
def wait( tik ):
tik = int(tik)
tik += 2
while( tik > 0 ):
print("API Rate Limit exceeded, wait for " + str(tik) + " second(s)", end = ' \r')
tik -= 1
time.sleep(1)
print(" ", end='\r')
return
def getMatchIdURL( region, accountId, api_key):
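    # 2,592,000,000 ms = 30 days, so the query looks back three 30-day "months"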
month_in_mill = 2592000000
currentTime = int(round(time.time() * 1000))
beginTime = currentTime - 3 * month_in_mill
url = "https://" + region\
+ ".api.riotgames.com/lol/match/v4/matchlists/by-account/"+ accountId\
+"?queue=450&beginTime="+ str(beginTime)\
+"&api_key=" + api_key
return url
def getMatchIdByPlayer( region, accountId, api_key ):
# Create a list to store the match ID played by user with given account ID, and get Riot API URL
matchIdList = []
url = getMatchIdURL( region, accountId, api_key )
# While loop to handle API rate limit exceeding
while( True ):
# Request for match info played by user with given account Id
try:
response = requests.get( url )
response.raise_for_status()
# If any status code other than 200 occurs
except requests.exceptions.RequestException as e:
# User have not played ARAM in last 3 months (Data Not Found), break from while loop
if( response.status_code == 404):
# print("User does not have record playing ARAM in last 3 months")
break
            # If the API rate limit was exceeded, wait for the Retry-After period and try again.
elif( response.status_code == 429 ):
retry_after = response.headers['Retry-After']
wait(retry_after)
continue
# Any other error will print out and break from while loop
else:
print(e)
break
# If request was successful, handle json data and break from while loop
else:
json_data = response.json()
matches = json_data['matches']
# print("Collected match history of user with account id : " + accountId)
for match in matches:
matchId = match['gameId']
matchIdList.append(matchId)
break
return matchIdList
def getMatchId( region ):
RIOTConstant = riotConstant.RIOTConstant()
api_key = RIOTConstant.api_key
# Read account ID file per region
file_path_accnt = './data/' + region + "accountId.csv"
df_accntId = pd.read_csv(file_path_accnt)
# Create a new dataframe to store match ID
df_matchId = pd.DataFrame()
# For each tier / division
for column in df_accntId.columns:
# Get the list of account ID, and create list to store match ID
accntIdList = df_accntId[column].dropna(axis = 0)
matchIdList = []
# Create variable to track process of getting data
total = len(accntIdList)
count = 1
# For each account ID
for accntId in accntIdList:
# Get the match ID played by each account ID
matchidListByPlayer = getMatchIdByPlayer( region, accntId, api_key)
print("Collecting match history : " + str(count) + " out of " + str(total), end = '\r')
count = count + 1
# Add the match ID to the list
matchIdList.extend(matchidListByPlayer)
# Once iterate through all account ID in each tier / division,
# check for duplicate, create a dataframe column and concatenate with previous dataframe
matchIdList = list(dict.fromkeys(matchIdList))
new_column = pd.DataFrame( data = { column : matchIdList } )
df_matchId = pd.concat( [df_matchId, new_column], axis=1 )
df_matchId.to_csv('./data/' + region + "matchId.csv", index=False)
    # # Once all columns are done, convert everything to Integer because some values are listed as float type
    # df_final = pd.read_csv('./data/' + region + "matchId.csv").dropna(axis = 0)
    # df_final = df_final.astype(int)
# df_final.to_csv('./data/' + region + "matchId.csv", index=False)
| 37.433628
| 108
| 0.607092
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,729
| 0.408747
|
021d5769d36b572a0f2addec694597fefa3cfa6f
| 158
|
py
|
Python
|
Backend/order/urls.py
|
Bhavya0020/Readopolis
|
a0053e4fae97dc8291b50c746f3dc3e6b454ad95
|
[
"MIT"
] | null | null | null |
Backend/order/urls.py
|
Bhavya0020/Readopolis
|
a0053e4fae97dc8291b50c746f3dc3e6b454ad95
|
[
"MIT"
] | null | null | null |
Backend/order/urls.py
|
Bhavya0020/Readopolis
|
a0053e4fae97dc8291b50c746f3dc3e6b454ad95
|
[
"MIT"
] | null | null | null |
from django.urls import path
from . import views
urlpatterns = [
path('checkout/', views.checkout),
path('orders/', views.OrdersList.as_view()),
]
| 19.75
| 50
| 0.677215
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 20
| 0.126582
|
02223351c3f6f455c742ce52e04a38d560dc3455
| 299
|
py
|
Python
|
src/z3c/saconfig/__init__.py
|
zopefoundation/z3c.saconfig
|
69a32e7f7617ec4a1f9667d673a1ddc00aff59c2
|
[
"ZPL-2.1"
] | 2
|
2016-03-12T14:22:23.000Z
|
2019-05-22T04:18:26.000Z
|
src/z3c/saconfig/__init__.py
|
zopefoundation/z3c.saconfig
|
69a32e7f7617ec4a1f9667d673a1ddc00aff59c2
|
[
"ZPL-2.1"
] | 13
|
2015-05-05T12:27:48.000Z
|
2021-05-20T11:11:49.000Z
|
src/z3c/saconfig/__init__.py
|
zopefoundation/z3c.saconfig
|
69a32e7f7617ec4a1f9667d673a1ddc00aff59c2
|
[
"ZPL-2.1"
] | 4
|
2015-05-04T12:18:31.000Z
|
2019-11-18T09:47:31.000Z
|
from z3c.saconfig.scopedsession import Session, named_scoped_session
from z3c.saconfig.utility import (
GloballyScopedSession, SiteScopedSession, EngineFactory)
__all__ = [
'Session',
'named_scoped_session',
'GloballyScopedSession',
'SiteScopedSession',
'EngineFactory',
]
| 23
| 68
| 0.752508
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 88
| 0.294314
|
0223c05bd579183b627da44b67aca37eba1114e5
| 557
|
py
|
Python
|
src/triage/experiments/singlethreaded.py
|
josephbajor/triage_NN
|
cbaee6e5a06e597c91fec372717d89a2b5f34fa5
|
[
"MIT"
] | 160
|
2017-06-13T09:59:59.000Z
|
2022-03-21T22:00:35.000Z
|
src/triage/experiments/singlethreaded.py
|
josephbajor/triage_NN
|
cbaee6e5a06e597c91fec372717d89a2b5f34fa5
|
[
"MIT"
] | 803
|
2016-10-21T19:44:02.000Z
|
2022-03-29T00:02:33.000Z
|
src/triage/experiments/singlethreaded.py
|
josephbajor/triage_NN
|
cbaee6e5a06e597c91fec372717d89a2b5f34fa5
|
[
"MIT"
] | 59
|
2017-01-31T22:10:22.000Z
|
2022-03-19T12:35:03.000Z
|
from triage.experiments import ExperimentBase
class SingleThreadedExperiment(ExperimentBase):
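    """Experiment runner that processes every batch of tasks serially, in-process."""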
def process_query_tasks(self, query_tasks):
self.feature_generator.process_table_tasks(query_tasks)
def process_matrix_build_tasks(self, matrix_build_tasks):
self.matrix_builder.build_all_matrices(matrix_build_tasks)
def process_train_test_batches(self, batches):
self.model_train_tester.process_all_batches(batches)
def process_subset_tasks(self, subset_tasks):
self.subsetter.process_all_tasks(subset_tasks)
| 34.8125
| 66
| 0.800718
| 508
| 0.912029
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
022635491f2d2bfe0024464d83f72d0ff2d7769e
| 11,374
|
py
|
Python
|
Webspider.py
|
radiantbk/webspider
|
62a9c71f8f3f39e5e07e0fb68682fc05a83edd5b
|
[
"MIT"
] | 1
|
2019-11-09T01:36:39.000Z
|
2019-11-09T01:36:39.000Z
|
Webspider.py
|
radiantbk/webspider
|
62a9c71f8f3f39e5e07e0fb68682fc05a83edd5b
|
[
"MIT"
] | null | null | null |
Webspider.py
|
radiantbk/webspider
|
62a9c71f8f3f39e5e07e0fb68682fc05a83edd5b
|
[
"MIT"
] | null | null | null |
import re
import os
class tag_obj:
def __init__(self):
self.pos = 0
self.name = ""
self.class_name = ""
self.content = ''
self.children =''
self.pairpos = -1
self.pair=""
self.tag_label = ""
class spider:
def __init__(self, txt):
self.html = txt
self.tag_scope = [
"!DOCTYPE",
"a",
"abbr",
"acronym",
"address",
"applet",
"area",
"article",
"aside",
"audio",
"b",
"base",
"basefont",
"bdi",
"bdo",
"big",
"blockquote",
"body",
"br",
"button",
"canvas",
"caption",
"center",
"cite",
"code",
"col",
"colgroup",
"command",
"datalist",
"dd",
"del",
"details",
"dir",
"div",
"dfn",
"dialog",
"dl",
"dt",
"em",
"embed",
"fieldset",
"figcaption",
"figure",
"fig",
"font",
"footer",
"form",
"frame",
"frameset",
"h1",
"h2",
"h3",
"h4",
"h5",
"h6",
"head",
"header",
"hr",
"html",
"i",
"iframe",
"img",
"input",
"ins",
"isindex",
"kbd",
"keygen",
"label",
"legend",
"li",
"link",
"map",
"mark",
"menu",
"menuitem",
"meta",
"meter",
"nav",
"noframes",
"noscript",
"object",
"ol",
"optgroup",
"option",
"output",
"p",
"param",
"pre",
"progress",
"q",
"rp",
"rt",
"ruby",
"s",
"samp",
"script",
"section",
"select",
"small",
"source",
"span",
"strike",
"strong",
"style",
"sub",
"summary",
"sup",
"table",
"tbody",
"td",
"textarea",
"tfoot",
"th",
"thead",
"time",
"title",
"tr",
"track",
"tt",
"u",
"ul",
"var",
"video",
"wbr",
"xmp",
]
self.tag_items = []
self.tag_items_l = []
self.tag_items_s = []
self.get_tag_items()
self.pos_list = []
self.tag_list = []
self.tag_set()
print("finish set up")
    # find all tags in html, return a tag item list without tag contents
def get_tag_items(self):
tagpa = "<(?:"
for each in self.tag_scope:
tagpa += each
if each != self.tag_scope[-1]:
tagpa += "|"
# for tag with description
tagpa1 = tagpa + ")\s.*?>"
        # for tag without description
tagpa2 = tagpa + ")>"
pa1 = re.compile(tagpa1)
pa2 = re.compile(tagpa2)
tag1 = re.findall(pa1, self.html)
tag2 = re.findall(pa2, self.html)
self.tag_items_l = tag1
self.tag_items_s = tag2
self.tag_items = self.tag_items_l + self.tag_items_s
# define a method which can be used internally, to avoid error caused by wrong tag_item
def get_tag_pos(self, tag_label, pos):
# find tag_item postion, and update in self.tag
start_pos = pos
find_result = 0
str = self.html
while find_result != -1:
find_result = str.find(tag_label, start_pos)
# find tag_label in Html
if find_result != -1:
# if found, check whether in pos list. if it is in, update the start position and continue. if not in pos list, update pos list and return position.
try:
self.pos_list.index(find_result)
except ValueError:
self.pos_list.append(find_result)
#print("%s:%d" % (tag_label, find_result))
return find_result
else:
start_pos = find_result + len(tag_label)
#print("already found one!")
# if tag_label was not found,return -1
#print("%s not found" % tag_label)
return find_result
def get_tag_lastpos(self, tag_name):
pos = 0
if self.tag_list == []:
return pos
i = len(self.tag_list)
while i >= 1:
tag_obj = self.tag_list[i - 1]
            if tag_obj.name == tag_name:
                pos = tag_obj.pos + len(tag_name)
                break
            i = i - 1
return pos
def get_tag_allpair(self,tag_name):
#find position of tag_name pair, return a list of pair pos
tag_pair = '</'+tag_name+'>'
start_pos = 0
find_result = 0
pair_pos = []
while self.html.find(tag_pair,start_pos)!= -1:
#keep seeking pair pos till it is not found
find_result = self.html.find(tag_pair, start_pos)
if find_result != -1:
pair_pos.append(find_result)
start_pos = find_result+len(tag_pair)
return pair_pos
def match_tag(self,tag_pos_list,pair_pos_list):
# match the list of pos and pair, return a list of match. the biggest pos of pair, should match with biigest pos who is smaller than pair.
match_list=[]
#print('%s:\n%s'%(tag_pos_list,pair_pos_list))
if tag_pos_list != []:
#if tag_pos_list not empty,set min pos as first element of tag_post_list
min_pos = tag_pos_list[0]
else:
#if tag_pos_list is empty, stop matching and return a empty match_list
return match_list
for pair_pos in pair_pos_list:
for tag_pos in tag_pos_list:
if (tag_pos<pair_pos) and (tag_pos>min_pos):
min_pos = tag_pos
#print(min_pos)
match_list.append([min_pos,pair_pos])
tag_pos_list.remove(min_pos)
#remove min_pos from tag_pos_list as it has been matched
if tag_pos_list !=[]:
#if tag_pos_list not empty,set min pos as first element of tag_post_list
min_pos = tag_pos_list[0]
else:
#if tag_pos_list is empty,stop matching
return match_list
return match_list
def set_pair(self):
#get pair position of tag
#print(self.tag_list)
for each in self.tag_list:
#get each tag object in tag_list
            if each.tag_label[-2:] == '/>':
                # if the tag ends with />, directly get the pair position
                each.pairpos = each.pos + len(each.tag_label) - 2
                each.pair = '/>'
            # else, if the pair position is not set yet, group the tags and collect all tag positions
            elif each.pairpos == -1:
                tag_pos_list = []
                for ea in self.tag_list:
                    # group tag positions for tags whose name matches the current tag's name
                    if ea.name == each.name:
                        tag_pos_list.append(ea.pos)
#print(tag_pos_list)
#get relevant pair pos list
#print(tag_pos_list)
tag_pair_list = self.get_tag_allpair(each.name)
                # match pairs for tags whose name equals the current tag label
                match_list = self.match_tag(tag_pos_list, tag_pair_list)
                #print(match_list)
                # update pair and pairpos by going through each element in the match list
for ml in match_list:
for tg in self.tag_list:
if tg.pos == ml[0]:
tg.pairpos = ml[1]
                            tg.pair = '</' + tg.name + '>'
def set_tag_content(self):
#set tag content and children when tag pos and pair were set.
for ea in self.tag_list:
if ea.pairpos != -1:
                # when the pair position is available, get the tag content by slicing the html
                ea.content = self.html[ea.pos:ea.pairpos + len(ea.pair)]
                content_str = ea.content
                content_str = content_str[len(ea.tag_label):]
                # if a string remains, the tag has children and an independent closing pair
                if content_str != '':
                    end = len(ea.name) + 3
                    content_str = content_str[:-end]
                    ea.children = content_str
def tag_set(self):
        # reset all tag settings and create a tag object for every tag detected in the text
self.tag_list = []
items = self.tag_items
for ea in items:
# define a tag object, and update tag of spider, id, description
tag_object = tag_obj()
# get the tag position in html
start_pos = self.get_tag_lastpos(ea)
pos = self.get_tag_pos(ea, start_pos)
if pos != -1:
tag_object.pos = pos
else:
tag_object.pos = 0
tag_object.tag_label = ea
# remove start and end of tag
ea_str = ea
tag_item = ea_str.replace(">", "")
ea_str = tag_item.replace("<", "")
            # if there is a space, the name is the first part of the string; otherwise it is
            # an attribute-free tag and the tag name equals the tag item
            if " " in ea_str:
ea_list = ea_str.split(" ")
tag_object.name = ea_list[0]
else:
tag_object.name = ea_str
# add tag_object into tag attribute
# get class name of tag
class_str = 'class="(.*?)"'
pa_class = re.compile(class_str)
class_content = re.findall(pa_class, ea)
if class_content != []:
tag_object.class_name = class_content[0]
self.tag_list.append(tag_object)
        # once the tag_list has been set up, match positions with pair positions
        self.set_pair()
        self.set_tag_content()
    def get_tag_content(self, tag_name, class_name=''):
        # get tag content by tag name and an optional class name
        tag_content = []
        for ea in self.tag_list:
            if ea.name == tag_name:
                if class_name == "":
tag_content.append(ea.content)
elif ea.class_name == class_name:
tag_content.append(ea.content)
return tag_content
    def tag(self, tag_name, tag_classname=''):
        # get a tag object by name; if more than one tag shares the name, return the
        # first one. if the tag does not exist, return None
        found = None
        for tg in self.tag_list:
            if tg.name == tag_name:
                if tag_classname == '':
                    found = tg
                    break
                elif tag_classname in tg.class_name:
                    found = tg
                    break
        return found
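# Usage sketch (added note; the instance name and construction are hypothetical,
# since the class header is defined earlier in the file):
#   sp.get_tag_content('a', class_name='nav')  # contents of every <a class="nav">...</a>
#   sp.tag('div')                              # first div tag object, or None if absent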
| 31.076503
| 164
| 0.481976
| 11,250
| 0.989098
| 0
| 0
| 0
| 0
| 0
| 0
| 3,402
| 0.299103
|
022a8bafe44b23b7f0a6af1c6947a769d26527f0
| 4,909
|
py
|
Python
|
QScrollAreaImages.py
|
ErwinSchotman/QT5-QScrollAreaImages
|
053e06a3ff67311f753712902902c43b1f011d30
|
[
"MIT"
] | 1
|
2019-11-29T00:37:31.000Z
|
2019-11-29T00:37:31.000Z
|
QScrollAreaImages.py
|
ErwinSchotman/QT5-QScrollAreaImages
|
053e06a3ff67311f753712902902c43b1f011d30
|
[
"MIT"
] | null | null | null |
QScrollAreaImages.py
|
ErwinSchotman/QT5-QScrollAreaImages
|
053e06a3ff67311f753712902902c43b1f011d30
|
[
"MIT"
] | null | null | null |
#
# Copyright (c) 2019 Erwin Schotman
#
# Licensed under MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH
# THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from PyQt5.QtWidgets import QScrollArea, QWidget, QGridLayout
from QClickableImage import *
from PyQt5.QtCore import QRect
#=======================================================================================================================
class QScrollAreaImages(QScrollArea):
displayed_image_size = 100
#-------------------------------------------------------------------------------------------------------------------
def __init__(self, width=0, height=0, pixmap=None):
QScrollArea.__init__(self)
# make a scroll area resizeable
self.setWidgetResizable(True)
# make a widget for the contents of the scroll area
self.scrollAreaWidgetContents = QWidget()
#self.scrollAreaWidgetContents.setGeometry(QRect(0, 0, 421, 429))
# give this widget a grid layout
self.gridLayout = QGridLayout(self.scrollAreaWidgetContents)
# put the contents widget in the scroll area
self.setWidget(self.scrollAreaWidgetContents)
#-------------------------------------------------------------------------------------------------------------------
def get_nr_of_image_columns(self):
scroll_area_images_width = self.width()
if scroll_area_images_width > self.displayed_image_size:
nr_of_columns = scroll_area_images_width // self.displayed_image_size
else:
nr_of_columns = 1
return nr_of_columns
#-------------------------------------------------------------------------------------------------------------------
def on_resize(self, event):
nr_of_columns = self.get_nr_of_image_columns()
nr_of_widgets = self.gridLayout.count()
widgets = []
for i in range(nr_of_widgets):
widgets.append(self.gridLayout.itemAt(i))
column_nr = 0
row_nr = 0
for widget in widgets:
self.gridLayout.removeItem(widget)
self.gridLayout.addWidget(widget.widget(), row_nr, column_nr)
if column_nr == nr_of_columns - 1:
column_nr = 0
row_nr += 1
else:
column_nr += 1
#-------------------------------------------------------------------------------------------------------------------
def setDisplayedImageSize(self, image_size):
self.displayed_image_size = image_size
#-------------------------------------------------------------------------------------------------------------------
def addImage(self, pixmap, image_id):
nr_of_columns = self.get_nr_of_image_columns()
nr_of_widgets = self.gridLayout.count()
row_nr = nr_of_widgets // nr_of_columns
column_nr = nr_of_widgets % nr_of_columns
clickable_image = QClickableImage(self.displayed_image_size, self.displayed_image_size, pixmap, image_id)
clickable_image.clicked.connect(self.on_left_clicked)
clickable_image.rightClicked.connect(self.on_right_clicked)
        # QGridLayout.addWidget takes (widget, row, column); on_resize above uses
        # that order, so pass row_nr before column_nr here as well
        self.gridLayout.addWidget(clickable_image, row_nr, column_nr)
#-------------------------------------------------------------------------------------------------------------------
def on_left_clicked(self, image_id):
print('left clicked - image id = ' + image_id)
#-------------------------------------------------------------------------------------------------------------------
def on_right_clicked(self, image_id):
print('right clicked - image id = ' + image_id)
#-------------------------------------------------------------------------------------------------------------------
def resizeEvent(self, event):
self.on_resize(event)
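# Usage sketch (added note; file names are hypothetical and a QApplication must be running):
#   import sys
#   from PyQt5.QtWidgets import QApplication
#   from PyQt5.QtGui import QPixmap
#   app = QApplication(sys.argv)
#   area = QScrollAreaImages()
#   area.setDisplayedImageSize(120)
#   for i, path in enumerate(["a.png", "b.png"]):
#       area.addImage(QPixmap(path), image_id=str(i))
#   area.show()
#   sys.exit(app.exec_())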
| 48.127451
| 121
| 0.534528
| 3,508
| 0.714606
| 0
| 0
| 0
| 0
| 0
| 0
| 2,468
| 0.50275
|
022b9e68ba47723e01a95addbedb6c10c435b96e
| 30,434
|
py
|
Python
|
pyrax/fakes.py
|
jfreeman812/pyrax
|
dba18df916dcc3a9f539bd9c609b1bb68f3d9203
|
[
"Apache-2.0"
] | null | null | null |
pyrax/fakes.py
|
jfreeman812/pyrax
|
dba18df916dcc3a9f539bd9c609b1bb68f3d9203
|
[
"Apache-2.0"
] | 1
|
2019-11-06T20:21:59.000Z
|
2019-11-06T20:21:59.000Z
|
pyrax/fakes.py
|
jfreeman812/pyrax
|
dba18df916dcc3a9f539bd9c609b1bb68f3d9203
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import json
import os
import random
import time
import uuid
import pyrax
from pyrax.autoscale import AutoScaleClient
from pyrax.autoscale import AutoScalePolicy
from pyrax.autoscale import AutoScaleWebhook
from pyrax.autoscale import ScalingGroup
from pyrax.autoscale import ScalingGroupManager
from pyrax.client import BaseClient
from pyrax.clouddatabases import CloudDatabaseClient
from pyrax.clouddatabases import CloudDatabaseDatabaseManager
from pyrax.clouddatabases import CloudDatabaseInstance
from pyrax.clouddatabases import CloudDatabaseManager
from pyrax.clouddatabases import CloudDatabaseUser
from pyrax.clouddatabases import CloudDatabaseUserManager
from pyrax.clouddatabases import CloudDatabaseVolume
from pyrax.cloudblockstorage import CloudBlockStorageClient
from pyrax.cloudblockstorage import CloudBlockStorageManager
from pyrax.cloudblockstorage import CloudBlockStorageSnapshot
from pyrax.cloudblockstorage import CloudBlockStorageSnapshotManager
from pyrax.cloudblockstorage import CloudBlockStorageVolume
from pyrax.cloudloadbalancers import CloudLoadBalancer
from pyrax.cloudloadbalancers import CloudLoadBalancerManager
from pyrax.cloudloadbalancers import CloudLoadBalancerClient
from pyrax.cloudloadbalancers import Node
from pyrax.cloudloadbalancers import VirtualIP
from pyrax.clouddns import CloudDNSClient
from pyrax.clouddns import CloudDNSDomain
from pyrax.clouddns import CloudDNSManager
from pyrax.clouddns import CloudDNSRecord
from pyrax.clouddns import CloudDNSPTRRecord
from pyrax.cloudnetworks import CloudNetwork
from pyrax.cloudnetworks import CloudNetworkClient
from pyrax.cloudmonitoring import CloudMonitorClient
from pyrax.cloudmonitoring import CloudMonitorEntity
from pyrax.cloudmonitoring import CloudMonitorCheck
from pyrax.cloudmonitoring import CloudMonitorNotification
from pyrax.image import Image
from pyrax.image import ImageClient
from pyrax.image import ImageManager
from pyrax.image import ImageMemberManager
from pyrax.image import ImageTagManager
from pyrax.object_storage import BulkDeleter
from pyrax.object_storage import Container
from pyrax.object_storage import ContainerManager
from pyrax.object_storage import FolderUploader
from pyrax.object_storage import StorageClient
from pyrax.object_storage import StorageObject
from pyrax.object_storage import StorageObjectManager
from pyrax.queueing import Queue
from pyrax.queueing import QueueClaim
from pyrax.queueing import QueueMessage
from pyrax.queueing import QueueClient
from pyrax.queueing import QueueManager
import pyrax.exceptions as exc
from pyrax.base_identity import BaseIdentity
from pyrax.base_identity import Endpoint
from pyrax.base_identity import Service
from pyrax.identity.rax_identity import RaxIdentity
from pyrax.identity.keystone_identity import KeystoneIdentity
import pyrax.utils as utils
example_uri = "http://example.com"
class FakeResponse(object):
headers = {}
body = ""
status_code = 200
reason = "Oops"
content = "Oops"
@property
def status(self):
# TEMPORARY - until the cf_wrapper code is removed.
return self.status_code
@status.setter
def status(self, val):
# TEMPORARY - until the cf_wrapper code is removed.
self.status_code = val
def getheaders(self):
return self.headers
def read(self):
return "Line1\nLine2"
def get(self, arg):
return self.headers.get(arg)
def json(self):
return self.content
class FakeIterator(utils.ResultsIterator):
def _init_methods(self):
pass
class FakeClient(object):
user_agent = "Fake"
USER_AGENT = "Fake"
def __init__(self, *args, **kwargs):
self.identity = FakeIdentity()
class FakeStorageClient(StorageClient):
def __init__(self, identity=None, *args, **kwargs):
if identity is None:
identity = FakeIdentity()
super(FakeStorageClient, self).__init__(identity, *args, **kwargs)
def create(self, name):
return FakeContainer(self._manager, {"name": name})
class FakeContainerManager(ContainerManager):
def __init__(self, api=None, *args, **kwargs):
if api is None:
api = FakeStorageClient()
super(FakeContainerManager, self).__init__(api, *args, **kwargs)
class FakeContainer(Container):
def __init__(self, *args, **kwargs):
super(FakeContainer, self).__init__(*args, **kwargs)
self.object_manager = FakeStorageObjectManager(self.manager.api,
uri_base=self.name)
self.object_manager._container = self
class FakeStorageObjectManager(StorageObjectManager):
def __init__(self, api=None, *args, **kwargs):
if api is None:
api = FakeStorageClient()
if "uri_base" not in kwargs:
kwargs["uri_base"] = utils.random_ascii()
super(FakeStorageObjectManager, self).__init__(api, *args, **kwargs)
class FakeStorageObject(StorageObject):
def __init__(self, manager, name=None, total_bytes=None, content_type=None,
last_modified=None, etag=None, attdict=None):
"""
The object can either be initialized with individual params, or by
passing the dict that is returned by swiftclient.
"""
self.manager = manager
self.name = name
self.bytes = total_bytes or 0
self.content_type = content_type
self.last_modified = last_modified
self.hash = etag
if attdict:
self._read_attdict(attdict)
fake_attdict = {"name": "fake",
"content-length": 42,
"content-type": "text/html",
"etag": "ABC",
"last-modified": "Tue, 01 Jan 2013 01:02:03 GMT",
}
class FakeServer(object):
id = utils.random_unicode()
class FakeService(object):
user_agent = "FakeService"
USER_AGENT = "FakeService"
def __init__(self, *args, **kwargs):
self.client = FakeClient()
self.Node = FakeNode
self.VirtualIP = FakeVirtualIP
self.loadbalancers = FakeLoadBalancer()
self.id = utils.random_unicode()
def authenticate(self):
pass
def get_protocols(self):
return ["HTTP"]
def get_algorithms(self):
return ["RANDOM"]
def get_usage(self):
pass
class FakeCSClient(FakeService):
def __init__(self, *args, **kwargs):
ident = FakeIdentity()
super(FakeCSClient, self).__init__(ident, *args, **kwargs)
def dummy(self):
pass
self.servers = FakeService()
utils.add_method(self.servers, dummy, "list")
self.images = FakeService()
utils.add_method(self.images, dummy, "list")
self.flavors = FakeService()
utils.add_method(self.flavors, dummy, "list")
class FakeFolderUploader(FolderUploader):
def __init__(self, *args, **kwargs):
super(FakeFolderUploader, self).__init__(*args, **kwargs)
# Useful for when we mock out the run() method.
self.actual_run = self.run
self.run = self.fake_run
def fake_run(self):
pass
class FakeBulkDeleter(BulkDeleter):
def __init__(self, *args, **kwargs):
super(FakeBulkDeleter, self).__init__(*args, **kwargs)
# Useful for when we mock out the run() method.
self.actual_run = self.run
self.run = self.fake_run
def fake_run(self):
time.sleep(0.0001)
self.results = {}
self.completed = True
class FakeManager(object):
def __init__(self, *args, **kwargs):
super(FakeManager, self).__init__(*args, **kwargs)
self.api = FakeClient()
def list(self):
pass
def get(self, item):
pass
def delete(self, item):
pass
def create(self, *args, **kwargs):
pass
def find(self, *args, **kwargs):
pass
def action(self, item, action_type, body={}):
pass
class FakeException(BaseException):
pass
class FakeKeyring(object):
password_set = False
def get_password(self, *args, **kwargs):
return "FAKE_TOKEN|FAKE_URL"
def set_password(self, *args, **kwargs):
self.password_set = True
class FakeEntity(object):
def __init__(self, *args, **kwargs):
self.id = utils.random_unicode()
def get(self, *args, **kwargs):
pass
def list(self, *args, **kwargs):
pass
class FakeDatabaseUser(CloudDatabaseUser):
pass
class FakeDatabaseVolume(CloudDatabaseVolume):
def __init__(self, instance, *args, **kwargs):
self.instance = instance
self.size = 1
self.used = 0.2
class FakeDatabaseInstance(CloudDatabaseInstance):
def __init__(self, *args, **kwargs):
self.id = utils.random_unicode()
self.manager = FakeDatabaseManager()
self.manager.api = FakeDatabaseClient()
self._database_manager = CloudDatabaseDatabaseManager(
FakeDatabaseClient())
self._user_manager = CloudDatabaseUserManager(FakeDatabaseClient())
self.volume = FakeDatabaseVolume(self)
class FakeDatabaseManager(CloudDatabaseManager):
def __init__(self, api=None, *args, **kwargs):
if api is None:
api = FakeDatabaseClient()
super(FakeDatabaseManager, self).__init__(api, *args, **kwargs)
self.uri_base = "instances"
class FakeDatabaseClient(CloudDatabaseClient):
def __init__(self, *args, **kwargs):
self._manager = FakeDatabaseManager(self)
self._flavor_manager = FakeManager()
ident = FakeIdentity()
super(FakeDatabaseClient, self).__init__(ident, "fakeuser",
"fakepassword", *args, **kwargs)
class FakeNovaVolumeClient(BaseClient):
def __init__(self, *args, **kwargs):
pass
class FakeBlockStorageManager(CloudBlockStorageManager):
def __init__(self, api=None, *args, **kwargs):
ident = FakeIdentity()
if api is None:
api = FakeBlockStorageClient(ident)
super(FakeBlockStorageManager, self).__init__(api, *args, **kwargs)
class FakeBlockStorageVolume(CloudBlockStorageVolume):
def __init__(self, *args, **kwargs):
volname = utils.random_unicode(8)
self.id = utils.random_unicode()
self.manager = FakeBlockStorageManager()
self._nova_volumes = FakeNovaVolumeClient()
class FakeBlockStorageSnapshot(CloudBlockStorageSnapshot):
def __init__(self, *args, **kwargs):
self.id = utils.random_unicode()
self.manager = FakeManager()
self.status = "available"
class FakeBlockStorageClient(CloudBlockStorageClient):
def __init__(self, *args, **kwargs):
self._types_manager = FakeManager()
self._snapshot_manager = FakeManager()
ident = FakeIdentity()
super(FakeBlockStorageClient, self).__init__(ident, "fakeuser",
"fakepassword", *args, **kwargs)
class FakeSnapshotManager(CloudBlockStorageSnapshotManager):
def __init__(self, api=None, *args, **kwargs):
ident = FakeIdentity()
if api is None:
api = FakeBlockStorageClient(ident)
super(FakeSnapshotManager, self).__init__(api, *args, **kwargs)
class FakeLoadBalancerClient(CloudLoadBalancerClient):
def __init__(self, *args, **kwargs):
ident = FakeIdentity()
super(FakeLoadBalancerClient, self).__init__(ident, "fakeuser",
"fakepassword", *args, **kwargs)
class FakeLoadBalancerManager(CloudLoadBalancerManager):
def __init__(self, api=None, *args, **kwargs):
if api is None:
api = FakeLoadBalancerClient()
super(FakeLoadBalancerManager, self).__init__(api, *args, **kwargs)
class FakeLoadBalancer(CloudLoadBalancer):
def __init__(self, name=None, info=None, *args, **kwargs):
name = name or utils.random_ascii()
info = info or {"fake": "fake"}
super(FakeLoadBalancer, self).__init__(name, info, *args, **kwargs)
self.id = utils.random_ascii()
self.port = random.randint(1, 256)
self.manager = FakeLoadBalancerManager()
class FakeNode(Node):
def __init__(self, address=None, port=None, condition=None, weight=None,
status=None, parent=None, type=None, id=None):
if address is None:
address = "0.0.0.0"
if port is None:
port = 80
if id is None:
id = utils.random_unicode()
super(FakeNode, self).__init__(address=address, port=port,
condition=condition, weight=weight, status=status,
parent=parent, type=type, id=id)
class FakeVirtualIP(VirtualIP):
pass
class FakeStatusChanger(object):
check_count = 0
id = utils.random_unicode()
@property
def status(self):
if self.check_count < 2:
self.check_count += 1
return "changing"
return "ready"
class FakeDNSClient(CloudDNSClient):
def __init__(self, *args, **kwargs):
ident = FakeIdentity()
super(FakeDNSClient, self).__init__(ident, "fakeuser",
"fakepassword", *args, **kwargs)
class FakeDNSManager(CloudDNSManager):
def __init__(self, api=None, *args, **kwargs):
if api is None:
api = FakeDNSClient()
super(FakeDNSManager, self).__init__(api, *args, **kwargs)
self.resource_class = FakeDNSDomain
self.response_key = "domain"
self.plural_response_key = "domains"
self.uri_base = "domains"
class FakeDNSDomain(CloudDNSDomain):
def __init__(self, *args, **kwargs):
self.id = utils.random_ascii()
self.name = utils.random_unicode()
self.manager = FakeDNSManager()
class FakeDNSRecord(CloudDNSRecord):
def __init__(self, mgr, info, *args, **kwargs):
super(FakeDNSRecord, self).__init__(mgr, info, *args, **kwargs)
class FakeDNSPTRRecord(CloudDNSPTRRecord):
pass
class FakeDNSDevice(FakeLoadBalancer):
def __init__(self, *args, **kwargs):
self.id = utils.random_unicode()
class FakeCloudNetworkClient(CloudNetworkClient):
def __init__(self, *args, **kwargs):
ident = FakeIdentity()
super(FakeCloudNetworkClient, self).__init__(ident, "fakeuser",
"fakepassword", *args, **kwargs)
class FakeCloudNetwork(CloudNetwork):
def __init__(self, *args, **kwargs):
info = kwargs.pop("info", {"fake": "fake"})
label = kwargs.pop("label", kwargs.pop("name", utils.random_unicode()))
info["label"] = label
super(FakeCloudNetwork, self).__init__(manager=None, info=info, *args,
**kwargs)
self.id = uuid.uuid4().hex
class FakeAutoScaleClient(AutoScaleClient):
def __init__(self, *args, **kwargs):
ident = FakeIdentity()
self._manager = FakeManager()
super(FakeAutoScaleClient, self).__init__(ident, *args, **kwargs)
class FakeAutoScalePolicy(AutoScalePolicy):
def __init__(self, *args, **kwargs):
super(FakeAutoScalePolicy, self).__init__(*args, **kwargs)
self.id = utils.random_ascii()
class FakeAutoScaleWebhook(AutoScaleWebhook):
def __init__(self, *args, **kwargs):
super(FakeAutoScaleWebhook, self).__init__(*args, **kwargs)
self.id = utils.random_ascii()
class FakeScalingGroupManager(ScalingGroupManager):
def __init__(self, api=None, *args, **kwargs):
if api is None:
api = FakeAutoScaleClient()
super(FakeScalingGroupManager, self).__init__(api, *args, **kwargs)
self.id = utils.random_ascii()
class FakeScalingGroup(ScalingGroup):
def __init__(self, name=None, info=None, *args, **kwargs):
name = name or utils.random_ascii()
info = info or {"fake": "fake", "scalingPolicies": []}
self.groupConfiguration = {}
super(FakeScalingGroup, self).__init__(name, info, *args, **kwargs)
self.id = utils.random_ascii()
self.name = name
self.manager = FakeScalingGroupManager()
class FakeCloudMonitorClient(CloudMonitorClient):
def __init__(self, *args, **kwargs):
ident = FakeIdentity()
super(FakeCloudMonitorClient, self).__init__(ident, "fakeuser",
"fakepassword", *args, **kwargs)
class FakeCloudMonitorEntity(CloudMonitorEntity):
def __init__(self, *args, **kwargs):
info = kwargs.pop("info", {"fake": "fake"})
info["id"] = utils.random_ascii()
super(FakeCloudMonitorEntity, self).__init__(FakeManager(), info=info,
*args, **kwargs)
self.manager.api = FakeCloudMonitorClient()
class FakeCloudMonitorCheck(CloudMonitorCheck):
def __init__(self, *args, **kwargs):
info = kwargs.pop("info", {"fake": "fake"})
entity = kwargs.pop("entity", None)
info["id"] = utils.random_ascii()
super(FakeCloudMonitorCheck, self).__init__(FakeManager(), info, *args,
**kwargs)
self.set_entity(entity)
self.id = uuid.uuid4()
class FakeCloudMonitorNotification(CloudMonitorNotification):
def __init__(self, *args, **kwargs):
info = kwargs.pop("info", {"fake": "fake"})
super(FakeCloudMonitorNotification, self).__init__(manager=None,
info=info, *args, **kwargs)
self.id = uuid.uuid4()
class FakeQueue(Queue):
def __init__(self, *args, **kwargs):
info = kwargs.pop("info", {"fake": "fake"})
info["name"] = utils.random_unicode()
mgr = kwargs.pop("manager", FakeQueueManager())
super(FakeQueue, self).__init__(manager=mgr, info=info, *args, **kwargs)
class FakeQueueClaim(QueueClaim):
def __init__(self, *args, **kwargs):
info = kwargs.pop("info", {"fake": "fake"})
info["name"] = utils.random_unicode()
mgr = kwargs.pop("manager", FakeQueueManager())
super(FakeQueueClaim, self).__init__(manager=mgr, info=info, *args,
**kwargs)
class FakeQueueClient(QueueClient):
def __init__(self, *args, **kwargs):
ident = FakeIdentity()
super(FakeQueueClient, self).__init__(ident, "fakeuser",
"fakepassword", *args, **kwargs)
class FakeQueueManager(QueueManager):
def __init__(self, api=None, *args, **kwargs):
if api is None:
api = FakeQueueClient()
super(FakeQueueManager, self).__init__(api, *args, **kwargs)
self.id = utils.random_ascii()
class FakeImage(Image):
def __init__(self, *args, **kwargs):
info = kwargs.pop("info", {"fake": "fake"})
info["name"] = utils.random_unicode()
info["id"] = utils.random_unicode()
mgr = kwargs.pop("manager", FakeImageManager())
kwargs["member_manager_class"] = FakeImageMemberManager
kwargs["tag_manager_class"] = FakeImageTagManager
super(FakeImage, self).__init__(mgr, info, *args, **kwargs)
class FakeImageClient(ImageClient):
def __init__(self, identity=None, *args, **kwargs):
if identity is None:
identity = FakeIdentity()
super(FakeImageClient, self).__init__(identity, "fakeuser",
"fakepassword", *args, **kwargs)
class FakeImageMemberManager(ImageMemberManager):
def __init__(self, api=None, *args, **kwargs):
if api is None:
api = FakeImageClient()
super(FakeImageMemberManager, self).__init__(api, *args, **kwargs)
self.id = utils.random_ascii()
class FakeImageTagManager(ImageTagManager):
def __init__(self, api=None, *args, **kwargs):
if api is None:
api = FakeImageClient()
super(FakeImageTagManager, self).__init__(api, *args, **kwargs)
self.id = utils.random_ascii()
class FakeImageManager(ImageManager):
def __init__(self, api=None, *args, **kwargs):
if api is None:
api = FakeImageClient()
super(FakeImageManager, self).__init__(api, *args, **kwargs)
self.plural_response_key = "images"
self.resource_class = FakeImage
self.id = utils.random_ascii()
class FakeIdentityService(Service):
def __init__(self, identity=None, *args, **kwargs):
self.identity = identity or FakeIdentity()
self.name = "fake"
self.prefix = ""
self.service_type = "fake"
self.clients = {}
self.endpoints = utils.DotDict()
class FakeEndpoint(Endpoint):
def __init__(self, ep_dict=None, service=None, region=None, identity=None):
if ep_dict is None:
ep_dict = {}
if identity is None:
identity = FakeIdentity()
if service is None:
service = FakeIdentityService(identity)
if region is None:
region = "fake_region"
super(FakeEndpoint, self).__init__(ep_dict, service, region, identity)
class FakeRaxIdentity(RaxIdentity):
pass
class FakeIdentity(BaseIdentity):
"""Class that returns canned authentication responses."""
def __init__(self, *args, **kwargs):
super(FakeIdentity, self).__init__(*args, **kwargs)
self._good_username = "fakeuser"
self._good_password = "fakeapikey"
self._default_region = random.choice(("DFW", "ORD"))
self.services = {"fake": FakeIdentityService(self)}
def authenticate(self, connect=False):
if ((self.username == self._good_username) and
(self.password == self._good_password)):
self._parse_response(self.fake_response())
self.authenticated = True
else:
self.authenticated = False
raise exc.AuthenticationFailed("No match for '%s'/'%s' "
"username/password" % (self.username, self.password))
def auth_with_token(self, token, tenant_id=None, tenant_name=None):
self.token = token
self.tenant_id = tenant_id
self.tenant_name = tenant_name
self.authenticated = True
def get_token(self, force=False):
return self.token
def fake_response(self):
return fake_identity_response
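# Usage sketch (added note, assuming BaseIdentity accepts username/password keywords):
#   ident = FakeIdentity(username="fakeuser", password="fakeapikey")
#   ident.authenticate()   # parses fake_identity_response and sets authenticated = True
#   FakeIdentity(username="x", password="y").authenticate()  # raises AuthenticationFailed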
fake_config_file = """[settings]
identity_type = rackspace
keyring_username =
region = FAKE
custom_user_agent = FAKE
http_debug =
"""
# This will handle both singular and plural responses.
fake_identity_user_response = {
"users": [{"name": "fake", "id": "fake"},
{"name": "faker", "id": "faker"}],
"user": {"name": "fake", "id": "fake"},
"roles": [{u'description': 'User Admin Role.',
'id': '3',
'name': 'identity:user-admin'}],
}
fake_identity_tenant_response = {"name": "fake", "id": "fake",
"description": "fake", "enabled": True}
fake_identity_tenants_response = {
"tenants": [
{"name": "fake", "id": "fake", "description": "fake",
"enabled": True},
{"name": "faker", "id": "faker", "description": "faker",
"enabled": True},
]}
fake_identity_tokens_response = {"access":
{'metadata': {u'is_admin': 0,
'roles': [u'asdfgh',
'sdfghj',
'dfghjk']},
'serviceCatalog': [{u'endpoints': [
{u'adminURL': 'http://10.0.0.0:8774/v2/qweqweqwe',
'id': 'dddddddddd',
'publicURL': 'http://10.0.0.0:8774/v2/qweqweqwe',
'internalURL': 'http://10.0.0.0:8774/v2/qweqweqwe',
'region': 'some_region'}],
'endpoints_links': [],
'name': 'nova',
'type': 'compute'},
{u'endpoints': [{u'adminURL': 'http://10.0.0.0:35357/v2.0',
'id': 'qweqweqwe',
'internalURL': 'http://10.0.0.0:5000/v2.0',
'publicURL': 'http://10.0.0.0:5000/v2.0',
'region': 'some_region'}],
'endpoints_links': [],
'name': 'keystone',
'type': 'identity'}],
'token': {u'expires': '1999-05-04T16:45:05Z',
'id': 'qweqweqwe',
'tenant': {u'description': 'admin Tenant',
'enabled': True,
'id': 'qweqweqwe',
'name': 'admin'}},
'user': {u'id': 'qweqweqwe',
'name': 'admin',
'roles': [{u'id': 'qweqweqwe', 'name': 'admin'},
{u'id': 'qweqweqwe', 'name': 'KeystoneAdmin'},
{u'id': 'qweqweqwe',
'name': 'KeystoneServiceAdmin'}],
'roles_links': [],
'username': 'admin'}}}
fake_identity_endpoints_response = {"access": {
"endpoints": ["fake", "faker", "fakest"]}}
fake_identity_response = {u'access':
{u'serviceCatalog': [
{u'endpoints': [{u'publicURL':
'https://ord.loadbalancers.api.rackspacecloud.com/v1.0/000000',
'region': 'ORD',
'tenantId': '000000'},
{u'publicURL':
'https://dfw.loadbalancers.api.rackspacecloud.com/v1.0/000000',
'region': 'DFW',
'tenantId': '000000'},
{u'publicURL':
'https://syd.loadbalancers.api.rackspacecloud.com/v1.0/000000',
'region': 'SYD',
'tenantId': '000000'}],
'name': 'cloudLoadBalancers',
'type': 'rax:load-balancer'},
{u'endpoints': [{u'internalURL':
'https://snet-aa.fake1.clouddrive.com/v1/MossoCloudFS_abc',
'publicURL': 'https://aa.fake1.clouddrive.com/v1/MossoCloudFS_abc',
'region': 'FAKE',
'tenantId': 'MossoCloudFS_abc'},
{u'internalURL':
'https://snet-aa.dfw1.clouddrive.com/v1/MossoCloudFS_abc',
'publicURL': 'https://aa.dfw1.clouddrive.com/v1/MossoCloudFS_abc',
'region': 'DFW',
'tenantId': 'MossoCloudFS_abc'},
{u'internalURL':
'https://snet-aa.ord1.clouddrive.com/v1/MossoCloudFS_abc',
'publicURL': 'https://aa.ord1.clouddrive.com/v1/MossoCloudFS_abc',
'region': 'ORD',
'tenantId': 'MossoCloudFS_abc'},
{u'internalURL':
'https://snet-aa.syd1.clouddrive.com/v1/MossoCloudFS_abc',
'publicURL': 'https://aa.ord1.clouddrive.com/v1/MossoCloudFS_abc',
'region': 'SYD',
'tenantId': 'MossoCloudFS_abc'}],
'name': 'cloudFiles',
'type': 'object-store'},
{u'endpoints': [{u'publicURL':
'https://dfw.servers.api.rackspacecloud.com/v2/000000',
'region': 'DFW',
'tenantId': '000000',
'versionId': '2',
'versionInfo': 'https://dfw.servers.api.rackspacecloud.com/v2',
'versionList': 'https://dfw.servers.api.rackspacecloud.com/'},
{u'publicURL':
'https://ord.servers.api.rackspacecloud.com/v2/000000',
'region': 'ORD',
'tenantId': '000000',
'versionId': '2',
'versionInfo': 'https://ord.servers.api.rackspacecloud.com/v2',
'versionList': 'https://ord.servers.api.rackspacecloud.com/'},
{u'publicURL':
'https://syd.servers.api.rackspacecloud.com/v2/000000',
'region': 'SYD',
'tenantId': '000000',
'versionId': '2',
'versionInfo': 'https://syd.servers.api.rackspacecloud.com/v2',
'versionList': 'https://syd.servers.api.rackspacecloud.com/'}],
'name': 'cloudServersOpenStack',
'type': 'compute'},
{u'endpoints': [{u'publicURL':
'https://dns.api.rackspacecloud.com/v1.0/000000',
'tenantId': '000000'}],
'name': 'cloudDNS',
'type': 'rax:dns'},
{u'endpoints': [{u'publicURL':
'https://dfw.databases.api.rackspacecloud.com/v1.0/000000',
'region': 'DFW',
'tenantId': '000000'},
{u'publicURL':
'https://syd.databases.api.rackspacecloud.com/v1.0/000000',
'region': 'SYD',
'tenantId': '000000'},
{u'publicURL':
'https://ord.databases.api.rackspacecloud.com/v1.0/000000',
'region': 'ORD',
'tenantId': '000000'}],
'name': 'cloudDatabases',
'type': 'rax:database'},
{u'endpoints': [{u'publicURL':
'https://servers.api.rackspacecloud.com/v1.0/000000',
'tenantId': '000000',
'versionId': '1.0',
'versionInfo': 'https://servers.api.rackspacecloud.com/v1.0',
'versionList': 'https://servers.api.rackspacecloud.com/'}],
'name': 'cloudServers',
'type': 'compute'},
{u'endpoints': [{u'publicURL':
'https://cdn1.clouddrive.com/v1/MossoCloudFS_abc',
'region': 'DFW',
'tenantId': 'MossoCloudFS_abc'},
{u'publicURL': 'https://cdn1.clouddrive.com/v1/MossoCloudFS_abc',
'region': 'FAKE',
'tenantId': 'MossoCloudFS_abc'},
{u'publicURL': 'https://cdn1.clouddrive.com/v1/MossoCloudFS_abc',
'region': 'SYD',
'tenantId': 'MossoCloudFS_abc'},
{u'publicURL': 'https://cdn2.clouddrive.com/v1/MossoCloudFS_abc',
'region': 'ORD',
'tenantId': 'MossoCloudFS_abc'}],
'name': 'cloudFilesCDN',
'type': 'rax:object-cdn'},
{u'endpoints': [{u'publicURL':
'https://monitoring.api.rackspacecloud.com/v1.0/000000',
'tenantId': '000000'}],
'name': 'cloudMonitoring',
'type': 'rax:monitor'}],
u'token': {u'expires': '2222-02-22T22:22:22.000-02:00',
'id': 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx',
'tenant': {u'id': '000000', 'name': '000000'}},
u'user': {u'id': '123456',
'name': 'fakeuser',
'RAX-AUTH:defaultRegion': 'DFW',
'roles': [{u'description': 'User Admin Role.',
'id': '3',
'name': 'identity:user-admin'}],
}}}
class FakeIdentityResponse(FakeResponse):
status_code = 200
response_type = "auth"
responses = {"auth": fake_identity_response,
"users": fake_identity_user_response,
"tenant": fake_identity_tenant_response,
"tenants": fake_identity_tenants_response,
"tokens": fake_identity_tokens_response,
"endpoints": fake_identity_endpoints_response,
}
@property
def content(self):
return self.responses.get(self.response_type)
def json(self):
return self.content
def read(self):
return json.dumps(self.content)
| 33.554576
| 80
| 0.630676
| 19,387
| 0.637018
| 0
| 0
| 492
| 0.016166
| 0
| 0
| 6,289
| 0.206644
|
022be07ba133b6de16720dad8708b355fc237656
| 2,869
|
py
|
Python
|
ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/alerts/alert_logfeeder.py
|
likenamehaojie/Apache-Ambari-ZH
|
5973025bd694cdbb4b49fb4c4e0d774782811ff6
|
[
"Apache-2.0"
] | 25
|
2019-12-04T03:09:55.000Z
|
2022-03-08T10:52:06.000Z
|
ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/alerts/alert_logfeeder.py
|
likenamehaojie/Apache-Ambari-ZH
|
5973025bd694cdbb4b49fb4c4e0d774782811ff6
|
[
"Apache-2.0"
] | 29
|
2019-12-04T03:00:39.000Z
|
2022-03-02T06:25:44.000Z
|
ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/alerts/alert_logfeeder.py
|
likenamehaojie/Apache-Ambari-ZH
|
5973025bd694cdbb4b49fb4c4e0d774782811ff6
|
[
"Apache-2.0"
] | 33
|
2019-12-04T02:51:30.000Z
|
2022-03-24T02:47:38.000Z
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import socket
from resource_management.libraries.functions.check_process_status import check_process_status
from resource_management.core.exceptions import ComponentIsNotRunning
RESULT_CODE_OK = 'OK'
RESULT_CODE_CRITICAL = 'CRITICAL'
RESULT_CODE_UNKNOWN = 'UNKNOWN'
LOGFEEDER_PID_DIR = '{{logfeeder-env/logfeeder_pid_dir}}'
def get_tokens():
"""
Returns a tuple of tokens in the format {{site/property}} that will be used
to build the dictionary passed into execute
"""
return (LOGFEEDER_PID_DIR,)
def is_logfeeder_process_live(pid_file):
"""
Gets whether the LogSearch Logfeeder represented by the specified file is running.
:param pid_file: the PID file of the Logfeeder to check
:return: True if the Logfeeder is running, False otherwise
"""
live = False
try:
check_process_status(pid_file)
live = True
except ComponentIsNotRunning:
pass
return live
def execute(configurations={}, parameters={}, host_name=None):
"""
Returns a tuple containing the result code and a pre-formatted result label
Keyword arguments:
configurations (dictionary): a mapping of configuration key to value
parameters (dictionary): a mapping of script parameter key to value
host_name (string): the name of this host where the alert is running
"""
if configurations is None:
return (RESULT_CODE_UNKNOWN, ['There were no configurations supplied to the script.'])
if set([LOGFEEDER_PID_DIR]).issubset(configurations):
LOGFEEDER_PID_PATH = os.path.join(configurations[LOGFEEDER_PID_DIR], 'logfeeder.pid')
else:
return (RESULT_CODE_UNKNOWN, ['The logfeeder_pid_dir is a required parameter.'])
if host_name is None:
host_name = socket.getfqdn()
logfeeder_process_running = is_logfeeder_process_live(LOGFEEDER_PID_PATH)
alert_state = RESULT_CODE_OK if logfeeder_process_running else RESULT_CODE_CRITICAL
alert_label = 'LogFeeder is running on {0}' if logfeeder_process_running else 'LogFeeder is NOT running on {0}'
alert_label = alert_label.format(host_name)
return (alert_state, [alert_label])
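# Example invocation (added note; the pid dir path is hypothetical):
#   state, labels = execute(
#       configurations={LOGFEEDER_PID_DIR: '/var/run/ambari-logsearch-logfeeder'})
# state is 'OK' with label 'LogFeeder is running on <fqdn>' when logfeeder.pid
# points at a live process, and 'CRITICAL' otherwise.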
| 33.752941
| 113
| 0.776577
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,690
| 0.589055
|
022d316f00567159f07f5f66967da1595528de9a
| 3,589
|
py
|
Python
|
hack/scripts/ca_metrics_parser.py
|
nicdoye/autoscaler
|
ebadbda2b2417d7da6147fbc0c1b39f7f55aff22
|
[
"Apache-2.0"
] | 17
|
2018-09-14T10:31:43.000Z
|
2021-09-14T08:47:34.000Z
|
hack/scripts/ca_metrics_parser.py
|
nicdoye/autoscaler
|
ebadbda2b2417d7da6147fbc0c1b39f7f55aff22
|
[
"Apache-2.0"
] | 12
|
2019-01-09T10:34:06.000Z
|
2022-03-24T08:37:25.000Z
|
hack/scripts/ca_metrics_parser.py
|
nicdoye/autoscaler
|
ebadbda2b2417d7da6147fbc0c1b39f7f55aff22
|
[
"Apache-2.0"
] | 3
|
2019-05-06T14:51:10.000Z
|
2020-12-22T14:03:43.000Z
|
#!/usr/bin/env python
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This script parses metrics from Cluster Autoscaler e2e tests.
'''
from __future__ import division
from __future__ import print_function
import argparse
import json
class CAMetric(object):
def __init__(self, function_name):
self.function_name = function_name
self.sum = 0.0
        self.avg = 0.0  # read back as self.avg in print() and filled in by parse_metrics_file
self.buckets = []
self.count = 0
self.upper_bound = 0.0
def print(self):
        print(self.function_name, '\t', self.sum, '\t', self.count, '\t',
              self.avg, '\t', self.upper_bound)
print(self.buckets)
def print_summary(summary):
print('function_name\t sum\t count\t avg\t upper_bound')
print('buckets')
for metric in summary.values():
metric.print()
def function_name(sample):
return sample['metric']['function']
def metric_value(sample):
return sample['value'][1]
def upper_bound(buckets):
'''
Going from the rightmost bucket, find the first one that has some samples
and return its upper bound.
'''
    for i in range(len(buckets) - 1, -1, -1):
le, count = buckets[i]
if i == 0:
return le
else:
le_prev, count_prev = buckets[i-1]
if count_prev < count:
return le
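# Worked example (added note): with cumulative buckets
# [(0.1, 0), (0.5, 3), (1.0, 3)] the count stops growing after the 0.5 bucket,
# so every sample fell below 0.5 and upper_bound() returns 0.5.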
def parse_metrics_file(metrics_file):
'''
Return interesting metrics for all Cluster Autoscaler functions.
    Metrics are stored in a map keyed by function name and are expressed in
    seconds. They include
    * sum of all samples
    * count of samples
* average value of samples
* upper bound - all collected samples were smaller than this value
* buckets - list of tuples (# of samples, bucket upper bound)
'''
    summary = {}
    with open(metrics_file) as metrics_file:
metrics = json.load(metrics_file)
ca_metrics = metrics['ClusterAutoscalerMetrics']
total_sum = ca_metrics['cluster_autoscaler_function_duration_seconds_sum']
for sample in total_sum:
function = function_name(sample)
summary[function] = CAMetric(function)
summary[function].sum = float(metric_value(sample))
count = ca_metrics['cluster_autoscaler_function_duration_seconds_count']
for sample in count:
function = function_name(sample)
summary[function].count = int(metric_value(sample))
summary[function].avg = summary[function].sum / summary[function].count
buckets = ca_metrics['cluster_autoscaler_function_duration_seconds_bucket']
for sample in buckets:
function = function_name(sample)
summary[function].buckets.append(
(float(sample['metric']['le']), int(metric_value(sample))))
for value in summary.values():
value.upper_bound = upper_bound(value.buckets)
return summary
def main():
parser = argparse.ArgumentParser(description='Parse metrics from Cluster Autoscaler e2e test')
parser.add_argument('metrics_file', help='File to read metrics from')
args = parser.parse_args()
summary = parse_metrics_file(args.metrics_file)
print_summary(summary)
if __name__ == '__main__':
main()
| 28.712
| 96
| 0.713291
| 365
| 0.1017
| 0
| 0
| 0
| 0
| 0
| 0
| 1,551
| 0.432154
|
022e461176e9788379dfe2431986a89fcba4d6ae
| 2,631
|
py
|
Python
|
tests/test_cli.py
|
dls-controls/tickit
|
00bb013e69674bcfe4926f365ecb3c65c080abe8
|
[
"Apache-2.0"
] | 4
|
2021-09-16T13:35:33.000Z
|
2022-02-01T23:35:53.000Z
|
tests/test_cli.py
|
dls-controls/tickit
|
00bb013e69674bcfe4926f365ecb3c65c080abe8
|
[
"Apache-2.0"
] | 46
|
2021-09-16T13:44:58.000Z
|
2022-02-02T13:42:56.000Z
|
tests/test_cli.py
|
dls-controls/tickit
|
00bb013e69674bcfe4926f365ecb3c65c080abe8
|
[
"Apache-2.0"
] | null | null | null |
from typing import Iterable
import pytest
from click.testing import CliRunner, Result
from mock import Mock, patch
from mock.mock import create_autospec
from tickit.cli import main
from tickit.core.components.component import ComponentConfig
from tickit.core.management.schedulers.master import MasterScheduler
from tickit.core.typedefs import ComponentID, ComponentPort, PortID
@pytest.fixture
def patch_logging() -> Iterable[Mock]:
with patch("tickit.cli.logging", autospec=True) as mock:
yield mock
@pytest.fixture
def patch_run_all_forever() -> Iterable[Mock]:
with patch("tickit.cli.run_all_forever", autospec=True) as mock:
yield mock
@pytest.fixture
def patch_asyncio() -> Iterable[Mock]:
with patch("tickit.cli.asyncio", autospec=True) as mock:
yield mock
@pytest.fixture
def patch_read_configs() -> Iterable[Mock]:
with patch("tickit.cli.read_configs", autospec=True) as mock:
mock_config = create_autospec(ComponentConfig, instance=True)
mock_config.name = "fake_device"
mock_config.inputs = {
PortID("42"),
ComponentPort(ComponentID("foo"), PortID("24")),
}
mock.return_value = [mock_config]
yield mock
def test_cli_set_logging_level(patch_logging):
runner: CliRunner = CliRunner()
result: Result = runner.invoke(main, args=["--log-level", "INFO"])
assert result.exit_code == 0
patch_logging.basicConfig.assert_called_with(level="INFO")
def test_component_command(
patch_run_all_forever,
patch_read_configs,
):
runner: CliRunner = CliRunner()
result: Result = runner.invoke(
main, args=["component", "fake_device", "path/to/fake_device.yaml"]
)
assert result.exit_code == 0
patch_run_all_forever.assert_called_once()
@pytest.fixture
def patch_master_scheduler_run_forever_method() -> Iterable[Mock]:
with patch.object(MasterScheduler, "run_forever", autospec=True) as mock:
yield mock
def test_scheduler(
patch_read_configs: Mock,
patch_master_scheduler_run_forever_method: Mock,
):
runner: CliRunner = CliRunner()
result: Result = runner.invoke(main, args=["scheduler", "path/to/fake_device.yaml"])
assert result.exit_code == 0
patch_master_scheduler_run_forever_method.assert_awaited_once()
def test_all(patch_read_configs, patch_master_scheduler_run_forever_method):
runner: CliRunner = CliRunner()
result: Result = runner.invoke(main, args=["all", "path/to/fake_device.yaml"])
assert result.exit_code == 0
patch_master_scheduler_run_forever_method.assert_awaited_once()
| 27.989362
| 88
| 0.72824
| 0
| 0
| 942
| 0.358039
| 1,022
| 0.388445
| 0
| 0
| 275
| 0.104523
|
022e5e8924eb3bc3c0fcb9bc827782f367ea128d
| 565
|
py
|
Python
|
homework5/app/config.py
|
sakost/tinkoff_fintech
|
64b9d5a2a818b4db7c438b0dc53a8f31882f95ba
|
[
"MIT"
] | null | null | null |
homework5/app/config.py
|
sakost/tinkoff_fintech
|
64b9d5a2a818b4db7c438b0dc53a8f31882f95ba
|
[
"MIT"
] | null | null | null |
homework5/app/config.py
|
sakost/tinkoff_fintech
|
64b9d5a2a818b4db7c438b0dc53a8f31882f95ba
|
[
"MIT"
] | 2
|
2021-08-29T15:01:39.000Z
|
2022-02-23T18:48:21.000Z
|
from typing import Any
from pydantic import BaseSettings
from .utils import singleton_cache
class Settings(BaseSettings):
TESTING: bool = False
SQLALCHEMY_DATABASE_URI: str = 'sqlite:///db.sqlite3'
FIRST_SUPERUSER: str = 'admin'
FIRST_SUPERUSER_PASSWORD: str = 'admin'
FIRST_SUPERUSER_ROLE: str = 'superuser'
USER_ROLE_NAME = 'user'
OBJECTS_PER_PAGE: int = 100
class Config:
case_sensitive = True
env_file = '.env'
@singleton_cache
def get_settings(**kwargs: Any) -> Settings:
return Settings(**kwargs)
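# Usage sketch (added note, assuming singleton_cache memoizes the first call):
#   settings = get_settings()
#   assert get_settings() is settings  # later calls reuse the cached Settings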
| 20.178571
| 57
| 0.699115
| 374
| 0.661947
| 0
| 0
| 91
| 0.161062
| 0
| 0
| 59
| 0.104425
|
022f6a23b370efd01d97a4fc32d332f4e763d78f
| 2,158
|
py
|
Python
|
nabu/story.py
|
sterlingbaldwin/nabu
|
6f19a1b237cdab6ff2179c952f41e239e1a0a3e8
|
[
"MIT"
] | null | null | null |
nabu/story.py
|
sterlingbaldwin/nabu
|
6f19a1b237cdab6ff2179c952f41e239e1a0a3e8
|
[
"MIT"
] | 1
|
2022-02-14T12:15:45.000Z
|
2022-02-14T12:15:45.000Z
|
nabu/story.py
|
sterlingbaldwin/nabu
|
6f19a1b237cdab6ff2179c952f41e239e1a0a3e8
|
[
"MIT"
] | null | null | null |
from typing import ChainMap
import yaml
from pathlib import Path
from jinja2 import Template
from weasyprint import HTML, CSS
# from xhtml2pdf import pisa
class Story():
def __init__(self, story_path, story_name, template_path, *args, **kwargs):
self.template_path = template_path
self.story_path = story_path
self.story_name = story_name
def render(self, outpath):
# first render the html
with open(self.template_path, "r") as fp:
story_template = Template(fp.read())
with open(Path(self.story_path, "story.yaml")) as fp:
story_data = yaml.load(fp, Loader=yaml.SafeLoader)
# replace the image paths with the full path
img_path = Path(self.story_path, 'img')
for chapter in story_data['chapters']:
if 'image' in chapter:
chapter['image'] = Path(img_path, chapter['image'])
if 'mini_image' in chapter:
chapter['mini_image'] = Path(img_path, chapter['mini_image'])
for page in chapter['pages']:
if 'image' in page:
page['image'] = Path(img_path, page['image'])
story_html = story_template.render(
cover_image=Path(img_path, story_data["cover_image"]),
title=story_data["title"],
author_name=story_data["author_name"],
author_contact=story_data["author_contact"],
chapters=story_data["chapters"])
with open(Path(self.story_path, f"{self.story_name}.html"), 'w') as fp:
fp.write(story_html)
# now render the css
css_path = Path("nabu", "styles", "default.css.jinja")
with open(css_path, "r") as fp:
css_template = Template(fp.read())
story_css = css_template.render(
chapters=story_data["chapters"])
with open(Path(self.story_path, f"{self.story_name}.css"), 'w') as fp:
fp.write(story_css)
# finally, write out the pdf
h = HTML(string=story_html)
c = CSS(string=story_css)
h.write_pdf(outpath, stylesheets=[c])
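# Usage sketch (added note; paths are hypothetical). render() expects a story.yaml
# and an img/ folder under story_path, and writes the html, css and pdf outputs:
#   story = Story('stories/demo', 'demo', 'templates/story.html.jinja')
#   story.render('stories/demo/demo.pdf')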
| 37.206897
| 79
| 0.598239
| 2,000
| 0.926784
| 0
| 0
| 0
| 0
| 0
| 0
| 418
| 0.193698
|
022fd56061f4a128f54c059a42d1bbaadf434720
| 322
|
py
|
Python
|
src/homework/models/__init__.py
|
nvo87/education-backend
|
1f008bd396b5dde4483af611532826a9bca9fef5
|
[
"MIT"
] | 62
|
2021-09-22T18:38:26.000Z
|
2022-03-29T06:09:42.000Z
|
src/homework/models/__init__.py
|
nvo87/education-backend
|
1f008bd396b5dde4483af611532826a9bca9fef5
|
[
"MIT"
] | 50
|
2021-09-16T07:17:31.000Z
|
2022-03-26T12:06:58.000Z
|
src/homework/models/__init__.py
|
nvo87/education-backend
|
1f008bd396b5dde4483af611532826a9bca9fef5
|
[
"MIT"
] | 16
|
2021-10-17T17:43:31.000Z
|
2022-03-26T11:22:45.000Z
|
from homework.models.answer import Answer
from homework.models.answer_access_log_entry import AnswerAccessLogEntry
from homework.models.answer_cross_check import AnswerCrossCheck
from homework.models.question import Question
__all__ = [
'Answer',
'AnswerAccessLogEntry',
'AnswerCrossCheck',
'Question',
]
| 26.833333
| 72
| 0.801242
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 58
| 0.180124
|
0230ced77fc05cfeb2ad94e5f316982b5ce418ba
| 1,650
|
py
|
Python
|
second workout/8B/A.py
|
paktusov/algorithms
|
b21e7ead2325f77a606dc53495866e359f2e24fe
|
[
"BSD-3-Clause"
] | null | null | null |
second workout/8B/A.py
|
paktusov/algorithms
|
b21e7ead2325f77a606dc53495866e359f2e24fe
|
[
"BSD-3-Clause"
] | null | null | null |
second workout/8B/A.py
|
paktusov/algorithms
|
b21e7ead2325f77a606dc53495866e359f2e24fe
|
[
"BSD-3-Clause"
] | null | null | null |
def add(tree, x):
if not tree:
tree.extend([x, None, None])
print('DONE')
return
key = tree[0]
if x == key:
print('ALREADY')
elif x < key:
left = tree[1]
        if left is None:
tree[1] = [x, None, None]
print('DONE')
else:
add(left, x)
elif x > key:
right = tree[2]
        if right is None:
tree[2] = [x, None, None]
print('DONE')
else:
add(right, x)
def find(tree, x):
if not tree:
return False
key = tree[0]
if x == key:
return True
elif x < key:
left = tree[1]
        if left is None:
return False
else:
return find(left, x)
elif x > key:
right = tree[2]
        if right is None:
return False
else:
return find(right, x)
def printtree(tree, count=0):
# if not tree:
# return
if tree[1]:
printtree(tree[1], count + 1)
print(f"{''.join('.' * count)}{tree[0]}")
if tree[2]:
printtree(tree[2], count + 1)
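# Worked example (added note): after ADD 5, ADD 3, ADD 7 the nested-list tree is
# [5, [3, None, None], [7, None, None]] and PRINTTREE prints ".3", "5", ".7",
# one node per line with depth shown as leading dots.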
tree = []
with open('input.txt', 'r', encoding='utf-8') as file:
string = file.readline().strip()
while string != '':
        line = string.split()
if line[0] == 'ADD':
add(tree, int(line[1]))
elif line[0] == 'SEARCH':
if find(tree, int(line[1])):
print('YES')
else:
print('NO')
elif line[0] == 'PRINTTREE':
printtree(tree)
string = file.readline().strip()
| 21.710526
| 54
| 0.434545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 149
| 0.090303
|
023179993902aa78bcb94918909fb230bdfcaedd
| 5,502
|
py
|
Python
|
fewshot/clis/score_simple.py
|
armancohan/flex
|
2a005fd18f522d2667421f170568df1164a73c3a
|
[
"Apache-2.0"
] | 63
|
2021-07-01T23:40:55.000Z
|
2022-03-15T21:56:57.000Z
|
fewshot/clis/score_simple.py
|
armancohan/flex
|
2a005fd18f522d2667421f170568df1164a73c3a
|
[
"Apache-2.0"
] | 1
|
2022-03-04T11:15:55.000Z
|
2022-03-28T09:33:54.000Z
|
fewshot/clis/score_simple.py
|
armancohan/flex
|
2a005fd18f522d2667421f170568df1164a73c3a
|
[
"Apache-2.0"
] | 3
|
2021-07-31T05:06:14.000Z
|
2022-02-28T12:45:06.000Z
|
import json
from typing import TextIO
from functools import partial
import click
import numpy as np
from scipy.stats import sem
import pandas as pd
from fewshot.bootstrap import bootstrap
from fewshot.bootstrap import ci
from fewshot.challenges.utils import get_gold_dataset
from . import score_utils as su
def statistics(a, estimator=np.mean, conf_interval=95, n_boot=1000, seed=0):
"""With 95% CI"""
[ci_lower, ci_upper] = ci(
bootstrap(
a,
func=estimator,
n_boot=n_boot,
seed=seed,
),
conf_interval
)
stat = estimator(a)
return {
'stat': stat,
'stat_ci_lower': stat - ci_lower,
'stat_ci_upper': ci_upper - stat,
'stat_ci_sem': sem(a, ddof=1) * 1.96,
'std': np.std(a),
'n': len(a),
}
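# Illustration (added note): statistics(np.array([0.4, 0.5, 0.6])) returns a dict
# with 'stat' == 0.5 (the mean), bootstrap CI offsets from the project's
# bootstrap/ci helpers, a 1.96 * SEM half-width under 'stat_ci_sem', the
# population std, and 'n' == 3.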
@click.command()
@click.option('--challenge_name', type=click.STRING, required=True)
@click.option(
'--predictions',
type=click.File('r'),
help='Path to the file containing system predictions',
required=True,
)
@click.option(
'--output',
'-o',
type=click.File('w'),
help='Output results to this file.',
)
@click.option('--by_way_shot', is_flag=True, default=False)
@click.option('--by_few', is_flag=True, default=False)
@click.option('--for_leaderboard', is_flag=True, default=False)
def score(
challenge_name: str,
predictions: TextIO,
output: TextIO,
by_way_shot: bool,
by_few: bool,
for_leaderboard: bool,
):
"""Score a predictions.json file."""
gold_data = pd.DataFrame(get_gold_dataset(challenge_name))
joined_data = su.join_predictions_and_gold(
predictions=predictions,
gold_data=gold_data,
)
df, metrics = su.score_joined_data(data=joined_data)
if by_way_shot:
df['shot'] = df.apply(lambda row: str(int(row['n_train'] / row['way']))
if row['balanced_train'] else '', axis=1)
grouped = df.groupby(by=['dataset', 'way', 'shot'])['accuracy'].apply(partial(statistics, estimator=np.mean))
grouped.index = grouped.index.set_names('stat', level=3)
res = grouped
elif by_few or for_leaderboard:
df['few'] = df['n_train'].map(lambda v: v > 0)
grouped = df.groupby(by=['dataset', 'few'])['accuracy'].apply(partial(statistics, estimator=np.mean))
grouped.index = grouped.index.set_names('stat', level=2)
ways = df.groupby(by=['dataset', 'few'])['way'].apply(lambda x: '/'.join(str(i) for i in sorted(x.unique())))
res = pd.merge(
grouped.reset_index(),
ways.reset_index(),
on=['dataset', 'few']
).set_index(['dataset', 'way', 'few', 'stat'])
else:
grouped = df.groupby(by=['dataset'])['accuracy'].apply(partial(statistics, estimator=np.mean))
means = grouped.xs('stat', level=1)
stds = grouped.xs('std', level=1)
cis_upper = grouped.xs('stat_ci_upper', level=1)
cis_lower = grouped.xs('stat_ci_lower', level=1)
cis_lower.index = cis_lower.index + '_acc_ci_lower'
cis_upper.index = cis_upper.index + '_acc_ci_upper'
means.index = means.index + '_acc'
stds.index = stds.index + '_acc_std'
res = pd.concat([means, cis_upper, cis_lower, stds], axis=0)
res.loc['overall_acc'] = means.mean()
res.loc['overall_acc_std'] = stds.mean()
if for_leaderboard:
res = res.reset_index()
res['few_string'] = res['few'].map(lambda v: 'few' if v else '0')
res['name'] = res['dataset'] + '-' + res['few_string']
accuracies = res[res.stat == 'stat']
overall_0_acc = accuracies[~accuracies.few]['accuracy'].mean()
overall_few_acc = accuracies[accuracies.few]['accuracy'].mean()
accuracies = accuracies.append([
{'name': 'overall-0', 'accuracy': overall_0_acc},
{'name': 'overall-few', 'accuracy': overall_few_acc},
{'name': 'overall', 'accuracy': 0.5 * (overall_0_acc + overall_few_acc)},
])
uppers = res[res.stat == 'stat_ci_upper']
uppers = uppers.assign(name=lambda x: x['name'] + '_ci_upper')
lowers = res[res.stat == 'stat_ci_lower']
lowers = lowers.assign(name=lambda x: x['name'] + '_ci_lower')
stds = res[res.stat == 'std']
stds = stds.assign(name=lambda x: x['name'] + '_std')
res = pd.concat([accuracies, uppers, lowers, stds], axis=0)
res = res[['name', 'accuracy']].set_index('name')
res = res['accuracy']
print(type(res))
if output:
if for_leaderboard:
# Add episode-level accuracy values under 'episode_accuracies' key
res = json.loads(res.to_json())
grouped = (
df.groupby(by=['few', 'dataset'])[['task_id', 'accuracy']]
.apply(lambda x: x.sort_values('task_id')['accuracy'].tolist())
.reset_index(name='accuracies')
)
grouped['few_string'] = grouped['few'].map(lambda v: 'few' if v else '0')
grouped['name'] = grouped['dataset'] + '-' + grouped['few_string']
res['episode_accuracies'] = grouped.set_index('name')[['accuracies']].to_dict()['accuracies']
json.dump(res, output)
elif output.name.endswith('.json'):
res.to_json(output)
else:
res.to_csv(output)
else:
pd.set_option("display.max_rows", None)
print(res.sort_index())
| 38.746479
| 117
| 0.596692
| 0
| 0
| 0
| 0
| 4,667
| 0.848237
| 0
| 0
| 1,135
| 0.206289
|
0232a5792f409bc2541863dd10af6a3d5b55632c
| 1,196
|
py
|
Python
|
KWS/Dissection/tf_mfcc_from_log_mel_spectrogram_sample.py
|
xrick/gotek_smic
|
7655b6d7415b23c35810b8db48af7424f7dcdb06
|
[
"MIT"
] | null | null | null |
KWS/Dissection/tf_mfcc_from_log_mel_spectrogram_sample.py
|
xrick/gotek_smic
|
7655b6d7415b23c35810b8db48af7424f7dcdb06
|
[
"MIT"
] | null | null | null |
KWS/Dissection/tf_mfcc_from_log_mel_spectrogram_sample.py
|
xrick/gotek_smic
|
7655b6d7415b23c35810b8db48af7424f7dcdb06
|
[
"MIT"
] | null | null | null |
import tensorflow as tf  # needed for the tf.* calls below
batch_size, num_samples, sample_rate = 32, 32000, 16000.0
# A Tensor of [batch_size, num_samples] mono PCM samples in the range [-1, 1].
pcm = tf.random.normal([batch_size, num_samples], dtype=tf.float32)
# A 1024-point STFT with frames of 64 ms and 75% overlap.
stfts = tf.signal.stft(pcm, frame_length=1024, frame_step=256,
fft_length=1024)
spectrograms = tf.abs(stfts)
# Warp the linear scale spectrograms into the mel-scale.
num_spectrogram_bins = stfts.shape[-1].value
lower_edge_hertz, upper_edge_hertz, num_mel_bins = 80.0, 7600.0, 80
linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(
num_mel_bins, num_spectrogram_bins, sample_rate, lower_edge_hertz,
upper_edge_hertz)
mel_spectrograms = tf.tensordot(
spectrograms, linear_to_mel_weight_matrix, 1)
mel_spectrograms.set_shape(spectrograms.shape[:-1].concatenate(
linear_to_mel_weight_matrix.shape[-1:]))
# Compute a stabilized log to get log-magnitude mel-scale spectrograms.
log_mel_spectrograms = tf.math.log(mel_spectrograms + 1e-6)
# Compute MFCCs from log_mel_spectrograms and take the first 13.
mfccs = tf.signal.mfccs_from_log_mel_spectrograms(
log_mel_spectrograms)[..., :13]
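# --- Editor's note (hedged, not in the original sample) ---
# `stfts.shape[-1].value` above is TF1-style; under TF2, `stfts.shape[-1]` is
# already an int, so `.value` should be dropped there. A quick sanity check
# of the expected shapes, assuming the parameters used above:
#
# frames = 1 + (num_samples - 1024) // 256   # -> 122 STFT frames
# print(mfccs.shape)                         # -> (32, 122, 13)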
| 46 | 78 | 0.778428 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 326 | 0.272575 |

0232d872e8633ddbe199a54a9b7cd036c696f627 | 458 | py | Python |
user/migrations/0017_auto_20200812_2149.py | Muia23/Grammer | dcc26937d88382c1da36a5f72306e6de367e90a3 | ["Unlicense"] | null | null | null |
user/migrations/0017_auto_20200812_2149.py | Muia23/Grammer | dcc26937d88382c1da36a5f72306e6de367e90a3 | ["Unlicense"] | null | null | null |
user/migrations/0017_auto_20200812_2149.py | Muia23/Grammer | dcc26937d88382c1da36a5f72306e6de367e90a3 | ["Unlicense"] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-08-12 18:49
from __future__ import unicode_literals
from django.db import migrations
import tinymce.models
class Migration(migrations.Migration):
dependencies = [
('user', '0016_post_likes'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='bio',
field=tinymce.models.HTMLField(blank=True),
),
]
| 20.818182 | 55 | 0.617904 | 285 | 0.622271 | 0 | 0 | 0 | 0 | 0 | 0 | 109 | 0.237991 |

0233159b2601985f539a68dd35218b81258f9ecc | 1,834 | py | Python |
audio/loudness_normalization.py | Open-Speech-EkStep/common_scripts | 916f01444e028f9111d5499217abf4443bd24017 | ["MIT"] | 4 | 2021-07-22T15:32:13.000Z | 2022-01-25T08:13:45.000Z |
audio/loudness_normalization.py | Open-Speech-EkStep/common_scripts | 916f01444e028f9111d5499217abf4443bd24017 | ["MIT"] | null | null | null |
audio/loudness_normalization.py | Open-Speech-EkStep/common_scripts | 916f01444e028f9111d5499217abf4443bd24017 | ["MIT"] | 3 | 2021-04-12T05:04:55.000Z | 2021-08-25T06:55:42.000Z |
from pydub import AudioSegment, effects
import glob
import os
from tqdm import tqdm
import argparse
class AudioNormalization:
def __init__(self, wav_file):
self.wav_file = wav_file
def loudness_normalization(self, target_dBFS=-15):
audio_file = AudioSegment.from_file(self.wav_file, format='wav')
loudness_difference = target_dBFS - audio_file.dBFS
normalized_audio = audio_file + loudness_difference
return normalized_audio
def loudness_normalization_effects(self):
audio_file = AudioSegment.from_file(self.wav_file, format='wav')
normalized_audio = effects.normalize(audio_file)
return normalized_audio
def rectify_audio_path(path):
if path[-1] == "/":
path = path[:-1]
return path
def normalize_loudness(input, output):
input_audio_path = rectify_audio_path(input)
audio_dump_path = rectify_audio_path(output)
audio_files = glob.glob(input_audio_path + '/**/*.wav', recursive=True)
print("Normalization will run on ", len(audio_files))
output_folder_path = audio_dump_path + '/' + input_audio_path.split('/')[-1] + '_loud_norm'
os.makedirs(output_folder_path)
for audio in tqdm(audio_files):
normalized_audio = AudioNormalization(audio).loudness_normalization_effects()
output_file_name = (output_folder_path + '/' +
audio.split('/')[-1])
normalized_audio.export(output_file_name, format='wav')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Normalize')
parser.add_argument('-i', '--input', required=True, help='Input path')
parser.add_argument('-o', '--output', required=True, help='Output path')
args_local = parser.parse_args()
normalize_loudness(args_local.input, args_local.output)
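# --- Editor's note (hedged usage sketch, not in the original file) ---
# Batch mode, as wired up in __main__ (paths are placeholders):
#   python loudness_normalization.py -i /data/wavs -o /data/out
#
# Single-file use of the class defined above; `export` is a pydub
# AudioSegment method:
# norm = AudioNormalization('clip.wav').loudness_normalization(target_dBFS=-15)
# norm.export('clip_norm.wav', format='wav')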
| 36.68 | 95 | 0.698473 | 582 | 0.317339 | 0 | 0 | 0 | 0 | 0 | 0 | 154 | 0.083969 |

0233975ca46a04c5b097d1d82d0ed1a76059f352 | 12,308 | py | Python |
libcloud/dns/drivers/nsone.py | dupontz/libcloud | 419c69441ea10e7bbf37319e5e8d02e82e7e6b40 | ["Apache-2.0"] | 4 | 2017-11-14T17:24:12.000Z | 2020-10-30T01:46:02.000Z |
libcloud/dns/drivers/nsone.py | dupontz/libcloud | 419c69441ea10e7bbf37319e5e8d02e82e7e6b40 | ["Apache-2.0"] | 11 | 2017-01-29T08:59:21.000Z | 2018-07-02T09:17:47.000Z |
libcloud/dns/drivers/nsone.py | dupontz/libcloud | 419c69441ea10e7bbf37319e5e8d02e82e7e6b40 | ["Apache-2.0"] | 4 | 2016-04-04T08:01:48.000Z | 2018-06-06T08:04:36.000Z |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
try:
import simplejson as json
except ImportError:
import json
from libcloud.dns.types import Provider, ZoneDoesNotExistError, \
ZoneAlreadyExistsError, RecordDoesNotExistError, RecordAlreadyExistsError
from libcloud.utils.py3 import httplib
from libcloud.dns.base import DNSDriver, Zone, Record, RecordType
from libcloud.common.nsone import NsOneConnection, NsOneResponse, \
NsOneException
__all__ = [
'NsOneDNSDriver'
]
class NsOneDNSResponse(NsOneResponse):
pass
class NsOneDNSConnection(NsOneConnection):
responseCls = NsOneDNSResponse
class NsOneDNSDriver(DNSDriver):
name = 'NS1 DNS'
website = 'https://ns1.com'
type = Provider.NSONE
connectionCls = NsOneDNSConnection
RECORD_TYPE_MAP = {
RecordType.A: 'A',
RecordType.AAAA: 'AAAA',
RecordType.CNAME: 'CNAME',
RecordType.MX: 'MX',
RecordType.NS: 'NS',
RecordType.PTR: 'PTR',
RecordType.SOA: 'SOA',
RecordType.SRV: 'SRV',
RecordType.TXT: 'TXT'
}
def list_zones(self):
action = '/v1/zones'
response = self.connection.request(action=action, method='GET')
zones = self._to_zones(items=response.parse_body())
return zones
def get_zone(self, zone_id):
"""
:param zone_id: Zone domain name (e.g. example.com)
:return: :class:`Zone`
"""
action = '/v1/zones/%s' % zone_id
try:
response = self.connection.request(action=action, method='GET')
except NsOneException:
e = sys.exc_info()[1]
if e.message == 'zone not found':
raise ZoneDoesNotExistError(value=e.message, driver=self,
zone_id=zone_id)
else:
raise e
zone = self._to_zone(response.objects[0])
return zone
def create_zone(self, domain, type='master', ttl=None, extra=None):
"""
:param domain: Zone domain name (e.g. example.com)
:type domain: ``str``
:param type: Zone type (This is not really used. See API docs for extra
parameters)
:type type: ``str``
:param ttl: TTL for new records (This is used through the extra param)
:type ttl: ``int``
:param extra: Extra attributes that are specific to the driver
such as ttl.
:type extra: ``dict``
:rtype: :class:`Zone`
"""
action = '/v1/zones/%s' % domain
raw_data = {'zone': domain}
if extra is not None:
raw_data.update(extra)
post_data = json.dumps(raw_data)
try:
response = self.connection.request(action=action, method='PUT',
data=post_data)
except NsOneException:
e = sys.exc_info()[1]
if e.message == 'zone already exists':
raise ZoneAlreadyExistsError(value=e.message, driver=self,
zone_id=domain)
else:
raise e
zone = self._to_zone(response.objects[0])
return zone
def delete_zone(self, zone):
"""
:param zone: Zone to be deleted.
:type zone: :class:`Zone`
:return: Boolean
"""
action = '/v1/zones/%s' % zone.domain
"""zones_list = self.list_zones()
if not self.ex_zone_exists(zone_id=zone.id, zones_list=zones_list):
raise ZoneDoesNotExistError(value='', driver=self, zone_id=zone.id)
"""
try:
response = self.connection.request(action=action, method='DELETE')
except NsOneException:
e = sys.exc_info()[1]
if e.message == 'zone not found':
raise ZoneDoesNotExistError(value=e.message, driver=self,
zone_id=zone.id)
else:
raise e
return response.status == httplib.OK
def list_records(self, zone):
"""
:param zone: Zone to list records for.
:type zone: :class:`Zone`
:return: ``list`` of :class:`Record`
"""
action = '/v1/zones/%s' % zone.domain
try:
response = self.connection.request(action=action, method='GET')
except NsOneException:
e = sys.exc_info()[1]
if e.message == 'zone not found':
raise ZoneDoesNotExistError(value=e.message, driver=self,
zone_id=zone.id)
else:
raise e
records = self._to_records(items=response.parse_body()['records'],
zone=zone)
return records
def get_record(self, zone_id, record_id):
"""
:param zone_id: The id of the zone where to search for
the record (e.g. example.com)
:type zone_id: ``str``
:param record_id: The type of record to search for
            (e.g. A, AAAA, MX etc)
:return: :class:`Record`
"""
action = '/v1/zones/%s/%s/%s' % (zone_id, zone_id, record_id)
try:
response = self.connection.request(action=action, method='GET')
except NsOneException:
e = sys.exc_info()[1]
if e.message == 'record not found':
raise RecordDoesNotExistError(value=e.message, driver=self,
record_id=record_id)
else:
raise e
zone = self.get_zone(zone_id=zone_id)
record = self._to_record(item=response.parse_body(), zone=zone)
return record
def delete_record(self, record):
"""
:param record: Record to delete.
:type record: :class:`Record`
:return: Boolean
"""
action = '/v1/zones/%s/%s/%s' % (record.zone.domain, record.name,
record.type)
try:
response = self.connection.request(action=action, method='DELETE')
except NsOneException:
e = sys.exc_info()[1]
if e.message == 'record not found':
raise RecordDoesNotExistError(value=e.message, driver=self,
record_id=record.id)
else:
raise e
return response.status == httplib.OK
def create_record(self, name, zone, type, data, extra=None):
"""
:param name: Name of the record to create (e.g. foo).
:type name: ``str``
:param zone: Zone where the record should be created.
:type zone: :class:`Zone`
:param type: Type of record (e.g. A, MX etc)
:type type: ``str``
:param data: Data of the record (e.g. 127.0.0.1 for the A record)
:type data: ``str``
:param extra: Extra data needed to create different types of records
:type extra: ``dict``
:return: :class:`Record`
"""
action = '/v1/zones/%s/%s/%s' % (zone.domain, '%s.%s' %
(name, zone.domain), type)
raw_data = {
"answers": [
{
"answer": [
data
], }
],
"type": type,
"domain": '%s.%s' % (name, zone.domain),
"zone": zone.domain
}
if extra is not None and extra.get('answers'):
raw_data['answers'] = extra.get('answers')
post_data = json.dumps(raw_data)
try:
response = self.connection.request(action=action, method='PUT',
data=post_data)
except NsOneException:
e = sys.exc_info()[1]
if e.message == 'record already exists':
raise RecordAlreadyExistsError(value=e.message, driver=self,
record_id='')
else:
raise e
record = self._to_record(item=response.parse_body(), zone=zone)
return record
def update_record(self, record, name, type, data, extra=None):
"""
:param record: Record to update
:type record: :class:`Record`
:param name: Name of the record to update (e.g. foo).
:type name: ``str``
:param type: Type of record (e.g. A, MX etc)
:type type: ``str``
:param data: Data of the record (e.g. 127.0.0.1 for the A record)
:type data: ``str``
:param extra: Extra data needed to create different types of records
:type extra: ``dict``
:return: :class:`Record`
"""
zone = record.zone
action = '/v1/zones/%s/%s/%s' % (zone.domain, '%s.%s' %
(name, zone.domain), type)
raw_data = {
"answers": [
{
"answer": [
data
], }
]
}
if extra is not None and extra.get('answers'):
raw_data['answers'] = extra.get('answers')
post_data = json.dumps(raw_data)
try:
response = self.connection.request(action=action, data=post_data,
method='POST')
except NsOneException:
e = sys.exc_info()[1]
if e.message == 'record does not exist':
raise RecordDoesNotExistError(value=e.message, driver=self,
record_id=record.id)
else:
raise e
record = self._to_record(item=response.parse_body(), zone=zone)
return record
def ex_zone_exists(self, zone_id, zones_list):
"""
Function to check if a `Zone` object exists.
:param zone_id: ID of the `Zone` object.
:type zone_id: ``str``
:param zones_list: A list containing `Zone` objects.
:type zones_list: ``list``.
:rtype: Returns `True` or `False`.
"""
zone_ids = []
for zone in zones_list:
zone_ids.append(zone.id)
return zone_id in zone_ids
def _to_zone(self, item):
common_attr = ['zone', 'id', 'type']
extra = {}
for key in item.keys():
if key not in common_attr:
extra[key] = item.get(key)
zone = Zone(domain=item['zone'], id=item['id'], type=item.get('type'),
extra=extra, ttl=extra.get('ttl'), driver=self)
return zone
def _to_zones(self, items):
zones = []
for item in items:
zones.append(self._to_zone(item))
return zones
def _to_record(self, item, zone):
common_attr = ['id', 'short_answers', 'answers', 'domain', 'type']
extra = {}
for key in item.keys():
if key not in common_attr:
extra[key] = item.get(key)
if item.get('answers') is not None:
data = item.get('answers')[0]['answer']
else:
data = item.get('short_answers')
record = Record(id=item['id'], name=item['domain'], type=item['type'],
data=data, zone=zone, driver=self,
extra=extra)
return record
def _to_records(self, items, zone):
records = []
for item in items:
records.append(self._to_record(item, zone))
return records
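# --- Editor's note (hedged usage sketch, not part of the driver) ---
# Typical libcloud instantiation; the API token is a placeholder.
# from libcloud.dns.providers import get_driver
# from libcloud.dns.types import Provider
# driver = get_driver(Provider.NSONE)('my-api-token')
# zone = driver.create_zone(domain='example.com')
# record = driver.create_record(name='www', zone=zone, type='A', data='127.0.0.1')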
| 34.188889 | 79 | 0.537699 | 11,062 | 0.898765 | 0 | 0 | 0 | 0 | 0 | 0 | 4,266 | 0.346604 |

02339931b6a314a7b42357abbf8fe125695e6d76 | 533 | py | Python |
ocr.py | PI2-Braille-printer/OCR | 25511596efbe5e408fe43a92c0d04e513d7fea39 | ["MIT"] | null | null | null |
ocr.py | PI2-Braille-printer/OCR | 25511596efbe5e408fe43a92c0d04e513d7fea39 | ["MIT"] | 6 | 2021-03-18T20:56:22.000Z | 2022-03-11T23:28:10.000Z |
ocr.py | PI2-Braille-printer/OCR | 25511596efbe5e408fe43a92c0d04e513d7fea39 | ["MIT"] | null | null | null |
from PIL import Image, ImageEnhance
import pytesseract
import os
#image = Image.open('f_test.jpg')
#enhance = ImageEnhance.Contrast(image)
#new_image = enhance.enhance(1.5)
#new_image.save('f_test__c_2.jpg')
for x in range(0,3):
os.system('./textcleaner -g -s 2 -a 1 ./Images/test_crop_'+str(x)+'.jpg ./Images/test_crop_'+str(x)+'_r.jpg')
result_string = pytesseract.image_to_string(Image.open('./Images/test_crop_'+str(x)+'_r.jpg'),lang='por')
print(result_string)
#result_string = result_string.split()
#print(result_string)
| 31.352941 | 110 | 0.739212 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 314 | 0.589118 |

0233f5b5066a471f59d0277aa64b3c981e22b913 | 2,090 | py | Python |
processing/lua_file_builder.py | eubr-atmosphere/Spark-Log-Parser | 6f2025d50944b3603ce3e41ab09afcb38eab4e08 | ["Apache-2.0"] | 1 | 2017-05-06T21:25:39.000Z | 2017-05-06T21:25:39.000Z |
processing/lua_file_builder.py | eubr-atmosphere/Spark-Log-Parser | 6f2025d50944b3603ce3e41ab09afcb38eab4e08 | ["Apache-2.0"] | null | null | null |
processing/lua_file_builder.py | eubr-atmosphere/Spark-Log-Parser | 6f2025d50944b3603ce3e41ab09afcb38eab4e08 | ["Apache-2.0"] | 3 | 2018-10-19T12:35:56.000Z | 2019-05-09T08:09:54.000Z |
#! /usr/bin/env python3
## Copyright 2018 Eugenio Gianniti <eugenio.gianniti@polimi.it>
## Copyright 2016 Giorgio Pea <giorgio.pea@mail.polimi.it>
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
import os
import sys
def buildLuaFile(targetDirectory, name, containers):
scriptdir = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(scriptdir, 'template.lua'), 'r') as infile:
content = infile.read()
with open (os.path.join (targetDirectory,
"dependencies.lua"), "r") as infile:
stages = infile.read ()
content = content \
.replace('@@STAGES@@', stages) \
.replace('@@CONTAINERS@@', containers) \
.replace('@@USERS@@', os.environ['DAGSIM_USERS']) \
.replace('@@TYPE@@',
os.environ['DAGSIM_UTHINKTIMEDISTR_TYPE']) \
.replace('@@PARAMS@@',
os.environ['DAGSIM_UTHINKTIMEDISTR_PARAMS'])
outfilename = os.path.join(targetDirectory,
'{}.lua.template'.format(name))
with open(outfilename, 'w') as outfile:
outfile.write(content)
def main():
args = sys.argv
if len(args) != 4:
print("Required args: [TARGET_DIRECTORY] [NAME]", file=sys.stderr)
sys.exit(2)
else:
if os.path.exists(str(args[1])):
buildLuaFile(str(args[1]), str(args[2]), str(args[3]))
else:
print("error: the inserted directory does not exist",
file = sys.stderr)
sys.exit(1)
if __name__ == '__main__':
main()
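# --- Editor's note (hedged usage sketch) ---
# Invocation implied by main() above; the argument values are placeholders,
# and the CONTAINERS string is substituted verbatim into @@CONTAINERS@@:
#   python3 lua_file_builder.py ./build my_app "4"
# DAGSIM_USERS, DAGSIM_UTHINKTIMEDISTR_TYPE and DAGSIM_UTHINKTIMEDISTR_PARAMS
# must be set in the environment before running.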
| 32.65625 | 75 | 0.623445 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 981 | 0.469378 |

0236d15dce7606a0d8edbca50d378b142b6663f7 | 127 | py | Python |
mynlp/__init__.py | Suneel123/mynlp | 9dcf6fb57df66ebd4a359b8cd866323f43bc8ec4 | ["MIT"] | null | null | null |
mynlp/__init__.py | Suneel123/mynlp | 9dcf6fb57df66ebd4a359b8cd866323f43bc8ec4 | ["MIT"] | null | null | null |
mynlp/__init__.py | Suneel123/mynlp | 9dcf6fb57df66ebd4a359b8cd866323f43bc8ec4 | ["MIT"] | null | null | null |
"""Top-level package for mynlp."""
__author__ = """Suneel Dondapati"""
__email__ = 'dsuneel1@gmail.com'
__version__ = '0.1.0'
| 21.166667 | 35 | 0.685039 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 83 | 0.653543 |

0236d5c96173fb20b1c62f540c0341822dff9bf5 | 788 | py | Python |
test/point_test.py | markupCode/computational-geometry | 9a0a63a0b0c86e0618c18f82283b41baded21c50 | ["MIT"] | null | null | null |
test/point_test.py | markupCode/computational-geometry | 9a0a63a0b0c86e0618c18f82283b41baded21c50 | ["MIT"] | null | null | null |
test/point_test.py | markupCode/computational-geometry | 9a0a63a0b0c86e0618c18f82283b41baded21c50 | ["MIT"] | null | null | null |
import unittest
from geometry.point import Point
class TestPoint(unittest.TestCase):
def get_points(self):
return [
Point(0, 0),
Point(1, 1),
Point(0, 1),
Point(-1, 1),
Point(-1, 0),
Point(-1, -1),
Point(1, -1)
]
def test_get_arc(self):
points = self.get_points()
self.assertEqual(points[0].get_arc(), 0)
self.assertEqual(points[1].get_arc(), 45)
self.assertEqual(points[2].get_arc(), 90)
self.assertEqual(points[3].get_arc(), 135)
self.assertEqual(points[4].get_arc(), 180)
self.assertEqual(points[5].get_arc(), 225)
self.assertEqual(points[6].get_arc(), 315)
if __name__ == '__main__':
unittest.main()
| 23.878788 | 50 | 0.549492 | 686 | 0.870558 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.01269 |

0238ca053db973ce47447cd47778ddb364794224 | 2,183 | py | Python |
scenarios/simpleBTSEdgeCloudIngestion/units/sensors.py | rdsea/IoTCloudSamples | 37a3550627682981aa7d2a4cf317f19a3b1a699c | ["Apache-2.0"] | 5 | 2019-05-04T08:43:58.000Z | 2021-12-20T14:22:52.000Z |
scenarios/simpleBTSEdgeCloudIngestion/units/sensors.py | rdsea/IoTCloudSamples | 37a3550627682981aa7d2a4cf317f19a3b1a699c | ["Apache-2.0"] | 7 | 2017-10-30T22:53:51.000Z | 2022-02-06T18:03:32.000Z |
scenarios/simpleBTSEdgeCloudIngestion/units/sensors.py | rdsea/IoTCloudSamples | 37a3550627682981aa7d2a4cf317f19a3b1a699c | ["Apache-2.0"] | 3 | 2018-12-17T17:04:04.000Z | 2021-09-23T07:07:01.000Z |
import yaml
import os, errno
import json
def load_config(path):
config = None
with open(path, 'r') as config_file:
        config = yaml.safe_load(config_file)
return config
def createSensorConfigs(topicSensors):
sensors = []
count = 0
for i in range(topicSensors['nb']):
config = {}
config['server'] = topicSensors['broker']
config['username'] = 'xxx'
config['password'] = 'xxx'
config['port'] = 1883
config['clientId'] = 'sensor_' + topicSensors['topic'] + '_' +str(count)
config['topic'] = topicSensors['topic']
sensors.append(config)
if 'remoteLoggingBroker' in topicSensors:
remoteLoggingConfig = {}
remoteLoggingConfig['broker'] = 'tcp://'+topicSensors['remoteLoggingBroker']['host']+':'+str(topicSensors['remoteLoggingBroker']['port'])
remoteLoggingConfig['topic'] = topicSensors['remoteLoggingBroker']['topic']
config['remoteLoggingBroker'] = remoteLoggingConfig
config['remoteLogging'] = True
count += 1
return sensors
def write_config_files(sensors):
try:
os.makedirs('sensors')
except OSError as e:
if e.errno != errno.EEXIST:
raise
for sensor in sensors:
file_name = sensor['clientId']+'.json'
with open('sensors/'+file_name,'w') as outfile:
json.dump(sensor, outfile)
def write_compose(sensors):
services = {}
for sensor in sensors:
service = {}
volumes = []
volumes.append('./sensors/'+sensor['clientId']+'.json'+":/sensor/config.json:")
volumes.append('./sensors/'+sensor['clientId']+'.csv'+":/sensor/data.csv:")
service['volumes'] = volumes
service['image'] = 'rdsea/sensor'
services[sensor['clientId']] = service
return services
def provision(config):
try:
os.makedirs('sensors')
except OSError as e:
if e.errno != errno.EEXIST:
raise
sensors = []
for topicSensors in config['sensors']:
sensors.extend(createSensorConfigs(topicSensors))
write_config_files(sensors)
return write_compose(sensors)
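# --- Editor's note (hedged sketch of the config consumed above) ---
# provision() expects a structure like the following (values are examples
# inferred from the code, not from any shipped config file):
# config = {
#     'sensors': [
#         {'nb': 2, 'broker': 'mqtt-host', 'topic': 'bts/temp',
#          'remoteLoggingBroker': {'host': 'log-host', 'port': 1883,
#                                  'topic': 'bts/logs'}},  # optional key
#     ]
# }
# services = provision(load_config('config.yml'))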
| 30.319444 | 149 | 0.607879 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 464 | 0.212552 |

0238ea3d027c6d41c055683ac6fc0e17e3bc821b | 879 | py | Python |
array/0018_4_sum/0018_4_sum.py | zdyxry/LeetCode | 33371285d0f3302158230f46e8b1b63b9f4639c4 | ["Xnet", "X11"] | 6 | 2019-09-16T01:50:44.000Z | 2020-09-17T08:52:25.000Z |
array/0018_4_sum/0018_4_sum.py | zdyxry/LeetCode | 33371285d0f3302158230f46e8b1b63b9f4639c4 | ["Xnet", "X11"] | null | null | null |
array/0018_4_sum/0018_4_sum.py | zdyxry/LeetCode | 33371285d0f3302158230f46e8b1b63b9f4639c4 | ["Xnet", "X11"] | 4 | 2020-02-07T12:43:16.000Z | 2021-04-11T06:38:55.000Z |
import collections
class Solution(object):
def fourSum(self, nums, target):
nums, result, lookup = sorted(nums), [], collections.defaultdict(list)
        for i in range(0, len(nums) - 1):
            for j in range(i + 1, len(nums)):
lookup[nums[i]+nums[j]].append([i,j])
for i in lookup.keys():
if target - i in lookup:
for x in lookup[i]:
for y in lookup[target-i]:
[a,b],[c,d] = x,y
                        if a != c and a != d and b != c and b != d:
quad = sorted([nums[a], nums[b], nums[c],nums[d]])
if quad not in result:
result.append(quad)
return sorted(result)
nums = [1,0,-1,0,-2,2]
target = 0
res = Solution().fourSum(nums, target)
print(res)
| 35.16 | 83 | 0.480091 | 772 | 0.878271 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |

023b3b94e54c17d3e9f985c30a7d72a9e9d96bce | 573 | py | Python |
Qcover/backends/__init__.py | BAQIS-Quantum/Qcover | ca3776ed73fefa0cfef08042143a8cf842f8dad5 | ["Apache-2.0"] | 38 | 2021-12-22T03:12:01.000Z | 2022-03-17T06:57:10.000Z |
Qcover/backends/__init__.py | BAQIS-Quantum/Qcover | ca3776ed73fefa0cfef08042143a8cf842f8dad5 | ["Apache-2.0"] | null | null | null |
Qcover/backends/__init__.py | BAQIS-Quantum/Qcover | ca3776ed73fefa0cfef08042143a8cf842f8dad5 | ["Apache-2.0"] | 13 | 2021-12-22T07:32:44.000Z | 2022-02-28T06:47:41.000Z |
from .backend import Backend
from .circuitbyqiskit import CircuitByQiskit
from .circuitbyprojectq import CircuitByProjectq
from .circuitbycirq import CircuitByCirq
from .circuitbyqulacs import CircuitByQulacs
# from .circuitbytket import CircuitByTket
from .circuitbytensor import CircuitByTensor
from .circuitbyqton import CircuitByQton
import warnings
warnings.filterwarnings("ignore")
__all__ = [
'Backend',
'CircuitByCirq',
'CircuitByQiskit',
'CircuitByProjectq',
'CircuitByTensor',
'CircuitByQulacs',
'CircuitByQton'
]
| 27.285714 | 49 | 0.767888 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 160 | 0.279232 |

023c2aec98d43d7652c64c1fee878f6de026330b | 766 | py | Python |
python-files/dictionary-val.py | chirumist/Python-Practice | fc7d6447ca492989221904121321aaf762bb6b43 | ["MIT"] | null | null | null |
python-files/dictionary-val.py | chirumist/Python-Practice | fc7d6447ca492989221904121321aaf762bb6b43 | ["MIT"] | null | null | null |
python-files/dictionary-val.py | chirumist/Python-Practice | fc7d6447ca492989221904121321aaf762bb6b43 | ["MIT"] | null | null | null |
"""
User Get Key Value Input Dictionary Start
"""
dic = {
    "google": "Google provides jobs and internships.",
    "amazon": "Amazon is an e-commerce store and a cloud computing provider.",
    "zoom": "Zoom provides a video call system for joining meetings.",
    "microsoft": "Microsoft is the owner of the Windows and Office software."
}
# For beginners
print("google")
print("amazon")
print("zoom")
print("microsoft")
key = input("Search the dictionary for a detail! \n")
print(dic[key.lower()])
# For advanced users
while True:
    for index, item in dic.items():
        print(index)
    key = input("Search the dictionary for a detail! \n")
    print(dic[key.lower()])
if int(input("Press 1 to exit 0 to continue \n")):
break
"""
User Get Key Value Input Dictionary End
"""
| 24.709677 | 73 | 0.663185 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 498 | 0.650131 |

023fa6bbd20b990b812f3f037de938b8c58a24d0 | 2,811 | py | Python |
cloudpredictionframework/anomaly_detection/algorithms/hybrid_algorithm.py | Fruktus/CloudPredictionFramework | 1474287cc9bdfd58ae92db7bc24966a7e600258f | ["MIT"] | 1 | 2021-11-19T13:13:20.000Z | 2021-11-19T13:13:20.000Z |
cloudpredictionframework/anomaly_detection/algorithms/hybrid_algorithm.py | Fruktus/CloudPredictionFramework | 1474287cc9bdfd58ae92db7bc24966a7e600258f | ["MIT"] | null | null | null |
cloudpredictionframework/anomaly_detection/algorithms/hybrid_algorithm.py | Fruktus/CloudPredictionFramework | 1474287cc9bdfd58ae92db7bc24966a7e600258f | ["MIT"] | null | null | null |
from statistics import mean
from collections import defaultdict
from typing import List

from cloudpredictionframework.anomaly_detection.algorithms.base_algorithm import BaseAlgorithm


class HybridAlgorithm(BaseAlgorithm):
    def __init__(self, filters: List[BaseAlgorithm], min_confidence=0.8):
super().__init__()
self._filters = filters
self._min_confidence = min_confidence
self._recurrency_data = {'day_of_week': defaultdict(lambda: 0),
'day_of_month': defaultdict(lambda: 0)}
def get_confidence(self):
pass
def update(self, timestamp, value):
self._samples = self._samples.append({'timestamp': timestamp, 'value': value}, ignore_index=True)
combined_states = []
for alg in self._filters:
alg.update(timestamp, value)
combined_states.append(alg.get_current_state())
if self.states.learning in combined_states:
self._current_state = self.states.learning
return
state_confidence = mean([1 if i == self.states.overutil_anomaly else 0 for i in combined_states])
self._update_recurrent(timestamp, state_confidence > self._min_confidence)
if state_confidence >= self._min_confidence:
if self._is_recurrent(timestamp):
self._current_state = self.states.normal
else:
self._current_state = self.states.overutil_anomaly
self._anomalies_overutil = self._anomalies_overutil.append({'timestamp': timestamp, 'value': value},
ignore_index=True)
else:
self._current_state = self.states.normal
self._anomalies_treshold_history = self._anomalies_treshold_history.append(
{'timestamp': timestamp,
'upper_treshold': self._upper_treshold,
'lower_treshold': self._lower_treshold},
ignore_index=True)
def _update_recurrent(self, timestamp, is_anomaly: bool):
if is_anomaly:
self._recurrency_data['day_of_week'][timestamp.dayofweek] += 1
self._recurrency_data['day_of_month'][timestamp.day] += 1
else:
dow = self._recurrency_data['day_of_week'][timestamp.dayofweek]
self._recurrency_data['day_of_week'][timestamp.dayofweek] = dow - 1 if dow > 0 else 0
dom = self._recurrency_data['day_of_month'][timestamp.day]
self._recurrency_data['day_of_month'][timestamp.day] = dom - 1 if dom > 0 else 0
def _is_recurrent(self, timestamp):
return self._recurrency_data['day_of_week'][timestamp.dayofweek] > 2 or \
self._recurrency_data['day_of_month'][timestamp.day] > 2
def __str__(self):
return "HybridAlgorithm"
| 41.338235 | 116 | 0.644966 | 2,648 | 0.942014 | 0 | 0 | 0 | 0 | 0 | 0 | 231 | 0.082177 |

02426c5e9ebc5b6e7797b501d9a365d58338fa41 | 159 | py | Python |
Defer/__init__.py | loynoir/defer.py | 46f37a046028b1854586301a45870c2b3a628f65 | ["MIT"] | null | null | null |
Defer/__init__.py | loynoir/defer.py | 46f37a046028b1854586301a45870c2b3a628f65 | ["MIT"] | null | null | null |
Defer/__init__.py | loynoir/defer.py | 46f37a046028b1854586301a45870c2b3a628f65 | ["MIT"] | null | null | null |
__all__ = ['Defer']
from contextlib import contextmanager, ExitStack
@contextmanager
def Defer():
with ExitStack() as stack:
yield stack.callback
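# --- Editor's note (hedged usage sketch, not in the original file) ---
# The value yielded above is ExitStack.callback, so deferred calls run
# LIFO when the block exits:
# with Defer() as defer:
#     f = open('data.txt')
#     defer(f.close)
#     ...  # use f; f.close() runs automatically on exit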
| 19.875 | 48 | 0.72327 | 0 | 0 | 72 | 0.45283 | 88 | 0.553459 | 0 | 0 | 7 | 0.044025 |

024385bec991016fbb9a7b197fba1d40d6b4f297 | 9,798 | py | Python |
jsonmerge/strategies.py | open-contracting-archive/jsonmerge | 2b87eea10bed3aa380cb28034a96783ac3081a85 | ["Unlicense"] | null | null | null |
jsonmerge/strategies.py | open-contracting-archive/jsonmerge | 2b87eea10bed3aa380cb28034a96783ac3081a85 | ["Unlicense"] | 3 | 2015-09-16T15:37:05.000Z | 2015-09-16T16:32:26.000Z |
jsonmerge/strategies.py | open-contracting-archive/jsonmerge | 2b87eea10bed3aa380cb28034a96783ac3081a85 | ["Unlicense"] | null | null | null |
# vim:ts=4 sw=4 expandtab softtabstop=4
from jsonmerge.exceptions import HeadInstanceError, \
BaseInstanceError, \
SchemaError
import jsonschema
import re
class Strategy(object):
"""Base class for merge strategies.
"""
def merge(self, walk, base, head, schema, meta, **kwargs):
"""Merge head instance into base.
walk -- WalkInstance object for the current context.
base -- Value being merged into.
head -- Value being merged.
schema -- Schema used for merging.
meta -- Meta data, as passed to the Merger.merge() method.
kwargs -- Dict with any extra options given in the 'mergeOptions'
keyword
Specific merge strategies should override this method to implement
their behavior.
The function should return the object resulting from the merge.
Recursion into the next level, if necessary, is achieved by calling
walk.descend() method.
"""
        raise NotImplementedError
def get_schema(self, walk, schema, meta, **kwargs):
"""Return the schema for the merged document.
walk -- WalkSchema object for the current context.
schema -- Original document schema.
meta -- Schema for the meta data, as passed to the Merger.get_schema()
method.
kwargs -- Dict with any extra options given in the 'mergeOptions'
keyword.
Specific merge strategies should override this method to modify the
document schema depending on the behavior of the merge() method.
The function should return the schema for the object resulting from the
merge.
Recursion into the next level, if necessary, is achieved by calling
walk.descend() method.
Implementations should take care that all external schema references
are resolved in the returned schema. This can be achieved by calling
walk.resolve_refs() method.
"""
        raise NotImplementedError
class Overwrite(Strategy):
def merge(self, walk, base, head, schema, meta, **kwargs):
        if head is None:
return base
else:
return head
def get_schema(self, walk, schema, meta, **kwargs):
return walk.resolve_refs(schema)
class OCDSOmit(Strategy):
def merge(self, walk, base, head, schema, meta, **kwargs):
return None
def get_schema(self, walk, schema, meta, **kwargs):
return walk.resolve_refs(schema)
class Version(Strategy):
def merge(self, walk, base, head, schema, meta, limit=None, unique=None, ignoreDups=True, **kwargs):
# backwards compatibility
if unique is False:
ignoreDups = False
if base is None:
base = []
else:
base = list(base)
if not ignoreDups or not base or base[-1]['value'] != head:
base.append(walk.add_meta(head, meta))
if limit is not None:
base = base[-limit:]
return base
def get_schema(self, walk, schema, meta, limit=None, **kwargs):
if meta is not None:
item = dict(meta)
else:
item = {}
if 'properties' not in item:
item['properties'] = {}
item['properties']['value'] = walk.resolve_refs(schema)
rv = { "type": "array",
"items": item }
if limit is not None:
rv['maxItems'] = limit
return rv
class OCDSVersion(Strategy):
def merge(self, walk, base, head, schema, meta, **kwargs):
if base is None:
base = []
else:
base = list(base)
meta = {
"releaseID": walk.merger.head_root.get('id'),
"releaseDate": walk.merger.head_root.get('date'),
"releaseTag": walk.merger.head_root.get('tag')
}
if (not base or base[-1]['value'] != head) and head != None:
base.append(walk.add_meta(head, meta))
return base
def get_schema(self, walk, schema, meta, **kwargs):
if meta is not None:
item = dict(meta)
else:
item = {}
if 'properties' not in item:
item['properties'] = {}
item['properties']['value'] = walk.resolve_refs(schema)
item['properties'].update({
"releaseDate": {
"type": "string",
"format": "date-time"
},
"releaseID": {
"type": "string"
},
"releaseTag": {
"type": "string"
}
})
rv = { "type": "array",
"items": item }
return rv
class Append(Strategy):
def merge(self, walk, base, head, schema, meta, **kwargs):
if not walk.is_type(head, "array"):
raise HeadInstanceError("Head for an 'append' merge strategy is not an array")
if base is None:
base = []
else:
if not walk.is_type(base, "array"):
raise BaseInstanceError("Base for an 'append' merge strategy is not an array")
base = list(base)
base += head
return base
def get_schema(self, walk, schema, meta, **kwargs):
schema.pop('maxItems', None)
schema.pop('uniqueItems', None)
return walk.resolve_refs(schema)
class ArrayMergeById(Strategy):
def merge(self, walk, base, head, schema, meta, idRef="id", ignoreId=None, **kwargs):
if not walk.is_type(head, "array"):
raise HeadInstanceError("Head for an 'arrayMergeById' merge strategy is not an array") # nopep8
if base is None:
base = []
else:
if not walk.is_type(base, "array"):
raise BaseInstanceError("Base for an 'arrayMergeById' merge strategy is not an array") # nopep8
base = list(base)
subschema = None
if schema:
subschema = schema.get('items')
if walk.is_type(subschema, "array"):
raise SchemaError("'arrayMergeById' not supported when 'items' is an array")
for head_item in head:
try:
head_key = walk.resolver.resolve_fragment(head_item, idRef)
except jsonschema.RefResolutionError:
# Do nothing if idRef field cannot be found.
continue
if head_key == ignoreId:
continue
key_count = 0
for i, base_item in enumerate(base):
base_key = walk.resolver.resolve_fragment(base_item, idRef)
if base_key == head_key:
key_count += 1
# If there was a match, we replace with a merged item
base[i] = walk.descend(subschema, base_item, head_item, meta)
if key_count == 0:
# If there wasn't a match, we append a new object
base.append(walk.descend(subschema, None, head_item, meta))
if key_count > 1:
raise BaseInstanceError("Id was not unique")
return base
def get_schema(self, walk, schema, meta, **kwargs):
subschema = None
if schema:
subschema = schema.get('items')
# Note we're discarding the walk.descend() result here. This is because
# it would de-reference the $ref if the subschema is a reference - i.e.
# in the result it would replace the reference with the copy of the
# target.
#
# But we want to keep the $ref and do the walk.descend() only on the target of the reference.
#
# This seems to work, but is an ugly workaround. walk.descend() should
# be fixed instead to not dereference $refs when not necessary.
walk.descend(subschema, meta)
return schema
class ObjectMerge(Strategy):
def merge(self, walk, base, head, schema, meta, **kwargs):
if not walk.is_type(head, "object"):
raise HeadInstanceError("Head for an 'object' merge strategy is not an object")
if base is None:
base = {}
else:
if not walk.is_type(base, "object"):
raise BaseInstanceError("Base for an 'object' merge strategy is not an object")
base = dict(base)
for k, v in head.items():
subschema = None
# get subschema for this element
if schema is not None:
p = schema.get('properties')
if p is not None:
subschema = p.get(k)
if subschema is None:
p = schema.get('patternProperties')
if p is not None:
for pattern, s in p.items():
if re.search(pattern, k):
subschema = s
if subschema is None:
p = schema.get('additionalProperties')
if p is not None:
subschema = p.get(k)
base[k] = walk.descend(subschema, base.get(k), v, meta)
return base
def get_schema(self, walk, schema, meta, **kwargs):
for forbidden in ("oneOf", "allOf", "anyOf"):
if forbidden in schema:
raise SchemaError("Type ambiguous schema")
schema2 = dict(schema)
def descend_keyword(keyword):
p = schema.get(keyword)
if p is not None:
for k, v in p.items():
schema2[keyword][k] = walk.descend(v, meta)
descend_keyword("properties")
descend_keyword("patternProperties")
descend_keyword("additionalProperties")
return schema2
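# --- Editor's note (hedged usage sketch; the jsonmerge Merger entry point is
# assumed, since only the strategies module is shown here) ---
# from jsonmerge import Merger
# schema = {'properties': {'price': {'mergeStrategy': 'version'}}}
# merger = Merger(schema)
# base = merger.merge(None, {'price': 10})
# base = merger.merge(base, {'price': 11})
# # -> {'price': [{'value': 10}, {'value': 11}]}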
| 31.504823 | 112 | 0.557359 | 9,554 | 0.975097 | 0 | 0 | 0 | 0 | 0 | 0 | 3,268 | 0.333537 |

0243fa264d20be4663ad37da1958e0275ed6a559 | 3,100 | py | Python |
ArcGISDesktop/reconcile_post_versions.py | jonhusen/ArcGIS | 1d39a627888ce6039c490cdad810cd6d8035cb77 | ["MIT"] | null | null | null |
ArcGISDesktop/reconcile_post_versions.py | jonhusen/ArcGIS | 1d39a627888ce6039c490cdad810cd6d8035cb77 | ["MIT"] | null | null | null |
ArcGISDesktop/reconcile_post_versions.py | jonhusen/ArcGIS | 1d39a627888ce6039c490cdad810cd6d8035cb77 | ["MIT"] | null | null | null |
"""
Reconcile and posting versions at 10.0
TODO:WIP
"""
import arcpy, os, sys, string
#Populate parent and child versions in the following manner('Parent':'Child', etc). DO NOT LIST DEFAULT
vTree = {'SDE.Parent':'SDE.Child','SDE.QA':'SDE.Edit'}
#Reconcile and post child versions with parent
def RecPostNonDefault(workspace,logWorkspace,logName):
outLog = open(os.path.join(logWorkspace, logName), 'w')
for key, val in vTree.iteritems():
arcpy.ReconcileVersion_management(workspace, val, key,"BY_OBJECT", "FAVOR_TARGET_VERSION", "NO_LOCK_AQUIRED", "NO_ABORT", "POST")
print "Reconciling and posting {0} to {1}".format(val, key)
outLog.write("Reconciling and posting {0} to {1}".format(val, key))
outLog.write("\n")
outLog.close()
del outLog, key, val
#Reconcile and post parent versions with DEFAULT
def RecPostDefault(workspace,logWorkspace,logName2,defaultVersion):
outLog = open(os.path.join(logWorkspace, logName2), 'w')
#Reconcile and post parents with DEFAULT
for key, val in vTree.iteritems():
arcpy.ReconcileVersion_management(workspace, key, defaultVersion,"BY_OBJECT", "FAVOR_TARGET_VERSION", "NO_LOCK_AQUIRED", "NO_ABORT", "POST")
print "Reconciling and posting {0} to DEFAULT".format(key)
outLog.write("Reconciling and posting {0} to DEFAULT".format(key))
outLog.write("\n")
outLog.close()
del outLog, key, val
def DeleteChildVersions(workspace):
arcpy.ClearWorkspaceCache_management()
for key, val in vTree.iteritems():
arcpy.DeleteVersion_management(workspace, val)
print "Deleted {0}".format(val)
def DeleteParentVersions(workspace):
arcpy.ClearWorkspaceCache_management()
for key, val in vTree.iteritems():
arcpy.DeleteVersion_management(workspace, key)
print "Deleted {0}".format(key)
#Compress database
def Compress(workspace,logWorkspace,logName3):
arcpy.ClearWorkspaceCache_management()
outLog = open(os.path.join(logWorkspace, logName3), 'w')
arcpy.Compress_management(workspace)
print ("Compressed database {0}".format(workspace))
outLog.write("Compressed database {0}".format(workspace))
outLog.close()
def RecreateVersions(workspace, defaultVersion):
for key, val in vTree.iteritems():
arcpy.CreateVersion_management(workspace,defaultVersion, key[4:], "PUBLIC")
print "Created version {0}".format(key)
arcpy.CreateVersion_management(workspace, key, val[4:], "PUBLIC")
print "Created version {0}".format(val)
if __name__=="__main__":
workspace = r"Database Connections\MXD2.sde"
defaultVersion = "sde.DEFAULT"
logName = "RecPostLog.txt"
logName2 = "RecPostDefaultLog.txt"
logName3 = "CompressLog.txt"
logWorkspace = r"C:\temp"
RecPostNonDefault(workspace,logWorkspace,logName)
RecPostDefault(workspace,logWorkspace,logName2,defaultVersion)
DeleteChildVersions(workspace)
DeleteParentVersions(workspace)
Compress(workspace,logWorkspace,logName3)
RecreateVersions(workspace, defaultVersion)
| 40.789474 | 148 | 0.709677 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 901 | 0.290645 |

024430ea1d89420e6939d1c770a6a86ca49668e5 | 4,626 | py | Python |
example/F3Dp/F3D_syn.py | Chunfang/defmod-swpc | 74fe7c02b24a46aa24bca7438738aa5adb72e2b6 | ["MIT"] | 26 | 2017-05-12T08:11:57.000Z | 2022-03-06T01:44:24.000Z |
example/F3Dp/F3D_syn.py | Soengmou/defmod-swpc | 75740fca3b36107e9d18201a5623c955f6010740 | ["MIT"] | 4 | 2019-09-11T15:35:16.000Z | 2020-06-23T10:49:34.000Z |
example/F3Dp/F3D_syn.py | Chunfang/defmod-swpc | 74fe7c02b24a46aa24bca7438738aa5adb72e2b6 | ["MIT"] | 8 | 2017-05-22T18:40:13.000Z | 2021-02-10T08:04:39.000Z |
#!/usr/bin/env python
import numpy as np
import os,sys
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pyplot as plt
import argparse
ap=argparse.ArgumentParser()
ap.add_argument('-vis') # 1 plot cropped point cloud
ap.add_argument('-refine') # 1 refine mesh
ap.add_argument('-clean') # 1 remove tmp files
if ap.parse_args().vis==None:
vis=0
else:
vis=int(ap.parse_args().vis)
if ap.parse_args().refine==None:
refine=0
else:
refine=int(ap.parse_args().refine)
if ap.parse_args().clean==None:
clean=0
else:
clean=int(ap.parse_args().clean)
# Synthetic fault pixels
z=np.linspace(.2, -.8, num=100)
y=np.linspace(-.625,.625, num=120)
grid=np.meshgrid(y,z)
x=np.zeros((len(z)*len(y),1),dtype=float)
dat_vert=np.hstack((x,grid[0].reshape(x.shape),grid[1].reshape(x.shape)))
# weak
wl=np.linspace(.12,.18,num=8); amp=.03125*np.sqrt(wl)
e=1.025; r=-.2
dip=70.; zcnt=-.35
omg=[ 0.82976173, 0.89624834, 0.03829284, -0.50016345, -1.06606012, 1.40505898, -1.24256034, 1.28623393]
#omg=(np.random.rand(wl.shape[0])-.5)*np.pi
L=dat_vert[1,:].max()-dat_vert[1,:].min()
zmax=z.max(); zmin=z.min()
for i in range(len(wl)):
phs=dat_vert[:,1]/wl[i]*np.pi+omg[i]
dat_vert[:,0]=dat_vert[:,0]+amp[i]*np.cos(phs)*(e*zmax-dat_vert[:,2])/(e*zmax-zmin)*np.exp(r*abs(phs)/np.pi)
dat_vert[:,0]=dat_vert[:,0]+(zcnt-dat_vert[:,2])*np.tan((90.-dip)/180.*np.pi)
# ridge patch
def flt_patch(dat_vert,slope1,slope2,trunc1,trunc2,hlw,hup):
b1=-slope1*trunc1-.7
b2=-slope2*trunc2-.7
in_id=np.where(np.logical_and(dat_vert[:,2]-slope1*dat_vert[:,1]<b1, dat_vert[:,2]-slope2*dat_vert[:,1]<b2))[0]
out_id=np.setdiff1d(np.array(range(len(dat_vert)),dtype=np.int32),in_id)
x_shift=dat_vert[in_id,0]
# ridge patch
k=0
zup=dat_vert[:,2].max()
zlw=dat_vert[:,2].min()
for i in in_id:
r=abs(dat_vert[i,1]-.5*(trunc1+trunc2))
R=.5*((dat_vert[i,2]-b2)/slope2-(dat_vert[i,2]-b1)/slope1)
h=hlw+(dat_vert[i,2]-zlw)/(zup-zlw)*(hup-hlw)
x_shift[k]=x_shift[k]+np.cos(r/R*np.pi/2.)*h
k+=1
dat_vert=np.vstack((dat_vert[out_id,:],
np.hstack((x_shift.reshape(len(in_id),1),
dat_vert[in_id,1].reshape(len(in_id),1),
dat_vert[in_id,2].reshape(len(in_id),1)))))
return dat_vert
slope1=10.;slope2=-10.
trunc1=.1;trunc2=.6
hup=0.;hlw=.08
#dat_vert=flt_patch(dat_vert,slope1,slope2,trunc1,trunc2,hlw,hup)
print(omg)
fout='F3D_syn.xyz'
f=open(fout,'w+')
np.savetxt(f,dat_vert,delimiter=' ', fmt='%.6f '*3)
f.close()
from subprocess import call
fin=fout
fout=fout.rsplit('.')[0]+'.stl'
mxl='xyz2stl.mlx'
call(['meshlabserver', '-i',fin,'-o',fout,'-s',mxl])
if clean==1: os.remove(fin)
# Mesh
fin=fout
if refine==1:
fout=fout.rsplit('.')[0]+'_dns.exo'
else:
fout=fout.rsplit('.')[0]+'.exo'
jou='F3D_tet.jou'
txt_jou=open(jou,'r')
txt_jou_tmp=open('tmp.jou','w+')
hf=0.0025 # fault grid length (0.0025 for ~100 m tet model, 0.003 for ~40 m)
hm=0.0075 # matrix grid length (0.0075 for ~100 m tet model, 0.010 for ~40 m)
for line in txt_jou:
line=line.strip('\r\n')
if 'import' in line.lower():
line='import stl "'+fin+'"'
if 'export' in line.lower():
line='export mesh "'+fout+'" dimension 3 overwrite'
if 'surface 46 94 95 97 size' in line.lower():
line='surface 46 94 95 97 size %0.6f' %(2*hf)
if 'volume all size' in line.lower():
line='volume all size %0.6f' %(2*hm)
txt_jou_tmp.write(line+'\n')
if 'mesh volume all' in line.lower() and refine==1:
txt_jou_tmp.write('refine volume all\n')
txt_jou.close();txt_jou_tmp.close()
call(['trelis','-nojournal','-nographics','tmp.jou'])
if clean==1: os.remove('tmp.jou')
# Preprocessing msh=>inp
dt_dyn=2E-5 #1E-5 for dns 100 m tet model, 8E-5 for 40 m tet, 8E-4 for ~1 m tet
import F3D_msh2inp
_=F3D_msh2inp.msh2inp(fout,dt_dyn)
# Fault plot
if vis==1:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(dat_vert[:,0], dat_vert[:,1], dat_vert[:,2], c='b', marker='.')
# Create cubic bounding box to simulate equal aspect ratio
max_range = np.array([np.max(dat_vert[:,0])-np.min(dat_vert[:,0]),np.max(dat_vert[:,1])\
-np.min(dat_vert[:,1]), np.max(dat_vert[:,2])-np.min(dat_vert[:,2])]).max()
Xb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][0].flatten()
Yb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][1].flatten()
Zb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][2].flatten()
for xb, yb, zb in zip(Xb, Yb, Zb):
ax.plot([xb], [yb], [zb], 'w',)
plt.title('fault [km]')
plt.grid()
plt.show()
| 34.266667 | 115 | 0.635754 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 996 | 0.215305 |

0244e0d25129f6105b7892408951f27b584d128e | 2,850 | py | Python |
fltk/util/data_loader_utils.py | tudelft-eemcs-dml/fltk-testbed-gr-5 | 72afa24a37cd1f8f5f49665c83ccbd730d76ad21 | ["BSD-2-Clause"] | null | null | null |
fltk/util/data_loader_utils.py | tudelft-eemcs-dml/fltk-testbed-gr-5 | 72afa24a37cd1f8f5f49665c83ccbd730d76ad21 | ["BSD-2-Clause"] | 2 | 2021-05-11T12:48:14.000Z | 2021-05-11T12:49:24.000Z |
fltk/util/data_loader_utils.py | tudelft-eemcs-dml/fltk-testbed-gr-5 | 72afa24a37cd1f8f5f49665c83ccbd730d76ad21 | ["BSD-2-Clause"] | 2 | 2021-05-03T17:40:18.000Z | 2021-05-11T09:34:30.000Z |
import numpy
from torch.utils.data import DataLoader
import os
import pickle
import random
from ..datasets import Dataset
def generate_data_loaders_from_distributed_dataset(distributed_dataset, batch_size):
"""
Generate data loaders from a distributed dataset.
:param distributed_dataset: Distributed dataset
:type distributed_dataset: list(tuple)
:param batch_size: batch size for data loader
:type batch_size: int
"""
data_loaders = []
for worker_training_data in distributed_dataset:
data_loaders.append(Dataset.get_data_loader_from_data(batch_size, worker_training_data[0], worker_training_data[1], shuffle=True))
return data_loaders
def load_train_data_loader(logger, args):
"""
Loads the training data DataLoader object from a file if available.
:param logger: loguru.Logger
:param args: Arguments
"""
if os.path.exists(args.get_train_data_loader_pickle_path()):
dl = load_data_loader_from_file(logger, args.get_train_data_loader_pickle_path())
return dl
else:
logger.error("Couldn't find train data loader stored in file")
raise FileNotFoundError("Couldn't find train data loader stored in file")
def generate_train_loader(args, dataset):
train_dataset = dataset.get_train_dataset()
X, Y = shuffle_data(args, train_dataset)
return dataset.get_data_loader_from_data(args.get_batch_size(), X, Y)
def load_test_data_loader(logger, args):
"""
Loads the test data DataLoader object from a file if available.
:param logger: loguru.Logger
:param args: Arguments
"""
if os.path.exists(args.get_test_data_loader_pickle_path()):
return load_data_loader_from_file(logger, args.get_test_data_loader_pickle_path())
else:
logger.error("Couldn't find test data loader stored in file")
raise FileNotFoundError("Couldn't find train data loader stored in file")
def load_data_loader_from_file(logger, filename) -> DataLoader:
"""
Loads DataLoader object from a file if available.
:param logger: loguru.Logger
:param filename: string
"""
logger.info("Loading data loader from file: {}".format(filename))
with open(filename, "rb") as f:
return load_saved_data_loader(f)
def generate_test_loader(args, dataset):
test_dataset = dataset.get_test_dataset()
X, Y = shuffle_data(args, test_dataset)
return dataset.get_data_loader_from_data(args.get_test_batch_size(), X, Y)
def shuffle_data(args, dataset):
data = list(zip(dataset[0], dataset[1]))
random.shuffle(data)
X, Y = zip(*data)
X = numpy.asarray(X)
Y = numpy.asarray(Y)
return X, Y
def load_saved_data_loader(file_obj):
return pickle.load(file_obj)
def save_data_loader_to_file(data_loader, file_obj):
pickle.dump(data_loader, file_obj)
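# --- Editor's note (hedged usage sketch; filenames are placeholders) ---
# Save/load round trip with the helpers above:
# with open('train_loader.pickle', 'wb') as f:
#     save_data_loader_to_file(train_loader, f)
# with open('train_loader.pickle', 'rb') as f:
#     train_loader = load_saved_data_loader(f)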
| 31.318681 | 138 | 0.729825 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 878 | 0.30807 |

024580a7ff506aa3cbda6d46122b84b1603a6c05 | 794 | py | Python |
pywikibot/families/omegawiki_family.py | shizhao/pywikibot-core | 8441a1cd0e8dd5d3701f1c5e26077e40a40937ee | ["MIT"] | null | null | null |
pywikibot/families/omegawiki_family.py | shizhao/pywikibot-core | 8441a1cd0e8dd5d3701f1c5e26077e40a40937ee | ["MIT"] | null | null | null |
pywikibot/families/omegawiki_family.py | shizhao/pywikibot-core | 8441a1cd0e8dd5d3701f1c5e26077e40a40937ee | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
__version__ = '$Id: 024580a7ff506aa3cbda6d46122b84b1603a6c05 $'
from pywikibot import family
# Omegawiki, the Ultimate online dictionary
class Family(family.Family):
def __init__(self):
family.Family.__init__(self)
self.name = 'omegawiki'
self.langs['omegawiki'] = 'www.omegawiki.org'
# On most Wikipedias page names must start with a capital letter, but some
# languages don't use this.
self.nocapitalize = self.langs.keys()
def hostname(self,code):
return 'www.omegawiki.org'
def version(self, code):
return "1.16alpha"
def scriptpath(self, code):
return ''
def path(self, code):
return '/index.php'
def apipath(self, code):
return '/api.php'
| 22.685714 | 82 | 0.632242 | 627 | 0.789673 | 0 | 0 | 0 | 0 | 0 | 0 | 312 | 0.392947 |

024a818dbea659d940b31f646bbc0d73684c65d8 | 4,781 | py | Python |
tools/scripts/extract_features_WORLD.py | feelins/mcd_WORLD | 8a98c1c740ec5371a322d038b8498cb72f3f7750 | ["BSD-3-Clause"] | 5 | 2019-05-16T11:42:21.000Z | 2022-03-25T22:25:35.000Z |
tools/scripts/extract_features_WORLD.py | feelins/mcd_WORLD | 8a98c1c740ec5371a322d038b8498cb72f3f7750 | ["BSD-3-Clause"] | null | null | null |
tools/scripts/extract_features_WORLD.py | feelins/mcd_WORLD | 8a98c1c740ec5371a322d038b8498cb72f3f7750 | ["BSD-3-Clause"] | null | null | null |
import os
import sys
import shutil
import glob
import time
import multiprocessing as mp
if len(sys.argv)!=4:
print("Usage: ")
print("python extract_features_WORLD.py <path_to_wav_dir> <path_to_feat_dir> <sampling rate>")
sys.exit(1)
# top currently directory
current_dir = os.getcwd()
# input audio directory
wav_dir = sys.argv[1]
# Output features directory
out_dir = sys.argv[2]
# initializations
fs = int(sys.argv[3])
# tools directory
world = os.path.join(current_dir, "tools/bin/WORLD")
sptk = os.path.join(current_dir, "tools/bin/SPTK-3.11")
if not os.path.exists(out_dir):
os.mkdir(out_dir)
if fs == 16000:
nFFTHalf = 1024
alpha = 0.58
elif fs == 22050:
nFFTHalf = 1024
alpha = 0.65
elif fs == 44100:
nFFTHalf = 2048
alpha = 0.76
elif fs == 48000:
nFFTHalf = 2048
alpha = 0.77
else:
print("As of now, we don't support %d Hz sampling rate." %(fs))
print("Please consider either downsampling to 16000 Hz or upsampling to 48000 Hz")
sys.exit(1)
#bap order depends on sampling rate.
mcsize=59
def get_wav_filelist(wav_dir):
wav_files = []
for file in os.listdir(wav_dir):
whole_filepath = os.path.join(wav_dir,file)
if os.path.isfile(whole_filepath) and str(whole_filepath).endswith(".wav"):
wav_files.append(whole_filepath)
elif os.path.isdir(whole_filepath):
wav_files += get_wav_filelist(whole_filepath)
wav_files.sort()
return wav_files
def process(filename):
'''
The function decomposes a wav file into F0, mel-cepstral coefficients, and aperiodicity
:param filename: path to wav file
:return: .lf0, .mgc and .bap files
'''
file_id = os.path.basename(filename).split(".")[0]
print('\n' + file_id)
### WORLD ANALYSIS -- extract vocoder parameters ###
### extract f0, sp, ap ###
world_analysis_cmd = "%s %s %s %s %s" % (os.path.join(world, 'analysis'), \
filename,
os.path.join(out_dir, file_id + '.f0'), \
os.path.join(out_dir, file_id + '.sp'), \
os.path.join(out_dir, file_id + '.bapd'))
os.system(world_analysis_cmd)
### convert f0 to lf0 ###
sptk_x2x_da_cmd = "%s +da %s > %s" % (os.path.join(sptk, 'x2x'), \
os.path.join(out_dir, file_id + '.f0'), \
os.path.join(out_dir, file_id + '.f0a'))
os.system(sptk_x2x_da_cmd)
sptk_x2x_af_cmd = "%s +af %s | %s > %s " % (os.path.join(sptk, 'x2x'), \
os.path.join(out_dir, file_id + '.f0a'), \
os.path.join(sptk, 'sopr') + ' -magic 0.0 -LN -MAGIC -1.0E+10', \
os.path.join(out_dir, file_id + '.lf0'))
os.system(sptk_x2x_af_cmd)
### convert sp to mgc ###
sptk_x2x_df_cmd1 = "%s +df %s | %s | %s >%s" % (os.path.join(sptk, 'x2x'), \
os.path.join(out_dir, file_id + '.sp'), \
os.path.join(sptk, 'sopr') + ' -R -m 32768.0', \
os.path.join(sptk, 'mcep') + ' -a ' + str(alpha) + ' -m ' + str(
mcsize) + ' -l ' + str(
nFFTHalf) + ' -e 1.0E-8 -j 0 -f 0.0 -q 3 ', \
os.path.join(out_dir, file_id + '.mgc'))
os.system(sptk_x2x_df_cmd1)
### convert bapd to bap ###
sptk_x2x_df_cmd2 = "%s +df %s > %s " % (os.path.join(sptk, "x2x"), \
os.path.join(out_dir, file_id + ".bapd"), \
os.path.join(out_dir, file_id + '.bap'))
os.system(sptk_x2x_df_cmd2)
print("--- Feature extraction started ---")
start_time = time.time()
# get wav files list
wav_files = get_wav_filelist(wav_dir)
# do multi-processing
pool = mp.Pool(mp.cpu_count())
pool.map(process, wav_files)
# clean temporal files
#shutil.rmtree(out_dir, ignore_errors=True)
#shutil.rmtree(out_dir, ignore_errors=True)
#for zippath in glob.iglob(os.path.join(out_dir, '*.bapd')):
# os.remove(zippath)
clean_temp_files_cmd = "rm -rf %s/*.bapd %s/*.f0a %s/*.f0 %s/*.sp" % (out_dir, out_dir, out_dir, out_dir)
os.system(clean_temp_files_cmd)
print("You should have your features ready in: "+out_dir)
(m, s) = divmod(int(time.time() - start_time), 60)
print(("--- Feature extraction completion time: %d min. %d sec ---" % (m, s)))
| 34.89781 | 116 | 0.535244 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,470 | 0.307467 |

024b2b7d9d7075b55a314e3428f50fdfaf0a011e | 19,261 | py | Python |
mmtbx/bulk_solvent/f_model_all_scales.py | dperl-sol/cctbx_project | b9e390221a2bc4fd00b9122e97c3b79c632c6664 | ["BSD-3-Clause-LBNL"] | 155 | 2016-11-23T12:52:16.000Z | 2022-03-31T15:35:44.000Z |
mmtbx/bulk_solvent/f_model_all_scales.py | dperl-sol/cctbx_project | b9e390221a2bc4fd00b9122e97c3b79c632c6664 | ["BSD-3-Clause-LBNL"] | 590 | 2016-12-10T11:31:18.000Z | 2022-03-30T23:10:09.000Z |
mmtbx/bulk_solvent/f_model_all_scales.py | dperl-sol/cctbx_project | b9e390221a2bc4fd00b9122e97c3b79c632c6664 | ["BSD-3-Clause-LBNL"] | 115 | 2016-11-15T08:17:28.000Z | 2022-02-09T15:30:14.000Z |
from __future__ import absolute_import, division, print_function
from cctbx.array_family import flex
from cctbx import adptbx
from mmtbx import bulk_solvent
import mmtbx
from libtbx import group_args
import mmtbx.arrays
import mmtbx.bulk_solvent.scaler
from libtbx.test_utils import approx_equal
from libtbx.math_utils import ifloor, iceil
import mmtbx.f_model
import mmtbx.bulk_solvent.bulk_solvent_and_scaling as bss
from six.moves import zip, range
class run(mmtbx.f_model.manager):
"""
  This is a very specialized routine to perform complex protocols of updating
  all scales of fmodel, including the case of twinning, the presence of H
  atoms, and likely more. Inside, it pretends to be fmodel proper (done by
  dictionary updates before and after - any better ideas of how to do this
  more nicely are welcome!).
"""
def __init__(self,
fmodel,
apply_back_trace,
remove_outliers,
fast,
params,
refine_hd_scattering,
log):
### Must be first thing here
self.__dict__.update(fmodel.__dict__)
# From this point on: self = fmodel
###
russ = self.compute(apply_back_trace = apply_back_trace, remove_outliers =
remove_outliers, fast = fast, params = params,
refine_hd_scattering = refine_hd_scattering, log = log)
### Must be next to last...
fmodel.__dict__.update(self.__dict__)
### ...and this one is last
self.russ = russ
def compute(self, apply_back_trace, remove_outliers, fast,
params, refine_hd_scattering, log):
assert [self.arrays.core_twin, self.twin_law].count(None) in [0,2]
self.show(prefix = "start", log = log)
self.reset_all_scales()
self.show(prefix = "re-set all scales", log = log)
if(remove_outliers and not self.twinned()):
for iii in range(5):
self.remove_outliers(use_model = False, log = None) # XXX
self.show(prefix = "remove outliers", log = log)
result = None
if(self.twinned()):
for cycle in range(2):
if(log is not None): print("cycle %d:"%cycle, file=log)
self.update_twin_fraction()
self.show(prefix = "update twin fraction", log = log)
result = self.update_solvent_and_scale_twin(log = log,
refine_hd_scattering = refine_hd_scattering)
else:
result = self.update_solvent_and_scale_2(
fast = fast,
params = params,
apply_back_trace = apply_back_trace,
refine_hd_scattering = refine_hd_scattering,
log = log)
#XXX if(remove_outliers and not self.twinned()):
#XXX self.remove_outliers(use_model = True, log = None) # XXX
if(remove_outliers and not self.twinned()):
for iii in range(5):
self.remove_outliers(use_model = True, log = None) # XXX
self.show(prefix = "remove outliers", log = log)
return result
def reset_all_scales(self):
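    # Reset every scale to its identity value (k_isotropic = k_anisotropic = 1,
    # k_mask = 0) and zero the partial-structure arrays f_part1/f_part2 (and
    # their twin counterparts, if present), so scaling restarts from scratch.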
size = self.f_obs().data().size()
zero_c = flex.complex_double(size,0)
zero_d = flex.double(size,0)
one_d = flex.double(size,1)
f_part1_twin = self.f_calc_twin()
f_part2_twin = self.f_calc_twin()
if(f_part1_twin is not None):
f_part1_twin = self.f_calc_twin().array(data=zero_c)
f_part2_twin = self.f_calc_twin().array(data=zero_c)
self.update_core(
f_part1 = self.f_calc().array(data=zero_c),
f_part2 = self.f_calc().array(data=zero_c),
f_part1_twin = f_part1_twin,
f_part2_twin = f_part2_twin,
k_isotropic = one_d,
k_anisotropic = one_d,
k_mask = [zero_d]*len(self.k_masks()))
def show(self, prefix, log, r=None):
if(log is None): return
if(r is None): r = self.r_all()
m = "%24s: r(all,work,free)=%6.4f %6.4f %6.4f n_refl.: %d"%(prefix, r,
self.r_work(), self.r_free(), self.f_obs().data().size())
if(not self.twinned()):
print(m, file=log)
else:
print(m+" twin_fraction=%4.2f"%self.twin_fraction, file=log)
def need_to_refine_hd_scattering_contribution(self):
if(self.xray_structure is None): return False
refine_hd_scattering = True
hd_selection = self.xray_structure.hd_selection()
occ_h_all_zero = self.xray_structure.select(
hd_selection).scatterers().extract_occupancies().all_eq(0.0) # riding H
if(self.xray_structure.guess_scattering_type_neutron() or
hd_selection.count(True)==0 or
not occ_h_all_zero):
refine_hd_scattering = False
return refine_hd_scattering
def update_solvent_and_scale_2(self, fast, params, apply_back_trace,
refine_hd_scattering, log):
if(params is None): params = bss.master_params.extract()
if(self.xray_structure is not None):
# Figure out Fcalc and Fmask based on presence of H
hd_selection = self.xray_structure.hd_selection()
xrs_no_h = self.xray_structure.select(~hd_selection)
xrs_h = self.xray_structure.select(hd_selection)
# Create data container for scalers. If H scattering is refined then it is
# assumed that self.f_calc() does not contain H contribution at all.
fmodel_kbu = mmtbx.f_model.manager_kbu(
f_obs = self.f_obs(),
f_calc = self.f_calc(),
f_masks = self.f_masks(),
ss = self.ss)
# Compute k_total and k_mask using one of the two methods (anal or min).
# Note: this intentionally ignores previously existing f_part1 and f_part2.
#
k_sol, b_sol, b_cart, b_adj = [None,]*4
if(fast): # analytical
assert len(fmodel_kbu.f_masks)==1
result = mmtbx.bulk_solvent.scaler.run_simple(
fmodel_kbu = fmodel_kbu,
r_free_flags = self.r_free_flags(),
bulk_solvent = params.bulk_solvent,
aniso_scale = params.anisotropic_scaling,
bin_selections = self.bin_selections)
r_all_from_scaler = result.r_all() # must be here, before apply_back_trace
else: # using minimization: exp solvent and scale model (k_sol,b_sol,b_cart)
result = bss.bulk_solvent_and_scales(
fmodel_kbu = fmodel_kbu,
params = params)
k_sol, b_sol, b_cart = result.k_sols(), result.b_sols(), result.b_cart()
r_all_from_scaler = result.r_all() # must be here, before apply_back_trace
if(apply_back_trace and len(fmodel_kbu.f_masks)==1 and
self.xray_structure is not None):
o = result.apply_back_trace_of_overall_exp_scale_matrix(
xray_structure = self.xray_structure)
b_adj = o.b_adj
if(not fast): b_sol, b_cart = [o.b_sol], o.b_cart
self.update_xray_structure(
xray_structure = o.xray_structure,
update_f_calc = True)
fmodel_kbu = fmodel_kbu.update(f_calc = self.f_calc())
self.show(prefix = "overall B=%s to atoms"%str("%7.2f"%o.b_adj).strip(),
log = log)
# Update self with new arrays so that H correction knows current R factor.
# If no H to account for, then this is the final result.
k_masks = result.k_masks()
k_anisotropic = result.k_anisotropic()
k_isotropic = result.k_isotropic()
self.update_core(
k_mask = k_masks,
k_anisotropic = k_anisotropic,
k_isotropic = k_isotropic)
self.show(prefix = "bulk-solvent and scaling", log = log)
# Consistency check
if(not apply_back_trace):
assert approx_equal(self.r_all(), r_all_from_scaler)
# Add contribution from H (if present and riding). This goes to f_part2.
kh, bh = 0, 0
if(refine_hd_scattering and
self.need_to_refine_hd_scattering_contribution()):
# Obsolete previous contribution f_part2
f_part2 = fmodel_kbu.f_calc.array(data=fmodel_kbu.f_calc.data()*0)
self.update_core(f_part2 = f_part2)
xrs_h = xrs_h.set_occupancies(value=1).set_b_iso(value = 0)
f_h = self.compute_f_calc(xray_structure = xrs_h)
# Accumulate all mask contributions: Fcalc_atoms+Fbulk_1+...+Fbulk_N
data = fmodel_kbu.f_calc.data()
for k_mask_, f_mask_ in zip(k_masks, fmodel_kbu.f_masks):
data = data + k_mask_*f_mask_.data()
f_calc_plus_f_bulk_no_scales = fmodel_kbu.f_calc.array(data = data)
# Consistency check
assert approx_equal(self.f_model().data(),
f_calc_plus_f_bulk_no_scales.data()*k_isotropic*k_anisotropic)
assert approx_equal(self.f_model_no_scales().data(),
f_calc_plus_f_bulk_no_scales.data())
#
# Compute contribution from H (F_H)
#
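      # Two-stage grid search: first a coarse scan over k_sol in [0, 1.5]
      # (step 0.1) and b_sol around the mean non-H B-factor, then a fine scan
      # in a +/-0.1 / +/-5 window around the coarse optimum (see below).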
# Coarse sampling
b_mean = flex.mean(xrs_no_h.extract_u_iso_or_u_equiv())*adptbx.u_as_b(1.)
b_min = int(max(0,b_mean)*0.5)
b_max = int(b_mean*1.5)
sc = 1000.
kr=[i/sc for i in range(ifloor(0*sc), iceil(1.5*sc)+1, int(0.1*sc))]
br=[i/sc for i in range(ifloor(b_min*sc), iceil(b_max*sc)+1, int(5.*sc))]
o = bulk_solvent.k_sol_b_sol_k_anisotropic_scaler_twin(
f_obs = fmodel_kbu.f_obs.data(),
f_calc = f_calc_plus_f_bulk_no_scales.data(),
f_mask = f_h.data(),
k_total = k_isotropic*k_anisotropic,
ss = fmodel_kbu.ss,
k_sol_range = flex.double(kr),
b_sol_range = flex.double(br),
r_ref = self.r_work())
if(o.updated()):
f_part2 = f_h.array(data = o.k_mask()*f_h.data())
kh, bh = o.k_sol(), o.b_sol()
self.show(prefix = "add H (%4.2f, %6.2f)"%(kh, bh), log = log, r=o.r())
# Fine sampling
k_min = max(0,o.k_sol()-0.1)
k_max = o.k_sol()+0.1
b_min = max(0,o.b_sol()-5.)
b_max = o.b_sol()+5.
kr=[i/sc for i in range(ifloor(k_min*sc),iceil(k_max*sc)+1,int(0.01*sc))]
br=[i/sc for i in range(ifloor(b_min*sc),iceil(b_max*sc)+1,int(1.*sc))]
o = bulk_solvent.k_sol_b_sol_k_anisotropic_scaler_twin(
f_obs = fmodel_kbu.f_obs.data(),
f_calc = f_calc_plus_f_bulk_no_scales.data(),
f_mask = f_h.data(),
k_total = k_isotropic*k_anisotropic,
ss = fmodel_kbu.ss,
k_sol_range = flex.double(kr),
b_sol_range = flex.double(br),
r_ref = o.r())
if(o.updated()):
f_part2 = f_h.array(data = o.k_mask()*f_h.data())
kh, bh = o.k_sol(), o.b_sol()
self.show(prefix = "add H (%4.2f, %6.2f)"%(kh, bh), log = log, r=o.r())
# THIS HELPS if fast=true is used, see how it works in reality
#
if(fast):
fmodel_kbu_ = mmtbx.f_model.manager_kbu(
f_obs = self.f_obs(),
f_calc = f_calc_plus_f_bulk_no_scales,
f_masks = [f_part2],
ss = self.ss)
result = mmtbx.bulk_solvent.scaler.run_simple(
fmodel_kbu = fmodel_kbu_,
r_free_flags = self.r_free_flags(),
bulk_solvent = params.bulk_solvent,
aniso_scale = params.anisotropic_scaling,
bin_selections = self.bin_selections)
f_part2 = f_part2.array(data = result.core.k_mask()*f_part2.data())
k_isotropic = result.core.k_isotropic*result.core.k_isotropic_exp
k_anisotropic = result.core.k_anisotropic
# Update self with final scales
self.update_core(
k_mask = k_masks,
k_anisotropic = k_anisotropic,
k_isotropic = k_isotropic,
f_part2 = f_part2)
# Make sure what came out of scaling matches what self thinks it really is
# It must match at least up to 1.e-6.
self.show(prefix = "add H (%4.2f, %6.2f)"%(kh, bh), log = log)
if(fast):
assert approx_equal(result.r_work(), self.r_work(), 1.e-4)
else:
assert approx_equal(self.r_all(), o.r()), [self.r_all(), o.r()]
return group_args(
k_sol = k_sol,
b_sol = b_sol,
b_cart = b_cart,
k_h = kh,
b_h = bh,
b_adj = b_adj)
def update_solvent_and_scale_twin(self, refine_hd_scattering, log):
if(not self.twinned()): return
assert len(self.f_masks()) == 1
# Re-set all scales to unit or zero
self.show(prefix = "update scales twin start", log = log)
self.reset_all_scales()
self.show(prefix = "reset f_part, k_(total,mask)", log = log)
f_calc_data = self.f_calc().data()
f_calc_data_twin = self.f_calc_twin().data()
# Initial trial set
sc = 1000.
ksr = [i/sc for i in range(ifloor(0*sc), iceil(0.6*sc)+1, int(0.05*sc))]
bsr = [i/sc for i in range(ifloor(0*sc), iceil(150.*sc)+1, int(10.*sc))]
o_kbu_sol = bulk_solvent.k_sol_b_sol_k_anisotropic_scaler_twin(
f_obs = self.f_obs().data(),
f_calc_1 = f_calc_data,
f_calc_2 = f_calc_data_twin,
f_mask_1 = self.arrays.core.f_masks[0].data(),
f_mask_2 = self.arrays.core_twin.f_masks[0].data(),
ss = self.ss,
twin_fraction = self.twin_fraction,
k_sol_range = flex.double(ksr),
b_sol_range = flex.double(bsr),
miller_indices = self.f_obs().indices(), #XXX ??? What about twin-related?
unit_cell = self.f_obs().unit_cell(),
r_ref = self.r_all())
if(o_kbu_sol.updated()):
self.update(
k_mask = o_kbu_sol.k_mask(),
k_anisotropic = o_kbu_sol.k_anisotropic())
# Second (finer) trial set
k_min = max(o_kbu_sol.k_sol()-0.05, 0)
k_max = min(o_kbu_sol.k_sol()+0.05, 0.6)
ksr = [i/sc for i in range(ifloor(k_min*sc), iceil(k_max*sc)+1, int(0.01*sc))]
b_min = max(o_kbu_sol.b_sol()-10, 0)
b_max = min(o_kbu_sol.b_sol()+10, 150)
bsr = [i/sc for i in range(ifloor(b_min*sc), iceil(b_max*sc)+1, int(1.*sc))]
o_kbu_sol = bulk_solvent.k_sol_b_sol_k_anisotropic_scaler_twin(
f_obs = self.f_obs().data(),
f_calc_1 = f_calc_data,
f_calc_2 = f_calc_data_twin,
f_mask_1 = self.arrays.core.f_masks[0].data(),
f_mask_2 = self.arrays.core_twin.f_masks[0].data(),
ss = self.ss,
twin_fraction = self.twin_fraction,
k_sol_range = flex.double(ksr),
b_sol_range = flex.double(bsr),
miller_indices = self.f_obs().indices(), #XXX ??? What about twin-related?
unit_cell = self.f_obs().unit_cell(),
r_ref = o_kbu_sol.r())
if(o_kbu_sol.updated()):
self.update(
k_mask = o_kbu_sol.k_mask(),
k_anisotropic = o_kbu_sol.k_anisotropic())
# Disable due to rare failures. Technically they should always match. But
# since different routines are used tiny disagreements are possible.
# See examples in : /net/anaconda/raid1/afonine/work/bugs/twin_refinement
#assert approx_equal(self.r_all(), o_kbu_sol.r(), 1.e-5)
##############
# use apply_back_trace in if below
if(self.xray_structure is not None):
o = mmtbx.bulk_solvent.scaler.tmp(
xray_structure = self.xray_structure,
k_anisotropic = o_kbu_sol.k_anisotropic(),
k_masks = [o_kbu_sol.k_mask()],
ss = self.ss)
self.update_xray_structure(
xray_structure = o.xray_structure,
update_f_calc = True)
#############
self.update(
k_mask = o.k_masks,
k_anisotropic = o.k_anisotropic)
self.show(prefix = "bulk-solvent and scaling", log = log)
#
# Add contribution from H (if present and riding). This goes to f_part2.
#
kh, bh = 0, 0
if(refine_hd_scattering and
self.need_to_refine_hd_scattering_contribution()):
hd_selection = self.xray_structure.hd_selection()
xrs_no_h = self.xray_structure.select(~hd_selection)
xrs_h = self.xray_structure.select(hd_selection)
# Accumulate all mask contributions: Fcalc_atoms+Fbulk_1+...+Fbulk_N
data = self.f_calc().data()+self.f_masks()[0].data()*self.k_masks()[0]
f_calc_plus_f_bulk_no_scales = self.f_calc().array(data = data)
data = self.f_calc_twin().data()+\
self.f_masks_twin()[0].data()*self.k_masks_twin()[0]
f_calc_plus_f_bulk_no_scales_twin = self.f_calc_twin().array(data = data)
# Initial FH contribution
xrs_h = xrs_h.set_occupancies(value=1).set_b_iso(value = 0)
f_h = self.compute_f_calc(xray_structure = xrs_h)
f_h_twin = self.compute_f_calc(xray_structure = xrs_h,
miller_array = self.f_calc_twin())
# Coarse sampling
b_mean = flex.mean(xrs_no_h.extract_u_iso_or_u_equiv())*adptbx.u_as_b(1.)
b_min = int(max(0,b_mean)*0.5)
b_max = int(b_mean*1.5)
sc = 1000.
kr=[i/sc for i in range(ifloor(0*sc), iceil(1.5*sc)+1, int(0.1*sc))]
br=[i/sc for i in range(ifloor(b_min*sc), iceil(b_max*sc)+1, int(5.*sc))]
obj = bulk_solvent.k_sol_b_sol_k_anisotropic_scaler_twin(
f_obs = self.f_obs().data(),
f_calc_1 = f_calc_plus_f_bulk_no_scales.data(),
f_calc_2 = f_calc_plus_f_bulk_no_scales_twin.data(),
f_mask_1 = f_h.data(),
f_mask_2 = f_h_twin.data(),
ss = self.ss,
twin_fraction = self.twin_fraction,
k_sol_range = flex.double(kr),
b_sol_range = flex.double(br),
miller_indices = self.f_obs().indices(), # XXX What about twin-related?
unit_cell = self.f_obs().unit_cell(),
r_ref = self.r_work())
if(obj.updated()):
f_part2 = f_h.array( data = obj.k_mask()*f_h.data())
f_part2_twin = f_h_twin.array(data = obj.k_mask()*f_h_twin.data())
kh, bh = obj.k_sol(), obj.b_sol()
# Fine sampling
k_min = max(0,obj.k_sol()-0.1)
k_max = obj.k_sol()+0.1
b_min = max(0,obj.b_sol()-5.)
b_max = obj.b_sol()+5.
kr=[i/sc for i in range(ifloor(k_min*sc),iceil(k_max*sc)+1,int(0.01*sc))]
br=[i/sc for i in range(ifloor(b_min*sc),iceil(b_max*sc)+1,int(5.*sc))]
obj = bulk_solvent.k_sol_b_sol_k_anisotropic_scaler_twin(
f_obs = self.f_obs().data(),
f_calc_1 = f_calc_plus_f_bulk_no_scales.data(),
f_calc_2 = f_calc_plus_f_bulk_no_scales_twin.data(),
f_mask_1 = f_h.data(),
f_mask_2 = f_h_twin.data(),
ss = self.ss,
twin_fraction = self.twin_fraction,
k_sol_range = flex.double(kr),
b_sol_range = flex.double(br),
miller_indices = self.f_obs().indices(), # XXX What about twin-related?
unit_cell = self.f_obs().unit_cell(),
r_ref = obj.r())
if(obj.updated()):
f_part2 = f_h.array( data = obj.k_mask()*f_h.data())
f_part2_twin = f_h_twin.array(data = obj.k_mask()*f_h_twin.data())
kh, bh = obj.k_sol(), obj.b_sol()
self.update_core(
f_part2 = f_part2,
f_part2_twin = f_part2_twin,
k_anisotropic = obj.k_anisotropic())
self.show(prefix = "add H (%4.2f, %6.2f)"%(kh, bh), log = log)
b_cart = adptbx.u_as_b(adptbx.u_star_as_u_cart(
self.f_obs().unit_cell(), o_kbu_sol.u_star()))
return group_args(
k_sol = o_kbu_sol.k_sol(),
b_sol = o_kbu_sol.b_sol(),
b_cart = b_cart,
k_h = kh,
b_h = bh)
| 44.380184
| 82
| 0.626343
| 18,746
| 0.973262
| 0
| 0
| 0
| 0
| 0
| 0
| 2,819
| 0.146358
|
024c1d679000935d415d1310cd2a49a746f73e4a
| 4,704
|
py
|
Python
|
pysparkpro/dsl/nodesbak.py
|
liaoxiong3x/pyspark
|
2a16ad495780b1b37f5dc571cb7ea11260765366
|
[
"Apache-2.0"
] | null | null | null |
pysparkpro/dsl/nodesbak.py
|
liaoxiong3x/pyspark
|
2a16ad495780b1b37f5dc571cb7ea11260765366
|
[
"Apache-2.0"
] | null | null | null |
pysparkpro/dsl/nodesbak.py
|
liaoxiong3x/pyspark
|
2a16ad495780b1b37f5dc571cb7ea11260765366
|
[
"Apache-2.0"
] | null | null | null |
from session.abstract_class import PysparkPro
class DslAdaptor(object):
pysparkpro = PysparkPro()
select = 'SELECT'
insert = 'INSERT'
delete = 'DELETE'
update = 'UPDATE'
alert = 'ALERT'
create_table = 'CREATETABLE'
drop_table = 'DROPTABLE'
create_index = 'CREATEINDEX'
    drop_index = 'DROPINDEX'
create_user = 'CREATEUSER'
exit = 'EXIT'
print_table = 'PRINT'
show_tables = 'SHOW'
value = 'VALUE'
condition = 'CONDITION'
relation_attr = 'RELATTR'
grant_user = 'GRANTUSER'
revoke_user = 'REVOKEUSER'
attr_type = "ATTRTYPE"
class ConnectNode():
def __init__(self, select_list, from_list, where_list):
self.type = DslAdaptor.select
self.select_list = select_list
self.from_list = from_list
self.where_list = where_list
class CreateNode():
def __init__(self, select_list, from_list, where_list):
self.type = DslAdaptor.select
self.select_list = select_list
self.from_list = from_list
self.where_list = where_list
class InsertNode():
def __init__(self, select_list, from_list, where_list):
self.type = DslAdaptor.select
self.select_list = select_list
self.from_list = from_list
self.where_list = where_list
class LoadNode():
def __init__(self, select_list, from_list, where_list):
self.type = DslAdaptor.select
self.select_list = select_list
self.from_list = from_list
self.where_list = where_list
class RefreshNode():
def __init__(self, select_list, from_list, where_list):
self.type = DslAdaptor.select
self.select_list = select_list
self.from_list = from_list
self.where_list = where_list
class RegisterNode():
def __init__(self, select_list, from_list, where_list):
self.type = DslAdaptor.select
self.select_list = select_list
self.from_list = from_list
self.where_list = where_list
class SaveNode():
def __init__(self, select_list, from_list, where_list):
self.type = DslAdaptor.select
self.select_list = select_list
self.from_list = from_list
self.where_list = where_list
class ScriptNode():
def __init__(self, select_list, from_list, where_list):
self.type = DslAdaptor.select
self.select_list = select_list
self.from_list = from_list
self.where_list = where_list
class SelectNode():
def __init__(self, select_list, from_list, where_list):
self.type = DslAdaptor.select
self.select_list = select_list
self.from_list = from_list
self.where_list = where_list
class SetNode():
def __init__(self, select_list, from_list, where_list):
self.type = DslAdaptor.select
self.select_list = select_list
self.from_list = from_list
self.where_list = where_list
class TrainNode():
def __init__(self, select_list, from_list, where_list):
self.type = DslAdaptor.select
self.select_list = select_list
self.from_list = from_list
self.where_list = where_list
class Exit:
def __init__(self):
self.type = DslAdaptor.exit
class PrintTable:
def __init__(self, table_name):
self.type = DslAdaptor.print_table
self.table_name = table_name
class ShowTables:
def __init__(self):
self.type = DslAdaptor.show_tables
class Value:
def __init__(self, value_type, value):
self.type = DslAdaptor.value
self.value_type = value_type
self.value = value
def __str__(self):
return str(self.value) + '[' + self.value_type + ']'
class RelAttr:
def __init__(self, attr_name, table_name=None):
self.type = DslAdaptor.relation_attr
self.table_name = table_name
self.attr_name = attr_name
def __str__(self):
if self.table_name:
return self.table_name + '.' + self.attr_name
else:
return self.attr_name
class Cond:
def __init__(self, left, op, right):
self.type = DslAdaptor.condition
self.op = op.upper()
self.left = left
self.right = right
def __str__(self):
return '(' + str(self.left) + ', ' + str(self.right) + ', ' + self.op + ')'
class AttrType:
def __init__(self, attr_name, attr_type, type_len = 1):
self.type = DslAdaptor.attr_type
self.attr_type = attr_type
self.type_len = type_len
self.attr_name = attr_name
def __str__(self):
return self.attr_name + " " + self.attr_type + " " + str(self.type_len)
if __name__ == '__main__':
spark = DslAdaptor()
print(spark)
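    # A minimal sketch of assembling an AST node; the argument values below
    # are hypothetical parser outputs, not part of the original DSL:
    node = SelectNode(select_list=[RelAttr('name', 't')],
                      from_list=['t'],
                      where_list=[Cond(RelAttr('age', 't'), '>', Value('int', 18))])
    print(node.type, [str(a) for a in node.select_list], str(node.where_list[0]))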
| 26.426966
| 83
| 0.650298
| 4,531
| 0.963223
| 0
| 0
| 0
| 0
| 0
| 0
| 217
| 0.046131
|
024c4ab64cff5513fb1d36a41a43c50162ebb3f1
| 821
|
py
|
Python
|
backdoor/detect_buffer_overflow.py
|
Sanardi/bored
|
2816395b99c05871f01fbbd55a833dcd13801014
|
[
"MIT"
] | null | null | null |
backdoor/detect_buffer_overflow.py
|
Sanardi/bored
|
2816395b99c05871f01fbbd55a833dcd13801014
|
[
"MIT"
] | null | null | null |
backdoor/detect_buffer_overflow.py
|
Sanardi/bored
|
2816395b99c05871f01fbbd55a833dcd13801014
|
[
"MIT"
] | null | null | null |
import socket
def connect(server, port):
# open a connection to vulnserver
s = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
s.connect ((server, port))
return s
def read_until(s, delim=b':'):
buf = b''
while not buf.endswith(delim):
buf += s.recv(1)
return buf
def overflow_input(num_chars=128):
    for i in range(1, num_chars):
        s = None
        try:
            s = connect(SERVER, PORT)
            read_until(s)
            data = 'A' * i + '\n'
            data = bytes(data, encoding='utf-8')
            s.send(data)
        except OSError:
            # A failed connect/send means the previous payload likely
            # crashed the server; report the size and stop probing.
            print(f"Server crashed with input size {i}")
            break
        finally:
            if s is not None:  # connect() itself may have raised
                s.close()
if __name__ == "__main__":
PORT = 12345
SERVER = '<THE HOSTNAME OR IP>'
s = connect(SERVER, PORT)
print(read_until(s))
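    # To run the crash-detection sweep itself (same SERVER/PORT as above),
    # uncomment the next line; num_chars bounds the payload sizes tried.
    # overflow_input(num_chars=128)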
| 23.457143
| 58
| 0.548112
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 123
| 0.149817
|
024c6205dd81c6aee9436b9f31977f458d63fa70
| 3,384
|
py
|
Python
|
tools/test.py
|
EMinsight/MPh
|
2b967b77352f9ce7effcd50ad4774bf5eaf731ea
|
[
"MIT"
] | null | null | null |
tools/test.py
|
EMinsight/MPh
|
2b967b77352f9ce7effcd50ad4774bf5eaf731ea
|
[
"MIT"
] | null | null | null |
tools/test.py
|
EMinsight/MPh
|
2b967b77352f9ce7effcd50ad4774bf5eaf731ea
|
[
"MIT"
] | null | null | null |
"""
Runs all tests in the intended order.
Each test script (in the `tests` folder) contains a group of tests.
These scripts must be run in separate processes as most of them start
and stop the Java virtual machine, which can only be done once per
process. This is why simply calling pyTest (with `python -m pytest`
in the root folder) will not work.
This script here runs each test group in a new subprocess. It also
imposes a logical order: from the tests covering the most basic
functionality to the high-level abstractions.
Here, as opposed to the similar script `coverage.py`, we don't actually
run the tests through pyTest. Rather, we run the scripts directly so
that the output is less verbose. Note, however, that pyTest still needs
to be installed as some of the test fixtures require it.
The verbosity can be increased by passing `--log` as a command-line
argument. This will display the log messages produced by MPh as the
tests are running. You can also pass the name of a test group to run
only that one. For example, passing "model" will only run the tests
defined in `test_model.py`.
"""
from subprocess import run
from pathlib import Path
from timeit import default_timer as now
from argparse import ArgumentParser
from sys import executable as python
from sys import exit
from os import environ, pathsep
# Define order of test groups.
groups = ['meta', 'config', 'discovery', 'server', 'session', 'standalone',
'client', 'multi', 'node', 'model', 'exit']
# Determine path of project root folder.
here = Path(__file__).resolve().parent
root = here.parent
# Run MPh in project folder, not a possibly different installed version.
if 'PYTHONPATH' in environ:
environ['PYTHONPATH'] = str(root) + pathsep + environ['PYTHONPATH']
else:
environ['PYTHONPATH'] = str(root)
# Parse command-line arguments.
parser = ArgumentParser(prog='test.py',
description='Runs the MPh test suite.',
add_help=False,
allow_abbrev=False)
parser.add_argument('--help',
help='Show this help message.',
action='help')
parser.add_argument('--log',
help='Display log output.',
action='store_true')
parser.add_argument('--groups',
help='List all test groups.',
action='store_true')
parser.add_argument('group',
help='Run only this group of tests.',
nargs='?')
arguments = parser.parse_args()
if arguments.groups:
for group in groups:
print(group)
exit()
if arguments.group:
group = arguments.group
if group.startswith('test_'):
group = group[5:]
if group.endswith('.py'):
group = group[:-3]
groups = [group]
options = []
if arguments.log:
options.append('--log')
# Run each test group in new process.
for group in groups:
if groups.index(group) > 0:
print()
print(f'Running test group "{group}".')
t0 = now()
process = run([python, f'test_{group}.py'] + options, cwd=root/'tests')
if process.returncode == 0:
print(f'Passed in {now()-t0:.0f} s.')
else:
print(f'Failed after {now()-t0:.0f} s.')
exit(1)
| 36
| 76
| 0.636525
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,823
| 0.538393
|
024c8b636c73803ba5c14b996265676bb94e1dd0
| 592
|
py
|
Python
|
notebooks/shared/ipypublish/export_plugins/html_standard.py
|
leonbett/debuggingbook
|
ae1fa940c306160429232fbc93a7a7f14b44efb7
|
[
"MIT"
] | 728
|
2018-09-21T03:51:04.000Z
|
2022-03-28T09:35:04.000Z
|
notebooks/shared/ipypublish/export_plugins/html_standard.py
|
leonbett/debuggingbook
|
ae1fa940c306160429232fbc93a7a7f14b44efb7
|
[
"MIT"
] | 103
|
2018-09-02T12:26:32.000Z
|
2022-02-09T07:19:08.000Z
|
notebooks/shared/ipypublish/export_plugins/html_standard.py
|
leonbett/debuggingbook
|
ae1fa940c306160429232fbc93a7a7f14b44efb7
|
[
"MIT"
] | 157
|
2018-09-02T08:00:50.000Z
|
2022-03-27T22:04:50.000Z
|
#!/usr/bin/env python
"""html in standard nbconvert format
"""
from ipypublish.html.create_tpl import create_tpl
from ipypublish.html.standard import content
from ipypublish.html.standard import content_tagging
from ipypublish.html.standard import document
from ipypublish.html.standard import inout_prompt
from ipypublish.html.standard import mathjax
from ipypublish.html.standard import widgets
oformat = 'HTML'
config = {}
template = create_tpl([
document.tpl_dict,
content.tpl_dict, content_tagging.tpl_dict,
mathjax.tpl_dict, widgets.tpl_dict,
inout_prompt.tpl_dict
])
| 28.190476
| 52
| 0.802365
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 67
| 0.113176
|
024cdbf14b841e1da6f77d24cda6ea8444019523
| 1,320
|
py
|
Python
|
application/src/app_pkg/routes/get_messages.py
|
eyardley/CSC648-SoftwareEngineering-Snapster
|
6dbe1cf9b34de6d6dbc6be75db3a34583f67c01a
|
[
"MIT"
] | null | null | null |
application/src/app_pkg/routes/get_messages.py
|
eyardley/CSC648-SoftwareEngineering-Snapster
|
6dbe1cf9b34de6d6dbc6be75db3a34583f67c01a
|
[
"MIT"
] | 3
|
2021-06-08T21:39:12.000Z
|
2022-01-13T02:46:20.000Z
|
application/src/app_pkg/routes/get_messages.py
|
eyardley/CSC648-SoftwareEngineering-Snapster
|
6dbe1cf9b34de6d6dbc6be75db3a34583f67c01a
|
[
"MIT"
] | 1
|
2021-05-09T21:01:28.000Z
|
2021-05-09T21:01:28.000Z
|
# from flask import render_template, request, make_response, jsonify
# from src.app_pkg.routes.common import validate_helper
# from src.app_pkg import app, db
# from src.app_pkg.forms import MessageForm
#
# ################################################
# # Show All Messages / User Profile #
# ################################################
# # AUTHOR: Bakulia Kurmant
# # NOTE: This function handles the route of the show all message functionality.
# # It shows the list of messages the user sent or received, and a single-view message modal with the message body.
# # Once the Database manager API returns a result (as a list), it passes that resulting list
# # to the HTML page to be rendered.
#
#
# @app.route('/user_profile', methods=['GET'])
# def all_messages(msg_id):
# isloggedin = validate_helper(request.cookies.get('token'))
#
# if not isloggedin:
# return render_template('search.html')
#
# msg_result_size = 0
# msg_results = []
# print('calling db...')
# msg_result_size, msg_results = db.get_all_messages(isloggedin, msg_id)
#
# if msg_result_size == 0:
# print("You have no messages!")
#
# return render_template('user_profile.html', isloggedin=isloggedin, msg_result_size=msg_result_size,
# msg_results=msg_results)
#
#
| 37.714286
| 106
| 0.641667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,286
| 0.974242
|
024d5f02a7be6e61357ca017fedc52a6ef5e46ea
| 18,681
|
py
|
Python
|
tests/fixtures/test_product.py
|
oldarmyc/cap
|
2e3e4b89d3d05f03876446d6f339167bd2805ea8
|
[
"Apache-2.0"
] | 1
|
2017-12-13T20:19:29.000Z
|
2017-12-13T20:19:29.000Z
|
tests/fixtures/test_product.py
|
oldarmyc/cap
|
2e3e4b89d3d05f03876446d6f339167bd2805ea8
|
[
"Apache-2.0"
] | null | null | null |
tests/fixtures/test_product.py
|
oldarmyc/cap
|
2e3e4b89d3d05f03876446d6f339167bd2805ea8
|
[
"Apache-2.0"
] | 1
|
2018-09-21T15:26:42.000Z
|
2018-09-21T15:26:42.000Z
|
# Copyright 2016 Dave Kludt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
sample_product = {
"title": "Test",
"us_url": "http://us.test.com",
"uk_url": "http://uk.test.com",
"active": True,
"db_name": "test",
"require_region": True,
"doc_url": "http://doc.test.com",
"pitchfork_url": "https://pitchfork/url"
}
sample_limit = {
"product": "test",
"title": "Test",
"uri": "/limits",
"slug": "test",
"active": True,
"absolute_path": "test/path",
"absolute_type": "list",
"limit_key": "test_limit",
"value_key": "test_value"
}
sample_log = {
"queried": ["dns"],
"queried_by": "skeletor",
"region": "dfw",
"ddi": "123456",
'query_results': []
}
sample_auth_failure = {
'message': (
'<strong>Error!</strong> Authentication has failed due to'
' incorrect token or DDI. Please check the token and DDI '
'and try again.'
)
}
""" DNS Tests """
dns = {
"title": "DNS",
"us_url": "https://us.test.com",
"uk_url": "https://uk.test.com",
"active": True,
"db_name": "dns",
"require_region": True,
"doc_url": "https://doc.test.com",
"pitchfork_url": "https://pitchfork.url",
"limit_maps": []
}
dns_limit = {
"product": "dns",
"title": "Domains",
"uri": "/limits",
"slug": "domains",
"active": True,
"absolute_path": "limits.absolute",
"absolute_type": "dict",
"value_key": "",
"limit_key": "domains"
}
dns_limit_return = {
"limits": {
"rate": [
{
"regex": ".*/v\\d+\\.\\d+/(\\d+/domains/search).*",
"limit": [
{
"value": 20,
"verb": "GET",
"next-available": "2016-01-12T13:56:11.450Z",
"remaining": 20,
"unit": "MINUTE"
}
],
"uri": "*/domains/search*"
}
],
"absolute": {
"domains": 500,
"records per domain": 500
}
}
}
dns_list_return = {
"domains": [
{
"comment": "Test",
"updated": "2015-12-08T20:47:02.000+0000",
"name": "test.net",
"created": "2015-04-09T15:42:49.000+0000",
"emailAddress": "skeletor@rackspace.com",
"id": 123465798,
"accountId": 1234567
}
],
"totalEntries": 1
}
dns_full_return = {
'dns': {
'values': {'Domains': 1},
'limits': {'Domains': 500}
}
}
""" Autoscale """
autoscale = {
"title": "Autoscale",
"us_url": "https://us.test.com",
"uk_url": "https://uk.test.com",
"active": True,
"db_name": "autoscale",
"require_region": True,
"doc_url": "https://doc.test.com",
"pitchfork_url": "https://pitchfork.url",
"limit_maps": []
}
autoscale_limit = {
"product": "autoscale",
"title": "Max Groups",
"absolute_path": "limits.absolute",
"uri": "/v1.0/{ddi}/limits",
"slug": "max_groups",
"value_key": "",
"absolute_type": "dict",
"active": True,
"limit_key": "maxGroups"
}
autoscale_limit_return = {
"limits": {
"rate": [
{
"regex": "/v1\\.0/execute/(.*)",
"limit": [
{
"value": 10,
"verb": "ALL",
"next-available": "2016-01-12T14:51:13.402Z",
"remaining": 10,
"unit": "SECOND"
}
],
"uri": "/v1.0/execute/*"
}
],
"absolute": {
"maxGroups": 1000,
"maxPoliciesPerGroup": 100,
"maxWebhooksPerPolicy": 25
}
}
}
autoscale_list_return = {
"groups": [
{
"state": {
"status": "ACTIVE",
"desiredCapacity": 0,
"paused": False,
"active": [],
"pendingCapacity": 0,
"activeCapacity": 0,
"name": "test"
},
"id": "d446f3c2-612f-41b8-92dc-4d6e1422bde2",
"links": [
{
"href": (
'https://dfw.autoscale.api.rackspacecloud.com/v1.0'
'/1234567/groups/d446f3c2-612f-41b8-92dc-4d6e1422bde2/'
),
"rel": "self"
}
]
}
],
"groups_links": []
}
autoscale_full_return = {
'autoscale': {
'values': {'Max Groups': 1},
'limits': {'Max Groups': 1000}
}
}
""" Big Data """
big_data = {
"title": "Big Data",
"us_url": "https://us.test.com",
"uk_url": "https://uk.test.com",
"active": True,
"db_name": "big_data",
"require_region": True,
"doc_url": "https://doc.test.com",
"pitchfork_url": "https://pitchfork.url",
"limit_maps": []
}
big_data_limit = [
{
"product": "big_data",
"title": "Node Count",
"absolute_path": "limits.absolute.node_count",
"uri": "/v2/{ddi}/limits",
"slug": "node_count",
"value_key": "remaining",
"absolute_type": "dict",
"active": True,
"limit_key": "limit"
}, {
"product": "big_data",
"title": "Disk - MB",
"absolute_path": "limits.absolute.disk",
"uri": "/v2/{ddi}/limits",
"slug": "disk_-_mb",
"value_key": "remaining",
"absolute_type": "dict",
"active": True,
"limit_key": "limit"
}
]
big_data_limit_return = {
"limits": {
"absolute": {
"node_count": {
"limit": 15,
"remaining": 8
},
"disk": {
"limit": 50000,
"remaining": 25000
},
"ram": {
"limit": 655360,
"remaining": 555360
},
"vcpus": {
"limit": 200,
"remaining": 120
}
}
}
}
big_data_full_return = {
'big_data': {
'values': {'Node Count': 7, 'Disk - MB': 25000},
'limits': {'Node Count': 15, 'Disk - MB': 50000}
}
}
""" CBS """
cbs = {
"title": "CBS",
"us_url": "https://us.test.com",
"uk_url": "https://uk.test.com",
"active": True,
"db_name": "cbs",
"require_region": True,
"doc_url": "https://doc.test.com",
"pitchfork_url": "https://pitchfork.url",
"limit_maps": []
}
cbs_limit = {
"product": "cbs",
"title": "SATA - GB",
"absolute_path": "quota_set.gigabytes_SATA",
"uri": "/v1/{ddi}/os-quota-sets/{ddi}?usage=True",
"slug": "sata_-_gb",
"value_key": "in_use",
"absolute_type": "dict",
"active": True,
"limit_key": "limit"
}
cbs_limit_return = {
"quota_set": {
"volumes": {
"limit": -1,
"reserved": 0,
"in_use": 3
},
"gigabytes_SATA": {
"limit": 10240,
"reserved": 0,
"in_use": 325
},
"gigabytes_SSD": {
"limit": 10240,
"reserved": 0,
"in_use": 50
}
}
}
cbs_full_return = {
'cbs': {
'values': {'SATA - GB': 9915},
'limits': {'SATA - GB': 10240}
}
}
""" Load Balancers """
clb = {
"title": "Load Balancers",
"us_url": "https://us.test.com",
"uk_url": "https://uk.test.com",
"active": True,
"db_name": "load_balancers",
"require_region": True,
"doc_url": "https://doc.test.com",
"pitchfork_url": "https://pitchfork.url",
"limit_maps": []
}
clb_limit = [
{
"product": "load_balancers",
"title": "Total Load Balancers",
"uri": "/v1.0/{ddi}/loadbalancers/absolutelimits",
"slug": "total_load_balancers",
"active": True,
"path": "absolute['LOADBALANCER_LIMIT']",
"absolute_path": "absolute",
"value_key": "",
"absolute_type": "list",
"limit_key": "LOADBALANCER_LIMIT"
}, {
"product": "load_balancers",
"title": "Nodes per LB",
"uri": "/v1.0/{ddi}/loadbalancers/absolutelimits",
"slug": "nodes_per_lb",
"active": True,
"path": "absolute['NODE_LIMIT']",
"absolute_path": "absolute",
"value_key": "",
"absolute_type": "list",
"limit_key": "NODE_LIMIT"
}
]
clb_limit_return = {
"absolute": [
{
"name": "IPV6_LIMIT",
"value": 25
}, {
"name": "LOADBALANCER_LIMIT",
"value": 25
}, {
"name": "BATCH_DELETE_LIMIT",
"value": 10
}, {
"name": "ACCESS_LIST_LIMIT",
"value": 100
}, {
"name": "NODE_LIMIT",
"value": 25
}, {
"name": "NODE_META_LIMIT",
"value": 25
}, {
"name": "LOADBALANCER_META_LIMIT",
"value": 25
}, {
"name": "CERTIFICATE_MAPPING_LIMIT",
"value": 20
}
]
}
clb_list_return = {
"loadBalancers": [
{
"status": "ACTIVE",
"updated": {
"time": "2016-01-12T16:04:44Z"
},
"protocol": "HTTP",
"name": "test",
"algorithm": "LEAST_CONNECTIONS",
"created": {
"time": "2016-01-12T16:04:44Z"
},
"virtualIps": [
{
"ipVersion": "IPV4",
"type": "PUBLIC",
"id": 19875,
"address": "148.62.0.226"
}, {
"ipVersion": "IPV6",
"type": "PUBLIC",
"id": 9318325,
"address": "2001:4800:7904:0100:f46f:211b:0000:0001"
}
],
"id": 506497,
"timeout": 30,
"nodeCount": 0,
"port": 80
}
]
}
clb_full_return = {
'load_balancers': {
'values': {'Total Load Balancers': 1},
'limits': {'Total Load Balancers': 25, 'Nodes per LB': 25}
}
}
""" Servers """
server = {
"title": "Servers",
"us_url": "https://us.test.com",
"uk_url": "https://uk.test.com",
"active": True,
"db_name": "servers",
"require_region": True,
"doc_url": "https://doc.test.com",
"pitchfork_url": "https://pitchfork.url",
"limit_maps": []
}
server_limit = [
{
"product": "servers",
"title": "Servers",
"uri": "/v2/{ddi}/limits",
"slug": "servers",
"active": True,
"path": "absolute['maxTotalInstances']",
"absolute_path": "limits.absolute",
"value_key": "",
"absolute_type": "dict",
"limit_key": "maxTotalInstances"
}, {
"product": "servers",
"title": "Private Networks",
"uri": "/v2/{ddi}/limits",
"slug": "private_networks",
"active": True,
"path": "absolute['maxTotalPrivateNetworks']",
"absolute_path": "limits.absolute",
"value_key": "",
"absolute_type": "dict",
"limit_key": "maxTotalPrivateNetworks"
}, {
"product": "servers",
"title": "Ram - MB",
"uri": "/v2/{ddi}/limits",
"slug": "ram_-_mb",
"active": True,
"path": "absolute['maxTotalRAMSize']",
"absolute_path": "limits.absolute",
"value_key": "",
"absolute_type": "dict",
"limit_key": "maxTotalRAMSize"
}
]
server_limit_return = {
"limits": {
"rate": [
{
"regex": "/[^/]*/?$",
"limit": [
{
"next-available": "2016-01-12T16:14:47.624Z",
"unit": "MINUTE",
"verb": "GET",
"remaining": 2200,
"value": 2200
}
],
"uri": "*"
}, {
"regex": (
"/v[^/]+/[^/]+/servers/([^/]+)/rax-si-image-schedule"
),
"limit": [
{
"next-available": "2016-01-12T16:14:47.624Z",
"unit": "SECOND",
"verb": "POST",
"remaining": 10,
"value": 10
}
],
"uri": "/servers/{id}/rax-si-image-schedule"
}
],
"absolute": {
"maxPersonalitySize": 1000,
"maxTotalCores": -1,
"maxPersonality": 5,
"totalPrivateNetworksUsed": 1,
"maxImageMeta": 40,
"maxTotalPrivateNetworks": 10,
"maxSecurityGroupRules": -1,
"maxTotalKeypairs": 100,
"totalRAMUsed": 4096,
"maxSecurityGroups": -1,
"totalFloatingIpsUsed": 0,
"totalInstancesUsed": 3,
"totalSecurityGroupsUsed": 0,
"maxServerMeta": 40,
"maxTotalFloatingIps": -1,
"maxTotalInstances": 200,
"totalCoresUsed": 4,
"maxTotalRAMSize": 256000
}
}
}
server_list_return = {
"servers": [
{
"OS-EXT-STS:task_state": None,
"addresses": {
"public": [
{
"version": 4,
"addr": "104.130.28.32"
}, {
"version": 6,
"addr": "2001:4802:7803:104:be76:4eff:fe21:51b7"
}
],
"private": [
{
"version": 4,
"addr": "10.176.205.68"
}
]
},
"flavor": {
"id": "general1-1",
"links": [
{
"href": (
"https://iad.servers.api.rackspacecloud.com"
"/766030/flavors/general1-1"
),
"rel": "bookmark"
}
]
},
"id": "3290e50d-888f-4500-a934-16c10f3b8a10",
"user_id": "284275",
"OS-DCF:diskConfig": "MANUAL",
"accessIPv4": "104.130.28.32",
"accessIPv6": "2001:4802:7803:104:be76:4eff:fe21:51b7",
"progress": 100,
"OS-EXT-STS:power_state": 1,
"config_drive": "",
"status": "ACTIVE",
"updated": "2016-01-12T15:16:37Z",
"name": "test-server",
"created": "2016-01-12T15:15:39Z",
"tenant_id": "1234567",
"metadata": {
"build_config": "",
"rax_service_level_automation": "Complete"
}
}
]
}
server_list_processed_return = [
{
'status': 'ACTIVE',
'updated': '2016-01-12T15:16:37Z',
'OS-EXT-STS:task_state': None,
'user_id': '284275',
'addresses': {
'public': [
{
'version': 4,
'addr': '104.130.28.32'
}, {
'version': 6,
'addr': '2001:4802:7803:104:be76:4eff:fe21:51b7'
}
],
'private': [
{
'version': 4,
'addr': '10.176.205.68'
}
]
},
'created': '2016-01-12T15:15:39Z',
'tenant_id': '1234567',
'OS-DCF:diskConfig': 'MANUAL',
'id': '3290e50d-888f-4500-a934-16c10f3b8a10',
'accessIPv4': '104.130.28.32',
'accessIPv6': '2001:4802:7803:104:be76:4eff:fe21:51b7',
'config_drive': '',
'progress': 100,
'OS-EXT-STS:power_state': 1,
'metadata': {
'build_config': '',
'rax_service_level_automation': 'Complete'
},
'flavor': {
'id': 'general1-1',
'links': [
{
'href': (
'https://iad.servers.api.rackspacecloud.com'
'/766030/flavors/general1-1'
),
'rel': 'bookmark'
}
]
},
'name': 'test-server'
}
]
network_list_return = {
"networks": [
{
"status": "ACTIVE",
"subnets": [
"879ff280-6f17-4fd8-b684-19237d88fc45"
],
"name": "test-network",
"admin_state_up": True,
"tenant_id": "1234567",
"shared": False,
"id": "e737483a-00d7-4517-afc3-bd1fbbbd4cd3"
}
]
}
network_processed_list = [
{
'status': 'ACTIVE',
'subnets': [
'879ff280-6f17-4fd8-b684-19237d88fc45'
],
'name': 'test-network',
'admin_state_up': True,
'tenant_id': '1234567',
'shared': False,
'id': 'e737483a-00d7-4517-afc3-bd1fbbbd4cd3'
}
]
server_flavor_return = {
"flavor": {
"ram": 1024,
"name": "1 GB General Purpose v1",
"OS-FLV-WITH-EXT-SPECS:extra_specs": {
"number_of_data_disks": "0",
"class": "general1",
"disk_io_index": "40",
"policy_class": "general_flavor"
},
"vcpus": 1,
"swap": "",
"rxtx_factor": 200.0,
"OS-FLV-EXT-DATA:ephemeral": 0,
"disk": 20,
"id": "general1-1"
}
}
server_full_return = {
'servers': {
'values': {
'Private Networks': 1,
'Ram - MB': 1024,
'Servers': 1
},
'limits': {
'Private Networks': 10,
'Ram - MB': 256000,
'Servers': 200
}
}
}
| 26.018106
| 79
| 0.429849
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 9,301
| 0.497886
|
0251012874a85c99ece694f4c087c35e3ad1cb49
| 2,150
|
py
|
Python
|
script/download_pretrained.py
|
cttsai1985/google-quest-qa-labeling-pipeline
|
ef4fb92c470e45c0a07b0ee0e474224d88d3d410
|
[
"Apache-2.0"
] | 2
|
2020-04-08T17:05:01.000Z
|
2020-06-30T18:02:03.000Z
|
script/download_pretrained.py
|
cttsai1985/google-quest-qa-labeling-pipeline
|
ef4fb92c470e45c0a07b0ee0e474224d88d3d410
|
[
"Apache-2.0"
] | null | null | null |
script/download_pretrained.py
|
cttsai1985/google-quest-qa-labeling-pipeline
|
ef4fb92c470e45c0a07b0ee0e474224d88d3d410
|
[
"Apache-2.0"
] | null | null | null |
"""
fork THIS excellent downloader
https://www.kaggle.com/maroberti/transformers-model-downloader-pytorch-tf2-0
"""
from typing import Union
from pathlib import Path
import os
import transformers
from transformers import AutoConfig, AutoTokenizer, AutoModel, TFAutoModel
def transformers_model_downloader(pretrained_model_name: str, working_dir: Union[str, Path], is_tf: bool = True) -> bool:
    if is_tf:
        model_class = TFAutoModel
    else:
        model_class = AutoModel  # PyTorch counterpart
NEW_DIR = working_dir / pretrained_model_name
try:
os.mkdir(NEW_DIR)
print(f"Successfully created directory {NEW_DIR}")
except OSError:
print(f"Creation of directory {NEW_DIR} failed")
print(f"Download model and tokenizer {pretrained_model_name}")
transformer_model = model_class.from_pretrained(pretrained_model_name)
transformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name)
try:
transformer_model.save_pretrained(NEW_DIR)
transformer_tokenizer.save_pretrained(NEW_DIR)
print(f"Save model and tokenizer {pretrained_model_name} in directory {NEW_DIR}")
    except Exception:
print(f"Save model and tokenizer {pretrained_model_name} in directory {NEW_DIR}: Failed")
return False
return True
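# A saved directory can later be loaded back offline, e.g. (sketch; the path
# mirrors NEW_DIR above):
#   tokenizer = AutoTokenizer.from_pretrained(str(working_dir / pretrained_model_name))
#   model = TFAutoModel.from_pretrained(str(working_dir / pretrained_model_name))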
def main():
pretrained_model_name_list = [
'bert-base-uncased',
'bert-base-cased',
'bert-large-cased',
'distilbert-base-uncased',
'albert-xxlarge-v2',
'albert-xlarge-v2',
'albert-large-v2',
'roberta-base',
'roberta-large',
'roberta-large-mnli',
'distilroberta-base',
'distilbert-base-uncased',
]
print(f'Transformers version {transformers.__version__}') # Current version: 2.3.0
WORKING_DIR = Path("../input/hugging_face_pretrained")
try:
os.mkdir(WORKING_DIR)
    except OSError:
pass
for i, pretrained_model_name in enumerate(pretrained_model_name_list, start=1):
print(i, '/', len(pretrained_model_name_list))
        transformers_model_downloader(pretrained_model_name, WORKING_DIR, is_tf=True)
return
if "__main__" == __name__:
main()
| 28.289474
| 120
| 0.693953
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 758
| 0.352558
|
0251ffe3075d234371ce4b6df85d16a4d7b3e648
| 28,128
|
py
|
Python
|
scripts/icdcs2019/communication.py
|
HKBU-HPML/gtopkssgd
|
6f57343f3749939b0345d36fcb2c24470942aefd
|
[
"Apache-2.0"
] | 33
|
2019-05-13T12:04:15.000Z
|
2022-03-14T06:23:56.000Z
|
scripts/icdcs2019/communication.py
|
HKBU-HPML/gtopkssgd
|
6f57343f3749939b0345d36fcb2c24470942aefd
|
[
"Apache-2.0"
] | 2
|
2019-04-24T02:38:07.000Z
|
2021-05-31T11:22:24.000Z
|
scripts/icdcs2019/communication.py
|
HKBU-HPML/gtopkssgd
|
6f57343f3749939b0345d36fcb2c24470942aefd
|
[
"Apache-2.0"
] | 10
|
2019-07-18T23:43:32.000Z
|
2021-06-16T13:22:04.000Z
|
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from utils import read_log, plot_hist, update_fontsize, autolabel, read_p100_log
from plot_sth import Bar
import os
import plot_sth as Color
import math
OUTPUT_PATH = '/media/sf_Shared_Data/tmp/icdcs2019'
INPUT_PATH = '/media/sf_Shared_Data/tmp/icdcs2019'
num_of_nodes = [2, 4, 8, 16, 32]
#num_of_nodes = [2, 4, 8]
#num_of_nodes = [8, 80, 81, 82, 83, 85]
#num_of_nodes = [16, 32, 64]
B = 9.0 * 1024 * 1024 * 1024.0 / 8 # 10 Gbps Ethernet
#B = 56 * 1024 * 1024 * 1024.0 / 8 # 56 Gbps IB
markers = {2:'o',
4:'x',
8:'^'}
formats={2:'-', 4:'-.', 8:':', 16:'--', 32:'-*', 64: '-+'}
gmarkers = {'dense':'o',
'sparse':'x',
'topk':'x',
'gtopk':'^'}
gcolors = {'dense':'b',
'sparse':'r',
'topk':'r',
'gtopk':'g'}
def time_of_allreduce(n, M, B=B):
"""
n: number of nodes
M: size of message
B: bandwidth of link
"""
# Model 1, TernGrad, NIPS2017
#if True:
# ncost = 100 * 1e-6
# nwd = B
# return ncost * np.log2(n) + M / nwd * np.log2(n)
# Model 2, Lower bound, E. Chan, et al., 2007
if True:
#alpha = 7.2*1e-6 #Yang 2017, SC17, Scaling Deep Learning on GPU and Knights Landing clusters
#alpha = 6.25*1e-6*n # From the data gpuhome benchmark
#alpha = 12*1e-6*n # From the data gpuhome benchmark
alpha = 45.25*1e-6#*np.log2(n) # From the data gpuhome benchmark
beta = 1 / B *1.2
gamma = 1.0 / (16.0 * 1e9 * 4) * 160
M = 4*M
#t = 2*(n)*alpha + 2*(n-1)*M*beta/n + (n-1)*M*gamma/n
t = 2*(n-1)*alpha + 2*(n-1)*M*beta/n + (n-1)*M*gamma/n
return t * 1e6
    ts = 7.5 / (1000.0 * 1000)  # startup time in seconds
#seconds = (np.ceil(np.log2(n)) + n - 1) * ts + (2*n - 1 + n-1) * M / n * 1/B
#seconds = (np.ceil(np.log2(n)) + n - 1) * ts + 2 * (n - 1) * 2*M/n * 1/B
#tcompute = 1. / (2.2 * 1000 * 1000 * 1000)
tcompute = 1. / (1 * 1000 * 1000 * 1000)
#seconds = 2 * (n - 1) * ts + 2 * (n - 1) * M/n * 1/B + (n-1)*M/n * tcompute
#C = 1024.0 * 1024 # segmented_size
#if M > C * n:
# # ring_segmented allreduce
# seconds = (M / C + (n - 2)) * (ts + C / B + C * tcompute)
#else:
# ring allreduce, better than the above
#seconds = (n - 1) * ts + 2 * (n - 1) * M/n * 1/B + (n-1)*M/n * tcompute
seconds = 2*(n-1)*n*ts + 2 * (n - 1) * M/n * 1/B + (n-1)*M/n * tcompute
#C = 512.0
#seconds = (M / C + n-2) * (ts + C/B)
return seconds * 1000 * 1000 # micro seconds
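def _allreduce_model_example():
    """Illustrative only: print the predicted allreduce time (microseconds)
    from the cost model above for several node counts. The message size is a
    hypothetical value in the same units as the sizes read from the logs."""
    M = 25 * 1024 * 1024
    for n in num_of_nodes:
        print('n=%2d predicted t=%.1f us' % (n, time_of_allreduce(n, M, B)))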
class Simulator():
def __init__(self, name, computes, sizes, num_of_nodes, render=True):
self.name = name
self.computes = computes
self.sizes = sizes
self.num_of_nodes = num_of_nodes
self.comms = None
self.title = name + ' (WFBP)'
self.max_time = 0
self.ax = None
self.render = render
self.merged_layers = []
def wfbp(self, with_optimal=False):
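        # WFBP (wait-free backpropagation): gradients are communicated layer
        # by layer as the backward pass proceeds; each layer's allreduce
        # starts at max(end of previous allreduce, end of its own compute).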
start_time = 0.0
comm_start_time = 0.0
comm = 0.0
if not self.comms:
comms = [time_of_allreduce(self.num_of_nodes, s, B) for s in self.sizes]
else:
comms = self.comms
max_time = max(np.sum(self.computes), np.sum(comms)+self.computes[0])
print('Layer-wise total comm. time:', np.sum(comms)/1000.)
if not with_optimal:
self.max_time = max_time
if not self.ax and self.render:
fig, ax = plt.subplots(1, figsize=(30, 3))
#ax.set_title(self.title, x=0.5, y=0.8)
self.ax = ax
comm_layer_id = ''
for i in range(len(self.computes)):
comp = self.computes[i]
layer_id = len(self.computes) - i
if not with_optimal:
if self.render:
bar = Bar(start_time, comp, self.max_time, self.ax, type='p', index=layer_id)
bar.render()
if comm_start_time + comm > start_time + comp:
comm_start_time = comm_start_time + comm
else:
comm_start_time = start_time + comp
if comm == 0.0 and comm_layer_id != '':
comm_layer_id = str(comm_layer_id)+','+str((len(self.computes) - i))
else:
comm_layer_id = str(layer_id)
comm = comms[i]
type = 'wc'
if with_optimal:
type = 'mc'
if self.render:
bar_m = Bar(comm_start_time, comm, self.max_time, self.ax, type=type, index=comm_layer_id, is_optimal=with_optimal)
bar_m.render()
start_time += comp
total_time = (comm_start_time + comm)/1000.0
title='MG-WFBP' if with_optimal else 'WFBP'
print(title+' Total time: ', total_time, ' ms')
if self.render:
plt.subplots_adjust(left=0.06, right=1.)
return total_time
def synceasgd(self):
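        # SyncEASGD-style baseline: one allreduce over all gradients,
        # issued only after the whole backward pass has finished.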
start_time = 0.0
comm_start_time = 0.0
comm = 0.0
total_size = np.sum(self.sizes)
comm = time_of_allreduce(self.num_of_nodes, total_size, B)
total_comp = np.sum(self.computes)
comm_start_time = total_comp
index = ','.join([str(len(self.computes)-i) for i in range(0, len(self.computes))])
if self.render:
bar = Bar(np.sum(self.computes), comm, self.max_time, self.ax, type='sc', index=index)
bar.render()
total_time = (comm_start_time + comm)/1000.0
print('SyncEASGD Total time: ', total_time, ' ms')
if self.render:
pass
return total_time
def cal_comm_starts(self, comms, comps):
"""
comms and comps have been aligned
"""
start_comms = []
start_comms.append(0.0)
sum_comp = 0.0
for i in range(1, len(comms)):
comm = comms[i-1]
comp = comps[i-1]
#print(start_comms[i-1],comm, sum_comp,comp)
start_comm = max(start_comms[i-1]+comm, sum_comp+comp)
#print('start_comm: ', start_comm, ', comm: ', comm)
start_comms.append(start_comm)
sum_comp += comp
return start_comms
def merge(self, comms, sizes, i, p, merge_size, comps):
comms[i] = 0# merge here
comms[i+1] = p
sizes[i+1] = merge_size
start_comms = self.cal_comm_starts(comms, comps)
#print('start_comms: ', start_comms)
self.merged_layers.append(i)
return start_comms
def gmwfbp2(self):
if not self.comms:
comms = [time_of_allreduce(self.num_of_nodes, s, B) for s in self.sizes]
else:
comms = self.comms
#comms = comms[0:-1]
#print('comms: ', comms)
comps = self.computes[1:]
comps.append(0) # for last communication
optimal_comms = list(comms)
optimal_sizes = list(self.sizes)
start_comms = self.cal_comm_starts(optimal_comms, comps)
sum_comp = 0.0
#print('start_comms: ', start_comms)
#return
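        # Greedy MG-WFBP merging pass: when layer i's allreduce cannot be
        # hidden behind the remaining computation, try merging its message
        # into layer i+1's; merge if a single allreduce of the combined size
        # (p) beats the two separate allreduces (r), given the overlap.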
for i in range(0, len(comms)-1):
comp = comps[i]
comm = optimal_comms[i]
if start_comms[i] + comm > comp+sum_comp:
# cannot be hidden, so we need to merge
merge_size = optimal_sizes[i+1] + optimal_sizes[i]
r = comm + optimal_comms[i+1]
p = time_of_allreduce(self.num_of_nodes, merge_size, B)
if start_comms[i] >= comp+sum_comp:
# don't care about computation
if p < r:
start_comms = self.merge(optimal_comms, optimal_sizes, i, p, merge_size, comps)
#optimal_comms[i] = 0# merge here
#optimal_comms[i+1] = p
#optimal_sizes[i+1] += merge_size
#start_comms = self.cal_comm_starts(optimal_comms, comps)
else:
if comp+sum_comp+p < start_comms[i]+comm+optimal_comms[i+1]:
start_comms = self.merge(optimal_comms, optimal_sizes, i, p, merge_size, comps)
else:
pass # optimal, nothing to do
sum_comp += comp
optimal_comms.append(comms[-1])
self.wfbp()
self.synceasgd()
self.comms = optimal_comms
self.title = self.name+ ' (GM-WFBP)'
ret = self.wfbp(with_optimal=True)
#print('merged-layers: ', self.merged_layers)
return ret
start = 1024*16
end = 1024*1024*4
def read_times_from_nccl_log(logfile):
f = open(logfile)
sizes = []
times = []
#start = 1024*16
#end = 1024*1024
for line in f.readlines():
items = ' '.join(line.split()).split(' ')
if len(items) == 12 and items[0] != '#':
size = int(items[0])#/4
if size == 8:
continue
#if size > 1024*1024:
if (size >= start and size <= end):
sizes.append(size)
times.append(float(items[4]))
#print(items)
f.close()
return sizes, times, []
def read_allreduce_log(filename):
print('filename: ', filename)
f = open(filename, 'r')
sizes = []
comms = []
size_comms = {}
for l in f.readlines():
if l[0] == '#' or l[0] == '[' or len(l)<10 :
continue
items = ' '.join(l.split()).split()
comm = float(items[-1])
size = int(items[0])#/4
if size > end or size < start:
continue
comms.append(comm)
sizes.append(size)
if size not in size_comms:
size_comms[size] = []
size_comms[size].append(comm)
f.close()
    sizes = sorted(size_comms.keys())
print('sizes: ', sizes)
comms = [np.mean(size_comms[s]) for s in sizes]
errors = [np.std(size_comms[s]) for s in sizes]
return sizes, comms, errors
def predict(filename, n, color, marker, label, sizes=None, ax=None, nccl=False):
if ax is None:
fig, ax = plt.subplots(figsize=(5,4.5))
if sizes is None:
if not nccl:
sizes, comms, errors = read_allreduce_log(filename)
label='%d nodes' % (n)
else:
sizes, comms, comps = read_times_from_nccl_log(filename)
label='%d GPUs' % (n*8)
size_in_kbytes = np.array(sizes) #/ 1024
#plt.plot(size_in_kbytes, comms, c=color, marker=marker, label=label+' measured', linewidth=2)
#plt.plot(size_in_kbytes, comms, label=label+' measured', linewidth=2)
plt.errorbar(size_in_kbytes, comms, errors, fmt=formats[n], label=label, linewidth=1)
#plt.plot(sizes, comms, c=color, marker=marker, label=label, linewidth=2)
#bandwidths = np.array(sizes)/np.array(comms)
#plt.plot(sizes, bandwidths, c=color, marker=marker, label=label, linewidth=2)
predicts = []
for M in sizes:
p = time_of_allreduce(n, M, B)
predicts.append(p)
#rerror = (np.array(predicts)-np.array(comms))/np.array(comms)
#print('erro: ', np.mean(np.abs(rerror)))
#plt.scatter(sizes, predicts, c='red', marker=markers[n])
#jax.plot(size_in_kbytes, predicts, c=color, marker=marker, linestyle='--', label=label+' predict', markerfacecolor='white', linewidth=1)
return sizes
def plot_all_communication_overheads():
#labels = ['2-node', '4-node', '8-node', '16-node']
fig, ax = plt.subplots(figsize=(5,4.5))
labels = ['%d-node' % i for i in num_of_nodes]
colors = ['r', 'g', 'b', 'black', 'y', 'c']
markers = ['^', 'o', 'd', '*', 'x', 'v']
sizes = None
#sizes = np.arange(128.0, 1e5, step=8192)
for i, n in enumerate(num_of_nodes):
test_file = '%s/mgdlogs/mgd140/ring-allreduce%d.log' % (INPUT_PATH, n)
predict(test_file, n, colors[i], markers[i], labels[i], sizes, ax)
plt.xlabel('Size of parameters (KBytes)')
plt.ylabel(r'Communication time ($\mu$s)')
plt.ylim(bottom=0, top=plt.ylim()[1]*1.2)
plt.legend(ncol=1, loc=2, prop={'size': 10})
update_fontsize(ax, fontsize=14)
plt.subplots_adjust(left=0.18, bottom=0.13, top=0.91, right=0.92)
#plt.savefig('%s/%s.pdf' % (OUTPUT_PATH, 'commtime'))
plt.show()
def gmwfbp_simulate():
name = 'GoogleNet'
#name = 'ResNet'
#name = 'VGG'
#name = 'DenseNet'
num_of_nodes = 32
test_file = '/media/sf_Shared_Data/gpuhome/repositories/dpBenchmark/tools/caffe/cnn/%s/tmp8comm.log' % name.lower()
sizes, comms, computes, merged_comms = read_log(test_file)
#computes = [c/4 for c in computes]
#sizes = [1., 1., 1., 1.]
#computes = [3., 3.5, 5., 6.]
#sim = Simulator(name, computes[0:4], sizes[0:4], num_of_nodes)
sim = Simulator(name, computes, sizes, num_of_nodes)
#sim.wfbp()
sim.gmwfbp2()
plt.savefig('%s/breakdown%s.pdf' % (OUTPUT_PATH, name.lower()))
#plt.show()
def gmwfbp_speedup():
#configs = ['GoogleNet', 64]
configs = ['ResNet', 32]
#configs = ['DenseNet', 128]
name = configs[0]
b = configs[1]
test_file = '/media/sf_Shared_Data/gpuhome/repositories/dpBenchmark/tools/caffe/cnn/%s/tmp8comm.log' % name.lower()
sizes, comms, computes, merged_comms = read_log(test_file)
device = 'k80'
#device = 'p100'
#pfn = '/media/sf_Shared_Data/gpuhome/repositories/dpBenchmark/tools/caffe/cnn/%s/tmp8commp100%s.log' % (name.lower(), name.lower())
#val_sizes, computes = read_p100_log(pfn)
#print('computes: ', np.sum(computes))
#print('computes: ', computes)
#assert len(computes) == len(sizes)
nnodes = [4, 8, 16, 32, 64]
#nnodes = [2, 4, 8]
wfbps = []
gmwfbps = []
synceasgds = []
micomputes = np.array(computes)
tf = np.sum(micomputes) * 0.5 / 1000
tb = np.sum(micomputes) / 1000
total_size = np.sum(sizes)
single = b/(tf+tb)
optimal = []
colors = ['k', 'r', 'g', 'b']
markers = ['s', '^', 'o', 'd']
for num_of_nodes in nnodes:
sim = Simulator(name, computes, sizes, num_of_nodes, render=False)
wfbp = sim.wfbp()
wfbps.append(b*num_of_nodes/(wfbp+tf)/single)
gmwfbp = sim.gmwfbp2()
gmwfbps.append(b*num_of_nodes/(gmwfbp+tf)/single)
tc = time_of_allreduce(num_of_nodes, total_size, B)/1000
print('#nodes:', num_of_nodes, ', tc: ', tc)
synceasgd = tb + tf + tc
synceasgds.append(b*num_of_nodes/synceasgd/single)
optimal.append(num_of_nodes)
print('tf: ', tf)
print('tb: ', tb)
print('total_size: ', total_size)
print('wfbp: ', wfbps)
print('gmwfbps: ', gmwfbps)
print('synceasgds: ', synceasgds)
print('compared to synceasgds: ', np.array(gmwfbps)/np.array(synceasgds))
print('compared to wfbps: ', np.array(gmwfbps)/np.array(wfbps))
fig, ax = plt.subplots(figsize=(5,4.5))
ax.plot(nnodes, optimal, color='k', marker='s', label='Linear')
ax.plot(nnodes, wfbps, color='r', marker='d', label='WFBP')
ax.plot(nnodes, synceasgds, color='b', marker='o', label='SyncEASGD')
ax.plot(nnodes, gmwfbps, color='g', marker='^', label='MG-WFBP')
plt.legend(loc=2)
plt.xlabel('# of nodes')
plt.ylabel('Speedup')
#plt.title('%s-Simulation'%name)
#plt.yscale('log', basey=2)
#plt.xscale('log', basey=2)
plt.ylim(bottom=1,top=nnodes[-1]+1)
plt.xlim(left=1, right=nnodes[-1]+1)
plt.xticks(nnodes)
plt.yticks(nnodes)
plt.grid(color='#5e5c5c', linestyle='-.', linewidth=1)
update_fontsize(ax, fontsize=14)
plt.subplots_adjust(left=0.13, bottom=0.13, top=0.96, right=0.97)
#plt.savefig('%s/speedup%s.pdf' % (OUTPUT_PATH, name.lower()+device))
plt.show()
def plot_realdata_comm(datas, configs):
def calculate_real_comms(data, bs):
times = [bs/((d/2)/2**(i-1)) for i, d in enumerate(data)]
comp = times[0]
comms = [t-times[0] for t in times[1:]]
return comp, comms
fig, ax = plt.subplots(figsize=(4.8,3.4))
count = len(datas[0][1:])
ind = np.arange(count)
width = 0.25
s = -int(count/2)
print('s: ', s)
margin = 0.05
xticklabels = [str(2**(i+1)) for i in range(count)]
s = (1 - (width*count+(count-1) *margin))/2+width
ind = np.array([s+i+1 for i in range(count)])
centerind = None
labels=['WF.', 'S.E.', 'M.W.']
for i, data in enumerate(datas):
comp, comms= calculate_real_comms(data, configs[1])
comps = [comp for j in comms]
newind = ind+s*width+(s+1)*margin
p1 = ax.bar(newind, comps, width, color=Color.comp_color,hatch='x', label='Comp.')
p2 = ax.bar(newind, comms, width,
bottom=comps, color=Color.comm_color, label='Comm.')
s += 1
autolabel(p2, ax, labels[i], 0)
print('comp: ', comp)
print('comms: ', comms)
print('')
rects = ax.patches
ax.text(10, 10, 'ehhlo', color='b')
handles, labels = ax.get_legend_handles_labels()
#ax.legend([handles[0][0]], [labels[0][0]], ncol=2)
print(labels)
print(handles)
ax.set_xlim(left=1+0.3)
ax.set_ylim(top=ax.get_ylim()[1]*1.3)
ax.set_xticks(ind+2*(width+margin))
ax.set_xticklabels(xticklabels)
ax.set_xlabel('# of nodes')
ax.set_ylabel('Time [s]')
update_fontsize(ax, 14)
ax.legend((p1[0], p2[0]), (labels[0],labels[1] ), ncol=2, handletextpad=0.2, columnspacing =1.)
fig.subplots_adjust(left=0.16, right=0.96, bottom=0.17, top=0.94)
#plt.savefig('%s/comm%sreal.pdf' % (OUTPUT_PATH, configs[0].lower()))
plt.show()
def realdata_speedup():
nworkers = [1, 4, 8, 16, 32]
configs = ['VGG-16', 128]
dense= [1317.333, 104.200, 92.560 , 39.480 ,12.600]
topk= [1317.333, 110.576, 109.900, 97.865 ,63.002]
gtopk= [1317.333, 131.060, 130.551, 126.434 ,123.200]
#configs = ['ResNet-20', 32]
#dense= [920.632, 821.700, 705.200, 520.400, 287.900]
#topk= [920.632, 908.837, 752.985, 737.594, 696.029]
#gtopk= [920.632, 916.260, 868.730, 808.500, 789.300]
#configs = ['AlexNet', 32]
#dense = [173.469, 14.010, 12.118, 4.936 , 1.234]
#topk = [173.469, 14.238, 13.865, 13.352, 9.236]
#gtopk = [173.469, 16.536, 16.446, 16.359, 15.777]
#configs = ['ResNet-50', 32]
#dense =[52.873, 39.002, 36.989, 23.176, 10.721]
#topk = [52.873, 37.729, 35.703, 34.495, 30.583]
#gtopk =[52.873, 39.795, 39.713, 39.060, 39.119]
configs = ['LSTM-PTB', 32]
dense =[392.0, 12.657, 8.7, 4.1, 2.1]
topk = [392.0, 19.9, 18.6, 14.8, 5.4]
gtopk =[392.0, 17.8, 17.6, 15.1, 10.8]
name = configs[0]
fig, ax = plt.subplots(figsize=(5,4))
optimal = [100 for i in range(len(dense)-1)]
dense = [v/dense[0]*100 for i, v in enumerate(dense[1:])]
topk = [v/topk[0]*100 for i, v in enumerate(topk[1:])]
gtopk = [v/gtopk[0]*100 for i, v in enumerate(gtopk[1:])]
todense = np.array(gtopk)/np.array(dense)
totopk= np.array(gtopk)/np.array(topk)
print(name, ', compared to dense: ', todense, 'mean: ', np.mean(todense))
print(name, ', compared to topk: ', totopk, 'mean: ', np.mean(totopk))
#ax.plot(nworkers[1:], optimal, color='k', marker='s', label='Optimal')
ax.plot(nworkers[1:], dense, color=gcolors['dense'], marker=gmarkers['dense'], label='Dense S-SGD')
ax.plot(nworkers[1:], topk, color=gcolors['topk'], marker=gmarkers['topk'], label=r'Top-$k$ S-SGD')
ax.plot(nworkers[1:], gtopk, color=gcolors['gtopk'], marker=gmarkers['gtopk'], label=r'gTop-$k$ S-SGD')
#plt.yscale('log', basey=2)
#plt.xscale('log', basey=2)
plt.legend(loc=3,prop={'size': 14})
plt.xlabel('# of workers (GPU)')
plt.ylabel('Scaling efficiency (Percentage)')
plt.xticks(nworkers[1:])
plt.title(name)
#plt.yticks(nnodes)
#plt.ylim(top=gtopk[-1]+1)
#plt.xlim(left=1, right=nnodes[-1]+1)
#plt.grid(color='#5e5c5c', linestyle='-.', linewidth=1)
plt.grid(linestyle=':')
update_fontsize(ax, fontsize=14)
plt.subplots_adjust(left=0.18, bottom=0.16, top=0.92, right=0.97)
plt.savefig('%s/scalingeffi%s.pdf' % (OUTPUT_PATH, name.lower()))
plt.show()
def parse_real_comm_cost():
configs = ['GoogleNet', 'gm'] #SyncEASGD
name = configs[0]
t = configs[1]
nnodes = [2, 4, 8]
ncomms = []
for n in nnodes:
test_file = '/home/shshi/gpuhome/repositories/dpBenchmark/tools/caffe/cnn/%s/%s%dcomm.log' % (name.lower(), t, n)
sizes, comms, computes, merged_comms = read_log(test_file)
ncomms.append(np.sum(merged_comms))
print('network: ', name, ', type: ', t)
print('ncomms: ', ncomms)
def speedup_with_r_and_n(r, n):
return n/(1.+r)
def draw_ssgd_speedup():
Ns = [8, 16, 32, 64]
r = np.arange(0, 4, step=0.1)
for N in Ns:
s = N / (1+r)
plt.plot(r, s)
#plt.yscale('log', basey=2)
plt.show()
def plot_p2platency():
def _fit_linear_function(x, y):
X = np.array(x)
Y = np.array(y)
A = np.vstack([X, np.ones(len(X))]).T
beta, alpha = np.linalg.lstsq(A, Y, rcond=None)[0]
return alpha, beta
fig, ax = plt.subplots(figsize=(5,3.8))
#fig, ax = plt.subplots(figsize=(5,4.2))
filename = '/media/sf_Shared_Data/tmp/icdcs2019/mgdlogs/mgd140/p2platency.log'
sizes, comms, errors = read_allreduce_log(filename)
comms = [c/1000. for c in comms]
errors = [c/1000. for c in errors]
alpha, beta = _fit_linear_function(sizes, comms)
print('alpha: %f, beta: %f' % (alpha, beta))
ax.errorbar(sizes, comms, errors, label='Measured Point-to-point Communication', fmt='o', linewidth=1)
ax.plot(sizes, alpha+np.array(sizes)*beta, label=r'Predicted ($\alpha=%.3f, \beta=%f$)'%(alpha, beta), linewidth=1)
ax.grid(linestyle=':')
plt.xlabel('Size of parameters [bytes]')
plt.ylabel(r'Communication time [ms]')
plt.ylim(bottom=0, top=plt.ylim()[1]*1.2)
plt.legend(ncol=1, loc=2, prop={'size': 10})
update_fontsize(ax, fontsize=16)
plt.subplots_adjust(left=0.16, bottom=0.17, top=0.98, right=0.98)
plt.ticklabel_format(axis='x', style='sci', scilimits=(0,0))
plt.savefig('%s/%s.pdf' % (OUTPUT_PATH, 'p2pcommtime'))
plt.show()
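# Hedged sketch (not part of the original script): the alpha-beta latency fit
# used in plot_p2platency, reproduced on synthetic data so the recovered
# constants can be checked. All numbers below are made up for illustration.
def _sketch_alpha_beta_fit():
    sizes = np.array([1e3, 1e4, 1e5, 1e6])  # message sizes [bytes]
    comms = 0.4 + 9e-6 * sizes              # synthetic latencies [ms]
    A = np.vstack([sizes, np.ones(len(sizes))]).T
    beta, alpha = np.linalg.lstsq(A, comms, rcond=None)[0]
    # recovers alpha ~= 0.4 (startup cost) and beta ~= 9e-6 (per-byte cost)
    return alpha, beta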
def plot_allreduce_comparison():
alpha = 0.436
beta = 4*9e-6
def _denseallreduce_model(P, m):
return 2*(P-1)*alpha + 2* (P-1)/P * m * beta
#return 2*np.log2(P)*alpha + 2* (P-1)/P * m * beta
def _sparseallreduce_model(P, m, rho=0.001):
return np.log2(P) + 2 * (P - 1) * rho * m * beta
def _gtopkallreduce_model(P, m, rho=0.001):
return 2*np.log2(P) + 4 * np.log2(P) * rho * m * beta
fig, ax = plt.subplots(figsize=(5,3.8))
#fig, ax = plt.subplots(figsize=(5,4.2))
#variable = 'm'
variable = 'P'
if variable == 'm':
m = [2**(2*10+i) for i in range(0, 8)] # from 1M to 128M
m = np.array(m)
P = 32
rho = 0.001
#xlabel = 'Size of parameters [bytes]'
xlabel = '# of parameters'
xticks = m
# measured
#filename = '%s/mgdlogs/mgd140/ring-allreduce%d.log' % (INPUT_PATH, P)
#sizes, comms, errors = read_allreduce_log(filename)
#comms = np.array(comms)/1000.
#print('sizes: ', sizes)
#print('comms: ', comms)
#ax.plot(sizes, comms, label=r'DenseAllReduce', linewidth=1, marker=gmarkers['dense'], color=gcolors['dense'])
elif variable == 'P':
m = 25*1024 * 1024 # 10MBytes
P = np.array([4, 8, 16, 32, 64, 128])
rho = 0.001
xlabel = 'Number of workers'
xticks = P
elif variable == 'rho':
m = 8*1024 * 1024 # 10MBytes
P = np.array([4, 8, 16, 32])
rho = np.array([0.01/(2*i) for i in range(1, 10)])
xlabel = 'Density'
xticks = rho
dar = _denseallreduce_model(P, m)
sar = _sparseallreduce_model(P, m, rho)
gar = _gtopkallreduce_model(P, m, rho)
#ax.plot(xticks, dar, label=r'DenseAllReduce', linewidth=1, marker=gmarkers['dense'], color=gcolors['dense'])
ax.plot(xticks, sar, label=r'TopKAllReduce', linewidth=1, marker=gmarkers['sparse'], color=gcolors['sparse'])
ax.plot(xticks, gar, label=r'gTopKAllReduce', linewidth=1, marker=gmarkers['gtopk'], color=gcolors['gtopk'])
ax.grid(linestyle=':')
plt.subplots_adjust(bottom=0.16, left=0.15, right=0.96, top=0.97)
#ax.set_yscale("log", nonposy='clip')
plt.xlabel(xlabel)
plt.ylabel(r'Communication time [ms]')
#plt.ylim(bottom=0, top=plt.ylim()[1]*1.2)
plt.legend(ncol=1, loc=2, prop={'size': 10})
plt.subplots_adjust(left=0.18, bottom=0.20, top=0.94, right=0.96)
#plt.ticklabel_format(axis='x', style='sci', scilimits=(0,0))
if variable == 'P':
plt.xticks(xticks)
elif variable == 'm':
ax.set_xscale("log")
update_fontsize(ax, fontsize=16)
plt.savefig('%s/%s.pdf' % (OUTPUT_PATH, 'sparvsgtopk_dynamic%s'%variable))
plt.show()
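# Hedged sketch (illustrative only): evaluates the three cost models from
# plot_allreduce_comparison at a single configuration, so the linear-in-P
# growth of TopKAllReduce versus the logarithmic growth of gTopKAllReduce is
# visible numerically. The alpha/beta constants mirror the ones defined above;
# nothing here is measured.
def _sketch_allreduce_models(P=32, m=25 * 1024 * 1024, rho=0.001,
                             alpha=0.436, beta=4 * 9e-6):
    dense = 2 * (P - 1) * alpha + 2 * (P - 1) / P * m * beta
    topk = np.log2(P) + 2 * (P - 1) * rho * m * beta
    gtopk = 2 * np.log2(P) + 4 * np.log2(P) * rho * m * beta
    return dense, topk, gtopk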
def plot_breakdown():
logpath='/media/sf_Shared_Data/tmp/icdcs2019/mgdlogs/mgd115-2/logs/allreduce-comp-baseline-gwarmup-dc1-modelmgd-speed/'
networks=['vgg16', 'resnet20', 'alexnet', 'resnet50']
batchsizes=[128, 128, 64, 256]
lrs=[0.1, 0.1, 0.01, 0.01]
nss=[1,1,1, 16]
for i, na in enumerate(networks):
bs = batchsizes[i]
lr = lrs[i]
ns = nss[i]
fn = os.path.join(logpath, '%s-n32-bs%d-lr%.4f-ns%d-sg2.50/MGD-0.log' % (na, bs, lr, ns))
print('fn: ', fn)
names = ['Compu.', 'Compr.', 'Commu.']
vgg16=[0.139536, 0.091353, 0.811753]
resnet20=[0.146005, 0.001618, 0.024686]
alexnet=[0.257205, 0.383776, 3.36298]
resnet50=[4.882041, 0.15405 , 1.424253]
ratio_vgg16 = [v/np.sum(vgg16) for v in vgg16]
ratio_resnet20= [v/np.sum(resnet20) for v in resnet20]
ratio_alexnet = [v/np.sum(alexnet) for v in alexnet]
ratio_resnet50= [v/np.sum(resnet50) for v in resnet50]
datas = [ratio_vgg16, ratio_resnet20, ratio_alexnet, ratio_resnet50]
for d in datas:
print('ratios: ', d)
communications = [ratio_vgg16[2], ratio_resnet20[2], ratio_alexnet[2], ratio_resnet50[2]]
compressions = [ratio_vgg16[1], ratio_resnet20[1], ratio_alexnet[1], ratio_resnet50[1]]
computes = [ratio_vgg16[0], ratio_resnet20[0], ratio_alexnet[0], ratio_resnet50[0]]
computes = np.array(computes)
compressions= np.array(compressions)
communications= np.array(communications)
fig, ax = plt.subplots(figsize=(4.8,3.4))
count = len(datas)
ind = np.arange(count)
width = 0.35
margin = 0.05
xticklabels = ['VGG-16', 'ResNet-20', 'AlexNet', 'ResNet-50']
#ind = np.array([s+i+1 for i in range(count)])
newind = np.arange(count)
p1 = ax.bar(newind, computes, width, color=Color.comp_color,hatch='x', label=names[0])
p2 = ax.bar(newind, compressions, width, bottom=computes, color=Color.compression_color,hatch='-', label=names[1])
p3 = ax.bar(newind, communications, width, bottom=computes+compressions, color=Color.opt_comm_color,label=names[2])
    #ax.text(10, 10, 'hello', color='b')  # leftover debug annotation, disabled
handles, labels = ax.get_legend_handles_labels()
#ax.legend([handles[0][0]], [labels[0][0]], ncol=2)
print(labels)
print(handles)
#ax.set_xlim(left=1+0.3)
#ax.set_ylim(top=ax.get_ylim()[1]*1.3)
ax.set_xticks(ind)
ax.set_xticklabels(xticklabels)
#ax.set_xlabel('Model')
ax.set_ylabel('Percentage')
update_fontsize(ax, 10)
ax.legend((p1[0], p2[0], p3[0]), tuple(names), ncol=9, bbox_to_anchor=(1, -0.1))#, handletextpad=0.2, columnspacing =1.)
#ax.legend((p1[0], p2[0]), (labels[0],labels[1] ), ncol=2, handletextpad=0.2, columnspacing =1.)
fig.subplots_adjust(left=0.16, right=0.96, bottom=0.19, top=0.94)
plt.savefig('%s/breakdown.pdf' % (OUTPUT_PATH))
plt.show()
if __name__ == '__main__':
#plot_all_communication_overheads()
#plot_p2platency()
plot_allreduce_comparison()
#realdata_speedup()
#plot_breakdown()
| 38.478796
| 141
| 0.581342
| 6,004
| 0.213453
| 0
| 0
| 0
| 0
| 0
| 0
| 8,009
| 0.284734
|
02527978354f0193255cdacc1cd11fc9125db75e
| 2,188
|
py
|
Python
|
app/routers/post.py
|
thiere18/fastapi-boilerplate
|
6760e0e49caa915563d44897262d493b012207c0
|
[
"MIT"
] | 5
|
2021-12-10T17:35:31.000Z
|
2021-12-30T18:36:23.000Z
|
app/routers/post.py
|
thiere18/fastapi-boilerplate
|
6760e0e49caa915563d44897262d493b012207c0
|
[
"MIT"
] | 1
|
2021-11-21T13:59:03.000Z
|
2021-11-21T13:59:03.000Z
|
app/routers/post.py
|
thiere18/fastapi-boilerplate
|
6760e0e49caa915563d44897262d493b012207c0
|
[
"MIT"
] | 1
|
2021-12-07T14:08:12.000Z
|
2021-12-07T14:08:12.000Z
|
from typing import List
from fastapi import APIRouter, Depends, HTTPException, Response, status
from sqlalchemy.orm.session import Session
from ..database import get_db
from .. import models, schemas, oauth2
router=APIRouter(
prefix='/posts',
tags=['Post']
)
@router.get('/',response_model=List[schemas.PostOut])
def get_lists( db:Session=Depends(get_db),current_user: int =Depends(oauth2.get_current_user)):
ps=db.query(models.Post).all()
return ps
@router.post("/")
def post_list(post:schemas.PostCreate,db:Session=Depends(get_db),current_user: int =Depends(oauth2.get_current_user)):
    new_post = models.Post(user_id=current_user.id, **post.dict())
db.add(new_post)
db.commit()
db.refresh(new_post)
return new_post
@router.get("/{id}")
def get_post_by_id(id:int ,db:Session=Depends(get_db), current_user: int =Depends(oauth2.get_current_user)):
post = db.query(models.Post).filter(models.Post.id == id).first()
if post is None:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND ,
detail=f"post with id {id} not found")
return post
@router.put("/{id}",status_code=status.HTTP_200_OK)
def update_list(id:int,updated_list:schemas.PostCreate ,db:Session=Depends(get_db), current_user: int =Depends(oauth2.get_current_user)):
post_query=db.query(models.Post).filter(models.Post.id==id)
post=post_query.first()
if post is None:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND ,detail=f"post with id {id} not found")
post_query.update(updated_list.dict(),synchronize_session=False)
db.commit()
return post_query.first()
@router.delete("/{id}" ,status_code=status.HTTP_204_NO_CONTENT)
def delete_list(id:int ,db:Session=Depends(get_db), current_user: int =Depends(oauth2.get_current_user)):
post_query=db.query(models.Post).filter(models.Post.id == id)
post=post_query.first()
if post is None:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND ,detail=f"post with id {id} not found")
post_query.delete(synchronize_session=False)
db.commit()
return Response(status_code=status.HTTP_204_NO_CONTENT)
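# Hedged usage sketch (hypothetical, not part of the project wiring): mount
# this router on a FastAPI app and exercise one endpoint with TestClient.
# Real requests still pass through the get_db and oauth2 dependencies, so a
# working database and bearer token are assumed when this is actually called.
def _example_request():
    from fastapi import FastAPI
    from fastapi.testclient import TestClient
    app = FastAPI()
    app.include_router(router)
    client = TestClient(app)
    # without a valid token, oauth2.get_current_user should reject this
    return client.get("/posts/1")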
| 39.781818
| 137
| 0.743601
| 0
| 0
| 0
| 0
| 1,871
| 0.855119
| 0
| 0
| 131
| 0.059872
|
0252f8eedc296b4ab429a47459f42ba29b283dbc
| 8,766
|
py
|
Python
|
src/util.py
|
thanhnhan311201/via-line-detection
|
1ba986110f7522df1b82c2cdeacd5c8bc27ac896
|
[
"Unlicense"
] | null | null | null |
src/util.py
|
thanhnhan311201/via-line-detection
|
1ba986110f7522df1b82c2cdeacd5c8bc27ac896
|
[
"Unlicense"
] | null | null | null |
src/util.py
|
thanhnhan311201/via-line-detection
|
1ba986110f7522df1b82c2cdeacd5c8bc27ac896
|
[
"Unlicense"
] | null | null | null |
import torch.nn as nn
import cv2
import torch
from copy import deepcopy
import numpy as np
from torch.autograd import Variable
from torch.autograd import Function as F
from numpy.polynomial import Polynomial as P
try:
from parameters import Parameters
except:
from src.parameters import Parameters
import math
p = Parameters()
###############################################################
##
## visualize
##
###############################################################
def visualize_points(image, x, y):
image = np.rollaxis(image, axis=2, start=0)
image = np.rollaxis(image, axis=2, start=0)#*255.0
image = image.astype(np.uint8).copy()
for k in range(len(y)):
for i, j in zip(x[k], y[k]):
if i > 0:
image = cv2.circle(image, (int(i), int(j)), 2, p.color[1], -1)
cv2.imshow("test2", image)
cv2.waitKey(0)
def visualize_points_origin_size(x, y, test_image, ratio_w, ratio_h):
color = 0
image = deepcopy(test_image)
image = np.rollaxis(image, axis=2, start=0)
image = np.rollaxis(image, axis=2, start=0)#*255.0
image = image.astype(np.uint8).copy()
image = cv2.resize(image, (int(p.x_size/ratio_w), int(p.y_size/ratio_h)))
for i, j in zip(x, y):
color += 1
for index in range(len(i)):
cv2.circle(image, (int(i[index]), int(j[index])), 10, p.color[color], -1)
cv2.imshow("test2", image)
cv2.waitKey(0)
return test_image
def visualize_gt(gt_point, gt_instance, ground_angle, image):
image = np.rollaxis(image, axis=2, start=0)
image = np.rollaxis(image, axis=2, start=0)#*255.0
image = image.astype(np.uint8).copy()
for y in range(p.grid_y):
for x in range(p.grid_x):
if gt_point[0][y][x] > 0:
xx = int(gt_point[1][y][x]*p.resize_ratio+p.resize_ratio*x)
yy = int(gt_point[2][y][x]*p.resize_ratio+p.resize_ratio*y)
image = cv2.circle(image, (xx, yy), 10, p.color[1], -1)
cv2.imshow("image", image)
cv2.waitKey(0)
def visualize_regression(image, gt):
image = np.rollaxis(image, axis=2, start=0)
image = np.rollaxis(image, axis=2, start=0)*255.0
image = image.astype(np.uint8).copy()
for i in gt:
        for j in range(p.regression_size):
            # cv2.circle requires integer pixel coordinates
            y_value = int(p.y_size - (p.regression_size - j) * (220 / p.regression_size))
            if i[j] > 0:
                x_value = int(i[j] * p.x_size)
                image = cv2.circle(image, (x_value, y_value), 5, p.color[1], -1)
cv2.imshow("image", image)
cv2.waitKey(0)
def draw_points(x, y, image):
color_index = 0
for i, j in zip(x, y):
color_index += 1
if color_index > 12:
color_index = 12
for index in range(len(i)):
# print( (int(i[index]), int(j[index])))
image = cv2.circle(image, (int(i[index]), int(j[index])), 5, p.color[color_index], -1)
return image
def draw_poly(poly, image, color):
    if len(poly) == 0:  # handles both plain lists and numpy arrays
        return image
    y = np.linspace(256*12/20, 256, 10)
    fit = np.poly1d(poly)  # local name avoids shadowing the module-level Parameters `p`
    x = [(fit - _y).roots[0] for _y in y]
    draw_points = (np.asarray([x, y]).T).astype(np.int32)
    cv2.polylines(image, [draw_points], False, color, 3)
return image
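# Hedged sketch (illustrative): how draw_poly inverts the fitted polynomial.
# (fit - _y).roots solves fit(x) = _y; taking roots[0] assumes the first root
# is the relevant lane coordinate.
def _sketch_poly_inversion():
    q = np.poly1d([1.0, 0.0, -4.0])  # q(x) = x^2 - 4
    return (q - 0.0).roots[0]        # solves q(x) = 0 -> 2.0 (or -2.0)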
###############################################################
##
## calculate
##
###############################################################
def adjust_fits(fits):
min_y = 20
len_fit = fits.shape[0]
values_x = np.array([np.poly1d(fit)(min_y) for fit in fits ])
order = np.argsort(values_x)
fits_sorted = fits[order]
if len(fits_sorted) > 3:
fits_sorted = fits_sorted[:3]
return fits_sorted
def get_steer_angle(fits):
min_y = 20
len_fit = fits.shape[0]
    if len_fit > 3:
        pass  # adjust_fits() already truncates to at most 3 fits
if len_fit >= 2:
y = 20
x = (np.poly1d(fits[-1])(y) + np.poly1d(fits[-2])(y)) // 2
return_value = errorAngle((x,y))
#update point in lane
temp_y = 200
temp_x = (np.poly1d(fits[-1])(temp_y) + np.poly1d(fits[-2])(temp_y)) // 2
p.point_in_lane = (temp_x,temp_y)
return return_value
    if len_fit == 1:  # only one line detected
        y = 20
        available_fit = np.poly1d(fits[0])
        x_available = available_fit(y)
        # check which side of the detected line the tracked lane point is on
        point_x = p.point_in_lane[0]
        point_y = p.point_in_lane[1]
        val = point_x - available_fit(point_y)
        # print(val)
        if val > 0:  # lane point is right of the line
            x = x_available + 150
        else:  # lane point is left of the line
            x = x_available - 150
        return_value = errorAngle((x, y))
return return_value
return 0
def convert_to_original_size(x, y, ratio_w, ratio_h):
# convert results to original size
out_x = []
out_y = []
for i, j in zip(x,y):
out_x.append((np.array(i)/ratio_w).tolist())
out_y.append((np.array(j)/ratio_h).tolist())
return out_x, out_y
def get_closest_upper_point(x, y, point, n):
x = np.array(x)
y = np.array(y)
x = x[y<point[1]]
y = y[y<point[1]]
dis = (x - point[0])**2 + (y - point[1])**2
ind = np.argsort(dis, axis=0)
x = np.take_along_axis(x, ind, axis=0).tolist()
y = np.take_along_axis(y, ind, axis=0).tolist()
points = []
for i, j in zip(x[:n], y[:n]):
points.append((i,j))
return points
def sort_along_y(x, y):
out_x = []
out_y = []
for i, j in zip(x, y):
i = np.array(i)
j = np.array(j)
ind = np.argsort(j, axis=0)
out_x.append(np.take_along_axis(i, ind[::-1], axis=0).tolist())
out_y.append(np.take_along_axis(j, ind[::-1], axis=0).tolist())
return out_x, out_y
def sort_along_x(x, y):
temp = np.min(y)
try:
min_y = temp[0]
except:
min_y = temp
# print(min_y)
fits = np.array([np.polyfit(_y,_x, 2) for _x, _y in zip(x,y)])
# print(fits)
values_x = np.array([np.poly1d(fit)(min_y) for fit in fits ])
# print(values_x)
order = np.argsort(values_x)
    # print(order)
return np.array(x)[order], np.array(y)[order]
def sort_batch_along_y(target_lanes, target_h):
out_x = []
out_y = []
for x_batch, y_batch in zip(target_lanes, target_h):
temp_x = []
temp_y = []
for x, y, in zip(x_batch, y_batch):
ind = np.argsort(y, axis=0)
sorted_x = np.take_along_axis(x, ind[::-1], axis=0)
sorted_y = np.take_along_axis(y, ind[::-1], axis=0)
temp_x.append(sorted_x)
temp_y.append(sorted_y)
out_x.append(temp_x)
out_y.append(temp_y)
return out_x, out_y
def errorAngle(point):
carPosx , carPosy = 512//2, 254
dstx, dsty = point
if dstx == carPosx:
return 0
if dsty == carPosy:
if dstx < carPosx:
return -25
else:
return 25
pi = math.acos(-1.0)
dx = dstx - carPosx
dy = carPosy - dsty
if dx < 0:
angle = (math.atan(-dx / dy) * -180 / pi)/2.5
if angle >= 16 or angle <= -16: # maybe must turn 90
if angle > 0:
return 25
return -25
return angle
#################################################
angle = (math.atan(dx / dy) * 180 / pi)/2.5
if angle >= 16 or angle <= -16: # maybe must turn 90
if angle > 0:
return 25
return -25
return angle
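# Illustrative sanity checks for errorAngle; the car is fixed at (256, 254),
# so these values follow directly from the geometry above.
def _sketch_error_angle():
    assert errorAngle((256, 100)) == 0    # target straight ahead
    assert errorAngle((300, 254)) == 25   # level with the car, to the right
    assert errorAngle((200, 254)) == -25  # level with the car, to the left
    return errorAngle((300, 200))         # small positive steering angle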
def calcul_speed(steer_angle):
max_speed = 70
max_angle = 25
if steer_angle == -10 or steer_angle == 10:
return 0
if steer_angle >= 1 or steer_angle <= -1:
if steer_angle > 0:
return max_speed - (max_speed/max_angle)*steer_angle
else:
return max_speed + (max_speed/max_angle)*steer_angle
    elif steer_angle >= 4 or steer_angle <= -4:  # NOTE: unreachable; any such angle already matched the branch above
if steer_angle > 0:
return 40 - (40/max_angle)*steer_angle
else:
return 40 + (30/max_angle)*steer_angle
# elif steer_angle >= 10 or steer_angle <= -10:
# if steer_angle > 0:
# return max_speed - (max_speed/max_angle)*steer_angle
# else:
# return max_speed + (max_speed/max_angle)*steer_angle
# if steer_angle >=0:
# return max_speed - (max_speed/max_angle)*steer_angle
return max_speed
def clear_StatusObjs(StatusObjs):
list_result = []
for obj in StatusObjs:
if 'i5' in obj:
obj.remove('i5')
if 'pne' in obj:
obj.remove('pne')
if 'car' in obj:
obj.remove('car')
if 'w65' in obj:
obj.remove('w65')
list_result.append(obj)
return list_result
| 27.828571
| 98
| 0.544832
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 960
| 0.109514
|
0253374b375e14e18b7b22c7b40e9e638b1ad7cf
| 3,322
|
py
|
Python
|
src/tests/unit_tests/io_tools_test.py
|
samueljackson92/major-project
|
5d82b875944fcf1f001f9beb5e5419ba60be3bf1
|
[
"MIT"
] | 8
|
2015-01-26T16:23:29.000Z
|
2020-03-17T00:57:42.000Z
|
src/tests/unit_tests/io_tools_test.py
|
samueljackson92/major-project
|
5d82b875944fcf1f001f9beb5e5419ba60be3bf1
|
[
"MIT"
] | 64
|
2015-02-05T06:34:56.000Z
|
2015-05-03T15:46:49.000Z
|
src/tests/unit_tests/io_tools_test.py
|
samueljackson92/major-project
|
5d82b875944fcf1f001f9beb5e5419ba60be3bf1
|
[
"MIT"
] | null | null | null |
import nose.tools
import unittest
import os
import json
import pandas as pd
import numpy as np
import mia
from mia.io_tools import *
from ..test_utils import get_file_path
class IOTests(unittest.TestCase):
@classmethod
def setupClass(cls):
cls._output_files = []
@classmethod
def teardownClass(cls):
for f in cls._output_files:
if os.path.isfile(f):
os.remove(f)
def test_iterate_directory(self):
img_directory = get_file_path("texture_patches")
expected_files = ['texture1.png', 'texture2.png', 'texture3.png',
'texture4.png', 'texture5.png']
expected_files = [os.path.join(img_directory, p) for p in expected_files]
dirs = list(iterate_directory(img_directory))
nose.tools.assert_equal(len(dirs), len(expected_files))
for img_path, expected in zip(dirs, expected_files):
nose.tools.assert_equal(img_path, expected)
def test_iterate_directories(self):
img_directory = get_file_path("texture_patches")
expected_files = ['texture1.png', 'texture2.png', 'texture3.png',
'texture4.png', 'texture5.png']
expected_files = [os.path.join(img_directory, p) for p in expected_files]
dirs = list(iterate_directories(img_directory, img_directory))
nose.tools.assert_equal(len(dirs), len(expected_files))
for (img_path, msk_path), expected in zip(dirs, expected_files):
nose.tools.assert_equal(img_path, expected)
nose.tools.assert_equal(msk_path, expected)
def test_check_is_file(self):
img_path = get_file_path("texture_patches/texture1.png")
nose.tools.assert_true(check_is_file(img_path, ".png"))
def test_check_is_file_multiple_images(self):
img_path = get_file_path("synthetic_patch.dcm")
nose.tools.assert_true(check_is_file(img_path, ".png", ".dcm"))
def test_check_is_file_wrong_extension(self):
img_path = get_file_path("blob_detection.csv")
nose.tools.assert_false(check_is_file(img_path, ".png", ".dcm"))
def test_check_is_image_raises_on_not_a_file(self):
img_path = get_file_path("texture_patches")
nose.tools.assert_false(check_is_file(img_path, ".png", ".dcm"))
def test_check_is_directory(self):
directory = get_file_path("texture_patches")
try:
check_is_directory(directory)
except:
self.fail("check_is_directory raised when it shouldn't have.")
def test_check_is_directory_raises(self):
img_path = get_file_path("texture_patches/not_a_directory")
nose.tools.assert_raises(ValueError, check_is_directory, img_path)
def test_dump_mapping_to_json(self):
output_file = 'test_data.json'
mapping = pd.DataFrame(np.ones((10, 2)), columns=['x', 'y'])
dump_mapping_to_json(mapping, ['x', 'y'], np.zeros(10), output_file)
nose.tools.assert_true(os.path.isfile(output_file))
with open(output_file, 'rb') as f:
data = json.load(f)
nose.tools.assert_equal(len(data), 1)
nose.tools.assert_equal(data[0]['name'], 'Class: 0')
nose.tools.assert_equal(len(data[0]['data']), 10)
self._output_files.append(output_file)
| 35.340426
| 81
| 0.669175
| 3,146
| 0.94702
| 0
| 0
| 207
| 0.062312
| 0
| 0
| 459
| 0.13817
|
0254feaa1c998dfb2faf7f35247b0cc22066d85a
| 326
|
py
|
Python
|
main/migrations_old/0007_remove_profile_rated_recipes.py
|
ggetzie/nnr
|
a8b1b1d771027edee2c19062f39fa982cfd024b0
|
[
"MIT"
] | null | null | null |
main/migrations_old/0007_remove_profile_rated_recipes.py
|
ggetzie/nnr
|
a8b1b1d771027edee2c19062f39fa982cfd024b0
|
[
"MIT"
] | 5
|
2020-07-28T12:41:50.000Z
|
2022-01-21T23:27:15.000Z
|
main/migrations_old/0007_remove_profile_rated_recipes.py
|
ggetzie/nnr
|
a8b1b1d771027edee2c19062f39fa982cfd024b0
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.4 on 2019-09-29 13:12
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0006_recipe_tags'),
]
operations = [
migrations.RemoveField(
model_name='profile',
name='rated_recipes',
),
]
| 18.111111
| 47
| 0.588957
| 241
| 0.739264
| 0
| 0
| 0
| 0
| 0
| 0
| 95
| 0.291411
|
0255255ddce0aede915e8004ff48e8619c540430
| 126
|
py
|
Python
|
src/timber_clay_hybrid/assembly/__init__.py
|
augmentedfabricationlab/Timber_Clay_Hybrid
|
243efddac77970c989b551697a0e188932064849
|
[
"MIT"
] | 1
|
2020-12-16T01:25:07.000Z
|
2020-12-16T01:25:07.000Z
|
src/timber_clay_hybrid/assembly/__init__.py
|
augmentedfabricationlab/timber_clay_hybrid
|
243efddac77970c989b551697a0e188932064849
|
[
"MIT"
] | null | null | null |
src/timber_clay_hybrid/assembly/__init__.py
|
augmentedfabricationlab/timber_clay_hybrid
|
243efddac77970c989b551697a0e188932064849
|
[
"MIT"
] | null | null | null |
from .assembly import HRCAssembly
from .element import HRCElement
from .artist import AssemblyArtist
from .utilities import *
| 25.2
| 34
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
025829c61e2b13a8ebf606a7afdd54a016dd8119
| 3,674
|
py
|
Python
|
backend/api/tests/schema/test_newsletter.py
|
pauloxnet/pycon
|
82b6eff76dcc785865ea3ffd97a45e931c0add26
|
[
"MIT"
] | 2
|
2017-07-18T21:51:25.000Z
|
2017-12-23T11:08:39.000Z
|
backend/api/tests/schema/test_newsletter.py
|
pauloxnet/pycon
|
82b6eff76dcc785865ea3ffd97a45e931c0add26
|
[
"MIT"
] | 23
|
2017-07-18T20:22:38.000Z
|
2018-01-05T05:45:15.000Z
|
backend/api/tests/schema/test_newsletter.py
|
pauloxnet/pycon
|
82b6eff76dcc785865ea3ffd97a45e931c0add26
|
[
"MIT"
] | 2
|
2017-07-18T21:27:33.000Z
|
2017-07-18T22:07:03.000Z
|
from unittest.mock import patch
import pytest
from pytest import mark
from integrations.mailchimp import SubscriptionResult
from newsletters.models import Subscription
def test_subscribe_to_newsletter(graphql_client):
email = "me@example.it"
variables = {"email": email}
query = """
mutation($email: String!) {
subscribeToNewsletter(input: {
email: $email
}) {
__typename
... on NewsletterSubscribeResult {
status
}
}
}
"""
with patch("api.newsletters.forms.subscribe") as mock_subscription:
mock_subscription.return_value = SubscriptionResult.SUBSCRIBED
resp = graphql_client.query(query, variables=variables)
assert (
resp["data"]["subscribeToNewsletter"]["__typename"]
== "NewsletterSubscribeResult"
)
assert resp["data"]["subscribeToNewsletter"]["status"] == "SUBSCRIBED"
@pytest.mark.skip
@mark.django_db
def test_unsubscribe_not_registered_mail_to_newsletter(graphql_client):
"""If the mail is already unsubscribed (it's not in the subcription table)
return true anyway"""
email = "me@example.it"
variables = {"email": email}
query = """
mutation($email: String!) {
unsubscribeToNewsletter(input: {
email: $email
}) {
__typename
... on UnsubscribeToNewsletterErrors {
email
}
... on NewsletterSubscribeResult {
status
}
}
}
"""
resp = graphql_client.query(query, variables=variables)
assert resp["data"]["unsubscribeToNewsletter"]["status"] is True
def _update_user_newsletter(graphql_client, user, open_to_newsletter):
query = """
mutation(
$open_to_newsletter: Boolean!,
$open_to_recruiting: Boolean!,
$date_birth: String
){
update(input: {
openToNewsletter: $open_to_newsletter,
openToRecruiting: $open_to_recruiting,
dateBirth: $date_birth
}){
__typename
... on User {
id
openToNewsletter
}
... on UpdateErrors {
validationOpenToNewsletter: openToNewsletter
nonFieldErrors
}
}
}
"""
variables = {
"open_to_newsletter": open_to_newsletter,
"open_to_recruiting": user.open_to_recruiting,
"date_birth": f"{user.date_birth:%Y-%m-%d}",
}
return graphql_client.query(query=query, variables=variables), variables
@pytest.mark.skip
@mark.django_db
def test_subscribe_when_update_user(graphql_client, user_factory):
user = user_factory(open_to_newsletter=False)
graphql_client.force_login(user)
resp, variables = _update_user_newsletter(graphql_client, user, True)
assert resp["data"]["update"]["__typename"] == "MeUser"
assert resp["data"]["update"]["openToNewsletter"] is True
assert Subscription.objects.get(email=user.email)
@pytest.mark.skip
@mark.django_db
def test_unsubscribe_when_update_user(graphql_client, user_factory):
user = user_factory(open_to_newsletter=True)
graphql_client.force_login(user)
resp, variables = _update_user_newsletter(graphql_client, user, False)
assert resp["data"]["update"]["__typename"] == "MeUser"
assert resp["data"]["update"]["openToNewsletter"] is False
with pytest.raises(Subscription.DoesNotExist):
Subscription.objects.get(email=user.email)
| 27.833333
| 78
| 0.617583
| 0
| 0
| 0
| 0
| 1,755
| 0.477681
| 0
| 0
| 1,797
| 0.489113
|
02591832a76c44befd1384a4984c9e645f451a38
| 3,077
|
py
|
Python
|
conference_lib/confemailrecipients.py
|
allankellynet/mimas
|
10025d43bba9e84f502a266760786842e7158a05
|
[
"MIT"
] | null | null | null |
conference_lib/confemailrecipients.py
|
allankellynet/mimas
|
10025d43bba9e84f502a266760786842e7158a05
|
[
"MIT"
] | 1
|
2020-02-05T13:00:29.000Z
|
2020-02-05T13:00:29.000Z
|
conference_lib/confemailrecipients.py
|
allankellynet/mimas
|
10025d43bba9e84f502a266760786842e7158a05
|
[
"MIT"
] | null | null | null |
#-----------------------------------------------------
# Mimas: conference submission and review system
# (c) Allan Kelly 2016-2020 http://www.allankelly.net
# Licensed under MIT License, see LICENSE file
# -----------------------------------------------------
# System imports
# Google imports
from google.appengine.ext import ndb
# Local imports
import confoptions
from scaffold import sorrypage, userrightsnames
import basehandler
class ConferenceEmailsPage(basehandler.BaseHandler):
def get(self):
        if "conf" not in self.request.params:
sorrypage.redirect_sorry(self, "ConfKeyMissing")
return
conf_key = ndb.Key(urlsafe=self.request.get("conf"))
conference = conf_key.get()
if not(conference.user_rights().has_permission(self.get_crrt_user().email(),
userrightsnames.CONF_CREATOR)):
sorrypage.redirect_sorry(self, "NoAccess")
return
self.write_page('conference_lib/confemailrecipients.html', {
"crrt_conf": conference,
"tracks": conference.track_options(),
"conf_key": conference.key,
"email_ack_cc": conference.ack_cc_addresses(),
"email_ack_bcc": conference.ack_bcc_addresses(),
"email_accept_cc": conference.accept_cc_addresses(),
})
# TODO Extract and unit test
def add_for_selected(self, conf_key, email):
if self.request.get("AckCC"):
confoptions.make_conference_option(confoptions.AcknowledgementEmailCCAddresses, conf_key, email)
if self.request.get("AckBCC"):
confoptions.make_conference_option(confoptions.AcknowledgementEmailBCCAddresses, conf_key, email)
if self.request.get("AcceptCC"):
confoptions.make_conference_option(confoptions.AcceptEmailCCAddress, conf_key, email)
# TODO Extract and unit test
def add_email(self):
conf_key = ndb.Key(urlsafe=self.request.get("crrt_conf_key"))
email = self.request.get("NewMail")
if len(email)>0:
self.add_for_selected(conf_key, email)
self.redirect('/confemailcopy?conf=' + self.request.get("crrt_conf_key"))
def delete_email(self, check_field, Option_Class):
conf_key = ndb.Key(urlsafe=self.request.get("crrt_conf_key"))
for opt in self.request.get_all(check_field):
confoptions.delete_option(Option_Class, conf_key, opt)
self.redirect('/confemailcopy?conf=' + conf_key.urlsafe())
def post(self):
if self.request.get("NewMail"):
self.add_email()
elif self.request.get("DeleteAckCCEmails"):
self.delete_email("selectAckCCEmail", confoptions.AcknowledgementEmailCCAddresses)
elif self.request.get("DeleteAckBCCEmails"):
self.delete_email("selectAckBCCEmail", confoptions.AcknowledgementEmailBCCAddresses)
elif self.request.get("DeleteAcceptCCEmails"):
self.delete_email("selectAcceptCCEmail", confoptions.AcceptEmailCCAddress)
| 40.486842
| 109
| 0.653559
| 2,637
| 0.857004
| 0
| 0
| 0
| 0
| 0
| 0
| 764
| 0.248294
|
0259184a3f3d6c2f7159bf04b270b9b14a650178
| 891
|
py
|
Python
|
jexam/argparser.py
|
chrispyles/jexam
|
ebe83b170f51c5820e0c93955824c3798922f097
|
[
"BSD-3-Clause"
] | 1
|
2020-07-25T02:36:38.000Z
|
2020-07-25T02:36:38.000Z
|
jexam/argparser.py
|
chrispyles/jexam
|
ebe83b170f51c5820e0c93955824c3798922f097
|
[
"BSD-3-Clause"
] | null | null | null |
jexam/argparser.py
|
chrispyles/jexam
|
ebe83b170f51c5820e0c93955824c3798922f097
|
[
"BSD-3-Clause"
] | null | null | null |
#################################
##### jExam Argument Parser #####
#################################
import argparse
def get_parser():
"""
Creates and returns the argument parser for jExam
Returns:
``argparse.ArgumentParser``: the argument parser for jExam
"""
parser = argparse.ArgumentParser()
parser.add_argument("master", type=str, help="Path to exam master notebook")
parser.add_argument("result", nargs="?", default="dist", help="Path at which to write output notebooks")
parser.add_argument("-f", "--format", type=str, default="otter", help="Name of autograder format; 'otter' or 'ok'")
parser.add_argument("-s", "--seed", type=int, default=None, help="Random seed for NumPy to run before execution")
parser.add_argument("-q", "--quiet", default=False, action="store_true", help="Run without printing status")
return parser
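# Hedged usage sketch: parsing a typical invocation (the paths below are
# illustrative, not real files).
def _example_parse():
    parser = get_parser()
    return parser.parse_args(["exam.ipynb", "dist", "--seed", "42", "--quiet"])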
| 42.428571
| 119
| 0.628507
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 523
| 0.586981
|
02591a0ba3663c70495908f0fded2d81e95b4ceb
| 474
|
py
|
Python
|
Entities/element.py
|
JoseleSolis/Proceso-de-aprendizaje
|
0c6ee3a64ad48501dd42d2abcb5bf8b4cbb4f370
|
[
"MIT"
] | null | null | null |
Entities/element.py
|
JoseleSolis/Proceso-de-aprendizaje
|
0c6ee3a64ad48501dd42d2abcb5bf8b4cbb4f370
|
[
"MIT"
] | null | null | null |
Entities/element.py
|
JoseleSolis/Proceso-de-aprendizaje
|
0c6ee3a64ad48501dd42d2abcb5bf8b4cbb4f370
|
[
"MIT"
] | 2
|
2022-02-07T05:42:57.000Z
|
2022-02-13T11:05:21.000Z
|
class Element:
    def __init__(self, name):
        self.name = name
        # per-instance list: a class-level list would be shared by every Element
        self.dependencies = []
    def add_dependencies(self, *elements):
        for element in elements:
            if element not in self.dependencies:
                self.dependencies.append(element)
    def remove_dependencies(self, *elements):
        for element in elements:
            if element in self.dependencies:
                self.dependencies.remove(element)
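# Illustrative usage: dependency lists are per-instance, so mutating one
# element's dependencies never leaks into another's.
def _example_elements():
    a = Element("a")
    b = Element("b")
    a.add_dependencies(b)
    assert b.dependencies == []  # b is unaffected
    a.remove_dependencies(b)
    return a.dependencies        # -> []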
| 19.75
| 59
| 0.622363
| 465
| 0.981013
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0259bea6f07ec94194968114adbb7688e3c79035
| 236
|
py
|
Python
|
basic/Pyshop/products/models.py
|
IsAlbertLiu/Python-basics
|
49c0c93fb7d1abb70548854b69346eb5837ba00d
|
[
"MIT"
] | null | null | null |
basic/Pyshop/products/models.py
|
IsAlbertLiu/Python-basics
|
49c0c93fb7d1abb70548854b69346eb5837ba00d
|
[
"MIT"
] | null | null | null |
basic/Pyshop/products/models.py
|
IsAlbertLiu/Python-basics
|
49c0c93fb7d1abb70548854b69346eb5837ba00d
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
class Product(models.Model):
name = models.CharField(max_length=255)
price = models.FloatField()
stack = models.IntegerField()
    image_url = models.CharField(max_length=2083)
| 23.6
| 43
| 0.724576
| 177
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 26
| 0.110169
|
0259fbe373b86b3d2859b384b23af03bfb7c829a
| 758
|
py
|
Python
|
examples/delta_setitem/001_check_setitem.py
|
pkicsiny/xpart
|
cddf3eb65ffc198c22dd37204139ce3177a9bd96
|
[
"MIT"
] | null | null | null |
examples/delta_setitem/001_check_setitem.py
|
pkicsiny/xpart
|
cddf3eb65ffc198c22dd37204139ce3177a9bd96
|
[
"MIT"
] | null | null | null |
examples/delta_setitem/001_check_setitem.py
|
pkicsiny/xpart
|
cddf3eb65ffc198c22dd37204139ce3177a9bd96
|
[
"MIT"
] | null | null | null |
import numpy as np
import xpart as xp
import xobjects as xo
#context = xo.ContextPyopencl()
context = xo.ContextCpu()
ctx2np = context.nparray_from_context_array
particles = xp.Particles(_context=context, p0c=26e9, delta=[1,2,3])
assert ctx2np(particles.delta[2]) == 3
assert np.isclose(ctx2np(particles.rvv[2]), 1.00061, rtol=0, atol=1e-5)
assert np.isclose(ctx2np(particles.rpp[2]), 0.25, rtol=0, atol=1e-10)
assert np.isclose(ctx2np(particles.ptau[2]), 3.001464*particles._xobject.beta0[0],
rtol=0, atol=1e-6)
particles.delta[1] = particles.delta[2]
assert particles.delta[2] == particles.delta[1]
assert particles.ptau[2] == particles.ptau[1]
assert particles.rpp[2] == particles.rpp[1]
assert particles.rvv[2] == particles.rvv[1]
| 32.956522
| 82
| 0.726913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 31
| 0.040897
|
025a143f5cc2381ed79e2e47f4c56370b64d62d8
| 9,628
|
py
|
Python
|
tests/test_train_eval_mode.py
|
glmcdona/stable-baselines3-contrib
|
91f9b1ed34fbaa9243a044ea67aa4c677663bfc2
|
[
"MIT"
] | 93
|
2020-10-22T14:44:58.000Z
|
2022-03-25T20:06:47.000Z
|
tests/test_train_eval_mode.py
|
glmcdona/stable-baselines3-contrib
|
91f9b1ed34fbaa9243a044ea67aa4c677663bfc2
|
[
"MIT"
] | 36
|
2020-10-26T11:13:23.000Z
|
2022-03-31T15:11:05.000Z
|
tests/test_train_eval_mode.py
|
glmcdona/stable-baselines3-contrib
|
91f9b1ed34fbaa9243a044ea67aa4c677663bfc2
|
[
"MIT"
] | 50
|
2020-12-06T14:21:10.000Z
|
2022-03-31T14:25:36.000Z
|
from typing import Union
import gym
import numpy as np
import pytest
import torch as th
import torch.nn as nn
from stable_baselines3.common.preprocessing import get_flattened_obs_dim
from stable_baselines3.common.torch_layers import BaseFeaturesExtractor
from sb3_contrib import QRDQN, TQC, MaskablePPO
from sb3_contrib.common.envs import InvalidActionEnvDiscrete
from sb3_contrib.common.maskable.utils import get_action_masks
class FlattenBatchNormDropoutExtractor(BaseFeaturesExtractor):
"""
Feature extract that flatten the input and applies batch normalization and dropout.
Used as a placeholder when feature extraction is not needed.
:param observation_space:
"""
def __init__(self, observation_space: gym.Space):
super(FlattenBatchNormDropoutExtractor, self).__init__(
observation_space,
get_flattened_obs_dim(observation_space),
)
self.flatten = nn.Flatten()
self.batch_norm = nn.BatchNorm1d(self._features_dim)
self.dropout = nn.Dropout(0.5)
def forward(self, observations: th.Tensor) -> th.Tensor:
result = self.flatten(observations)
result = self.batch_norm(result)
result = self.dropout(result)
return result
def clone_batch_norm_stats(batch_norm: nn.BatchNorm1d) -> (th.Tensor, th.Tensor):
"""
Clone the bias and running mean from the given batch norm layer.
:param batch_norm:
:return: the bias and running mean
"""
return batch_norm.bias.clone(), batch_norm.running_mean.clone()
def clone_qrdqn_batch_norm_stats(model: QRDQN) -> (th.Tensor, th.Tensor, th.Tensor, th.Tensor):
"""
Clone the bias and running mean from the quantile network and quantile-target network.
:param model:
:return: the bias and running mean from the quantile network and quantile-target network
"""
quantile_net_batch_norm = model.policy.quantile_net.features_extractor.batch_norm
quantile_net_bias, quantile_net_running_mean = clone_batch_norm_stats(quantile_net_batch_norm)
quantile_net_target_batch_norm = model.policy.quantile_net_target.features_extractor.batch_norm
quantile_net_target_bias, quantile_net_target_running_mean = clone_batch_norm_stats(quantile_net_target_batch_norm)
return quantile_net_bias, quantile_net_running_mean, quantile_net_target_bias, quantile_net_target_running_mean
def clone_tqc_batch_norm_stats(
model: TQC,
) -> (th.Tensor, th.Tensor, th.Tensor, th.Tensor, th.Tensor, th.Tensor):
"""
Clone the bias and running mean from the actor and critic networks and critic-target networks.
:param model:
:return: the bias and running mean from the actor and critic networks and critic-target networks
"""
actor_batch_norm = model.actor.features_extractor.batch_norm
actor_bias, actor_running_mean = clone_batch_norm_stats(actor_batch_norm)
critic_batch_norm = model.critic.features_extractor.batch_norm
critic_bias, critic_running_mean = clone_batch_norm_stats(critic_batch_norm)
critic_target_batch_norm = model.critic_target.features_extractor.batch_norm
critic_target_bias, critic_target_running_mean = clone_batch_norm_stats(critic_target_batch_norm)
return (actor_bias, actor_running_mean, critic_bias, critic_running_mean, critic_target_bias, critic_target_running_mean)
def clone_on_policy_batch_norm(model: Union[MaskablePPO]) -> (th.Tensor, th.Tensor):
return clone_batch_norm_stats(model.policy.features_extractor.batch_norm)
CLONE_HELPERS = {
QRDQN: clone_qrdqn_batch_norm_stats,
TQC: clone_tqc_batch_norm_stats,
MaskablePPO: clone_on_policy_batch_norm,
}
def test_ppo_mask_train_eval_mode():
env = InvalidActionEnvDiscrete(dim=20, n_invalid_actions=10)
model = MaskablePPO(
"MlpPolicy",
env,
policy_kwargs=dict(net_arch=[16, 16], features_extractor_class=FlattenBatchNormDropoutExtractor),
seed=1,
)
bias_before, running_mean_before = clone_on_policy_batch_norm(model)
model.learn(total_timesteps=200)
bias_after, running_mean_after = clone_on_policy_batch_norm(model)
assert ~th.isclose(bias_before, bias_after).all()
assert ~th.isclose(running_mean_before, running_mean_after).all()
batch_norm_stats_before = clone_on_policy_batch_norm(model)
observation = env.reset()
action_masks = get_action_masks(env)
first_prediction, _ = model.predict(observation, action_masks=action_masks, deterministic=True)
for _ in range(5):
prediction, _ = model.predict(observation, action_masks=action_masks, deterministic=True)
np.testing.assert_allclose(first_prediction, prediction)
batch_norm_stats_after = clone_on_policy_batch_norm(model)
# No change in batch norm params
for param_before, param_after in zip(batch_norm_stats_before, batch_norm_stats_after):
assert th.isclose(param_before, param_after).all()
def test_qrdqn_train_with_batch_norm():
model = QRDQN(
"MlpPolicy",
"CartPole-v1",
policy_kwargs=dict(net_arch=[16, 16], features_extractor_class=FlattenBatchNormDropoutExtractor),
learning_starts=0,
seed=1,
tau=0, # do not clone the target
)
(
quantile_net_bias_before,
quantile_net_running_mean_before,
quantile_net_target_bias_before,
quantile_net_target_running_mean_before,
) = clone_qrdqn_batch_norm_stats(model)
model.learn(total_timesteps=200)
(
quantile_net_bias_after,
quantile_net_running_mean_after,
quantile_net_target_bias_after,
quantile_net_target_running_mean_after,
) = clone_qrdqn_batch_norm_stats(model)
assert ~th.isclose(quantile_net_bias_before, quantile_net_bias_after).all()
assert ~th.isclose(quantile_net_running_mean_before, quantile_net_running_mean_after).all()
assert th.isclose(quantile_net_target_bias_before, quantile_net_target_bias_after).all()
assert th.isclose(quantile_net_target_running_mean_before, quantile_net_target_running_mean_after).all()
def test_tqc_train_with_batch_norm():
model = TQC(
"MlpPolicy",
"Pendulum-v0",
policy_kwargs=dict(net_arch=[16, 16], features_extractor_class=FlattenBatchNormDropoutExtractor),
learning_starts=0,
tau=0, # do not copy the target
seed=1,
)
(
actor_bias_before,
actor_running_mean_before,
critic_bias_before,
critic_running_mean_before,
critic_target_bias_before,
critic_target_running_mean_before,
) = clone_tqc_batch_norm_stats(model)
model.learn(total_timesteps=200)
(
actor_bias_after,
actor_running_mean_after,
critic_bias_after,
critic_running_mean_after,
critic_target_bias_after,
critic_target_running_mean_after,
) = clone_tqc_batch_norm_stats(model)
assert ~th.isclose(actor_bias_before, actor_bias_after).all()
assert ~th.isclose(actor_running_mean_before, actor_running_mean_after).all()
assert ~th.isclose(critic_bias_before, critic_bias_after).all()
assert ~th.isclose(critic_running_mean_before, critic_running_mean_after).all()
assert th.isclose(critic_target_bias_before, critic_target_bias_after).all()
assert th.isclose(critic_target_running_mean_before, critic_target_running_mean_after).all()
@pytest.mark.parametrize("model_class", [QRDQN, TQC])
def test_offpolicy_collect_rollout_batch_norm(model_class):
if model_class in [QRDQN]:
env_id = "CartPole-v1"
else:
env_id = "Pendulum-v0"
clone_helper = CLONE_HELPERS[model_class]
learning_starts = 10
model = model_class(
"MlpPolicy",
env_id,
policy_kwargs=dict(net_arch=[16, 16], features_extractor_class=FlattenBatchNormDropoutExtractor),
learning_starts=learning_starts,
seed=1,
gradient_steps=0,
train_freq=1,
)
batch_norm_stats_before = clone_helper(model)
model.learn(total_timesteps=100)
batch_norm_stats_after = clone_helper(model)
# No change in batch norm params
for param_before, param_after in zip(batch_norm_stats_before, batch_norm_stats_after):
assert th.isclose(param_before, param_after).all()
@pytest.mark.parametrize("model_class", [QRDQN, TQC])
@pytest.mark.parametrize("env_id", ["Pendulum-v0", "CartPole-v1"])
def test_predict_with_dropout_batch_norm(model_class, env_id):
if env_id == "CartPole-v1":
if model_class in [TQC]:
return
elif model_class in [QRDQN]:
return
model_kwargs = dict(seed=1)
clone_helper = CLONE_HELPERS[model_class]
if model_class in [QRDQN, TQC]:
model_kwargs["learning_starts"] = 0
else:
model_kwargs["n_steps"] = 64
policy_kwargs = dict(
features_extractor_class=FlattenBatchNormDropoutExtractor,
net_arch=[16, 16],
)
model = model_class("MlpPolicy", env_id, policy_kwargs=policy_kwargs, verbose=1, **model_kwargs)
batch_norm_stats_before = clone_helper(model)
env = model.get_env()
observation = env.reset()
first_prediction, _ = model.predict(observation, deterministic=True)
for _ in range(5):
prediction, _ = model.predict(observation, deterministic=True)
np.testing.assert_allclose(first_prediction, prediction)
batch_norm_stats_after = clone_helper(model)
# No change in batch norm params
for param_before, param_after in zip(batch_norm_stats_before, batch_norm_stats_after):
assert th.isclose(param_before, param_after).all()
| 35.791822
| 125
| 0.745015
| 818
| 0.084961
| 0
| 0
| 2,223
| 0.230889
| 0
| 0
| 1,129
| 0.117262
|
025a4cb24f7a49faae7c43b7347971470e80c885
| 880
|
py
|
Python
|
test_harness.py
|
alexk307/server-exercise
|
31c76a3b370334a22787e06b4c28f8c65f4dd4ff
|
[
"Apache-2.0"
] | null | null | null |
test_harness.py
|
alexk307/server-exercise
|
31c76a3b370334a22787e06b4c28f8c65f4dd4ff
|
[
"Apache-2.0"
] | null | null | null |
test_harness.py
|
alexk307/server-exercise
|
31c76a3b370334a22787e06b4c28f8c65f4dd4ff
|
[
"Apache-2.0"
] | null | null | null |
from requests import post
from random import randrange
from uuid import uuid4
import base64
import json
PORT = 6789
MAX_SIZE_UDP = 65535
HEADER_SIZE = 12
NUM_TRANSACTIONS = 10
SERVER = 'http://localhost:1234/add'
def main():
for i in range(NUM_TRANSACTIONS):
        # Pseudo-random transaction ID
transaction_id = randrange(1, 100)
payload = str(uuid4())
        # Break the payload indices into randomly sized chunks
        l = list(range(1000))
        pieces = randrange(1, 100)
        chunks = [l[i:i + pieces] for i in range(0, len(l), pieces)]
for chunk in chunks:
fragment = {
'offset': chunk[-1],
'trans_id': transaction_id,
'payload': base64.b64encode(payload),
'size': len(chunk)
}
post(SERVER, json.dumps(fragment))
if __name__ == '__main__':
main()
| 22
| 69
| 0.582955
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 133
| 0.151136
|
025c24bac13de507908c7c75d29225711dbc0aef
| 2,414
|
py
|
Python
|
checkmate_comp/experiments/table_approx_speedup_ratios.py
|
uwsampl/dtr-prototype
|
eff53cc4804cc7d6246a6e5086861ce2b846f62b
|
[
"Linux-OpenIB"
] | 90
|
2020-06-18T05:32:06.000Z
|
2022-03-28T13:05:17.000Z
|
checkmate_comp/experiments/table_approx_speedup_ratios.py
|
merrymercy/dtr-prototype
|
bf40e182453a7d8d23581ea68f32a9d7d2037d62
|
[
"Linux-OpenIB"
] | 5
|
2020-07-02T02:25:16.000Z
|
2022-03-24T05:50:30.000Z
|
checkmate_comp/experiments/table_approx_speedup_ratios.py
|
uwsampl/dtr-prototype
|
eff53cc4804cc7d6246a6e5086861ce2b846f62b
|
[
"Linux-OpenIB"
] | 13
|
2020-06-27T07:01:54.000Z
|
2022-01-18T07:31:01.000Z
|
from experiments.common.definitions import remat_data_dir
import numpy as np
import pandas as pd
import glob
import re
# compute aggregated tables of max and geomean lp approximation ratios
exp_name_re = re.compile(r"^(?P<platform>.+?)_(?P<model_name>.+?)_(?P<batch_size>[0-9]+?)_(?P<input_shape>None|.+?)$")
dfs = []
for path in (remat_data_dir() / 'budget_sweep').glob('**/slowdowns.csv'):
slowdown_df = pd.read_csv(path)
matches = exp_name_re.match(path.parents[0].name)
model_name = matches.group('model_name')
slowdown_df['Model name'] = [model_name] * len(slowdown_df)
dfs.append(slowdown_df)
df = pd.concat(dfs)
del df['Unnamed: 0']
for valuekey in ['geomean_slowdown', 'max']:
pivot_df = pd.pivot_table(df, values=valuekey, index=['Model name'], columns=['method'])
pivot_df.to_csv(remat_data_dir() / 'budget_sweep' / f"{valuekey}_aggr.csv")
# compute lp relaxation speedups
ilp_runtime_dict = {}
lp_runtime_dict = {}
for model in ['p32xlarge_vgg_unet_32_None', 'p32xlarge_ResNet50_256_None', 'p32xlarge_MobileNet_512_None', 'p32xlarge_VGG16_256_None', 'p32xlarge_VGG19_256_None']:
ilp_matcher = re.compile(r"Explored [0-9]+ nodes \([0-9]+ simplex iterations\) in (?P<ilp_runtime>[0-9\.]+) seconds")
lp_matcher = re.compile(r"Solved in [0-9]+ iterations and (?P<lp_runtime>[0-9\.]+) seconds")
ilp_runtimes = []
for path in (remat_data_dir() / 'budget_sweep' / model / 'ilp_log').glob('./*.log'):
with path.open('r') as f:
file_contents = f.read()
if 'Model is infeasible' in file_contents:
continue
match = ilp_matcher.search(file_contents)
ilp_runtimes.append(float(match.group('ilp_runtime')))
lp_runtimes = []
for path in (remat_data_dir() / 'budget_sweep' / 'p32xlarge_vgg_unet_32_None' / 'lp_det_05').glob('./*.log'):
with path.open('r') as f:
file_contents = f.read()
if 'Model is infeasible' in file_contents:
continue
match = lp_matcher.search(file_contents)
lp_runtimes.append(float(match.group('lp_runtime')))
print("Speedup for {} is {:0.2f} ({:.2f} versus {:.2f}, count {} vs {})".format(model, np.median(ilp_runtimes) / np.median(lp_runtimes), np.mean(ilp_runtimes), np.mean(lp_runtimes), len(ilp_runtimes), len(lp_runtimes)))
ilp_runtime_dict[model] = ilp_runtimes
lp_runtime_dict[model] = lp_runtimes
| 47.333333
| 223
| 0.67937
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 871
| 0.360812
|
025c491da627375770263331eb452c03d4b317b0
| 431
|
py
|
Python
|
src/terra/contracts/levana.py
|
fentas/staketaxcsv
|
ad37a32d8864111dbf88e926b80eb4ccacb921c6
|
[
"MIT"
] | null | null | null |
src/terra/contracts/levana.py
|
fentas/staketaxcsv
|
ad37a32d8864111dbf88e926b80eb4ccacb921c6
|
[
"MIT"
] | null | null | null |
src/terra/contracts/levana.py
|
fentas/staketaxcsv
|
ad37a32d8864111dbf88e926b80eb4ccacb921c6
|
[
"MIT"
] | null | null | null |
# known contracts from protocol
CONTRACTS = [
# NFT - Meteor Dust
"terra1p70x7jkqhf37qa7qm4v23g4u4g8ka4ktxudxa7",
# NFT - Eggs
"terra1k0y373yxqne22pc9g7jvnr4qclpsxtafevtrpg",
# NFT - Dragons
"terra1vhuyuwwr4rkdpez5f5lmuqavut28h5dt29rpn6",
# NFT - Loot
"terra14gfnxnwl0yz6njzet4n33erq5n70wt79nm24el",
]
def handle(exporter, elem, txinfo, contract):
print(f"Levana! {contract}")
#print(elem)
| 26.9375
| 51
| 0.723898
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 306
| 0.709977
|
025c55086785bd2358aa07697fa9e5ff75a7e9fe
| 2,268
|
py
|
Python
|
github/migrations/0007_auto_20201003_1239.py
|
h3nnn4n/git-o-matic-9k
|
d8241cc768591e0f41c02b2057d7b56697a4cc86
|
[
"MIT"
] | null | null | null |
github/migrations/0007_auto_20201003_1239.py
|
h3nnn4n/git-o-matic-9k
|
d8241cc768591e0f41c02b2057d7b56697a4cc86
|
[
"MIT"
] | null | null | null |
github/migrations/0007_auto_20201003_1239.py
|
h3nnn4n/git-o-matic-9k
|
d8241cc768591e0f41c02b2057d7b56697a4cc86
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.2 on 2020-10-03 12:39
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('github', '0006_repository_open_issues_count'),
]
operations = [
migrations.RenameField(
model_name='developer',
old_name='user_name',
new_name='login',
),
migrations.AddField(
model_name='developer',
name='bio',
field=models.TextField(blank=True),
),
migrations.AddField(
model_name='developer',
name='company',
field=models.TextField(blank=True),
),
migrations.AddField(
model_name='developer',
name='created_at',
field=models.DateTimeField(default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='developer',
name='email',
field=models.TextField(blank=True),
),
migrations.AddField(
model_name='developer',
name='followers',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='developer',
name='following',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='developer',
name='location',
field=models.TextField(blank=True),
),
migrations.AddField(
model_name='developer',
name='name',
field=models.TextField(default=''),
preserve_default=False,
),
migrations.AddField(
model_name='developer',
name='public_gists',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='developer',
name='public_repos',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='developer',
name='updated_at',
field=models.DateTimeField(default=django.utils.timezone.now),
preserve_default=False,
),
]
| 29.076923
| 74
| 0.543651
| 2,146
| 0.946208
| 0
| 0
| 0
| 0
| 0
| 0
| 353
| 0.155644
|
025c8c73c3dda45b9c81e36fafb6a8137598b6d5
| 254
|
py
|
Python
|
tests/unit/test_databeardb.py
|
chrisrycx/pyDataLogger
|
21094da9de54ab467519a26680247ddc3efa6696
|
[
"MIT"
] | 1
|
2020-09-25T16:25:09.000Z
|
2020-09-25T16:25:09.000Z
|
tests/unit/test_databeardb.py
|
chrisrycx/pyDataLogger
|
21094da9de54ab467519a26680247ddc3efa6696
|
[
"MIT"
] | 4
|
2020-10-06T17:16:58.000Z
|
2020-12-18T17:06:16.000Z
|
tests/unit/test_databeardb.py
|
chrisrycx/pyDataLogger
|
21094da9de54ab467519a26680247ddc3efa6696
|
[
"MIT"
] | 2
|
2020-03-24T14:32:29.000Z
|
2020-08-05T17:38:24.000Z
|
'''
A unit test for databearDB.py
Runs manually at this point...
'''
import unittest
from databear.databearDB import DataBearDB
#Tests
class testDataBearDB(unittest.TestCase):
def setUp(self):
        '''
        Set up fixtures for DataBearDB tests.
        '''
pass
| 14.111111
| 42
| 0.622047
| 111
| 0.437008
| 0
| 0
| 0
| 0
| 0
| 0
| 101
| 0.397638
|
025ca2353166896f2415d32f2b2cf83266307837
| 19
|
py
|
Python
|
dbt/adapters/athena/__version__.py
|
sacundim/dbt-athena
|
120c9d3c88da98ec11ddfcf0a0a3fda49538f197
|
[
"Apache-2.0"
] | 92
|
2019-03-23T07:23:55.000Z
|
2021-06-15T18:18:32.000Z
|
dbt/adapters/athena/__version__.py
|
sacundim/dbt-athena
|
120c9d3c88da98ec11ddfcf0a0a3fda49538f197
|
[
"Apache-2.0"
] | 156
|
2019-03-21T03:26:58.000Z
|
2021-06-29T15:30:51.000Z
|
dbt/adapters/athena/__version__.py
|
sacundim/dbt-athena
|
120c9d3c88da98ec11ddfcf0a0a3fda49538f197
|
[
"Apache-2.0"
] | 58
|
2019-04-12T09:09:43.000Z
|
2021-06-24T15:25:11.000Z
|
version = "0.21.0"
| 9.5
| 18
| 0.578947
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8
| 0.421053
|
025d05b924cc7305e801b76dce5c6ec01a360e7c
| 1,161
|
py
|
Python
|
dxtbx/conftest.py
|
jbeilstenedmands/cctbx_project
|
c228fb15ab10377f664c39553d866281358195aa
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
dxtbx/conftest.py
|
jbeilstenedmands/cctbx_project
|
c228fb15ab10377f664c39553d866281358195aa
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
dxtbx/conftest.py
|
jbeilstenedmands/cctbx_project
|
c228fb15ab10377f664c39553d866281358195aa
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
#
# See https://github.com/dials/dials/wiki/pytest for documentation on how to
# write and run pytest tests, and an overview of the available features.
#
from __future__ import absolute_import, division, print_function
import os
import pytest
@pytest.fixture(scope="session")
def dials_regression():
'''Return the absolute path to the dials_regression module as a string.
Skip the test if dials_regression is not installed.'''
try:
import dials_regression as dr
except ImportError:
pytest.skip("dials_regression required for this test")
return os.path.dirname(dr.__file__)
def pytest_addoption(parser):
'''Add '--regression' options to pytest.'''
parser.addoption("--regression", action="store_true", default=False,
help="run (time-intensive) regression tests")
def pytest_collection_modifyitems(config, items):
'''Tests marked as regression are only run with --regression.
'''
if not config.getoption("--regression"):
skip_regression = pytest.mark.skip(reason="Test only runs with --regression")
for item in items:
if "regression" in item.keywords:
item.add_marker(skip_regression)
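# Hedged sketch (illustrative, not a real test): a test opts in to the gate
# above by carrying the 'regression' marker; the leading underscore keeps this
# example out of normal collection.
@pytest.mark.regression
def _example_regression_test():
    pass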
| 33.171429
| 81
| 0.731266
| 0
| 0
| 0
| 0
| 350
| 0.301464
| 0
| 0
| 566
| 0.487511
|
025e3d2d32267b02443190a02969375302ba67a9
| 978
|
py
|
Python
|
ietf/review/migrations/0020_auto_20191115_2059.py
|
hassanakbar4/ietfdb
|
cabee059092ae776015410640226064331c293b7
|
[
"BSD-3-Clause"
] | 25
|
2022-03-05T08:26:52.000Z
|
2022-03-30T15:45:42.000Z
|
ietf/review/migrations/0020_auto_20191115_2059.py
|
hassanakbar4/ietfdb
|
cabee059092ae776015410640226064331c293b7
|
[
"BSD-3-Clause"
] | 219
|
2022-03-04T17:29:12.000Z
|
2022-03-31T21:16:14.000Z
|
ietf/review/migrations/0020_auto_20191115_2059.py
|
hassanakbar4/ietfdb
|
cabee059092ae776015410640226064331c293b7
|
[
"BSD-3-Clause"
] | 22
|
2022-03-04T15:34:34.000Z
|
2022-03-28T13:30:59.000Z
|
# Copyright The IETF Trust 2019-2020, All Rights Reserved
# -*- coding: utf-8 -*-
# Generated by Django 1.11.26 on 2019-11-15 20:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('review', '0019_auto_20191023_0829'),
]
operations = [
migrations.AddField(
model_name='reviewsecretarysettings',
name='days_to_show_in_reviewer_list',
field=models.IntegerField(blank=True, help_text='Maximum number of days to show in reviewer list for completed items.', null=True),
),
migrations.AddField(
model_name='reviewsecretarysettings',
name='max_items_to_show_in_reviewer_list',
field=models.IntegerField(blank=True, help_text='Maximum number of completed items to show for one reviewer in the reviewer list view, the list is also filtered by the days to show in reviews list setting.', null=True),
),
]
| 36.222222
| 231
| 0.677914
| 800
| 0.817996
| 0
| 0
| 0
| 0
| 0
| 0
| 507
| 0.518405
|
025e72e9d1d41e03246451d111dab4b24c0f7bd1
| 442
|
py
|
Python
|
AlgoExpert/PalindromeCheck.py
|
akhil-ece/160Days
|
545d1c70c79c6ef2341137a88e6a09f81f330ea4
|
[
"MIT"
] | null | null | null |
AlgoExpert/PalindromeCheck.py
|
akhil-ece/160Days
|
545d1c70c79c6ef2341137a88e6a09f81f330ea4
|
[
"MIT"
] | null | null | null |
AlgoExpert/PalindromeCheck.py
|
akhil-ece/160Days
|
545d1c70c79c6ef2341137a88e6a09f81f330ea4
|
[
"MIT"
] | null | null | null |
def isPalindromeRecursive(string, i=0):
    j = len(string) - 1 - i
    return True if i > j else string[i] == string[j] and isPalindromeRecursive(string, i + 1)
def isPalindromeSlice(string):
    return string == string[::-1]
def isPalindromeUsingIndexes(string):
    lIdx = 0
    rIdx = len(string) - 1
    while lIdx < rIdx:
        if string[lIdx] != string[rIdx]:
            return False
        else:
            lIdx += 1
            rIdx -= 1
    return True
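# Quick sanity checks for the three variants above (illustrative):
if __name__ == "__main__":
    assert isPalindromeRecursive("racecar")
    assert isPalindromeSlice("racecar")
    assert isPalindromeUsingIndexes("racecar")
    assert not isPalindromeUsingIndexes("python")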
| 24.555556
| 82
| 0.561086
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|