hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
14911714ce686af966b277b1e70139fd1fd7f793 | 597 | py | Python | notify.py | MoveOnOrg/merkle | 764351cf6a21bb718343ae7488735f077aee3afe | [
"MIT"
] | null | null | null | notify.py | MoveOnOrg/merkle | 764351cf6a21bb718343ae7488735f077aee3afe | [
"MIT"
] | 50 | 2019-08-15T16:10:19.000Z | 2021-06-25T15:20:44.000Z | notify.py | MoveOnOrg/merkle | 764351cf6a21bb718343ae7488735f077aee3afe | [
"MIT"
] | null | null | null | import os
import sys
import slackweb
from pywell.entry_points import run_from_cli
DESCRIPTION = 'Send notification to Slack.'
ARG_DEFINITIONS = {
'SLACK_WEBHOOK': 'Web hook URL for Slack.',
'SLACK_CHANNEL': 'Slack channel to send to.',
'TEXT': 'Text to send.'
}
REQUIRED_ARGS = [
'SLACK_WEBHOOK', 'SLACK_CHANNEL', 'TEXT'
]
def main(args):
    """Send args.TEXT to the Slack channel args.SLACK_CHANNEL.

    Uses the webhook URL in args.SLACK_WEBHOOK and returns whatever
    slackweb reports for the notification attempt.
    """
    client = slackweb.Slack(url=args.SLACK_WEBHOOK)
    return client.notify(text=args.TEXT, channel=args.SLACK_CHANNEL)
# Script entry point: run_from_cli (pywell) presumably parses and validates
# the declared REQUIRED_ARGS before invoking main() -- see pywell docs.
if __name__ == '__main__':
    run_from_cli(main, DESCRIPTION, ARG_DEFINITIONS, REQUIRED_ARGS)
| 21.321429 | 69 | 0.716918 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 178 | 0.298157 |
14913f5e91c0397a0cf3445e632e873adf524250 | 4,203 | py | Python | src/lib/earlystopping.py | dreizehnutters/pcapae | d436e5a8d74656875918d612d1700e063ec08bbb | [
"MIT"
] | null | null | null | src/lib/earlystopping.py | dreizehnutters/pcapae | d436e5a8d74656875918d612d1700e063ec08bbb | [
"MIT"
] | null | null | null | src/lib/earlystopping.py | dreizehnutters/pcapae | d436e5a8d74656875918d612d1700e063ec08bbb | [
"MIT"
] | null | null | null | from os import path, makedirs, walk ,remove, scandir, unlink
from numpy import inf
from torch import save as t_save
from lib.utils import sort_human, BOLD, CLR
class EarlyStopping:
    """Stops training early when the validation loss stops improving.

    Also checkpoints the model under ``<log_path>/save_model/<exp_tag>``:
    a rolling "LAST" checkpoint, intermediate in-epoch snapshots, and the
    single best model so far (kept alone under ``best/``).
    """

    def __init__(self, log_path, patience=7, model=None, verbose=False, exp_tag=""):
        """
        Args:
            log_path: Base directory under which checkpoints are written.
            patience (int): How many validations without improvement to
                tolerate before signalling a stop. Default: 7
            model: Optional model whose architecture parameters are stashed
                and merged into every saved checkpoint payload.
            verbose (bool): If True, print a message whenever a new best
                validation loss is recorded. Default: False
            exp_tag (str): Experiment tag naming the checkpoint directory.
        """
        self.patience = patience
        self.verbose = verbose
        self.counter = 0          # validations since the last improvement
        self.best_score = None    # best (negated) validation loss seen so far
        self.early_stop = False   # flag polled by the training loop
        self.val_loss_min = inf
        self.global_min_loss = inf
        self.save_path = f"{log_path}/save_model/{exp_tag}"
        if not path.isdir(self.save_path):
            makedirs(self.save_path)
        best_dir = f"{self.save_path}/best/"
        if not path.isdir(best_dir):
            makedirs(best_dir)
        # NOTE(review): the "model" later passed to __call__ appears to be a
        # dict-like checkpoint payload (it supports .update) -- confirm.
        if model is not None:
            self.meta_info = {'meta': (model.encoder_params,
                                       model.decoder_params,
                                       model.n_frames_input,
                                       model.n_frames_output)}
        else:
            self.meta_info = {}

    def __str__(self):
        pairs = (f"{k}={v}" for k, v in vars(self).items())
        return '\n'.join(pairs)

    def __call__(self, val_loss, model, epoch, step=0):
        """Record one validation result, checkpoint, and update the stop flag.

        Args:
            val_loss: Current validation loss (lower is better).
            model: Checkpoint payload to save (meta info is merged in).
            epoch: Current epoch (used in checkpoint filenames).
            step: In-epoch step; non-zero forces an intermediate snapshot.
        """
        score = -val_loss
        model.update(self.meta_info)
        if step != 0:
            # Mid-epoch call: always snapshot, never touch the patience logic.
            self.save_checkpoint(val_loss, model, epoch, step)
        else:
            if self.best_score is None:
                # Very first validation result.
                self.best_score = score
                self.save_checkpoint(val_loss, model, epoch, step)
            elif score < self.best_score:
                # No improvement: spend one unit of patience.
                self.counter += 1
                if self.counter >= self.patience:
                    self.early_stop = True
                    print(f"{BOLD}[*] early stopping at epoch {epoch} !{CLR}")
                else:
                    print(f"[*] early stopping counter: {BOLD}{self.counter}/{self.patience}{CLR}")
            else:
                # Improvement: checkpoint and reset the patience counter.
                self.best_score = score
                self.save_checkpoint(val_loss, model, epoch, step)
                self.counter = 0
        # The most recent model is always kept, improvement or not.
        t_save(model, f"{self.save_path}/LAST_checkpoint_{epoch}_{step}_{val_loss:.6f}.pth.tar")

    def del_old_models(self, keep=10):
        """When more than *keep* files exist in save_path, delete the oldest keep//2."""
        _, _, filenames = next(walk(self.save_path))
        if len(filenames) > keep:
            for stale in sort_human(filenames)[:keep // 2]:
                remove(path.join(self.save_path, stale))

    def save_checkpoint(self, val_loss, model, epoch, step=0):
        """Persist *model*; track the global best loss on epoch-end calls.

        Args:
            val_loss: Validation loss embedded in the checkpoint filename.
            model: Checkpoint payload handed to torch.save.
            epoch: Epoch number for the filename.
            step: Non-zero marks an intermediate ("IE") in-epoch snapshot.
        """
        if step != 0:
            # Intermediate in-epoch snapshot; prune older files first.
            prefix = "IE"
            print(f"[$] saveing model at step: {step} in epoch {epoch}")
            self.del_old_models()
            t_save(model, f"{self.save_path}/{prefix}checkpoint_{epoch}_{step}_{val_loss}.pth.tar")
        else:
            if val_loss < self.global_min_loss:
                if self.verbose:
                    print(f"[*] validation loss record {BOLD}{val_loss}{CLR} in epoch: {BOLD}{epoch}{CLR}@{step}")
                self.global_min_loss = val_loss
                prefix = "best/"
                # Only one best model is kept: clear the best/ directory first.
                for entry in scandir(f"{self.save_path}/{prefix}"):
                    unlink(entry.path)
            else:
                prefix = ""
            t_save(model, f"{self.save_path}/{prefix}checkpoint_{epoch}_{step}_{val_loss}.pth.tar")
            self.val_loss_min = val_loss
| 38.209091 | 114 | 0.546039 | 4,038 | 0.960742 | 0 | 0 | 0 | 0 | 0 | 0 | 1,387 | 0.330002 |
149261fa7cc26a67d1e1edf484e1fa0b7d33452a | 97 | py | Python | ldapauthenticator/__init__.py | jbmarcille/ldapauthenticator | f6037d72bd8c76317b8741d96de1c7b1dee26298 | [
"BSD-3-Clause"
] | null | null | null | ldapauthenticator/__init__.py | jbmarcille/ldapauthenticator | f6037d72bd8c76317b8741d96de1c7b1dee26298 | [
"BSD-3-Clause"
] | null | null | null | ldapauthenticator/__init__.py | jbmarcille/ldapauthenticator | f6037d72bd8c76317b8741d96de1c7b1dee26298 | [
"BSD-3-Clause"
] | null | null | null | from ldapauthenticator.ldapauthenticator import LDAPAuthenticator
__all__ = [LDAPAuthenticator]
| 24.25 | 65 | 0.876289 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
14938eb4c17ff96d22edd6168c88c690385d934d | 2,207 | py | Python | tests/test_tc100.py | radoering/flake8-type-checking | 02c2af870e8098d0c5bd623591c5b184c0614213 | [
"BSD-3-Clause"
] | 19 | 2021-04-21T14:12:24.000Z | 2022-03-13T07:42:26.000Z | tests/test_tc100.py | radoering/flake8-type-checking | 02c2af870e8098d0c5bd623591c5b184c0614213 | [
"BSD-3-Clause"
] | 29 | 2021-04-21T14:31:12.000Z | 2022-03-26T08:57:54.000Z | tests/test_tc100.py | sondrelg/flake8-typing-only-imports | d27ffece2cdd5b57a529c8d9f45ba2173c29066f | [
"BSD-3-Clause"
] | 2 | 2021-04-08T08:04:44.000Z | 2021-04-14T11:00:18.000Z | """
This file tests the TC100 error:
>> Missing 'from __future__ import annotations' import
The idea is that we should raise one of these errors if a file contains any type-checking imports and one is missing.
One thing to note: futures imports should always be at the top of a file, so we only need to check one line.
"""
import pytest
from flake8_type_checking.codes import TC100
from tests import _get_error, mod
examples = [
# No errors
('', set()),
('if TYPE_CHECKING:\n\tx = 2', set()),
# Unused import
('if TYPE_CHECKING:\n\tfrom typing import Dict', {'1:0 ' + TC100}),
('if TYPE_CHECKING:\n\tfrom typing import Dict, Any', {'1:0 ' + TC100}),
(f'if TYPE_CHECKING:\n\timport {mod}', {'1:0 ' + TC100}),
(f'if TYPE_CHECKING:\n\tfrom {mod} import constants', {'1:0 ' + TC100}),
# Used imports
('if TYPE_CHECKING:\n\tfrom typing import Dict\nx = Dict', set()),
('if TYPE_CHECKING:\n\tfrom typing import Dict, Any\nx, y = Dict, Any', set()),
(f'if TYPE_CHECKING:\n\timport {mod}\nx = {mod}.constants.TC001', set()),
(f'if TYPE_CHECKING:\n\tfrom {mod} import constants\nprint(constants)', set()),
# Import used for AnnAssign
('if TYPE_CHECKING:\n\tfrom typing import Dict\nx: Dict[str, int]', {'1:0 ' + TC100}),
('if TYPE_CHECKING:\n\tfrom typing import Dict\nx: Dict[str, int] = {}', {'1:0 ' + TC100}),
# Import used for arg
('if TYPE_CHECKING:\n\tfrom typing import Dict\ndef example(x: Dict[str, int]):\n\tpass', {'1:0 ' + TC100}),
('if TYPE_CHECKING:\n\tfrom typing import Dict\ndef example(x: Dict[str, int] = {}):\n\tpass', {'1:0 ' + TC100}),
# Import used for returns
('if TYPE_CHECKING:\n\tfrom typing import Dict\ndef example() -> Dict[str, int]:\n\tpass', {'1:0 ' + TC100}),
# Probably not much point in adding many more test cases, as the logic for TC100
# is not dependent on the type of annotation assignments; it's purely concerned with
# whether an ast.Import or ast.ImportFrom exists within a type checking block
]
@pytest.mark.parametrize('example, expected', examples)
def test_TC100_errors(example, expected):
    """Each snippet in *examples* must yield exactly the TC100 errors in *expected*."""
    assert _get_error(example, error_code_filter='TC100') == expected
| 46.957447 | 117 | 0.670594 | 0 | 0 | 0 | 0 | 167 | 0.075668 | 0 | 0 | 1,636 | 0.741278 |
1493af77067165c908670ab195f77e6b1342b801 | 6,628 | py | Python | scripts/plot_sorting.py | t1mm3/fluid_coprocessing | 2cec71e1b9cb52cccf6c29ccf7193b845e67bc48 | [
"BSD-3-Clause"
] | 2 | 2019-07-01T14:38:55.000Z | 2021-03-16T14:05:26.000Z | scripts/plot_sorting.py | t1mm3/fluid_coprocessing | 2cec71e1b9cb52cccf6c29ccf7193b845e67bc48 | [
"BSD-3-Clause"
] | null | null | null | scripts/plot_sorting.py | t1mm3/fluid_coprocessing | 2cec71e1b9cb52cccf6c29ccf7193b845e67bc48 | [
"BSD-3-Clause"
] | 1 | 2019-11-28T07:25:03.000Z | 2019-11-28T07:25:03.000Z | #!/bin/env python2
import matplotlib as mpl
mpl.use('pgf')
pgf_with_pgflatex = {
"pgf.texsystem": "pdflatex",
"pgf.rcfonts": False,
"pgf.preamble": [
r"\usepackage[utf8x]{inputenc}",
r"\usepackage[T1]{fontenc}",
# r"\usepackage{cmbright}",
]
}
mpl.rcParams.update(pgf_with_pgflatex)
mpl.rcParams['axes.axisbelow'] = True
kibi = 1024.0
mebi = kibi*1024.0
gibi = mebi*1024.0
kilo = 1000.0
mega = kilo * 1000.0
giga = mega * 1000.0
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import math
import matplotlib.ticker as mticker
from matplotlib.ticker import MultipleLocator, FuncFormatter
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
hatches = ["//", "--", "\\\\", "xx", "||", "++"]
framework_columns = ["Bloom Filter size (MiB)"," Block size (bytes)", "bits to sort", "Probe size", "Hash time (ms)", "Sort time (ms)", "Probe time (ms)", "Total throughput"]
result_path = "results"
def plot_sorting_throughput():
    """Plot total probe throughput against the number of sorted bits.

    Reads results/bench_bits.csv and writes plot_bf_sort_throughput.pgf,
    one log-log series per Bloom-filter size (16, 64 and 512 MiB).
    """
    df = pd.read_csv("{}/bench_bits.csv".format(result_path), sep=';',
                     usecols=['Bloom filter size (MiB)', 'bits to sort', 'Total throughput'])
    print(df)
    # One data series per Bloom-filter size.
    size16 = df[df['Bloom filter size (MiB)'] == 16]
    size64 = df[df['Bloom filter size (MiB)'] == 64]
    size512 = df[df['Bloom filter size (MiB)'] == 512]
    print(size16)
    fig, ax = plt.subplots()
    with pd.option_context('display.max_rows', None, 'display.max_columns', 100):
        print(size16)
    ofilename = "plot_bf_sort_throughput.pgf"
    ax.set_ylabel('Throughput (GProbe/s)')
    ax.set_xlabel('Sorted bits')
    sz_div = mebi * 8.0  # not used in this plot; kept for parity with siblings
    tp_div = giga        # probes/s -> GProbe/s
    ax.ticklabel_format(axis='x', style='plain')
    ax.set_xlim(1, 32, auto=True)
    ax.loglog(size16['bits to sort'], size16['Total throughput'] / tp_div,
              linestyle='--', marker='o', color=colors[0], label="BF Size 16MiB", basex=2)
    ax.loglog(size64['bits to sort'], size64['Total throughput'] / tp_div,
              linestyle='--', marker='x', color=colors[1], label="BF Size 64MiB", basex=2)
    ax.loglog(size512['bits to sort'], size512['Total throughput'] / tp_div,
              linestyle='--', marker='^', color=colors[2], label="BF Size 512MiB", basex=2)
    # Force plain (non-scientific, non-offset) labels on the log-scaled x axis.
    ax.xaxis.set_major_formatter(mticker.ScalarFormatter())
    ax.xaxis.get_major_formatter().set_scientific(False)
    ax.xaxis.get_major_formatter().set_useOffset(False)
    ax.xaxis.set_minor_formatter(mticker.ScalarFormatter())
    # Shrink the axes upwards so the legend fits below the plot.
    box = ax.get_position()
    ax.set_position([box.x0, box.y0 + box.height * 0.1, box.width, box.height * 0.9])
    ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.2), fancybox=False, ncol=3)
    fig.tight_layout()
    fig.savefig(ofilename, bbox_extra_artists=(), bbox_inches='tight')
    plt.close(fig)
def plot_sorting_time():
    """Plot throughput (top) and probe/sorting times (bottom) vs. sorted bits.

    Reads results/bench_bits.csv and writes plot_bf_sort_time.pgf with two
    stacked subplots that share the x axis.
    """
    df = pd.read_csv("{}/bench_bits.csv".format(result_path), sep=';',
                     usecols=['Bloom filter size (MiB)', 'bits to sort', 'Sort time (ms)', 'Probe time (ms)', 'Total throughput'])
    # One probe-time/throughput series per Bloom-filter size.
    size16 = df[df['Bloom filter size (MiB)'] == 16]
    size64 = df[df['Bloom filter size (MiB)'] == 64]
    size512 = df[df['Bloom filter size (MiB)'] == 512]
    # Sort-time series (taken from the 512 MiB rows).
    sort_df = df[df['Bloom filter size (MiB)'] == 512]
    fig, axes = plt.subplots(2, 1, sharex=True)
    top, bottom = axes[0], axes[1]
    # Give the stacked layout 50% more vertical room.
    fig.set_figheight(fig.get_figheight() * 1.5)
    with pd.option_context('display.max_rows', None, 'display.max_columns', 100):
        print(size16)
    ofilename = "plot_bf_sort_time.pgf"
    bottom.set_ylabel('Probe/Sorting time (ms)')
    bottom.set_xlabel('Sorted bits')
    bottom.grid(True)
    top.set_ylabel('Throughput (GProbe/s)')
    top.grid(True)
    sz_div = mebi * 8.0  # not used in this plot; kept for parity with siblings
    tp_div = giga        # probes/s -> GProbe/s
    bottom.set_xlim(1, 35)
    top.set_xlim(1, 35)
    bottom.set_ylim(10, 350)
    bottom.xaxis.set_ticks(np.arange(0, 33, 8))
    top.xaxis.set_ticks(np.arange(0, 33, 8))
    bottom.semilogy(size16['bits to sort'], size16['Probe time (ms)'], linestyle='--', marker='o', color=colors[0], label="16~MiB BF")
    bottom.semilogy(size64['bits to sort'], size64['Probe time (ms)'], linestyle='--', marker='x', color=colors[1], label="64~MiB BF")
    bottom.semilogy(size512['bits to sort'], size512['Probe time (ms)'], linestyle='--', marker='^', color=colors[2], label="512~MiB BF")
    bottom.semilogy(sort_df['bits to sort'], sort_df['Sort time (ms)'], linestyle='--', marker='+', color=colors[3], label="Sorting")
    top.semilogy(size16['bits to sort'], size16['Total throughput'] / tp_div, linestyle='--', marker='o', color=colors[0], label="16~MiB BF")
    top.semilogy(size64['bits to sort'], size64['Total throughput'] / tp_div, linestyle='--', marker='x', color=colors[1], label="64~MiB BF")
    top.semilogy(size512['bits to sort'], size512['Total throughput'] / tp_div, linestyle='--', marker='^', color=colors[2], label="512~MiB BF")
    # Plain numeric labels on both (log-scaled) y axes, top first then bottom.
    for axis in (top, bottom):
        axis.yaxis.set_major_formatter(mticker.ScalarFormatter())
        axis.yaxis.get_major_formatter().set_scientific(False)
        axis.yaxis.get_major_formatter().set_useOffset(False)
        axis.yaxis.set_minor_formatter(mticker.ScalarFormatter())
    bottom.legend(loc="center right", ncol=1)
    fig.tight_layout()
    fig.savefig(ofilename, bbox_extra_artists=(), bbox_inches='tight')
    plt.close(fig)
def main():
    """Generate both benchmark plots, using a uniform 15pt font."""
    mpl.rcParams.update({'font.size': 15})
    plot_sorting_throughput()
    plot_sorting_time()
if __name__ == '__main__':
main()
| 39.218935 | 178 | 0.650875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,458 | 0.370851 |
1493c59c1b444ee2035222130759f4547fe142d3 | 5,272 | py | Python | morf-python-api/build/lib/morf/utils/caching.py | jpgard/morf | f17afcacef68929a5ce9e7714208be1002a42418 | [
"MIT"
] | 14 | 2018-06-27T13:15:46.000Z | 2021-08-30T08:24:38.000Z | morf-python-api/build/lib/morf/utils/caching.py | jpgard/morf | f17afcacef68929a5ce9e7714208be1002a42418 | [
"MIT"
] | 58 | 2018-02-03T15:31:15.000Z | 2019-10-15T02:12:05.000Z | morf-python-api/build/lib/morf/utils/caching.py | jpgard/morf | f17afcacef68929a5ce9e7714208be1002a42418 | [
"MIT"
] | 7 | 2018-03-29T14:47:34.000Z | 2021-06-22T01:34:52.000Z | # Copyright (c) 2018 The Regents of the University of Michigan
# and the University of Pennsylvania
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Functions for caching data for MORF jobs.
"""
import os
import subprocess
import shutil
from urllib.parse import urlparse
import logging
from morf.utils.docker import load_docker_image
from morf.utils.log import set_logger_handlers, execute_and_log_output
from morf.utils.s3interface import sync_s3_bucket_cache
module_logger = logging.getLogger(__name__)
def make_course_session_cache_dir_fp(job_config, bucket, data_dir, course, session):
    """Return the local cache path for one course session.

    Layout: <job_config.cache_dir>/<bucket>/<data_dir>/<course>/<session>
    """
    return os.path.join(job_config.cache_dir, bucket, data_dir, course, session)
def update_raw_data_cache(job_config):
    """Sync every raw-data S3 bucket in job_config into the local cache.

    Each bucket is cached in its own named directory inside the job cache dir.

    :param job_config: MorfJobConfig object.
    :return: None
    """
    for bucket in job_config.raw_data_buckets:
        sync_s3_bucket_cache(job_config, bucket)
def update_proc_data_cache(job_config):
    """Sync the processed-data S3 bucket into the local cache.

    Assumes job_config carries at most one proc_data_bucket attribute.

    :param job_config: MorfJobConfig object.
    :return: None
    """
    bucket = getattr(job_config, "proc_data_bucket", None)
    sync_s3_bucket_cache(job_config, bucket)
def fetch_from_cache(job_config, cache_file_path, dest_dir):
    """
    Fetch a file from the cache for job_config into dest_dir, if it exists.
    :param job_config: MorfJobConfig object (expected to expose cache_dir when caching is enabled).
    :param cache_file_path: string, relative path to file in cache (this is identical to the directory path in s3; e.g. "/bucket/path/to/somefile.csv"
    :param dest_dir: absolute path of directory to fetch file into (will be created if not exists)
    :return: path to fetched file (string); return None if cache is not used or file is absent.
    """
    logger = set_logger_handlers(module_logger, job_config)
    logger.info("fetching file {} from cache".format(cache_file_path))
    # Bug fix: the original joined getattr(job_config, "cache_dir", None) into
    # os.path.join *before* checking hasattr, so a job without a cache_dir
    # raised TypeError instead of logging a warning and returning None.
    cache_dir = getattr(job_config, "cache_dir", None)
    abs_cache_file_path = os.path.join(cache_dir, cache_file_path) if cache_dir else None
    if abs_cache_file_path is not None and os.path.exists(abs_cache_file_path):
        if not os.path.exists(dest_dir):
            os.makedirs(dest_dir)
        dest_fp = shutil.copy(abs_cache_file_path, dest_dir)
    else:
        logger.warning("file {} does not exist in cache".format(abs_cache_file_path))
        dest_fp = None
    return dest_fp
def docker_cloud_login(job_config):
    """Log in to Docker Cloud with the credentials held in job_config.

    NOTE(review): the password is interpolated into the command line, making
    it visible in the process list/logs; consider `--password-stdin`.

    :param job_config: MorfJobConfig object.
    :return: None
    """
    login_cmd = "docker login --username={} --password={}".format(
        job_config.docker_cloud_username, job_config.docker_cloud_password)
    logger = set_logger_handlers(module_logger, job_config)
    execute_and_log_output(login_cmd, logger)
def docker_cloud_push(job_config, image_uuid):
    """Tag a local image with the job's morf_id and push it to Docker Cloud.

    :param job_config: MorfJobConfig object.
    :param image_uuid: Docker image uuid to tag and push.
    :return: the "<repo>:<morf_id>" path the image was pushed to.
    """
    logger = set_logger_handlers(module_logger, job_config)
    repo_and_tag = "{}:{}".format(job_config.docker_cloud_repo, job_config.morf_id)
    # Tag the image with the MORF job id, then push that tag.
    execute_and_log_output("docker tag {} {}".format(image_uuid, repo_and_tag), logger)
    execute_and_log_output("docker push {}".format(repo_and_tag), logger)
    return repo_and_tag
def cache_to_docker_hub(job_config, dir, image_name):
    """Load a Docker image from *dir* and push it to the MORF Docker Hub repo.

    :param job_config: MorfJobConfig object.
    :param dir: directory holding the image (parameter name kept for API
        compatibility even though it shadows the builtin ``dir``).
    :param image_name: name of the image to load.
    :return: the repo:tag path the image was pushed to.
    """
    logger = set_logger_handlers(module_logger, job_config)
    image_uuid = load_docker_image(dir, job_config, logger, image_name)
    docker_cloud_login(job_config)
    return docker_cloud_push(job_config, image_uuid)
| 41.1875 | 150 | 0.75588 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,792 | 0.52959 |
149847bd740c757e149d66da0c573d0fe5e56850 | 91 | py | Python | tests/basic/lambda4.py | Slater-Victoroff/pyjaco | 89c4e3c46399c5023b0e160005d855a01241c58a | [
"MIT"
] | 38 | 2015-01-01T18:08:59.000Z | 2022-02-18T08:57:27.000Z | tests/basic/lambda4.py | dusty-phillips/pyjaco | 066895ae38d1828498e529c1875cb88df6cbc54d | [
"MIT"
] | 1 | 2020-07-15T13:30:32.000Z | 2020-07-15T13:30:32.000Z | tests/basic/lambda4.py | Slater-Victoroff/pyjaco | 89c4e3c46399c5023b0e160005d855a01241c58a | [
"MIT"
] | 12 | 2016-03-07T09:30:49.000Z | 2021-09-05T20:38:47.000Z |
# Python 2 test input (pyjaco test suite) exercising nested lambdas.
la = []
for x in range(5):
    # Each element is `lambda x: (lambda q: q + x)(x)`: the lambda's own
    # parameter x shadows the loop variable, so every entry doubles its
    # argument regardless of the loop iteration it was created in.
    la.append(lambda x: (lambda q: q + x)(x))
# Python 2 print statement: la[3](1) evaluates to 1 + 1 == 2.
print la[3](1)
| 11.375 | 45 | 0.527473 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
14989b6b36688f784980d216d72aa340a8737398 | 153 | py | Python | python/ctypes/hello_rust/hello_rust_ctypes.py | JamesMcGuigan/ecosystem-research | bfd98bd5b0a2165f449eb36b368b54fe972374fe | [
"MIT"
] | 1 | 2019-01-01T02:04:27.000Z | 2019-01-01T02:04:27.000Z | python/ctypes/hello_rust/hello_rust_ctypes.py | JamesMcGuigan/ecosystem-research | bfd98bd5b0a2165f449eb36b368b54fe972374fe | [
"MIT"
] | 1 | 2020-03-09T17:51:00.000Z | 2020-03-09T17:51:00.000Z | python/ctypes/hello_rust/hello_rust_ctypes.py | JamesMcGuigan/ecosystem-research | bfd98bd5b0a2165f449eb36b368b54fe972374fe | [
"MIT"
] | null | null | null | from ctypes import *
# Load the Rust cdylib produced by `cargo build` (macOS .dylib path) and call
# its exported times2() through ctypes.
rust = cdll.LoadLibrary("./target/debug/libhello_rust.dylib")
answer = rust.times2(64)  # NOTE(review): computed but unused below
print('rust.times2(64)', rust.times2(64))
| 25.5 | 63 | 0.72549 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 53 | 0.346405 |
1498ede07d931cf49daf4b8c46bbfef73eb7fec5 | 2,323 | py | Python | black_jack.py | brynpatel/Deck-of-cards | 4d775940895d32fcb9ae25db17adc865c7e8befe | [
"MIT"
] | null | null | null | black_jack.py | brynpatel/Deck-of-cards | 4d775940895d32fcb9ae25db17adc865c7e8befe | [
"MIT"
] | null | null | null | black_jack.py | brynpatel/Deck-of-cards | 4d775940895d32fcb9ae25db17adc865c7e8befe | [
"MIT"
] | null | null | null | from deck_of_cards import *
def check(card1, card2):
    """Return True when the two cards match by number or by suit."""
    # TODO: add special cards to the matching rules
    same_number = card1.number == card2.number
    same_suit = card1.suit == card2.suit
    return same_number or same_suit
def turn(myCard, myHand, opponentsHand, deck):
    """Play one full turn: prompt the human for a move, then let the computer act.

    NOTE(review): the myCard argument is overwritten by input() right away, so
    its initial value is never used. Relies on the module-level discard_pile
    created in the game setup below.
    """
    cardplayed = False
    # Keep prompting until the player either plays a legal card or picks up.
    while cardplayed == False:
        myCard = input("what card would you like to put down?, press P to pick up. ")
        if myCard.lower() == "p":
            # Move the top deck card into the player's hand.
            deck.cards[0].move(deck.cards, myHand.cards)
            print("You picked up")
            cardplayed = True
        elif myCard.isdigit():
            if len(myHand.cards) >= int(myCard):
                # Convert the 1-based menu choice to a 0-based hand index.
                myCard = int(myCard)-1
                if check(myHand.cards[myCard], discard_pile.get_face_card()) == True:
                    print("You played", myHand.cards[myCard])
                    myHand.cards[myCard].move(myHand.cards, discard_pile.cards)
                    cardplayed = True
                else:
                    print("You can't play that card right now, try again")
            else:
                print("You don't have that many cards!")
        else:
            print("That is not a valid option, try again")
            cardplayed = False
    # Computer's move: play the first card matching the face-up card...
    for card in opponentsHand.cards:
        if check(card, discard_pile.get_face_card()) == True:
            print("I played", card)
            card.move(opponentsHand.cards, discard_pile.cards)
            return
    # ...otherwise draw the top deck card instead.
    deck.cards[0].move(deck.cards, opponentsHand.cards)
    print("I had to pick up")
# --- Game setup ---------------------------------------------------------
hand_size = 7
deck =Deck()
my_hand = Hand()
opponents_hand = Hand()
discard_pile = Discard_Pile()
my_card = 0
opponents_card = 0
win = False
deck.shuffle()
# Deal hand_size cards alternately to each player.
for i in range(hand_size):
    deck.deal(my_hand)
    deck.deal(opponents_hand)
print(my_hand)
#print(opponents_hand)
# Flip the top deck card to start the discard pile.
deck.cards[0].move(deck.cards, discard_pile.cards)
print(discard_pile.get_face_card())
# --- Main loop: alternate turns until one hand is emptied ---------------
while win == False:
    turn(my_card, my_hand, opponents_hand, deck)
    if len(my_hand.cards) == 0:
        print("You win")
        win = True
    elif len(opponents_hand.cards) == 0:
        print("You lose")
        win = True
    else:
        win = False
        print("=========================NEXT TURN======================")
        print(my_hand)
        print(discard_pile.get_face_card())
| 29.405063 | 85 | 0.577701 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 355 | 0.15282 |
1499d3b358461ba8ac5bf3d7291f9f342918734a | 674 | py | Python | dashboard/settings.py | hosseinmoghimi/instamarket | 6f2f557843ff3105c6ca62a8b85311f0de79a2fe | [
"MIT"
] | 3 | 2020-08-14T20:17:57.000Z | 2020-09-15T19:35:40.000Z | dashboard/settings.py | hosseinmoghimi/instamarket | 6f2f557843ff3105c6ca62a8b85311f0de79a2fe | [
"MIT"
] | 5 | 2020-08-16T21:47:12.000Z | 2020-08-17T03:18:10.000Z | dashboard/settings.py | hosseinmoghimi/instamarket | 6f2f557843ff3105c6ca62a8b85311f0de79a2fe | [
"MIT"
] | null | null | null |
# Dashboard settings: re-export project-wide values from instamarket.settings
# so dashboard modules can import them from a single place.
from instamarket import settings
ON_SERVER=settings.ON_SERVER
ON_HEROKU=settings.ON_HEROKU
ON_MAGGIE=settings.ON_MAGGIE
REMOTE_MEDIA=settings.REMOTE_MEDIA
# (a redundant duplicate ON_SERVER assignment was removed here)
DEBUG=settings.DEBUG
BASE_DIR=settings.BASE_DIR
COMING_SOON=settings.COMING_SOON
MYSQL=settings.MYSQL
TIME_ZONE=settings.TIME_ZONE
# Static / media file locations
STATIC_URL=settings.STATIC_URL
STATIC_ROOT=settings.STATIC_ROOT
MEDIA_URL=settings.MEDIA_URL
MEDIA_ROOT=settings.MEDIA_ROOT
# Site addressing
SITE_URL=settings.SITE_URL
ADMIN_URL=settings.ADMIN_URL
DOWNLOAD_ROOT=settings.DOWNLOAD_ROOT
PUSHER_IS_ENABLE=settings.PUSHER_IS_ENABLE
# Django hook: view rendered when CSRF verification fails.
CSRF_FAILURE_VIEW = 'dashboard.views.csrf_failure'
SITE_DOMAIN='http://www.khafonline.com'
| 22.466667 | 50 | 0.876855 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 57 | 0.08457 |
1499e05f9e701f51c62ae9adf1ada191425c6e1e | 326 | py | Python | mindpile/Utility/memo.py | MelbourneHighSchoolRobotics/Mindpile | 9dd0a14ee336810c2b62826afff4da8719455ba0 | [
"BSD-3-Clause"
] | 2 | 2021-02-16T22:21:36.000Z | 2021-02-17T03:16:30.000Z | mindpile/Utility/memo.py | MelbourneHighSchoolRobotics/Mindpile | 9dd0a14ee336810c2b62826afff4da8719455ba0 | [
"BSD-3-Clause"
] | 2 | 2021-02-17T03:20:24.000Z | 2021-04-30T06:46:02.000Z | mindpile/Utility/memo.py | MelbourneHighSchoolRobotics/mindpile | 9dd0a14ee336810c2b62826afff4da8719455ba0 | [
"BSD-3-Clause"
] | null | null | null | import functools
def memoise(func):
    """Cache the result of func's first successful call.

    Every later call returns that first result -- the arguments of
    subsequent calls are ignored entirely.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if wrapper.hasResult:
            return wrapper.result
        wrapper.result = func(*args, **kwargs)
        wrapper.hasResult = True
        return wrapper.result
    wrapper.result = None
    wrapper.hasResult = False
    return wrapper
| 23.285714 | 50 | 0.628834 | 0 | 0 | 0 | 0 | 208 | 0.638037 | 0 | 0 | 0 | 0 |
149d6e3aa3b814bd2610d018aafdae3c04933009 | 348 | py | Python | tests/__init__.py | UOC/dlkit | a9d265db67e81b9e0f405457464e762e2c03f769 | [
"MIT"
] | 2 | 2018-02-23T12:16:11.000Z | 2020-10-08T17:54:24.000Z | tests/__init__.py | UOC/dlkit | a9d265db67e81b9e0f405457464e762e2c03f769 | [
"MIT"
] | 87 | 2017-04-21T18:57:15.000Z | 2021-12-13T19:43:57.000Z | tests/__init__.py | UOC/dlkit | a9d265db67e81b9e0f405457464e762e2c03f769 | [
"MIT"
] | 1 | 2018-03-01T16:44:25.000Z | 2018-03-01T16:44:25.000Z | # Pytest fixtures to get different DLKit configs during tests
# Implemented from documentation here:
# https://docs.pytest.org/en/latest/unittest.html
import pytest
@pytest.fixture(scope="class", params=['TEST_SERVICE', 'TEST_SERVICE_FUNCTIONAL'])
def dlkit_service_config(request):
    """Class-scoped fixture: attach each DLKit config name to the test class."""
    request.cls.service_cfg = request.param
| 29 | 67 | 0.747126 | 0 | 0 | 0 | 0 | 177 | 0.508621 | 0 | 0 | 196 | 0.563218 |
149df9e3f7439d9013fa722d9aa4c7ae4e678566 | 1,432 | py | Python | dataloaders.py | mrubio-chavarria/project_2 | c78d5e4048af193770d52efb2c5a132f6eb6370c | [
"MIT"
] | null | null | null | dataloaders.py | mrubio-chavarria/project_2 | c78d5e4048af193770d52efb2c5a132f6eb6370c | [
"MIT"
] | null | null | null | dataloaders.py | mrubio-chavarria/project_2 | c78d5e4048af193770d52efb2c5a132f6eb6370c | [
"MIT"
] | null | null | null | #!/venv/bin python
"""
DESCRIPTION:
This file contains wrappers and variations on DataLoader.
"""
# Libraries
import os
from random import shuffle
import torch
import numpy as np
from torch.utils.data import Dataset
from resquiggle_utils import parse_resquiggle, window_resquiggle
from torch import nn
class CombinedDataLoader:
    """Cycles through several dataloaders, yielding one batch per call.

    Each call to ``next()`` draws a batch from the current dataloader and
    then advances (wrapping around), so consecutive calls interleave the
    wrapped loaders round-robin.

    NOTE(review): a fresh iterator is built on every call, so for a plain
    sequence this always returns its first batch -- confirm this is the
    intended sampling behaviour for the wrapped dataloaders.
    """

    # Methods
    def __init__(self, *args):
        """Store the dataloaders to combine and start at the first one."""
        self.current_dataloader = 0
        self.dataloaders = args

    def __next__(self):
        """Return one batch from the current loader, then rotate onward."""
        loader = self.dataloaders[self.current_dataloader]
        batch = next(iter(loader))
        self.current_dataloader = (self.current_dataloader + 1) % len(self.dataloaders)
        return batch
class CustomisedDataLoader:
    """Dataloader driven by a user-supplied sampler and collate function.

    The sampler is called once as ``sampler(dataset, batch_size,
    shuffle=shuffle)`` and must return an iterable of batches; iteration
    stops at the first empty (falsy) batch.
    """

    # Methods
    def __init__(self, dataset, batch_size, sampler, collate_fn, shuffle):
        self.dataset = dataset
        self.batch_size = batch_size
        self.sampler = sampler
        self.collate_fn = collate_fn
        self.shuffle = shuffle
        # Batches are drawn eagerly, once, at construction time.
        self.sampled_data = self.sampler(self.dataset, self.batch_size, shuffle=self.shuffle)

    def __iter__(self):
        for batch in self.sampled_data:
            if not batch:
                # Bug fix (PEP 479): raising StopIteration inside a generator
                # is converted to RuntimeError since Python 3.7; a plain
                # return ends the generator cleanly instead.
                return
            yield self.collate_fn(batch)

    def __next__(self):
        # NOTE: builds a fresh generator each call, so this always returns
        # the first non-empty batch.
        return next(iter(self))
| 22.730159 | 93 | 0.639665 | 1,111 | 0.775838 | 162 | 0.113128 | 0 | 0 | 0 | 0 | 253 | 0.176676 |
149ef58d13dd8a52e0e9a53ee7b6bc88f4c88418 | 405 | py | Python | setup.py | bkanchan6/high-res-stereo | 80eb23ce0fe532f4cd238f25b4c3fced249269e3 | [
"MIT"
] | null | null | null | setup.py | bkanchan6/high-res-stereo | 80eb23ce0fe532f4cd238f25b4c3fced249269e3 | [
"MIT"
] | null | null | null | setup.py | bkanchan6/high-res-stereo | 80eb23ce0fe532f4cd238f25b4c3fced249269e3 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
import os
# Package version; release pipelines may override it via the VERSION env var.
version = "0.0.1"
if "VERSION" in os.environ:
    version = os.environ["VERSION"]
setup(
    name="high-res-stereo",
    version=version,
    description="high-res-stereo",
    author="Jariullah Safi",
    author_email="safijari@isu.edu",
    packages=find_packages(),
    install_requires=["torch", "opencv-python", "texttable", "torchvision"],
)
| 22.5 | 76 | 0.68642 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 139 | 0.34321 |
149f1595ee377a7f0c819fd8202bd1c39fe66ec2 | 7,169 | py | Python | main.py | tokudaek/image-viewer | 262e2ccb165824b3edbb275cc981650487ed8cf4 | [
"MIT"
] | null | null | null | main.py | tokudaek/image-viewer | 262e2ccb165824b3edbb275cc981650487ed8cf4 | [
"MIT"
] | 1 | 2017-03-28T15:23:07.000Z | 2017-03-28T15:23:07.000Z | main.py | tokudaek/image-viewer | 262e2ccb165824b3edbb275cc981650487ed8cf4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
""" Image viewer based on Tkinter and integrated to the database.
"""
##########################################################IMPORTS
import argparse
import os
import tkinter
import tkinter.messagebox
import tkinter.filedialog
import tkinter.font
import PIL
import PIL.Image
import PIL.ImageTk
import utils
import time
import logging
import random
##########################################################DEFINES
# Method ids stored in the Bbox table's `methodid` column, distinguishing a
# box's origin (values match the database contents — see db_getbboxes).
GNDTRUTHID = 2   # manually annotated ground-truth boxes
DETECTIONID = 7  # automatically detected boxes
class MyApp(tkinter.Frame):
    """Tk frame that shows the images of a folder, overlaying the detection
    and ground-truth bounding boxes fetched from the database.

    Keyboard controls: Left/Right switch image, O opens a folder dialog,
    S shows the color-to-class subtitle window.
    """

    def __init__(self, parent=None, initialdir=os.getcwd()):
        # NOTE: os.getcwd() in the default is evaluated once, at import time.
        super().__init__()
        self.parent = parent
        self.curid = 0  # index into self.images of the picture being shown
        self.curdir = initialdir
        self.images = listfiles(initialdir)
        self.conn = utils.db_connect('config/db.json')
        self.parent.bind("<Key>", self.onkeypress)
        self.create_canvas()
        # Index 0 is a sentinel (black) so class ids can index self.colors directly.
        self.colors = ['black'] + loadcolorsfromfile('tkcolors.txt')
        self.update_canvas()
        self.parent.title(self.images[self.curid])
        #self.create_controls()
        self.pack(fill=tkinter.BOTH, expand=tkinter.YES)

    def create_canvas(self):
        """Create the black drawing canvas filling the whole frame."""
        frame = tkinter.Frame(self)
        self.canvas = tkinter.Canvas(frame, background='black')
        frame.pack(fill=tkinter.BOTH, expand=tkinter.YES)
        self.canvas.pack(fill=tkinter.BOTH, expand=tkinter.YES)

    def update_canvas(self):
        """Redraw the current image, scaled to fit, plus its bounding boxes."""
        self.im = None
        self.canvas.delete("all")
        imagepath = self.images[self.curid]
        w = self.parent.winfo_width()
        h = self.parent.winfo_height()
        canvasratio = w/(h)
        pilim = PIL.Image.open(os.path.join(self.curdir, imagepath))
        imratio = pilim.size[0]/pilim.size[1]
        # Pick the scale factor that fits the image inside the window while
        # preserving its aspect ratio.
        if imratio > canvasratio:
            factor = w/pilim.size[0]
        else:
            factor = (h)/pilim.size[1]
        self.imfactor = factor
        t0 = time.time()
        pilim = pilim.resize((int(pilim.size[0]*factor), int(pilim.size[1]*factor)))
        self.curimage = PIL.ImageTk.PhotoImage(pilim)
        posx = int(w/2)
        posy = int(h/2)
        self.im = self.canvas.create_image(posx, posy, image=self.curimage)
        t1 = time.time()
        logging.debug('{:.1f} seconds to display image.'.format(t1-t0))
        # The image id (filename without extension) keys the Bbox table.
        imageid = os.path.splitext(self.images[self.curid])[0]
        bboxes = db_getbboxes(self.conn, imageid)
        self.draw_detections(bboxes)
        self.draw_gndtruths(bboxes)
        self.update()

    def create_controls(self):
        """Optional button bar (currently unused; see commented call in __init__)."""
        frame = tkinter.Frame(self, pady=5)
        obutton = tkinter.Button(frame, text='Open folder', command=self.openfolder)
        pbutton = tkinter.Button(frame, text='Previous picture', command=
                                 lambda: self.change_image(-1))
        nbutton = tkinter.Button(frame, text='Next picture', command=
                                 lambda: self.change_image(+1))
        qbutton = tkinter.Button(frame, text='Quit', command=self.parent.quit)
        obutton.pack(side=tkinter.LEFT)
        pbutton.pack(side=tkinter.LEFT)
        nbutton.pack(side=tkinter.LEFT)
        qbutton.pack(side=tkinter.LEFT)
        frame.pack()

    def onkeypress(self, event):
        """Dispatch keyboard shortcuts (Left/Right/O/S)."""
        k = event.keysym
        if k == 'Left':
            self.change_image(-1)
        elif k == 'Right':
            self.change_image(1)
        elif k == 'O':
            # BUGFIX: openfolder previously required an `event` argument, so
            # this call raised TypeError; the parameter now defaults to None.
            self.openfolder()
        elif k == 'S':
            self.createsubtitledialog()

    def createsubtitledialog(self):
        """Pop up a window mapping box colors to class names."""
        logging.debug('here inside createsubtitledialog')
        top = tkinter.Toplevel()
        top.title('Colors subtitle')
        classesrows = db_getclasses(self.conn)
        # BUGFIX: cap the loop at the number of classes; the previous fixed
        # range(0, 20) raised IndexError when fewer than 20 classes exist.
        for i in range(0, min(20, len(classesrows))):
            can = tkinter.Canvas(top, width=10, height=10)
            can.grid(row=i+1, column=1)
            can.create_rectangle(0, 0, 10, 10, fill=self.colors[i+1])
            myfont = tkinter.font.Font(family="Arial", size=24)
            msg = tkinter.Message(top, text=classesrows[i][1], font=myfont, aspect=500)
            msg.grid(row=i+1, column=2, sticky=tkinter.W)

    def change_image(self, delta):
        """Advance the shown image by `delta`, wrapping around both ends."""
        newid = self.curid + delta
        self.curid = newid % len(self.images)
        self.update_canvas()
        self.parent.title(self.images[self.curid])

    def openfolder(self, event=None):
        """Ask the user for a folder.
        NOTE(review): the image list is not reloaded afterwards — the debug
        message below suggests this is a known TODO."""
        self.curdir = tkinter.filedialog.askdirectory()
        logging.debug("Now I have to update to " + self.curdir)

    def draw_gndtruths(self, bboxes):
        """Draw ground-truth boxes: thin black dashed rectangles."""
        self.draw_bboxes(bboxes, GNDTRUTHID, 'black', 0.5, 1)

    def draw_detections(self, bboxes):
        """Draw detector boxes, colored per class id."""
        self.draw_bboxes(bboxes, DETECTIONID)

    def draw_bboxes(self, bboxes, methodid, color=None, width=1.0, dash=(2, 10)):
        """Draw every box whose methodid matches, scaled/offset to the canvas.

        Box rows are (x_min, y_min, x_max, y_max, prob, classid, methodid)
        as returned by db_getbboxes.
        """
        imcoords = self.canvas.coords(self.im)
        # Offset from canvas origin to the displayed image's top-left corner.
        dx = imcoords[0] - int(self.curimage.width()/2)
        dy = imcoords[1] - int(self.curimage.height()/2)
        delta = [dx, dy, dx, dy]
        # Line width proportional to the image position/size on screen.
        bboxline = imcoords[0]/100 * width
        for b in bboxes:
            p = []
            if b[6] != methodid: continue
            for i in range(0, 4):
                p.append(int(b[i]*self.imfactor) + delta[i])
            classid = b[5]
            col = color if color else self.colors[classid]
            self.canvas.create_rectangle(p[0], p[1], p[2], p[3],
                                         width=bboxline, outline=col, dash=dash)
def listfiles(indir, ext='jpg'):
    """Return the file names (not paths) in `indir` whose lowercased name ends
    with `ext`.  Directories are skipped, even ones whose name matches."""
    matched = []
    for name in os.listdir(indir):
        full_path = os.path.join(indir, name)
        if not os.path.isdir(full_path) and full_path.lower().endswith(ext):
            matched.append(name)
    return matched
def db_getbboxes(conn, imageid, classid=None):
    """Fetch bounding boxes for one image, optionally filtered by class.

    Returns rows of (x_min, y_min, x_max, y_max, prob, classid, methodid).

    BUGFIX: the optional class filter was appended without a leading space,
    producing invalid SQL like ``... imageid=7AND classid=3``.
    NOTE(review): values are interpolated into the SQL string; imageid comes
    from local filenames, but parameterized queries would be safer — the
    DB-API paramstyle of utils.db_connect needs confirming first.
    """
    cur = conn.cursor()
    query = ("SELECT x_min, y_min, x_max, y_max, prob, classid, methodid"
             " FROM Bbox WHERE imageid={}").format(imageid)
    if classid:
        query += " AND classid={}".format(classid)
    cur.execute(query)
    conn.commit()
    return cur.fetchall()
def db_getclasses(conn):
    """Return all (id, name) rows from the Class table, ordered by id.

    BUGFIX: the original had a stray line-continuation backslash after the
    query string, which glued ``cur.execute(query)`` onto the assignment and
    made the whole module a SyntaxError.
    """
    cur = conn.cursor()
    query = """SELECT id,name FROM Class ORDER BY id"""
    cur.execute(query)
    conn.commit()
    return cur.fetchall()
def loadcolorsfromfile(filepath):
    """Read one color name per line and return them in a deterministic
    pseudo-random order (seeded with 0, so the same file always produces the
    same class-to-color mapping across runs)."""
    random.seed(0)
    with open(filepath) as handle:
        colors = handle.read().splitlines()
    random.shuffle(colors)
    return colors
#########################################################
def parse_args():
    """Parse the command line: -p/--path (images folder) and -v/--verbose."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--path', default=None)
    parser.add_argument('-v', '--verbose', action='store_true')
    return parser.parse_args()
def main():
    """Entry point: configure logging from the CLI flags and start the Tk loop."""
    args = parse_args()
    log_level = logging.DEBUG if args.verbose else logging.WARNING
    logging.basicConfig(level=log_level)
    indir = args.path if args.path else os.getcwd()
    root = tkinter.Tk()
    root.geometry('600x400')
    root.update()
    MyApp(root, indir)
    root.mainloop()
if __name__ == "__main__":
main()
| 32.004464 | 87 | 0.594644 | 5,045 | 0.703724 | 0 | 0 | 0 | 0 | 0 | 0 | 984 | 0.137258 |
149fdd39d511ea3e1c778093005d088b6ec5befd | 1,882 | py | Python | ozellikler/tarih.py | ny4rlk0/nyarlko | e4224ed11647ffbbdf86d9e7c7834e2d5dc2966c | [
"MIT"
] | null | null | null | ozellikler/tarih.py | ny4rlk0/nyarlko | e4224ed11647ffbbdf86d9e7c7834e2d5dc2966c | [
"MIT"
] | null | null | null | ozellikler/tarih.py | ny4rlk0/nyarlko | e4224ed11647ffbbdf86d9e7c7834e2d5dc2966c | [
"MIT"
] | null | null | null | import datetime as suan
def al(text):
    """Return the current time/date component requested by *text*, with day
    and month names localized to Turkish.

    Recognised prefixes: "saat"/"dakika"/"saniye" -> HH:MM:SS,
    "tarih" -> full date+time, "gün"/"gun" -> weekday, "ay" -> month,
    "yıl"/"yil" -> year.  Anything else returns None (same as before).
    """
    # strftime("%A"/"%B") yields English names under the default C locale;
    # translate via lookup tables instead of long elif chains.  Names from
    # other locales pass through unchanged (the original left them as-is too).
    gunler = {
        "Monday": "Pazartesi", "Tuesday": "Salı", "Wednesday": "Çarşamba",
        "Thursday": "Perşembe", "Friday": "Cuma", "Saturday": "Cumartesi",
        "Sunday": "Pazar",
    }
    aylar = {
        "January": "Ocak", "February": "Şubat", "March": "Mart",
        "April": "Nisan", "May": "Mayıs", "June": "Haziran",
        "July": "Temmuz", "August": "Ağustos", "September": "Eylül",
        "October": "Ekim", "November": "Kasım", "December": "Aralık",
    }
    try:
        zaman = suan.datetime.now()
        saat = zaman.strftime("%H")
        dakika = zaman.strftime("%M")
        saniye = zaman.strftime("%S")
        gun = gunler.get(zaman.strftime("%A"), zaman.strftime("%A"))
        ay = aylar.get(zaman.strftime("%B"), zaman.strftime("%B"))
        yil = zaman.strftime("%Y")
    except Exception:  # narrowed from a bare except (don't swallow SystemExit)
        return "Tarih, saati alırken hata ile karşılaştım."
    # Keep the original matching order: time first, then date, day, month, year.
    if text.startswith(("saat", "dakika", "saniye")):
        return saat + ":" + dakika + ":" + saniye
    if text.startswith("tarih"):
        return gun + "/" + ay + "/" + yil + " " + saat + ":" + dakika + ":" + saniye
    if text.startswith(("gün", "gun")):
        return gun
    if text.startswith("ay"):
        return ay
    if text.startswith(("yıl", "yil")):
        return yil
    return None  # unrecognised request (was an implicit None before)
14a1226bd39340b0ae889c9808fb6bb548a8f7b1 | 10,467 | py | Python | examples/xlnet/utils/processor.py | qinzzz/texar-pytorch | d66258a599a291418004170e62864b001b650926 | [
"Apache-2.0"
] | 746 | 2019-06-09T12:38:52.000Z | 2022-03-23T12:40:55.000Z | examples/xlnet/utils/processor.py | qinzzz/texar-pytorch | d66258a599a291418004170e62864b001b650926 | [
"Apache-2.0"
] | 247 | 2019-06-11T18:32:44.000Z | 2022-02-17T20:12:04.000Z | examples/xlnet/utils/processor.py | qinzzz/texar-pytorch | d66258a599a291418004170e62864b001b650926 | [
"Apache-2.0"
] | 143 | 2019-06-10T19:38:30.000Z | 2022-03-13T09:43:10.000Z | # Copyright 2019 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Data processors. Adapted from
https://github.com/zihangdai/xlnet/blob/master/run_classifier.py
"""
import csv
import logging
from abc import ABC
from pathlib import Path
from typing import NamedTuple, Optional, Union, List, Dict, Type
class InputExample(NamedTuple):
    r"""A single training/test example for simple sequence classification.

    Being a NamedTuple, instances are immutable and can be unpacked as
    ``guid, text_a, text_b, label``.
    """
    guid: str
    r"""Unique id for the example."""
    text_a: str
    r"""string. The untokenized text of the first sequence. For single sequence
    tasks, only this sequence must be specified."""
    text_b: Optional[str]
    r"""(Optional) string. The untokenized text of the second sequence. Only
    needs to be specified for sequence pair tasks."""
    label: Optional[Union[str, float]]
    r"""(Optional) string. The label of the example. This should be specified
    for train and dev examples, but not for test examples."""
class DataProcessor:
    r"""Base class for data converters for sequence classification data sets.

    Concrete processors subclass this, set :attr:`labels`, and implement the
    ``get_*_examples`` hooks; they are looked up by (case-insensitive) task
    name via :meth:`register` / the module-level ``get_processor_class``.
    """
    labels: List[str]
    is_regression: bool = False
    task_name: str

    __task_dict__: Dict[str, Type['DataProcessor']] = {}

    def __init__(self, data_dir: str):
        self.data_dir = Path(data_dir)

    @classmethod
    def register(cls, *names):
        r"""Return a class decorator registering a processor under ``names``.

        The first name becomes the canonical ``task_name``.  Registering a
        name twice is an error.
        """
        def decorator(klass):
            for name in names:
                key = name.lower()
                prev_processor = DataProcessor.__task_dict__.get(key, None)
                if prev_processor is not None:
                    raise ValueError(
                        f"Cannot register {klass} as {name}. "
                        f"The name is already taken by {prev_processor}")
                DataProcessor.__task_dict__[key] = klass
            klass.task_name = names[0]
            return klass

        return decorator

    def get_train_examples(self) -> List[InputExample]:
        r"""Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError

    def get_dev_examples(self) -> List[InputExample]:
        r"""Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError

    def get_test_examples(self) -> List[InputExample]:
        r"""Gets a collection of `InputExample`s for prediction."""
        raise NotImplementedError

    @classmethod
    def _read_tsv(cls, input_file: Path,
                  quotechar: Optional[str] = None) -> List[List[str]]:
        """Reads a tab separated value file, dropping empty rows."""
        with input_file.open('r') as f:
            return [row for row
                    in csv.reader(f, delimiter="\t", quotechar=quotechar)
                    if row]
def get_processor_class(task: str) -> Type[DataProcessor]:
    """Look up a registered processor class by (case-insensitive) task name."""
    normalized = task.lower()
    klass = DataProcessor.__task_dict__.get(normalized, None)
    if klass is None:
        raise ValueError(f"Unsupported task {normalized}")
    return klass
class GLUEProcessor(DataProcessor, ABC):
    """Shared TSV-reading logic for GLUE-style tasks.

    Subclasses set the file names and the column indices below; ``*_column``
    attributes are annotation-only here and must be provided by each subclass.
    """
    train_file = "train.tsv"
    dev_file = "dev.tsv"
    test_file = "test.tsv"
    label_column: int
    text_a_column: int
    text_b_column: int
    contains_header = True
    test_text_a_column: int
    test_text_b_column: int
    test_contains_header = True

    def __init__(self, data_dir: str):
        super().__init__(data_dir)
        # Test-set columns default to the train/dev columns when the subclass
        # has not defined them (the annotations above create no attribute, so
        # hasattr is False unless a subclass assigns a value).
        if not hasattr(self, 'test_text_a_column'):
            self.test_text_a_column = self.text_a_column
        if not hasattr(self, 'test_text_b_column'):
            self.test_text_b_column = self.text_b_column

    def get_train_examples(self) -> List[InputExample]:
        return self._create_examples(
            self._read_tsv(self.data_dir / self.train_file), "train")

    def get_dev_examples(self) -> List[InputExample]:
        return self._create_examples(
            self._read_tsv(self.data_dir / self.dev_file), "dev")

    def get_test_examples(self) -> List[InputExample]:
        return self._create_examples(
            self._read_tsv(self.data_dir / self.test_file), "test")

    def _create_examples(self, lines: List[List[str]],
                         set_type: str) -> List[InputExample]:
        """Creates examples for the training and dev sets.

        Header rows are skipped per the ``*contains_header`` flags; rows too
        short to hold the expected columns are dropped with a warning.  For
        the test split the label is a placeholder (the first known label).
        """
        examples = []
        for (i, line) in enumerate(lines):
            if i == 0 and self.contains_header and set_type != "test":
                continue
            if i == 0 and self.test_contains_header and set_type == "test":
                continue
            guid = f"{set_type}-{i}"
            # Test splits may use different column indices than train/dev.
            a_column = (self.text_a_column if set_type != "test" else
                        self.test_text_a_column)
            b_column = (self.text_b_column if set_type != "test" else
                        self.test_text_b_column)

            # there are some incomplete lines in QNLI
            if len(line) <= a_column:
                logging.warning('Incomplete line, ignored.')
                continue
            text_a = line[a_column]

            if b_column is not None:
                if len(line) <= b_column:
                    logging.warning('Incomplete line, ignored.')
                    continue
                text_b = line[b_column]
            else:
                text_b = None

            if set_type == "test":
                label = self.labels[0]
            else:
                if len(line) <= self.label_column:
                    logging.warning('Incomplete line, ignored.')
                    continue
                label = line[self.label_column]
            examples.append(InputExample(guid, text_a, text_b, label))
        return examples
@DataProcessor.register("MNLI", "MNLI_matched")
class MnliMatchedProcessor(GLUEProcessor):
    """MNLI (matched genre) task: 3-way NLI over TSV columns 8/9, label last."""
    labels = ["contradiction", "entailment", "neutral"]

    dev_file = "dev_matched.tsv"
    test_file = "test_matched.tsv"
    label_column = -1
    text_a_column = 8
    text_b_column = 9
@DataProcessor.register("MNLI_mismatched")
class MnliMismatchedProcessor(MnliMatchedProcessor):
    """MNLI (mismatched genre) variant; only the dev/test file names differ."""
    dev_file = "dev_mismatched.tsv"
    test_file = "test_mismatched.tsv"
@DataProcessor.register("STS-B", "stsb")
class StsbProcessor(GLUEProcessor):
    """STS-B similarity task: a *regression* target in [0, 5], not a class."""
    labels: List[str] = []
    is_regression = True

    label_column = 9
    text_a_column = 7
    text_b_column = 8

    def _create_examples(self, lines: List[List[str]],
                         set_type: str) -> List[InputExample]:
        """Creates examples for the training and dev sets.

        Deliberate near-duplicate of GLUEProcessor._create_examples: labels
        are floats here, and the test-split placeholder is 0.0 (the parent's
        ``self.labels[0]`` would fail because ``labels`` is empty).
        """
        examples = []
        for (i, line) in enumerate(lines):
            if i == 0 and self.contains_header and set_type != "test":
                continue
            if i == 0 and self.test_contains_header and set_type == "test":
                continue
            guid = f"{set_type}-{i}"
            a_column = (self.text_a_column if set_type != "test" else
                        self.test_text_a_column)
            b_column = (self.text_b_column if set_type != "test" else
                        self.test_text_b_column)

            # there are some incomplete lines in QNLI
            if len(line) <= a_column:
                logging.warning('Incomplete line, ignored.')
                continue
            text_a = line[a_column]

            if b_column is not None:
                if len(line) <= b_column:
                    logging.warning('Incomplete line, ignored.')
                    continue
                text_b = line[b_column]
            else:
                text_b = None

            if set_type == "test":
                label = 0.0
            else:
                if len(line) <= self.label_column:
                    logging.warning('Incomplete line, ignored.')
                    continue
                label = float(line[self.label_column])
            examples.append(InputExample(guid, text_a, text_b, label))
        return examples
@DataProcessor.register("Yelp5")
class Yelp5Processor(DataProcessor):
    """Yelp-5 review rating: CSV rows of (label, text) with 1-5 star labels."""
    labels = ["1", "2", "3", "4", "5"]

    def get_train_examples(self) -> List[InputExample]:
        return self._create_examples(self.data_dir / "train.csv")

    def get_dev_examples(self) -> List[InputExample]:
        return self._create_examples(self.data_dir / "test.csv")

    def get_test_examples(self):
        raise TypeError("The Yelp 5 dataset does not have a test set.")

    @staticmethod
    def _create_examples(input_file: Path) -> List[InputExample]:
        """Creates examples for the training and dev sets.

        Column 0 is the star rating, column 1 the review text; the replace
        calls undo the dataset's quote escaping.
        """
        with input_file.open() as f:
            return [
                InputExample(guid=str(i),
                             text_a=row[1].replace('""', '"').replace('\\"', '"'),
                             text_b=None,
                             label=row[0])
                for i, row in enumerate(csv.reader(f))
            ]
@DataProcessor.register("IMDB")
class ImdbProcessor(DataProcessor):
    """IMDB sentiment: one .txt review per file under a neg/ or pos/ folder."""
    labels = ["neg", "pos"]

    def get_train_examples(self) -> List[InputExample]:
        return self._create_examples(self.data_dir / "train")

    def get_dev_examples(self) -> List[InputExample]:
        return self._create_examples(self.data_dir / "test")

    def get_test_examples(self):
        raise TypeError("The IMDB dataset does not have a test set.")

    @staticmethod
    def _create_examples(data_dir: Path) -> List[InputExample]:
        """Build one example per review file; the folder name is the label."""
        examples = []
        for label in ("neg", "pos"):
            for path in (data_dir / label).iterdir():
                if path.suffix != ".txt":
                    continue
                with path.open() as handle:
                    # Strip the HTML line breaks the raw dump contains.
                    text = handle.read().strip().replace("<br />", " ")
                examples.append(InputExample(
                    guid=str(path), text_a=text, text_b=None, label=label))
        return examples
| 35.361486 | 80 | 0.602369 | 9,168 | 0.875896 | 0 | 0 | 5,259 | 0.502436 | 0 | 0 | 2,568 | 0.245343 |
14a2c2afb9c59044a6c39bbd0a8f0ba276f110ad | 4,110 | py | Python | src/pyhf_benchmark/plot.py | pyhf/pyhf-benchmark | bc0f91253e8d6d4dbc7205cabf0ec7a9d5402dcf | [
"Apache-2.0"
] | 3 | 2020-05-22T22:50:22.000Z | 2020-06-02T16:28:37.000Z | src/pyhf_benchmark/plot.py | pyhf/pyhf-benchmark | bc0f91253e8d6d4dbc7205cabf0ec7a9d5402dcf | [
"Apache-2.0"
] | 30 | 2020-06-02T16:22:27.000Z | 2020-08-20T04:55:59.000Z | src/pyhf_benchmark/plot.py | pyhf/pyhf-benchmark | bc0f91253e8d6d4dbc7205cabf0ec7a9d5402dcf | [
"Apache-2.0"
] | 1 | 2020-07-28T02:32:58.000Z | 2020-07-28T02:32:58.000Z | import json
import pandas as pd
import time
import matplotlib.pyplot as plt
# Three parallel lists (same length, aligned by index): the plot's y-axis
# label, the column name in events.jsonl, and the output file name.
# The entries from index 8 on are GPU-only metrics (see plot()/plot_comb()).
ylabels = [
    "CPU Utilization (%)",
    "Disk I/O Utilization (%)",
    "Process CPU Threads In Use",
    "Network Traffic (bytes)",
    "System Memory Utilization (%)",
    "Process Memory Available (non-swap) (MB)",
    "Process Memory In Use (non-swap) (MB)",
    "Process Memory \n In Use (non-swap) (%)",
    "GPU Utilization (%)",
    "GPU Memory Allocated (%)",
    "GPU Time Spent Accessing Memory (%)",
    "GPU Temp (℃)",
]
# NOTE(review): "network.sent" lacks the "system." prefix its pair has —
# confirm against the actual events.jsonl schema.  Likewise, whether
# "system.gpu.0.memory" vs "system.gpu.0.memoryAllocated" really match
# ylabels[9]/[10] in that order should be verified against the producer.
columns = [
    "system.cpu",
    "system.disk",
    "system.proc.cpu.threads",
    ["network.sent", "system.network.recv"],
    "system.memory",
    "system.proc.memory.availableMB",
    "system.proc.memory.rssMB",
    "system.proc.memory.percent",
    "system.gpu.0.gpu",
    "system.gpu.0.memory",
    "system.gpu.0.memoryAllocated",
    "system.gpu.0.temp",
]
filenames = [
    "CPU_Utilization.png",
    "Disk_IO_Utilization.png",
    "CPU_Threads.png",
    "Network_Traffic.png",
    "Memory_Utilization.png",
    "Proc_Memory_available.png",
    "Proc_Memory_MB.png",
    "Proc_Memory_Percent.png",
    "GPU_Utilization.png",
    "GPU_Memory_Allocated.png",
    "GPU_Memory_Time.png",
    "GPU_Temp.png",
]
def load(directory_name):
    """Load a run's ``events.jsonl`` into a pandas DataFrame.

    Waits up to 60 seconds for the file to appear (it is produced by a
    concurrently running process), then parses one JSON object per line,
    collecting each key's values into a column.

    Args:
        directory_name: pathlib.Path of the run directory.
    Raises:
        FileNotFoundError: if the file does not show up within ~60 seconds.
            (BUGFIX: previously raised FileExistsError, which is the exception
            for a file that *already* exists — the opposite situation.)
    """
    path = directory_name / "events.jsonl"
    output_dic = {}
    clock = 0
    while not path.exists():
        clock += 1
        time.sleep(1)
        if clock >= 60:
            raise FileNotFoundError(f"{path} is not found!")
    with path.open("r") as json_file:
        for json_str in json_file:
            item = json.loads(json_str)
            for key in item.keys():
                output_dic.setdefault(key, []).append(item[key])
    return pd.DataFrame.from_dict(output_dic)
def load_all(directory_name):
    """Load every run subdirectory under `directory_name`.

    Returns (contents, backends): the per-run DataFrames and the backend
    names, where a backend name is the directory-name suffix after the last
    underscore (directories are named like "<timestamp>_<backend>").
    """
    contents = []
    backends = []
    for path in directory_name.glob("*"):
        if not path.is_dir():
            continue
        name = str(path)
        backends.append(name[name.rfind("_") + 1:])
        contents.append(load(path))
    return contents, backends
def subplot(y_label, column, output, directory, filename):
    """Plot one metric of a single run against runtime and save it as a PNG.

    output   -- DataFrame from load(); missing columns fall back to zeros.
    column   -- one column name, or a [sent, recv] pair for network traffic.
    """
    fig, ax = plt.subplots()
    x_value = output["_runtime"]
    if y_label == "Network Traffic (bytes)":
        # Network traffic is the one metric drawn as two series (send/recv).
        y_value1 = output.get(column[0], [0] * len(x_value))
        y_value2 = output.get(column[1], [0] * len(x_value))
        ax.plot(x_value, y_value1, ls="--", label="send")
        ax.plot(x_value, y_value2, label="recv")
        ax.legend(loc="upper left")
    else:
        y_value = output.get(column, [0] * len(x_value))
        ax.plot(x_value, y_value)
    ax.set_xlabel("Time (minutes)")
    ax.set_ylabel(y_label)
    ax.grid()
    fig.savefig(directory / filename)
def subplot_comb(y_label, column, outputs, backends, directory, filename):
    """Plot one metric for *several* runs on a single figure, one line per
    backend, and save it as a PNG under `directory`."""
    fig, ax = plt.subplots()
    ax.set_xlabel("Time (minutes)")
    ax.set_ylabel(y_label)
    ax.grid()
    for i, output in enumerate(outputs):
        x_value = output["_runtime"]
        if y_label == "Network Traffic (bytes)":
            y_value1 = output.get(column[0], [0] * len(x_value))
            y_value2 = output.get(column[1], [0] * len(x_value))
            ax.plot(x_value, y_value1, ls="--", label=backends[i] + "_send")
            ax.plot(x_value, y_value2, label=backends[i] + "_recv")
        else:
            # NOTE(review): outputs[i] is the same object as `output` here;
            # the mixed spelling is harmless but inconsistent.
            y_value = outputs[i].get(column, [0] * len(x_value))
            ax.plot(x_value, y_value, label=backends[i])
    ax.legend(loc="upper left")
    fig.savefig(directory / filename)
def plot(directory):
    """Render one figure per metric for a single run directory.

    Metrics from index 8 onward are GPU-only; once the data shows no GPU
    column, plotting stops after index 7 (the last non-GPU metric).
    """
    output = load(directory)
    for idx in range(len(ylabels)):
        subplot(ylabels[idx], columns[idx], output, directory, filenames[idx])
        if "system.gpu.0.gpu" not in output and idx >= 7:
            break
def plot_comb(directory):
    """Render one combined figure per metric across all runs in `directory`.

    Mirrors plot(): GPU-only metrics (index >= 8) are skipped when the first
    run's data has no GPU column.
    """
    outputs, backends = load_all(directory)
    if not outputs:
        # Guard: with no run subdirectories the GPU check below would
        # previously raise IndexError on outputs[0].
        return
    for idx in range(len(ylabels)):
        subplot_comb(
            ylabels[idx], columns[idx], outputs, backends, directory, filenames[idx]
        )
        if "system.gpu.0.gpu" not in outputs[0] and idx >= 7:
            break
| 29.568345 | 84 | 0.605596 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,131 | 0.275049 |
14a5b4ceef7a61b358d2f461acdd1773fb472818 | 3,045 | py | Python | serial.py | Tythos/SeRes | 724dcfff8a4fbeb77090cb6e6cf0bd101abe70e4 | [
"MIT"
] | null | null | null | serial.py | Tythos/SeRes | 724dcfff8a4fbeb77090cb6e6cf0bd101abe70e4 | [
"MIT"
] | null | null | null | serial.py | Tythos/SeRes | 724dcfff8a4fbeb77090cb6e6cf0bd101abe70e4 | [
"MIT"
] | null | null | null | """Serial objects are responsible for:
* Maintaining specific catalogues of Format and Protocol parsers
* Serializing and deserializing Python objects to and from dictionary equivalents
Eventually, the second item will need to support more complex types, such as
user-defined enumerations. For now, the following field values are supported in the basic
release:
* Three primitives
* Logicals
* Numerics
* Strings
* Two data structures
* Dictionaries
* Lists
Serial objects also define the method by which specific inbound/outbound operations are
mapped to specific Format and Protocol parsers, usually by matching REST URI patterns.
Therefore, the Serial instance is the primary user interface to the seres inbound/outbound
data pipeline.
"""
import importlib
import types
import sys
import warnings
import seres.formats
import seres.protocols
import seres.rest
from seres import parsers
# Version-dependent "is this object a class?" predicate.
if sys.version_info.major == 2:
    def is_class_type(c):
        # Python 2: old-style classes are instances of types.ClassType.
        return type(c) == types.ClassType
else:
    def is_class_type(c):
        # Python 3: every class (including builtins) is an instance of type.
        return isinstance(c, type)
def dicts2objs(dicts):
    """Deserialize a list of dictionaries into live objects.

    Each dictionary must carry a '__uni__' key ("module.ClassName"); the
    module is imported on demand, the class is instantiated with no
    arguments, and the remaining key/value pairs are set on the instance.
    Entries that cannot be deserialized produce a RuntimeWarning and a None
    placeholder, keeping the output aligned with the input.

    Fixes over the previous version: the loop variable no longer shadows the
    builtin ``dict``, and the warning no longer crashes with KeyError when
    '__uni__' itself is the missing piece.
    """
    objs = []
    for entry in dicts:
        uni = entry.get('__uni__', '<missing __uni__>')
        try:
            module_name, class_name = uni.rsplit(".", 1)
            if module_name not in sys.modules:
                module = importlib.import_module(module_name)
            else:
                module = sys.modules[module_name]
            klass = getattr(module, class_name)
            obj = klass()
            for field, value in entry.items():
                if field != "__uni__":
                    setattr(obj, field, value)
            objs.append(obj)
        except Exception:
            # Many distinct failures can land here (bad UNI, import error,
            # constructor error, read-only attributes); a per-cause exception
            # hierarchy would be the longer-term fix.
            warnings.warn("Unable to deserialize to class at UNI '" + uni + "'; a None will be inserted instead", RuntimeWarning)
            objs.append(None)
    return objs
def objs2dicts(objs):
    """Serialize objects into tabularized dictionaries (see get_tabular_dicts).

    BUGFIX: the previous version appended '__uni__' directly to each object's
    ``__dict__``, silently mutating the objects being serialized; a shallow
    copy is taken instead.
    """
    dicts = []
    for obj in objs:
        entry = dict(obj.__dict__)
        entry['__uni__'] = obj.__module__ + "." + obj.__class__.__name__
        dicts.append(entry)
    return get_tabular_dicts(dicts)
def get_tabular_dicts(dicts):
    """Normalize serialized-object dicts to one shared, sorted field set.

    Every returned dict has exactly the union of all input fields, with None
    filling fields an entry lacked.  This is particularly useful for outbound
    tabular formats, which emit a single header row for all entries.
    """
    all_fields = sorted({field for entry in dicts for field in entry})
    return [{field: entry.get(field) for field in all_fields}
            for entry in dicts]
| 32.393617 | 133 | 0.7133 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,666 | 0.547126 |
14a6ef3b95c24784d64e5d97515f9bb8133b3f76 | 3,893 | py | Python | Scraper.py | Warthog710/FFXIV-Alert-API | 5264eeae2c67cb3e0bfe09169a207d9ec63012ce | [
"MIT"
] | null | null | null | Scraper.py | Warthog710/FFXIV-Alert-API | 5264eeae2c67cb3e0bfe09169a207d9ec63012ce | [
"MIT"
] | null | null | null | Scraper.py | Warthog710/FFXIV-Alert-API | 5264eeae2c67cb3e0bfe09169a207d9ec63012ce | [
"MIT"
] | null | null | null | import requests
import atexit
from apscheduler.schedulers.background import BackgroundScheduler
from bs4 import BeautifulSoup
class lodeStoneScraper:
    """Scrapes the FFXIV Lodestone world-status page and caches per-server data.

    ``get_data()`` returns ``{server_name: [category, char_creation_ok, online]}``;
    a background scheduler refreshes the cache every 15 seconds.
    """

    def __init__(self):
        self.__URL = 'https://na.finalfantasyxiv.com/lodestone/worldstatus/'
        self.__statistics = {}
        self.update_page()

        # Setup scheduler: refresh the cache every 15 seconds in the background.
        self.__scheduler = BackgroundScheduler()
        self.__scheduler.add_job(func=self.update_page, trigger='interval', seconds=15)
        self.__scheduler.start()

        # Setup atexit: stop the scheduler on interpreter shutdown.
        atexit.register(lambda: self.__scheduler.shutdown())

    # Called to update the stored information on the servers
    def update_page(self):
        """Fetch and parse the world-status page into self.__statistics.

        On any failure the pre-update data is restored and the error logged.
        """
        # BUGFIX: take a *copy* of the current data.  The previous code kept a
        # plain reference, so restoring it after a failed partial update was a
        # no-op (the dict had already been mutated in place).
        temp = dict(self.__statistics)

        try:
            # Get the page content and parse it
            page = requests.get(self.__URL)
            page = BeautifulSoup(page.content, 'html.parser')

            # Extract the relevant divs
            server_names = page.find_all('div', class_='world-list__world_name')
            server_types = page.find_all('div', class_='world-list__world_category')
            server_char_status = page.find_all('div', class_='world-list__create_character')
            server_online_status = page.find_all('div', class_='world-list__status_icon')

            # Parse into text
            server_names = self.__parse_name(server_names)
            server_types = self.__parse_type(server_types)
            server_char_status = self.__parse_char_status(server_char_status)
            server_online_status = self.__parse_server_online_status(server_online_status)

            # Collate the data
            for x in range(0, len(server_names)):
                self.__statistics[server_names[x]] = [server_types[x], server_char_status[x], server_online_status[x]]
        except Exception as e:
            # If the update failed, restore the old data and log the error.
            self.__statistics = temp
            print(f'An exception occurred while trying to update data: {e}')

    # Returns the currently stored data dictionary
    def get_data(self):
        return self.__statistics

    # Parses server names from raw html
    def __parse_name(self, server_names):
        names = []
        for server in server_names:
            names.append(server.find('p').getText())
        return names

    # Parses server types from the raw html (Standard or Preferred)
    def __parse_type(self, server_types):
        types = []
        for item in server_types:
            temp = item.find('p').getText()
            # If the server is offline this will return '--', set to an empty string
            if temp == '--':
                types.append('')
            else:
                types.append(temp)
        return types

    # Parses character creation status from the raw html (True = CC Available, False = CC Unavailable)
    def __parse_char_status(self, server_char_status):
        char_states = []
        for item in server_char_status:
            try:
                state = item.i['data-tooltip']
                if 'Available' in state:
                    char_states.append(True)
                else:
                    char_states.append(False)
            # An exception occurs when the server is offline (item.i is None);
            # append False — no character creation on an offline world.
            except Exception:
                char_states.append(False)
        return char_states

    # Parses whether a server is online from the raw html (True = Online, False = Server offline)
    def __parse_server_online_status(self, server_online_status):
        online_states = []
        for item in server_online_status:
            state = item.i['data-tooltip']
            if 'Online' in state:
                online_states.append(True)
            else:
                online_states.append(False)
        return online_states
14a796965ec7f09f47a00c7f1ad7492a84deaf81 | 1,078 | py | Python | carmesi/nucleo/tests/constant.py | RedGranatum/Carmesi | bde1d4dd104401ba08e7ba2f3de5b9d5f537dd94 | [
"MIT"
] | null | null | null | carmesi/nucleo/tests/constant.py | RedGranatum/Carmesi | bde1d4dd104401ba08e7ba2f3de5b9d5f537dd94 | [
"MIT"
] | null | null | null | carmesi/nucleo/tests/constant.py | RedGranatum/Carmesi | bde1d4dd104401ba08e7ba2f3de5b9d5f537dd94 | [
"MIT"
] | null | null | null |
# Pre-baked JWT fixtures for the test suite.  Each token's payload encodes the
# purpose stated in its name; CADUCO ("expired") carries an already-past `exp`
# claim so expiry handling can be exercised.  Do NOT reuse these outside tests.
TOKEN_PREALTA_CLIENTE = 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJlbWFpbCI6InJhdWx0ckBnbWFpbC5jb20iLCJleHAiOjQ3MzM1MTA0MDAsIm93bmVyX25hbWUiOiJSYXVsIEVucmlxdWUgVG9ycmVzIFJleWVzIiwidHlwZSI6ImVtYWlsX2NvbmZpcm1hdGlvbl9uZXdfY2xpZW50In0.R-nXh1nXvlBABfEdV1g81mdIzJqMFLvFV7FAP7PQRCM'
TOKEN_PREALTA_CLIENTE_CADUCO = 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyIjoibGF0aWVuZGl0YTJAZ2FtaWwuY29tIiwib3duZXJfbmFtZSI6IkFuZ2VsIEdhcmNpYSIsImV4cCI6MTU4NjU3ODg1MCwidHlwZSI6ImVtYWlsX2NvbmZpcm1hdGlvbl9uZXdfY2xpZW50In0.x66iQug11cjmkUHqmZq68gdbN3ffSVyD9MHagrspKRw'
TOKEN_PREALTA_USUARIO = 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJlbWFpbCI6InJhdWx0ckBnbWFpbC5jb20iLCJleHAiOjQ3MzM1MTA0MDAsIm5hbWUiOiJSYXVsIEVucmlxdWUgVG9ycmVzIFJleWVzIiwic2NoZW1hX25hbWUiOiJtaXRpZW5kaXRhIiwidHlwZSI6ImVtYWlsX2NvbmZpcm1hdGlvbl9uZXdfdXNlciJ9.gcagbNxnNxIkgZbP0mu-9MudiFb9b6cKvttPF4EHH5E'
TOKEN_USUARIO_LOGIN = 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJlbWFpbCI6InJhdWx0ckBnbWFpbC5jb20iLCJleHAiOjQ3MzM1MTA0MDAsInNjaGVtYV9uYW1lIjoibWl0aWVuZGl0YSIsInR5cGUiOiJ1c2VyX2xvZ2luIn0.vCdeH0iP94XBucXYtWZvEQq7CuEr-P80SdfIjN673qI'
| 119.777778 | 299 | 0.963822 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 969 | 0.898887 |
14a905270cca517cd153387fad1257a23aa0b010 | 275 | py | Python | futbol-news/backend/app/app/models/search_term.py | davidespicolomina/proyecto-personal | 807445546d493f9c092720e18e5fefa3281da4c9 | [
"Apache-2.0"
] | null | null | null | futbol-news/backend/app/app/models/search_term.py | davidespicolomina/proyecto-personal | 807445546d493f9c092720e18e5fefa3281da4c9 | [
"Apache-2.0"
] | null | null | null | futbol-news/backend/app/app/models/search_term.py | davidespicolomina/proyecto-personal | 807445546d493f9c092720e18e5fefa3281da4c9 | [
"Apache-2.0"
] | null | null | null | from sqlalchemy import Column, Integer, String
from app.db.base_class import Base
class SearchTerm(Base):
    """SQLAlchemy model for a user search term used to filter content.

    ``term`` is unique and indexed.  The ``comment=`` text is stored in the
    database schema itself and is deliberately left in Spanish.
    """
    id = Column(Integer, primary_key=True, index=True)
    term = Column(String, nullable=False, comment="Término de búsqueda para filtros", unique=True, index=True)
| 30.555556 | 110 | 0.76 | 191 | 0.689531 | 0 | 0 | 0 | 0 | 0 | 0 | 36 | 0.129964 |
14abdb3685da3455677e3f4bfa4490aa74a6ac0c | 2,565 | py | Python | trufimonitor/configParser.py | trufi-association/trufi-monitor-backend | 8974a061debe3582605a6e6ec63e4116fe7ef60b | [
"MIT"
] | null | null | null | trufimonitor/configParser.py | trufi-association/trufi-monitor-backend | 8974a061debe3582605a6e6ec63e4116fe7ef60b | [
"MIT"
] | null | null | null | trufimonitor/configParser.py | trufi-association/trufi-monitor-backend | 8974a061debe3582605a6e6ec63e4116fe7ef60b | [
"MIT"
] | null | null | null | """
It converts Strings in the format
# to deactivate commands just comment them out by putting a # to the beginning of the line
# optional commands can be deactivated by putting a # to the lines' beginning and activated by removing # from the beginning
# replace 'example.com' with the hostname or ip address of the server the configuration structure 'servers/trufiwebsite' is for
host=example.com
# replace '22' with the port number the SSH server running on the server listens to and which the firewall allows to pass
port=22
# replace 'example' with the name of the UNIX server on the server to remote log in as
user=example
# optional but required when 'password' has been NOT set. Location to the ssh private key
private_key=./sshkey
# optional but required when 'private_key' has been set. Location to the ssh public key
public_key=./sshkey.pub
# optional but required when 'private_key' has been NOT set. The password to log into the server
password=GhSEs6G(%rfh&54§\"
# if both 'private_key' and 'password' are provided then the key authentication will be tried first.
# It's a general advise not to use password authentication and to deactivate it on the server explicitly.
# If you really can't use key authentication then please use password authentication under the following conditions:
# - contains at least 20 characters
# - contains lower- and uppercase letters, symbols, underscores, minus, special characters like & % ( ) | < > / [ ] =
# Pro tip: Use characters from other alphabetic systems, like the Japanese or Arabic ones, as long as you stick to the UTF-8 codepage.
to a python dictionary
{
"host": "example.com",
"port": "22",
"user": "example",
"private_key": "./sshkey",
"public_key": "./sshkey.pub",
"password": "GhSEs6G(%rfh&54§\""
}
"""
class ConfigParser():
    """Parse a simple ``key=value`` configuration file into a dict.

    Lines whose first non-blank character is ``#`` are comments; anything
    after a ``#`` on a value line is treated as an inline comment and
    stripped.  Blank lines and lines without ``=`` are ignored.  The parsed
    key/value pairs (all plain strings) are stored in ``self.config``.

    Bug fixes versus the naive slice-based parser:
    - the ``#`` character is no longer included in the parsed value;
    - values may themselves contain ``=`` (split on the first ``=`` only);
    - the file handle is closed even if parsing raises;
    - malformed lines no longer raise ``IndexError``.
    """

    def __init__(self, filepath, debug=False):
        # ``debug`` is currently unused; kept for backward compatibility.
        self.filepath = filepath
        self.debug = debug
        self.config = {}
        with open(filepath, "r") as sfile:
            filebuffer = sfile.read()
        for line in filebuffer.split("\n"):
            if line.strip() == "":
                continue
            # 1. strip out any comment starting with '#'
            comment_start = line.find("#")
            if comment_start != -1:
                line = line[:comment_start]
            command = line.strip()
            if command == "":
                continue  # the whole line was a comment
            # 2. split into key and value on the FIRST '=' only, so that
            #    values (e.g. passwords) may themselves contain '='
            if "=" not in command:
                continue  # malformed line: skip instead of crashing
            key, _, value = command.partition("=")
            # 3. map them to a dict (key-value pair, both stripped strings)
            self.config[key.strip()] = value.strip()
| 40.078125 | 131 | 0.727096 | 795 | 0.309579 | 0 | 0 | 0 | 0 | 0 | 0 | 1,977 | 0.76986 |
14ac2d2492de50193750cbad1ef4a4ffc1cccf0a | 618 | py | Python | app/core/serializers.py | jblanquicett92/django_celery_app | 5b0069905ec721cc1632611f08c733a5b93d20d8 | [
"MIT"
] | null | null | null | app/core/serializers.py | jblanquicett92/django_celery_app | 5b0069905ec721cc1632611f08c733a5b93d20d8 | [
"MIT"
] | null | null | null | app/core/serializers.py | jblanquicett92/django_celery_app | 5b0069905ec721cc1632611f08c733a5b93d20d8 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from .models import Event, Notification
class EventSerializer(serializers.ModelSerializer):
    """Serialize ``Event`` instances, hiding internal bookkeeping fields."""

    class Meta:
        model = Event
        # Expose everything except the primary key and move/receive metadata.
        exclude = ("id", "moved_to", "received_timestamp")
class EventExcludeIDSerializer(serializers.ModelSerializer):
    """Serialize ``Event`` without its primary key (explicit field list)."""

    class Meta:
        model = Event
        fields = (
            "file_name",
            "file_path",
            "moved_to",
            "received_timestamp",
        )
class NotificationSerializer(serializers.ModelSerializer):
    """Serialize ``Notification`` with its related event nested inline."""

    # Nested representation of the related event, without the event's id.
    event_data = EventExcludeIDSerializer()

    class Meta:
        model = Notification
        exclude = ("id",)
| 25.75 | 77 | 0.669903 | 505 | 0.817152 | 0 | 0 | 0 | 0 | 0 | 0 | 90 | 0.145631 |
14ae087d34a01217e2efdb31229417cab63aa812 | 3,458 | py | Python | modules/plugin_tablecheckbox.py | jredrejo/sqlabs | 2cf39ff924579e72bc0092f2a6d65214dafd4bfe | [
"MIT"
] | 1 | 2017-12-01T22:46:33.000Z | 2017-12-01T22:46:33.000Z | modules/plugin_tablecheckbox.py | jredrejo/sqlabs | 2cf39ff924579e72bc0092f2a6d65214dafd4bfe | [
"MIT"
] | null | null | null | modules/plugin_tablecheckbox.py | jredrejo/sqlabs | 2cf39ff924579e72bc0092f2a6d65214dafd4bfe | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# This plugins is licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# Authors: Kenji Hosoda <hosoda@s-cubism.jp>
from gluon import *
class TableCheckbox(FORM):
    """A web2py FORM providing a checkbox column for tabular data.

    Renders a hidden field plus a submit button that is enabled only while
    at least one row checkbox is ticked; on submit, the checked row values
    are copied client-side into the hidden field (jQuery joins the array
    with commas) and parsed back into a list in :meth:`accepts`.

    :param id_getter: callable mapping a row to the value submitted for it
        (defaults to ``row.id``)
    :param tablecheckbox_var: base name for the generated form variables
    :param confirm_message: JavaScript expression passed to ``confirm()``
        before submitting; pass a falsy value to skip the confirmation
    :param submit_button: label of the submit button
    """
    def __init__(self, id_getter=lambda row: row.id,
                 tablecheckbox_var='tablecheckbox',
                 confirm_message='"Are you sure you want to submit?"',
                 submit_button='Submit checks',
                 **attributes):
        FORM.__init__(self, **attributes)
        self.id_getter = id_getter
        self.attributes['_class'] = 'tablecheckbox'
        self.tablecheckbox_var, self.confirm_message, self.submit_button = (
            tablecheckbox_var, confirm_message, submit_button
        )
        # Derived element names: the "check all" header box, the per-row
        # boxes, and the submit button id.
        self._checkall = '%s_checkall' % self.tablecheckbox_var
        self._selected = '%s_selected' % self.tablecheckbox_var
        self._button = '%s_button' % self.tablecheckbox_var
        # Enable/disable the submit button whenever any checkbox changes
        # (the setTimeout lets the browser update checkbox state first).
        self.append(SCRIPT("""
jQuery(document).ready(function(){
    var selected_el = jQuery("input[name=%(selected)s]");
    function set_activation(){setTimeout(function(){
        var button_el = jQuery('#%(button)s');
        selected_el.each(function(){
            if(jQuery(this).is(':checked')) { button_el.prop({disabled: false}); return false;
            } else { button_el.prop({disabled: true}); }}); }, 10); }
    selected_el.change(set_activation);
    jQuery("input[name=%(checkall)s]").change(set_activation);
});""" % dict(checkall=self._checkall, selected=self._selected, button=self._button)))
        # Hidden field receives the comma-separated checked values on submit.
        self.append(INPUT(_type='hidden', _name=self.tablecheckbox_var))
        self.append(INPUT(_type='submit', _value=self.submit_button,
                          _onclick=self._get_submit_js(),
                          _id=self._button, _disabled='disabled'))
    def column(self):
        """Return a column descriptor (label/content/width/class/selected)
        with a check-all header box and one checkbox per row, valued by
        ``id_getter(row)``. Presumably consumed by a companion table
        plugin — confirm against callers."""
        return {'label': DIV(INPUT(_type='checkbox', _name=self._checkall,
                                   _onclick=self._get_toggle_all_js()),
                             _style='text-align:center;'),
                'content': lambda row, rc: DIV(INPUT(_type='checkbox', _name=self._selected,
                                                     _value=self.id_getter(row), _style='margin:3px;'),
                                               _style='text-align:center;'),
                'width': '', 'class': '', 'selected': False}
    def accepts(self, *args, **kwds):
        """Delegate to ``FORM.accepts``; on success, split the posted
        comma-separated value string into a list in ``self.vars``."""
        accepted = FORM.accepts(self, *args, **kwds)
        if accepted:
            self.vars[self.tablecheckbox_var] = current.request.vars[self.tablecheckbox_var].split(',')
        return accepted
    def xml(self):
        """Serialize the form exactly as a plain FORM."""
        return FORM.xml(self)
    def _get_toggle_all_js(self):
        """JS for the header checkbox: set every row checkbox to its state."""
        return """
jQuery('input[name=%(selected)s]').prop('checked', jQuery('input[name=%(checkall)s]').is(':checked'));
""" % dict(checkall=self._checkall, selected=self._selected)
    def _get_submit_js(self):
        """JS for the submit button: optionally confirm, then collect the
        checked values into the hidden field; returns false to cancel the
        submission when the confirmation is rejected."""
        return """
if(%(confirm)s){
    var val = [];
    jQuery("input[name=%(selected)s]").each(function(){
        var el = jQuery(this);
        if(el.is(':checked')) { val.push(el.val()); }
    });
    jQuery("input[name=%(tablecheckbox)s]").val(val);
    return true;
;}; return false;""" % dict(confirm='confirm(%s)' % self.confirm_message if self.confirm_message else 'true',
                            selected=self._selected,
                            tablecheckbox=self.tablecheckbox_var)
14b1286bb9090e5e7de52578dcf3d83c33bdb3b1 | 2,781 | py | Python | src/py_dss_interface/models/Sensors/SensorsV.py | davilamds/py_dss_interface | a447c97787aeac962381db88dd622ccb235eef4b | [
"MIT"
] | 8 | 2020-08-15T12:56:03.000Z | 2022-01-04T15:51:14.000Z | src/py_dss_interface/models/Sensors/SensorsV.py | rodolfoplondero/py_dss_interface | cb6771b34ed322a5df7ef1cc194611e794f26441 | [
"MIT"
] | 24 | 2021-04-24T18:33:19.000Z | 2021-11-13T14:59:54.000Z | src/py_dss_interface/models/Sensors/SensorsV.py | rodolfoplondero/py_dss_interface | cb6771b34ed322a5df7ef1cc194611e794f26441 | [
"MIT"
] | 7 | 2020-08-15T12:56:04.000Z | 2021-10-04T16:14:30.000Z | # -*- encoding: utf-8 -*-
"""
Created by eniocc at 11/10/2020
"""
import ctypes
from py_dss_interface.models import Bridge
from py_dss_interface.models.Base import Base
from py_dss_interface.models.Sensors.SensorsS import SensorsS
from py_dss_interface.models.Text.Text import Text
class SensorsV(Base):
    """
    Variant-returning accessors for Sensor objects of the active DSS circuit.

    Reads are routed through the native call
        void SensorsV(int32_t Parameter, VARIANT *Argument);
    where the integer parameter selects the query. Writes are issued as DSS
    text commands ("edit Sensor.<name> ...") against the active sensor.
    """

    def _query(self, parameter):
        """Run a SensorsV read for ``parameter`` and return the variant array."""
        return Bridge.var_array_function(self.dss_obj.SensorsV, ctypes.c_int(parameter), ctypes.c_int(0), None)

    def _edit_active_sensor(self, field, argument):
        """Apply ``field = argument`` to the active sensor via a text command."""
        argument = Base.check_string_param(argument)
        active_name = SensorsS(self.dss_obj).sensors_read_name()
        return Text(self.dss_obj).text(f'edit Sensor.{active_name} {field} = {argument}')

    def sensors_all_names(self):
        """Returns a variant array of sensor names."""
        return self._query(0)

    def sensors_read_currents(self):
        """Gets an array of doubles for the line current measurements; don't use with KWS and KVARS."""
        return self._query(1)

    def sensors_write_currents(self, argument):
        """Sets an array of doubles for the line current measurements; don't use with KWS and KVARS."""
        return self._edit_active_sensor('currents', argument)

    def sensors_read_kvars(self):
        """Gets an array of doubles for Q measurements; overwrites currents with a new estimate using KWS."""
        return self._query(3)

    def sensors_write_kvars(self, argument):
        """Sets an array of doubles for Q measurements; overwrites currents with a new estimate using KWS."""
        return self._edit_active_sensor('kvars', argument)

    def sensors_read_kws(self):
        """Gets an array of doubles for P measurements; overwrites currents with a new estimate using KVARS."""
        return self._query(5)

    def sensors_write_kws(self, argument):
        """Sets an array of doubles for P measurements; overwrites currents with a new estimate using KVARS."""
        return self._edit_active_sensor('kws', argument)
| 44.142857 | 115 | 0.703344 | 2,494 | 0.8968 | 0 | 0 | 0 | 0 | 0 | 0 | 1,211 | 0.435455 |
14b28f425c17976ebe25adcac758c6934615dd00 | 25,967 | py | Python | netapp/santricity/models/v2/__init__.py | NetApp/santricity-webapi-pythonsdk | 1d3df4a00561192f4cdcdd1890f4d27547ed2de2 | [
"BSD-3-Clause-Clear"
] | 5 | 2016-08-23T17:52:22.000Z | 2019-05-16T08:45:30.000Z | netapp/santricity/models/v2/__init__.py | NetApp/santricity-webapi-pythonsdk | 1d3df4a00561192f4cdcdd1890f4d27547ed2de2 | [
"BSD-3-Clause-Clear"
] | 2 | 2016-11-10T05:30:21.000Z | 2019-04-05T15:03:37.000Z | netapp/santricity/models/v2/__init__.py | NetApp/santricity-webapi-pythonsdk | 1d3df4a00561192f4cdcdd1890f4d27547ed2de2 | [
"BSD-3-Clause-Clear"
] | 7 | 2016-08-25T16:11:44.000Z | 2021-02-22T05:31:25.000Z | from __future__ import absolute_import
# import models into model package
from netapp.santricity.models.v2.access_volume_ex import AccessVolumeEx
from netapp.santricity.models.v2.add_batch_cg_members_request import AddBatchCGMembersRequest
from netapp.santricity.models.v2.add_consistency_group_member_request import AddConsistencyGroupMemberRequest
from netapp.santricity.models.v2.add_storage_system_return import AddStorageSystemReturn
from netapp.santricity.models.v2.alert_syslog_configuration import AlertSyslogConfiguration
from netapp.santricity.models.v2.alert_syslog_response import AlertSyslogResponse
from netapp.santricity.models.v2.alert_syslog_server import AlertSyslogServer
from netapp.santricity.models.v2.amg import Amg
from netapp.santricity.models.v2.amg_incomplete_member import AmgIncompleteMember
from netapp.santricity.models.v2.amg_member import AmgMember
from netapp.santricity.models.v2.analysed_controller_statistics import AnalysedControllerStatistics
from netapp.santricity.models.v2.analysed_disk_statistics import AnalysedDiskStatistics
from netapp.santricity.models.v2.analysed_storage_system_statistics import AnalysedStorageSystemStatistics
from netapp.santricity.models.v2.analysed_volume_statistics import AnalysedVolumeStatistics
from netapp.santricity.models.v2.analyzed_application_statistics import AnalyzedApplicationStatistics
from netapp.santricity.models.v2.analyzed_interface_statistics import AnalyzedInterfaceStatistics
from netapp.santricity.models.v2.analyzed_pool_statistics import AnalyzedPoolStatistics
from netapp.santricity.models.v2.analyzed_workload_statistics import AnalyzedWorkloadStatistics
from netapp.santricity.models.v2.application_statistics import ApplicationStatistics
from netapp.santricity.models.v2.asup_dispatch_request import AsupDispatchRequest
from netapp.santricity.models.v2.asup_entry import AsupEntry
from netapp.santricity.models.v2.asup_registration_request import AsupRegistrationRequest
from netapp.santricity.models.v2.asup_response import AsupResponse
from netapp.santricity.models.v2.asup_update_request import AsupUpdateRequest
from netapp.santricity.models.v2.async_communication_data import AsyncCommunicationData
from netapp.santricity.models.v2.async_mirror_connections_response import AsyncMirrorConnectionsResponse
from netapp.santricity.models.v2.async_mirror_group_connectivity_test_request import AsyncMirrorGroupConnectivityTestRequest
from netapp.santricity.models.v2.async_mirror_group_create_request import AsyncMirrorGroupCreateRequest
from netapp.santricity.models.v2.async_mirror_group_member_completion_request import AsyncMirrorGroupMemberCompletionRequest
from netapp.santricity.models.v2.async_mirror_group_member_create_request import AsyncMirrorGroupMemberCreateRequest
from netapp.santricity.models.v2.async_mirror_group_role_update_request import AsyncMirrorGroupRoleUpdateRequest
from netapp.santricity.models.v2.async_mirror_group_sync_request import AsyncMirrorGroupSyncRequest
from netapp.santricity.models.v2.async_mirror_group_update_request import AsyncMirrorGroupUpdateRequest
from netapp.santricity.models.v2.async_mirror_remote_connection import AsyncMirrorRemoteConnection
from netapp.santricity.models.v2.audit_log_configuration import AuditLogConfiguration
from netapp.santricity.models.v2.audit_log_delete_response import AuditLogDeleteResponse
from netapp.santricity.models.v2.audit_log_get_response import AuditLogGetResponse
from netapp.santricity.models.v2.audit_log_info_response import AuditLogInfoResponse
from netapp.santricity.models.v2.audit_log_record import AuditLogRecord
from netapp.santricity.models.v2.average_analysed_application_stats import AverageAnalysedApplicationStats
from netapp.santricity.models.v2.average_analysed_controller_stats import AverageAnalysedControllerStats
from netapp.santricity.models.v2.average_analysed_drive_stats import AverageAnalysedDriveStats
from netapp.santricity.models.v2.average_analysed_interface_stats import AverageAnalysedInterfaceStats
from netapp.santricity.models.v2.average_analysed_pool_stats import AverageAnalysedPoolStats
from netapp.santricity.models.v2.average_analysed_stats_response import AverageAnalysedStatsResponse
from netapp.santricity.models.v2.average_analysed_system_controller_stats import AverageAnalysedSystemControllerStats
from netapp.santricity.models.v2.average_analysed_system_stats import AverageAnalysedSystemStats
from netapp.santricity.models.v2.average_analysed_value import AverageAnalysedValue
from netapp.santricity.models.v2.average_analysed_volume_stats import AverageAnalysedVolumeStats
from netapp.santricity.models.v2.average_analysed_workload_stats import AverageAnalysedWorkloadStats
from netapp.santricity.models.v2.battery_ex import BatteryEx
from netapp.santricity.models.v2.bind_lookup_user import BindLookupUser
from netapp.santricity.models.v2.cfw_package_metadata import CFWPackageMetadata
from netapp.santricity.models.v2.cg_snapshot_view_request import CGSnapshotViewRequest
from netapp.santricity.models.v2.cv_candidate_multiple_selection_request import CVCandidateMultipleSelectionRequest
from netapp.santricity.models.v2.cv_candidate_response import CVCandidateResponse
from netapp.santricity.models.v2.cv_candidate_selection_request import CVCandidateSelectionRequest
from netapp.santricity.models.v2.call_response import CallResponse
from netapp.santricity.models.v2.capabilities_response import CapabilitiesResponse
from netapp.santricity.models.v2.cfw_activation_request import CfwActivationRequest
from netapp.santricity.models.v2.cfw_upgrade_request import CfwUpgradeRequest
from netapp.santricity.models.v2.cfw_upgrade_response import CfwUpgradeResponse
from netapp.santricity.models.v2.concat_repository_volume import ConcatRepositoryVolume
from netapp.santricity.models.v2.concat_volume_candidate_request import ConcatVolumeCandidateRequest
from netapp.santricity.models.v2.concat_volume_expansion_request import ConcatVolumeExpansionRequest
from netapp.santricity.models.v2.configuration_db_validation_check import ConfigurationDbValidationCheck
from netapp.santricity.models.v2.configuration_result import ConfigurationResult
from netapp.santricity.models.v2.configuration_result_item import ConfigurationResultItem
from netapp.santricity.models.v2.consistency_group_create_request import ConsistencyGroupCreateRequest
from netapp.santricity.models.v2.consistency_group_update_request import ConsistencyGroupUpdateRequest
from netapp.santricity.models.v2.controller_stats import ControllerStats
from netapp.santricity.models.v2.create_cg_snapshot_view_manual_request import CreateCGSnapshotViewManualRequest
from netapp.santricity.models.v2.create_consistency_group_snapshot_request import CreateConsistencyGroupSnapshotRequest
from netapp.santricity.models.v2.create_consistency_group_snapshot_view_request import CreateConsistencyGroupSnapshotViewRequest
from netapp.santricity.models.v2.current_firmware_response import CurrentFirmwareResponse
from netapp.santricity.models.v2.device_alert_configuration import DeviceAlertConfiguration
from netapp.santricity.models.v2.device_alert_test_response import DeviceAlertTestResponse
from netapp.santricity.models.v2.device_asup_delivery import DeviceAsupDelivery
from netapp.santricity.models.v2.device_asup_device import DeviceAsupDevice
from netapp.santricity.models.v2.device_asup_response import DeviceAsupResponse
from netapp.santricity.models.v2.device_asup_schedule import DeviceAsupSchedule
from netapp.santricity.models.v2.device_asup_update_request import DeviceAsupUpdateRequest
from netapp.santricity.models.v2.device_asup_verify_request import DeviceAsupVerifyRequest
from netapp.santricity.models.v2.device_asup_verify_response import DeviceAsupVerifyResponse
from netapp.santricity.models.v2.device_data_response import DeviceDataResponse
from netapp.santricity.models.v2.diagnostic_data_request import DiagnosticDataRequest
from netapp.santricity.models.v2.discover_response import DiscoverResponse
from netapp.santricity.models.v2.discovered_storage_system import DiscoveredStorageSystem
from netapp.santricity.models.v2.discovery_start_request import DiscoveryStartRequest
from netapp.santricity.models.v2.disk_io_stats import DiskIOStats
from netapp.santricity.models.v2.disk_pool_priority_update_request import DiskPoolPriorityUpdateRequest
from netapp.santricity.models.v2.disk_pool_reduction_request import DiskPoolReductionRequest
from netapp.santricity.models.v2.disk_pool_threshold_update_request import DiskPoolThresholdUpdateRequest
from netapp.santricity.models.v2.drive_ex import DriveEx
from netapp.santricity.models.v2.drive_firmware_compatability_entry import DriveFirmwareCompatabilityEntry
from netapp.santricity.models.v2.drive_firmware_compatibility_response import DriveFirmwareCompatibilityResponse
from netapp.santricity.models.v2.drive_firmware_compatiblity_set import DriveFirmwareCompatiblitySet
from netapp.santricity.models.v2.drive_firmware_update_entry import DriveFirmwareUpdateEntry
from netapp.santricity.models.v2.drive_selection_request import DriveSelectionRequest
from netapp.santricity.models.v2.ekms_communication_response import EKMSCommunicationResponse
from netapp.santricity.models.v2.embedded_compatibility_check_response import EmbeddedCompatibilityCheckResponse
from netapp.santricity.models.v2.embedded_firmware_response import EmbeddedFirmwareResponse
from netapp.santricity.models.v2.embedded_local_user_info_response import EmbeddedLocalUserInfoResponse
from netapp.santricity.models.v2.embedded_local_user_request import EmbeddedLocalUserRequest
from netapp.santricity.models.v2.embedded_local_user_response import EmbeddedLocalUserResponse
from netapp.santricity.models.v2.embedded_local_users_min_password_request import EmbeddedLocalUsersMinPasswordRequest
from netapp.santricity.models.v2.enable_disable_ekms_request import EnableDisableEkmsRequest
from netapp.santricity.models.v2.enable_external_key_server_request import EnableExternalKeyServerRequest
from netapp.santricity.models.v2.enumeration_string import EnumerationString
from netapp.santricity.models.v2.esm_fibre_port_connection import EsmFibrePortConnection
from netapp.santricity.models.v2.esm_port_connection_response import EsmPortConnectionResponse
from netapp.santricity.models.v2.esm_sas_port_connection import EsmSasPortConnection
from netapp.santricity.models.v2.event import Event
from netapp.santricity.models.v2.event_object_identifier import EventObjectIdentifier
from netapp.santricity.models.v2.exclusive_operation_check import ExclusiveOperationCheck
from netapp.santricity.models.v2.external_key_manager_csr import ExternalKeyManagerCSR
from netapp.santricity.models.v2.failure_data import FailureData
from netapp.santricity.models.v2.fibre_interface_port import FibreInterfacePort
from netapp.santricity.models.v2.file_based_configuration_request import FileBasedConfigurationRequest
from netapp.santricity.models.v2.file_config_item import FileConfigItem
from netapp.santricity.models.v2.file_info import FileInfo
from netapp.santricity.models.v2.firmware_compatibility_request import FirmwareCompatibilityRequest
from netapp.santricity.models.v2.firmware_compatibility_response import FirmwareCompatibilityResponse
from netapp.santricity.models.v2.firmware_compatibility_set import FirmwareCompatibilitySet
from netapp.santricity.models.v2.firmware_upgrade_health_check_result import FirmwareUpgradeHealthCheckResult
from netapp.santricity.models.v2.flash_cache_create_request import FlashCacheCreateRequest
from netapp.santricity.models.v2.flash_cache_ex import FlashCacheEx
from netapp.santricity.models.v2.flash_cache_update_request import FlashCacheUpdateRequest
from netapp.santricity.models.v2.folder import Folder
from netapp.santricity.models.v2.folder_create_request import FolderCreateRequest
from netapp.santricity.models.v2.folder_event import FolderEvent
from netapp.santricity.models.v2.folder_update_request import FolderUpdateRequest
from netapp.santricity.models.v2.group_mapping import GroupMapping
from netapp.santricity.models.v2.hardware_inventory_response import HardwareInventoryResponse
from netapp.santricity.models.v2.health_check_failure_response import HealthCheckFailureResponse
from netapp.santricity.models.v2.health_check_request import HealthCheckRequest
from netapp.santricity.models.v2.health_check_response import HealthCheckResponse
from netapp.santricity.models.v2.historical_stats_response import HistoricalStatsResponse
from netapp.santricity.models.v2.host_create_request import HostCreateRequest
from netapp.santricity.models.v2.host_ex import HostEx
from netapp.santricity.models.v2.host_group import HostGroup
from netapp.santricity.models.v2.host_group_create_request import HostGroupCreateRequest
from netapp.santricity.models.v2.host_group_update_request import HostGroupUpdateRequest
from netapp.santricity.models.v2.host_move_request import HostMoveRequest
from netapp.santricity.models.v2.host_port_create_request import HostPortCreateRequest
from netapp.santricity.models.v2.host_port_update_request import HostPortUpdateRequest
from netapp.santricity.models.v2.host_side_port import HostSidePort
from netapp.santricity.models.v2.host_type import HostType
from netapp.santricity.models.v2.host_type_values import HostTypeValues
from netapp.santricity.models.v2.host_update_request import HostUpdateRequest
from netapp.santricity.models.v2.ib_interface_port import IBInterfacePort
from netapp.santricity.models.v2.i_scsi_interface_port import IScsiInterfacePort
from netapp.santricity.models.v2.identification_request import IdentificationRequest
from netapp.santricity.models.v2.initial_async_response import InitialAsyncResponse
from netapp.santricity.models.v2.interface_stats import InterfaceStats
from netapp.santricity.models.v2.iom_service_info_response import IomServiceInfoResponse
from netapp.santricity.models.v2.iom_service_update_request import IomServiceUpdateRequest
from netapp.santricity.models.v2.iscsi_entity_response import IscsiEntityResponse
from netapp.santricity.models.v2.iscsi_entity_update_request import IscsiEntityUpdateRequest
from netapp.santricity.models.v2.iscsi_target_response import IscsiTargetResponse
from netapp.santricity.models.v2.iscsi_target_update_request import IscsiTargetUpdateRequest
from netapp.santricity.models.v2.job_progress import JobProgress
from netapp.santricity.models.v2.key_value import KeyValue
from netapp.santricity.models.v2.ldap_configuration import LdapConfiguration
from netapp.santricity.models.v2.ldap_domain import LdapDomain
from netapp.santricity.models.v2.ldap_domain_test_response import LdapDomainTestResponse
from netapp.santricity.models.v2.legacy_snapshot_create_request import LegacySnapshotCreateRequest
from netapp.santricity.models.v2.legacy_snapshot_ex import LegacySnapshotEx
from netapp.santricity.models.v2.legacy_snapshot_update_request import LegacySnapshotUpdateRequest
from netapp.santricity.models.v2.level import Level
from netapp.santricity.models.v2.local_user_password_request import LocalUserPasswordRequest
from netapp.santricity.models.v2.locale import Locale
from netapp.santricity.models.v2.localized_log_message import LocalizedLogMessage
from netapp.santricity.models.v2.lockdown_status_response import LockdownStatusResponse
from netapp.santricity.models.v2.log_record import LogRecord
from netapp.santricity.models.v2.logger_record_response import LoggerRecordResponse
from netapp.santricity.models.v2.management_configuration_request import ManagementConfigurationRequest
from netapp.santricity.models.v2.management_interface import ManagementInterface
from netapp.santricity.models.v2.mappable_object import MappableObject
from netapp.santricity.models.v2.mel_entry_ex import MelEntryEx
from netapp.santricity.models.v2.mel_event_health_check import MelEventHealthCheck
from netapp.santricity.models.v2.metadata_change_event import MetadataChangeEvent
from netapp.santricity.models.v2.nv_meo_f_entity_update_request import NVMeoFEntityUpdateRequest
from netapp.santricity.models.v2.nvsram_package_metadata import NvsramPackageMetadata
from netapp.santricity.models.v2.object_change_event import ObjectChangeEvent
from netapp.santricity.models.v2.object_graph_change_event import ObjectGraphChangeEvent
from netapp.santricity.models.v2.object_graph_sync_check import ObjectGraphSyncCheck
from netapp.santricity.models.v2.operation_progress import OperationProgress
from netapp.santricity.models.v2.pitcg_member import PITCGMember
from netapp.santricity.models.v2.password_set_request import PasswordSetRequest
from netapp.santricity.models.v2.password_status_event import PasswordStatusEvent
from netapp.santricity.models.v2.password_status_response import PasswordStatusResponse
from netapp.santricity.models.v2.pit_view_ex import PitViewEx
from netapp.santricity.models.v2.pool_qos_response import PoolQosResponse
from netapp.santricity.models.v2.pool_statistics import PoolStatistics
from netapp.santricity.models.v2.private_file_info import PrivateFileInfo
from netapp.santricity.models.v2.progress import Progress
from netapp.santricity.models.v2.raid_migration_request import RaidMigrationRequest
from netapp.santricity.models.v2.raw_stats_response import RawStatsResponse
from netapp.santricity.models.v2.relative_distinguished_name import RelativeDistinguishedName
from netapp.santricity.models.v2.relative_distinguished_name_attribute import RelativeDistinguishedNameAttribute
from netapp.santricity.models.v2.remote_candidate import RemoteCandidate
from netapp.santricity.models.v2.remote_communication_data import RemoteCommunicationData
from netapp.santricity.models.v2.remote_mirror_candidate import RemoteMirrorCandidate
from netapp.santricity.models.v2.remote_mirror_pair import RemoteMirrorPair
from netapp.santricity.models.v2.remote_volume_mirror_create_request import RemoteVolumeMirrorCreateRequest
from netapp.santricity.models.v2.remote_volume_mirror_update_request import RemoteVolumeMirrorUpdateRequest
from netapp.santricity.models.v2.removable_drive_response import RemovableDriveResponse
from netapp.santricity.models.v2.resource_bundle import ResourceBundle
from netapp.santricity.models.v2.role_permission_data import RolePermissionData
from netapp.santricity.models.v2.roles_response import RolesResponse
from netapp.santricity.models.v2.rule import Rule
from netapp.santricity.models.v2.ssl_cert_configuration import SSLCertConfiguration
from netapp.santricity.models.v2.sas_interface_port import SasInterfacePort
from netapp.santricity.models.v2.save_config_spec import SaveConfigSpec
from netapp.santricity.models.v2.schedule_create_request import ScheduleCreateRequest
from netapp.santricity.models.v2.secure_volume_external_key_response import SecureVolumeExternalKeyResponse
from netapp.santricity.models.v2.secure_volume_key_request import SecureVolumeKeyRequest
from netapp.santricity.models.v2.secure_volume_key_response import SecureVolumeKeyResponse
from netapp.santricity.models.v2.serializable import Serializable
from netapp.santricity.models.v2.session_settings import SessionSettings
from netapp.santricity.models.v2.session_settings_response import SessionSettingsResponse
from netapp.santricity.models.v2.single_number_value import SingleNumberValue
from netapp.santricity.models.v2.snapshot import Snapshot
from netapp.santricity.models.v2.snapshot_create_request import SnapshotCreateRequest
from netapp.santricity.models.v2.snapshot_group import SnapshotGroup
from netapp.santricity.models.v2.snapshot_group_create_request import SnapshotGroupCreateRequest
from netapp.santricity.models.v2.snapshot_group_update_request import SnapshotGroupUpdateRequest
from netapp.santricity.models.v2.snapshot_view_create_request import SnapshotViewCreateRequest
from netapp.santricity.models.v2.snapshot_view_update_request import SnapshotViewUpdateRequest
from netapp.santricity.models.v2.snapshot_volume_mode_conversion_request import SnapshotVolumeModeConversionRequest
from netapp.santricity.models.v2.software_version import SoftwareVersion
from netapp.santricity.models.v2.software_versions import SoftwareVersions
from netapp.santricity.models.v2.spm_database_health_check import SpmDatabaseHealthCheck
from netapp.santricity.models.v2.ssc_volume_create_request import SscVolumeCreateRequest
from netapp.santricity.models.v2.ssc_volume_update_request import SscVolumeUpdateRequest
from netapp.santricity.models.v2.stack_trace_element import StackTraceElement
from netapp.santricity.models.v2.staged_firmware_response import StagedFirmwareResponse
from netapp.santricity.models.v2.storage_device_health_check import StorageDeviceHealthCheck
from netapp.santricity.models.v2.storage_device_status_event import StorageDeviceStatusEvent
from netapp.santricity.models.v2.storage_pool_create_request import StoragePoolCreateRequest
from netapp.santricity.models.v2.storage_pool_expansion_request import StoragePoolExpansionRequest
from netapp.santricity.models.v2.storage_pool_update_request import StoragePoolUpdateRequest
from netapp.santricity.models.v2.storage_system_attributes import StorageSystemAttributes
from netapp.santricity.models.v2.storage_system_config_response import StorageSystemConfigResponse
from netapp.santricity.models.v2.storage_system_config_update_request import StorageSystemConfigUpdateRequest
from netapp.santricity.models.v2.storage_system_controller_stats import StorageSystemControllerStats
from netapp.santricity.models.v2.storage_system_create_request import StorageSystemCreateRequest
from netapp.santricity.models.v2.storage_system_response import StorageSystemResponse
from netapp.santricity.models.v2.storage_system_stats import StorageSystemStats
from netapp.santricity.models.v2.storage_system_update_request import StorageSystemUpdateRequest
from netapp.santricity.models.v2.subject_alternate_name import SubjectAlternateName
from netapp.santricity.models.v2.support_artifact import SupportArtifact
from netapp.santricity.models.v2.support_artifacts import SupportArtifacts
from netapp.santricity.models.v2.support_data_request import SupportDataRequest
from netapp.santricity.models.v2.support_data_response import SupportDataResponse
from netapp.santricity.models.v2.symbol_port_request import SymbolPortRequest
from netapp.santricity.models.v2.symbol_port_response import SymbolPortResponse
from netapp.santricity.models.v2.tag_event import TagEvent
from netapp.santricity.models.v2.thin_volume_cache_settings import ThinVolumeCacheSettings
from netapp.santricity.models.v2.thin_volume_create_request import ThinVolumeCreateRequest
from netapp.santricity.models.v2.thin_volume_ex import ThinVolumeEx
from netapp.santricity.models.v2.thin_volume_expansion_request import ThinVolumeExpansionRequest
from netapp.santricity.models.v2.thin_volume_update_request import ThinVolumeUpdateRequest
from netapp.santricity.models.v2.throwable import Throwable
from netapp.santricity.models.v2.trace_buffer_spec import TraceBufferSpec
from netapp.santricity.models.v2.tray_ex import TrayEx
from netapp.santricity.models.v2.unassociated_host_port import UnassociatedHostPort
from netapp.santricity.models.v2.unreadable_sector_entry_result import UnreadableSectorEntryResult
from netapp.santricity.models.v2.unreadable_sector_response import UnreadableSectorResponse
from netapp.santricity.models.v2.upgrade_manager_response import UpgradeManagerResponse
from netapp.santricity.models.v2.user_volume import UserVolume
from netapp.santricity.models.v2.validate_configuration_file_response_item import ValidateConfigurationFileResponseItem
from netapp.santricity.models.v2.validate_confiuration_file_response import ValidateConfiurationFileResponse
from netapp.santricity.models.v2.version_content import VersionContent
from netapp.santricity.models.v2.volume_action_progress_response import VolumeActionProgressResponse
from netapp.santricity.models.v2.volume_cache_settings import VolumeCacheSettings
from netapp.santricity.models.v2.volume_copy_create_request import VolumeCopyCreateRequest
from netapp.santricity.models.v2.volume_copy_pair import VolumeCopyPair
from netapp.santricity.models.v2.volume_copy_progress import VolumeCopyProgress
from netapp.santricity.models.v2.volume_copy_update_request import VolumeCopyUpdateRequest
from netapp.santricity.models.v2.volume_create_request import VolumeCreateRequest
from netapp.santricity.models.v2.volume_ex import VolumeEx
from netapp.santricity.models.v2.volume_expansion_request import VolumeExpansionRequest
from netapp.santricity.models.v2.volume_group_ex import VolumeGroupEx
from netapp.santricity.models.v2.volume_io_stats import VolumeIOStats
from netapp.santricity.models.v2.volume_mapping_create_request import VolumeMappingCreateRequest
from netapp.santricity.models.v2.volume_mapping_move_request import VolumeMappingMoveRequest
from netapp.santricity.models.v2.volume_metadata_item import VolumeMetadataItem
from netapp.santricity.models.v2.volume_update_request import VolumeUpdateRequest
from netapp.santricity.models.v2.workload_attribute import WorkloadAttribute
from netapp.santricity.models.v2.workload_copy_request import WorkloadCopyRequest
from netapp.santricity.models.v2.workload_create_request import WorkloadCreateRequest
from netapp.santricity.models.v2.workload_model import WorkloadModel
from netapp.santricity.models.v2.workload_statistics import WorkloadStatistics
from netapp.santricity.models.v2.workload_update_request import WorkloadUpdateRequest
from netapp.santricity.models.v2.x509_cert_info import X509CertInfo
from netapp.santricity.models.v2.x509_external_cert_info import X509ExternalCertInfo
| 85.69967 | 129 | 0.89606 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 0.001348 |
14b3ea58e56ed94f8934f892735450dec9e7e14d | 629 | py | Python | config/wsgi.py | e2718281/template_test | 3d47741e657138b1ccfee7af19476a796a099b2b | [
"MIT"
] | null | null | null | config/wsgi.py | e2718281/template_test | 3d47741e657138b1ccfee7af19476a796a099b2b | [
"MIT"
] | null | null | null | config/wsgi.py | e2718281/template_test | 3d47741e657138b1ccfee7af19476a796a099b2b | [
"MIT"
] | null | null | null | """
WSGI config for test_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
import sys

from django.core.wsgi import get_wsgi_application

# Allow apps placed inside the interior test_project directory to be imported.
# Use dirname() twice instead of str.replace('/config', ''): replace() breaks
# on Windows path separators and would also strip '/config' occurring anywhere
# else in the absolute path.
app_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.join(app_path, 'test_project'))

# Point Django at the base settings module unless the caller overrides it.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.base")

# Module-level WSGI callable that WSGI servers discover.
application = get_wsgi_application()
| 27.347826 | 78 | 0.779014 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 379 | 0.602544 |
14b53da5ae7dc9a0ddd6d2cbcafa9f97c4f9a304 | 747 | py | Python | app/app/migrations/0001_initial.py | poornachandrakashi/covid-cough-prediction | 3466d21c1e9e9931484db486116afe8f591e6ab8 | [
"MIT"
] | 3 | 2020-04-05T21:09:07.000Z | 2022-02-15T15:23:37.000Z | app/app/migrations/0001_initial.py | poornachandrakashi/covid-cough-prediction | 3466d21c1e9e9931484db486116afe8f591e6ab8 | [
"MIT"
] | 2 | 2020-06-06T01:42:31.000Z | 2021-06-10T22:43:54.000Z | app/app/migrations/0001_initial.py | poornachandrakashi/covid-cough-prediction | 3466d21c1e9e9931484db486116afe8f591e6ab8 | [
"MIT"
] | 3 | 2020-04-08T12:53:47.000Z | 2021-08-10T11:10:32.000Z | # Generated by Django 2.1.1 on 2020-04-05 06:12
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the Response model for cough uploads."""
    # First migration in this app's history, so no dependencies.
    initial = True
    dependencies = [
    ]
    operations = [
        # A survey response: contact details plus an uploaded cough recording.
        migrations.CreateModel(
            name='Response',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('location', models.CharField(max_length=200)),
                ('email', models.EmailField(max_length=200)),
                ('cough', models.FileField(upload_to='')),
                ('uploaded_at', models.DateTimeField(auto_now_add=True)),
            ],
        ),
    ]
| 28.730769 | 114 | 0.570281 | 654 | 0.875502 | 0 | 0 | 0 | 0 | 0 | 0 | 110 | 0.147256 |
14b6e3bd8ac3a7eb4e7e8620a55ce89ce7b5721c | 1,534 | py | Python | apps/brew/settings.py | martync/zython | e008bbb33e212f0856e85b8594003402e0a635c0 | [
"Beerware"
] | null | null | null | apps/brew/settings.py | martync/zython | e008bbb33e212f0856e85b8594003402e0a635c0 | [
"Beerware"
] | 5 | 2020-06-05T21:26:16.000Z | 2022-01-13T01:21:27.000Z | apps/brew/settings.py | martync/zython | e008bbb33e212f0856e85b8594003402e0a635c0 | [
"Beerware"
] | null | null | null | SRM_TO_HEX = {
"0": "#FFFFFF",
"1": "#F3F993",
"2": "#F5F75C",
"3": "#F6F513",
"4": "#EAE615",
"5": "#E0D01B",
"6": "#D5BC26",
"7": "#CDAA37",
"8": "#C1963C",
"9": "#BE8C3A",
"10": "#BE823A",
"11": "#C17A37",
"12": "#BF7138",
"13": "#BC6733",
"14": "#B26033",
"15": "#A85839",
"16": "#985336",
"17": "#8D4C32",
"18": "#7C452D",
"19": "#6B3A1E",
"20": "#5D341A",
"21": "#4E2A0C",
"22": "#4A2727",
"23": "#361F1B",
"24": "#261716",
"25": "#231716",
"26": "#19100F",
"27": "#16100F",
"28": "#120D0C",
"29": "#100B0A",
"30": "#050B0A"
}
WATER_L_PER_GRAIN_KG = 2.5
MAIN_STYLES = {
"1": "LIGHT LAGER",
"2": "PILSNER",
"3": "EUROPEAN AMBER LAGER",
"4": "DARK LAGER",
"5": "BOCK",
"6": "LIGHT HYBRID BEER",
"7": "AMBER HYBRID BEER",
"8": "ENGLISH PALE ALE",
"9": "SCOTTISH AND IRISH ALE",
"10": "AMERICAN ALE",
"11": "ENGLISH BROWN ALE",
"12": "PORTER",
"13": "STOUT",
"14": "INDIA PALE ALE (IPA)",
"15": "GERMAN WHEAT AND RYE BEER",
"16": "BELGIAN AND FRENCH ALE",
"17": "SOUR ALE",
"18": "BELGIAN STRONG ALE",
"19": "STRONG ALE",
"20": "FRUIT BEER",
"21": "SPICE / HERB / VEGETABLE BEER",
"22": "SMOKE-FLAVORED AND WOOD-AGED BEER",
"23": "SPECIALTY BEER",
"24": "TRADITIONAL MEAD",
"25": "MELOMEL (FRUIT MEAD)",
"26": "OTHER MEAD",
"27": "STANDARD CIDER AND PERRY",
"28": "SPECIALTY CIDER AND PERRY"
}
| 22.895522 | 46 | 0.468057 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,000 | 0.65189 |
14b757978821b5341ed6a4a277fcfd2e75bc9742 | 107 | py | Python | egs/codeswitching/asr/local_yzl23/test_libsndfile.py | luyizhou4/espnet | a408b9372df3f57ef33b8a378a8d9abc7f872cf5 | [
"Apache-2.0"
] | null | null | null | egs/codeswitching/asr/local_yzl23/test_libsndfile.py | luyizhou4/espnet | a408b9372df3f57ef33b8a378a8d9abc7f872cf5 | [
"Apache-2.0"
] | null | null | null | egs/codeswitching/asr/local_yzl23/test_libsndfile.py | luyizhou4/espnet | a408b9372df3f57ef33b8a378a8d9abc7f872cf5 | [
"Apache-2.0"
] | null | null | null | from ctypes.util import find_library as _find_library
# Probe for the system libsndfile shared library and report the result.
library_path = _find_library('sndfile')
print(library_path)
print('test fine')
| 17.833333 | 53 | 0.794393 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 20 | 0.186916 |
14b9762b78c281460ee5ec96e6878f3d8ee83597 | 561 | py | Python | tests/test_utils.py | vnmabus/incense | 6542c7cb082e313f4caa77fdb04be65ebc15bc65 | [
"MIT"
] | 78 | 2019-01-23T10:50:18.000Z | 2022-03-26T15:17:18.000Z | tests/test_utils.py | vnmabus/incense | 6542c7cb082e313f4caa77fdb04be65ebc15bc65 | [
"MIT"
] | 59 | 2018-12-31T18:13:13.000Z | 2021-08-25T15:24:28.000Z | tests/test_utils.py | vnmabus/incense | 6542c7cb082e313f4caa77fdb04be65ebc15bc65 | [
"MIT"
] | 6 | 2019-06-25T18:48:00.000Z | 2021-04-12T18:51:38.000Z | from incense import utils
def test_find_differing_config_keys(loader):
    """Each experiment pair's differing config keys are detected."""
    cases = [
        ([1, 2], {"epochs"}),
        ([1, 3], {"optimizer"}),
        ([2, 3], {"epochs", "optimizer"}),
    ]
    for ids, expected_keys in cases:
        experiments = loader.find_by_ids(ids)
        assert utils.find_differing_config_keys(experiments) == expected_keys
def test_format_config(loader):
    """Selected config entries render as 'key=value' joined by ' | '."""
    experiment = loader.find_by_id(2)
    formatted_pair = utils.format_config(experiment, "epochs", "optimizer")
    assert formatted_pair == "epochs=3 | optimizer=sgd"
    formatted_single = utils.format_config(experiment, "epochs")
    assert formatted_single == "epochs=3"
| 40.071429 | 98 | 0.727273 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 101 | 0.180036 |
14b9e8b4dcb1e20a307af26c71e8f867c0782a0d | 285 | py | Python | tdd/run.py | LarsAsplund/vunit_tdd | db16c32968675abdacc9939e3573f37a2cbaf431 | [
"MIT"
] | 10 | 2020-08-12T22:27:15.000Z | 2022-03-31T13:34:12.000Z | tdd/run.py | LarsAsplund/vunit_tdd | db16c32968675abdacc9939e3573f37a2cbaf431 | [
"MIT"
] | 7 | 2021-09-06T05:30:07.000Z | 2021-09-08T02:25:41.000Z | tdd/run.py | LarsAsplund/vunit_tdd | db16c32968675abdacc9939e3573f37a2cbaf431 | [
"MIT"
] | 3 | 2021-05-27T11:31:45.000Z | 2021-05-28T07:22:08.000Z | #!/usr/bin/env python3
"""VUnit run script."""
from pathlib import Path
from vunit import VUnit
prj = VUnit.from_argv()
lib = prj.add_library("lib")
root = Path(__file__).parent
lib.add_source_files(root / "src" / "*.vhd")
lib.add_source_files(root / "test" / "*.vhd")
prj.main()
| 17.8125 | 45 | 0.687719 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 75 | 0.263158 |
14bc1271077b75e0d1cf036fe2b3bbfce7418f99 | 6,420 | py | Python | tests/test_regular.py | atiqm/adapt | af9833cb7e698bdcb722941622d67c06f04822f7 | [
"BSD-2-Clause"
] | null | null | null | tests/test_regular.py | atiqm/adapt | af9833cb7e698bdcb722941622d67c06f04822f7 | [
"BSD-2-Clause"
] | null | null | null | tests/test_regular.py | atiqm/adapt | af9833cb7e698bdcb722941622d67c06f04822f7 | [
"BSD-2-Clause"
] | null | null | null | """
Test functions for regular module.
"""
import pytest
import numpy as np
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.base import clone
import tensorflow as tf
from tensorflow.keras import Sequential, Model
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
from adapt.parameter_based import (RegularTransferLR,
RegularTransferLC,
RegularTransferNN)
# Deterministic synthetic data shared by all tests in this module.
np.random.seed(0)
# Source inputs: two clusters, around 0 and around 1 (shape (100, 1)).
Xs = np.concatenate((
    np.random.randn(50)*0.1,
    np.random.randn(50)*0.1 + 1.,
)).reshape(-1, 1)
# Target inputs: a single cluster around 0 (shape (100, 1)).
Xt = (np.random.randn(100) * 0.1).reshape(-1, 1)
# Regression targets: slope 0.2 below x=0.5, constant 10 above.
ys_reg = np.array([0.2 * x if x<0.5 else
                   10 for x in Xs.ravel()]).reshape(-1, 1)
yt_reg = np.array([0.2 * x if x<0.5 else
                   10 for x in Xt.ravel()]).reshape(-1, 1)
# Classification targets in {-1, +1}, derived from the sign of a threshold test.
ys_classif = np.sign(np.array(
    [x<0 if x<0.5 else x<1 for x in Xs.ravel()]
).astype(float) - 0.5).reshape(-1, 1)
yt_classif = np.sign(np.array(
    [x<0 if x<0.5 else x<1 for x in Xt.ravel()]
).astype(float) - 0.5).reshape(-1, 1)
def _get_network(input_shape=(1,), output_shape=(1,)):
    """Build a single bias-free Dense layer compiled with MSE and Adam(0.1)."""
    linear_layer = Dense(np.prod(output_shape),
                         input_shape=input_shape,
                         use_bias=False)
    network = Sequential()
    network.add(linear_layer)
    network.compile(loss="mse", optimizer=Adam(0.1))
    return network
def test_setup():
    """Sanity-check the synthetic data with plain scikit-learn estimators."""
    # A source-only linear fit lands near the dominant plateau value of 10.
    regressor = LinearRegression(fit_intercept=False)
    regressor.fit(Xs, ys_reg)
    assert np.abs(regressor.coef_[0][0] - 10) < 1
    # A source-only classifier transfers poorly to the target domain.
    classifier = LogisticRegression(penalty='none', solver='lbfgs')
    classifier.fit(Xs, ys_classif)
    assert (classifier.predict(Xt) == yt_classif.ravel()).sum() < 70
def test_regularlr_fit():
    """RegularTransferLR interpolates between target-only and source fits."""
    np.random.seed(0)
    source_model = LinearRegression(fit_intercept=False)
    source_model.fit(Xs, ys_reg)
    # lambda_=0: no pull towards the source, pure target fit (slope ~0.2).
    transfer = RegularTransferLR(source_model, lambda_=0.)
    transfer.fit(Xt, yt_reg)
    assert np.abs(transfer.estimator_.coef_[0] - 0.2) < 1
    assert np.abs(transfer.predict(Xt) - yt_reg).sum() < 2
    # Huge lambda_: coefficients pinned to the source estimator (slope ~10).
    transfer = RegularTransferLR(source_model, lambda_=1000000)
    transfer.fit(Xt, yt_reg)
    assert np.abs(transfer.estimator_.coef_[0] - 10) < 1
    assert np.abs(transfer.estimator_.coef_[0] - source_model.coef_[0]) < 0.001
    # Intermediate lambda_: coefficients land between the two extremes.
    transfer = RegularTransferLR(source_model, lambda_=1.)
    transfer.fit(Xt, yt_reg)
    assert np.abs(transfer.estimator_.coef_[0] - 4) < 1
def test_regularlr_multioutput():
    """RegularTransferLR supports multi-output regression targets."""
    np.random.seed(0)
    features = np.random.randn(100, 5)+2.
    # Two-column target taken directly from the first two features.
    targets = features[:, :2]
    base = LinearRegression()
    base.fit(features, targets)
    transfer = RegularTransferLR(base, lambda_=1.)
    transfer.fit(features, targets)
    assert np.abs(transfer.predict(features) - targets).sum() < 2
    # Coefficient/intercept shapes must reflect the two outputs.
    assert np.all(transfer.coef_.shape == (2, 5))
    assert np.all(transfer.intercept_.shape == (2,))
    assert transfer.score(features, targets) > 0.9
def test_regularlr_error():
    """Mismatched feature/target dimensions raise informative ValueErrors."""
    np.random.seed(0)
    source_X = np.random.randn(100, 5)
    target_X = np.random.randn(100, 5)
    source_y = np.random.randn(100)
    target_y = np.random.randn(100)
    base = LinearRegression()
    base.fit(source_X, source_y)
    transfer = RegularTransferLR(base, lambda_=1.)
    transfer.fit(target_X, target_y)
    # Wrong number of features.
    with pytest.raises(ValueError) as excinfo:
        transfer.fit(np.random.randn(100, 4), target_y)
    assert "expected 5, got 4" in str(excinfo.value)
    # Wrong number of outputs.
    with pytest.raises(ValueError) as excinfo:
        transfer.fit(target_X, np.random.randn(100, 2))
    assert "expected 1, got 2" in str(excinfo.value)
def test_regularlc_fit():
    """RegularTransferLC interpolates between target-only and source classifiers."""
    np.random.seed(0)
    source_clf = LogisticRegression(penalty='none', solver='lbfgs')
    source_clf.fit(Xs, ys_classif)
    # lambda_=0: pure target fit, high target accuracy.
    transfer = RegularTransferLC(source_clf, lambda_=0)
    transfer.fit(Xt, yt_classif)
    assert (transfer.predict(Xt) == yt_classif.ravel()).sum() > 90
    # Huge lambda_: parameters pinned to the source classifier (poor transfer).
    transfer = RegularTransferLC(source_clf, lambda_=100000000)
    transfer.fit(Xt, yt_classif)
    assert (transfer.predict(Xt) == yt_classif.ravel()).sum() < 70
    assert np.abs(transfer.estimator_.coef_[0][0] - source_clf.coef_[0][0]) < 0.001
    assert np.abs(transfer.estimator_.intercept_ - source_clf.intercept_[0]) < 0.001
    # Moderate lambda_: still accurate on the target domain.
    transfer = RegularTransferLC(source_clf, lambda_=1.2)
    transfer.fit(Xt, yt_classif)
    assert (transfer.predict(Xt) == yt_classif.ravel()).sum() > 95
def test_regularlc_multiclass():
    """RegularTransferLC supports a three-class classification problem."""
    np.random.seed(0)
    features = np.random.randn(100, 5)
    # Three classes defined by the signs of two feature-sum rules.
    labels = np.zeros(len(features))
    labels[features[:, :2].sum(1)<0] = 1
    labels[features[:, 3:].sum(1)>0] = 2
    base = LogisticRegression(penalty='none', solver='lbfgs')
    base.fit(features, labels)
    transfer = RegularTransferLC(base, lambda_=1.)
    transfer.fit(features, labels)
    assert (transfer.predict(features) == labels).sum() > 90
    # One coefficient row and intercept per class.
    assert np.all(transfer.coef_.shape == (3, 5))
    assert np.all(transfer.intercept_.shape == (3,))
    assert transfer.score(features, labels) > 0.9
def test_regularnn_fit():
    """RegularTransferNN interpolates between target-only and source networks."""
    tf.random.set_seed(0)
    np.random.seed(0)
    source_net = _get_network()
    source_net.fit(Xs, ys_reg, epochs=100, batch_size=100, verbose=0)
    # lambdas=0: weights free to move away from the source network.
    transfer = RegularTransferNN(source_net, lambdas=0., optimizer=Adam(0.1))
    transfer.fit(Xt, yt_reg, epochs=100, batch_size=100, verbose=0)
    assert np.sum(np.abs(source_net.get_weights()[0] - transfer.get_weights()[0])) > 4.
    assert np.abs(transfer.predict(Xt) - yt_reg).sum() < 10
    # Huge lambdas: weights pinned to the source network, poor target fit.
    transfer = RegularTransferNN(source_net, lambdas=10000000., optimizer=Adam(0.1))
    transfer.fit(Xt, yt_reg, epochs=100, batch_size=100, verbose=0)
    assert np.sum(np.abs(source_net.get_weights()[0] - transfer.get_weights()[0])) < 0.001
    assert np.abs(transfer.predict(Xt) - yt_reg).sum() > 10
def test_regularnn_reg():
    """Only 'l1'/'l2' regularizers are accepted by RegularTransferNN."""
    tf.random.set_seed(0)
    np.random.seed(0)
    source_net = _get_network()
    source_net.fit(Xs, ys_reg, epochs=100, batch_size=100, verbose=0)
    # 'l1' is a valid regularizer and fits without error.
    transfer = RegularTransferNN(source_net, regularizer="l1")
    transfer.fit(Xt, yt_reg, epochs=100, batch_size=100, verbose=0)
    # An unknown regularizer name is rejected at construction time.
    with pytest.raises(ValueError) as excinfo:
        transfer = RegularTransferNN(source_net, regularizer="l3")
    assert "l1' or 'l2', got, l3" in str(excinfo.value)
def test_clone():
    """sklearn.base.clone yields an independent, usable copy of adapt models."""
    features = np.random.randn(100, 5)
    labels = np.random.choice(2, 100)
    cases = [
        (LinearRegression(), RegularTransferLR),
        (LogisticRegression(penalty='none', solver='lbfgs'), RegularTransferLC),
    ]
    for base, wrapper_cls in cases:
        base.fit(features, labels)
        wrapped = wrapper_cls(base, lambda_=1.)
        wrapped.fit(features, labels)
        duplicate = clone(wrapped)
        # The clone must be fittable and usable on its own.
        duplicate.fit(features, labels)
        duplicate.predict(features)
        assert wrapped is not duplicate
14bd6078e39ef46714d7bb697f11da50734262f2 | 1,810 | py | Python | tests/test_env_var.py | sfelix-martins/laradock-up-env | 7e7e3e513083afedf724a9b4e2dd8c6ff0b9eb71 | [
"MIT"
] | 2 | 2020-10-06T15:40:43.000Z | 2020-11-27T12:13:10.000Z | tests/test_env_var.py | sfelix-martins/laradock-up-env | 7e7e3e513083afedf724a9b4e2dd8c6ff0b9eb71 | [
"MIT"
] | 5 | 2019-11-10T12:08:35.000Z | 2019-11-10T13:34:54.000Z | tests/test_env_var.py | sfelix-martins/laradock-multiple-env | 7e7e3e513083afedf724a9b4e2dd8c6ff0b9eb71 | [
"MIT"
] | 1 | 2020-11-27T12:13:13.000Z | 2020-11-27T12:13:13.000Z | import unittest
from multienv.config import Config
from multienv.env_var import EnvVar
from multienv.exceptions import InvalidYamlFileException, \
EnvVarContainerBuildNotFoundException
class EnvVarTestCase(unittest.TestCase):
    """Behaviour of EnvVar.get_containers_to_rebuild for various configs."""

    def test_get_containers_to_rebuild_with_existent_env_var(self):
        """A mapped env var returns its configured container list."""
        config = Config(
            env_var_container_build='tests/fixtures/env_var_container_build.yml')
        env_var = EnvVar('PHP_VERSION', 7.1, config)
        containers = env_var.get_containers_to_rebuild()
        self.assertEqual(containers, ['php-fpm', 'workspace'])

    def test_get_containers_to_rebuild_with_not_exists_env_var(self):
        """An unmapped env var yields an empty container list."""
        config = Config(
            env_var_container_build='tests/fixtures/env_var_container_build.yml')
        env_var = EnvVar('MYSQL_VERSION', 5.7, config)
        self.assertEqual(env_var.get_containers_to_rebuild(), [])

    def test_get_containers_to_rebuild_with_invalid_config(self):
        """A malformed YAML mapping file raises InvalidYamlFileException."""
        with self.assertRaises(InvalidYamlFileException):
            config = Config(
                env_var_container_build='tests/fixtures/invalid_env_var_container_build.yml')
            EnvVar('MYSQL_VERSION', 5.7, config).get_containers_to_rebuild()

    def test_get_containers_to_rebuild_with_not_existent_config(self):
        """A missing mapping file raises EnvVarContainerBuildNotFoundException."""
        with self.assertRaises(EnvVarContainerBuildNotFoundException):
            config = Config(
                env_var_container_build='not_found/env_var_container_build.yml')
            EnvVar('MYSQL_VERSION', 5.7, config).get_containers_to_rebuild()
# Allow running this test module directly with `python`.
if __name__ == '__main__':
    unittest.main()
| 39.347826 | 79 | 0.649724 | 1,568 | 0.866298 | 0 | 0 | 0 | 0 | 0 | 0 | 275 | 0.151934 |
14bd7f91251b03987f5221c2978864b319af6c5c | 3,097 | py | Python | Payload_Type/apollo/mythic/agent_functions/assembly_inject.py | n0pe-sled/Apollo | cfc5804d163e1b47f6614321434a717b2bd2066f | [
"BSD-3-Clause"
] | null | null | null | Payload_Type/apollo/mythic/agent_functions/assembly_inject.py | n0pe-sled/Apollo | cfc5804d163e1b47f6614321434a717b2bd2066f | [
"BSD-3-Clause"
] | null | null | null | Payload_Type/apollo/mythic/agent_functions/assembly_inject.py | n0pe-sled/Apollo | cfc5804d163e1b47f6614321434a717b2bd2066f | [
"BSD-3-Clause"
] | null | null | null | from mythic_payloadtype_container.MythicCommandBase import *
import json
from uuid import uuid4
from os import path
from mythic_payloadtype_container.MythicRPC import *
import base64
import donut
class AssemblyInjectArguments(TaskArguments):
    """Argument parser for the assembly_inject command.

    Accepts either a JSON dictionary (modal tasking) or a positional command
    line of the form: [pid] [assembly] [args...].
    """

    def __init__(self, command_line):
        super().__init__(command_line)
        self.args = {
            "pid": CommandParameter(name="PID", type=ParameterType.Number, description="Process ID to inject into."),
            "assembly_name": CommandParameter(name="Assembly Name", type=ParameterType.String, description="Name of the assembly to execute."),
            "assembly_arguments": CommandParameter(name="Assembly Arguments", type=ParameterType.String, description="Arguments to pass to the assembly."),
        }

    async def parse_arguments(self):
        """Populate self.args from JSON or a space-separated command line.

        Raises:
            Exception: if the command line is empty or has fewer than two
                positional arguments.
        """
        # Guard the empty command line before indexing it; previously this
        # raised an unhelpful IndexError.
        if not self.command_line:
            raise Exception("Invalid number of arguments.\n\tUsage: {}".format(AssemblyInjectCommand.help_cmd))
        if self.command_line[0] == "{":
            # JSON tasking path (e.g. from the Mythic UI modal).
            self.load_args_from_json_string(self.command_line)
        else:
            # Positional path: pid, assembly name, optional argument string.
            parts = self.command_line.split(" ", maxsplit=2)
            if len(parts) < 2:
                raise Exception("Invalid number of arguments.\n\tUsage: {}".format(AssemblyInjectCommand.help_cmd))
            self.args["pid"].value = parts[0]
            self.args["assembly_name"].value = parts[1]
            # Everything after the second space is passed verbatim to the assembly.
            self.args["assembly_arguments"].value = parts[2] if len(parts) > 2 else ""
class AssemblyInjectCommand(CommandBase):
    """Mythic command definition for assembly_inject (ATT&CK T1055)."""
    cmd = "assembly_inject"
    needs_admin = False
    help_cmd = "assembly_inject [pid] [assembly] [args]"
    description = "Inject the unmanaged assembly loader into a remote process. The loader will then execute the .NET binary in the context of the injected process."
    version = 2
    is_exit = False
    is_file_browse = False
    is_process_list = False
    is_download_file = False
    is_upload_file = False
    is_remove_file = False
    author = "@djhohnstein"
    argument_class = AssemblyInjectArguments
    attackmapping = ["T1055"]
    async def create_tasking(self, task: MythicTask) -> MythicTask:
        """Build the loader shellcode with donut and register it with Mythic.

        Adds "pipe_name" and "loader_stub_id" args to the task; raises if the
        file registration RPC fails.
        """
        # NOTE(review): "arch" is read here but never used before being
        # removed below, and it is not declared in AssemblyInjectArguments --
        # confirm whether this is dead code or a leftover from an older
        # arch-specific loader build.
        arch = task.args.get_arg("arch")
        # Fresh named-pipe name the loader uses to hand output back.
        pipe_name = str(uuid4())
        task.args.add_arg("pipe_name", pipe_name)
        exePath = "/srv/ExecuteAssembly.exe"
        # Turn the loader EXE into position-independent shellcode with donut,
        # passing the pipe name as its runtime parameter.
        donutPic = donut.create(file=exePath, params=task.args.get_arg("pipe_name"))
        # Register the shellcode blob with Mythic so the agent can fetch it once.
        file_resp = await MythicRPC().execute("create_file",
                                              task_id=task.id,
                                              file=base64.b64encode(donutPic).decode(),
                                              delete_after_fetch=True)
        if file_resp.status == MythicStatus.Success:
            task.args.add_arg("loader_stub_id", file_resp.response['agent_file_id'])
        else:
            raise Exception("Failed to register execute-assembly DLL: " + file_resp.error)
        task.args.remove_arg("arch")
        return task
    async def process_response(self, response: AgentResponse):
        """No special handling of agent responses is required."""
        pass
| 41.293333 | 164 | 0.638037 | 2,887 | 0.932192 | 0 | 0 | 0 | 0 | 1,704 | 0.55021 | 643 | 0.20762 |
14bdf9d8a47eb3b241902145e3f586e258032f0c | 6,211 | py | Python | Lab_Week_05_-_Value_Functions,_Policies_and_Policy_Iteration/Solutions/recycling_robot/recycling_robot_environment.py | annasu1225/COMP0037-21_22 | e98e8d278b35ee0550e6c09b35ab08b23e60ca82 | [
"Apache-2.0"
] | null | null | null | Lab_Week_05_-_Value_Functions,_Policies_and_Policy_Iteration/Solutions/recycling_robot/recycling_robot_environment.py | annasu1225/COMP0037-21_22 | e98e8d278b35ee0550e6c09b35ab08b23e60ca82 | [
"Apache-2.0"
] | null | null | null | Lab_Week_05_-_Value_Functions,_Policies_and_Policy_Iteration/Solutions/recycling_robot/recycling_robot_environment.py | annasu1225/COMP0037-21_22 | e98e8d278b35ee0550e6c09b35ab08b23e60ca82 | [
"Apache-2.0"
] | null | null | null | '''
Created on 4 Feb 2022
@author: ucacsjj
'''
import random
from enum import Enum
import numpy as np
from gym import Env, spaces
from .robot_states_and_actions import *
# This environment affords a much lower level control of the robot than the
# battery environment. It is partially inspired by the AI Gymn Frozen Lake
# example.
class RecyclingRobotEnvironment(Env):
    """Gym environment for a Sutton & Barto style recycling robot.

    States are battery charge levels (DISCHARGED/LOW/MEDIUM/HIGH); actions
    are SEARCH, WAIT and RECHARGE. The episode ends when the battery is
    fully discharged.
    """

    def __init__(self):
        # Discrete action and observation (battery state) spaces.
        self.action_space = spaces.Discrete(RobotActions.NUMBER_OF_ACTIONS)
        self.observation_space = spaces.Discrete(RobotBatteryState.NUMBER_OF_STATES)

        # Probability of discharging high => medium
        self._alpha = 0.4
        # Probability of discharging medium => low
        self._beta = 0.1
        # Probability of discharging low => discharged
        self._gamma = 0.1
        # Probability of charging up a level low => medium, medium => high
        self._delta = 0.9

        # Rewards for each action, plus the terminal discharge penalty.
        self._r_search = 10
        self._r_wait = 5
        self._r_charge = 0
        self._r_discharged = -20

        # State transition table. The dictionary is keyed by (s, a). Each
        # value is a tuple of the conditional probabilities of ending in
        # (DISCHARGED, LOW, MEDIUM, HIGH), conditioned on s and a.
        self._state_transition_table = {
            # New state when a=SEARCH
            (RobotBatteryState.HIGH, RobotActions.SEARCH) : \
                (0, self._alpha / 3, 2 * self._alpha / 3, 1 - self._alpha),
            (RobotBatteryState.MEDIUM, RobotActions.SEARCH) : \
                (0, self._beta, 1 - self._beta, 0),
            (RobotBatteryState.LOW, RobotActions.SEARCH) : \
                (self._gamma, 1 - self._gamma, 0 , 0),
            (RobotBatteryState.DISCHARGED, RobotActions.SEARCH) : \
                (0, 0, 0, 0),
            # a = WAIT: the battery level never changes.
            (RobotBatteryState.HIGH, RobotActions.WAIT) : \
                (0, 0, 0, 1),
            (RobotBatteryState.MEDIUM, RobotActions.WAIT) : \
                (0, 0 ,1, 0),
            (RobotBatteryState.LOW, RobotActions.WAIT) : \
                (0, 1, 0, 0),
            (RobotBatteryState.DISCHARGED, RobotActions.WAIT) : \
                (0, 0, 0, 0),
            # a = RECHARGE: charge rises one level with probability delta.
            (RobotBatteryState.HIGH, RobotActions.RECHARGE) : \
                (0, 0, 0, 1),
            (RobotBatteryState.MEDIUM, RobotActions.RECHARGE) : \
                (0, 0, 1 - self._delta, self._delta),
            (RobotBatteryState.LOW, RobotActions.RECHARGE) : \
                (0, 1 - self._delta, self._delta, 0),
            (RobotBatteryState.DISCHARGED, RobotActions.RECHARGE) : \
                (0, 0, 0, 0)
        }

        # Rewards are a function of the action only, not of the state.
        self._action_reward_table = {
            RobotActions.SEARCH : self._r_search,
            RobotActions.WAIT: self._r_wait,
            RobotActions.RECHARGE: self._r_charge,
            RobotActions.TERMINATE: self._r_discharged
        }

        # Start from the initial (fully charged) state.
        self.reset()

    def reset(self):
        """Reset the robot to a fully charged battery."""
        self._battery_state = RobotBatteryState.HIGH

    def initial_value_function(self):
        """Return the initial value function: zero everywhere except the
        terminal DISCHARGED state, which carries the discharge penalty."""
        v_initial = np.zeros(RobotBatteryState.NUMBER_OF_STATES)
        v_initial[RobotBatteryState.DISCHARGED] = self._r_discharged
        return v_initial

    def initial_policy(self):
        """Return a uniform random policy over SEARCH, WAIT and RECHARGE
        for each non-terminal battery state."""
        pi_initial = {
            RobotBatteryState.HIGH: (0, 1/3, 1/3, 1/3),
            RobotBatteryState.MEDIUM: (0, 1/3, 1/3, 1/3),
            RobotBatteryState.LOW: (0, 1/3, 1/3, 1/3)}
        return pi_initial

    def step(self, action):
        """Sample the next battery state for the given action.

        Returns:
            (observation, reward, done, info) following the gym Env.step
            contract. The episode terminates when the battery discharges.
        """
        # From the (s, a) pair, get the appropriate row in the table.
        transition_key = (self._battery_state, action)
        assert transition_key in self._state_transition_table

        # Probabilities are ordered (DISCHARGED, LOW, MEDIUM, HIGH) to match
        # the table layout and next_state_and_reward_distribution().
        p = self._state_transition_table[transition_key]

        # The reward depends only on the action taken ...
        reward = self._action_reward_table[action]

        # ... sample the next state via inverse-CDF on the row.
        sample = random.random()
        done = False
        if sample < p[0]:
            self._battery_state = RobotBatteryState.DISCHARGED
            # Running flat overrides the action reward with the penalty and
            # terminates the episode.
            reward = self._r_discharged
            done = True
        elif sample < p[0] + p[1]:
            self._battery_state = RobotBatteryState.LOW
        elif sample < p[0] + p[1] + p[2]:
            self._battery_state = RobotBatteryState.MEDIUM
        else:
            self._battery_state = RobotBatteryState.HIGH

        return self._battery_state, reward, done, {}

    def next_state_and_reward_distribution(self, state, action):
        """Return (next_states, reward, probabilities) for a given (s, a).

        next_states is ordered (DISCHARGED, LOW, MEDIUM, HIGH) and aligns
        index-for-index with the probability tuple.
        """
        # From the (s, a) pair, get the appropriate row in the table.
        transition_key = (state, action)
        assert transition_key in self._state_transition_table

        s_prime = [RobotBatteryState.DISCHARGED, RobotBatteryState.LOW, \
                   RobotBatteryState.MEDIUM, RobotBatteryState.HIGH]

        # Transition probabilities and the (action-only) reward.
        p = self._state_transition_table[transition_key]
        r = self._action_reward_table[action]

        return s_prime, r, p
| 33.572973 | 84 | 0.555144 | 5,853 | 0.94236 | 0 | 0 | 0 | 0 | 0 | 0 | 1,380 | 0.222186 |
14c105a1705141babfa862aab74d81e82f18db15 | 753 | py | Python | arc/queues.py | arc-repos/arc-functions-python | 796e7661afa069c3a2f7bc609fd0c0af512244cb | [
"Apache-2.0"
] | 4 | 2020-05-21T04:55:02.000Z | 2020-12-22T02:17:30.000Z | arc/queues.py | arc-repos/arc-functions-python | 796e7661afa069c3a2f7bc609fd0c0af512244cb | [
"Apache-2.0"
] | 10 | 2020-02-04T02:00:47.000Z | 2021-06-25T15:34:47.000Z | arc/queues.py | arc-repos/arc-functions-python | 796e7661afa069c3a2f7bc609fd0c0af512244cb | [
"Apache-2.0"
] | 3 | 2020-03-02T22:17:18.000Z | 2021-03-11T09:50:52.000Z | import boto3
import json
import urllib.request
import os
from . import reflect
def publish(name, payload):
    """Publish an event to an Architect queue.

    In sandbox mode (NODE_ENV == "testing") the event is POSTed to the local
    sandbox queue endpoint; otherwise it is sent to the deployed SQS queue
    looked up from the service map.

    Args:
        name: Logical queue name as declared in the Architect manifest.
        payload: JSON-serializable message body.

    Returns:
        The sandbox response body (str) on success in testing mode, the
        request payload bytes if the sandbox call fails (preserved fallback
        behavior), or the boto3 send_message response dict otherwise.
    """
    if os.environ.get("NODE_ENV") == "testing":
        # Serialize before the try block so a non-serializable payload
        # surfaces as a TypeError instead of the except branch hitting a
        # NameError on the unbound `data` it returns.
        data = json.dumps({"name": name, "payload": payload}).encode()
        try:
            handler = urllib.request.urlopen("http://localhost:3334/queues", data)
            return handler.read().decode("utf-8")
        except Exception as e:
            print("arc.queues.publish to sandbox failed: " + str(e))
            return data
    else:
        # Deployed mode: resolve the queue URL from the service map.
        arc = reflect()
        arn = arc["queues"][name]
        sqs = boto3.client("sqs")
        return sqs.send_message(
            QueueUrl=arn, MessageBody=json.dumps(payload), DelaySeconds=0
        )
| 28.961538 | 82 | 0.584329 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 124 | 0.164675 |
1ad1eb88994e214fd47ed40845356d1110f90028 | 1,927 | py | Python | format_database.py | CS3244-Group10/NameABird | e41c1c5a38b062943b1f6c0d8b51afe80b13c4ca | [
"MIT"
] | null | null | null | format_database.py | CS3244-Group10/NameABird | e41c1c5a38b062943b1f6c0d8b51afe80b13c4ca | [
"MIT"
] | null | null | null | format_database.py | CS3244-Group10/NameABird | e41c1c5a38b062943b1f6c0d8b51afe80b13c4ca | [
"MIT"
] | 2 | 2018-04-08T12:04:59.000Z | 2018-04-13T05:25:09.000Z | import os
import numpy as np
from scipy.misc import imread, imresize
def load_image_labels(dataset_path=''):
labels = {}
with open(os.path.join(dataset_path, 'image_class_labels.txt')) as f:
for line in f:
pieces = line.strip().split()
image_id = pieces[0]
class_id = pieces[1]
labels[image_id] = int(class_id)
return labels
def load_image_paths(dataset_path='', path_prefix=''):
paths = {}
with open(os.path.join(dataset_path, 'images.txt')) as f:
for line in f:
pieces = line.strip().split()
image_id = pieces[0]
path = os.path.join(path_prefix, pieces[1])
paths[image_id] = path
return paths
def load_train_test_split(dataset_path=''):
train_images = []
test_images = []
with open(os.path.join(dataset_path, 'train_test_split.txt')) as f:
for line in f:
pieces = line.strip().split()
image_id = pieces[0]
is_train = int(pieces[1])
if is_train > 0:
train_images.append(image_id)
else:
test_images.append(image_id)
return train_images, test_images
def format_dataset(dataset_path, image_path_prefix):
    """Load the dataset metadata and images for train/test splits.

    Returns (X_train, Y_train, X_test, Y_test) where images are resized to
    224x224 float32 arrays and labels are shifted to be 0-based.
    """
    image_paths = load_image_paths(dataset_path, image_path_prefix)
    image_labels = load_image_labels(dataset_path)
    train_ids, test_ids = load_train_test_split(dataset_path)

    def _load_split(image_ids):
        # Read and resize every image of one split; collect 0-based labels.
        images, labels = [], []
        for image_id in image_ids:
            images.append(imresize(imread(image_paths[image_id], mode="RGB"), (224, 224)))
            labels.append(image_labels[image_id] - 1)
        return images, labels

    X_train, Y_train = _load_split(train_ids)
    X_test, Y_test = _load_split(test_ids)
    return (np.array(X_train).astype(np.float32), np.array(Y_train),
            np.array(X_test).astype(np.float32), np.array(Y_test))
| 29.19697 | 121 | 0.626362 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 71 | 0.036845 |
1ad2c6a11db3d3af3b1a5dc0792859101e67bf90 | 1,964 | py | Python | tools/tflitefile_tool/parser/tflite_parser.py | YongseopKim/ONE | 65d4a582621deb0a594343d9cc40ec777ad77e57 | [
"Apache-2.0"
] | null | null | null | tools/tflitefile_tool/parser/tflite_parser.py | YongseopKim/ONE | 65d4a582621deb0a594343d9cc40ec777ad77e57 | [
"Apache-2.0"
] | 36 | 2020-06-17T04:48:55.000Z | 2022-02-07T12:04:10.000Z | tools/tflitefile_tool/parser/tflite_parser.py | YongseopKim/ONE | 65d4a582621deb0a594343d9cc40ec777ad77e57 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2021 Samsung Electronics Co., Ltd. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: Do not use this module
import tflite.Model
import tflite.SubGraph
from ir import graph_stats
from .subgraph_parser import SubgraphParser
class TFLiteParser(object):
    """Parses a .tflite flatbuffer into per-subgraph parsers and stats."""

    def __init__(self, model_file):
        # File-like object (binary mode) containing the flatbuffer payload.
        self.model_file = model_file

    def Parse(self):
        """Parse every subgraph in the model.

        Returns a tuple (subgraphs, stats) where subgraphs is a non-empty
        list of (display_name, SubgraphParser) pairs and stats aggregates
        GraphStats over all subgraphs.
        """
        raw = bytearray(self.model_file.read())
        tf_model = tflite.Model.Model.GetRootAsModel(raw, 0)

        total_stats = graph_stats.GraphStats()
        parsed_subgraphs = []
        for index in range(tf_model.SubgraphsLength()):
            tf_subgraph = tf_model.Subgraphs(index)
            name = "#{0} {1}".format(index, tf_subgraph.Name())
            if index == 0:
                # Subgraph 0 is the model's entry-point subgraph.
                name += " (MAIN)"
            parser = SubgraphParser(tf_model, tf_subgraph)
            parser.Parse()
            total_stats += graph_stats.CalcGraphStats(parser)
            parsed_subgraphs.append((name, parser))

        # Sanity checks before handing the results back.
        assert parsed_subgraphs is not None
        assert len(parsed_subgraphs) > 0
        assert total_stats is not None
        return (parsed_subgraphs, total_stats)
| 33.862069 | 78 | 0.67057 | 1,177 | 0.599287 | 0 | 0 | 0 | 0 | 0 | 0 | 816 | 0.415479 |
1ad2ef9f07071572275789fdd3c3da196769692b | 6,857 | py | Python | modules/text/text_generation/plato2_en_base/module.py | AK391/PaddleHub | a51ab7447e089776766becb3297e560dfed98573 | [
"Apache-2.0"
] | 8,360 | 2019-01-18T10:46:45.000Z | 2022-03-31T14:50:02.000Z | modules/text/text_generation/plato2_en_base/module.py | dwuping/PaddleHub | 9a3b23295947e22149cc85c17cb4cf23c03f9e06 | [
"Apache-2.0"
] | 1,158 | 2019-04-11T09:22:43.000Z | 2022-03-31T12:12:09.000Z | modules/text/text_generation/plato2_en_base/module.py | dwuping/PaddleHub | 9a3b23295947e22149cc85c17cb4cf23c03f9e06 | [
"Apache-2.0"
] | 1,677 | 2019-04-09T15:07:40.000Z | 2022-03-31T06:41:10.000Z | # coding:utf-8
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import os
import json
import sys
import argparse
import contextlib
from collections import namedtuple
import paddle.fluid as fluid
import paddlehub as hub
from paddlehub.module.module import runnable
from paddlehub.module.nlp_module import DataFormatError
from paddlehub.common.logger import logger
from paddlehub.module.module import moduleinfo, serving
import plato2_en_base.models as plato_models
from plato2_en_base.tasks.dialog_generation import DialogGeneration
from plato2_en_base.utils import check_cuda, Timer
from plato2_en_base.utils.args import parse_args
@moduleinfo(
name="plato2_en_base",
version="1.0.0",
summary=
"A novel pre-training model for dialogue generation, incorporated with latent discrete variables for one-to-many relationship modeling.",
author="baidu-nlp",
author_email="",
type="nlp/text_generation",
)
class Plato(hub.NLPPredictionModule):
def _initialize(self):
"""
initialize with the necessary elements
"""
if "CUDA_VISIBLE_DEVICES" not in os.environ:
raise RuntimeError("The module only support GPU. Please set the environment variable CUDA_VISIBLE_DEVICES.")
args = self.setup_args()
self.task = DialogGeneration(args)
self.model = plato_models.create_model(args, fluid.CUDAPlace(0))
self.Example = namedtuple("Example", ["src", "data_id"])
self._interactive_mode = False
def setup_args(self):
"""
Setup arguments.
"""
assets_path = os.path.join(self.directory, "assets")
vocab_path = os.path.join(assets_path, "vocab.txt")
init_pretraining_params = os.path.join(assets_path, "24L", "Plato")
spm_model_file = os.path.join(assets_path, "spm.model")
nsp_inference_model_path = os.path.join(assets_path, "24L", "NSP")
config_path = os.path.join(assets_path, "24L.json")
# ArgumentParser.parse_args use argv[1:], it will drop the first one arg, so the first one in sys.argv should be ""
sys.argv = [
"", "--model", "Plato", "--vocab_path",
"%s" % vocab_path, "--do_lower_case", "False", "--init_pretraining_params",
"%s" % init_pretraining_params, "--spm_model_file",
"%s" % spm_model_file, "--nsp_inference_model_path",
"%s" % nsp_inference_model_path, "--ranking_score", "nsp_score", "--do_generation", "True", "--batch_size",
"1", "--config_path",
"%s" % config_path
]
parser = argparse.ArgumentParser()
plato_models.add_cmdline_args(parser)
DialogGeneration.add_cmdline_args(parser)
args = parse_args(parser)
args.load(args.config_path, "Model")
args.run_infer = True # only build infer program
return args
@serving
def generate(self, texts):
"""
Get the robot responses of the input texts.
Args:
texts(list or str): If not in the interactive mode, texts should be a list in which every element is the chat context separated with '\t'.
Otherwise, texts shoule be one sentence. The module can get the context automatically.
Returns:
results(list): the robot responses.
"""
if not texts:
return []
if self._interactive_mode:
if isinstance(texts, str):
self.context.append(texts.strip())
texts = [" [SEP] ".join(self.context[-self.max_turn:])]
else:
raise ValueError("In the interactive mode, the input data should be a string.")
elif not isinstance(texts, list):
raise ValueError("If not in the interactive mode, the input data should be a list.")
bot_responses = []
for i, text in enumerate(texts):
example = self.Example(src=text.replace("\t", " [SEP] "), data_id=i)
record = self.task.reader._convert_example_to_record(example, is_infer=True)
data = self.task.reader._pad_batch_records([record], is_infer=True)
pred = self.task.infer_step(self.model, data)[0] # batch_size is 1
bot_response = pred["response"] # ignore data_id and score
bot_responses.append(bot_response)
if self._interactive_mode:
self.context.append(bot_responses[0].strip())
return bot_responses
@contextlib.contextmanager
def interactive_mode(self, max_turn=6):
"""
Enter the interactive mode.
Args:
max_turn(int): the max dialogue turns. max_turn = 1 means the robot can only remember the last one utterance you have said.
"""
self._interactive_mode = True
self.max_turn = max_turn
self.context = []
yield
self.context = []
self._interactive_mode = False
@runnable
def run_cmd(self, argvs):
"""
Run as a command
"""
self.parser = argparse.ArgumentParser(
description='Run the %s module.' % self.name,
prog='hub run %s' % self.name,
usage='%(prog)s',
add_help=True)
self.arg_input_group = self.parser.add_argument_group(title="Input options", description="Input data. Required")
self.arg_config_group = self.parser.add_argument_group(
title="Config options", description="Run configuration for controlling module behavior, optional.")
self.add_module_input_arg()
args = self.parser.parse_args(argvs)
try:
input_data = self.check_input_data(args)
except DataFormatError and RuntimeError:
self.parser.print_help()
return None
results = self.generate(texts=input_data)
return results
if __name__ == "__main__":
module = Plato()
for result in module.generate(["Hello", "Hello\thi, nice to meet you, my name is tom\tso your name is tom?"]):
print(result)
with module.interactive_mode(max_turn=3):
while True:
human_utterance = input()
robot_utterance = module.generate(human_utterance)
print("Robot: %s" % robot_utterance[0])
| 37.883978 | 151 | 0.648972 | 4,949 | 0.721744 | 426 | 0.062126 | 5,244 | 0.764766 | 0 | 0 | 2,596 | 0.378591 |
1ad42eeaa6ca55779055e15041fcb384578133d5 | 929 | py | Python | Uncuffed/web/routes.py | WckdAwe/Uncuffed | c86c2ef33c689d7895ff45e9ba6108a9f7831c2d | [
"MIT"
] | 2 | 2021-09-16T09:17:34.000Z | 2021-11-18T12:44:34.000Z | Uncuffed/web/routes.py | WckdAwe/Uncuffed | c86c2ef33c689d7895ff45e9ba6108a9f7831c2d | [
"MIT"
] | null | null | null | Uncuffed/web/routes.py | WckdAwe/Uncuffed | c86c2ef33c689d7895ff45e9ba6108a9f7831c2d | [
"MIT"
] | null | null | null | # ------ [ API ] ------
API = '/api'
# ---------- [ BLOCKCHAIN ] ----------
API_BLOCKCHAIN = f'{API}/blockchain'
API_BLOCKCHAIN_LENGTH = f'{API_BLOCKCHAIN}/length'
API_BLOCKCHAIN_BLOCKS = f'{API_BLOCKCHAIN}/blocks'
# ---------- [ BROADCASTS ] ----------
API_BROADCASTS = f'{API}/broadcasts'
API_BROADCASTS_NEW_BLOCK = f'{API_BROADCASTS}/new_block'
API_BROADCASTS_NEW_TRANSACTION = f'{API_BROADCASTS}/new_transaction'
# ---------- [ TRANSACTIONS ] ----------
API_TRANSACTIONS = f'{API}/transactions'
API_TRANSACTIONS_PENDING = f'{API_TRANSACTIONS}/pending'
API_TRANSACTIONS_UTXO = f'{API_TRANSACTIONS}/UTXO'
# ---------- [ NODES ] ----------
API_NODES = f'{API}/nodes'
API_NODES_LIST = f'{API_NODES}/list'
API_NODES_INFO = f'{API_NODES}/info'
API_NODES_REGISTER = f'{API_NODES}/register'
# ------ [ WEB ] ------
WEB_HOME = '/'
WEB_SELECTOR = '/selector'
WEB_CHAT = '/chat'
WEB_CHAT_WITH_ADDRESS = f'{WEB_CHAT}/<address>'
| 27.323529 | 68 | 0.652314 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 550 | 0.592034 |
1ad46089871178a892672859920709ee5db5e62e | 1,748 | py | Python | autumn/infrastructure/tasks/utils.py | emmamcbryde/AuTuMN-1 | b1e7de15ac6ef6bed95a80efab17f0780ec9ff6f | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | autumn/infrastructure/tasks/utils.py | emmamcbryde/AuTuMN-1 | b1e7de15ac6ef6bed95a80efab17f0780ec9ff6f | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | autumn/infrastructure/tasks/utils.py | emmamcbryde/AuTuMN-1 | b1e7de15ac6ef6bed95a80efab17f0780ec9ff6f | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | import logging
import logging.config
import socket
import os
from autumn.core.utils.runs import read_run_id
from autumn.core.project import Project, get_project
def get_project_from_run_id(run_id: str) -> Project:
app_name, region_name, _, _ = read_run_id(run_id)
return get_project(app_name, region_name)
def set_logging_config(verbose: bool, chain="main", log_path="log", task="task"):
old_factory = logging.getLogRecordFactory()
if chain != "main":
chain = f"chain-{chain}"
def record_factory(*args, **kwargs):
record = old_factory(*args, **kwargs)
record.chain = chain
record.host = socket.gethostname()
return record
logging.setLogRecordFactory(record_factory)
log_format = "%(asctime)s %(host)s [%(chain)s] %(levelname)s %(message)s"
logfile = os.path.join(log_path, f"{task}-{chain}.log")
root_logger = {"level": "INFO", "handlers": ["file"]}
handlers = {
"file": {
"level": "INFO",
"class": "logging.FileHandler",
"filename": logfile,
"formatter": "app",
"encoding": "utf-8",
}
}
if verbose:
root_logger["handlers"].append("stream")
handlers["stream"] = {
"level": "INFO",
"class": "logging.StreamHandler",
"formatter": "app",
}
logging.config.dictConfig(
{
"version": 1,
"disable_existing_loggers": False,
"root": root_logger,
"handlers": handlers,
"formatters": {
"app": {
"format": log_format,
"datefmt": "%Y-%m-%d %H:%M:%S",
},
},
}
)
| 28.193548 | 81 | 0.548627 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 428 | 0.244851 |
1ad6a9afd46eb178c8ea9b62b017c639613e7aef | 6,433 | py | Python | lib/datasets/wider_face.py | thesuperorange/face-faster-rcnn.pytorch | cfc11e792f87fb132674680f34db193996ea6890 | [
"MIT"
] | 14 | 2018-04-16T13:02:04.000Z | 2020-05-09T15:33:20.000Z | faster-RCNN/lib/datasets/wider_face.py | thesuperorange/deepMI3 | ddc502c831d2f12325157d7503e1e39a218ebe21 | [
"MIT"
] | null | null | null | faster-RCNN/lib/datasets/wider_face.py | thesuperorange/deepMI3 | ddc502c831d2f12325157d7503e1e39a218ebe21 | [
"MIT"
] | 5 | 2018-08-06T13:48:59.000Z | 2022-01-14T05:57:27.000Z | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Xinlei Chen
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import PIL
from datasets.imdb import imdb
import numpy as np
import scipy.sparse
import scipy.io as sio
import pickle
import uuid
from model.utils.config import cfg
class wider_face(imdb):
def __init__(self, image_set):
"""
WIDER Face data loader
"""
name = 'wider_face_' + image_set
imdb.__init__(self, name)
self._devkit_path = self._get_default_path() # ./data/WIDER2015
# ./data/WIDER2015/WIDER_train/images
self._data_path = os.path.join(self._devkit_path, 'WIDER_' + image_set, 'images')
# Example path to image set file:
image_set_file = os.path.join(self._devkit_path, 'wider_face_split', 'wider_face_' + image_set + '.mat')
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
self._wider_image_set = sio.loadmat(image_set_file, squeeze_me=True)
self._classes = ('__background__', # always index 0
'face')
self._class_to_ind = dict(list(zip(self.classes, list(range(self.num_classes)))))
self._image_ext = '.jpg'
self._image_index, self._face_bbx = self._load_image_set_index()
# Default to roidb handler
self._roidb_handler = self.gt_roidb
self._salt = str(uuid.uuid4())
self._comp_id = 'comp4'
# PASCAL specific config options
self.config = {'cleanup': True,
'use_salt': True,
'matlab_eval': False,
'rpn_file': None}
assert os.path.exists(self._devkit_path), \
'VOCdevkit path does not exist: {}'.format(self._devkit_path)
assert os.path.exists(self._data_path), \
'Path does not exist: {}'.format(self._data_path)
def image_path_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self.image_path_from_index(self._image_index[i])
def image_id_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return i
def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
image_path = os.path.join(self._data_path,
index + self._image_ext)
assert os.path.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def _load_image_set_index(self):
"""
Load the indexes listed in this dataset's image set file.
"""
event_list = self._wider_image_set['event_list']
file_list = self._wider_image_set['file_list']
face_bbx_list = self._wider_image_set['face_bbx_list']
image_index = []
face_bbx = []
for i in range(len(event_list)):
for j in range(len(file_list[i])):
image_index.append(str(event_list[i]) + '/' + str(file_list[i][j]))
face_bbx.append(face_bbx_list[i][j].reshape(-1, 4))
# _wider_image_set = np.concatenate(_wider_image_set['file_list']).ravel().tolist()
# image_index = map(str, _wider_image_set)
return image_index, face_bbx
def _get_default_path(self):
"""
Return the default path where PASCAL VOC is expected to be installed.
"""
return os.path.join(cfg.DATA_DIR, 'WIDER2015')
def gt_roidb(self):
"""
Return the database of ground-truth regions of interest.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
try:
roidb = pickle.load(fid)
except:
roidb = pickle.load(fid, encoding='bytes')
print('{} gt roidb loaded from {}'.format(self.name, cache_file))
return roidb
gt_roidb = [self._load_wider_annotation(index)
for index in range(len(self.image_index))]
with open(cache_file, 'wb') as fid:
pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)
print('wrote gt roidb to {}'.format(cache_file))
return gt_roidb
def _load_wider_annotation(self, index):
"""
Load image and bounding boxes info from XML file in the PASCAL VOC
format.
"""
imw, imh = PIL.Image.open(self.image_path_at(index)).size
num_objs = self._face_bbx[index].shape[0]
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
# "Seg" area for pascal is just the box area
seg_areas = np.zeros((num_objs), dtype=np.float32)
# Load object bounding boxes into a data frame.
for ix in range(num_objs):
assert not np.any(np.isnan(self._face_bbx[index][ix]))
x1 = min(max(0, self._face_bbx[index][ix][0]), imw - 1)
y1 = min(max(0, self._face_bbx[index][ix][1]), imh - 1)
w = abs(self._face_bbx[index][ix][2])
h = abs(self._face_bbx[index][ix][3])
x2 = min(max(x1 + w, 0), imw - 1)
y2 = min(max(y1 + h, 0), imh - 1)
cls = 1
boxes[ix, :] = [x1, y1, x2, y2]
gt_classes[ix] = cls
overlaps[ix, cls] = 1.0
seg_areas[ix] = (w + 1) * (h + 1)
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'boxes': boxes,
'gt_classes': gt_classes,
'gt_overlaps': overlaps,
'flipped': False,
'seg_areas': seg_areas}
def _get_comp_id(self):
comp_id = (self._comp_id + '_' + self._salt if self.config['use_salt']
else self._comp_id)
return comp_id
| 38.065089 | 112 | 0.581222 | 5,880 | 0.914037 | 0 | 0 | 0 | 0 | 0 | 0 | 1,822 | 0.283227 |
1ad8a215b26ac1fc0fb1060b601d1debf1dc679f | 573 | py | Python | locale/pot/api/core/_autosummary/pyvista-ExplicitStructuredGrid-compute_implicit_distance-1.py | tkoyama010/pyvista-doc-translations | 23bb813387b7f8bfe17e86c2244d5dd2243990db | [
"MIT"
] | 4 | 2020-08-07T08:19:19.000Z | 2020-12-04T09:51:11.000Z | locale/pot/api/core/_autosummary/pyvista-UnstructuredGrid-compute_implicit_distance-1.py | tkoyama010/pyvista-doc-translations | 23bb813387b7f8bfe17e86c2244d5dd2243990db | [
"MIT"
] | 19 | 2020-08-06T00:24:30.000Z | 2022-03-30T19:22:24.000Z | locale/pot/api/core/_autosummary/pyvista-StructuredGrid-compute_implicit_distance-1.py | tkoyama010/pyvista-doc-translations | 23bb813387b7f8bfe17e86c2244d5dd2243990db | [
"MIT"
] | 1 | 2021-03-09T07:50:40.000Z | 2021-03-09T07:50:40.000Z | # Compute the distance between all the points on a sphere and a
# plane.
#
import pyvista as pv
sphere = pv.Sphere()
plane = pv.Plane()
_ = sphere.compute_implicit_distance(plane, inplace=True)
dist = sphere['implicit_distance']
type(dist)
# Expected:
## <class 'numpy.ndarray'>
#
# Plot these distances as a heatmap
#
pl = pv.Plotter()
_ = pl.add_mesh(sphere, scalars='implicit_distance', cmap='bwr')
_ = pl.add_mesh(plane, color='w', style='wireframe')
pl.show()
#
# See :ref:`clip_with_surface_example` and
# :ref:`voxelize_surface_mesh_example` for more examples using
| 26.045455 | 64 | 0.734729 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 308 | 0.537522 |
1ad93b581be550c1b778274bfd4d391d94cbf882 | 1,953 | py | Python | web_Project/Data_Predict/predict_lead.py | mscenter1/pigpriceML | d51f645a590cebd65126e867d6ef0d3d437e9bc7 | [
"MIT"
] | 8 | 2019-02-02T11:41:28.000Z | 2022-03-10T14:15:09.000Z | web_Project/Data_Predict/predict_lead.py | mscenter1/pigpriceML | d51f645a590cebd65126e867d6ef0d3d437e9bc7 | [
"MIT"
] | 2 | 2019-02-01T07:57:57.000Z | 2021-03-01T06:16:35.000Z | web_Project/Data_Predict/predict_lead.py | mscenter1/pigpriceML | d51f645a590cebd65126e867d6ef0d3d437e9bc7 | [
"MIT"
] | 6 | 2019-02-01T07:17:38.000Z | 2021-12-28T02:37:29.000Z | # -*- coding: utf-8 -*-
# 系统模块
import sys
# 数据处理模块
import pandas as pd
# 引入外部模块
# 整理数据
from predict_prepare import Predict_Prepare as Prepare
# 获取价格预测结果
from predict_predict import Predict_Predict as Predict
class Predict_Lead:
def __init__(self):
pass
# 其他包调用的函数
def predict_result(self):
# 模型分两段进行预测
period = [1, 2]
# 实例化准备模块和模型预测模块
PrePare_Data = Prepare()
Predict_Data = Predict()
# 获得第一段时间的预测结果
# 整理样本数据集,进行模型预测准备工作
# History_Model11、Predict_Model11:生猪预测模型所需使用的自变量和因变量
# Last_data_model11:原始数据集中生猪价格的最后一条记录的时间
# History_Model21、Predict_Model21:玉米预测模型所需使用的自变量和因变量
# Last_data_model21:原始数据集中玉米价格的最后一条记录的时间
History_Model11, Predict_Model11, Last_data_model11, History_Model21, Predict_Model21, Last_data_model21 = PrePare_Data.variables_prepar(period[0])
# 获取预测结果
# predict_result1:生猪价格和玉米价格的预测结果
# y_test_compare11:第一时间段中生猪模型训练结果和实际价格的集合
# y_test_compare12:第一时间段中玉米模型训练结果和实际价格的集合
predict_result1, y_test_compare11, y_test_compare12 = Predict_Data.predict_result(History_Model11, Last_data_model11, Predict_Model11, History_Model21, Last_data_model21, Predict_Model21, period[0])
# 获得第二段时间的预测结果
# 整理样本数据集,进行模型预测准备工作
History_Model12, Predict_Model12, Last_data_model12, History_Model22, Predict_Model22, Last_data_model22 = PrePare_Data.variables_prepar(period[1])
# 获取预测结果
predict_result2, y_test_compare21, y_test_compare22 = Predict_Data.predict_result(History_Model12, Last_data_model12, Predict_Model12, History_Model22, Last_data_model22, Predict_Model22, period[1])
# 整合两端时间的预测结果
predict_result = pd.concat([predict_result1, predict_result2])
predict_result = predict_result.reset_index(drop=True)
return predict_result, Last_data_model11, y_test_compare11, y_test_compare12
| 35.509091 | 206 | 0.729647 | 2,249 | 0.891399 | 0 | 0 | 0 | 0 | 0 | 0 | 1,063 | 0.421324 |
1ada20acc9ce4a88cc954468b6dae92540d23e52 | 4,255 | py | Python | aerforge/text.py | Aermoss/AerForge | 1f57ff69f3b2f8052a2a266d3e5c04cfa4ec0e99 | [
"MIT"
] | 2 | 2021-09-24T12:57:07.000Z | 2022-01-14T00:47:43.000Z | aerforge/text.py | Aermoss/AerForge | 1f57ff69f3b2f8052a2a266d3e5c04cfa4ec0e99 | [
"MIT"
] | null | null | null | aerforge/text.py | Aermoss/AerForge | 1f57ff69f3b2f8052a2a266d3e5c04cfa4ec0e99 | [
"MIT"
] | null | null | null | import pygame
from aerforge.color import *
from aerforge.error import *
class Text:
def __init__(self, window, text, font_size = 24, font_file = None, font_name = "arial", bold = False, italic = False, underline = False, color = Color(240, 240, 240), x = 0, y = 0, parent = None, add_to_objects = True):
self.window = window
self.parent = parent
self.x = x
self.y = y
self.font_file = font_file
self.font_name = font_name
self.font_size = font_size
self.bold = bold
self.italic = italic
self.underline = underline
self.load_font(self.font_file, self.font_name)
self.set_bold(self.bold)
self.set_italic(self.italic)
self.set_underline(self.underline)
self.color = color
self.text = text
self.scripts = []
self.destroyed = False
self.visible = True
self.add_to_objects = add_to_objects
if self.add_to_objects:
self.window.objects.append(self)
def update(self):
pass
def draw(self):
if not self.destroyed:
if self.visible:
if self.parent != None:
self.x += self.parent.x
self.y += self.parent.y
rendered_text = self.font.render(self.text, True, self.color.get())
self.window.window.blit(rendered_text, (self.x, self.y))
if self.parent != None:
self.x -= self.parent.x
self.y -= self.parent.y
def set_color(self, color):
self.color = color
def get_color(self):
return self.color
def set_text(self, text):
self.text = text
def get_text(self):
return self.text
def get_x(self):
return self.x
def get_y(self):
return self.y
def set_x(self, x):
self.x = x
def set_y(self, y):
self.y = y
def get_font_size(self):
return self.font_size
def set_font_size(self, font_size):
self.font_size = font_size
self.load_font(self.font_file, self.font_name)
def get_font_file(self):
return self.font_file
def get_font_name(self):
return self.font_name
def set_bold(self, bold):
self.bold = bold
self.font.set_bold(self.bold)
def set_italic(self, italic):
self.italic = italic
self.font.set_italic(self.italic)
def set_underline(self, underline):
self.underline = underline
self.font.set_underline(self.underline)
def get_bold(self):
return self.bold
def get_italic(self):
return self.italic
def get_underline(self):
return self.underline
def load_font(self, font_file = None, font_name = "arial"):
self.font_file = font_file
self.font_name = font_name
if self.font_file != None:
self.font = pygame.font.Font(self.font_file, self.font_size)
else:
self.font = pygame.font.SysFont(self.font_name, self.font_size)
def get_width(self):
rendered_text = self.font.render(self.text, True, self.color.get())
return rendered_text.get_width()
def get_height(self):
rendered_text = self.font.render(self.text, True, self.color.get())
return rendered_text.get_height()
def center(self):
self.x = self.window.width / 2 - self.get_width() / 2
self.y = self.window.height / 2 - self.get_height() / 2
def center_x(self):
self.x = self.window.width / 2 - self.get_width() / 2
def center_y(self):
self.y = self.window.height / 2 - self.get_height() / 2
def destroy(self):
self.destroyed = True
if self.add_to_objects:
try:
self.window.objects.pop(self.window.objects.index(self))
except:
pass
def add_script(self, script):
self.scripts.append(script)
def remove_script(self, script):
self.scripts.pop(self.scripts.index(script)) | 26.93038 | 224 | 0.568038 | 4,176 | 0.981434 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 0.00329 |
1ada715fb82ce5567b931b5b4c65641a0f3234b9 | 47 | py | Python | xchange/__init__.py | jrgparkinson/ouccc | 36824cd944620b6e28795f43a24e17e648b1f0bb | [
"MIT"
] | null | null | null | xchange/__init__.py | jrgparkinson/ouccc | 36824cd944620b6e28795f43a24e17e648b1f0bb | [
"MIT"
] | 5 | 2020-06-06T00:19:41.000Z | 2022-02-13T18:49:17.000Z | xchange/__init__.py | jrgparkinson/ouccc | 36824cd944620b6e28795f43a24e17e648b1f0bb | [
"MIT"
] | null | null | null | default_app_config = 'webapp.apps.WebAppConfig' | 47 | 47 | 0.851064 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.553191 |
1adbac124bbaf8f82229656776f6cf0f6360b65e | 500 | py | Python | setup.py | ruivieira/python-als | c98914991a0812084c85e0ded621334c24866b54 | [
"Apache-2.0"
] | 2 | 2021-03-02T04:44:08.000Z | 2021-08-25T09:42:06.000Z | setup.py | ruivieira/python-als | c98914991a0812084c85e0ded621334c24866b54 | [
"Apache-2.0"
] | null | null | null | setup.py | ruivieira/python-als | c98914991a0812084c85e0ded621334c24866b54 | [
"Apache-2.0"
] | 1 | 2019-05-19T10:51:53.000Z | 2019-05-19T10:51:53.000Z | from distutils.core import setup
setup(
name='als',
packages=['als'],
version='0.0.2',
description='Python library for Alternating Least Squares (ALS)',
author='Rui Vieira',
author_email='ruidevieira@googlemail.com',
url='https://github.com/ruivieira/python-als',
download_url='https://github.com/'
'ruivieira/python-als/archive/0.0.2.tar.gz',
keywords=['als', 'recommendation', 'scientific', 'machine-learning', 'models'],
classifiers=[],
)
| 31.25 | 83 | 0.652 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 273 | 0.546 |
1adbb705c738a912188b371ea95988590ae0bd44 | 8,274 | py | Python | echolab2/instruments/util/bottom_data.py | iambaim/pyEcholab | 6e165ad1a947e62fc233467631c445fe9ebcdad2 | [
"MIT"
] | null | null | null | echolab2/instruments/util/bottom_data.py | iambaim/pyEcholab | 6e165ad1a947e62fc233467631c445fe9ebcdad2 | [
"MIT"
] | null | null | null | echolab2/instruments/util/bottom_data.py | iambaim/pyEcholab | 6e165ad1a947e62fc233467631c445fe9ebcdad2 | [
"MIT"
] | null | null | null | # coding=utf-8
# National Oceanic and Atmospheric Administration (NOAA)
# Alaskan Fisheries Science Center (AFSC)
# Resource Assessment and Conservation Engineering (RACE)
# Midwater Assessment and Conservation Engineering (MACE)
# THIS SOFTWARE AND ITS DOCUMENTATION ARE CONSIDERED TO BE IN THE PUBLIC DOMAIN
# AND THUS ARE AVAILABLE FOR UNRESTRICTED PUBLIC USE. THEY ARE FURNISHED "AS IS."
# THE AUTHORS, THE UNITED STATES GOVERNMENT, ITS INSTRUMENTALITIES, OFFICERS,
# EMPLOYEES, AND AGENTS MAKE NO WARRANTY, EXPRESS OR IMPLIED, AS TO THE USEFULNESS
# OF THE SOFTWARE AND DOCUMENTATION FOR ANY PURPOSE. THEY ASSUME NO RESPONSIBILITY
# (1) FOR THE USE OF THE SOFTWARE AND DOCUMENTATION; OR (2) TO PROVIDE TECHNICAL
# SUPPORT TO USERS.
"""
| Developed by: Rick Towler <rick.towler@noaa.gov>
| National Oceanic and Atmospheric Administration (NOAA)
| Alaska Fisheries Science Center (AFSC)
| Midwater Assesment and Conservation Engineering Group (MACE)
|
| Author:
| Rick Towler <rick.towler@noaa.gov>
| Maintained by:
| Rick Towler <rick.towler@noaa.gov>
"""
import numpy as np
class bottom_data(object):
'''
The bottom_data class stores data from TAG0 datagrams in Simrad raw files.
It may be useful if other sonar file types have a similar annotation
'''
CHUNK_SIZE = 500
def __init__(self, channel_id):
# Create a counter to keep track of the number of datagrams.
self.n_datagrams = 0
# set the channel ID
self.channel_id = channel_id
# Create arrays to store MRU0 data
self.times = np.empty(bottom_data.CHUNK_SIZE, dtype='datetime64[ms]')
self.annotation_text = np.empty(bottom_data.CHUNK_SIZE, dtype=object)
def add_datagram(self, time, annotation_datagram):
"""
Add annotation text
Args:
annotation_datagram (dict) - The motion datagram dictionary returned by
the simrad datagram parser.
"""
# Check if we need to resize our arrays.
if self.n_datagrams == self.annotation_times.shape[0]:
self._resize_arrays(self.annotation_times.shape[0] + annotation_data.CHUNK_SIZE)
# Add this datagram to our data arrays
self.annotation_times[self.n_datagrams] = annotation_datagram['timestamp']
self.annotation_text[self.n_datagrams] = annotation_datagram['text']
# Increment datagram counter.
self.n_datagrams += 1
def interpolate(self, p_data, data_type, start_time=None, end_time=None):
"""
interpolate returns the requested motion data interpolated to the ping times
that are present in the provided ping_data object.
p_data is a ping_data object that contains the ping_time vector
to interpolate to.
data_type is a string pecifying the motion attribute to interpolate, valid
values are: 'pitch', 'heave', 'roll', and 'heading'
start_time is a datetime or datetime64 object defining the starting time of the data
to return. If None, the start time is the earliest time.
end_time is a datetime or datetime64 object defining the ending time of the data
to return. If None, the end time is the latest time.
attributes is a string or list of strings specifying the motion attribute(s)
to interpolate and return. If None, all attributes are interpolated
and returned.
Returns a dictionary of numpy arrays keyed by attribute name that contain the
interpolated data for that attribute.
"""
# Create the dictionary to return
out_data = {}
# Return an empty dict if we don't contain any data
if self.n_datagrams < 1:
return out_data
# Get the index for all datagrams within the time span.
return_idxs = self.get_indices(start_time=start_time, end_time=end_time)
# Check if we're been given specific attributes to interpolate
if data_type is None:
# No - interpolate all
attributes = ['heave', 'pitch', 'roll', 'heading']
elif isinstance(data_type, str):
# We have a string, put it in a list
attributes = [data_type]
# Work through the attributes and interpolate
for attribute in attributes:
try:
# Interpolate this attribute using the time vector in the
# provided ping_data object
i_data = np.interp(p_data.ping_time.astype('d'),
self.time.astype('d'), getattr(self, attribute),
left=np.nan, right=np.nan)
out_data[attribute] = i_data[return_idxs]
except:
# Provided attribute doesn't exist
out_data[attribute] = None
return (attributes, out_data)
def get_indices(self, start_time=None, end_time=None, time_order=True):
"""
Return index of data contained in speciofied time range.
get_indices returns an index array containing the indices contained
in the range defined by the times provided. By default the indexes
are in time order.
Args:
start_time is a datetime or datetime64 object defining the starting
time of the data to return. If None, the start time is the
earliest time.
end_time is a datetime or datetime64 object defining the ending time
of the data to return. If None, the end time is the latest time.
time_order (bool): Control whether if indexes are returned in time
order (True) or not.
Returns: Index array containing indices of data to return.
"""
# Ensure that we have times to work with.
if start_time is None:
start_time = np.min(self.time)
if end_time is None:
end_time = np.max(self.time)
# Sort time index if returning time ordered indexes.
if time_order:
primary_index = self.time.argsort()
else:
primary_index = self.time
# Determine the indices of the data that fall within the time span
# provided.
mask = self.time[primary_index] >= start_time
mask = np.logical_and(mask, self.time[primary_index] <= end_time)
# and return the indices that are included in the specified range
return primary_index[mask]
def _resize_arrays(self, new_size):
"""
Resize arrays if needed to hold more data.
_resize_arrays expands our data arrays and is called when said arrays
are filled with data and more data need to be added.
Args:
new_size (int): New size for arrays, Since these are all 1d
arrays the value is simply an integer.
"""
self.time = np.resize(self.time,(new_size))
self.pitch = np.resize(self.pitch,(new_size))
self.roll = np.resize(self.roll,(new_size))
self.heading = np.resize(self.heading,(new_size))
self.heave = np.resize(self.heave,(new_size))
    def trim(self):
        """
        Trim arrays to proper size after all data are added.

        trim is called when one is done adding data to the object. It
        removes empty elements of the data arrays.
        """
        # n_datagrams holds the number of datagrams actually stored; shrinking
        # to it drops the unused pre-allocated tail of every data array.
        self._resize_arrays(self.n_datagrams)
def __str__(self):
"""
Reimplemented string method that provides some basic info about the
nmea_data object.
"""
# print the class and address
msg = str(self.__class__) + " at " + str(hex(id(self))) + "\n"
# print some more info about the motion_data instance
if (self.n_datagrams > 0):
msg = "{0} MRU data start time: {1}\n".format(msg, self.time[0])
msg = "{0} MRU data end time: {1}\n".format(msg,self.time[self.n_datagrams-1])
msg = "{0} Number of datagrams: {1}\n".format(msg,self.n_datagrams+1)
else:
msg = msg + (" simrad_motion_data object contains no data\n")
return msg
| 37.780822 | 98 | 0.634155 | 7,147 | 0.86379 | 0 | 0 | 0 | 0 | 0 | 0 | 5,234 | 0.632584 |
1adbdfb4bc66d95866bcc5ed925f8780b4dac055 | 318 | py | Python | npy2f32.py | shaun95/LPCNet | 117214c3a63d4f43cf5741b299c497e85c983327 | [
"BSD-3-Clause"
] | null | null | null | npy2f32.py | shaun95/LPCNet | 117214c3a63d4f43cf5741b299c497e85c983327 | [
"BSD-3-Clause"
] | 1 | 2020-06-17T12:07:27.000Z | 2020-06-17T12:07:27.000Z | npy2f32.py | shaun95/LPCNet | 117214c3a63d4f43cf5741b299c497e85c983327 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import argparse
import sys
import os
# Convert the .npy file named on the command line (resolved relative to this
# script's directory) into a raw float32 dump with a .f32 extension.
dir_name = os.path.dirname(os.path.realpath(__file__))
npy_data = np.load(os.path.join(dir_name, sys.argv[1]))
# Flatten to a 1d float32 array so tofile() writes a plain run of floats.
npy_data = npy_data.astype(np.float32)
npy_data = npy_data.reshape((-1,))
# BUG FIX: the original used sys.argv[1].split(".")[0], which truncates any
# name containing an extra dot (e.g. "feat.v2.npy" became "feat.f32" instead
# of "feat.v2.f32"). splitext strips only the final extension.
out_name = os.path.splitext(sys.argv[1])[0] + ".f32"
npy_data.tofile(os.path.join(dir_name, out_name))
| 28.909091 | 75 | 0.732704 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 0.028302 |
1adc3dc40aa2436be842596159a801f5a7ff9623 | 15,448 | py | Python | scripts/generator/filter_domains.py | kcappieg/metronome | 65601b0993550a86843fa2b2f116fcd663118b2c | [
"MIT"
] | null | null | null | scripts/generator/filter_domains.py | kcappieg/metronome | 65601b0993550a86843fa2b2f116fcd663118b2c | [
"MIT"
] | null | null | null | scripts/generator/filter_domains.py | kcappieg/metronome | 65601b0993550a86843fa2b2f116fcd663118b2c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
import sys
from shutil import copyfile, move
import argparse
from glob import glob
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from Metronome import distributed_execution
def generate_astar_configs(domain_paths, domain_type):
    """Build one A* solver configuration dict per provided domain path.

    A leading '.' on each path is stripped so the path is rooted the way
    the solver expects it.
    """
    configs = []
    for domain_path in domain_paths:
        # Drop a leading '.' from the domain path, when present.
        normalized_path = domain_path[1:] if domain_path[0] == '.' else domain_path
        configs.append({
            'algorithmName': 'A_STAR',
            'actionDuration': 1,
            'domainName': domain_type,
            'terminationType': 'EXPANSION',
            'lookaheadType': 'DYNAMIC',
            'commitmentStrategy': 'SINGLE',
            'heuristicMultiplier': 1.0,
            'domainPath': normalized_path,
        })
    return configs
def generate_agrd_configs(domain_paths, domain_type, goals):
    """Build one NAIVE_OPTIMAL_AGRD configuration dict per domain path.

    `goals` is the number of goals per instance; the goal prior is uniform
    (1/goals each) and the subject's true goal is always goal 0.
    """
    uniform_prior = [1 / goals for _ in range(goals)]
    configs = []
    for domain_path in domain_paths:
        # Drop a leading '.' from the domain path, when present.
        normalized_path = domain_path[1:] if domain_path[0] == '.' else domain_path
        configs.append({
            'algorithmName': 'NAIVE_OPTIMAL_AGRD',
            'actionDuration': 1,
            'interventionCost': 1,
            'domainName': domain_type,
            'terminationType': 'EXPANSION',
            'subjectAlgorithm': 'NAIVE_DYNAMIC',
            'timeLimit': 3600_000_000_000,  # 3600 second (60 min) timeout
            'maxDepth': 1000,
            'goalPriors': list(uniform_prior),  # fresh copy per config
            'subjectGoal': 0,
            'domainPath': normalized_path,
        })
    return configs
def filter_domains(generated_domain_paths, base_domain_name, domain_type='GRID_WORLD', domain_ext='.vw',
                   out_path='./filtered', write=True):
    """Run A* over the generated domains and keep only the solvable ones.

    Solvable instances are optionally renamed and moved into `out_path` as
    `<base_domain_name><index><domain_ext>`. Returns the list of solvable
    domain paths (with the leading '.' stripped).
    """
    original_cwd = os.getcwd()
    if not os.path.exists(out_path):
        os.makedirs(out_path)
    configs = generate_astar_configs(generated_domain_paths, domain_type)
    print('Begin filtering of generated domains')
    # distributed_execution is launched from the repository root, then we
    # return to where we started.
    os.chdir('../..')
    results = distributed_execution(configs, original_cwd)
    os.chdir(original_cwd)
    solvable = []
    solvable_count = 0
    for result in results:
        domain_path = result['configuration']['domainPath']
        if not result['success']:
            print(result['errorMessage'])
            print(f'Domain {domain_path} was not successfully solved')
            continue
        print(f'Domain {domain_path} is solvable')
        solvable.append(domain_path)
        if write:
            new_file_name = os.path.join(out_path, base_domain_name + str(solvable_count) + domain_ext)
            print(f'Outputting to {new_file_name}')
            # Restore the leading '.' stripped by generate_astar_configs.
            move('.' + domain_path, new_file_name)
            solvable_count += 1
    return solvable
def get_with_default(d, key, default_value=None, default_producer=None):
    """Return d[key], first inserting a default when the key is missing.

    When `default_producer` is given it is called lazily to build the
    default; otherwise `default_value` is stored as-is.
    """
    if key in d:
        return d[key]
    if default_producer is None:
        d[key] = default_value
    else:
        d[key] = default_producer()
    return d[key]


def get_with_default_list(d, key):
    """Return d[key], inserting a fresh empty list when the key is missing."""
    return get_with_default(d, key, default_producer=list)


def get_with_default_dict(d, key):
    """Return d[key], inserting a fresh empty dict when the key is missing."""
    return get_with_default(d, key, default_producer=dict)
def get_depth_upper_bound(result):
    """Return the second-largest Goal_<i> cost recorded in `result`.

    Costs are read from consecutive keys 'Goal_0', 'Goal_1', ... until the
    first missing index. The two zero sentinels reproduce the original
    two-maximum scan exactly: negative costs are ignored and fewer than two
    positive costs yield 0.
    """
    costs = []
    goal_idx = 0
    while f'Goal_{goal_idx}' in result:
        costs.append(result[f'Goal_{goal_idx}'])
        goal_idx += 1
    return sorted(costs + [0, 0], reverse=True)[1]
def filter_agrd_chunk(config, chunk_instances, inactive_out_dir, followup_out_dir):
    """Run the AGRD solver over one chunk of instances and bucket the results.

    Instances where the observer acted are collected as successes, keyed by
    the instance's depth upper bound; timeouts are collected the same way so
    the caller can decide whether to tolerate them. Inactive-observer and
    otherwise-failed instances are moved out of the source directory
    immediately (to inactive_out_dir or followup_out_dir).

    Returns:
        (successes_by_depth_bound, timeouts_by_depth_bound): dicts mapping
        depth bound -> list of (instance_path, instance_filename,
        base_domain_name, domain_ext) tuples.
    """
    this_cwd = os.getcwd()
    base_domain_name = config['base_domain_name']
    domain_ext = config['domain_ext']
    # Map the on-disk path of each instance back to its bare filename.
    path_to_instance = {
        os.path.join(
            config['source_dir'],
            filename
        ): filename
        for filename in chunk_instances
    }
    configs = generate_agrd_configs(path_to_instance.keys(), config['domain_type'], config['num_goals'])
    # The solver must be launched from the repository root, two levels up;
    # restore the working directory afterwards.
    os.chdir('../..')
    results = distributed_execution(configs, this_cwd)
    os.chdir(this_cwd)
    successes_by_depth_bound = dict()
    timeouts_by_depth_bound = dict()
    for result in results:
        result['depthUpperBound'] = get_depth_upper_bound(result)
        instance_path = result["configuration"]["domainPath"]
        # Restore the leading '.' stripped by generate_agrd_configs; this
        # assumes source_dir paths are './'-relative — confirm for new configs.
        if instance_path[0] != '.':
            instance_path = '.' + instance_path
        instance_filename = path_to_instance[instance_path]
        if result['success'] and result.get('observerIsActive', 0) > 0:
            print(f'Observer was active in domain {instance_path}')
            get_with_default_list(successes_by_depth_bound, result['depthUpperBound'])\
                .append((instance_path, instance_filename, base_domain_name, domain_ext))
        else:
            if result['success']:
                # Solved, but the observer never had to act: not a keeper.
                print(f'Observer was inactive in domain {instance_path}')
                move(instance_path, os.path.join(inactive_out_dir, instance_filename))
            else:
                err_msg = result["errorMessage"]
                print(f'Failed to solve domain {instance_path} with error {err_msg}')
                lower_err = err_msg.lower()
                # NOTE: branch order matters — a timeout message is checked
                # before the "shouldn't happen" failure categories.
                if 'timeout' in lower_err:
                    get_with_default_list(timeouts_by_depth_bound, result['depthUpperBound'])\
                        .append((instance_path, instance_filename, base_domain_name, domain_ext))
                elif 'dead end' in lower_err or 'subject transitioned' in lower_err or 'follow-up' in lower_err:
                    # follow up on instances that fail for reasons that shouldn't happen...
                    move(instance_path, os.path.join(followup_out_dir, instance_filename))
                else:
                    move(instance_path, os.path.join(inactive_out_dir, instance_filename))
    return successes_by_depth_bound, timeouts_by_depth_bound
def move_agrd_filter_results(successes_info_by_depth_bound, timeouts_info_by_depth_bound):
    """Moves successes to new directory, but only if all instances
    at the relevant depth bound succeeded.

    Both arguments map depth bound -> out_dir -> list of
    (instance_path, instance_filename, base_domain_name, domain_ext) tuples.
    A stats.log file is written into each out_dir describing the outcome
    per depth bound.
    """
    # loop through timeouts first to purge successes dict
    # Per out_dir stats.log handles are created lazily and reused across
    # both passes, then closed at the end.
    meta_files_by_out = dict()
    for depth_bound, timeout_info in timeouts_info_by_depth_bound.items():
        for out_dir, timeout_list in timeout_info.items():
            print(f'Moving timeout instances at depth bound {depth_bound} for out dir {out_dir}')
            timeout_dir = os.path.join(out_dir, 'timeout')
            meta_file = get_with_default(
                meta_files_by_out, out_dir,
                default_producer=lambda: open(os.path.join(out_dir, 'stats.log'), 'w'))
            success_info = get_with_default_dict(successes_info_by_depth_bound, depth_bound)
            successes_list = get_with_default_list(success_info, out_dir)
            num_timeouts = len(timeout_list)
            num_successes = len(successes_list)
            total = num_timeouts + num_successes
            fraction_timeout = float(num_timeouts) / float(total)
            meta_log_text = f'Depth Bound {depth_bound}: '\
                f'{num_successes} successes, {num_timeouts} timeouts, {fraction_timeout} timeout fraction'
            # Instances destined for the timeout/ subdirectory; starts as a
            # copy of the timeout list so the input list is not mutated.
            to_timeout_dir = timeout_list[:]
            if num_timeouts <= 3 and fraction_timeout <= 0.01:
                # tolerate up to 3 timeouts up to 1% of instances
                meta_log_text += ' (ignoring timeouts, writing successes)'
            else:
                # Too many timeouts at this depth bound: the whole group,
                # including its successes, goes to timeout/.
                to_timeout_dir += successes_list
                success_info[out_dir] = []  # wipe the list so we don't write to success dir later
            meta_file.write(meta_log_text + '\n')
            for instance_path, instance_filename, _, _ in to_timeout_dir:
                move(instance_path, os.path.join(timeout_dir, instance_filename))
    # Separator between the timeout summary and the success summary.
    for file in meta_files_by_out.values():
        file.write('\n=====================================\n\n')
    # Per-prefix running index used to rename surviving instances.
    success_indices = {}
    for depth_bound, success_info in successes_info_by_depth_bound.items():
        for out_dir, successes_list in success_info.items():
            if len(successes_list) == 0:
                continue
            print(f'Moving successful instances at depth bound {depth_bound} for out dir {out_dir}')
            meta_file = get_with_default(
                meta_files_by_out, out_dir,
                default_producer=lambda: open(os.path.join(out_dir, 'stats.log'), 'w'))
            meta_file.write(f'Depth Bound {depth_bound}: {len(successes_list)} successes\n')
            for instance_path, _, base_domain_name, domain_ext in successes_list:
                prefix = os.path.join(out_dir, base_domain_name)
                new_file_path = prefix + str(get_with_default(success_indices, prefix, 0)) + domain_ext
                success_indices[prefix] += 1
                move(instance_path, new_file_path)
    for file in meta_files_by_out.values():
        file.close()
def filter_active_observer(domain_configs, chunk_size=1000):
    """Filter to only those where the observer is active.

    Instances are processed in chunks of `chunk_size`; chunk results are
    accumulated by depth bound and out_dir, then handed to
    move_agrd_filter_results for the final move/rename pass.

    Dict schema:
    source_dir: str of the source directory
    base_domain_name: str prefix for all instance filenames
    num_instances: number of instances being filtered with this config
    num_goals: number of goals in each instance (must be same across all instances)
    domain_type: 'GRID_WORLD', 'LOGISTICS', etc
    domain_ext: '.vw', '.logistics', etc
    out_dir: str of the output directory
    """
    successes_info_by_depth_bound = dict()
    timeouts_info_by_depth_bound = dict()
    for config in domain_configs:
        base_domain_name = config['base_domain_name']
        domain_ext = config['domain_ext']
        out_dir = config['out_dir']
        src_dir = config['source_dir']
        # Normalize to a trailing slash so filename slicing below is correct.
        if src_dir[-1] != '/':
            src_dir += '/'
        print(f'Filtering {base_domain_name} instances')
        # Ensure the three outcome subdirectories exist under out_dir.
        timeout_out_dir = os.path.join(out_dir, 'timeout')
        if not os.path.exists(timeout_out_dir):
            os.makedirs(timeout_out_dir)
        inactive_out_dir = os.path.join(out_dir, 'failed')
        if not os.path.exists(inactive_out_dir):
            os.makedirs(inactive_out_dir)
        followup_out_dir = os.path.join(out_dir, 'follow-up')
        if not os.path.exists(followup_out_dir):
            os.makedirs(followup_out_dir)
        # Bare filenames of every matching instance in the source directory.
        domain_instance_filenames = [
            filepath[len(src_dir):]
            for filepath in glob(src_dir + base_domain_name + '*' + domain_ext)
        ]
        idx = 0
        while len(domain_instance_filenames) > idx:
            # new_file_path = os.path.join(active_out_dir, base_domain_name + str(success_index) + domain_ext)
            chunk_instances = domain_instance_filenames[idx:idx + chunk_size]
            print(f'Begin filtering {base_domain_name} {idx} through '
                  f'{min(idx + chunk_size - 1, len(domain_instance_filenames) - 1)}')
            tmp_successes, tmp_failures = filter_agrd_chunk(config, chunk_instances, inactive_out_dir, followup_out_dir)
            # Merge this chunk's per-depth-bound lists into the global
            # accumulators, keyed by depth bound then out_dir.
            for key, value in tmp_successes.items():
                all_success_info = get_with_default_dict(successes_info_by_depth_bound, key)
                group_success_list = get_with_default_list(all_success_info, out_dir)
                group_success_list += value
            for key, value in tmp_failures.items():
                all_failure_info = get_with_default_dict(timeouts_info_by_depth_bound, key)
                group_failure_list = get_with_default_list(all_failure_info, out_dir)
                group_failure_list += value
            idx += chunk_size
    move_agrd_filter_results(successes_info_by_depth_bound, timeouts_info_by_depth_bound)
def run_filter_observer(args):
    """Build filter configs for one domain family and run the AGRD
    active-observer filter over them.

    Args:
        args: parsed argparse namespace; `args.domain_identifier` selects
            the family ('uniform', 'rooms' or 'logistics').

    Raises:
        Exception: for an unknown domain identifier.
    """
    domain_identifier = args.domain_identifier
    configs = []
    if domain_identifier == 'uniform':
        # Uniform-cost gridworlds, sizes 7x7 .. 10x10, with 2-4 goals each.
        for size in range(7, 11):
            base_domain_name = f'uniform{size}_{size}-'
            for goals in range(2, 5):
                dir_name = f'./gridworld/{goals}goal/filtered'
                num_instances = len(glob(os.path.join(dir_name, base_domain_name) + '*'))
                configs.append({
                    'source_dir': dir_name,
                    'base_domain_name': base_domain_name,
                    'num_instances': num_instances,
                    'num_goals': goals,
                    'domain_type': 'GRID_WORLD',
                    'domain_ext': '.vw',
                    'out_dir': f'./agrd/uniform/{goals}goal'
                })
    elif domain_identifier == 'rooms':
        # Ten 64-room gridmap scenarios, with 2-4 goals each.
        for idx in range(10):
            base_domain_name = f'64room_tiny_00{idx}-scn'
            for goals in range(2, 5):
                dir_name = f'./gridmap/{goals}goal/filtered'
                num_instances = len(glob(os.path.join(dir_name, base_domain_name) + '*'))
                configs.append({
                    'source_dir': dir_name,
                    'base_domain_name': base_domain_name,
                    'num_instances': num_instances,
                    'num_goals': goals,
                    'domain_type': 'GRID_WORLD',
                    'domain_ext': '.vw',
                    'out_dir': f'./agrd/rooms/{goals}goal'
                })
    elif domain_identifier == 'logistics':
        # Logistics instances, 7-11 locations and 2-4 goals. CLEANUP: a stray
        # dead `pass` statement and stale commented-out range/out_dir toggles
        # were removed from this branch; behavior is unchanged.
        for locs in range(7, 12):
            for goals in range(2, 5):
                base_domain_name = f'geometric_0.4dist_{goals}goal_{locs}loc_3pkg_1trk_'
                dir_name = f'./logistics/{goals}goal'
                num_instances = len(glob(os.path.join(dir_name, base_domain_name) + '*'))
                # Skip location/goal combinations with no generated instances.
                if num_instances == 0:
                    continue
                configs.append({
                    'source_dir': dir_name,
                    'base_domain_name': base_domain_name,
                    'num_instances': num_instances,
                    'num_goals': goals,
                    'domain_type': 'LOGISTICS',
                    'domain_ext': '.logistics',
                    'out_dir': f'./agrd/logistics/{goals}goal'
                })
    else:
        raise Exception(f'Unknown domain identifier: {domain_identifier}')
    filter_active_observer(configs, 1000)
if __name__ == '__main__':
    # Command-line entry point: the single positional argument selects which
    # domain family to filter.
    parser = argparse.ArgumentParser(
        description='Quick and dirty CLI for filtering AGRD instances by only '
                    'those where the observer can actually do something. '
                    'To use, edit the file')
    # AS OF 1/6/20, valid options are 'logistics', 'rooms', 'uniform'
    parser.add_argument('domain_identifier', type=str,
                        help='String identifier for your set of domains.')
    run_filter_observer(parser.parse_args())
| 39.408163 | 120 | 0.621051 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,239 | 0.274404 |
1ae21a526218223ed04c6372b03062ed8240aa78 | 1,671 | py | Python | Controller/controleTelas.py | IuriBritoDev/TKINO | 3c689788324bd5badc84c7969f331b076046c211 | [
"MIT"
] | null | null | null | Controller/controleTelas.py | IuriBritoDev/TKINO | 3c689788324bd5badc84c7969f331b076046c211 | [
"MIT"
] | null | null | null | Controller/controleTelas.py | IuriBritoDev/TKINO | 3c689788324bd5badc84c7969f331b076046c211 | [
"MIT"
] | null | null | null | from View import telaRelatorio, telaNovoProjeto, telaAbrirProjeto, telaCadastro, telaConfigura, telaConexao, telaEditarControle, telaPopUp
from View.Painel import painelSensores, painelControladores, painelConexao
from View.Conexao import telaConAnalogAnalog, telaConAnalogDigit, telaConDigitAnalog, telaConDigitDigit
# Open the windows reachable from the selection tab.
def AbreTelaNovoProjeto(tela):
    """Open the "new project" window on top of `tela`."""
    telaNovoProjeto.TelaNovoProjeto(tela)
def AbreTelaAbrirProjeto(tela):
    """Open the "open project" window on top of `tela`."""
    telaAbrirProjeto.TelaAbrirProjeto(tela)
def AbreTelaRelatorio(tela):
    """Open the report window on top of `tela`."""
    telaRelatorio.TelaRelatorio(tela)
def AbreTelaCadastro(tela):
    """Open the registration window on top of `tela`."""
    telaCadastro.TelaCadastro(tela)
def AbreTelaConfigura(tela):
    """Open the configuration window on top of `tela`."""
    telaConfigura.TelaConfigura(tela)
def AbreTelaConexao(tela):
    """Open the connection window on top of `tela`."""
    telaConexao.TelaConexao(tela)
# Open the actuator-connection windows (analog/digital combinations).
def AbreTelaConAnAn(tela):
    """Open the analog-to-analog connection window on top of `tela`."""
    telaConAnalogAnalog.TelaConAnalogAnalog(tela)
def AbreTelaConAnDig(tela):
    """Open the analog-to-digital connection window on top of `tela`."""
    telaConAnalogDigit.TelaConAnalogDig(tela)
def AbreTelaConDigAn(tela):
    """Open the digital-to-analog connection window on top of `tela`."""
    telaConDigitAnalog.TelaConDigAnalog(tela)
def AbreTelaConDigDig(tela):
    """Open the digital-to-digital connection window on top of `tela`."""
    telaConDigitDigit.TelaConDigDig(tela)
# Open the frames embedded in the tabs.
def AbreFrameSensores(frame):
    """Populate `frame` with the sensors panel."""
    painelSensores.PainelSensores(frame)
def AbreFrameControladores(frame, tela):
    """Populate `frame` with the controllers panel, bound to window `tela`."""
    painelControladores.PainelControladores(frame, tela)
def AbreFrameConexao(frame, tela):
    """Populate `frame` with the connection panel, bound to window `tela`."""
    painelConexao.PainelConexao(frame, tela)
# Open the connection/controller editing windows.
def AbreEditorControlador(tela, controle):
    """Open the editor window for the controller `controle` on top of `tela`."""
    telaEditarControle.TelaEditarControle(tela, controle)
# Abre telas de PopUP
def AbrePopUp(tela, mensagem):
telaPopUp.TelaPopUp(tela, mensagem) | 24.217391 | 138 | 0.79234 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 172 | 0.102564 |
1ae3b0d4623fb39afa8ccdec2cd3e21e3a3da924 | 109 | py | Python | paul_analysis/Python/util/hdf5lib_param.py | lzkelley/arepo-mbh-sims_analysis | f14519552cedd39a040b53e6d7cc538b5b8f38a3 | [
"MIT"
] | null | null | null | paul_analysis/Python/util/hdf5lib_param.py | lzkelley/arepo-mbh-sims_analysis | f14519552cedd39a040b53e6d7cc538b5b8f38a3 | [
"MIT"
] | null | null | null | paul_analysis/Python/util/hdf5lib_param.py | lzkelley/arepo-mbh-sims_analysis | f14519552cedd39a040b53e6d7cc538b5b8f38a3 | [
"MIT"
] | null | null | null | #tables or h5py
# Module-level switch naming the HDF5 backend to use ("h5py" or "tables").
libname="h5py"  # alternative: "tables"
#libname="tables"
def setlib(name):
    """Select the HDF5 backend library by name (e.g. "h5py" or "tables")."""
    global libname
    libname = name
| 13.625 | 23 | 0.724771 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 46 | 0.422018 |
1ae48f79b3cd5943abe896aa004066a2e8e41b32 | 2,914 | py | Python | setup_project.py | WindfallLabs/setup_project | fe87ef0fa9d2152a877c94465b3038ecf092463a | [
"MIT"
] | 1 | 2021-01-29T03:44:06.000Z | 2021-01-29T03:44:06.000Z | setup_project.py | WindfallLabs/setup_project | fe87ef0fa9d2152a877c94465b3038ecf092463a | [
"MIT"
] | null | null | null | setup_project.py | WindfallLabs/setup_project | fe87ef0fa9d2152a877c94465b3038ecf092463a | [
"MIT"
] | 1 | 2021-01-29T03:44:16.000Z | 2021-01-29T03:44:16.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
setup_project.py -- GIS Project Setup Utility
Garin Wally; May 2014/May 2016
This script creates a project folder-environment for GIS projects as follows:
<project_name>/
data/
raw/
<project_name>.gdb
design/
fonts/
images/
rasters/
vectors/
maps/
archive/
final/
reports/
resources/
tools/
"""
import argparse
import os
from shutil import copy2
# import arcpy # moved for speed
# =============================================================================
# CLI ARGS
# RawTextHelpFormatter keeps the folder-tree diagram in __doc__ readable in
# the --help output.
argp = argparse.ArgumentParser(description=__doc__, add_help=True,
                               formatter_class=argparse.RawTextHelpFormatter)
argp.add_argument("-n", action="store", dest="name",
                  help="Name of new project folder.")
argp.add_argument("--gdb", action="store_true", default=False,
                  dest="make_gdb", help="Option to make a gdb on setup.")
argp.add_argument("--cart", action="store_true", default=False,
                  dest="make_cart",
                  help="Option to make cartographic resource folders.")
# TODO: For now, we assume user has $ cd'd into the desired base directory
args = argp.parse_args()
# arcpy is slow to import, so only load it when a geodatabase was requested.
if args.make_gdb:
    import arcpy
# =============================================================================
# FUNCTIONS
def make_gdb(gdb_name):
    """Create a file geodatabase (NOT YET IMPLEMENTED).

    Planned behavior, per the original TODO notes:
      - if no project exists, place the gdb in the cwd
      - if it does, place it in DATA/
      - process: make the gdb in C:/temp and copy it to the destination
    """
    # BUG FIX: the stub previously executed copy2(None, None), which always
    # raised an unhelpful TypeError. Fail loudly and explicitly instead until
    # the function is implemented (its only call site, in main(), is
    # currently commented out).
    raise NotImplementedError("make_gdb is not implemented yet")
def main(dest_folder):
    """Create the GIS project folder skeleton rooted at `dest_folder`.

    Builds the layout described in the module docstring; the design/ tree is
    only created when the --cart flag was given. Raises OSError when
    `dest_folder` already exists.
    """
    join = os.path.join
    # Project root.
    os.mkdir(dest_folder)
    # data/ with its raw/ and misc/ subfolders.
    os.makedirs(join(dest_folder, "data", "raw"))
    os.mkdir(join(dest_folder, "data", "misc"))
    # maps/ with archive/ and final/.
    os.makedirs(join(dest_folder, "maps", "archive"))
    os.mkdir(join(dest_folder, "maps", "final"))
    if args.make_cart:
        # design/ tree for cartographic assets.
        os.makedirs(join(dest_folder, "design", "fonts"))
        for subfolder in ("images", "rasters", "vectors"):
            os.mkdir(join(dest_folder, "design", subfolder))
    # reports/ and resources/tools/.
    os.mkdir(join(dest_folder, "reports"))
    os.makedirs(join(dest_folder, "resources", "tools"))
    return
# =============================================================================
# RUN IT
if __name__ == "__main__":
    # Fall back to a generic project name when -n was not supplied.
    if not args.name:
        args.name = "new_project"
    # The new project is created inside the current working directory.
    new_proj = os.path.join(os.getcwd(), args.name)
    main(new_proj)
| 26.017857 | 79 | 0.555251 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,534 | 0.526424 |
1ae51ac2c341ebe5300267cfbe20cb5e5c501fda | 1,816 | py | Python | tests/format_directory_test.py | garysb/dismantle | b2aeed5916f980c20852d99ae379b0dc1da5a135 | [
"MIT"
] | 2 | 2021-06-02T12:37:13.000Z | 2021-06-08T07:13:20.000Z | tests/format_directory_test.py | garysb/dismantle | b2aeed5916f980c20852d99ae379b0dc1da5a135 | [
"MIT"
] | 5 | 2021-06-29T09:56:15.000Z | 2021-07-12T09:41:19.000Z | tests/format_directory_test.py | area28technologies/dismantle | b2aeed5916f980c20852d99ae379b0dc1da5a135 | [
"MIT"
] | 1 | 2021-12-12T06:17:27.000Z | 2021-12-12T06:17:27.000Z | import os
from pathlib import Path
import pytest
from dismantle.package import DirectoryPackageFormat, PackageFormat
def test_inherits() -> None:
    # DirectoryPackageFormat must plug into the PackageFormat interface.
    assert issubclass(DirectoryPackageFormat, PackageFormat) is True
def test_grasp_exists(datadir: Path) -> None:
    # grasps() accepts an existing directory.
    src = datadir.join('directory_src')
    assert DirectoryPackageFormat.grasps(src) is True
def test_grasp_non_existant(datadir: Path) -> None:
    # grasps() rejects a path that does not exist.
    src = datadir.join('directory_non_existant')
    assert DirectoryPackageFormat.grasps(src) is False
def test_grasp_not_supported(datadir: Path) -> None:
    # grasps() rejects a non-directory (an archive file).
    src = datadir.join('package.zip')
    assert DirectoryPackageFormat.grasps(src) is False
def test_extract_not_supported(datadir: Path) -> None:
    # extract() raises ValueError for a non-directory source.
    src = datadir.join('package.zip')
    dest = datadir.join(f'{src}_output')
    message = 'formatter only supports directories'
    with pytest.raises(ValueError, match=message):
        DirectoryPackageFormat.extract(src, dest)
def test_extract_non_existant(datadir: Path) -> None:
    # extract() raises the same ValueError for a missing source path.
    src = datadir.join('directory_non_existant')
    dest = datadir.join(f'{src}_output')
    message = 'formatter only supports directories'
    with pytest.raises(ValueError, match=message):
        DirectoryPackageFormat.extract(src, dest)
def test_extract_already_exists(datadir: Path) -> None:
    # extract() succeeds when the destination directory already exists.
    src = datadir.join('directory_src')
    dest = datadir.join('directory_exists')
    DirectoryPackageFormat.extract(src, dest)
    assert os.path.exists(dest) is True
    assert os.path.exists(dest / 'package.json') is True
def test_extract_create(datadir: Path) -> None:
    # extract() creates the destination directory when it is missing.
    src = datadir.join('directory_src')
    dest = datadir.join('directory_created')
    DirectoryPackageFormat.extract(src, dest)
    assert os.path.exists(dest) is True
    assert os.path.exists(dest / 'package.json') is True
| 32.428571 | 68 | 0.740639 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 288 | 0.15859 |
1ae5744d76fd5fe30712a3dceb1ec7d3ea37f9e1 | 1,394 | py | Python | labour/helpers.py | darkismus/kompassi | 35dea2c7af2857a69cae5c5982b48f01ba56da1f | [
"CC-BY-3.0"
] | 13 | 2015-11-29T12:19:12.000Z | 2021-02-21T15:42:11.000Z | labour/helpers.py | darkismus/kompassi | 35dea2c7af2857a69cae5c5982b48f01ba56da1f | [
"CC-BY-3.0"
] | 23 | 2015-04-29T19:43:34.000Z | 2021-02-10T05:50:17.000Z | labour/helpers.py | darkismus/kompassi | 35dea2c7af2857a69cae5c5982b48f01ba56da1f | [
"CC-BY-3.0"
] | 11 | 2015-09-20T18:59:00.000Z | 2020-02-07T08:47:34.000Z | from functools import wraps
from django.contrib import messages
from django.shortcuts import get_object_or_404, redirect
from access.cbac import default_cbac_required
from core.models import Event
from .views.admin_menu_items import labour_admin_menu_items
def labour_admin_required(view_func):
    """View decorator for labour-admin pages.

    Applies CBAC access control, verifies that the event uses Kompassi for
    labour management, and injects a dict of common template variables
    (event, admin menu items, admin title) as the view's second argument.
    """
    @wraps(view_func)
    @default_cbac_required
    def wrapper(request, *args, **kwargs):
        # The event slug is consumed here rather than forwarded to the view;
        # request.event is presumably populated upstream (by middleware or
        # default_cbac_required) — confirm before relying on it elsewhere.
        kwargs.pop('event_slug')
        event = request.event
        meta = event.labour_event_meta
        if not meta:
            # Finnish: "This event does not use Kompassi for labour management."
            messages.error(request, "Tämä tapahtuma ei käytä Kompassia työvoiman hallintaan.")
            return redirect('core_event_view', event.slug)
        vars = dict(
            event=event,
            admin_menu_items=labour_admin_menu_items(request, event),
            admin_title='Työvoiman hallinta'
        )
        return view_func(request, vars, event, *args, **kwargs)
    return wrapper
def labour_event_required(view_func):
    """View decorator that resolves `event_slug` to an Event and verifies the
    event uses Kompassi for labour management before calling the view with
    the resolved event. Unlike labour_admin_required, this applies no CBAC
    check and looks the event up itself instead of using request.event.
    """
    @wraps(view_func)
    def wrapper(request, event_slug, *args, **kwargs):
        event = get_object_or_404(Event, slug=event_slug)
        meta = event.labour_event_meta
        if not meta:
            # Finnish: "This event does not use Kompassi for labour management."
            messages.error(request, "Tämä tapahtuma ei käytä Kompassia työvoiman hallintaan.")
            return redirect('core_event_view', event.slug)
        return view_func(request, event, *args, **kwargs)
    return wrapper
| 30.304348 | 94 | 0.691535 | 0 | 0 | 0 | 0 | 1,017 | 0.723843 | 0 | 0 | 191 | 0.135943 |
1ae6b0af984b4e774a2ea4fe2177c6d38cd7328b | 411 | py | Python | mnist/knows.py | huangjunxiong11/TF2 | 6de61c28c59ef34be7e53762b3a759da152642f7 | [
"MIT"
] | null | null | null | mnist/knows.py | huangjunxiong11/TF2 | 6de61c28c59ef34be7e53762b3a759da152642f7 | [
"MIT"
] | null | null | null | mnist/knows.py | huangjunxiong11/TF2 | 6de61c28c59ef34be7e53762b3a759da152642f7 | [
"MIT"
] | null | null | null | import tensorflow as tf
import numpy as np
# Toy snippet: build a 5x3 "prediction" (out) and "target" (y_onehot) pair
# and compute a squared-error loss with TensorFlow ops.
a = np.arange(15)
out = a.reshape(5, 3)
c = np.arange(15) / 2
y_onehot = c.reshape(5, 3)
out_tensor = tf.convert_to_tensor(out, dtype=tf.float32)
y_onehot_tensor = tf.convert_to_tensor(y_onehot, dtype=tf.float32)
# y_onehot = tf.one_hot(y_onehot_tensor, depth=3)  # one-hot encoding
loss1 = tf.square(out_tensor - y_onehot_tensor)
# Sum of squared errors divided by 32 — presumably a fixed batch-size
# constant; note it does not match the 15 elements actually present.
loss2 = tf.reduce_sum(loss1) / 32
pass | 22.833333 | 66 | 0.737226 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 66 | 0.159036 |
1ae7b7205eff654470b26ae16a62edb36fffcde1 | 632 | py | Python | belady.py | EstebanGS13/operating-system-algorithms | 4f9ec10bb3c57e56816329c19971df71a75f4216 | [
"MIT"
] | null | null | null | belady.py | EstebanGS13/operating-system-algorithms | 4f9ec10bb3c57e56816329c19971df71a75f4216 | [
"MIT"
] | null | null | null | belady.py | EstebanGS13/operating-system-algorithms | 4f9ec10bb3c57e56816329c19971df71a75f4216 | [
"MIT"
] | null | null | null |
def fifo_page_faults(pages, frames, verbose=True):
    """Simulate FIFO page replacement and return the number of page faults.

    Args:
        pages: sequence of page references, in access order.
        frames: number of physical frames available.
        verbose: when True, print the per-access trace and summary line
            exactly as the original script did.

    Returns:
        int: total page faults incurred.
    """
    memory = []
    faults = 0
    for page in pages:
        out = None  # page evicted on this access, if any
        if page not in memory:
            if len(memory) == frames:
                # FIFO: evict the page that has been resident the longest.
                out = memory.pop(0)
            memory.append(page)
            faults += 1
        if verbose:
            print(f"In: {page} --> {memory} --> Out: {out}")
    if verbose:
        print(f"Marcos: {frames}, Fallas: {faults}\n")
    return faults


if __name__ == "__main__":
    # Belady's anomaly demo: with FIFO, MORE frames can cause MORE faults.
    # The simulation was extracted into fifo_page_faults() so it is reusable
    # and testable; the printed output is unchanged.
    pages = [5, 4, 3, 2, 1, 4, 3, 5, 4, 3, 2, 1, 5]
    faults = {frames: fifo_page_faults(pages, frames) for frames in (3, 4)}
    if faults[4] > faults[3]:
        print(f"La secuencia {pages} presenta anomalia de Belady")
| 24.307692 | 66 | 0.457278 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 149 | 0.235759 |
1ae930f6ba395d300d3bc1a025005ce82d81ccc0 | 247 | py | Python | dictvaluesortt.py | Srinivassan-Ramamurthy/python_programs | 53b390669c7e88532c67d80b758a9199d6fde8cf | [
"bzip2-1.0.6"
] | null | null | null | dictvaluesortt.py | Srinivassan-Ramamurthy/python_programs | 53b390669c7e88532c67d80b758a9199d6fde8cf | [
"bzip2-1.0.6"
] | null | null | null | dictvaluesortt.py | Srinivassan-Ramamurthy/python_programs | 53b390669c7e88532c67d80b758a9199d6fde8cf | [
"bzip2-1.0.6"
] | null | null | null | a={'a':'hello','b':'1','c':'jayalatha','d':[1,2]}
# Rebuild the mapping `a` into `d`, inserting entries in order of increasing
# value length (both the string and the list values support len()).
d = {}
values_by_length = sorted(a.values(), key=len)
print(values_by_length)
for value in values_by_length:
    for key in a:
        if value == a[key]:
            d[key] = a[key]
print(d)
| 13.722222 | 49 | 0.384615 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 33 | 0.133603 |
1aea758ba84a554dd7667811c068380137ca1b62 | 2,419 | py | Python | papermerge/core/urls.py | ebdavison/papermerge | d177f1af331214e0f62407624e7029ce4953bd9b | [
"Apache-2.0"
] | null | null | null | papermerge/core/urls.py | ebdavison/papermerge | d177f1af331214e0f62407624e7029ce4953bd9b | [
"Apache-2.0"
] | null | null | null | papermerge/core/urls.py | ebdavison/papermerge | d177f1af331214e0f62407624e7029ce4953bd9b | [
"Apache-2.0"
] | null | null | null | from django.urls import path, include
from django.contrib.auth.decorators import login_required
from papermerge.core.views import documents as doc_views
from papermerge.core.views import access as access_views
from papermerge.core.views import api as api_views
# Routes mounted under the 'document/' prefix (see urlpatterns in this file).
document_patterns = [
    # Page preview images, with and without an explicit zoom step.
    path(
        '<int:id>/preview/page/<int:page>',
        doc_views.preview,
        name="preview"
    ),
    path(
        '<int:id>/preview/<int:step>/page/<int:page>',
        doc_views.preview,
        name="preview"
    ),
    # OCR (hOCR) output for a page at a given zoom step.
    path(
        '<int:id>/hocr/<int:step>/page/<int:page>',
        doc_views.hocr,
        name="hocr"
    ),
    path(
        '<int:id>/download/',
        doc_views.document_download,
        name="document_download"
    ),
    # Per-user setting toggles, e.g. usersettings/<option>/<value>.
    path(
        'usersettings/<str:option>/<str:value>',
        doc_views.usersettings,
        name="usersettings"
    ),
]
# URL namespace used when reversing these routes (e.g. 'core:index').
app_name = 'core'
urlpatterns = [
    path('', doc_views.index, name="index"),
    # Document sub-routes defined in document_patterns above.
    path(
        'document/', include(document_patterns)
    ),
    # Access control views.
    path(
        'access/<int:id>', access_views.access, name="access"
    ),
    path(
        'usergroups', access_views.user_or_groups, name="user_or_groups"
    ),
    # Upload requires an authenticated user.
    path(
        'upload/',
        login_required(doc_views.DocumentsUpload.as_view()),
        name="upload"
    ),
    # Folder/node manipulation endpoints.
    path(
        'create-folder/',
        doc_views.create_folder,
        name='create_folder'
    ),
    path(
        'rename-node/<slug:redirect_to>/',
        doc_views.rename_node,
        name='rename_node'
    ),
    path(
        'delete-node/',
        doc_views.delete_node,
        name='delete_node'
    ),
    path(
        'cut-node/',
        doc_views.cut_node,
        name='cut_node'
    ),
    path(
        'paste-node/',
        doc_views.paste_node,
        name='paste_node'
    ),
    path(
        'clipboard/',
        doc_views.clipboard,
        name='clipboard'
    ),
    # JSON API endpoints backed by the api views module.
    path(
        'api/documents',
        api_views.DocumentsView.as_view(),
        name='api_documents'
    ),
    path(
        'api/document/upload/<str:filename>',
        api_views.DocumentUploadView.as_view(),
        name='api_document_upload'
    ),
    path(
        'api/document/<int:pk>/',
        api_views.DocumentView.as_view(),
        name='api_document'
    ),
    path(
        'api/document/<int:doc_id>/pages',
        api_views.PagesView.as_view(),
        name='api_pages'
    ),
]
| 22.820755 | 72 | 0.56759 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 675 | 0.279041 |
1aeac0d09b1ffbcde4dd4b8e81ad05b4d31e9264 | 4,773 | py | Python | Ene-Jun-2021/flores-fernandez-fernando/Primer Parcial/Ejercicio 2/Ejercicio_2.py | bryanbalderas/DAS_Sistemas | 1e31f088c0de7134471025a5730b0abfc19d936e | [
"MIT"
] | 41 | 2017-09-26T09:36:32.000Z | 2022-03-19T18:05:25.000Z | Ene-Jun-2021/flores-fernandez-fernando/Primer Parcial/Ejercicio 2/Ejercicio_2.py | bryanbalderas/DAS_Sistemas | 1e31f088c0de7134471025a5730b0abfc19d936e | [
"MIT"
] | 67 | 2017-09-11T05:06:12.000Z | 2022-02-14T04:44:04.000Z | Ene-Jun-2021/flores-fernandez-fernando/Primer Parcial/Ejercicio 2/Ejercicio_2.py | bryanbalderas/DAS_Sistemas | 1e31f088c0de7134471025a5730b0abfc19d936e | [
"MIT"
] | 210 | 2017-09-01T00:10:08.000Z | 2022-03-19T18:05:12.000Z | import abc
# Component interface: declares buscador(), the operation used to look up a
# PaginaWeb inside a SitioWeb; every concrete component/decorator provides it.
class ISitioWebComponent(metaclass=abc.ABCMeta):
    """Abstract component of the decorator pattern for web sites."""
    @abc.abstractmethod
    def buscador(self):
        pass
# Concrete Component
class SitioWebConcreteComponent(ISitioWebComponent):
    """A web site with a domain, a category, and a list of PaginaWeb pages.

    Its buscador() is a stub; real search is added by the decorator.
    """
    def __init__(self, dominio: str,categoria: str, paginas: list):
        self._dominio = dominio
        self._categoria = categoria
        self._paginas = paginas

    def __str__(self):
        return f"""
        El dominio del sitio es: {self._dominio}
        La categoria del sitio es: {self._categoria}
        Las paginas del sitio son: {self._paginas}
        """

    def buscador(self):
        # Base behavior: no search performed ("page not searched").
        return f"Pagina no buscada"
# Base Decorator
class SitioWebDecorator(ISitioWebComponent, metaclass=abc.ABCMeta):
    """Base decorator: wraps a component and re-declares buscador()."""
    def __init__(self,sitio_web: ISitioWebComponent):
        # The wrapped site component the decorator delegates to.
        self._sitio_web = sitio_web
    @abc.abstractmethod
    def buscador(self):
        pass
# Concrete Decorator: A
class BuscadorConcreteDecorator(SitioWebDecorator):
    """Decorator that adds URL-based page lookup to a wrapped site."""

    def buscador(self, pagina: object):
        """Search the wrapped site for `pagina` by its unique URL.

        Returns a string with the page's details when a page with the same
        URL exists in the site, otherwise an HTTP-404-style error string.
        """
        # IDIOM FIX: the original iterated the page list while also keeping a
        # manual counter `i` (and never used the loop variable); compare the
        # loop variable directly instead. Output strings are unchanged.
        for pag in self._sitio_web._paginas:
            if pagina._url == pag._url:
                return f"La pagina: {pag}\nsi Existe"
        return f"ERROR-HTTP 404 page Not found"
#clase PaginaWeb la misma del Ejercicio_1
class PaginaWeb(object):
    """A single web page: location, format, content and SEO metadata."""

    def __init__(self, url: str, ruta: str, formato: str, contenido: str,
                 titulo: str, slug: str, metatags: list):
        # Store every constructor argument as a private attribute.
        campos = ("url", "ruta", "formato", "contenido", "titulo", "slug", "metatags")
        valores = (url, ruta, formato, contenido, titulo, slug, metatags)
        for campo, valor in zip(campos, valores):
            setattr(self, "_" + campo, valor)

    def __str__(self):
        return f"""
        El url de la pagina es: {self._url}
        La ruta del archivo es:{self._ruta}
        El formato del archivo es: {self._formato}
        El contenido de la pagina es: {self._contenido}
        El titulo de la pagina es: {self._titulo}
        El slug de la pagina es: {self._slug}
        Los meta-tags de la pagina son: {self._metatags}
        """
def main():
    """Build three demo pages and a site, then search one page via the decorator."""
    metatags_base = [
        '<meta name = "description" content = "this is the description">',
        '<meta http-equiv = "refresh" content = "100"',
    ]
    urls = [
        "https://www.youtube.com/watch?v=dQw4w9WgXcQ",
        "https://www.youtube.com/watch?v=r1lEc1w92RE",
        "https://www.youtube.com/watch?v=8OJf0-r7sZ0",
    ]
    # Every demo page differs only in its url and its sequence number.
    paginas = []
    for numero, url in enumerate(urls, start=1):
        paginas.append(
            PaginaWeb(
                url,
                "C://User/youtube/user",
                "HTML",
                f"<body> <p> hola soy una pagina de youtube {numero} </p></body>",
                f"<h1>Youtube {numero}</h1>",
                f"youtube-{numero}",
                list(metatags_base),
            )
        )
    # The site only contains the first two pages; page 3 exists but is
    # deliberately left out so searches for it would return the 404 message.
    sitio = SitioWebConcreteComponent("www.youtube.com", "Entretenimiento", paginas[:2])
    # Decorate the site with search capability and look up page 2.
    buscar = BuscadorConcreteDecorator(sitio)
    resultado = buscar.buscador(paginas[1])
    print(resultado)
if __name__ == '__main__':
main() | 44.194444 | 122 | 0.577415 | 2,558 | 0.535931 | 0 | 0 | 120 | 0.025141 | 0 | 0 | 2,502 | 0.524199 |
1aec3c2b4298503556aef1b8d4f0b2abb934f5fa | 2,003 | py | Python | DeBERTa/data/data_sampler.py | tirkarthi/DeBERTa | c558ad99373dac695128c9ec45f39869aafd374e | [
"MIT"
] | 7 | 2021-02-04T01:26:55.000Z | 2021-11-23T00:38:47.000Z | DeBERTa/data/data_sampler.py | tirkarthi/DeBERTa | c558ad99373dac695128c9ec45f39869aafd374e | [
"MIT"
] | 1 | 2021-03-18T00:23:17.000Z | 2022-01-05T15:36:48.000Z | src/LASER/data/data_sampler.py | BigBird01/LASER | 57143200814583410acdd0c5ac0a0f8bab8a1f7e | [
"MIT"
] | null | null | null | #
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Author: Pengcheng He (penhe@microsoft.com)
# Date: 05/15/2019
#
import os
import numpy as np
import math
import sys
from torch.utils.data import Sampler
__all__=['BatchSampler', 'DistributedBatchSampler', 'RandomSampler', 'SequentialSampler']
class BatchSampler(Sampler):
    """Wraps an index sampler and yields lists of `batch_size` indices.

    The final batch may be smaller when the sampler length is not a
    multiple of the batch size.
    """

    def __init__(self, sampler, batch_size):
        self.sampler = sampler
        self.batch_size = batch_size

    def __iter__(self):
        bucket = []
        for index in self.sampler:
            bucket.append(index)
            if len(bucket) == self.batch_size:
                yield bucket
                bucket = []
        # Trailing partial batch, if any.
        if bucket:
            yield bucket

    def __len__(self):
        # Number of batches, counting a trailing partial batch.
        return (len(self.sampler) + self.batch_size - 1) // self.batch_size
class DistributedBatchSampler(Sampler):
    """Splits each batch from an upstream batch sampler across workers.

    Every rank receives an equally sized slice of each batch.  A batch
    whose size does not divide `world_size` is either dropped
    (`drop_last`) or padded in place by repeating its first element.
    """

    def __init__(self, sampler, rank=0, world_size=1, drop_last=False):
        self.sampler = sampler
        self.rank = rank
        self.world_size = world_size
        self.drop_last = drop_last

    def __iter__(self):
        for batch in self.sampler:
            remainder = len(batch) % self.world_size
            if remainder != 0:
                if self.drop_last:
                    break
                # Pad in place (like the original) with copies of the
                # first element until the size divides world_size.
                batch.extend(batch[0] for _ in range(self.world_size - remainder))
            per_rank = len(batch) // self.world_size
            start = self.rank * per_rank
            yield batch[start:start + per_rank]

    def __len__(self):
        return len(self.sampler)
class RandomSampler(Sampler):
    """Yields all sample indices in a seeded random order.

    Each iteration reshuffles the index array, so consecutive epochs see
    different orders while the whole sequence stays reproducible per seed.
    """

    def __init__(self, total_samples: int, data_seed: int = 0):
        self.indices = np.array(np.arange(total_samples))
        self.rng = np.random.RandomState(data_seed)

    def __iter__(self):
        # Shuffle in place, then emit the new order.
        self.rng.shuffle(self.indices)
        yield from self.indices

    def __len__(self):
        return len(self.indices)
class SequentialSampler(Sampler):
    """Yields sample indices in their natural order 0 .. total_samples-1."""

    def __init__(self, total_samples: int):
        self.indices = np.array(np.arange(total_samples))

    def __iter__(self):
        yield from self.indices

    def __len__(self):
        return len(self.indices)
| 26.012987 | 89 | 0.683974 | 1,621 | 0.809286 | 685 | 0.341987 | 0 | 0 | 0 | 0 | 260 | 0.129805 |
1aef2eb7ef26d06658597d29156fcd62d72c7d03 | 85 | py | Python | full_simulation/my_code/physics.py | kvmu/KURRI-workterm | 275f73bdbd8e5ecd7689715f0adc9e824c7ee720 | [
"MIT"
] | null | null | null | full_simulation/my_code/physics.py | kvmu/KURRI-workterm | 275f73bdbd8e5ecd7689715f0adc9e824c7ee720 | [
"MIT"
] | 1 | 2015-11-11T05:06:27.000Z | 2015-11-11T05:06:27.000Z | full_simulation/my_code/physics.py | kvmu/KURRI-workterm | 275f73bdbd8e5ecd7689715f0adc9e824c7ee720 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 04 17:37:37 2015
@author: Kevin
"""
| 10.625 | 35 | 0.564706 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 82 | 0.964706 |
1aef5c7a0e77fccf10468e703c0a3cb229409069 | 2,336 | py | Python | tests/__init__.py | contentstack/contentstack-utils-python | 3c7bb445dc77e5a2ab18ceac6f87a35d37b52186 | [
"MIT"
] | null | null | null | tests/__init__.py | contentstack/contentstack-utils-python | 3c7bb445dc77e5a2ab18ceac6f87a35d37b52186 | [
"MIT"
] | null | null | null | tests/__init__.py | contentstack/contentstack-utils-python | 3c7bb445dc77e5a2ab18ceac6f87a35d37b52186 | [
"MIT"
] | null | null | null | # pytest --html=tests/report/test-report.html
# above command runs tests and test reports generates in tests/report location.
# nosetests --with-coverage --cover-html
# clean all the .pyc files
# find . -name \*.pyc -delete
# nosetests --with-coverage --cover-html
# pytest --cov=contentstack_utils
# pytest -v --cov=contentstack_utils --cov-report=html
# pytest --html=tests/report/test-report.html
from unittest import TestLoader, TestSuite
from .convert_style import TestConvertStyle
from .test_default_opt_others import TestDefaultOptOther
from .test_helper_node_to_html import TestHelperNodeToHtml
from .test_item_types import TestItemType
from .test_metadata import TestMetadata
from .test_option_render_mark import TestOptionRenderMark
from .test_render_default_options import TestRenderDefaultOption
from .test_render_options import TestRenderOption
from .test_style_type import TestStyleType
from .test_util_srte import TestSuperchargedUtils
from .test_utils import TestUtility
def all_tests():
    """Assemble every test case of this package into one TestSuite.

    Bug fix: the original built the suite and then discarded it, so
    callers always received ``None``; the assembled suite is now
    returned.  Suite order is unchanged.
    """
    case_classes = [
        TestItemType,
        TestMetadata,
        TestStyleType,
        TestUtility,
        TestDefaultOptOther,
        TestHelperNodeToHtml,
        TestOptionRenderMark,
        TestRenderDefaultOption,
        TestRenderOption,
        TestSuperchargedUtils,
        TestConvertStyle,
    ]
    loader = TestLoader()
    suite = TestSuite(loader.loadTestsFromTestCase(cls) for cls in case_classes)
    return suite
| 44.075472 | 99 | 0.81036 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 392 | 0.167808 |
1af00cd12e0385425fbe7be77246922d83387198 | 584 | py | Python | sorting/builtin.py | umd-coding-workshop/algorithms | 49ea6f39167b627a0a3d2e9e4bf249e3e828f4e5 | [
"Apache-2.0"
] | null | null | null | sorting/builtin.py | umd-coding-workshop/algorithms | 49ea6f39167b627a0a3d2e9e4bf249e3e828f4e5 | [
"Apache-2.0"
] | null | null | null | sorting/builtin.py | umd-coding-workshop/algorithms | 49ea6f39167b627a0a3d2e9e4bf249e3e828f4e5 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
''' Use the python built-in sort for comparison
against other implementations.'''
import sys
def merge(a, b):
    """Merge two sorted lists into a new sorted list.

    Fixes three issues in the original implementation: it consumed its
    arguments via ``pop(0)`` (mutating the caller's lists), every
    ``pop(0)`` is O(n) which made the merge quadratic, and the local
    variable ``sorted`` shadowed the builtin.  This version walks both
    lists with indices, leaving the inputs untouched and running in
    O(len(a) + len(b)).
    """
    i = j = 0
    merged = []
    while i < len(a) and j < len(b):
        if a[i] < b[j]:
            merged.append(a[i])
            i += 1
        else:
            merged.append(b[j])
            j += 1
    # At most one of these tails is non-empty.
    merged.extend(a[i:])
    merged.extend(b[j:])
    return merged
# Script entry: argv[1] is a path to a tab-separated file whose first
# column holds integers.  Sort them with the builtin sorted() to serve
# as a baseline against the other sorting implementations.
with open(sys.argv[1], 'r') as datafile:
    data = [int(line.split('\t')[0].strip()) for line in datafile.readlines()]
result = sorted(data)
print(len(result), result)
| 21.62963 | 78 | 0.568493 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 115 | 0.196918 |
1af1ba1f73d50b880d12d443416c0d5ee955fd4e | 1,160 | bzl | Python | moq4/repo.bzl | tomaszstrejczek/rules_dotnet_3rd_party | 09f29f062d5250fe7cdc45be872ce9bd1562c60b | [
"Apache-2.0"
] | 1 | 2021-10-10T17:17:27.000Z | 2021-10-10T17:17:27.000Z | moq4/repo.bzl | tomaszstrejczek/rules_dotnet_3rd_party | 09f29f062d5250fe7cdc45be872ce9bd1562c60b | [
"Apache-2.0"
] | null | null | null | moq4/repo.bzl | tomaszstrejczek/rules_dotnet_3rd_party | 09f29f062d5250fe7cdc45be872ce9bd1562c60b | [
"Apache-2.0"
] | null | null | null | load("@io_bazel_rules_dotnet//dotnet:defs.bzl", "core_library", "core_resx", "core_xunit_test")
core_resx(
name = "core_resource",
src = ":src/Moq/Properties/Resources.resx",
identifier = "Moq.Properties.Resources.resources",
)
core_library(
name = "Moq.dll",
srcs = glob(["src/Moq/**/*.cs"]),
defines = [
"NETCORE",
],
keyfile = ":Moq.snk",
resources = [":core_resource"],
visibility = ["//visibility:public"],
nowarn = ["CS3027"],
deps = [
"@//ifluentinterface:IFluentInterface.dll",
"@TypeNameFormatter//:TypeNameFormatter.dll",
"@castle.core//:Castle.Core.dll",
"@core_sdk_stdlib//:libraryset",
],
)
core_xunit_test(
name = "Moq.Tests.dll",
srcs = glob(
["tests/Moq.Tests/**/*.cs"],
exclude = ["**/FSharpCompatibilityFixture.cs"],
),
defines = [
"NETCORE",
],
keyfile = ":Moq.snk",
nowarn = ["CS1701"],
visibility = ["//visibility:public"],
deps = [
":Moq.dll",
"@xunit.assert//:lib",
"@xunit.extensibility.core//:lib",
"@xunit.extensibility.execution//:lib",
],
)
| 25.217391 | 95 | 0.563793 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 633 | 0.54569 |
1af777d49fa7d0ed5d696c94bf1c6cab8fc64d57 | 8,160 | py | Python | soaplib/core/test/type/test_clazz.py | divaliu1408/overfit | 083dcfaa758391092933e19544462cd831e73ef0 | [
"Apache-2.0"
] | null | null | null | soaplib/core/test/type/test_clazz.py | divaliu1408/overfit | 083dcfaa758391092933e19544462cd831e73ef0 | [
"Apache-2.0"
] | null | null | null | soaplib/core/test/type/test_clazz.py | divaliu1408/overfit | 083dcfaa758391092933e19544462cd831e73ef0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# soaplib - Copyright (C) Soaplib contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
import datetime
import unittest
from soaplib.core.model.clazz import ClassModel
from soaplib.core.model.clazz import Array
from soaplib.core.model.primitive import DateTime
from soaplib.core.model.primitive import Float
from soaplib.core.model.primitive import Integer
from soaplib.core.model.primitive import String
from lxml import etree
ns_test = 'test_namespace'
class Address(ClassModel):
street = String(min_occurs=1)
city = String(min_occurs=1)
zip = Integer(min_occurs=1)
since = DateTime(min_occurs=1)
lattitude = Float(min_occurs=1)
longitude = Float(min_occurs=1)
Address.resolve_namespace(Address,__name__)
class Person(ClassModel):
name = String
birthdate = DateTime
age = Integer
addresses = Array(Address)
titles = Array(String)
Person.resolve_namespace(Person,__name__)
class Employee(Person):
employee_id = Integer
salary = Float
Employee.resolve_namespace(Employee,__name__)
class Level2(ClassModel):
arg1 = String
arg2 = Float
Level2.resolve_namespace(Level2, __name__)
class Level3(ClassModel):
arg1 = Integer
Level3.resolve_namespace(Level3, __name__)
class Level4(ClassModel):
arg1 = String
Level4.resolve_namespace(Level4, __name__)
class Level1(ClassModel):
level2 = Level2
level3 = Array(Level3)
level4 = Array(Level4)
Level1.resolve_namespace(Level1, __name__)
class TestClassModel(unittest.TestCase):
def test_simple_class(self):
a = Address()
a.street = '123 happy way'
a.city = 'badtown'
a.zip = 32
a.lattitude = 4.3
a.longitude = 88.0
element = etree.Element('test')
Address.to_parent_element(a, ns_test, element)
element = element[0]
self.assertEquals(6, len(element.getchildren()))
r = Address.from_xml(element)
self.assertEquals(a.street, r.street)
self.assertEquals(a.city, r.city)
self.assertEquals(a.zip, r.zip)
self.assertEquals(a.lattitude, r.lattitude)
self.assertEquals(a.longitude, r.longitude)
self.assertEquals(a.since, r.since)
def test_nested_class(self): # FIXME: this test is incomplete
p = Person()
element = etree.Element('test')
Person.to_parent_element(p, ns_test, element)
element = element[0]
self.assertEquals(None, p.name)
self.assertEquals(None, p.birthdate)
self.assertEquals(None, p.age)
self.assertEquals(None, p.addresses)
def test_class_array(self):
peeps = []
names = ['bob', 'jim', 'peabody', 'mumblesleves']
for name in names:
a = Person()
a.name = name
a.birthdate = datetime.datetime(1979, 1, 1)
a.age = 27
peeps.append(a)
type = Array(Person)
type.resolve_namespace(type,__name__)
element = etree.Element('test')
type.to_parent_element(peeps, ns_test, element)
element = element[0]
self.assertEquals(4, len(element.getchildren()))
peeps2 = type.from_xml(element)
for i in range(0, 4):
self.assertEquals(peeps2[i].name, names[i])
self.assertEquals(peeps2[i].birthdate,
datetime.datetime(1979, 1, 1))
def test_class_nested_array(self):
peeps = []
names = ['bob', 'jim', 'peabody', 'mumblesleves']
for name in names:
a = Person()
a.name = name
a.birthdate = datetime.datetime(1979, 1, 1)
a.age = 27
a.addresses = []
for i in range(0, 25):
addr = Address()
addr.street = '555 downtown'
addr.city = 'funkytown'
a.addresses.append(addr)
peeps.append(a)
type = Array(Person)
type.resolve_namespace(type,__name__)
element = etree.Element('test')
type.to_parent_element(peeps, ns_test, element)
element = element[0]
self.assertEquals(4, len(element.getchildren()))
peeps2 = type.from_xml(element)
for peep in peeps2:
self.assertEquals(27, peep.age)
self.assertEquals(25, len(peep.addresses))
self.assertEquals('funkytown', peep.addresses[18].city)
def test_complex_class(self):
l = Level1()
l.level2 = Level2()
l.level2.arg1 = 'abcd'
l.level2.arg2 = 1.444
l.level3 = []
l.level4 = []
for i in range(0, 100):
a = Level3()
a.arg1 = i
l.level3.append(a)
for i in range(0, 4):
a = Level4()
a.arg1 = str(i)
l.level4.append(a)
element = etree.Element('test')
Level1.to_parent_element(l, ns_test, element)
element = element[0]
l1 = Level1.from_xml(element)
self.assertEquals(l1.level2.arg1, l.level2.arg1)
self.assertEquals(l1.level2.arg2, l.level2.arg2)
self.assertEquals(len(l1.level4), len(l.level4))
self.assertEquals(100, len(l.level3))
def test_customize(self):
class Base(ClassModel):
class Attributes(ClassModel.Attributes):
prop1=3
prop2=6
Base2 = Base.customize(prop1=4)
self.assertNotEquals(Base.Attributes.prop1, Base2.Attributes.prop1)
self.assertEquals(Base.Attributes.prop2, Base2.Attributes.prop2)
class Derived(Base):
class Attributes(Base.Attributes):
prop3 = 9
prop4 = 12
Derived2 = Derived.customize(prop1=5, prop3=12)
self.assertEquals(Base.Attributes.prop1, 3)
self.assertEquals(Base2.Attributes.prop1, 4)
self.assertEquals(Derived.Attributes.prop1, 3)
self.assertEquals(Derived2.Attributes.prop1, 5)
self.assertNotEquals(Derived.Attributes.prop3, Derived2.Attributes.prop3)
self.assertEquals(Derived.Attributes.prop4, Derived2.Attributes.prop4)
Derived3 = Derived.customize(prop3=12)
Base.prop1 = 4
# changes made to bases propagate, unless overridden
self.assertEquals(Derived.Attributes.prop1, Base.Attributes.prop1)
self.assertNotEquals(Derived2.Attributes.prop1, Base.Attributes.prop1)
self.assertEquals(Derived3.Attributes.prop1, Base.Attributes.prop1)
def test_from_string(self):
from soaplib.core.util.model_utils import ClassModelConverter
class Simple(ClassModel):
number = Integer
text = String
class NotSoSimple(ClassModel):
number_1 = Integer
number_2 = Integer
body = Simple
nss = NotSoSimple()
nss.number_1 = 100
nss.number_2 = 1000
nss.body = Simple()
nss.body.number = 1
nss.body.text = "Some Text"
cmc = ClassModelConverter(nss, "testfromstring", include_ns=False)
element = cmc.to_etree()
assert nss.body.number == 1
assert nss.number_1 == 100
nss_from_xml = NotSoSimple.from_string(cmc.to_xml())
assert nss_from_xml.body.number == 1
assert nss_from_xml.body.text == "Some Text"
assert nss_from_xml.number_1 == 100
assert nss_from_xml.number_2 == 1000
if __name__ == '__main__':
unittest.main()
| 29.458484 | 81 | 0.634436 | 6,612 | 0.810294 | 0 | 0 | 0 | 0 | 0 | 0 | 1,091 | 0.133701 |
1af789c7e288cbf0b52eef2ee02059ff894ca5b5 | 2,097 | py | Python | code/strats/npctt.py | yasirroni/PrisonersDilemmaTournament | ce3de71ff2ccb647aa00129473ff60f985e16e17 | [
"MIT"
] | null | null | null | code/strats/npctt.py | yasirroni/PrisonersDilemmaTournament | ce3de71ff2ccb647aa00129473ff60f985e16e17 | [
"MIT"
] | null | null | null | code/strats/npctt.py | yasirroni/PrisonersDilemmaTournament | ce3de71ff2ccb647aa00129473ff60f985e16e17 | [
"MIT"
] | null | null | null | from decimal import Decimal
import numpy
def strategy(history, memory):
    """Nice Patient Comparative Tit for Tat (NPCTT).

    Nice: never the first to defect.  Tit for Tat: answer a defection
    with a defection.  Patient/Comparative: when our own defection count
    exceeds half of the opponent's, treat their last defection as likely
    retaliation for ours and offer cooperation again.

    ``history`` is a 2 x n array (row 0 = our moves, row 1 = theirs;
    1 = cooperate, 0 = defect).  Returns ``(choice, None)``.
    """
    rounds_played = history.shape[1]
    their_last = history[1, -1] if rounds_played >= 1 else 1
    my_second_last = history[0, -2] if rounds_played >= 2 else 1

    if rounds_played == 0:
        forgiving = True
    else:
        my_defections = int(numpy.count_nonzero(history[0, :rounds_played] == 0))
        their_defections = int(numpy.count_nonzero(history[1, :rounds_played] == 0))
        if their_defections > 0:
            # ratio > 1/2 rewritten in exact integer form:
            # mine / theirs > 1/2  <=>  2 * mine > theirs.
            forgiving = 2 * my_defections > their_defections
        else:
            # No opponent defections yet: nothing to hold against them.
            forgiving = True

    if their_last == 1 or (forgiving and my_second_last == 0):
        return 1, None
    return 0, None
1af865cf794ffe9926668927a099faebe449cba9 | 1,202 | py | Python | onadata/apps/api/tests/permissions/test_permissions.py | childhelpline/myhelpline | d72120ee31b6713cbaec79f299f5ee8bcb7ea429 | [
"BSD-3-Clause"
] | 1 | 2018-07-15T13:13:43.000Z | 2018-07-15T13:13:43.000Z | onadata/apps/api/tests/permissions/test_permissions.py | aondiaye/myhelpline | d72120ee31b6713cbaec79f299f5ee8bcb7ea429 | [
"BSD-3-Clause"
] | 14 | 2018-07-10T12:48:46.000Z | 2022-03-11T23:24:51.000Z | onadata/apps/api/tests/permissions/test_permissions.py | aondiaye/myhelpline | d72120ee31b6713cbaec79f299f5ee8bcb7ea429 | [
"BSD-3-Clause"
] | 5 | 2018-07-04T07:59:14.000Z | 2020-01-28T07:50:18.000Z | from django.contrib.auth.models import User
from mock import MagicMock
from onadata.apps.main.tests.test_base import TestBase
from onadata.apps.logger.models import Instance, XForm
from onadata.apps.api.permissions import MetaDataObjectPermissions
class TestPermissions(TestBase):
    """Unit tests for MetaDataObjectPermissions on Instance metadata."""

    def setUp(self):
        # Mocked DRF view and a mocked Instance/XForm pair; only the
        # permission class under test is a real object.
        self.view = MagicMock()
        self.permissions = MetaDataObjectPermissions()
        self.instance = MagicMock(Instance)
        self.instance.xform = MagicMock(XForm)

    def test_delete_instance_metadata_perms(self):
        # A MagicMock user answers every permission check truthily, so
        # deleting instance metadata must be allowed.
        request = MagicMock(user=MagicMock(), method='DELETE')
        obj = MagicMock(content_object=self.instance)
        self.assertTrue(
            self.permissions.has_object_permission(
                request, self.view, obj))

    def test_delete_instance_metadata_without_perms(self):
        # A real (unsaved) user has no permissions, so the same request
        # must be rejected.
        user = User(username="test")
        instance = Instance()
        instance.xform = XForm()
        # user.has_perms.return_value = False
        request = MagicMock(user=user, method='DELETE')
        obj = MagicMock(content_object=instance)
        self.assertFalse(
            self.permissions.has_object_permission(
                request, self.view, obj))
| 36.424242 | 66 | 0.69218 | 950 | 0.790349 | 0 | 0 | 0 | 0 | 0 | 0 | 59 | 0.049085 |
1afafdaa5a55f840c140378943d38c4159a3e9db | 610 | py | Python | Exercicios - Mundo3/Ex109/teste.py | BrianMath/ExerciciosPythonCeV | 4960f1a58d281b32afd5dfd6ea65e0ae5ad48b4f | [
"MIT"
] | null | null | null | Exercicios - Mundo3/Ex109/teste.py | BrianMath/ExerciciosPythonCeV | 4960f1a58d281b32afd5dfd6ea65e0ae5ad48b4f | [
"MIT"
] | null | null | null | Exercicios - Mundo3/Ex109/teste.py | BrianMath/ExerciciosPythonCeV | 4960f1a58d281b32afd5dfd6ea65e0ae5ad48b4f | [
"MIT"
] | null | null | null | import moeda
preco = float(input("Digite o preço: R$"))
por100 = float(input("Digite a porcentagem: "))
formatar = str(input("Deseja formatar como moeda [S/N]? ")).upper()
if "S" in formatar:
formatado = True
else:
formatado = False
print(f"\nA metade de {moeda.moeda(preco)} é {moeda.metade(preco, formatado)}")
print(f"O dobro de {moeda.moeda(preco)} é {moeda.dobro(preco, formatado)}")
print(f"Aumentando {por100}% de {moeda.moeda(preco)}, temos {moeda.aumentar(preco, por100, formatado)}")
print(f"Diminuindo {por100}% de {moeda.moeda(preco)}, temos {moeda.diminuir(preco, por100, formatado)}")
| 38.125 | 104 | 0.701639 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 420 | 0.685155 |
1afb4e419b6e7623430e399ba3b927cbbb015ac9 | 132 | py | Python | api/companies/urls.py | anjaekk/CRM-internship- | 94eab9401a7336ebbb11046a77c59b1d07e2bf68 | [
"MIT"
] | 1 | 2021-09-10T09:11:08.000Z | 2021-09-10T09:11:08.000Z | api/companies/urls.py | anjaekk/CRM-site-project | 94eab9401a7336ebbb11046a77c59b1d07e2bf68 | [
"MIT"
] | null | null | null | api/companies/urls.py | anjaekk/CRM-site-project | 94eab9401a7336ebbb11046a77c59b1d07e2bf68 | [
"MIT"
] | null | null | null | from django.urls import path, include
from .views import CompanyAPIView
# urlpatterns = [
# path("",include(router.urls)),
# ] | 18.857143 | 37 | 0.69697 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 56 | 0.424242 |
1afcc354de4e4e1ba67d59086c2b25d41157da44 | 2,681 | py | Python | src/waldur_auth_saml2/utils.py | geant-multicloud/MCMS-mastermind | 81333180f5e56a0bc88d7dad448505448e01f24e | [
"MIT"
] | 26 | 2017-10-18T13:49:58.000Z | 2021-09-19T04:44:09.000Z | src/waldur_auth_saml2/utils.py | geant-multicloud/MCMS-mastermind | 81333180f5e56a0bc88d7dad448505448e01f24e | [
"MIT"
] | 14 | 2018-12-10T14:14:51.000Z | 2021-06-07T10:33:39.000Z | src/waldur_auth_saml2/utils.py | geant-multicloud/MCMS-mastermind | 81333180f5e56a0bc88d7dad448505448e01f24e | [
"MIT"
] | 32 | 2017-09-24T03:10:45.000Z | 2021-10-16T16:41:09.000Z | from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from djangosaml2.conf import get_config
from djangosaml2.utils import available_idps
from saml2.attribute_converter import ac_factory
from saml2.mdstore import InMemoryMetaData, MetaDataFile
from saml2.mdstore import name as get_idp_name
from saml2.s_utils import UnknownSystemEntity
from . import models
def load_providers():
    """Parse all locally configured IdP metadata files.

    Returns a dict mapping provider url/entity id to its parsed metadata
    entry, merged across every configured file.
    """
    providers = {}
    for path in settings.WALDUR_AUTH_SAML2['IDP_METADATA_LOCAL']:
        loader = MetaDataFile(ac_factory(), path)
        loader.load()
        providers.update(loader.items())
    return providers
def sync_providers():
    """Synchronize IdentityProvider rows with locally configured metadata.

    Deletes database entries whose url disappeared from the metadata
    files, creates entries for new urls, and refreshes metadata that
    changed for existing entries.
    """
    providers = load_providers()

    # Delete DB providers whose url no longer exists in the backend files.
    current_idps = list(models.IdentityProvider.objects.all().only('url', 'pk'))
    backend_urls = set(providers.keys())
    stale_idps = set(idp.pk for idp in current_idps if idp.url not in backend_urls)
    models.IdentityProvider.objects.filter(pk__in=stale_idps).delete()

    existing_urls = set(idp.url for idp in current_idps)

    # Create rows for urls seen in the metadata but not yet stored.
    for url, metadata in providers.items():
        name = get_idp_name(metadata)
        if not name:
            # It is expected that every provider has name. For corner cases check entity_id
            name = metadata.get('entity_id')
            if not name:
                # Skip invalid identity provider
                continue
        if url in existing_urls:
            # Skip identity provider if its url is already in the database
            continue
        models.IdentityProvider.objects.create(url=url, name=name, metadata=metadata)

    # Refresh metadata of providers that already existed but changed.
    for provider in models.IdentityProvider.objects.all().iterator():
        backend_metadata = providers.get(provider.url)
        if backend_metadata and provider.metadata != backend_metadata:
            provider.metadata = backend_metadata
            provider.save()
def is_valid_idp(value):
    """Return True when `value` names a known remote or database-backed IdP."""
    if value in available_idps(get_config()):
        return True
    return models.IdentityProvider.objects.filter(url=value).exists()
def get_idp_sso_supported_bindings(idp_entity_id, config):
    """Return the SSO bindings advertised by the IdP, or [] when unknown."""
    try:
        sso_services = config.metadata.service(
            idp_entity_id, 'idpsso_descriptor', 'single_sign_on_service'
        )
        return sso_services.keys()
    except (UnknownSystemEntity, AttributeError):
        # Unknown entity id, or the metadata lookup returned None.
        return []
class DatabaseMetadataLoader(InMemoryMetaData):
    """Metadata store backed by IdentityProvider rows instead of a file."""

    def load(self, *args, **kwargs):
        # Nothing to parse up front: entries are fetched lazily per key.
        pass

    def __getitem__(self, item):
        try:
            provider = models.IdentityProvider.objects.get(url=item)
        except ObjectDoesNotExist:
            raise KeyError
        return provider.metadata
| 33.5125 | 91 | 0.697128 | 346 | 0.129056 | 0 | 0 | 0 | 0 | 0 | 0 | 313 | 0.116747 |
1afe05c194caa5c442bb47f534efb7a249603873 | 3,846 | py | Python | pvcontrol/__main__.py | stephanme/pv-control | f6aab9800c154492f3b9e5b2cd21c7a87cf92e16 | [
"Apache-2.0"
] | null | null | null | pvcontrol/__main__.py | stephanme/pv-control | f6aab9800c154492f3b9e5b2cd21c7a87cf92e16 | [
"Apache-2.0"
] | null | null | null | pvcontrol/__main__.py | stephanme/pv-control | f6aab9800c154492f3b9e5b2cd21c7a87cf92e16 | [
"Apache-2.0"
] | null | null | null | import logging
# configure logging before initializing further modules
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s [%(levelname)s] %(name)s - %(message)s")
# urllib3 is very chatty at DEBUG; keep only INFO and above from it.
logging.getLogger("urllib3.connectionpool").setLevel(logging.INFO)
import argparse
import json
import flask
import flask_compress
from werkzeug.middleware.dispatcher import DispatcherMiddleware
import prometheus_client
from pvcontrol import views, relay
from pvcontrol.meter import MeterFactory
from pvcontrol.chargecontroller import ChargeControllerFactory
from pvcontrol.wallbox import WallboxFactory
from pvcontrol.car import CarFactory
from pvcontrol.scheduler import Scheduler
logger = logging.getLogger(__name__)

# Command line: implementation class names for meter/wallbox/car plus a
# JSON config string with one section per component.
parser = argparse.ArgumentParser(description="PV Control")
parser.add_argument("-m", "--meter", default="SimulatedMeter")
parser.add_argument("-w", "--wallbox", default="SimulatedWallbox")
parser.add_argument("-a", "--car", default="SimulatedCar")
parser.add_argument("-c", "--config", default="{}")
args = parser.parse_args()

logger.info("Starting pvcontrol")
logger.info(f"Meter: {args.meter}")
logger.info(f"Wallbox: {args.wallbox}")
logger.info(f"Car: {args.car}")
logger.info(f"config: {args.config}")

# Ensure every component has a (possibly empty) config section.
config = json.loads(args.config)
for c in ["wallbox", "meter", "car", "controller"]:
    if c not in config:
        config[c] = {}

# Wire up the device abstractions; the meter receives the wallbox.
wallbox = WallboxFactory.newWallbox(args.wallbox, **config["wallbox"])
meter = MeterFactory.newMeter(args.meter, wallbox, **config["meter"])
car = CarFactory.newCar(args.car, **config["car"])
controller = ChargeControllerFactory.newController(meter, wallbox, **config["controller"])

# Periodic background jobs: the charge control loop and car data polling.
controller_scheduler = Scheduler(controller.get_config().cycle_time, controller.run)
controller_scheduler.start()
car_scheduler = Scheduler(car.get_config().cycle_time, car.read_data)
car_scheduler.start()

app = flask.Flask(__name__)
app.json_encoder = views.JSONEncoder
app.after_request(views.add_no_cache_header)
# Compress larger responses of the listed mime types.
app.config["COMPRESS_MIN_SIZE"] = 2048
app.config["COMPRESS_MIMETYPES"] = ["text/html", "text/css", "application/json", "application/javascript", "image/vnd.microsoft.icon"]
compress = flask_compress.Compress()
compress.init_app(app)

# Static UI plus the REST API routes.
app.add_url_rule("/", view_func=views.StaticResourcesView.as_view("get_index"), defaults={"path": "index.html"})
app.add_url_rule("/<path:path>", view_func=views.StaticResourcesView.as_view("get_static"))
app.add_url_rule("/api/pvcontrol", view_func=views.PvControlView.as_view("get_pvcontrol", meter, wallbox, controller, car))
app.add_url_rule("/api/pvcontrol/controller", view_func=views.PvControlConfigDataView.as_view("get_controller", controller))
app.add_url_rule("/api/pvcontrol/controller/desired_mode", view_func=views.PvControlChargeModeView.as_view("put_desired_mode", controller))
app.add_url_rule("/api/pvcontrol/controller/phase_mode", view_func=views.PvControlPhaseModeView.as_view("put_phase_mode", controller))
app.add_url_rule("/api/pvcontrol/meter", view_func=views.PvControlConfigDataView.as_view("get_meter", meter))
app.add_url_rule("/api/pvcontrol/wallbox", view_func=views.PvControlConfigDataView.as_view("get_wallbox", wallbox))
app.add_url_rule("/api/pvcontrol/car", view_func=views.PvControlConfigDataView.as_view("get_car", car))
# for testing only
app.add_url_rule("/api/pvcontrol/wallbox/car_status", view_func=views.PvControlCarStatusView.as_view("put_car_status", wallbox))
# Add prometheus wsgi middleware to route /metrics requests
app.wsgi_app = DispatcherMiddleware(app.wsgi_app, {"/metrics": prometheus_client.make_wsgi_app()})

# Blocks here until the HTTP server shuts down.
app.run(host="0.0.0.0", port=8080)

# Graceful shutdown: stop background jobs first.
controller_scheduler.stop()
car_scheduler.stop()
# disable charging to play it safe
# TODO: see ChargeMode.INIT handling
logger.info("Set wallbox.allow_charging=False on shutdown.")
wallbox.allow_charging(False)
relay.cleanup()
logger.info("Stopped pvcontrol")
| 46.337349 | 139 | 0.788612 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,185 | 0.308112 |
1afe4a291ef32854b9631bb218506f1799a820c8 | 1,274 | py | Python | Python/binary-trees-with-factors.py | xiaohalo/LeetCode | 68211ba081934b21bb1968046b7e3c1459b3da2d | [
"MIT"
] | 9 | 2019-06-30T07:15:18.000Z | 2022-02-10T20:13:40.000Z | Python/binary-trees-with-factors.py | pnandini/LeetCode | e746c3298be96dec8e160da9378940568ef631b1 | [
"MIT"
] | 1 | 2018-07-10T03:28:43.000Z | 2018-07-10T03:28:43.000Z | Python/binary-trees-with-factors.py | pnandini/LeetCode | e746c3298be96dec8e160da9378940568ef631b1 | [
"MIT"
] | 9 | 2019-01-16T22:16:49.000Z | 2022-02-06T17:33:41.000Z | # Time: O(n^2)
# Space: O(n)
# Given an array of unique integers, each integer is strictly greater than 1.
# We make a binary tree using these integers and each number may be used for
# any number of times.
# Each non-leaf node's value should be equal to the product of the values of
# it's children.
# How many binary trees can we make? Return the answer modulo 10 ** 9 + 7.
#
# Example 1:
#
# Input: A = [2, 4]
# Output: 3
# Explanation: We can make these trees: [2], [4], [4, 2, 2]
# Example 2:
#
# Input: A = [2, 4, 5, 10]
# Output: 7
# Explanation: We can make these trees:
# [2], [4], [5], [10], [4, 2, 2], [10, 2, 5], [10, 5, 2].
#
# Note:
# - 1 <= A.length <= 1000.
# - 2 <= A[i] <= 10 ^ 9.
# Python 2/3 compatibility: on Python 3 `xrange` does not exist, so
# alias it to `range`.
try:
    xrange          # Python 2
except NameError:
    xrange = range  # Python 3
class Solution(object):
    def numFactoredBinaryTrees(self, A):
        """Count binary trees buildable from the values in A (reuse allowed).

        Each non-leaf node's value must equal the product of its two
        children's values.  With values sorted ascending, dp[v] is the
        number of trees rooted at value v:
            dp[v] = 1 + sum(dp[a] * dp[v // a]) over a in A with
                    v % a == 0 and v // a in A.
        The answer is the sum of all dp values modulo 10**9 + 7.

        This version does not rely on the module-level ``xrange`` shim
        and does not mutate the caller's list (the original sorted A
        in place).

        :type A: List[int]
        :rtype: int
        """
        MOD = 10 ** 9 + 7
        values = sorted(A)  # work on a copy; keep the caller's list intact
        dp = {}
        for i, v in enumerate(values):
            count = 1  # v by itself as a single-node tree
            for a in values[:i]:
                quotient, remainder = divmod(v, a)
                if remainder == 0 and quotient in dp:
                    count = (count + dp[a] * dp[quotient]) % MOD
            dp[v] = count
        return sum(dp.values()) % MOD
| 26 | 77 | 0.505495 | 470 | 0.368917 | 0 | 0 | 0 | 0 | 0 | 0 | 772 | 0.605965 |
1afff1ccdcfcf884da6805c728defb7d5049e9b8 | 424 | py | Python | jtr/load/embeddings/__init__.py | mitchelljeff/SUMMAD4.3 | 33bb3a74cff16a7aa699660a08d98ddcd662cad5 | [
"MIT"
] | 1 | 2017-09-15T14:06:07.000Z | 2017-09-15T14:06:07.000Z | jtr/load/embeddings/__init__.py | mitchelljeff/SUMMAD4.3 | 33bb3a74cff16a7aa699660a08d98ddcd662cad5 | [
"MIT"
] | null | null | null | jtr/load/embeddings/__init__.py | mitchelljeff/SUMMAD4.3 | 33bb3a74cff16a7aa699660a08d98ddcd662cad5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from jtr.load.embeddings.embeddings import Embeddings, load_embeddings
from jtr.load.embeddings.word_to_vec import load_word2vec, get_word2vec_vocabulary
from jtr.load.embeddings.glove import load_glove
from jtr.load.embeddings.vocabulary import Vocabulary
__all__ = [
'Embeddings',
'load_embeddings'
'load_word2vec',
'get_word2vec_vocabulary',
'load_glove',
'Vocabulary'
]
| 26.5 | 82 | 0.761792 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 116 | 0.273585 |
210025e5881047a75dc28e56284192add56bd13d | 9,466 | py | Python | src/account/models.py | opnfv/laas | 35b9f39178cc502a5283a1b37a65f7dd0838ae05 | [
"Apache-2.0"
] | 2 | 2020-10-31T15:03:20.000Z | 2021-03-22T16:29:15.000Z | src/account/models.py | opnfv/laas | 35b9f39178cc502a5283a1b37a65f7dd0838ae05 | [
"Apache-2.0"
] | 13 | 2019-12-04T23:29:42.000Z | 2022-03-02T04:53:53.000Z | src/account/models.py | opnfv/laas | 35b9f39178cc502a5283a1b37a65f7dd0838ae05 | [
"Apache-2.0"
] | null | null | null | ##############################################################################
# Copyright (c) 2016 Max Breitenfeldt and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
from django.contrib.auth.models import User
from django.db import models
from django.apps import apps
import json
import random
from collections import Counter
from dashboard.exceptions import ResourceAvailabilityException
class LabStatus(object):
    """Integer status codes for a lab (a poor man's enum).

    UP        -- everything at the lab is working fine.
    TEMP_DOWN -- temporarily unavailable, e.g. for maintenance.
    DOWN      -- the lab is broken / out of service.
    """

    UP = 0
    TEMP_DOWN = 100
    DOWN = 200
def upload_to(object, filename):
    """Build the per-user storage path "<username>/<filename>" for uploads."""
    return "{}/{}".format(object.user.username, filename)
class UserProfile(models.Model):
    """Extend the Django User model with dashboard-specific account data."""

    # One profile per Django auth user; deleted along with the user.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    timezone = models.CharField(max_length=100, blank=False, default='UTC')
    # Uploaded key files land in "<username>/<filename>" (see upload_to above).
    ssh_public_key = models.FileField(upload_to=upload_to, null=True, blank=True)
    pgp_public_key = models.FileField(upload_to=upload_to, null=True, blank=True)
    email_addr = models.CharField(max_length=300, blank=False, default='email@mail.com')
    company = models.CharField(max_length=200, blank=False)
    # OAuth credentials used on the user's behalf.
    oauth_token = models.CharField(max_length=1024, blank=False)
    oauth_secret = models.CharField(max_length=1024, blank=False)
    jira_url = models.CharField(max_length=100, null=True, blank=True, default='')
    full_name = models.CharField(max_length=100, null=True, blank=True, default='')
    # NOTE: field name keeps the historical misspelling ("privledge");
    # renaming it would require a schema migration.
    booking_privledge = models.BooleanField(default=False)
    public_user = models.BooleanField(default=False)

    class Meta:
        db_table = 'user_profile'

    def __str__(self):
        return self.user.username
class VlanManager(models.Model):
    """
    Keeps track of the vlans for a lab.

    Vlans are represented as indexes into a 4096 element list.
    This list is serialized to JSON for storing in the DB.
    """

    # list of length 4096 containing either 0 (not available) or 1 (available)
    vlans = models.TextField()
    # list of length 4096 containing either 0 (not reserved) or 1 (reserved)
    reserved_vlans = models.TextField()
    block_size = models.IntegerField()
    # True if the lab allows two different users to have the same private vlans
    # if they use QinQ or a vxlan overlay, for example
    allow_overlapping = models.BooleanField()

    def get_vlans(self, count=1):
        """
        Return the IDs of available vlans as a list[int], but does not reserve them.

        Raises ResourceAvailabilityException if fewer than `count` vlans
        are free.  Always returns a list of ints.
        """
        allocated = []
        vlans = json.loads(self.vlans)
        reserved = json.loads(self.reserved_vlans)
        # NOTE(review): range stops at len(vlans) - 1, so the last index
        # (4095) is never handed out -- possibly deliberate since VLAN 4095
        # is reserved by 802.1Q, but index 0 (also reserved) IS considered;
        # confirm intent.
        for i in range(0, len(vlans) - 1):
            if len(allocated) >= count:
                break
            # NOTE(review): when allow_overlapping is True this skip is
            # bypassed, i.e. vlans marked unavailable (0) may be handed
            # out again -- confirm that is the intended overlap semantics.
            if vlans[i] == 0 and self.allow_overlapping is False:
                continue
            if reserved[i] == 1:
                continue
            # vlan is available and not reserved, so safe to add
            allocated.append(i)
            continue
        if len(allocated) != count:
            raise ResourceAvailabilityException("can't allocate the vlans requested")
        return allocated

    def get_public_vlan(self):
        """Return reference to an available public network without reserving it."""
        # lab_set is the reverse FK from Lab.vlan_manager; a manager is
        # expected to belong to exactly one lab.
        return PublicNetwork.objects.filter(lab=self.lab_set.first(), in_use=False).first()

    def reserve_public_vlan(self, vlan):
        """Reserves the Public Network that has the given vlan."""
        net = PublicNetwork.objects.get(lab=self.lab_set.first(), vlan=vlan, in_use=False)
        net.in_use = True
        net.save()

    def release_public_vlan(self, vlan):
        """Un-reserves a public network with the given vlan."""
        net = PublicNetwork.objects.get(lab=self.lab_set.first(), vlan=vlan, in_use=True)
        net.in_use = False
        net.save()

    def public_vlan_is_available(self, vlan):
        """
        Whether the public vlan is available.

        returns true if the network with the given vlan is free to use,
        False otherwise
        """
        net = PublicNetwork.objects.get(lab=self.lab_set.first(), vlan=vlan)
        return not net.in_use

    def is_available(self, vlans):
        """
        If the vlans are available.

        'vlans' is either a single vlan id integer or a list of integers
        will return true (available) or false
        """
        # Overlapping labs treat every vlan as available by definition.
        if self.allow_overlapping:
            return True

        reserved = json.loads(self.reserved_vlans)
        vlan_master_list = json.loads(self.vlans)
        # Normalize a single int to a one-element list.
        try:
            iter(vlans)
        except Exception:
            vlans = [vlans]

        for vlan in vlans:
            if not vlan_master_list[vlan] or reserved[vlan]:
                return False
        return True

    def release_vlans(self, vlans):
        """
        Make the vlans available for another booking.

        'vlans' is either a single vlan id integer or a list of integers
        will make the vlans available
        doesnt return a value
        """
        my_vlans = json.loads(self.vlans)

        # Normalize a single int to a one-element list.
        try:
            iter(vlans)
        except Exception:
            vlans = [vlans]

        for vlan in vlans:
            my_vlans[vlan] = 1
        self.vlans = json.dumps(my_vlans)
        self.save()

    def reserve_vlans(self, vlans):
        """
        Reserves all given vlans or throws a ValueError.

        vlans can be an integer or a list of integers.
        Marks each vlan as unavailable (0) in the serialized list; fails
        fast if any requested vlan is already taken or reserved.
        """
        my_vlans = json.loads(self.vlans)
        reserved = json.loads(self.reserved_vlans)

        # Normalize a single int to a one-element list, then dedupe.
        try:
            iter(vlans)
        except Exception:
            vlans = [vlans]

        vlans = set(vlans)

        for vlan in vlans:
            if my_vlans[vlan] == 0 or reserved[vlan] == 1:
                raise ValueError("vlan " + str(vlan) + " is not available")

            my_vlans[vlan] = 0
        self.vlans = json.dumps(my_vlans)
        self.save()
class Lab(models.Model):
    """
    Model representing a Hosting Lab.

    Anybody that wants to host resources for LaaS needs to have a Lab model
    We associate hardware with Labs so we know what is available and where.
    """

    lab_user = models.OneToOneField(User, on_delete=models.CASCADE)
    name = models.CharField(max_length=200, primary_key=True, unique=True, null=False, blank=False)
    contact_email = models.EmailField(max_length=200, null=True, blank=True)
    contact_phone = models.CharField(max_length=20, null=True, blank=True)
    status = models.IntegerField(default=LabStatus.UP)
    vlan_manager = models.ForeignKey(VlanManager, on_delete=models.CASCADE, null=True)
    location = models.TextField(default="unknown")
    # This token must appear in API requests from this lab
    api_token = models.CharField(max_length=50)
    description = models.CharField(max_length=240)
    lab_info_link = models.URLField(null=True)
    project = models.CharField(default='LaaS', max_length=100)

    @staticmethod
    def make_api_token():
        """Generate a random 45 character string for an API token.

        The token authenticates API requests from the lab, so it must be
        unpredictable: use the ``secrets`` module instead of the
        non-cryptographic ``random`` PRNG the original relied on.
        """
        import secrets
        alphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
        return "".join(secrets.choice(alphabet) for _ in range(45))

    def get_available_resources(self):
        """Return a dict mapping profile name -> count of free servers.

        Counts servers at this lab that are working and not booked.
        """
        # Cannot import the model normally due to a circular import.
        Server = apps.get_model('resource_inventory', 'Server')  # TODO: Find way to import ResourceQuery
        resources = [str(resource.profile) for resource in Server.objects.filter(lab=self, working=True, booked=False)]
        return dict(Counter(resources))

    def __str__(self):
        return self.name
class PublicNetwork(models.Model):
    """L2/L3 network that can reach the internet."""

    vlan = models.IntegerField()
    lab = models.ForeignKey(Lab, on_delete=models.CASCADE)
    # Set True while a booking holds this network (see VlanManager
    # reserve_public_vlan / release_public_vlan).
    in_use = models.BooleanField(default=False)
    # Network address in CIDR notation and its gateway IP.
    cidr = models.CharField(max_length=50, default="0.0.0.0/0")
    gateway = models.CharField(max_length=50, default="0.0.0.0")
class Downtime(models.Model):
    """
    A Downtime event.

    Labs can create Downtime objects so the dashboard can
    alert users that the lab is down, etc
    """

    start = models.DateTimeField()
    end = models.DateTimeField()
    lab = models.ForeignKey(Lab, on_delete=models.CASCADE)
    description = models.TextField(default="This lab will be down for maintenance")

    def save(self, *args, **kwargs):
        """Validate the interval before saving.

        Raises ValueError if start >= end or if the interval overlaps any
        other Downtime for the same lab.
        """
        if self.start >= self.end:
            raise ValueError('Start date is after end date')
        # Two intervals overlap iff a.start < b.end and b.start < a.end.
        # The original pair of queries (start strictly inside / end
        # strictly inside) missed the case where an existing downtime
        # fully contains or exactly equals the new one.
        overlapping = Downtime.objects.filter(
            lab=self.lab, start__lt=self.end, end__gt=self.start)
        if self.pk is not None:
            # On update, don't count this row as overlapping itself.
            overlapping = overlapping.exclude(pk=self.pk)
        if overlapping.exists():
            raise ValueError('Overlapping Downtime')

        return super(Downtime, self).save(*args, **kwargs)
| 33.214035 | 119 | 0.648109 | 8,684 | 0.917389 | 0 | 0 | 295 | 0.031164 | 0 | 0 | 3,194 | 0.337418 |
2100b09455195ab5c430875e8b35fbf6511f1a48 | 1,513 | py | Python | Chapter06/Blurring_Sharpening.py | AzureCloudMonk/Raspberry-Pi-3-Cookbook-for-Python-Programmers-Third-Edition | 26b4b3859fa7ce471fea3ba3b016922b7bd1629e | [
"MIT"
] | 22 | 2018-05-04T01:15:12.000Z | 2021-12-19T17:14:53.000Z | Chapter06/Blurring_Sharpening.py | AzureCloudMonk/Raspberry-Pi-3-Cookbook-for-Python-Programmers-Third-Edition | 26b4b3859fa7ce471fea3ba3b016922b7bd1629e | [
"MIT"
] | 1 | 2021-02-20T12:50:08.000Z | 2021-02-24T06:40:07.000Z | Chapter06/Blurring_Sharpening.py | PacktPublishing/Raspberry-Pi-3-Cookbook-for-Python-Programmers-Third-Edition | ffaa7e324bda7ad8f7c752092609f48c8335ea39 | [
"MIT"
] | 20 | 2018-07-07T17:20:18.000Z | 2021-04-22T17:31:18.000Z | # Blurring and Sharpening Images
# Import Computer Vision package - cv2
import cv2
# Import Numerical Python package - numpy as np
import numpy as np
# Read the image using imread built-in function
image = cv2.imread('image_6.jpg')
# Display original image using imshow built-in function
cv2.imshow("Original", image)
# Wait until any key is pressed
cv2.waitKey(0)
# Blurring images: Averaging, cv2.blur built-in function
# Averaging: Convolving image with normalized box filter
# Convolution: Mathematical operation on 2 functions which produces third function.
# Normalized box filter having size 3 x 3 would be:
# (1/9) [[1, 1, 1],
# [1, 1, 1],
# [1, 1, 1]]
blur = cv2.blur(image,(9,9)) # (9 x 9) filter is used
# Display blurred image
cv2.imshow('Blurred', blur)
# Wait until any key is pressed
cv2.waitKey(0)
# Sharpening images: Emphasizes edges in an image
kernel = np.array([[-1,-1,-1],
[-1,9,-1],
[-1,-1,-1]])
# If we don't normalize to 1, image would be brighter or darker respectively
# cv2.filter2D is the built-in function used for sharpening images
# cv2.filter2D(image, ddepth, kernel)
sharpened = cv2.filter2D(image, -1, kernel)
# ddepth = -1, sharpened images will have same depth as original image
# Display sharpenend image
cv2.imshow('Sharpened', sharpened)
# Wait untill any key is pressed
cv2.waitKey(0)
# Close all windows
cv2.destroyAllWindows()
| 28.54717 | 84 | 0.66887 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,070 | 0.707204 |
21016a21d5882a186ec10f492251ce61d6a1d407 | 10,887 | py | Python | webapp/python/isuda.py | akawashiro/isucon6-qualify | ec51c51a5a43f3d1aa192c1404e5e121c087c5d0 | [
"MIT"
] | null | null | null | webapp/python/isuda.py | akawashiro/isucon6-qualify | ec51c51a5a43f3d1aa192c1404e5e121c087c5d0 | [
"MIT"
] | null | null | null | webapp/python/isuda.py | akawashiro/isucon6-qualify | ec51c51a5a43f3d1aa192c1404e5e121c087c5d0 | [
"MIT"
] | null | null | null | from flask import Flask, request, jsonify, abort, render_template, redirect, session, url_for
import MySQLdb.cursors
import hashlib
import html
import json
import math
import os
import pathlib
import random
import re
import string
import urllib
import sys
from werkzeug.contrib.profiler import ProfilerMiddleware, MergeStream
# Serve the shared ../public directory at the site root.
static_folder = pathlib.Path(__file__).resolve().parent.parent / 'public'
app = Flask(__name__, static_folder=str(static_folder), static_url_path='')
app.secret_key = 'tonymoris'

# Per-request profiling, mirrored to stdout and a log file.
# NOTE(review): this file handle is held open for the process lifetime
# and never closed explicitly -- acceptable for a benchmark app, but
# worth confirming.
f = open('/home/isucon/profiler.log', 'w')
stream = MergeStream(sys.stdout, f)
app.config['PROFILE'] = True
app.wsgi_app = ProfilerMiddleware(app.wsgi_app, stream, sort_by=('time', 'calls'))

# Process-wide caches: the set of known keywords and the compiled
# alternation regex built from them.  Both are reset in get_initialize
# and invalidated when keywords are added/removed.
keywords_cache = None
keyword_re_cache = None

# Connection settings and peer-service origins, overridable via env vars.
_config = {
    'db_host': os.environ.get('ISUDA_DB_HOST', 'localhost'),
    'db_port': int(os.environ.get('ISUDA_DB_PORT', '3306')),
    'db_user': os.environ.get('ISUDA_DB_USER', 'root'),
    'db_password': os.environ.get('ISUDA_DB_PASSWORD', ''),
    'isutar_origin': os.environ.get('ISUTAR_ORIGIN', 'http://localhost:5001'),
    'isupam_origin': os.environ.get('ISUPAM_ORIGIN', 'http://localhost:5050'),
}
def config(key):
    """Return the configuration value for *key* from the module _config.

    Raises KeyError for unknown keys.  (The original executed
    ``raise "..."`` with a plain string, which itself raises TypeError
    in Python 3 because exceptions must derive from BaseException.)
    """
    try:
        return _config[key]
    except KeyError:
        raise KeyError("config value of %s undefined" % key)
def dbh_isuda():
    """Return the per-request MySQL connection to the `isuda` DB.

    Lazily opens the connection on first use and caches it on the Flask
    `request` object so later calls in the same request reuse it.
    """
    if hasattr(request, 'isuda_db'):
        return request.isuda_db
    else:
        request.isuda_db = MySQLdb.connect(**{
            'host': config('db_host'),
            'port': config('db_port'),
            'user': config('db_user'),
            'passwd': config('db_password'),
            'db': 'isuda',
            'charset': 'utf8mb4',
            'cursorclass': MySQLdb.cursors.DictCursor,
            'autocommit': True,
        })
        cur = request.isuda_db.cursor()
        # Strict SQL mode and full utf8 for every new connection.
        cur.execute("SET SESSION sql_mode='TRADITIONAL,NO_AUTO_VALUE_ON_ZERO,ONLY_FULL_GROUP_BY'")
        cur.execute('SET NAMES utf8mb4')
        return request.isuda_db

def dbh_isutar():
    """Return the per-request MySQL connection to the `isutar` DB.

    Same lazy/caching pattern as dbh_isuda, but settings come straight
    from ISUTAR_* environment variables rather than _config.
    """
    if hasattr(request, 'isutar_db'):
        return request.isutar_db
    else:
        request.isutar_db = MySQLdb.connect(**{
            'host': os.environ.get('ISUTAR_DB_HOST', 'localhost'),
            'port': int(os.environ.get('ISUTAR_DB_PORT', '3306')),
            'user': os.environ.get('ISUTAR_DB_USER', 'root'),
            'passwd': os.environ.get('ISUTAR_DB_PASSWORD', ''),
            'db': 'isutar',
            'charset': 'utf8mb4',
            'cursorclass': MySQLdb.cursors.DictCursor,
            'autocommit': True,
        })
        cur = request.isutar_db.cursor()
        cur.execute("SET SESSION sql_mode='TRADITIONAL,NO_AUTO_VALUE_ON_ZERO,ONLY_FULL_GROUP_BY'")
        cur.execute('SET NAMES utf8mb4')
        return request.isutar_db
@app.teardown_request
def close_db(exception=None):
    """Close per-request DB connections when the request ends.

    Fix: the original checked ``request.db``, an attribute that is never
    set anywhere in this module -- connections are cached as
    ``request.isuda_db`` and ``request.isutar_db`` (see dbh_isuda /
    dbh_isutar), so they were never closed.
    """
    if hasattr(request, 'isuda_db'):
        request.isuda_db.close()
    if hasattr(request, 'isutar_db'):
        request.isutar_db.close()
@app.template_filter()
def ucfirst(str):
    """Upper-case the first character of *str*, leaving the rest as-is.

    Fix: the original slice ``str[-len(str) + 1:]`` equals ``str[1:]``
    only for len > 1; a one-character string was doubled
    ('a' -> 'Aa') and the empty string raised IndexError.
    """
    return str[:1].upper() + str[1:]
def set_name(func):
    """View decorator: if a user is logged in, attach their identity.

    Reads user_id from the session, looks the user up, and stores
    request.user_id / request.user_name for the view and templates.
    Aborts 403 if the session references a nonexistent user.
    Anonymous requests pass through untouched.
    """
    import functools

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if "user_id" in session:
            request.user_id = user_id = session['user_id']
            cur = dbh_isuda().cursor()
            cur.execute('SELECT name FROM user WHERE id = %s', (user_id, ))
            user = cur.fetchone()
            if user is None:
                abort(403)
            request.user_name = user['name']
        return func(*args, **kwargs)
    return wrapper

def authenticate(func):
    """View decorator: require a logged-in user.

    Must be applied after set_name (closer to the function), since it
    checks the request.user_id attribute set_name installs.
    """
    import functools

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if not hasattr(request, 'user_id'):
            abort(403)
        return func(*args, **kwargs)
    return wrapper
@app.route('/initialize')
def get_initialize():
    """Reset benchmark state: drop caches, extra entries, and isutar data."""
    global keywords_cache
    global keyword_re_cache
    keywords_cache = None
    keyword_re_cache = None
    cur = dbh_isuda().cursor()
    # Rows with id <= 7101 are the seed data; everything above was
    # created during a previous benchmark run.
    cur.execute('DELETE FROM entry WHERE id > 7101')
    # Ask the companion isutar service to reset itself too.
    origin = config('isutar_origin')
    urllib.request.urlopen(origin + '/initialize')
    return jsonify(result='ok')

@app.route('/')
@set_name
def get_index():
    """Render the paginated front page of entries, newest first."""
    PER_PAGE = 10
    page = int(request.args.get('page', '1'))
    cur = dbh_isuda().cursor()
    cur.execute('SELECT * FROM entry ORDER BY updated_at DESC LIMIT %s OFFSET %s', (PER_PAGE, PER_PAGE * (page - 1),))
    entries = cur.fetchall()
    # Decorate each entry with its linked-HTML body and its stars.
    for entry in entries:
        entry['html'] = htmlify(entry['description'])
        entry['stars'] = load_stars(entry['keyword'])
    cur.execute('SELECT COUNT(*) AS count FROM entry')
    row = cur.fetchone()
    total_entries = row['count']
    # Pagination window: up to 5 pages either side of the current one.
    last_page = int(math.ceil(total_entries / PER_PAGE))
    pages = range(max(1, page - 5), min(last_page, page + 5) + 1)
    return render_template('index.html', entries=entries, page=page, last_page=last_page, pages=pages)

@app.route('/robots.txt')
def get_robot_txt():
    """No robots.txt is served; respond 404."""
    abort(404)
@app.route('/keyword', methods=['POST'])
@set_name
@authenticate
def create_keyword():
    """Create or update a keyword entry owned by the logged-in user.

    Rejects empty keywords and spammy content with 400.
    """
    global keywords_cache
    global keyword_re_cache
    keyword = request.form['keyword']
    if keyword is None or len(keyword) == 0:
        abort(400)

    user_id = request.user_id
    description = request.form['description']

    if is_spam_contents(description) or is_spam_contents(keyword):
        abort(400)

    cur = dbh_isuda().cursor()
    sql = """
            INSERT INTO entry (author_id, keyword, description, created_at, updated_at)
            VALUES (%s,%s,%s,NOW(), NOW())
            ON DUPLICATE KEY UPDATE
            author_id = %s, keyword = %s, description = %s, updated_at = NOW()
            """
    cur.execute(sql, (user_id, keyword, description, user_id, keyword, description))

    # Update the in-process keyword cache only after the row is safely
    # stored.  Fix: the original mutated the cache before the spam check,
    # so a rejected (400) submission still poisoned the cache with a
    # keyword that does not exist in the database.
    if keywords_cache is not None:
        keywords_cache.add(keyword)
        keyword_re_cache = None

    return redirect('/')
@app.route('/register')
@set_name
def get_register():
    """Render the sign-up form."""
    return render_template('authenticate.html', action='register')

@app.route('/register', methods=['POST'])
def post_register():
    """Create an account from the sign-up form and log the user in."""
    name = request.form['name']
    pw = request.form['password']
    # Both fields are required and must be non-empty.
    if name is None or name == '' or pw is None or pw == '':
        abort(400)
    user_id = register(dbh_isuda().cursor(), name, pw)
    session['user_id'] = user_id
    return redirect('/')
def register(cur, user, password):
    """Insert a new user row and return its auto-generated id.

    The stored hash is sha1(salt + password), matching what post_login
    computes from the submitted password.  Fix: the original hashed the
    literal string "password" instead of the ``password`` argument, so
    no user could ever log in with their chosen password.
    """
    salt = random_string(20)
    digest = hashlib.sha1((salt + password).encode('utf-8')).hexdigest()
    cur.execute("INSERT INTO user (name, salt, password, created_at) VALUES (%s, %s, %s, NOW())",
                (user, salt, digest,))
    cur.execute("SELECT LAST_INSERT_ID() AS last_insert_id")
    return cur.fetchone()['last_insert_id']

def random_string(n):
    """Return a random alphanumeric string of length n (used for salts)."""
    return ''.join([random.choice(string.ascii_letters + string.digits) for i in range(n)])
@app.route('/login')
@set_name
def get_login():
    """Render the login form."""
    return render_template('authenticate.html', action='login')

@app.route('/login', methods=['POST'])
def post_login():
    """Verify credentials (sha1(salt + password)) and open a session."""
    name = request.form['name']
    cur = dbh_isuda().cursor()
    cur.execute("SELECT * FROM user WHERE name = %s", (name, ))
    row = cur.fetchone()
    # Unknown user or wrong password both yield 403.
    if row is None or row['password'] != hashlib.sha1((row['salt'] + request.form['password']).encode('utf-8')).hexdigest():
        abort(403)
    session['user_id'] = row['id']
    return redirect('/')

@app.route('/logout')
def get_logout():
    """Drop the session's user id, if any, and go home."""
    session.pop('user_id', None)
    return redirect('/')
@app.route('/keyword/<keyword>')
@set_name
def get_keyword(keyword):
    """Show one entry, with its description htmlified and its stars."""
    if keyword == '':
        abort(400)
    cur = dbh_isuda().cursor()
    cur.execute('SELECT * FROM entry WHERE keyword = %s', (keyword,))
    entry = cur.fetchone()
    if entry is None:
        abort(404)
    entry['html'] = htmlify(entry['description'])
    entry['stars'] = load_stars(entry['keyword'])
    return render_template('keyword.html', entry=entry)

@app.route('/keyword/<keyword>', methods=['POST'])
@set_name
@authenticate
def delete_keyword(keyword):
    """Delete an entry (requires login)."""
    global keywords_cache
    global keyword_re_cache
    if keyword == '':
        abort(400)
    # NOTE(review): the cache entry is removed before the existence
    # check, so a 404 here still evicts the keyword from the cache.
    # Harmless only if cache and DB were already in sync -- confirm.
    if keywords_cache is not None and keyword in keywords_cache:
        keywords_cache.remove(keyword)
        keyword_re_cache = None
    cur = dbh_isuda().cursor()
    cur.execute('SELECT keyword FROM entry WHERE keyword = %s', (keyword, ))
    row = cur.fetchone()
    if row is None:
        abort(404)
    cur.execute('DELETE FROM entry WHERE keyword = %s', (keyword,))
    return redirect('/')
def make_keyword_list():
    """Return all keywords ordered by length, longest first.

    The descending-length order matters: make_keyword_re() joins these
    into one regex alternation, and Python's `|` tries branches left to
    right (first match wins, not longest match), so longer keywords must
    come first or a shorter keyword that is a prefix of a longer one
    would always shadow it.  Fix: the original returned
    ``list(keywords_cache)`` on a cache hit -- arbitrary set order --
    silently losing that guarantee.
    """
    global keywords_cache
    if keywords_cache is not None:
        return sorted(keywords_cache, key=len, reverse=True)
    cur = dbh_isuda().cursor()
    cur.execute('SELECT keyword FROM entry ORDER BY CHARACTER_LENGTH(keyword) DESC')
    keywords = [row['keyword'] for row in cur.fetchall()]
    keywords_cache = set(keywords)
    return keywords
def make_keyword_re(keywords):
    """Return (and cache) one compiled regex alternating all keywords.

    The cache is invalidated elsewhere by setting keyword_re_cache to
    None whenever the keyword set changes.
    """
    global keyword_re_cache
    if keyword_re_cache is not None:
        return keyword_re_cache
    keyword_re_cache = re.compile("(%s)" % '|'.join([re.escape(k) for k in keywords]))
    return keyword_re_cache

def htmlify(content):
    """Convert an entry description to HTML with keywords auto-linked.

    Two-pass substitution: first replace each keyword occurrence with a
    unique sha1-based placeholder, then HTML-escape the whole text, and
    finally swap each placeholder for an <a> link.  The placeholder step
    prevents the link markup itself from being escaped, and escaping
    happens before link insertion so user content cannot inject HTML.
    """
    if content is None or content == '':
        return ''
    keywords = make_keyword_list()
    keyword_re = make_keyword_re(keywords)
    kw2sha = {}
    def replace_keyword(m):
        # Placeholder is hex-only, so it survives html.escape unchanged.
        kw2sha[m.group(0)] = "isuda_%s" % hashlib.sha1(m.group(0).encode('utf-8')).hexdigest()
        return kw2sha[m.group(0)]
    result = re.sub(keyword_re, replace_keyword, content)
    result = html.escape(result)
    for kw, hash in kw2sha.items():
        url = url_for('get_keyword', keyword=kw)
        link = "<a href=\"%s\">%s</a>" % (url, html.escape(kw))
        result = re.sub(re.compile(hash), link, result)
    # Preserve line breaks in the rendered HTML.
    return re.sub(re.compile("\n"), "<br />", result)
def get_stars(keyword):
    """Fetch star rows for a keyword directly from the isutar DB.

    NOTE(review): appears unused -- views call load_stars (HTTP) instead;
    confirm before relying on it.  The critical-level log line looks like
    leftover debugging.
    """
    cur = dbh_isutar().cursor()
    app.logger.critical('keyword = ' + keyword)
    cur.execute('SELECT * FROM star WHERE keyword = %s', (keyword, ))
    res = cur.fetchall()
    return res

def load_stars(keyword):
    """Fetch the star list for a keyword from the isutar service over HTTP."""
    origin = config('isutar_origin')
    url = "%s/stars" % origin
    params = urllib.parse.urlencode({'keyword': keyword})
    with urllib.request.urlopen(url + "?%s" % params) as res:
        data = json.loads(res.read().decode('utf-8'))
        return data['stars']
def is_spam_contents(content):
    """Ask the isupam service whether *content* is spam.

    POSTs the content and returns True when the service marks it invalid.
    Fix: removed the unreachable ``return False`` that followed the
    return inside the ``with`` block (dead code in the original).
    """
    payload = urllib.parse.urlencode({"content": content}).encode('utf-8')
    with urllib.request.urlopen(config('isupam_origin'), payload) as res:
        data = json.loads(res.read().decode('utf-8'))
        return not data['valid']
if __name__ == "__main__":
app.run()
| 28.877984 | 126 | 0.6381 | 0 | 0 | 0 | 0 | 4,967 | 0.456232 | 0 | 0 | 2,716 | 0.249472 |
2102482614cbfec0280843f4652b4c092440bef6 | 80 | py | Python | todo/apps.py | arthtyagi/gettit | 26047f85a7cf0a5f380cde4e18f9bcc88bb27db6 | [
"MIT"
] | 6 | 2020-05-30T18:10:08.000Z | 2021-11-30T14:39:41.000Z | todo/apps.py | arthtyagi/gettit | 26047f85a7cf0a5f380cde4e18f9bcc88bb27db6 | [
"MIT"
] | 18 | 2020-06-21T12:04:47.000Z | 2022-01-13T02:57:16.000Z | todo/apps.py | arthtyagi/gettit | 26047f85a7cf0a5f380cde4e18f9bcc88bb27db6 | [
"MIT"
] | 1 | 2020-08-30T01:42:54.000Z | 2020-08-30T01:42:54.000Z | from django.apps import AppConfig
class TodoConfig(AppConfig):
    """Django application configuration for the "todo" app."""
    name = 'todo'
| 13.333333 | 33 | 0.7625 | 43 | 0.5375 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0.075 |
21052d838f3c6f1bc317c7615a2db829dddf4cec | 299 | py | Python | Segundo nivel - condicionales/4.py | OscarPalominoC/RetosPlatziProgramaci-n | cd0c32254e8dd0dc35dda91ad50f5d8e6f013c08 | [
"MIT"
] | null | null | null | Segundo nivel - condicionales/4.py | OscarPalominoC/RetosPlatziProgramaci-n | cd0c32254e8dd0dc35dda91ad50f5d8e6f013c08 | [
"MIT"
] | null | null | null | Segundo nivel - condicionales/4.py | OscarPalominoC/RetosPlatziProgramaci-n | cd0c32254e8dd0dc35dda91ad50f5d8e6f013c08 | [
"MIT"
] | null | null | null | def run():
animal = str(input('¿Cuál es tu animal favorito? '))
if animal.lower() == 'tortuga' or animal.lower() == 'tortugas':
print('También me gustan las tortugas.')
else:
print('Ese animal es genial, pero prefiero las tortugas.')
if __name__ == '__main__':
run() | 33.222222 | 67 | 0.615385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 147 | 0.486755 |
2108113aa55ab74979a849198e4a5b50f03a9738 | 1,153 | py | Python | data-structure/registration_with_binary_tree/src/Registration.py | gvalves/unifesp | f0572419f963fe063be56ae34b572a0130246d2f | [
"MIT"
] | null | null | null | data-structure/registration_with_binary_tree/src/Registration.py | gvalves/unifesp | f0572419f963fe063be56ae34b572a0130246d2f | [
"MIT"
] | null | null | null | data-structure/registration_with_binary_tree/src/Registration.py | gvalves/unifesp | f0572419f963fe063be56ae34b572a0130246d2f | [
"MIT"
] | null | null | null | from enum import Enum
class Registration(Enum):
    """1-based column indices of a servant registration record.

    NOTE(review): member names look like the column headers of the
    Brazilian federal public-servants registry CSV ("cadastro" data) --
    confirm against the loader that consumes this enum before relying
    on that interpretation.
    """
    ID_SERVIDOR_PORTAL = 1
    NOME = 2
    CPF = 3
    MATRICULA = 4
    DESCRICAO_CARGO = 5
    CLASSE_CARGO = 6
    REFERENCIA_CARGO = 7
    PADRAO_CARGO = 8
    NIVEL_CARGO = 9
    SIGLA_FUNCAO = 10
    NIVEL_FUNCAO = 11
    FUNCAO = 12
    CODIGO_ATIVIDADE = 13
    ATIVIDADE = 14
    OPCAO_PARCIAL = 15
    COD_UORG_LOTACAO = 16
    UORG_LOTACAO = 17
    COD_ORG_LOTACAO = 18
    ORG_LOTACAO = 19
    COD_ORGSUP_LOTACAO = 20
    ORGSUP_LOTACAO = 21
    COD_UORG_EXERCICIO = 22
    UORG_EXERCICIO = 23
    COD_ORG_EXERCICIO = 24
    ORG_EXERCICIO = 25
    COD_ORGSUP_EXERCICIO = 26
    ORGSUP_EXERCICIO = 27
    TIPO_VINCULO = 28
    SITUACAO_VINCULO = 29
    DATA_INICIO_AFASTAMENTO = 30
    DATA_TERMINO_AFASTAMENTO = 31
    REGIME_JURIDICO = 32
    JORNADA_DE_TRABALHO = 33
    DATA_INGRESSO_CARGOFUNCAO = 34
    DATA_NOMEACAO_CARGOFUNCAO = 35
    DATA_INGRESSO_ORGAO = 36
    DOCUMENTO_INGRESSO_SERVICOPUBLICO = 37
    DATA_DIPLOMA_INGRESSO_SERVICOPUBLICO = 38
    DIPLOMA_INGRESSO_CARGOFUNCAO = 39
    DIPLOMA_INGRESSO_ORGAO = 40
    DIPLOMA_INGRESSO_SERVICOPUBLICO = 41
    UF_EXERCICIO = 42
| 24.531915 | 45 | 0.699046 | 1,128 | 0.978317 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
21086d29a3c48a77d90c32973849ccb037435414 | 1,269 | py | Python | repos/system_upgrade/el7toel8/actors/checkacpid/actor.py | sm00th/leapp-repository | 1c171ec3a5f9260a3c6f84a9b15cad78a875ac61 | [
"Apache-2.0"
] | 21 | 2018-11-20T15:58:39.000Z | 2022-03-15T19:57:24.000Z | repos/system_upgrade/el7toel8/actors/checkacpid/actor.py | sm00th/leapp-repository | 1c171ec3a5f9260a3c6f84a9b15cad78a875ac61 | [
"Apache-2.0"
] | 732 | 2018-11-21T18:33:26.000Z | 2022-03-31T16:16:24.000Z | repos/system_upgrade/el7toel8/actors/checkacpid/actor.py | sm00th/leapp-repository | 1c171ec3a5f9260a3c6f84a9b15cad78a875ac61 | [
"Apache-2.0"
] | 85 | 2018-11-20T17:55:00.000Z | 2022-03-29T09:40:31.000Z | from leapp.actors import Actor
from leapp.models import InstalledRedHatSignedRPM
from leapp.libraries.common.rpms import has_package
from leapp.reporting import Report, create_report
from leapp import reporting
from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
class CheckAcpid(Actor):
    """
    Check if acpid is installed. If yes, write information about non-compatible changes.
    """

    name = 'checkacpid'
    # Inputs consumed and outputs produced by this leapp actor.
    consumes = (InstalledRedHatSignedRPM,)
    produces = (Report,)
    tags = (ChecksPhaseTag, IPUWorkflowTag)

    def process(self):
        # Only report when the acpid package is actually installed.
        if has_package(InstalledRedHatSignedRPM, 'acpid'):
            create_report([
                reporting.Title('Acpid incompatible changes in the next major version'),
                reporting.Summary('The option -d (debug) no longer implies -f (foreground).'),
                reporting.Severity(reporting.Severity.LOW),
                reporting.Remediation(
                    hint='You must now use both options (\'-df\') for the same behavior. Please update '
                         'your scripts to be compatible with the changes.'),
                reporting.Tags([reporting.Tags.KERNEL, reporting.Tags.SERVICES]),
                reporting.RelatedResource('package', 'acpid')
            ])
| 40.935484 | 104 | 0.662727 | 1,001 | 0.78881 | 0 | 0 | 0 | 0 | 0 | 0 | 375 | 0.295508 |
2108fb3332b187529421f27204509d2d0565250f | 831 | py | Python | crypto_config/cryptoconfigparser.py | x-ware-ltd/CryptoConfig | 59f38ca73194e5663b4c8ea48a2ae73514502664 | [
"BSD-3-Clause"
] | 1 | 2021-11-24T16:24:36.000Z | 2021-11-24T16:24:36.000Z | crypto_config/cryptoconfigparser.py | x-ware-ltd/CryptoConfig | 59f38ca73194e5663b4c8ea48a2ae73514502664 | [
"BSD-3-Clause"
] | 2 | 2021-08-13T15:13:53.000Z | 2021-08-13T15:57:18.000Z | crypto_config/cryptoconfigparser.py | x-ware-ltd/CryptoConfig | 59f38ca73194e5663b4c8ea48a2ae73514502664 | [
"BSD-3-Clause"
] | 1 | 2021-08-12T15:35:04.000Z | 2021-08-12T15:35:04.000Z | from crypto_config import (ConfigParser, ParsingError, Crypt)
import re
class CryptoConfigParser(ConfigParser):
    """ConfigParser that transparently decrypts ``enc(...)`` values.

    Pass the decryption key as the ``crypt_key`` keyword argument; any
    option value of the form ``enc(<ciphertext>)`` is decrypted with it
    on read.  Without a key, values are returned verbatim.
    """

    def __init__(self, *args, **kwargs):
        """Pop our extra ``crypt_key`` kwarg before ConfigParser sees it."""
        # (Original compared with `!= None`; pop's default already covers
        # the missing-key case.)
        self.crypt_key = kwargs.pop('crypt_key', None)
        ConfigParser.__init__(self, *args, **kwargs)

    def get(self, section, option, *args, **kwargs):
        """Return the option value, decrypting an ``enc(...)`` wrapper."""
        raw_val = ConfigParser.get(self, section, option, *args, **kwargs)
        encoded_val = re.search(r"enc\((.*)\)", raw_val, re.IGNORECASE)
        if encoded_val and self.crypt_key:
            return self._decrypt(encoded_val.group(1), self.crypt_key)
        return raw_val

    def _decrypt(self, ciphertext, key):
        """Decrypt *ciphertext* with *key* via the Crypt helper.

        Fix: the original's return line had stray tokens fused onto it
        (``return b_decoded | 33.24 | ...``), which would raise a
        TypeError at runtime; the parameter also shadowed builtin `str`.
        """
        return Crypt(key).decrypt(ciphertext)
2109cd83cd9e5d9e5a9ceb4961c452b330afafb7 | 9,888 | py | Python | networks/multiclass/CNN2D/InceptionNet/multiclass_InceptionNet.py | so2liu/CNNArt | 9d91bf08a044e7d5068f8446663726411d2236dd | [
"Apache-2.0"
] | 22 | 2018-04-27T21:28:46.000Z | 2021-12-24T06:44:55.000Z | networks/multiclass/CNN2D/InceptionNet/multiclass_InceptionNet.py | so2liu/CNNArt | 9d91bf08a044e7d5068f8446663726411d2236dd | [
"Apache-2.0"
] | 81 | 2017-11-09T17:23:15.000Z | 2020-01-28T22:54:13.000Z | networks/multiclass/CNN2D/InceptionNet/multiclass_InceptionNet.py | so2liu/CNNArt | 9d91bf08a044e7d5068f8446663726411d2236dd | [
"Apache-2.0"
] | 18 | 2017-11-13T16:12:17.000Z | 2020-08-27T10:17:34.000Z | import os.path
import scipy.io as sio
import numpy as np # for algebraic operations, matrices
import keras.models
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten, Dropout # , Layer, Flatten
# from keras.layers import containers
from keras.models import model_from_json,Model
from sklearn.model_selection import GridSearchCV
from keras.wrappers.scikit_learn import KerasClassifier
from hyperas.distributions import choice, uniform, conditional
from hyperopt import Trials, STATUS_OK
from sklearn.metrics import confusion_matrix
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D as pool2
from keras.callbacks import EarlyStopping,ModelCheckpoint
# from keras.layers.convolutional import ZeroPadding2D as zero2d
from keras.regularizers import l2 # , activity_l2
# from theano import functionfrom keras.applications.vgg16 import VGG16
from keras.applications.vgg16 import VGG16
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input
from keras.optimizers import SGD
from keras.layers.merge import concatenate
from keras.layers import Input,add
from keras.layers.advanced_activations import PReLU,ELU
from keras.layers.pooling import GlobalAveragePooling2D
#temp/Inception-ResNet for 180180
def create180180Model(patchSize):
    """Build the Inception/ResNet-style CNN for 180x180 input patches.

    :param patchSize: 2d array; patchSize[0, 0] and patchSize[0, 1] give the
        patch height and width (expected to be 180 x 180).
    :return: uncompiled keras ``Model`` mapping a (1, H, W) channels-first
        image to an 11-class softmax.
    """
    np.random.seed(5)  # reproducible weight initialization

    def conv(tensor, n_filters, kernel, pad):
        # Every convolution in this network shares the same initializer,
        # regularizer, stride and activation; only filters/kernel/padding vary.
        return Conv2D(filters=n_filters, kernel_size=kernel,
                      kernel_initializer='he_normal', weights=None,
                      padding=pad, strides=(1, 1),
                      kernel_regularizer=l2(1e-6), activation='relu')(tensor)

    inp = Input(shape=(1, patchSize[0, 0], patchSize[0, 1]))

    # Stem: two valid 3x3 convolutions, then downsample.
    x = conv(inp, 64, (3, 3), 'valid')
    x = conv(x, 64, (3, 3), 'valid')
    x = pool2(pool_size=(2, 2), data_format='channels_first')(x)

    # Residual block at 64 filters.
    y = conv(x, 64, (3, 3), 'same')
    y = conv(y, 64, (3, 3), 'same')
    x = add([x, y])
    x = pool2(pool_size=(2, 2), data_format='channels_first')(x)

    # Expand to 128 filters.
    x = conv(x, 128, (3, 3), 'same')
    x = conv(x, 128, (3, 3), 'same')

    # Inception block: 1x1, 1x1->3x3 and 1x1->5x5 branches (32+128+128 = 288
    # filters). The original code also built a pool->1x1 branch but never
    # concatenated it (adding its 128 filters would break the 288-filter
    # residual add below), so that dead branch has been removed.
    b1 = conv(x, 32, (1, 1), 'same')
    b2 = conv(conv(x, 32, (1, 1), 'same'), 128, (3, 3), 'same')
    b3 = conv(conv(x, 32, (1, 1), 'same'), 128, (5, 5), 'same')
    x = concatenate(inputs=[b1, b2, b3], axis=1)

    # Residual block at 288 filters.
    y = conv(x, 288, (3, 3), 'same')
    x = add([x, y])
    x = pool2(pool_size=(2, 2), data_format='channels_first')(x)

    # Residual block at 256 filters; the shortcut is itself a 3x3 conv.
    shortcut = conv(x, 256, (3, 3), 'same')
    y = conv(x, 256, (3, 3), 'same')
    y = conv(y, 256, (3, 3), 'same')
    x = add([shortcut, y])
    x = pool2(pool_size=(2, 2), data_format='channels_first')(x)

    # Classifier head: 11-way softmax.
    x = Flatten()(x)
    out = Dense(units=11,
                kernel_initializer='normal',
                kernel_regularizer='l2',
                activation='softmax')(x)
    return Model(inputs=inp, outputs=out)
def fTrain(X_train, y_train, X_test, y_test, sOutPath, patchSize, batchSizes=None, learningRates=None, iEpochs=None):
    """Grid-search wrapper: train one CNN per (batch size, learning rate) pair.

    :param X_train, y_train: training data and one-hot labels
    :param X_test, y_test: test data and one-hot labels
    :param sOutPath: path used to derive all output file names
    :param patchSize: 2d array describing the patch dimensions
    :param batchSizes: iterable of batch sizes (default [64])
    :param learningRates: iterable of learning rates (default [0.01])
    :param iEpochs: number of training epochs (default 300)
    """
    # The defaults must be lists because the loops below iterate over them;
    # the previous scalar defaults (64 / 0.01) raised TypeError when used.
    batchSizes = [64] if batchSizes is None else batchSizes
    learningRates = [0.01] if learningRates is None else learningRates
    iEpochs = 300 if iEpochs is None else iEpochs
    for iBatch in batchSizes:
        for iLearn in learningRates:
            fTrainInner(X_train, y_train, X_test, y_test, sOutPath,
                        patchSize, iBatch, iLearn, iEpochs)
def fTrainInner(X_train, y_train, X_test, y_test, sOutPath, patchSize, batchSize=None, learningRate=None, iEpochs=None):
    """Train the 180x180 InceptionNet once for the given hyper-parameters.

    Trains with Adam + early stopping, evaluates on the test set, and saves
    the architecture (json), weights (h5) and training history / test
    metrics (.mat) to names derived from sOutPath. Does nothing if the
    .mat output file already exists.

    :param X_train, y_train: training data and one-hot labels
    :param X_test, y_test: test data and one-hot labels
    :param sOutPath: path used to derive all output file names
    :param patchSize: 2d array; only 180 x 180 patches are supported
    :param batchSize: mini-batch size (default 64)
    :param learningRate: Adam learning rate (default 0.01)
    :param iEpochs: maximum number of epochs (default 300)
    """
    # parse inputs
    batchSize = 64 if batchSize is None else batchSize
    learningRate = 0.01 if learningRate is None else learningRate
    iEpochs = 300 if iEpochs is None else iEpochs

    print('Training CNN InceptionNet')
    print('with lr = ' + str(learningRate) + ' , batchSize = ' + str(batchSize))

    # derive all output file names from sOutPath
    _, sPath = os.path.splitdrive(sOutPath)
    sPath, sFilename = os.path.split(sPath)
    sFilename, sExt = os.path.splitext(sFilename)
    model_name = sPath + '/' + sFilename + str(patchSize[0, 0]) + str(patchSize[0, 1]) + '_lr_' + str(
        learningRate) + '_bs_' + str(batchSize)
    weight_name = model_name + '_weights.h5'
    model_json = model_name + '_json'
    model_all = model_name + '_model.h5'
    model_mat = model_name + '.mat'

    if os.path.isfile(model_mat):  # no training if output file exists
        return

    # Only a 180x180 model is implemented. The original test used the
    # bitwise '&' operator, which (due to precedence) parsed as a chained
    # comparison, never reliably detected wrong sizes, and then crashed on
    # an undefined `cnn` (and on a str + int concatenation in the message).
    if patchSize[0, 0] != 180 or patchSize[0, 1] != 180:
        print('NO model for patch size ' + str(patchSize[0, 0]) + 'x' + str(patchSize[0, 1]))
        return
    cnn = create180180Model(patchSize)

    # opti = SGD(lr=learningRate, momentum=1e-8, decay=0.1, nesterov=True)
    opti = keras.optimizers.Adam(lr=learningRate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    callbacks = [EarlyStopping(monitor='val_loss', patience=20, verbose=1),
                 ModelCheckpoint(filepath=model_name + 'bestweights.hdf5', monitor='val_acc',
                                 verbose=0, save_best_only=True, save_weights_only=False)]

    cnn.compile(loss='categorical_crossentropy', optimizer=opti, metrics=['accuracy'])
    cnn.summary()

    result = cnn.fit(X_train,
                     y_train,
                     validation_data=[X_test, y_test],
                     epochs=iEpochs,
                     batch_size=batchSize,
                     callbacks=callbacks,
                     verbose=1)

    # final evaluation on the test set
    score_test, acc_test = cnn.evaluate(X_test, y_test, batch_size=batchSize)
    prob_test = cnn.predict(X_test, batchSize, 0)
    y_pred = np.argmax(prob_test, axis=1)
    y_true = np.argmax(y_test, axis=1)
    confusion_mat = confusion_matrix(y_true, y_pred)

    # save model architecture and weights (use a context manager so the
    # json file handle is always closed)
    json_string = cnn.to_json()
    with open(model_json, 'w') as f:
        f.write(json_string)
    cnn.save_weights(weight_name, overwrite=True)

    # save training history and test metrics for matlab
    print('Saving results: ' + model_name)
    sio.savemat(model_name, {'model_settings': model_json,
                             'model': model_all,
                             'weights': weight_name,
                             'acc': result.history['acc'],
                             'loss': result.history['loss'],
                             'val_acc': result.history['val_acc'],
                             'val_loss': result.history['val_loss'],
                             'score_test': score_test,
                             'acc_test': acc_test,
                             'prob_test': prob_test,
                             'confusion_mat': confusion_mat})
def fPredict(X_test, y_test, model_name, sOutPath, patchSize, batchSize):
    """Evaluate a stored model on the test set and save the metrics to .mat.

    :param X_test, y_test: test data and one-hot labels
    :param model_name: sequence whose first element is the saved model file
    :param sOutPath: output directory; also used to derive the result name
    :param patchSize: kept for interface compatibility (unused here)
    :param batchSize: evaluation batch size
    """
    weight_name = model_name[0]
    _, sPath = os.path.splitdrive(sOutPath)
    # NOTE(review): this splits sOutPath (not the drive-stripped sPath) —
    # presumably intentional, but confirm against the callers.
    sPath, sFilename = os.path.split(sOutPath)

    # The original called the unqualified name `load_model`, which was never
    # imported and raised NameError; qualify it through the keras package.
    # The file stores the full model (architecture + weights), so no
    # separate compile/load_weights step is needed.
    model = keras.models.load_model(weight_name)
    model.summary()

    score_test, acc_test = model.evaluate(X_test, y_test, batch_size=batchSize)
    prob_pre = model.predict(X_test, batchSize, 0)

    y_pred = np.argmax(prob_pre, axis=1)
    y_true = np.argmax(y_test, axis=1)
    confusion_mat = confusion_matrix(y_true, y_pred)

    modelSave = sOutPath + '/' + sFilename + '_result.mat'
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test,
                            'acc_test': acc_test, 'confusion_mat': confusion_mat})
| 46.641509 | 201 | 0.731493 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,000 | 0.202265 |
2109ddcd358b814ece6800aa2ba9a800fb0b2400 | 856 | py | Python | python/sort_list_by_multiple_keys.py | julianespinel/trainning | 23e07c954e5bf03f1cd117e388eed7da4a3e8f63 | [
"MIT"
] | null | null | null | python/sort_list_by_multiple_keys.py | julianespinel/trainning | 23e07c954e5bf03f1cd117e388eed7da4a3e8f63 | [
"MIT"
] | null | null | null | python/sort_list_by_multiple_keys.py | julianespinel/trainning | 23e07c954e5bf03f1cd117e388eed7da4a3e8f63 | [
"MIT"
] | null | null | null | class reversor:
def __init__(self, value):
self.value = value
def __eq__(self, other):
return self.value == other.value
def __lt__(self, other):
"""
Inverted it to be able to sort in descending order.
"""
return self.value >= other.value
if __name__ == '__main__':
    tuples = [(3, 'x'), (2, 'y'), (1, 'a'), (1, 'z')]

    # Both key components ascending.
    tuples.sort(key=lambda pair: (pair[0], pair[1]))
    assert tuples == [(1, 'a'), (1, 'z'), (2, 'y'), (3, 'x')], "Error 1: 0 asc, 1 asc"

    # First component ascending, second descending via the reversor wrapper.
    tuples.sort(key=lambda pair: (pair[0], reversor(pair[1])))
    assert tuples == [(1, 'z'), (1, 'a'), (2, 'y'), (3, 'x')], "Error 2: 0 asc, 1 desc"

    # The following approach works for a single char string.
    tuples.sort(key=lambda pair: (pair[0], -ord(pair[1])))
    assert tuples == [(1, 'z'), (1, 'a'), (2, 'y'), (3, 'x')], "Error 3: 0 asc, 1 desc"
| 31.703704 | 87 | 0.511682 | 299 | 0.349299 | 0 | 0 | 0 | 0 | 0 | 0 | 260 | 0.303738 |
210a6d8521d591e2b68bc2a7a3a7e44846716c28 | 501 | py | Python | labs-code/python/standard-product-track/get_followers.py | aod2004/getting-started-with-the-twitter-api-v2-for-academic-research | 43f90984297427a6c48a39407185240f5782966b | [
"Apache-2.0"
] | 282 | 2021-06-24T17:30:54.000Z | 2022-03-29T17:18:03.000Z | labs-code/python/standard-product-track/get_followers.py | arshamalh/getting-started-with-the-twitter-api-v2-for-academic-research | 9e894096a38a44bd54f852f2d14e4ed7bc2e1ba5 | [
"Apache-2.0"
] | 7 | 2021-06-26T22:03:31.000Z | 2022-01-18T10:35:24.000Z | labs-code/python/standard-product-track/get_followers.py | arshamalh/getting-started-with-the-twitter-api-v2-for-academic-research | 9e894096a38a44bd54f852f2d14e4ed7bc2e1ba5 | [
"Apache-2.0"
] | 63 | 2021-06-24T19:46:50.000Z | 2022-03-24T14:53:41.000Z | from twarc import Twarc2, expansions
import json
# Replace your bearer token below
client = Twarc2(bearer_token="XXXXX")
def main():
    # Dump every follower of @twitterdev to stdout, one JSON object per line.
    # The followers function gets followers for specified user
    for page in client.followers(user="twitterdev"):
        for user in expansions.flatten(page):
            # Here we are printing the full Tweet object JSON to the console
            print(json.dumps(user))
# Run the follower dump only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| 25.05 | 76 | 0.682635 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 184 | 0.367265 |
210ce5a109662e3af414b660e816005c84a91241 | 1,091 | py | Python | machine-learning/QiWei-Python-Chinese/function/function_02.py | yw-fang/MLreadingnotes | 3522497e6fb97427c54f4267d9c410064818c357 | [
"Apache-2.0"
] | 2 | 2020-07-09T22:21:57.000Z | 2021-03-20T15:30:31.000Z | machine-learning/QiWei-Python-Chinese/function/function_02.py | yw-fang/MLreadingnotes | 3522497e6fb97427c54f4267d9c410064818c357 | [
"Apache-2.0"
] | 37 | 2018-04-17T06:40:54.000Z | 2022-03-22T09:06:01.000Z | machine-learning/QiWei-Python-Chinese/function/function_02.py | yw-fang/MLreadingnotes | 3522497e6fb97427c54f4267d9c410064818c357 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Yue-Wen FANG'
__maintainer__ = "Yue-Wen FANG"
__email__ = 'fyuewen@gmail.com'
__license__ = 'Apache License 2.0'
__creation_date__= 'Dec. 25, 2018'
"""
This example shows the functionality
of positional arguments and keyword ONLY arguments.
The positional arguments correspond to tuple,
the keyword ONLY arguments correspond to dict.
"""
def add_function_01(x, *extra):
    """Print x, then each remaining positional argument on its own line.

    The extra positional arguments are collected into a tuple.
    """
    print('x is', x)
    for value in extra:
        print(value)
def add_function_02(x, *positional, **keyword):
    """Show how extra positional args land in a tuple and keyword args in a dict."""
    print('x is', x)
    print(positional)
    print('the type of args is', type(positional))
    print(keyword.values())
    print(keyword.keys())
    print('the type or kwargs is', type(keyword))
if __name__ == "__main__":
    # Demo: the extra positional values are collected into *args.
    add_function_01(1,2,3,45)
    print("*************")
    # Demo: *args plus **kwargs collecting c and d.
    add_function_02(3, 1, 2, 3, 45, c=3, d=4)
    print("*************")
| 27.974359 | 100 | 0.656279 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 614 | 0.562786 |
210ce5a3d930e858c4ed3d0c31e56cdd73aec402 | 1,999 | py | Python | Python/Problem#8.py | Wolfy7/DailyCodingProblem | dcca0da51bf42413c2eeff122dbb653e73ab5d83 | [
"MIT"
] | null | null | null | Python/Problem#8.py | Wolfy7/DailyCodingProblem | dcca0da51bf42413c2eeff122dbb653e73ab5d83 | [
"MIT"
] | null | null | null | Python/Problem#8.py | Wolfy7/DailyCodingProblem | dcca0da51bf42413c2eeff122dbb653e73ab5d83 | [
"MIT"
] | null | null | null | """
A unival tree (which stands for "universal value") is a tree where all nodes under it have the same value.
Given the root to a binary tree, count the number of unival subtrees.
For example, the following tree has 5 unival subtrees:
0
/ \
1 0
/ \
1 0
/ \
1 1
"""
class Node:
    """A binary-tree node: a value plus optional left/right children."""

    def __init__(self, data):
        self.data = data
        self.left = self.right = None
# O(n)
def count_univals2(root):
    """Count unival subtrees in a single post-order pass over the tree."""
    return helper(root)[0]
def helper(root):
    """Return (unival_subtree_count, is_unival) for the tree rooted at root."""
    if root is None:
        return (0, True)

    left_total, left_ok = helper(root.left)
    right_total, right_ok = helper(root.right)

    # This node is unival iff both subtrees are unival and every existing
    # child carries the same value as this node.
    matches_left = root.left is None or root.left.data == root.data
    matches_right = root.right is None or root.right.data == root.data

    if left_ok and right_ok and matches_left and matches_right:
        return (left_total + right_total + 1, True)
    return (left_total + right_total, False)
def is_unival(root):
    """Return True if every node below root carries root's value."""
    if root is None:
        return True
    # Any existing child with a different value disqualifies the subtree.
    for child in (root.left, root.right):
        if child is not None and child.data != root.data:
            return False
    return is_unival(root.left) and is_unival(root.right)
# O(n^2)
def count_univals(root):
    """Count unival subtrees by re-checking is_unival at every node."""
    if root is None:
        return 0
    subtotal = count_univals(root.left) + count_univals(root.right)
    return subtotal + 1 if is_unival(root) else subtotal
"""
5
/ \
4 5
/ \ \
4 4 5
"""
root = Node(5)
root.left = Node(4)
root.right = Node(5)
root.left.left = Node(4)
root.left.right = Node(4)
root.right.right = Node(5)
print(count_univals(root))
print(count_univals2(root)) | 21.728261 | 107 | 0.57979 | 121 | 0.06053 | 0 | 0 | 0 | 0 | 0 | 0 | 425 | 0.212606 |
210d783c232f4ecb3f5661577b36de0f31d8a2b4 | 2,327 | py | Python | Prefixr.py | tribhuvanesh/dyna-snip | ac2c902f3dcbced5e8f6f6aad74826e5178ed9fe | [
"MIT"
] | 1 | 2015-02-06T13:17:56.000Z | 2015-02-06T13:17:56.000Z | Prefixr.py | tribhuvanesh/dyna-snip | ac2c902f3dcbced5e8f6f6aad74826e5178ed9fe | [
"MIT"
] | null | null | null | Prefixr.py | tribhuvanesh/dyna-snip | ac2c902f3dcbced5e8f6f6aad74826e5178ed9fe | [
"MIT"
] | null | null | null | import sublime
import sublime_plugin
import urllib
import urllib2
import threading
import re
from dyna_snip_helpers import get_snippet_list, inc_snippet_object
COMMENT_MARKER_JAVA = '//'
COMMENT_MARKER_PYTHON = "#"
class PrefixrCommand(sublime_plugin.TextCommand):
    """Look up code snippets matching the comment on the current line and
    let the user pick one to insert through Sublime's quick panel."""

    def run(self, edit):
        self.edit = edit

        # Extract the user query from the comment on the current line.
        selection = self.view.sel()[0]  # no selection == single region
        line_region = self.view.line(selection)  # widen region to the full line
        line_text = self.view.substr(line_region)
        self.pos = line_region.end()

        # Pick the comment marker and language from the filename extension.
        filename = self.view.file_name()
        if filename.endswith('.java'):
            marker, lang = COMMENT_MARKER_JAVA, 'java'
        else:
            marker, lang = COMMENT_MARKER_PYTHON, 'python'

        query = line_text.replace(marker, '').strip()

        # Fetch candidate snippets, best score first.
        self.snippet_list = sorted(get_snippet_list(query, lang),
                                   key=lambda item: item['score'], reverse=True)

        self.snippet_titles = [item['title'] + ' (' + item['source'] + ') '
                               for item in sorted(self.snippet_list,
                                                  key=lambda item: item['score'],
                                                  reverse=True)]
        self.snippets = [item['snippet']
                         for item in sorted(self.snippet_list,
                                            key=lambda item: item['score'],
                                            reverse=True)]

        self.view.window().show_quick_panel(self.snippet_titles,
                                            self.insert_snippet,
                                            sublime.MONOSPACE_FONT)
        return

    def insert_snippet(self, choice):
        # Record snippet usage server-side when the entry has a database id.
        if '_id' in self.snippet_list[choice]:
            inc_snippet_object(self.snippet_list[choice]['_id'])
        self.view.insert(self.edit, self.pos, '\n' + self.snippets[choice])
| 41.553571 | 112 | 0.594327 | 2,108 | 0.905887 | 0 | 0 | 0 | 0 | 0 | 0 | 631 | 0.271165 |
210e8623af2b83c3331b035c74ce32e4c79cb05e | 2,735 | py | Python | zeus/common/ipc/uni_comm.py | wnov/vega | bf51cbe389d41033c4ae4bc02e5078c3c247c845 | [
"MIT"
] | 6 | 2020-11-13T15:44:47.000Z | 2021-12-02T08:14:06.000Z | zeus/common/ipc/uni_comm.py | JacobLee121/vega | 19256aca4d047bfad3b461f0a927e1c2abb9eb03 | [
"MIT"
] | null | null | null | zeus/common/ipc/uni_comm.py | JacobLee121/vega | 19256aca4d047bfad3b461f0a927e1c2abb9eb03 | [
"MIT"
] | 2 | 2021-06-25T09:42:32.000Z | 2021-08-06T18:00:09.000Z | # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Uni comm."""
import threading
from absl import logging
from zeus.common.util.register import Registers
class UniComm(object):
    """Uniform facade over a registered communication backend.

    Every call is delegated to the backend instance looked up from the
    comm registry at construction time.
    """

    def __init__(self, comm_name, **comm_info):
        """Instantiate the backend registered under comm_name."""
        super(UniComm, self).__init__()
        self.comm = Registers.comm[comm_name](comm_info)
        self.lock = threading.Lock()

    def send(self, data, name=None, block=True, **kwargs):
        """Send data through the backend."""
        return self.comm.send(data, name, block, **kwargs)

    def recv(self, name=None, block=True):
        """Receive data from the backend."""
        return self.comm.recv(name, block)

    def send_bytes(self, data):
        """Send a raw byte payload."""
        return self.comm.send_bytes(data)

    def recv_bytes(self):
        """Receive a raw byte payload."""
        return self.comm.recv_bytes()

    def send_multipart(self, data):
        """Send a multipart message."""
        return self.comm.send_multipart(data)

    def recv_multipart(self):
        """Receive a multipart message."""
        return self.comm.recv_multipart()

    def delete(self, name):
        """Delete the named resource on the backend."""
        return self.comm.delete(name)

    @property
    def info(self):
        """String description of the underlying backend."""
        return str(self.comm)

    def close(self):
        """Close the backend, tolerating backends without a close()."""
        logging.debug("start close comm...")
        with self.lock:
            try:
                self.comm.close()
            except AttributeError as err:
                logging.info("call comm.close failed! with: \n{}".format(err))
| 35.986842 | 79 | 0.677514 | 1,495 | 0.546618 | 0 | 0 | 90 | 0.032907 | 0 | 0 | 1,490 | 0.54479 |
210ef2ae872b4a00dd69a05f6c0a2cbd5afb971f | 269 | py | Python | srv/tools/import_lemuria.py | Nekohime/lemuria | 0e6ca20522547026e4b20bb8cd8caa23633c171c | [
"MIT"
] | 13 | 2021-02-12T23:44:06.000Z | 2022-03-05T16:59:08.000Z | srv/tools/import_lemuria.py | Nekohime/lemuria | 0e6ca20522547026e4b20bb8cd8caa23633c171c | [
"MIT"
] | 3 | 2021-03-16T18:28:54.000Z | 2022-03-25T17:58:42.000Z | srv/tools/import_lemuria.py | Nekohime/lemuria | 0e6ca20522547026e4b20bb8cd8caa23633c171c | [
"MIT"
] | 5 | 2021-03-04T17:56:54.000Z | 2022-01-24T19:31:22.000Z | #!/usr/bin/env python3
# encoding: utf-8
"""Easily create ../app.db and import ../lemuria.json"""
import asyncio
from db_tools import init_db
from db_tools import import_world
# Create the database schema, then import the two Lemuria data files into it.
asyncio.run(init_db())
asyncio.run(import_world('../atlemuria.txt', '../proplemuria.txt'))
| 24.454545 | 67 | 0.736059 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 133 | 0.494424 |
210f4c2d01b5f97c01acae66b6369a2579242949 | 2,517 | py | Python | Handwritten_Numeral_Image_Classification.py | yjnanan/lab3 | 9e93361b46adb47953b89ddc40ded74e445684cf | [
"Apache-2.0"
] | 1 | 2021-11-24T17:37:00.000Z | 2021-11-24T17:37:00.000Z | Handwritten_Numeral_Image_Classification.py | yjnanan/lab3 | 9e93361b46adb47953b89ddc40ded74e445684cf | [
"Apache-2.0"
] | null | null | null | Handwritten_Numeral_Image_Classification.py | yjnanan/lab3 | 9e93361b46adb47953b89ddc40ded74e445684cf | [
"Apache-2.0"
] | null | null | null | import numpy as np
import random
import matplotlib.pyplot as plt
from load_data import loadLabel,loadImage
def der_activation_function(x,type):
    """Derivative of the activation selected by *type*.

    type 1 -> tanh, type 2 -> logistic sigmoid, anything else -> the
    leaky-ReLU-style activation with slope 0.25 for non-positive inputs
    (matching activation_function).

    :param x: pre-activation array
    :param type: activation selector
    :return: elementwise derivative, same shape as x
    """
    if type==1:
        return 1 - np.power(np.tanh(x), 2)
    elif type==2:
        return (1/(1+np.exp(-x)))*(1-1/(1+np.exp(-x)))
    else:
        # The original mutated x in place: it first set x[x<=0] = 0.25 and
        # then set x[x>0] = 1, which re-captured the freshly written 0.25
        # entries (0.25 > 0), so every derivative came out as 1. Compute
        # the proper piecewise derivative without side effects instead.
        return np.where(x > 0, 1.0, 0.25)
def activation_function(x, type):
    """Forward activation: 1 -> tanh, 2 -> logistic sigmoid,
    anything else -> leaky-ReLU-style with slope 0.25 for x <= 0."""
    if type == 1:
        return np.tanh(x)
    if type == 2:
        return 1 / (1 + np.exp(-x))
    return np.where(x <= 0, 0.25 * x, x)
def MLP_train(data,labels,hidden_nodes,epoch,test_data,test_labels):
    """Train a one-hidden-layer MLP with per-sample gradient descent and
    print the test-set accuracy after every epoch.

    :param data: training images, one flattened image per row
    :param labels: one-hot training labels (10 classes)
    :param hidden_nodes: number of hidden units
    :param epoch: number of passes over the training set
    :param test_data: test images, one flattened image per row
    :param test_labels: one-hot test labels
    """
    alpha=0.002  # learning rate
    size=data.shape
    # Hidden-layer weights, initialized uniformly in [-0.4, 0.4] using the
    # `random` module (seeded by the caller for reproducibility).
    w1=np.zeros((hidden_nodes,size[1]))
    for i in range(hidden_nodes):
        for j in range(size[1]):
            w1[i,j]=random.uniform(-0.4,0.4)
    # Output-layer weights (10 output classes).
    w2=np.zeros((10,hidden_nodes))
    for i in range(10):
        for j in range(hidden_nodes):
            w2[i,j]=random.uniform(-0.4,0.4)
    b1=np.zeros(hidden_nodes)
    b2=np.zeros(10)
    for i in range(epoch):
        # Online (per-sample) backpropagation; activation type 3 is the
        # leaky-ReLU-style function in activation_function.
        for x,y in zip(data,labels):
            u=np.dot(w1,x.T)+b1  # hidden pre-activation
            h=activation_function(u,3)  # hidden activation
            v=np.dot(w2,h)+b2  # output pre-activation
            output=activation_function(v,3)
            # Backpropagate the error through both layers.
            delta2=(output-y.T)*der_activation_function(v,3)
            delta1=der_activation_function(u,3)*np.dot(w2.T,delta2)
            d_w1=np.dot(np.expand_dims(delta1,axis=1),np.expand_dims(x,axis=0))
            d_w2=np.dot(np.expand_dims(delta2,axis=1),np.expand_dims(h,axis=0))
            w1=w1-alpha*d_w1
            w2=w2-alpha*d_w2
            b1=b1-alpha*delta1
            b2=b2-alpha*delta2
        # Evaluate on the whole test set after this epoch.
        # NOTE: the loop below reuses the variable `i` from the epoch loop.
        u_test=np.dot(w1,test_data.T)+np.expand_dims(b1,axis=1)
        h_test=activation_function(u_test,3)
        v_test=np.dot(w2,h_test)+np.expand_dims(b2,axis=1)
        output_test=activation_function(v_test.T,3)
        right_times=0
        for i in range(len(output_test)):
            if np.argmax(output_test[i])==np.argmax(test_labels[i]):
                right_times+=1
        accuracy=right_times/len(output_test)
        print(accuracy)
if __name__=='__main__':
    # Load the MNIST-format training and test sets.
    train_imgs=loadImage("train-images-idx3-ubyte")
    train_labels=loadLabel("train-labels-idx1-ubyte")
    test_imgs=loadImage("t10k-images-idx3-ubyte")
    random.seed(2)  # seed the RNG used for weight initialization
    test_labels=loadLabel("t10k-labels-idx1-ubyte")
    # MLP_train(train_imgs,train_labels,25,15,test_imgs,test_labels)
    # Compare accuracy for several hidden-layer sizes.
    for nodes in range(30,60,10):
        print('activation function: PReLU')
        print(nodes,"hidden nodes:")
        MLP_train(train_imgs, train_labels, nodes, 30, test_imgs, test_labels)
21104b0f0ae3daac360c6b5e66284e9070501a16 | 586 | py | Python | catalog/bindings/gmd/cylindrical_cs.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | catalog/bindings/gmd/cylindrical_cs.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | catalog/bindings/gmd/cylindrical_cs.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | from dataclasses import dataclass
from bindings.gmd.cylindrical_cstype import CylindricalCstype
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class CylindricalCs(CylindricalCstype):
    """gml:CylindricalCS is a three-dimensional coordinate system consisting of
    a polar coordinate system extended by a straight coordinate axis
    perpendicular to the plane spanned by the polar coordinate system.

    A CylindricalCS shall have three gml:axis property elements.
    """

    # NOTE(review): looks like an xsdata-generated GML binding — confirm.
    class Meta:
        # XML element name and namespace used by the (de)serializer.
        name = "CylindricalCS"
        namespace = "http://www.opengis.net/gml"
| 30.842105 | 79 | 0.755973 | 430 | 0.733788 | 0 | 0 | 441 | 0.75256 | 0 | 0 | 360 | 0.614334 |
2110e7d97dede9538a971bfc28f0a5ada650b926 | 4,800 | py | Python | preprocessing.py | tjiho/PoemesProfonds | ede1b32df153254e826cd9779f971fe72d6bd3eb | [
"MIT"
] | 6 | 2020-09-19T14:43:31.000Z | 2021-10-10T22:13:30.000Z | preprocessing.py | tjiho/PoemesProfonds | ede1b32df153254e826cd9779f971fe72d6bd3eb | [
"MIT"
] | 1 | 2021-01-16T19:06:34.000Z | 2021-04-14T20:02:28.000Z | preprocessing.py | tjiho/PoemesProfonds | ede1b32df153254e826cd9779f971fe72d6bd3eb | [
"MIT"
] | 1 | 2021-04-11T23:13:33.000Z | 2021-04-11T23:13:33.000Z | import pandas as pd
from collections import Counter
def import_lexique_as_df(path=r".\lexique3832.xlsx"):
    """Load the Lexique lexicon spreadsheet.

    :param path: path to the lexique3832.xlsx file
    :return: pd.DataFrame with the raw lexicon
    """
    df = pd.read_excel(path)
    df.iloc[:, 0] = df.iloc[:, 0].fillna(value="nan")  # the word "nan" is parsed as NaN; restore it
    df.iloc[:, 1] = pd.DataFrame(df.iloc[:, 1]).applymap(str)  # force phonemes to str
    return df
def accent_e_fin(df, motsvar="1_ortho", phonvar="2_phon", **kwargs):
    """Append the mute-E phoneme to words spelled with a final 'e' whose
    pronunciation ends in a consonant.

    :param df: lexicon DataFrame
    :param motsvar: column holding the spellings (default "1_ortho")
    :param phonvar: column holding the phonemes (default "2_phon")
    :param phoneme: symbol appended for the final E (default "°")
    :param pcvvar: column holding the phonemic C/V pattern (default "18_p_cvcv")
    :return: the DataFrame with phonvar updated in place
    """
    phoneme = kwargs.get("phoneme", "°")
    pcvvar = kwargs.get("pcvvar", "18_p_cvcv")

    # Words that are spelled with a final 'e' but whose last phoneme is a
    # consonant or semi-vowel get the mute-E phoneme appended.
    ends_with_e = df[motsvar].apply(lambda w: w[-1] == "e")
    consonant_final = df[pcvvar].apply(lambda p: p[-1] in ("C", "Y"))
    rows = ends_with_e & consonant_final

    df.loc[rows, phonvar] = df.loc[rows, phonvar].apply(lambda p: p + phoneme)
    return df
def set_ortho2phon(df, mots="1_ortho", phon="2_phon", gram="4_grampos",
                   occurances="10_freqlivres", accent_e=False, **kwargs):
    """Map each spelling to its pronunciation(s).

    :param df: lexicon DataFrame
    :param mots: column with the spellings
    :param phon: column with the phonemes
    :param gram: column with the grammatical category
    :param occurances: column with corpus frequencies
    :param accent_e: if True, first append the mute-E phoneme (see accent_e_fin)
    :return: (dict spelling -> phonemes for unambiguous spellings,
              dict (spelling, gram) -> phonemes for ambiguous spellings,
              DataFrame of summed frequencies per (spelling, phonemes) pair)
    """
    # optionally mark the mute E at the end of words
    if accent_e:
        df = accent_e_fin(df, motsvar=mots, phonvar=phon, **kwargs)

    # total frequency of each (spelling, pronunciation) pair
    df_occ = df[[mots, phon, occurances]].groupby([mots, phon], as_index=False).agg({occurances: "sum"})

    # unique (spelling, pronunciation) pairs
    pairs = set(tuple(x) for x in df[[mots, phon]].to_numpy())

    # count the distinct pronunciations of each spelling (sets make the
    # membership tests below O(1) instead of scanning lists)
    word_count = Counter(w for w, _ in pairs)
    unique_words = {w for w, c in word_count.items() if c == 1}
    ambiguous_words = {w for w, c in word_count.items() if c > 1}

    # spellings with a single pronunciation: {spelling: phonemes}
    dico_uniques = {w: p for w, p in pairs if w in unique_words}

    # spellings with several pronunciations: {(spelling, gram): phonemes}
    # Fix: the original filtered on the hard-coded column "1_ortho" here,
    # silently ignoring the `mots` parameter.
    idx_multiples = df.loc[:, mots].apply(lambda w: w in ambiguous_words)
    subset = df.loc[idx_multiples, [mots, gram, phon]]
    dico_multiples = {(w, g): p for w, g, p in (tuple(x) for x in subset.to_numpy())}

    return dico_uniques, dico_multiples, df_occ
def chars2idx(df, mots="1_ortho", phon="2_phon", blank="_"):
    """Build character-to-index maps for spellings and phonemes.

    :param df: lexicon DataFrame
    :param mots: column holding the spellings
    :param phon: column holding the phonemes
    :param blank: padding character; it gets the highest index in each map
    :return: (letter->index dict, phoneme->index dict,
              longest spelling length, longest pronunciation length)
    """
    letters = []
    phonemes = []
    tx = 0
    ty = 0
    for row in range(df.shape[0]):
        word = str(df.loc[row, mots])
        tx = max(tx, len(word))
        for ch in word:
            if ch not in letters:
                letters.append(ch)
        pron = str(df.loc[row, phon])
        ty = max(ty, len(pron))
        for ph in pron:
            if ph not in phonemes:
                phonemes.append(ph)

    # The blank/padding symbol is indexed after all observed characters.
    ltr2idx = {blank: len(letters)}
    phon2idx = {blank: len(phonemes)}
    for idx, ch in enumerate(letters):
        ltr2idx[ch] = idx
    for idx, ph in enumerate(phonemes):
        phon2idx[ph] = idx
    return ltr2idx, phon2idx, tx, ty
def import_poems(path=r".\scraping.xlsx"):
    """Load the scraped poems spreadsheet and split each poem into verses.

    :param path: path to the Excel file produced by the scraper
    :return: DataFrame (rows with a poem only) with a new "liste_vers"
        column: a list of stanzas, each stanza a list of verses
        ('þ' separates verses, 'þþ' separates stanzas)
    """
    df = pd.read_excel(path, encoding="utf-8")
    idx = df["poem"].notna()
    df = df.loc[idx, :]
    df["liste_vers"] = df["poem"].apply(lambda x: [strophe.split(r"þ") for strophe in x.split(r"þþ")])
    return df
| 36.090226 | 119 | 0.64 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,163 | 0.449875 |
211256a01de1489d0a1b59d2986c7fda716eda8d | 25,517 | py | Python | src/mlpy-3.5.0/mlpy/dimred.py | xuanxiaoliqu/CRC4Docker | 5ee26f9a590b727693202d8ad3b6460970304bd9 | [
"MIT"
] | 1 | 2020-10-26T12:02:08.000Z | 2020-10-26T12:02:08.000Z | src/mlpy-3.5.0/mlpy/dimred.py | TonyZPW/CRC4Docker | e52a6e88d4469284a071c0b96d009f6684dbb2ea | [
"MIT"
] | null | null | null | src/mlpy-3.5.0/mlpy/dimred.py | TonyZPW/CRC4Docker | e52a6e88d4469284a071c0b96d009f6684dbb2ea | [
"MIT"
] | null | null | null | ## This code is written by Davide Albanese, <albanese@fbk.eu>.
## (C) 2011 mlpy Developers.
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import scipy.linalg as spla
from ridge import ridge_base
from ols import ols_base
from kernel_class import *
import sys
if sys.version >= '3':
from . import kernel
else:
import kernel
__all__ = ['LDA', 'SRDA', 'KFDA', 'PCA', 'PCAFast', 'KPCA']
def proj(u, v):
    """Return the projection of v onto u: (<v, u> / <u, u>) u
    """
    scale = np.dot(v, u) / np.dot(u, u)
    return scale * u
def gso(v, norm=False):
    """Gram-Schmidt orthogonalization, performed in place.

    Vectors v_1, ..., v_k are stored by rows; each row has the projections
    onto all previous rows subtracted, and is optionally normalized.
    """
    n_rows = v.shape[0]
    for j in range(n_rows):
        for i in range(j):
            v[j] -= proj(v[i], v[j])
        if norm:
            v[j] /= np.linalg.norm(v[j])
def lda(xarr, yarr):
    """Linear Discriminant Analysis.

    Returns the transformation matrix `coeff` (P, C-1),
    where `x` is a matrix (N,P) and C is the number of
    classes. Each column of `x` represents a variable,
    while the rows contain observations. Each column of
    `coeff` contains coefficients for one transformation
    vector.

    Sample(s) can be embedded into the C-1 dimensional space
    by z = x coeff (z = np.dot(x, coeff)).

    :Parameters:
       x : 2d array_like object (N, P)
          data matrix
       y : 1d array_like object integer (N)
          class labels

    :Returns:
       coeff: 2d numpy array (P, C-1)
          transformation matrix.
    """
    n, p = xarr.shape[0], xarr.shape[1]
    labels = np.unique(yarr)

    # Within-class scatter: sum of per-class covariance * (n_c - 1).
    # (np.float was removed in NumPy 1.24; use the builtin float dtype.)
    sw = np.zeros((p, p), dtype=float)
    for i in labels:
        idx = np.where(yarr == i)[0]
        sw += np.cov(xarr[idx], rowvar=0) * (idx.shape[0] - 1)

    # Between-class scatter = total scatter - within-class scatter.
    st = np.cov(xarr, rowvar=0) * (n - 1)
    sb = st - sw

    # Generalized eigenproblem sb v = lambda sw v; keep the C-1 leading
    # eigenvectors, ordered by decreasing eigenvalue.
    evals, evecs = spla.eig(sb, sw, overwrite_a=True,
                            overwrite_b=True)
    idx = np.argsort(evals)[::-1]
    evecs = evecs[:, idx]
    evecs = evecs[:, :labels.shape[0] - 1]

    return evecs
def srda(xarr, yarr, alpha):
    """Spectral Regression Discriminant Analysis.

    Returns the (P, C-1) transformation matrix, where
    `x` is a matrix (N,P) and C is the number of classes.
    Each column of `x` represents a variable, while the
    rows contain observations. `x` must be centered
    (subtracting the empirical mean vector from each column
    of `x`).
    Sample(s) can be embedded into the C-1 dimensional space
    by z = x coeff (z = np.dot(x, coeff)).

    :Parameters:
       x : 2d array_like object
          training data (N, P)
       y : 1d array_like object integer
          target values (N)
       alpha : float (>=0)
          regularization parameter

    :Returns:
       coeff : 2d numpy array (P, C-1)
          tranformation matrix
    """
    # Point 1 in section 4.2: build one indicator vector per class plus
    # a constant vector, then orthogonalize the set.
    yu = np.unique(yarr)
    # dtype=float: the np.float alias was removed in NumPy 1.24.
    yk = np.zeros((yu.shape[0]+1, yarr.shape[0]), dtype=float)
    yk[0] = 1.
    for i in range(1, yk.shape[0]):
        yk[i][yarr==yu[i-1]] = 1.
    gso(yk, norm=False) # orthogonalize yk
    # Drop the constant vector and the last (linearly dependent) one.
    yk = yk[1:-1]
    # Point 2 in section 4.2: one ridge regression per response vector.
    ak = np.empty((yk.shape[0], xarr.shape[1]), dtype=float)
    for i in range(yk.shape[0]):
        ak[i] = ridge_base(xarr, yk[i], alpha)
    return ak.T
def pca(xarr, method='svd'):
    """Principal Component Analysis.

    Returns the principal component coefficients `coeff` (K,K)
    and the corresponding eigenvalues (K) of the covariance
    matrix of `x` (N,P) sorted by decreasing eigenvalue, where
    K=min(N,P). Each column of `x` represents a variable,
    while the rows contain observations. Each column of `coeff`
    contains coefficients for one principal component.
    Sample(s) can be embedded into the M (<=K) dimensional
    space by z = x coeff_M (z = np.dot(x, coeff[:, :M])).

    :Parameters:
       x : 2d numpy array (N, P)
          data matrix
       method : str
          'svd' or 'cov'

    :Returns:
       coeff, evals : 2d numpy array (K, K), 1d numpy array (K)
          principal component coefficients (eigenvectors of
          the covariance matrix of x) and eigenvalues sorted by
          decreasing eigenvalue.
    """
    n, p = xarr.shape
    if method == 'svd':
        # SVD of the centered, scaled data; singular values squared are
        # the covariance eigenvalues.
        centered = (xarr - np.mean(xarr, axis=0)) / np.sqrt(n - 1)
        u, s, _ = np.linalg.svd(centered.T, full_matrices=False)
        return u, s ** 2
    if method == 'cov':
        # Eigendecomposition of the covariance matrix, truncated to K.
        k = np.min((n, p))
        evals, evecs = np.linalg.eigh(np.cov(xarr, rowvar=0))
        order = np.argsort(evals)[::-1]
        evecs = evecs[:, order][:, :k]
        evals = evals[order][:k]
        return evecs, evals
    raise ValueError("method must be 'svd' or 'cov'")
def pca_fast(xarr, m, eps):
    """Fast principal component analysis using the fixed-point
    algorithm.

    Returns the first `m` principal component coefficients
    `coeff` (P, M). Each column of `x` represents a variable,
    while the rows contain observations. Each column of `coeff`
    contains coefficients for one principal component.
    Sample(s) can be embedded into the m (<=P) dimensional space
    by z = x coeff (z = np.dot(X, coeff)).

    :Parameters:
       x : 2d numpy array (N, P)
          data matrix
       m : integer (0 < m <= P)
          the number of principal axes or eigenvectors required
       eps : float (> 0)
          tolerance error

    :Returns:
       coeff : 2d numpy array (P, H)
          principal component coefficients
    """
    m = int(m)
    # Fixed seed keeps the random initialization reproducible.
    np.random.seed(0)
    evecs = np.random.rand(m, xarr.shape[1])
    C = np.cov(xarr, rowvar=0)
    for i in range(m):
        while True:
            prev = np.copy(evecs[i])
            evecs[i] = np.dot(C, evecs[i])
            # Gram-Schmidt against already-extracted components
            # (the subtracted term is zero when i == 0).
            overlaps = np.dot(evecs[i], evecs[:i].T).reshape(-1, 1)
            evecs[i] -= np.sum(overlaps * evecs[:i], axis=0)
            # Normalization.
            evecs[i] = evecs[i] / np.linalg.norm(evecs[i])
            # Stop once the direction no longer changes.
            if np.abs(np.dot(evecs[i], prev) - 1) < eps:
                break
    return evecs.T
def lda_fast(xarr, yarr):
    """Fast implementation of Linear Discriminant Analysis.

    Returns the (P, C-1) transformation matrix, where
    `x` is a centered matrix (N,P) and C is the number of classes.
    Each column of `x` represents a variable, while the
    rows contain observations. `x` must be centered
    (subtracting the empirical mean vector from each column
    of `x`).

    :Parameters:
       x : 2d array_like object
          training data (N, P)
       y : 1d array_like object integer
          target values (N)

    :Returns:
       A : 2d numpy array (P, C-1)
          tranformation matrix
    """
    # Build one indicator vector per class plus a constant vector,
    # then orthogonalize the set (same construction as srda()).
    yu = np.unique(yarr)
    # dtype=float: the np.float alias was removed in NumPy 1.24.
    yk = np.zeros((yu.shape[0]+1, yarr.shape[0]), dtype=float)
    yk[0] = 1.
    for i in range(1, yk.shape[0]):
        yk[i][yarr==yu[i-1]] = 1.
    gso(yk, norm=False) # orthogonalize yk
    # Drop the constant vector and the last (linearly dependent) one.
    yk = yk[1:-1]
    # One ordinary least squares fit per orthogonalized response vector.
    ak = np.empty((yk.shape[0], xarr.shape[1]), dtype=float)
    for i in range(yk.shape[0]):
        ak[i], _ = ols_base(xarr, yk[i], -1)
    return ak.T
def kpca(K):
    r"""Kernel Principal Component Analysis, PCA in
    a kernel-defined feature space making use of the
    dual representation.

    Returns the kernel principal component coefficients
    `coeff` (N, N) computed as :math:`\lambda^{-1/2} \mathbf{v}_j`
    where :math:`\lambda` and :math:`\mathbf{v}` are the ordered
    eigenvalues and the corresponding eigenvector of the centered
    kernel matrix K.
    (Raw docstring: ``\l`` and ``\m`` are invalid escape sequences
    in a regular string literal.)

    Sample(s) can be embedded into the G (<=N) dimensional space
    by z = K coeff_G (z = np.dot(K, coeff[:, :G])).

    :Parameters:
       K: 2d array_like object (N,N)
          precomputed centered kernel matrix

    :Returns:
       coeff, evals: 2d numpy array (N,N), 1d numpy array (N)
          kernel principal component coefficients, eigenvalues
          sorted by decreasing eigenvalue.
    """
    evals, evecs = np.linalg.eigh(K)
    # Sort eigenpairs by decreasing eigenvalue.
    idx = np.argsort(evals)
    idx = idx[::-1]
    evecs = evecs[:, idx]
    evals = evals[idx]
    # Scale each eigenvector by lambda**(-1/2) (dual normalization).
    for i in range(len(evals)):
        evecs[:, i] /= np.sqrt(evals[i])
    return evecs, evals
def kfda(Karr, yarr, lmb=0.001):
    """Kernel Fisher Discriminant Analysis.

    Returns the transformation matrix `coeff` (N,1),
    where `K` is a the kernel matrix (N,N) and y
    is the class labels (the alghoritm works only with 2
    classes).

    :Parameters:
       K: 2d array_like object (N, N)
          precomputed kernel matrix
       y : 1d array_like object integer (N)
          class labels
       lmb : float (>= 0.0)
          regularization parameter

    :Returns:
       coeff: 2d numpy array (N,1)
          kernel fisher coefficients.
    """
    labels = np.unique(yarr)
    n = yarr.shape[0]
    idx_a = np.where(yarr == labels[0])[0]
    idx_b = np.where(yarr == labels[1])[0]
    na = idx_a.shape[0]
    nb = idx_b.shape[0]
    Ka = Karr[:, idx_a]
    Kb = Karr[:, idx_b]
    # Within-class scatter in feature space: K_j (I - 1/n_j J) K_j^T
    # (subtracting the scalar 1/n_j from the identity yields I - (1/n_j)J),
    # regularized on the diagonal by lmb.
    Na = np.dot(np.dot(Ka, np.eye(na) - (1 / float(na))), Ka.T)
    Nb = np.dot(np.dot(Kb, np.eye(nb) - (1 / float(nb))), Kb.T)
    N = Na + Nb + np.diag(np.repeat(lmb, n))
    # Difference of the two class means in feature space.
    mean_a = np.sum(Ka, axis=1) / float(na)
    mean_b = np.sum(Kb, axis=1) / float(nb)
    return np.linalg.solve(N, mean_a - mean_b).reshape(-1, 1)
class LDA:
    """Linear Discriminant Analysis.
    """

    def __init__(self, method='cov'):
        """Initialization.

        :Parameters:
           method : str
              'cov' or 'fast'
        """
        self._coeff = None
        self._mean = None
        if method not in ['cov', 'fast']:
            raise ValueError("method must be 'cov' or 'fast'")
        self._method = method

    def learn(self, x, y):
        """Computes the transformation matrix.
        `x` is a matrix (N,P) and `y` is a vector containing
        the class labels. Each column of `x` represents a
        variable, while the rows contain observations.
        """
        # dtype=float/int: the np.float/np.int aliases were removed
        # in NumPy 1.24.
        xarr = np.asarray(x, dtype=float)
        yarr = np.asarray(y, dtype=int)
        if xarr.ndim != 2:
            raise ValueError("x must be a 2d array_like object")
        if yarr.ndim != 1:
            raise ValueError("y must be an 1d array_like object")
        if xarr.shape[0] != yarr.shape[0]:
            raise ValueError("x, y shape mismatch")
        self._mean = np.mean(xarr, axis=0)
        if self._method == 'cov':
            self._coeff = lda(xarr, yarr)
        elif self._method == 'fast':
            # The fast solver expects centered data.
            self._coeff = lda_fast(xarr-self._mean, yarr)

    def transform(self, t):
        """Embed `t` (M,P) into the C-1 dimensional space.
        Returns a (M,C-1) matrix.
        """
        if self._coeff is None:
            raise ValueError("no model computed")
        tarr = np.asarray(t, dtype=float)
        try:
            return np.dot(tarr-self._mean, self._coeff)
        except ValueError:
            # Bug fix: the original built this ValueError but never raised
            # it, silently returning None on a shape mismatch.
            raise ValueError("t, coeff: shape mismatch")

    def coeff(self):
        """Returns the tranformation matrix (P,C-1), where
        C is the number of classes. Each column contains
        coefficients for one transformation vector.
        """
        return self._coeff
class SRDA:
    """Spectral Regression Discriminant Analysis.
    """

    def __init__(self, alpha=0.001):
        """Initialization.

        :Parameters:
           alpha : float (>=0)
              regularization parameter
        """
        self._coeff = None
        self._mean = None
        self._alpha = alpha

    def learn(self, x, y):
        """Computes the transformation matrix.
        `x` is a matrix (N,P) and `y` is a vector containing
        the class labels. Each column of `x` represents a
        variable, while the rows contain observations.
        """
        # dtype=float/int: the np.float/np.int aliases were removed
        # in NumPy 1.24.
        xarr = np.asarray(x, dtype=float)
        yarr = np.asarray(y, dtype=int)
        if xarr.ndim != 2:
            raise ValueError("x must be a 2d array_like object")
        if yarr.ndim != 1:
            raise ValueError("y must be an 1d array_like object")
        if xarr.shape[0] != yarr.shape[0]:
            raise ValueError("x, y shape mismatch")
        self._mean = np.mean(xarr, axis=0)
        # srda() expects centered data.
        self._coeff = srda(xarr-self._mean, yarr, self._alpha)

    def transform(self, t):
        """Embed t (M,P) into the C-1 dimensional space.
        Returns a (M,C-1) matrix.
        """
        if self._coeff is None:
            raise ValueError("no model computed")
        tarr = np.asarray(t, dtype=float)
        try:
            return np.dot(tarr-self._mean, self._coeff)
        except ValueError:
            # Bug fix: the original built this ValueError but never raised
            # it, silently returning None on a shape mismatch.
            raise ValueError("t, coeff: shape mismatch")

    def coeff(self):
        """Returns the tranformation matrix (P,C-1), where
        C is the number of classes. Each column contains
        coefficients for one transformation vector.
        """
        return self._coeff
class KFDA:
    """Kernel Fisher Discriminant Analysis.
    """

    def __init__(self, lmb=0.001, kernel=None):
        """Initialization.

        :Parameters:
           lmb : float (>= 0.0)
              regularization parameter
           kernel : None or mlpy.Kernel object.
              if kernel is None, K and Kt in .learn()
              and in .transform() methods must be precomputed kernel
              matricies, else K and Kt must be training (resp.
              test) data in input space.
        """
        if kernel is not None:
            if not isinstance(kernel, Kernel):
                raise ValueError("kernel must be None or a mlpy.Kernel object")
        self._kernel = kernel
        self._x = None
        self._coeff = None
        self._lmb = lmb

    def learn(self, K, y):
        """Computes the transformation vector.

        :Parameters:
           K: 2d array_like object
              precomputed training kernel matrix (if kernel=None);
              training data in input space (if kernel is a Kernel object)
           y : 1d array_like object integer (N)
              class labels (only two classes)
        """
        # dtype=float/int: the np.float/np.int aliases were removed
        # in NumPy 1.24. np.array (not asarray) keeps a private copy.
        Karr = np.array(K, dtype=float)
        yarr = np.asarray(y, dtype=int)
        if yarr.ndim != 1:
            raise ValueError("y must be an 1d array_like object")
        if self._kernel is None:
            if Karr.shape[0] != Karr.shape[1]:
                raise ValueError("K must be a square matrix")
        else:
            # Keep the training data to build test kernels later.
            self._x = Karr.copy()
            Karr = self._kernel.kernel(Karr, Karr)
        labels = np.unique(yarr)
        if labels.shape[0] != 2:
            raise ValueError("number of classes must be = 2")
        self._coeff = kfda(Karr, yarr, self._lmb)

    def transform(self, Kt):
        """Embed Kt into the 1d kernel fisher space.

        :Parameters:
           Kt : 1d or 2d array_like object
              precomputed test kernel matrix. (if kernel=None);
              test data in input space (if kernel is a Kernel object).
        """
        if self._coeff is None:
            raise ValueError("no model computed")
        Ktarr = np.asarray(Kt, dtype=float)
        if self._kernel is not None:
            Ktarr = self._kernel.kernel(Ktarr, self._x)
        try:
            return np.dot(Ktarr, self._coeff)
        except ValueError:
            # Bug fix: the original built this ValueError but never raised
            # it, silently returning None on a shape mismatch.
            raise ValueError("Kt, coeff: shape mismatch")

    def coeff(self):
        """Returns the tranformation vector (N,1).
        """
        return self._coeff
class PCA:
    """Principal Component Analysis.
    """

    def __init__(self, method='svd', whiten=False):
        """Initialization.

        :Parameters:
           method : str
              method, 'svd' or 'cov'
           whiten : bool
              whitening. The eigenvectors will be scaled
              by eigenvalues**-(1/2)
        """
        self._coeff = None
        self._coeff_inv = None
        self._evals = None
        self._mean = None
        self._method = method
        self._whiten = whiten

    def learn(self, x):
        """Compute the principal component coefficients.
        `x` is a matrix (N,P). Each column of `x` represents a
        variable, while the rows contain observations.
        """
        # dtype=float: the np.float alias was removed in NumPy 1.24.
        xarr = np.asarray(x, dtype=float)
        if xarr.ndim != 2:
            raise ValueError("x must be a 2d array_like object")
        self._mean = np.mean(xarr, axis=0)
        # Bug fix: pass the validated float array (xarr), not the raw
        # input x — pca() accesses .shape, which list input lacks.
        self._coeff, self._evals = pca(xarr, method=self._method)
        if self._whiten:
            # Whitening: scale components by eigenvalue**(-1/2); the
            # inverse transform needs the reciprocal scaling.
            self._coeff_inv = np.empty((self._coeff.shape[1],
                self._coeff.shape[0]), dtype=float)
            for i in range(len(self._evals)):
                eval_sqrt = np.sqrt(self._evals[i])
                self._coeff_inv[i] = self._coeff[:, i] * \
                    eval_sqrt
                self._coeff[:, i] /= eval_sqrt
        else:
            self._coeff_inv = self._coeff.T

    def transform(self, t, k=None):
        """Embed `t` (M,P) into the k dimensional subspace.
        Returns a (M,K) matrix. If `k` =None will be set to
        min(N,P)
        """
        if self._coeff is None:
            raise ValueError("no PCA computed")
        # `is None`, not `== None`: identity test for the None singleton.
        if k is None:
            k = self._coeff.shape[1]
        if k < 1 or k > self._coeff.shape[1]:
            raise ValueError("k must be in [1, %d] or None" % \
                self._coeff.shape[1])
        tarr = np.asarray(t, dtype=float)
        try:
            return np.dot(tarr-self._mean, self._coeff[:, :k])
        except ValueError:
            raise ValueError("t, coeff: shape mismatch")

    def transform_inv(self, z):
        """Transform data back to its original space,
        where `z` is a (M,K) matrix. Returns a (M,P) matrix.
        """
        if self._coeff is None:
            raise ValueError("no PCA computed")
        zarr = np.asarray(z, dtype=float)
        return np.dot(zarr, self._coeff_inv[:zarr.shape[1]]) +\
            self._mean

    def coeff(self):
        """Returns the tranformation matrix (P,L), where
        L=min(N,P), sorted by decreasing eigenvalue.
        Each column contains coefficients for one principal
        component.
        """
        return self._coeff

    def coeff_inv(self):
        """Returns the inverse of tranformation matrix (L,P),
        where L=min(N,P), sorted by decreasing eigenvalue.
        """
        return self._coeff_inv

    def evals(self):
        """Returns sorted eigenvalues (L), where L=min(N,P).
        """
        return self._evals
class PCAFast:
    """Fast Principal Component Analysis.
    """

    def __init__(self, k=2, eps=0.01):
        """Initialization.

        :Parameters:
           k : integer
              the number of principal axes or eigenvectors required
           eps : float (> 0)
              tolerance error
        """
        self._coeff = None
        self._coeff_inv = None
        self._mean = None
        self._k = k
        self._eps = eps

    def learn(self, x):
        """Compute the firsts `k` principal component coefficients.
        `x` is a matrix (N,P). Each column of `x` represents a
        variable, while the rows contain observations.
        """
        # dtype=float: the np.float alias was removed in NumPy 1.24.
        xarr = np.asarray(x, dtype=float)
        if xarr.ndim != 2:
            raise ValueError("x must be a 2d array_like object")
        self._mean = np.mean(xarr, axis=0)
        self._coeff = pca_fast(xarr, m=self._k, eps=self._eps)
        # Components are orthonormal, so the transpose inverts.
        self._coeff_inv = self._coeff.T

    def transform(self, t):
        """Embed t (M,P) into the `k` dimensional subspace.
        Returns a (M,K) matrix.
        """
        if self._coeff is None:
            raise ValueError("no PCA computed")
        tarr = np.asarray(t, dtype=float)
        try:
            return np.dot(tarr-self._mean, self._coeff)
        except ValueError:
            raise ValueError("t, coeff: shape mismatch")

    def transform_inv(self, z):
        """Transform data back to its original space,
        where `z` is a (M,K) matrix. Returns a (M,P) matrix.
        """
        if self._coeff is None:
            raise ValueError("no PCA computed")
        zarr = np.asarray(z, dtype=float)
        return np.dot(zarr, self._coeff_inv) + self._mean

    def coeff(self):
        """Returns the tranformation matrix (P,K) sorted by
        decreasing eigenvalue.
        Each column contains coefficients for one principal
        component.
        """
        return self._coeff

    def coeff_inv(self):
        """Returns the inverse of tranformation matrix (K,P),
        sorted by decreasing eigenvalue.
        """
        return self._coeff_inv
class KPCA:
    """Kernel Principal Component Analysis.
    """

    def __init__(self, kernel=None):
        """Initialization.

        :Parameters:
           kernel : None or mlpy.Kernel object.
              if kernel is None, K and Kt in .learn()
              and in .transform() methods must be precomputed kernel
              matricies, else K and Kt must be training (resp.
              test) data in input space.
        """
        if kernel is not None:
            if not isinstance(kernel, Kernel):
                raise ValueError("kernel must be None or a mlpy.Kernel object")
        self._coeff = None
        self._evals = None
        self._K = None
        self._kernel = kernel
        self._x = None

    def learn(self, K):
        """Compute the kernel principal component coefficients.

        :Parameters:
           K: 2d array_like object
              precomputed training kernel matrix (if kernel=None);
              training data in input space (if kernel is a Kernel object)
        """
        # dtype=float: the np.float alias was removed in NumPy 1.24.
        Karr = np.asarray(K, dtype=float)
        if Karr.ndim != 2:
            raise ValueError("K must be a 2d array_like object")
        if self._kernel is None:
            if Karr.shape[0] != Karr.shape[1]:
                raise ValueError("K must be a square matrix")
        else:
            # Keep the training data to build test kernels later.
            self._x = Karr.copy()
            Karr = self._kernel.kernel(Karr, Karr)
        # Keep the uncentered kernel for centering test kernels later.
        self._K = Karr.copy()
        Karr = kernel.kernel_center(Karr, Karr)
        self._coeff, self._evals = kpca(Karr)

    def transform(self, Kt, k=None):
        """Embed Kt into the `k` dimensional subspace.

        :Parameters:
           Kt : 1d or 2d array_like object
              precomputed test kernel matrix. (if kernel=None);
              test data in input space (if kernel is a Kernel object).
        """
        if self._coeff is None:
            raise ValueError("no KPCA computed")
        # `is None`, not `== None`: identity test for the None singleton.
        if k is None:
            k = self._coeff.shape[1]
        if k < 1 or k > self._coeff.shape[1]:
            raise ValueError("k must be in [1, %d] or None" % \
                self._coeff.shape[1])
        Ktarr = np.asarray(Kt, dtype=float)
        if self._kernel is not None:
            Ktarr = self._kernel.kernel(Ktarr, self._x)
        Ktarr = kernel.kernel_center(Ktarr, self._K)
        try:
            return np.dot(Ktarr, self._coeff[:, :k])
        except ValueError:
            raise ValueError("Kt, coeff: shape mismatch")

    def coeff(self):
        """Returns the tranformation matrix (N,N) sorted by
        decreasing eigenvalue.
        """
        return self._coeff

    def evals(self):
        """Returns sorted eigenvalues (N).
        """
        return self._evals
| 30.197633 | 80 | 0.535604 | 14,886 | 0.583376 | 0 | 0 | 0 | 0 | 0 | 0 | 12,886 | 0.504997 |
2114b6575b25531e65cb62deff849490987110cc | 9,695 | py | Python | polls/views.py | Parth-Shah-99/Polling-Project | b9d1548dc801a0b02d1fd8b925276d9349bb10fe | [
"MIT"
] | 1 | 2021-06-23T11:24:01.000Z | 2021-06-23T11:24:01.000Z | polls/views.py | Parth-Shah-99/Polling-Project | b9d1548dc801a0b02d1fd8b925276d9349bb10fe | [
"MIT"
] | null | null | null | polls/views.py | Parth-Shah-99/Polling-Project | b9d1548dc801a0b02d1fd8b925276d9349bb10fe | [
"MIT"
] | null | null | null | from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.contrib.auth.forms import UserCreationForm
from django.contrib import messages
from django.urls import reverse
from .forms import UserSignupForm, CreatePollForm, UserUpdateForm
from django.contrib.auth.models import User
from polls.models import Question, Choice, UserProfile, UserVotes
from django.views import generic
from django.core.paginator import Paginator
import json
from django.db.models import Sum
# Create your views here.
def home(request):
    """Render the public landing page; signed-in users go to their profile."""
    user = request.user
    if not user.is_authenticated:
        return render(request, 'home.html', {})
    return HttpResponseRedirect(reverse('profile'))
def signup(request):
    """Register a new account and its UserProfile.

    Authenticated users are redirected to their profile. On a valid POST
    the user is created and sent to the login page; on an invalid POST
    the form is re-shown with a warning about the immutable username.
    """
    if request.user.is_authenticated:
        return HttpResponseRedirect(reverse('profile'))
    if request.method != 'POST':
        return render(request, 'signup.html', {'form': UserSignupForm()})
    form = UserSignupForm(request.POST)
    if not form.is_valid():
        form.fields['username'].help_text += "<br><b>CAUTION !!</b> Once the account is created, you won't be able to change the Username."
        return render(request, 'signup.html', {'form': form})
    cleaned = form.cleaned_data
    new_user = User(
        username=cleaned['username'],
        first_name=cleaned['fname'],
        last_name=cleaned['lname'],
        email=cleaned['email'],
    )
    new_user.set_password(cleaned['password1'])
    new_user.save()
    UserProfile.objects.create(user=new_user, anonymous=cleaned['anonymous'])
    messages.success(request, 'You are successfully registered. Please Login to create/answer the Polls.')
    return HttpResponseRedirect(reverse('login'))
def profile(request):
    """Show the profile dashboard with vote/poll statistics for the user."""
    if not request.user.is_authenticated:
        return render(request, 'profile.html')
    my_questions = Question.objects.filter(published_by=request.user.username)
    context = {
        "no_of_voted_polls": request.user.uservotes_set.all().count(),
        "created_polls": my_questions.count(),
        # Total votes received across all polls this user created.
        "my_polls_total_votes": sum(q.total_votes for q in my_questions),
    }
    return render(request, 'profile.html', context)
class ProfilePollsView(generic.ListView):
    """Paginated list of all polls with search and sort query parameters."""
    model = Question
    context_object_name = 'question_list'
    template_name = 'pollslist.html'
    paginate_by = 5

    def get_queryset(self):
        qs = Question.objects.all().order_by('-published_on')
        # Query parameters only apply on GET requests; later options
        # override earlier orderings, matching the parameter precedence.
        params = self.request.GET if self.request.method == "GET" else {}
        if 'search_text' in params:
            qs = qs.filter(question_text__icontains=params.get("search_text", None)).order_by('id')
        if 'text_az' in params:
            qs = qs.order_by('question_text')
        if 'text_za' in params:
            qs = qs.order_by('-question_text')
        if 'date_old' in params:
            qs = qs.order_by('published_on')
        if 'date_new' in params:
            qs = qs.order_by('-published_on')
        return qs

    def get_context_data(self, *args, **kwargs):
        context = super(ProfilePollsView, self).get_context_data(*args, **kwargs)
        if self.request.user.is_authenticated:
            # Questions the current user has already voted on.
            context['questions_voted'] = [
                vote.question for vote in UserVotes.objects.filter(user=self.request.user)
            ]
        return context
class UserProfilePollsView(generic.ListView):
    """Paginated list of the polls published by a specific user."""
    model = Question
    context_object_name = 'question_list'
    template_name = 'pollslist.html'
    paginate_by = 5

    def get_queryset(self):
        author = self.kwargs.get('username')
        return Question.objects.filter(published_by=author).order_by('-published_on')

    def get_context_data(self, *args, **kwargs):
        context = super(UserProfilePollsView, self).get_context_data(*args, **kwargs)
        if self.request.user.is_authenticated:
            # Questions the current user has already voted on.
            context['questions_voted'] = [
                vote.question for vote in UserVotes.objects.filter(user=self.request.user)
            ]
        author = self.kwargs.get('username')
        context['specific_user'] = author
        context['specific_user_anonymous'] = User.objects.get(username=author).userprofile.anonymous
        return context
class ProfileMyPollsView(generic.ListView):
    """Paginated list of the polls created by the signed-in user."""
    model = Question
    context_object_name = 'question_list'
    template_name = 'pollslist.html'
    paginate_by = 5

    def get_queryset(self):
        me = self.request.user.get_username()
        return Question.objects.filter(published_by=me).order_by('-published_on')

    def get_context_data(self, *args, **kwargs):
        context = super(ProfileMyPollsView, self).get_context_data(*args, **kwargs)
        # Flag so the template renders the "my polls" variant.
        context['mypolls'] = True
        if self.request.user.is_authenticated:
            # Questions the current user has already voted on.
            context['questions_voted'] = [
                vote.question for vote in UserVotes.objects.filter(user=self.request.user)
            ]
        return context
class PollsDetailView(generic.DetailView):
    """Poll detail page; disables the vote form if the user already voted."""
    model = Question
    context_object_name = 'question'
    template_name = 'pollsdetail.html'

    def get_context_data(self, *args, **kwargs):
        context = super(PollsDetailView, self).get_context_data(*args, **kwargs)
        previous_votes = UserVotes.objects.filter(
            user=self.request.user, question=context['question'])
        if previous_votes.exists():
            # Template uses these to lock the form and pre-select the choice.
            context['disabled'] = 'disabled'
            context['selected_choice'] = previous_votes[0].choice
        return context
class PollsResultView(generic.DetailView):
    """Poll results page; POST records the signed-in user's vote."""
    model = Question
    context_object_name = 'question'
    template_name = 'pollsresult.html'

    def post(self, request, **kwargs):
        """Record a vote for the posted choice, rejecting duplicate votes."""
        q = get_object_or_404(Question.objects.filter(id=self.kwargs['pk']))
        try:
            # Radio inputs are named "choice<id>"; strip the 6-char prefix.
            choice_id = (request.POST['choice'])[6:]
            choice = q.choice_set.get(id=choice_id)
            user = self.request.user
            uservote = UserVotes.objects.filter(user=user, choice=choice).count()
        except (KeyError, Choice.DoesNotExist):
            # Bug fix: the original called
            # reverse('pollsdetail', kwargs={'id': 'q.id'}), passing the
            # literal string 'q.id' under a kwarg the URLconf doesn't use
            # elsewhere — use positional args with the real question id,
            # matching the redirects below.
            return HttpResponseRedirect(reverse('pollsdetail', args=(q.id, )))
        else:
            if(uservote>0):
                messages.warning(request, 'You have already voted in this Poll.')
                return HttpResponseRedirect(reverse('pollsdetail', args=(q.id, )))
            # NOTE(review): read-modify-write on votes is racy under
            # concurrent posts; consider F('votes') + 1. Left as-is to
            # preserve behavior.
            choice.votes += 1
            choice.save()
            UserVotes.objects.create(user=user, choice=choice, question=q)
            return HttpResponseRedirect(reverse('pollsresult', args=(q.id, )))
def profilecreatepoll(request):
    """Create a poll with two required and up to three optional choices."""
    if request.method == "POST":
        question = Question(question_text=request.POST['question_text'],
                            published_by=request.user.username)
        question.save()
        # The first two choices are always stored, even if empty.
        for field in ('choice1', 'choice2'):
            Choice.objects.create(question=question, choice_text=request.POST[field])
        # The remaining choices are stored only when filled in.
        for field in ('choice3', 'choice4', 'choice5'):
            text = request.POST[field]
            if text:
                Choice.objects.create(question=question, choice_text=text)
        messages.success(request, 'Poll created successfully.')
        return HttpResponseRedirect(reverse('profilepolls'))
    return render(request, 'createpoll.html', {'form': CreatePollForm()})
def profileupdate(request):
    """Let a signed-in user edit profile fields; the username is immutable."""
    if not request.user.is_authenticated:
        return HttpResponseRedirect(reverse('profile'))
    if request.method == "POST":
        form = UserUpdateForm(request.POST, instance=request.user)
        form.fields['username'].disabled = True
        if form.is_valid():
            cleaned = form.cleaned_data
            request.user.first_name = cleaned['fname']
            request.user.last_name = cleaned['lname']
            request.user.email = cleaned['email']
            request.user.save()
            request.user.userprofile.anonymous = cleaned['anonymous']
            request.user.userprofile.save()
            messages.success(request, 'Your Profile has been Updated successfully.')
            return HttpResponseRedirect(reverse('profile'))
        # Invalid POST falls through and re-renders the bound form.
    else:
        form = UserUpdateForm(instance=request.user)
        form.fields['username'].disabled = True
        form.fields['username'].help_text = "You can't change your Username once the account is created."
        # Pre-fill the editable fields from the current account.
        initial_values = {
            'fname': request.user.first_name,
            'lname': request.user.last_name,
            'email': request.user.email,
            'anonymous': request.user.userprofile.anonymous,
        }
        for field_name, value in initial_values.items():
            form.fields[field_name].initial = value
    return render(request, 'profile_update.html', {'form': form})
# messages.debug, info, success, warning, error | 37.432432 | 139 | 0.665188 | 4,697 | 0.484477 | 0 | 0 | 0 | 0 | 0 | 0 | 1,434 | 0.147911 |
211508cef478fb4f72770257d50ec1792235097e | 1,355 | py | Python | src/katas/alphabet_position.py | Thavarshan/python-code-katas | 496d9224bbef3ee83a0e94f3a27b8e03159f84c5 | [
"MIT"
] | 14 | 2020-08-03T05:29:13.000Z | 2021-08-07T09:53:18.000Z | src/katas/alphabet_position.py | Thavarshan/python-code-katas | 496d9224bbef3ee83a0e94f3a27b8e03159f84c5 | [
"MIT"
] | null | null | null | src/katas/alphabet_position.py | Thavarshan/python-code-katas | 496d9224bbef3ee83a0e94f3a27b8e03159f84c5 | [
"MIT"
] | null | null | null | import re
class AlphabetPosition:
    """Convert the letters of a sentence to their 1-based alphabet positions."""

    # 'a' -> 1 ... 'z' -> 26, generated instead of spelled out by hand.
    alphabet = {chr(code): code - ord('a') + 1
                for code in range(ord('a'), ord('z') + 1)}

    def find_position(self, sentence: str) -> str:
        """Return the alphabet positions of the letters in `sentence`.

        Non-letter characters (digits, punctuation, whitespace) are
        ignored; positions are joined by single spaces. The original
        implementation also stripped spaces and built intermediate
        lists, both of which the regex already makes redundant.
        """
        letters = re.findall('[a-z]', sentence.lower())
        return ' '.join(str(self.alphabet[ch]) for ch in letters)
| 26.057692 | 79 | 0.487823 | 1,342 | 0.990406 | 0 | 0 | 0 | 0 | 0 | 0 | 497 | 0.36679 |
21160771a6aab5b0c62e80306c257ce5f76ec26d | 300 | py | Python | FreeCodeCamp/Scientific Computing with Python/Python for Everybody/09.py | saulpaiva/Code | 3c6591da52ccf40565ed0a4e857e83e7f643b72d | [
"MIT"
] | 1 | 2021-09-29T01:26:29.000Z | 2021-09-29T01:26:29.000Z | FreeCodeCamp/Scientific Computing with Python/Python for Everybody/09.py | saulpaiva/Code | 3c6591da52ccf40565ed0a4e857e83e7f643b72d | [
"MIT"
] | null | null | null | FreeCodeCamp/Scientific Computing with Python/Python for Everybody/09.py | saulpaiva/Code | 3c6591da52ccf40565ed0a4e857e83e7f643b72d | [
"MIT"
] | null | null | null | # Iterations: Definite Loops
'''
Use the 'for' word
there is a iteration variable like 'i' or 'friend'
'''
# for i in [5, 4, 3, 2, 1] :
# print(i)
# print('Blastoff!')
# friends = ['matheus', 'wataru', 'mogli']
# for friend in friends :
# print('happy new year:', friend)
# print('Done!')
| 18.75 | 50 | 0.596667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 288 | 0.96 |
211608a19d7eb2aa30ebb283349f2fda6915bba2 | 5,008 | py | Python | keras-version/main.py | bzantium/EA-LSTM | ddd318d3f622c1d3c99976b334f5b00df5767578 | [
"BSD-3-Clause"
] | 16 | 2020-01-14T08:53:12.000Z | 2021-12-18T05:30:12.000Z | keras-version/main.py | bzantium/EA-LSTM | ddd318d3f622c1d3c99976b334f5b00df5767578 | [
"BSD-3-Clause"
] | 1 | 2020-06-30T06:39:00.000Z | 2020-07-01T00:12:03.000Z | keras-version/main.py | bzantium/EA-LSTM | ddd318d3f622c1d3c99976b334f5b00df5767578 | [
"BSD-3-Clause"
] | 5 | 2020-01-04T05:51:23.000Z | 2021-05-16T08:14:24.000Z | from utils import (load_data, data_to_series_features,
apply_weight, is_minimum)
from algorithm import (initialize_weights, individual_to_key,
pop_to_weights, select, reconstruct_population)
from sklearn.metrics import mean_squared_error, mean_absolute_error
from tensorflow.keras import optimizers
from tensorflow.keras.models import clone_model
import argparse
import math
import numpy as np
from model import make_model
from copy import copy
from sklearn.model_selection import train_test_split
def parse_arguments(argv=None):
    """Parse command-line hyperparameters for training and evolutionary search.

    :param argv: optional list of argument strings; when None (the default,
        preserving the original call signature) arguments are read from
        sys.argv, so existing callers are unaffected. Passing an explicit
        list makes the function usable from tests and notebooks.
    :return: argparse.Namespace with all settings.
    """
    # argument parsing
    parser = argparse.ArgumentParser(description="Specify Params for Experimental Setting")
    parser.add_argument('--iterations', type=int, default=20,
                        help="Specify the number of evolution iterations")
    parser.add_argument('--batch_size', type=int, default=256,
                        help="Specify batch size")
    parser.add_argument('--initial_epochs', type=int, default=100,
                        help="Specify the number of epochs for initial training")
    parser.add_argument('--num_epochs', type=int, default=20,
                        help="Specify the number of epochs for competitive search")
    parser.add_argument('--log_step', type=int, default=100,
                        help="Specify log step size for training")
    parser.add_argument('--learning_rate', type=float, default=1e-3,
                        help="Learning rate")
    parser.add_argument('--data', type=str, default='pollution.csv',
                        help="Path to the dataset")
    parser.add_argument('--pop_size', type=int, default=36)
    parser.add_argument('--code_length', type=int, default=6)
    parser.add_argument('--n_select', type=int, default=6)
    parser.add_argument('--time_steps', type=int, default=18)
    parser.add_argument('--n_hidden', type=int, default=128)
    parser.add_argument('--n_output', type=int, default=1)
    parser.add_argument('--max_grad_norm', type=float, default=1.0)
    return parser.parse_args(argv)
def main():
    """Run the full EA-LSTM experiment.

    Pipeline: load the series data, split 70/15/15 into train/valid/test,
    train an initial LSTM, then run a competitive random search that evolves
    per-time-step attention weights, and finally report test RMSE/MAE.
    """
    args = parse_arguments()
    data, y_scaler = load_data(args.data)
    args.n_features = np.size(data, axis=-1)
    X, y = data_to_series_features(data, args.time_steps)
    # 70% train; the remaining 30% is split in half into validation and test.
    train_X, X, train_y, y = train_test_split(X, y, test_size=0.3)
    valid_X, test_X, valid_y, test_y = train_test_split(X, y, test_size=0.5)

    def make_optimizer():
        # A Keras optimizer instance must not be shared between models: once
        # compiled/applied, its slot variables are bound to the first model's
        # weights, so reusing one Adam object across the models created inside
        # the search loop breaks. Build a fresh optimizer for every compile.
        return optimizers.Adam(learning_rate=args.learning_rate,
                               clipnorm=args.max_grad_norm)

    best_model = make_model(args)
    best_weight = [1.0] * args.time_steps  # start with uniform attention
    best_model.compile(loss='mse', optimizer=make_optimizer())
    print("Initial training before competitive random search")
    # Pass batch_size explicitly: previously args.batch_size was parsed but
    # never used, so Keras silently trained with its default of 32.
    best_model.fit(apply_weight(train_X, best_weight), train_y,
                   batch_size=args.batch_size, epochs=args.initial_epochs,
                   validation_data=(apply_weight(valid_X, best_weight), valid_y),
                   shuffle=True)
    print("\nInitial training is done. Start competitive random search.\n")
    pop, weights = initialize_weights(args.pop_size, args.time_steps, args.code_length)
    key_to_rmse = {}  # caches validation RMSE per individual to skip re-training
    for iteration in range(args.iterations):
        for enum, (indiv, weight) in enumerate(zip(pop, weights)):
            print('iteration: [%d/%d] indiv_no: [%d/%d]' % (iteration + 1, args.iterations, enum + 1, args.pop_size))
            key = individual_to_key(indiv)
            if key not in key_to_rmse:
                # Fine-tune a clone of the current best model under this
                # individual's attention weights.
                model = make_model(args)
                model.compile(loss='mse', optimizer=make_optimizer())
                model.set_weights(best_model.get_weights())
                model.fit(apply_weight(train_X, weight), train_y,
                          batch_size=args.batch_size, epochs=args.num_epochs,
                          validation_data=(apply_weight(valid_X, weight), valid_y),
                          shuffle=True)
                # Evaluate on the validation split in original units.
                pred_y = model.predict(apply_weight(valid_X, weight),
                                       batch_size=args.batch_size)
                inv_pred_y = y_scaler.inverse_transform(pred_y)
                inv_valid_y = y_scaler.inverse_transform(np.expand_dims(valid_y, axis=1))
                rmse = math.sqrt(mean_squared_error(inv_valid_y, inv_pred_y))
                mae = mean_absolute_error(inv_valid_y, inv_pred_y)
                print("RMSE: %.4f, MAE: %.4f" % (rmse, mae))
                if is_minimum(rmse, key_to_rmse):
                    # New best individual: keep its trained weights and
                    # attention vector.
                    best_model.set_weights(model.get_weights())
                    best_weight = copy(weight)
                key_to_rmse[key] = rmse
        # Select the fittest individuals and rebuild the population around them.
        pop_selected, fitness_selected = select(pop, args.n_select, key_to_rmse)
        pop = reconstruct_population(pop_selected, args.pop_size)
        weights = pop_to_weights(pop, args.time_steps, args.code_length)
    print('test evaluation:')
    pred_y = best_model.predict(apply_weight(test_X, best_weight),
                                batch_size=args.batch_size)
    inv_pred_y = y_scaler.inverse_transform(pred_y)
    inv_test_y = y_scaler.inverse_transform(np.expand_dims(test_y, axis=1))
    rmse = math.sqrt(mean_squared_error(inv_test_y, inv_pred_y))
    mae = mean_absolute_error(inv_test_y, inv_pred_y)
    print("RMSE: %.4f, MAE: %.4f" % (rmse, mae))
# Script entry point: run the full experiment only when executed directly,
# not when imported as a module.
if __name__ == '__main__':
    main()
| 50.08 | 117 | 0.673123 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 742 | 0.148163 |