hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6448431393bf85bea3e0e21bbe767d47530277f5 | 5,271 | py | Python | notebooks/utils.py | wranda12/06-machine-learning | b8dc391b2599cfa5d2b1fef092ccd9eb2d27db39 | [
"MIT"
] | 1 | 2020-10-19T07:47:35.000Z | 2020-10-19T07:47:35.000Z | notebooks/utils.py | wranda12/06-machine-learning | b8dc391b2599cfa5d2b1fef092ccd9eb2d27db39 | [
"MIT"
] | 1 | 2020-11-20T04:20:04.000Z | 2020-11-20T04:20:04.000Z | notebooks/utils.py | wranda12/06-machine-learning | b8dc391b2599cfa5d2b1fef092ccd9eb2d27db39 | [
"MIT"
] | 11 | 2018-10-16T01:36:12.000Z | 2019-01-18T01:59:27.000Z |
#
# Some functions to be used in the tutorial
#
# Developed by Debora Cristina Correa
import datetime
import pandas as pd
import matplotlib.pyplot as plt # for 2D plotting
import numpy as np
import seaborn as sns # plot nicely =)
from sklearn.base import clone
from sklearn.decomposition import PCA
from sklearn.model_selection import learning_curve
def plot_decision_boundary(x_train, y_train, estimator):
    """Plot the decision boundary of a classifier in 2D.

    The training data is projected onto its two leading principal components,
    a clone of *estimator* is fitted on that 2-D projection, and the predicted
    probability of class 1 is drawn over a dense mesh.

    based on: http://scikit-learn.org/stable/auto_examples/semi_supervised/plot_label_propagation_versus_svm_iris.html

    Parameters
    ----------
    x_train: training set
    y_train: labels of the training set
    estimator: classifier, probability must be set as True
        (``predict_proba`` is called on the mesh, so the estimator must
        support probability estimates, e.g. ``SVC(probability=True)``)
    """
    def make_meshgrid(x, y, h=.02):
        """Create a mesh of points to plot in

        Parameters
        ----------
        x: data to base x-axis meshgrid on
        y: data to base y-axis meshgrid on
        h: stepsize for meshgrid, optional

        Returns
        -------
        xx, yy : ndarray
        """
        # Pad the data range by one unit so border points are not drawn
        # directly on the plot edge.
        x_min, x_max = x.min() - 1, x.max() + 1
        y_min, y_max = y.min() - 1, y.max() + 1
        xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                             np.arange(y_min, y_max, h))
        return xx, yy

    # cloning the estimator so the caller's (possibly fitted) object is untouched
    clf = clone(estimator)

    # Reduce to two dimensions so the boundary can be drawn in a plane.
    pca = PCA(n_components=2)
    x_train_pca = pca.fit_transform(x_train)
    clf.fit(x_train_pca, y_train)

    # Probability of class 1 at every mesh node forms the decision surface.
    xx, yy = make_meshgrid(x_train_pca[:, 0], x_train_pca[:, 1])
    grid = np.c_[xx.ravel(), yy.ravel()]
    probs = clf.predict_proba(grid)[:, 1].reshape(xx.shape)

    # plotting the decision boundary
    f, ax = plt.subplots(figsize=(8, 6))
    sns.set(context="notebook", style="whitegrid",
            rc={"axes.axisbelow": False})
    contour = ax.contourf(xx, yy, probs, 25, cmap="RdBu",
                          vmin=0, vmax=1)
    ax_c = f.colorbar(contour)
    ax_c.set_label("$P(y = 1)$")
    ax_c.set_ticks([0, .25, .5, .75, 1])
    ax.scatter(x_train_pca[:,0], x_train_pca[:, 1], c=y_train, s=50,
               cmap="RdBu", vmin=-.2, vmax=1.2,
               edgecolor="white", linewidth=1)
    # NOTE(review): axis limits are hard-coded to [-5, 5]; data whose PCA
    # projection falls outside this square will be clipped — confirm this
    # range suits the datasets used in the tutorial.
    ax.set(aspect="equal",
           xlim=(-5, 5), ylim=(-5, 5),
           xlabel="$X_1$", ylabel="$X_2$")
    plt.show()
def plot_roc_curve(fpr, tpr, label=None):
    """Plot a ROC curve together with the diagonal "random guess" line.

    Parameters
    ----------
    fpr : array-like
        False positive rates (x-axis), e.g. from ``sklearn.metrics.roc_curve``.
    tpr : array-like
        True positive rates (y-axis).
    label : str, optional
        Legend label for the curve.
    """
    plt.plot(fpr, tpr, linewidth=2, label=label)
    plt.plot([0, 1], [0, 1], 'k--')  # diagonal = performance of a random classifier
    plt.axis([0, 1, 0, 1])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    # Bug fix: `label` was accepted but never shown, because plt.legend() was
    # never called — and plt.show() below makes it impossible for the caller
    # to add the legend afterwards.
    if label is not None:
        plt.legend(loc="lower right")
    plt.show()
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
                        n_jobs=None, train_sizes=np.linspace(.1, 1.0, 5)):
    """
    Generate a simple plot of the test and training learning curve.
    based on: http://scikit-learn.org/stable/auto_examples/model_selection/plot_learning_curve.html

    Parameters
    ----------
    estimator : classifier
    title : title for the chart.
    X : training set, where n_samples is the number of samples and
        n_features is the number of features.
    y : labels of the training set
    ylim : defines minimum and maximum yvalues plotted.
    cv : determines the cross-validation splitting strategy.
        Possible inputs for cv are:
          - None, to use the default 3-fold cross-validation,
          - integer, to specify the number of folds.
          - An object to be used as a cross-validation generator.
          - An iterable yielding train/test splits.
    n_jobs : number of jobs to run in parallel.
    train_sizes : relative or absolute numbers of training examples that will be used to
        generate the learning curve. If the dtype is float, it is regarded as a
        fraction of the maximum size of the training set (that is determined
        by the selected validation method), i.e. it has to be within (0, 1].
        Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually have to
        be big enough to contain at least one sample from each class.
        (default: np.linspace(0.1, 1.0, 5))

    Returns
    -------
    The ``matplotlib.pyplot`` module, so the caller can tweak or save the
    figure before showing it.
    """
    # cloning the estimator so repeated calls do not mutate the caller's object
    clf = clone(estimator)

    plt.figure()
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    # NOTE: `train_sizes` is rebound here to the absolute training-set sizes
    # actually used by scikit-learn (one entry per point on the x-axis).
    train_sizes, train_scores, test_scores = learning_curve(
        clf, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
    # Mean and standard deviation of the scores across the CV folds.
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.grid()
    # Shade a +/- one standard deviation band around each mean curve.
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1,
                     color="r")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
             label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
             label="Cross-validation score")
    plt.legend(loc="best")
    return plt
| 33.150943 | 118 | 0.632138 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,456 | 0.465946 |
64487297478b72747418471787c1d20d2191f34f | 510 | py | Python | vpn-proxy/app/migrations/0005_tunnel_protocol.py | dimrozakis/priv-net | 3eadea10c3b437ea82d8233579b31f60eaac51b1 | [
"Apache-2.0"
] | null | null | null | vpn-proxy/app/migrations/0005_tunnel_protocol.py | dimrozakis/priv-net | 3eadea10c3b437ea82d8233579b31f60eaac51b1 | [
"Apache-2.0"
] | null | null | null | vpn-proxy/app/migrations/0005_tunnel_protocol.py | dimrozakis/priv-net | 3eadea10c3b437ea82d8233579b31f60eaac51b1 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-01 13:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``protocol`` field (udp/tcp) to the ``Tunnel`` model."""

    # Must run after 0004 (which removed Forwarding.src_addr).
    dependencies = [
        ('app', '0004_remove_forwarding_src_addr'),
    ]

    operations = [
        migrations.AddField(
            model_name='tunnel',
            name='protocol',
            # Transport protocol of the tunnel; existing rows default to UDP.
            field=models.CharField(choices=[('udp', 'UDP'), ('tcp', 'TCP')], default='udp', max_length=3),
        ),
    ]
| 24.285714 | 106 | 0.605882 | 353 | 0.692157 | 0 | 0 | 0 | 0 | 0 | 0 | 151 | 0.296078 |
644a45eb3ac38fe9ed2fc02d1dd3853ea5184b50 | 714 | py | Python | t/config_test.py | jrmsdev/rosshm | 8f40dff89729351e4cc3485124b352e44fd88cd1 | [
"BSD-3-Clause"
] | null | null | null | t/config_test.py | jrmsdev/rosshm | 8f40dff89729351e4cc3485124b352e44fd88cd1 | [
"BSD-3-Clause"
] | null | null | null | t/config_test.py | jrmsdev/rosshm | 8f40dff89729351e4cc3485124b352e44fd88cd1 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) Jeremías Casteglione <jrmsdev@gmail.com>
# See LICENSE file.
from os import path
def test_config(testing_config):
    """The config loads from rosshm.ini and exposes the debug flag."""
    with testing_config() as cfg:
        assert cfg.filename().endswith('rosshm.ini')
        assert cfg.getbool('debug')
    with testing_config(init = False):
        # `cfg` is deliberately reused from the block above: re-initialise it
        # explicitly and check the debug flag survives.
        cfg.init(fn = None)
        assert cfg.getbool('debug')
def test_database(testing_config):
    """Database settings default to an in-memory sqlite with no extra config."""
    with testing_config() as cfg:
        settings = cfg.database()
        assert settings['driver'] == 'sqlite'
        assert settings['name'] == ':memory:'
        assert settings['config'] == ''
    with testing_config() as cfg:
        # Overriding db.config must be reflected by database().
        cfg_path = path.join(path.sep, 'testing', 'db.cfg')
        cfg._cfg.set('rosshm', 'db.config', cfg_path)
        settings = cfg.database()
        assert settings['config'] == cfg_path
| 28.56 | 56 | 0.694678 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 188 | 0.262937 |
644a513a8c3131883ee5ba1364e79e6383f94060 | 6,791 | py | Python | venv/lib/python3.8/site-packages/IPython/testing/decorators.py | johncollinsai/post-high-frequency-data | 88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4 | [
"MIT"
] | 2 | 2022-02-26T11:19:40.000Z | 2022-03-28T08:23:25.000Z | venv/lib/python3.8/site-packages/IPython/testing/decorators.py | johncollinsai/post-high-frequency-data | 88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4 | [
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/IPython/testing/decorators.py | johncollinsai/post-high-frequency-data | 88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4 | [
"MIT"
] | 1 | 2022-03-28T09:19:34.000Z | 2022-03-28T09:19:34.000Z | # -*- coding: utf-8 -*-
"""Decorators for labeling test objects.
Decorators that merely return a modified version of the original function
object are straightforward. Decorators that return a new function object need
to use nose.tools.make_decorator(original_function)(decorator) in returning the
decorator, in order to preserve metadata such as function name, setup and
teardown functions and so on - see nose.tools for more information.
This module provides a set of useful decorators meant to be ready to use in
your own tests. See the bottom of the file for the ready-made ones, and if you
find yourself writing a new one that may be of generic use, add it here.
Included decorators:
Lightweight testing that remains unittest-compatible.
- An @as_unittest decorator can be used to tag any normal parameter-less
function as a unittest TestCase. Then, both nose and normal unittest will
recognize it as such. This will make it easier to migrate away from Nose if
we ever need/want to while maintaining very lightweight tests.
NOTE: This file contains IPython-specific decorators. Using the machinery in
IPython.external.decorators, we import either numpy.testing.decorators if numpy is
available, OR use equivalent code in IPython.external._decorators, which
we've copied verbatim from numpy.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import shutil
import sys
import tempfile
import unittest
import warnings
from importlib import import_module
from decorator import decorator
# Expose the unittest-driven decorators
from .ipunittest import ipdoctest, ipdocstring
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
# Simple example of the basic idea
def as_unittest(func):
    """Turn a parameter-less callable into a ``unittest.TestCase`` subclass.

    The returned class has a single ``test`` method that simply calls
    *func*, and carries *func*'s name, so both nose and plain unittest
    recognise it as a test case.
    """
    class _Wrapped(unittest.TestCase):
        def test(self):
            func()

    _Wrapped.__name__ = func.__name__
    return _Wrapped
# Utility functions
def skipif(skip_condition, msg=None):
    """Mark a test for skipping when *skip_condition* is true.

    Parameters
    ----------
    skip_condition : bool
        Whether the test should be skipped.  Must be a plain bool; this is
        asserted before building the marker.
    msg : string, optional
        Reason reported when the test is skipped.

    Returns
    -------
    decorator : function
        A ``pytest.mark.skipif`` marker carrying the condition and reason.
    """
    reason = "Test skipped due to test condition." if msg is None else msg

    import pytest

    assert isinstance(skip_condition, bool)
    return pytest.mark.skipif(skip_condition, reason=reason)
# A version with the condition set to true, common case just to attach a message
# to a skip decorator
def skip(msg=None):
    """Decorator factory - unconditionally skip the decorated test.

    Parameters
    ----------
    msg : string, optional
        Reason attached to the skip marker.

    Returns
    -------
    decorator : function
        Marker that always skips, with *msg* as the reason.

    Raises
    ------
    ValueError
        If *msg* is a truthy non-string — typically the result of writing
        ``@skip`` (passing the function itself) instead of ``@skip()``.
    """
    invalid_argument = bool(msg) and not isinstance(msg, str)
    if invalid_argument:
        raise ValueError('invalid object passed to `@skip` decorator, did you '
                         'meant `@skip()` with brackets ?')
    return skipif(True, msg)
def onlyif(condition, msg):
    """Inverse of ``skipif``: run the test only when *condition* is true."""
    skip_when = not condition
    return skipif(skip_when, msg)
#-----------------------------------------------------------------------------
# Utility functions for decorators
def module_not_available(module):
    """Return True when *module* can NOT be imported.

    Used by skip decorators to delay potentially costly imports (e.g.
    'import numpy') until the test suite is actually executed.
    """
    try:
        import_module(module)
    except ImportError:
        return True
    return False
#-----------------------------------------------------------------------------
# Decorators for public use

# Decorators to skip certain tests on specific platforms.
skip_win32 = skipif(sys.platform == 'win32',
                    "This test does not run under Windows")
skip_linux = skipif(sys.platform.startswith('linux'),
                    "This test does not run under Linux")
skip_osx = skipif(sys.platform == 'darwin',"This test does not run under OS X")

# Decorators to skip tests if not on specific platforms.
skip_if_not_win32 = skipif(sys.platform != 'win32',
                           "This test only runs under Windows")
skip_if_not_linux = skipif(not sys.platform.startswith('linux'),
                           "This test only runs under Linux")

# True when running on a Unix-like platform without a usable X11 display.
_x11_skip_cond = (sys.platform not in ('darwin', 'win32') and
                  os.environ.get('DISPLAY', '') == '')
_x11_skip_msg = "Skipped under *nix when X11/XOrg not available"

skip_if_no_x11 = skipif(_x11_skip_cond, _x11_skip_msg)

# Other skip decorators

# generic skip without module: builds a skip decorator for a missing module
skip_without = lambda mod: skipif(module_not_available(mod), "This test requires %s" % mod)

skipif_not_numpy = skip_without('numpy')

skipif_not_matplotlib = skip_without('matplotlib')

# A null 'decorator', useful to make more readable code that needs to pick
# between different decorators based on OS or other conditions
null_deco = lambda f: f

# Some tests only run where we can use unicode paths. Note that we can't just
# check os.path.supports_unicode_filenames, which is always False on Linux.
try:
    # Probe the filesystem encoding by creating a file whose name contains a
    # non-ASCII character.
    f = tempfile.NamedTemporaryFile(prefix=u"tmp€")
except UnicodeEncodeError:
    unicode_paths = False
else:
    unicode_paths = True
    f.close()

onlyif_unicode_paths = onlyif(unicode_paths, ("This test is only applicable "
                              "where we can use unicode in filenames."))
def onlyif_cmds_exist(*commands):
    """
    Decorator factory: skip the test when any of *commands* is not installed.

    The skip reason names the first missing command (in argument order);
    when every command is found, the identity decorator is returned.
    """
    assert (
        os.environ.get("IPTEST_WORKING_DIR", None) is None
    ), "iptest deprecated since IPython 8.0"
    missing = [cmd for cmd in commands if not shutil.which(cmd)]
    if missing:
        import pytest

        reason = f"This test runs only if command '{missing[0]}' is installed"
        return pytest.mark.skip(reason=reason)
    return null_deco
| 33.453202 | 91 | 0.673686 | 75 | 0.011041 | 0 | 0 | 0 | 0 | 0 | 0 | 4,601 | 0.677315 |
644a804c2e809b51b62ab072cf234ac96a1d9a07 | 1,095 | py | Python | code/_pth0_only/parameters_compute.py | uq-aibe/spir-oz | 4ae3ff6f230679f21b9c4072529df94187f9e098 | [
"MIT"
] | null | null | null | code/_pth0_only/parameters_compute.py | uq-aibe/spir-oz | 4ae3ff6f230679f21b9c4072529df94187f9e098 | [
"MIT"
] | null | null | null | code/_pth0_only/parameters_compute.py | uq-aibe/spir-oz | 4ae3ff6f230679f21b9c4072529df94187f9e098 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import numpy as np
from parameters import *
from fcn_economic import *
# ================================================================
# Computational parameters

# Ranges for state variables (capital)
kap_L = 0.1
kap_U = 10
# Ranges for policy variables (labour)
lab_L = 0.1
lab_U = 2

# Broadcast the scalar bounds to one value per agent.
vec_kap_L = kap_L * ones(n_agt)
vec_kap_U = kap_U * ones(n_agt)
vec_lab_L = lab_L * ones(n_agt)
vec_lab_U = lab_U * ones(n_agt)

# Output, consumption and utility bounds implied by the ranges above.
out_L = output(vec_kap_L, vec_lab_L, 0)[0]
out_U = output(vec_kap_U, vec_lab_U, 0)[0]
con_L = 0.1 * out_L   # reuse out_L instead of recomputing output()
con_U = 0.99 * out_U  # reuse out_U instead of recomputing output()
# Bug fix: these previously referenced the undefined names `vec_con_L` /
# `vec_con_U` (a NameError); the consumption bounds computed above are
# `con_L` / `con_U`.
utl_L = utility(con_L, vec_lab_U, 0)
utl_U = utility(con_U, vec_lab_L, 0)

# warm start for the policy variables
###
# k0(j) = exp(log(kmin) + (log(kmax)-log(kmin))*(ord(j)-1)/(card(j)-1));
# Geometrically spaced capital grid between kap_L and kap_U (log-linear).
k_init = np.ones(n_agt)
for j in range(n_agt):
    k_init[j] = np.exp(
        np.log(kap_L) + (np.log(kap_U) - np.log(kap_L)) * j / (n_agt - 1)
    )
# ======================================================================
# constraint upper and lower bounds
# NOTE(review): the bounds are asymmetric (-1e-6 vs 1e6) — confirm the upper
# bound is not meant to be the symmetric tolerance 1e-6.
ctt_L = -1e-6
ctt_U = 1e6
| 26.707317 | 73 | 0.591781 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 390 | 0.356164 |
644c92e4d0e82bad61581439da89a101823a1f63 | 3,269 | py | Python | src/zc/relation/queryfactory.py | witsch/zc.relation | a928064b5b29a8ed0590cb474b5c421365fa19a1 | [
"ZPL-2.1"
] | null | null | null | src/zc/relation/queryfactory.py | witsch/zc.relation | a928064b5b29a8ed0590cb474b5c421365fa19a1 | [
"ZPL-2.1"
] | 9 | 2016-08-02T12:31:05.000Z | 2021-04-30T14:53:28.000Z | src/zc/relation/queryfactory.py | witsch/zc.relation | a928064b5b29a8ed0590cb474b5c421365fa19a1 | [
"ZPL-2.1"
] | 5 | 2015-04-03T06:48:08.000Z | 2020-02-17T10:40:14.000Z | ##############################################################################
#
# Copyright (c) 2006-2008 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import BTrees
import persistent
import zc.relation.catalog
import zc.relation.interfaces
import zope.interface
##############################################################################
# a common case transitive queries factory
_marker = object()
@zope.interface.implementer(zc.relation.interfaces.IQueryFactory)
class TransposingTransitive(persistent.Persistent):
def __init__(self, name1, name2, static=()):
self.names = [name1, name2] # a list so we can use index
if getattr(static, 'items', None) is not None:
static = static.items()
self.static = tuple(sorted(static))
def __call__(self, query, catalog):
# check static values, if any. we want to be permissive here. (as
# opposed to finding searchindexes in the catalog's
# _getSearchIndexResults method)
for k, v in self.static:
if k in query:
if isinstance(v, zc.relation.catalog.Any):
if isinstance(query[k], zc.relation.catalog.Any):
if query[k].source.issubset(v.source):
continue
elif query[k] in v:
continue
elif v == query[k]:
continue
return None
static = []
name = other = _marker
for nm, val in query.items():
try:
ix = self.names.index(nm)
except ValueError:
static.append((nm, val))
else:
if name is not _marker:
# both were specified: no transitive search known.
return None
else:
name = nm
other = self.names[not ix]
if name is not _marker:
def getQueries(relchain):
if not relchain:
yield query
return
if other is None:
rels = relchain[-1]
else:
tokens = catalog.getValueTokens(other, relchain[-1])
if not tokens:
return
rels = zc.relation.catalog.Any(tokens)
res = BTrees.family32.OO.Bucket(static)
res[name] = rels
yield res
return getQueries
def __eq__(self, other):
return (isinstance(other, self.__class__) and
set(self.names) == set(other.names)
and self.static == other.static)
def __ne__(self, other):
return not self.__eq__(other)
| 36.730337 | 78 | 0.516978 | 2,304 | 0.704803 | 1,743 | 0.533191 | 2,370 | 0.724992 | 0 | 0 | 982 | 0.300398 |
644e700c01e62c3075fcc1c7c8eb2a31a7e080af | 2,524 | py | Python | radolan_scraper/add_coordinate_grid.py | JarnoRFB/radolan-scraper | c4189ff9981306569034e5a4e2c01776503976d1 | [
"MIT"
] | null | null | null | radolan_scraper/add_coordinate_grid.py | JarnoRFB/radolan-scraper | c4189ff9981306569034e5a4e2c01776503976d1 | [
"MIT"
] | 2 | 2019-09-10T11:51:22.000Z | 2019-10-24T11:07:05.000Z | radolan_scraper/add_coordinate_grid.py | JarnoRFB/radolan-scraper | c4189ff9981306569034e5a4e2c01776503976d1 | [
"MIT"
] | null | null | null | """Add the multidimensional coordinates to the netcdf file."""
from pathlib import Path
from typing import *
import h5netcdf
import numpy as np
def main():
    """Attach the lat/lon grids to the combined radolan netcdf file."""
    here = Path(__file__)
    data_root = here.parents[3] / "data" / "radolan"
    meta_root = here.parents[1] / "metadata"
    target = data_root / "netcdf" / "combined.nc"
    run(target, meta_root / "phi_center.txt", meta_root / "lambda_center.txt")
def run(
    add_grid_to: Path, latitude_definitions: Path, longitude_definitions: Path
) -> None:
    """Write 2-D coordinate variables into an existing netcdf file.

    Creates the auxiliary coordinate variables ``xc`` (longitude) and ``yc``
    (latitude) on the ("y", "x") dimensions, then points the ``rain``
    variable's ``coordinates`` attribute at them so CF-aware readers can
    locate each grid cell.

    Parameters
    ----------
    add_grid_to:
        Path of the netcdf file to modify in place.
    latitude_definitions:
        Text file with the latitude of each grid-cell centre.
    longitude_definitions:
        Text file with the longitude of each grid-cell centre.
    """
    with h5netcdf.File(add_grid_to, "a") as f:
        try:
            xc_var = f.create_variable(
                "xc",
                dimensions=("y", "x"),
                # NOTE(review): the parsed array is flattened here, while the
                # except-branch below assigns the 2-D (900, 900) array —
                # confirm which shape h5netcdf expects for a ("y", "x")
                # variable.
                data=parse_longitude_definitions(longitude_definitions).flatten(),
            )
            xc_var.attrs["long_name"] = "longitude of grid cell center"
            xc_var.attrs["units"] = "degrees_east"
            xc_var.attrs["bounds"] = "xv"
        except ValueError:
            # Presumably raised when the variable already exists; fall back
            # to overwriting the existing variable's contents.
            xc_var = f["xc"]
            xc_var[...] = parse_longitude_definitions(longitude_definitions)
        try:
            yc_var = f.create_variable(
                "yc",
                dimensions=("y", "x"),
                data=parse_latitude_definitions(latitude_definitions).flatten(),
            )
            yc_var.attrs["long_name"] = "latitude of grid cell center"
            yc_var.attrs["units"] = "degrees_north"
            yc_var.attrs["bounds"] = "yv"
        except ValueError:
            yc_var = f["yc"]
            yc_var[...] = parse_latitude_definitions(latitude_definitions)

        # Advertise the 2-D coordinates on the rain variable.
        rain_var = f["rain"]
        rain_var.attrs["coordinates"] = "yc xc"
def parse_longitude_definitions(coord_definition_path) -> np.array:
    """Parse a 900x900 longitude grid from fixed-width (8 character) columns."""
    values = []
    with open(coord_definition_path) as handle:
        for line in handle.readlines():
            # Each line holds consecutive 8-character float fields; any
            # trailing partial chunk (e.g. the newline) is dropped.
            values.extend(float(piece) for piece in chunk_str(line, 8))
    return np.array(values, dtype=float).reshape(900, 900)
def parse_latitude_definitions(coord_definition_path) -> np.array:
    """Parse the latitude grid (same fixed-width format) with row order reversed."""
    grid = parse_longitude_definitions(coord_definition_path)
    return grid[::-1]
def chunk_str(iterable: Iterable, n: int) -> Iterable[str]:
    """Yield the elements of *iterable* joined into strings of length n.

    A trailing chunk shorter than n is silently dropped, e.g.
    ``chunk_str('ABCDEFG', 3)`` yields ``'ABC', 'DEF'``.
    """
    # zip over n references to a single iterator -> consecutive,
    # non-overlapping groups of n elements.
    same_iterator = [iter(iterable)] * n
    return ("".join(map(str, group)) for group in zip(*same_iterator))
# Run the grid update when executed as a script.
if __name__ == "__main__":
    main()
| 30.780488 | 88 | 0.591125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 460 | 0.18225 |
644f7c66068f7d5fd0fdd79a713ae1563438709a | 6,781 | py | Python | test/test_13_BNetwork_class.py | geolovic/topopy | 0ccfc4bfc0364b99489d08a1d4b87582deb08b81 | [
"MIT"
] | 5 | 2020-04-05T18:42:45.000Z | 2022-02-17T11:15:32.000Z | test/test_13_BNetwork_class.py | geolovic/topopy | 0ccfc4bfc0364b99489d08a1d4b87582deb08b81 | [
"MIT"
] | null | null | null | test/test_13_BNetwork_class.py | geolovic/topopy | 0ccfc4bfc0364b99489d08a1d4b87582deb08b81 | [
"MIT"
] | 5 | 2019-07-02T11:14:54.000Z | 2021-12-15T08:43:42.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on 09 february, 2021
Testing suite for BNetwork class
@author: J. Vicente Perez
@email: geolovic@hotmail.com
@date: 09 february, 2021
"""
import unittest
import os
import numpy as np
from topopy import Flow, Basin, Network, BNetwork, DEM
from topopy.network import NetworkError
infolder = "data/in"
outfolder = "data/out"
class BNetworkClassTest(unittest.TestCase):
# Indices de las cabeceras que deben de salir (para comprobar)
results = {"small25":dict([(1, 16171), (2, 9354), (3,1463)]),
"jebja30":dict([(1, 151755), (2, 44786), (3, 48709), (4, 3819)]),
"tunez":dict([(1, 77552), (2, 30013), (3, 7247)])}
def test_BNetwork_class00(self):
"""
Test00 Crea BNetwork para cuencas de prueba a partir de un Grid de cuencas
Sin utilizar cabeceras
"""
files = ["small25", "jebja30", "tunez"]
for file in files:
# Cargamos DEM, Flow, Network
fd = Flow("{}/{}_fd.tif".format(infolder, file))
net = Network("{}/{}_net.dat".format(infolder, file))
# Cargamos outlets y generamos cuencas
outlets = np.loadtxt("{}/{}_bnet_outlets.txt".format(infolder, file), delimiter=";")
outlets = net.snap_points(outlets)
cuencas = fd.get_drainage_basins(outlets)
for bid in np.unique(cuencas.read_array()):
if bid == 0:
continue
bnet = BNetwork(net, cuencas, None, bid)
self.assertEqual(int(bnet._heads[0]), self.results[file][bid])
def test_BNetwork_class01(self):
"""
Test00 Crea BNetwork para cuencas de prueba a partir de un objeto Basin
Sin utilizar cabeceras
"""
files = ["small25", "jebja30", "tunez"]
for file in files:
# Cargamos DEM, Flow, Network
fd = Flow("{}/{}_fd.tif".format(infolder, file))
dem = DEM("{}/{}.tif".format(infolder, file))
net = Network("{}/{}_net.dat".format(infolder, file))
# Cargamos outlets y generamos cuencas
outlets = np.loadtxt("{}/{}_bnet_outlets.txt".format(infolder, file), delimiter=";")
outlets = net.snap_points(outlets)
cuencas = fd.get_drainage_basins(outlets)
for bid in np.unique(cuencas.read_array()):
if bid == 0:
continue
basin = Basin(dem, cuencas, bid)
bnet = BNetwork(net, basin)
# Este test solo verifica que se realice sin fallos y que
# el objeto bnet tiene una única cabecera
bnet = BNetwork(net, cuencas, None, bid)
self.assertEqual(int(bnet._heads[0]), self.results[file][bid])
def test_BNetwork_class03(self):
"""
Test que prueba cabeceras en cuenca 1 con small25
474260.9;4114339.6;3
474856.9;4114711.1;2
"""
# Cargamos DEM, Flow, Network
fd = Flow("{}/{}_fd.tif".format(infolder, "small25"))
net = Network("{}/{}_net.dat".format(infolder, "small25"))
# Cargamos outlets, heads y generamos cuencas
outlets = np.loadtxt("{}/{}_bnet_outlets.txt".format(infolder, "small25"), delimiter=";")
heads = np.loadtxt("{}/{}_bnet_heads.txt".format(infolder, "small25"), delimiter=";")
outlets = net.snap_points(outlets)
cuencas = fd.get_drainage_basins(outlets)
bid = 1
bnet = BNetwork(net, cuencas, heads, bid)
self.assertEqual(np.array_equal(bnet._heads, np.array([13494, 16171])), True)
def test_BNetwork_class04(self):
"""
Test que prueba cabeceras en cuenca 1 con small25 (sin utilizar id field)
474260.9;4114339.6;3
474856.9;4114711.1;2
"""
# Cargamos DEM, Flow, Network
fd = Flow("{}/{}_fd.tif".format(infolder, "small25"))
net = Network("{}/{}_net.dat".format(infolder, "small25"))
# Cargamos outlets, heads y generamos cuencas
outlets = np.loadtxt("{}/{}_bnet_outlets.txt".format(infolder, "small25"), delimiter=";")
heads = np.loadtxt("{}/{}_bnet_heads.txt".format(infolder, "small25"), delimiter=";")
# Remove the id column
heads = heads[:,:-1]
outlets = net.snap_points(outlets)
cuencas = fd.get_drainage_basins(outlets)
bid = 1
bnet = BNetwork(net, cuencas, heads, bid)
self.assertEqual(np.array_equal(bnet._heads, np.array([16171, 13494])), True)
def test_BNetwork_class05(self):
"""
Test de creado masivo de cuencas con cabeceras aleatorias
"""
files = ["small25", "jebja30", "tunez"]
for file in files:
# Cargamos DEM, Flow, Network
fd = Flow("{}/{}_fd.tif".format(infolder, file))
net = Network("{}/{}_net.dat".format(infolder, file))
dem = DEM("{}/{}.tif".format(infolder, file))
# Generamos todas las cuencas
cuencas = fd.get_drainage_basins(min_area = 0.0025)
# Generamos 50 puntos aleatorios dentro de la extensión del objeto Network
# Estos 50 puntos se usaran como cabeceras
xmin, xmax, ymin, ymax = net.get_extent()
xi = np.random.randint(xmin, xmax, 50)
yi = np.random.randint(ymin, ymax, 50)
heads = np.array((xi, yi)).T
# Cogemos 5 cuencas aleatorias
bids = np.random.choice(np.unique(cuencas.read_array())[1:], 5)
for bid in bids:
try:
if np.random.randint(100) < 70:
bnet = BNetwork(net, cuencas, heads, bid)
else:
basin = Basin(dem, cuencas, bid)
bnet = BNetwork(net, basin, heads)
except NetworkError:
print("Network of {} file inside the basin {} has not enough pixels".format(file, bid))
continue
# Salvamos BNetwork y volvemos a cargar para comprobar que se cargan-guardan bien
bnet_path = "{}/{}_{}_bnet.dat".format(outfolder, file, bid)
bnet.save(bnet_path)
bnet2 = BNetwork(bnet_path)
computed = np.array_equal(bnet._ix, bnet2._ix)
self.assertEqual(computed, True)
# borramos archivo
os.remove(bnet_path)
if __name__ == "__main__":
unittest.main() | 41.347561 | 107 | 0.550214 | 6,350 | 0.936164 | 0 | 0 | 0 | 0 | 0 | 0 | 2,216 | 0.326699 |
644fa580a42f5f6ad41d146a84a6c4b37d698c96 | 1,053 | py | Python | modoboa_postfix_autoreply/migrations/0005_auto_20151202_1623.py | modoboa/modoboa-postfix-autoreply | 353c62e51a0ecd011d056264422d74fcd571f05b | [
"MIT"
] | 5 | 2017-06-23T08:18:52.000Z | 2021-02-17T07:09:24.000Z | modoboa_postfix_autoreply/migrations/0005_auto_20151202_1623.py | modoboa/modoboa-postfix-autoreply | 353c62e51a0ecd011d056264422d74fcd571f05b | [
"MIT"
] | 78 | 2015-05-02T09:19:09.000Z | 2022-02-28T02:07:05.000Z | modoboa_postfix_autoreply/migrations/0005_auto_20151202_1623.py | modoboa/modoboa-postfix-autoreply | 353c62e51a0ecd011d056264422d74fcd571f05b | [
"MIT"
] | 10 | 2015-05-05T10:19:23.000Z | 2020-04-09T05:20:59.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def remove_useless_aliases(apps, schema_editor):
"""Remove aliases linked to disabled messages."""
ARmessage = apps.get_model("modoboa_postfix_autoreply", "ARmessage")
AliasRecipient = apps.get_model("admin", "AliasRecipient")
qset = ARmessage.objects.select_related("mbox", "mbox__domain").filter(
enabled=False)
for armessage in qset:
alr_address = u"{0}@{1}@autoreply.{1}".format(
armessage.mbox.address, armessage.mbox.domain)
try:
alr = AliasRecipient.objects.get(address=alr_address)
except AliasRecipient.DoesNotExist:
continue
alias = alr.alias
alr.delete()
if not alias.recipients_count:
alias.delete()
class Migration(migrations.Migration):
dependencies = [
('modoboa_postfix_autoreply', '0004_delete_alias'),
]
operations = [
migrations.RunPython(remove_useless_aliases),
]
| 30.085714 | 75 | 0.665717 | 206 | 0.195632 | 0 | 0 | 0 | 0 | 0 | 0 | 223 | 0.211776 |
644fa6c7575de74309c593d18054afd49f392625 | 1,343 | py | Python | mhvdb2/models.py | kjnsn/mhvdb2 | ce3fc77f76ca32e2aaeff928b291cc45d041b68f | [
"MIT"
] | null | null | null | mhvdb2/models.py | kjnsn/mhvdb2 | ce3fc77f76ca32e2aaeff928b291cc45d041b68f | [
"MIT"
] | null | null | null | mhvdb2/models.py | kjnsn/mhvdb2 | ce3fc77f76ca32e2aaeff928b291cc45d041b68f | [
"MIT"
] | null | null | null | from mhvdb2 import database
from peewee import *
class BaseModel(Model):
class Meta:
database = database
class Entity(BaseModel):
"""
An Entity sends money to the organisation or recieves money from the
organistaion. Members are a special type of entity.
"""
is_member = BooleanField() # Is the entity a member (past or present)
name = CharField()
email = CharField(null=True) # Email is required for members
phone = CharField(null=True)
reminder_date = DateField(null=True) # When to send reminder to member
joined_date = DateField(null=True) # date the person first joined
agreement_date = DateField(null=True) # date the person agreed to rules
class Payment(BaseModel):
    """
    A Payment is a transaction between an entity and the organisation. A
    payment can be either incoming or outgoing, depending on the sign of
    "amount".
    """
    time = DateTimeField()  # Date & time the payment occurred
    entity = ForeignKeyField(Entity, related_name='payments')
    amount = FloatField()  # signed: positive incoming, negative outgoing
    source = IntegerField(choices=[(0, 'Other'), (1, 'Bank Transfer')])
    is_donation = BooleanField()  # For members, donation vs payment for goods
    notes = TextField(null=True)
    bank_reference = CharField(null=True)  # For bank transfers
    pending = BooleanField()  # True until the payment is confirmed/reconciled
| 35.342105 | 78 | 0.696947 | 1,285 | 0.956813 | 0 | 0 | 0 | 0 | 0 | 0 | 609 | 0.453462 |
64509bf6de10248d798ce10dfca7d707d586dfa0 | 7,214 | py | Python | cogs/tags.py | milindmadhukar/Martin-Garrix-Bot | 571ed68a3eecab34513dd9b12c8527cff865e912 | [
"MIT"
] | 2 | 2021-08-28T07:34:16.000Z | 2021-08-28T11:55:55.000Z | cogs/tags.py | milindmadhukar/Martin-Garrix-Bot | 571ed68a3eecab34513dd9b12c8527cff865e912 | [
"MIT"
] | null | null | null | cogs/tags.py | milindmadhukar/Martin-Garrix-Bot | 571ed68a3eecab34513dd9b12c8527cff865e912 | [
"MIT"
] | 1 | 2022-01-05T05:58:29.000Z | 2022-01-05T05:58:29.000Z | from discord.ext import commands
import discord
from aiohttp import request
import asyncio
from .utils.DataBase.tag import Tag
def setup(bot):
    # discord.py extension entry point: register the Tags cog on the bot.
    bot.add_cog(TagCommands(bot=bot))
class TagCommands(commands.Cog, name="Tags"):
    """Cog implementing per-guild text tags stored in the bot database.

    Tags are short named snippets members can recall with ``!tag <name>``;
    subcommands manage creation, editing, listing, searching and deletion.
    """

    def __init__(self, bot):
        self.bot = bot

    @commands.group(invoke_without_command=True)
    async def tag(self, ctx, *, name: lambda inp: inp.lower()):
        """Main tag group: send the tag's content and bump its use counter."""
        tag = await self.bot.db.get_tag(guild_id=ctx.guild.id, name=name)
        if tag is None:
            await ctx.message.delete(delay=10.0)
            message = await ctx.send('Could not find a tag with that name.')
            return await message.delete(delay=10.0)
        await ctx.send("{}".format(tag.content))
        # Track usage statistics for the tag.
        await self.bot.db.execute("UPDATE tags SET uses = uses + 1 WHERE guild_id = $1 AND name = $2",
                                  ctx.guild.id, name)

    @tag.command()
    async def info(self, ctx, *, name: lambda inp: inp.lower()):
        """Get information regarding the specified tag."""
        tag = await self.bot.db.get_tag(guild_id=ctx.guild.id, name=name)
        if tag is None:
            await ctx.message.delete(delay=10.0)
            message = await ctx.send('Could not find a tag with that name.')
            return await message.delete(delay=10.0)
        # Fall back to the raw ID when the creator is no longer cached.
        author = self.bot.get_user(tag.creator_id)
        author = str(author) if isinstance(author, discord.User) else "(ID: {})".format(tag.creator_id)
        text = "Tag: {name}\n\n```prolog\nCreator: {author}\n    Uses: {uses}\n```"\
            .format(name=name, author=author, uses=tag.uses)
        await ctx.send(text)

    @tag.command()
    @commands.check_any(commands.has_permissions(administrator=True))
    async def create(self, ctx, name: lambda inp: inp.lower(), *, content: str):
        """Create a new tag (admin only)."""
        name = await commands.clean_content().convert(ctx=ctx, argument=name)
        content = await commands.clean_content().convert(ctx=ctx, argument=content)
        user_id = ctx.author.id
        tag = await self.bot.db.get_tag(guild_id=ctx.guild.id, name=name)
        if tag is not None:
            return await ctx.send('A tag with that name already exists.')
        tag = Tag(bot=self.bot, guild_id=ctx.guild.id,
                  creator_id=ctx.author.id, name=name, content=content)
        await tag.post()
        await ctx.send('You have successfully created your tag.')

    @tag.command()
    async def list(self, ctx, member: commands.MemberConverter = None):
        """List your existing tags (or another member's)."""
        member = member or ctx.author
        query = """SELECT name FROM tags WHERE guild_id = $1 AND creator_id = $2 ORDER BY name, uses"""
        records = await self.bot.db.fetch(query, ctx.guild.id, member.id)
        if not records:
            return await ctx.send('No tags found.')
        await ctx.send(
            f"**{len(records)} tags by {'you' if member == ctx.author else str(member)} found on this server.**"
        )
        pager = commands.Paginator()
        for record in records:
            pager.add_line(line=record["name"])
        for page in pager.pages:
            await ctx.send(page)

    @tag.command()
    @commands.cooldown(1, 3600 * 24, commands.BucketType.user)
    async def all(self, ctx: commands.Context):
        """List all existing tags alphabetically ordered and sends them in DMs."""
        records = await self.bot.db.fetch(
            """SELECT name FROM tags WHERE guild_id = $1 ORDER BY name""",
            ctx.guild.id
        )
        if not records:
            return await ctx.send("This server doesn't have any tags.")
        try:
            await ctx.author.send(f"***{len(records)} tags found on this server.***")
        except discord.Forbidden:
            # DMs closed: don't burn the daily cooldown on a failed attempt.
            ctx.command.reset_cooldown(ctx)
            return await ctx.send("Could not dm you...", delete_after=10)
        pager = commands.Paginator()
        for record in records:
            pager.add_line(line=record["name"])
        for page in pager.pages:
            # Small delay between DM pages to stay clear of rate limits.
            await asyncio.sleep(1)
            await ctx.author.send(page)
        await ctx.send("Tags sent in DMs.")

    @tag.command()
    async def edit(self, ctx, name: lambda inp: inp.lower(), *, content: str):
        """Edit a tag (only by its creator)."""
        content = await commands.clean_content().convert(ctx=ctx, argument=content)
        tag = await self.bot.db.get_tag(guild_id=ctx.guild.id, name=name)
        if tag is None:
            await ctx.message.delete(delay=10.0)
            message = await ctx.send('Could not find a tag with that name.')
            return await message.delete(delay=10.0)
        if tag.creator_id != ctx.author.id:
            return await ctx.send('You don\'t have permission to do that.')
        await tag.update(content=content)
        await ctx.send('You have successfully edited your tag.')

    @tag.command()
    async def delete(self, ctx, *, name: lambda inp: inp.lower()):
        """Delete an existing tag (only by its creator)."""
        tag = await self.bot.db.get_tag(guild_id=ctx.guild.id, name=name)
        if tag is None:
            await ctx.message.delete(delay=10.0)
            message = await ctx.send('Could not find a tag with that name.')
            return await message.delete(delay=10.0)
        if tag.creator_id != ctx.author.id:
            return await ctx.send('You don\'t have permission to do that.')
        await tag.delete()
        await ctx.send('You have successfully deleted your tag.')

    @tag.command()
    @commands.cooldown(1, 1, commands.BucketType.user)
    async def search(self, ctx, *, term: str):
        """Search for a tag given a search term."""
        # Fixed: the previous pattern "name LIKE % $2 %" is a SQL syntax
        # error; the wildcard '%' must be concatenated around the parameter.
        query = """SELECT name FROM tags WHERE guild_id = $1 AND name LIKE '%' || $2 || '%' LIMIT 10"""
        records = await self.bot.db.fetch(query, ctx.guild.id, term)
        if not records:
            return await ctx.send("No tags found that has the term in it's name", delete_after=10)
        count = "Maximum of 10" if len(records) == 10 else len(records)
        records = "\n".join([record["name"] for record in records])
        await ctx.send(f"**{count} tags found with search term on this server.**```\n{records}\n```")

    # Add a rename and append feature later maybe
    # NOTE: not decorated with @tag.command(), so this is not yet exposed
    # as a bot command.
    async def rename(self, ctx, name: lambda inp: inp.lower(), *, new_name: lambda inp: inp.lower()):
        """Rename a tag (only by its creator)."""
        new_name = await commands.clean_content().convert(ctx=ctx, argument=new_name)
        tag = await self.bot.db.get_tag(guild_id=ctx.guild.id, name=name)
        if tag is None:
            await ctx.message.delete(delay=10.0)
            message = await ctx.send('Could not find a tag with that name.')
            return await message.delete(delay=10.0)
        if tag.creator_id != ctx.author.id:
            return await ctx.send('You don\'t have permission to do that.')
        await tag.rename(new_name=new_name)
        await ctx.send('You have successfully renamed your tag.')
| 40.301676 | 113 | 0.597449 | 7,018 | 0.972831 | 0 | 0 | 6,110 | 0.846964 | 6,467 | 0.896451 | 1,655 | 0.229415 |
6451437fb5ad823d205624f75c9557035f253ce1 | 16,825 | py | Python | trainer.py | 97chenxa/Multiview2Novelview | 3492948f983e9b97d4b5ada04ae23f49485a54e3 | [
"MIT"
] | 1 | 2019-03-26T12:10:56.000Z | 2019-03-26T12:10:56.000Z | trainer.py | 97chenxa/Multiview2Novelview | 3492948f983e9b97d4b5ada04ae23f49485a54e3 | [
"MIT"
] | null | null | null | trainer.py | 97chenxa/Multiview2Novelview | 3492948f983e9b97d4b5ada04ae23f49485a54e3 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange
from util import log
from pprint import pprint
from input_ops import create_input_ops
from model import Model
import os
import time
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
class Trainer(object):
    """Builds the TF graph (model, optimizers, savers, supervisor) and runs
    the training loop for the flow/pixel modules plus a GAN discriminator.

    Three optimizer groups are constructed: flow (L1), pixel (L1), and the
    GAN pair (pixel-generator / discriminator) used after `gan_start_step`.
    """

    def __init__(self, config, dataset, dataset_test):
        # config: parsed argparse namespace (see main()); dataset/dataset_test
        # are project dataset objects consumed by create_input_ops.
        self.config = config
        hyper_parameter_str = 'bs_{}_lr_flow_{}_pixel_{}_d_{}'.format(
            config.batch_size,
            config.learning_rate_f,
            config.learning_rate_p,
            config.learning_rate_d,
        )
        # Unique, timestamped directory for checkpoints and summaries.
        self.train_dir = './train_dir/%s-%s-%s-num_input-%s-%s' % (
            config.dataset,
            config.prefix,
            hyper_parameter_str,
            str(config.num_input),
            time.strftime("%Y%m%d-%H%M%S")
        )

        if not os.path.exists(self.train_dir): os.makedirs(self.train_dir)
        log.infov("Train Dir: %s", self.train_dir)

        # --- input ops ---
        self.batch_size = config.batch_size

        _, self.batch_train = create_input_ops(
            dataset, self.batch_size, is_training=True)
        _, self.batch_test = create_input_ops(
            dataset_test, self.batch_size, is_training=False)

        # --- create model ---
        self.model = Model(config)

        # --- optimizer ---
        self.global_step = tf.contrib.framework.get_or_create_global_step(graph=None)
        self.learning_rate_p = config.learning_rate_p
        self.learning_rate_f = config.learning_rate_f
        self.learning_rate_d = config.learning_rate_d

        self.check_op = tf.no_op()

        # --- checkpoint and monitoring ---
        # Variables are partitioned by name into flow / pixel / discriminator
        # groups so each sub-network gets its own optimizer and saver.
        all_vars = tf.trainable_variables()

        f_var = [v for v in all_vars if 'Flow' in v.op.name or 'flow' in v.op.name]
        log.warn("********* f_var ********** ")
        slim.model_analyzer.analyze_vars(f_var, print_info=True)

        p_var = [v for v in all_vars if 'Pixel' in v.op.name or 'pixel' in v.op.name]
        log.warn("********* p_var ********** ")
        slim.model_analyzer.analyze_vars(p_var, print_info=True)

        d_var = [v for v in all_vars if v.op.name.startswith('Discriminator')]
        log.warn("********* d_var ********** ")
        slim.model_analyzer.analyze_vars(d_var, print_info=True)

        # the whole model without the discriminator
        g_var = p_var + f_var

        self.f_optimizer = tf.train.AdamOptimizer(
            self.learning_rate_f,
        ).minimize(self.model.flow_loss,
                   var_list=f_var, name='optimizer_flow_loss')

        # Only the pixel optimizers advance global_step.
        self.p_optimizer = tf.train.AdamOptimizer(
            self.learning_rate_p,
        ).minimize(self.model.pixel_loss, global_step=self.global_step,
                   var_list=p_var, name='optimizer_pixel_loss')

        # GAN optimizers use beta1=0.5 (common choice for adversarial training).
        self.p_optimizer_gan = tf.train.AdamOptimizer(
            self.learning_rate_p,
            beta1=0.5
        ).minimize(self.model.pixel_loss_gan, global_step=self.global_step,
                   var_list=p_var, name='optimizer_pixel_loss_gan')

        self.d_optimizer = tf.train.AdamOptimizer(
            self.learning_rate_d,
            beta1=0.5
        ).minimize(self.model.d_loss, global_step=self.global_step,
                   var_list=d_var, name='optimizer_discriminator_loss')

        self.train_summary_op = tf.summary.merge_all(key='train')
        self.test_summary_op = tf.summary.merge_all(key='test')

        # Separate savers allow restoring each sub-network independently.
        self.saver = tf.train.Saver(max_to_keep=100)
        self.pretrain_saver = tf.train.Saver(var_list=all_vars, max_to_keep=1)
        self.pretrain_saver_p = tf.train.Saver(var_list=p_var, max_to_keep=1)
        self.pretrain_saver_f = tf.train.Saver(var_list=f_var, max_to_keep=1)
        self.pretrain_saver_g = tf.train.Saver(var_list=g_var, max_to_keep=1)
        self.pretrain_saver_d = tf.train.Saver(var_list=d_var, max_to_keep=1)
        self.summary_writer = tf.summary.FileWriter(self.train_dir)

        self.max_steps = self.config.max_steps
        self.ckpt_save_step = self.config.ckpt_save_step
        self.log_step = self.config.log_step
        self.test_sample_step = self.config.test_sample_step
        self.write_summary_step = self.config.write_summary_step
        self.gan_start_step = self.config.gan_start_step

        self.checkpoint_secs = 600  # 10 min

        self.supervisor = tf.train.Supervisor(
            logdir=self.train_dir,
            is_chief=True,
            saver=None,
            summary_op=None,
            summary_writer=self.summary_writer,
            save_summaries_secs=300,
            save_model_secs=self.checkpoint_secs,
            global_step=self.global_step,
        )

        session_config = tf.ConfigProto(
            allow_soft_placement=True,
            gpu_options=tf.GPUOptions(allow_growth=True),
            device_count={'GPU': 1},
        )
        self.session = self.supervisor.prepare_or_wait_for_session(config=session_config)

        # Optional checkpoint restores; each flag targets a different
        # variable subset (all / flow / pixel / generator / discriminator).
        self.ckpt_path = config.checkpoint
        if self.ckpt_path is not None:
            log.info("Checkpoint path: %s", self.ckpt_path)
            self.pretrain_saver.restore(self.session, self.ckpt_path, )
            log.info("Loaded the pretrain parameters from the provided checkpoint path")

        self.ckpt_path_f = config.checkpoint_f
        if self.ckpt_path_f is not None:
            log.info("Checkpoint path: %s", self.ckpt_path_f)
            self.pretrain_saver_f.restore(self.session, self.ckpt_path_f)
            log.info("Loaded the pretrain Flow module from the provided checkpoint path")

        self.ckpt_path_p = config.checkpoint_p
        if self.ckpt_path_p is not None:
            log.info("Checkpoint path: %s", self.ckpt_path_p)
            self.pretrain_saver_p.restore(self.session, self.ckpt_path_p)
            log.info("Loaded the pretrain Pixel module from the provided checkpoint path")

        self.ckpt_path_g = config.checkpoint_g
        if self.ckpt_path_g is not None:
            log.info("Checkpoint path: %s", self.ckpt_path_g)
            self.pretrain_saver_g.restore(self.session, self.ckpt_path_g)
            log.info("Loaded the pretrain Generator (Pixel&Flow) module from the provided checkpoint path")

        self.ckpt_path_d = config.checkpoint_d
        if self.ckpt_path_d is not None:
            log.info("Checkpoint path: %s", self.ckpt_path_d)
            self.pretrain_saver_d.restore(self.session, self.ckpt_path_d)
            log.info("Loaded the pretrain Discriminator module from the provided checkpoint path")

    def train(self):
        """Run the main loop: periodic test inference, logging, summaries,
        and checkpointing, switching on GAN losses after gan_start_step."""
        log.infov("Training Starts!")
        pprint(self.batch_train)

        max_steps = self.max_steps
        ckpt_save_step = self.ckpt_save_step
        log_step = self.log_step
        test_sample_step = self.test_sample_step
        write_summary_step = self.write_summary_step
        gan_start_step = self.gan_start_step

        for s in xrange(max_steps):
            # periodic inference
            if s % test_sample_step == 0:
                step, test_summary, p_loss, f_loss, loss, output, step_time = \
                    self.run_test(self.batch_test, step=s, is_train=False)
                self.log_step_message(step, p_loss, f_loss, loss, step_time, is_train=False)
                self.summary_writer.add_summary(test_summary, global_step=step)

            step, train_summary, p_loss, f_loss, loss, output, step_time = \
                self.run_single_step(self.batch_train, step=s,
                                     opt_gan=s > gan_start_step, is_train=True)

            if s % log_step == 0:
                self.log_step_message(step, p_loss, f_loss, loss, step_time)

            if s % write_summary_step == 0:
                self.summary_writer.add_summary(train_summary, global_step=step)

            if s % ckpt_save_step == 0:
                log.infov("Saved checkpoint at %d", s)
                save_path = self.saver.save(
                    self.session, os.path.join(self.train_dir, 'model'),
                    global_step=step)

    def run_single_step(self, batch, step=None, opt_gan=False, is_train=True):
        """Run one optimization step; returns (step, summary, p_loss,
        f_loss, loss, output, elapsed_seconds)."""
        _start_time = time.time()

        batch_chunk = self.session.run(batch)

        fetch = [self.global_step, self.train_summary_op, self.model.output,
                 self.model.pixel_loss, self.model.flow_loss,
                 self.model.loss, self.check_op]

        # fetch optimizers
        if not opt_gan:
            # optimize only l1 losses
            fetch += [self.p_optimizer, self.f_optimizer]
        else:
            # Alternate: update_rate generator steps per discriminator step.
            if step % (self.config.update_rate+1) > 0:
                # train the generator
                fetch += [self.p_optimizer_gan, self.f_optimizer]
            else:
                # train the discriminator
                fetch += [self.d_optimizer]

        fetch_values = self.session.run(
            fetch,
            feed_dict=self.model.get_feed_dict(batch_chunk, step=step)
        )
        [step, summary, output, p_loss, f_loss, loss] = fetch_values[:6]

        _end_time = time.time()

        return step, summary, p_loss, f_loss, loss, output, (_end_time - _start_time)

    def run_test(self, batch, step, is_train=False):
        """Run one inference-only step on a test batch (no optimizers)."""
        _start_time = time.time()

        batch_chunk = self.session.run(batch)

        step, summary, p_loss, f_loss, loss, output = self.session.run(
            [self.global_step, self.test_summary_op,
             self.model.pixel_loss, self.model.flow_loss,
             self.model.loss, self.model.output],
            feed_dict=self.model.get_feed_dict(batch_chunk, step=step, is_training=False)
        )

        _end_time = time.time()

        return step, summary, p_loss, f_loss, loss, output, (_end_time - _start_time)

    def log_step_message(self, step, p_loss, f_loss, loss, step_time, is_train=True):
        """Log one formatted progress line for a train or validation step."""
        if step_time == 0: step_time = 0.001
        log_fn = (is_train and log.info or log.infov)
        log_fn((" [{split_mode:5s} step {step:4d}] " +
                "Loss: {loss:.5f} " +
                "Pixel loss: {p_loss:.5f} " +
                "Flow loss: {f_loss:.5f} " +
                "({sec_per_batch:.3f} sec/batch, {instance_per_sec:.3f} instances/sec) "
                ).format(split_mode=(is_train and 'train' or 'val'),
                         step=step,
                         loss=loss,
                         p_loss=p_loss,
                         f_loss=f_loss,
                         sec_per_batch=step_time,
                         instance_per_sec=self.batch_size / step_time
                         )
               )
def main():
    """Parse CLI arguments, select the dataset module, and start training."""
    import argparse
    parser = argparse.ArgumentParser()

    parser.add_argument('--batch_size', type=int, default=8,
                        help='the mini-batch size')
    parser.add_argument('--prefix', type=str, default='default',
                        help='a nickname for the training')
    parser.add_argument('--dataset', type=str, default='car', choices=[
        'car', 'chair', 'kitti', 'synthia'],
        help='you can add your own dataset here')
    parser.add_argument('--num_input', type=int, default=2,
                        help='the number of source images')
    parser.add_argument('--checkpoint', type=str, default=None,
                        help='load all the parameters including the flow and '
                             'pixel modules and the discriminator')
    parser.add_argument('--checkpoint_p', type=str, default=None,
                        help='load the parameters of the pixel module')
    parser.add_argument('--checkpoint_f', type=str, default=None,
                        help='load the parameters of the flow module')
    parser.add_argument('--checkpoint_g', type=str, default=None,
                        help='load the parameters of both the flow and pixel module')
    parser.add_argument('--checkpoint_d', type=str, default=None,
                        help='load the parameters of the discriminator')
    # Log
    parser.add_argument('--log_step', type=int, default=10,
                        help='the frequency of outputing log info')
    parser.add_argument('--ckpt_save_step', type=int, default=5000,
                        help='the frequency of saving a checkpoint')
    parser.add_argument('--test_sample_step', type=int, default=100,
                        help='the frequency of performing testing inference during training')
    parser.add_argument('--write_summary_step', type=int, default=100,
                        help='the frequency of writing TensorBoard summaries')
    # Learning
    parser.add_argument('--max_steps', type=int, default=10000000,
                        help='the max training iterations')
    parser.add_argument('--learning_rate_p', type=float, default=5e-5,
                        help='the learning rate of the pixel module')
    parser.add_argument('--learning_rate_f', type=float, default=1e-4,
                        help='the learning rate of the flow module')
    parser.add_argument('--learning_rate_d', type=float, default=1e-4,
                        help='the learning rate of the discriminator')
    # Fixed: this weight was declared type=int with a float default (1e-2),
    # so any value passed on the command line failed integer parsing.
    parser.add_argument('--local_confidence_weight', type=float, default=1e-2,
                        help='the weight of the confidence prediction objective')
    # Architecture
    parser.add_argument('--num_res_block_pixel', type=int, default=0,
                        help='the number of residual block in the bottleneck of the pixel module')
    parser.add_argument('--num_res_block_flow', type=int, default=4,
                        help='the number of residual block in the bottleneck of the flow module')
    parser.add_argument('--num_dis_conv_layer', type=int, default=5,
                        help='the number of convolutional layers of the discriminator')
    parser.add_argument('--num_conv_layer', type=int, default=5,
                        help='the number of convolutional layers of '
                             'the encoder of both the flow and pixel modules')
    parser.add_argument('--num_convlstm_block', type=int, default=2,
                        help='the number of residual ConvLSTM block of the pixel module')
    parser.add_argument('--num_convlstm_scale', type=int, default=3,
                        help='how many innermost layers of the pixel module '
                             'have a residual ConvLSTM connection')
    parser.add_argument('--norm_type', type=str, default='None',
                        choices=['batch', 'instance', 'None'],
                        help='the type of normalization')
    # GAN
    parser.add_argument('--gan_type', type=str, default='ls', choices=['ls', 'normal'],
                        help='the type of GAN losses such as LS-GAN, WGAN, etc')
    parser.add_argument('--gan_start_step', type=int, default=5e5,
                        help='start to optimize the GAN loss when the model is stable')
    parser.add_argument('--update_rate', type=int, default=1,
                        help='update G more frequently than D')
    # Multi-scale prediction: this is not reporeted in the paper
    # The main idea is to imporve the flow module by training it to start from
    # predict a coarser flow fields (similar to progressive learning GAN
    # proposed by Karras et al. ICLR 2017)
    parser.add_argument('--num_scale', type=int, default=1,
                        help='the number of multi-scale flow prediction '
                             '(1 means without multi-scale prediction)')
    parser.add_argument('--moving_weight', type=str, default='uniform',
                        choices=['uniform', 'shift', 'step'],
                        help='gradually learn each scale from coarse to fine')
    config = parser.parse_args()

    # Dataset modules are imported lazily so only the selected one is loaded.
    if config.dataset == 'car':
        import datasets.shapenet_car as dataset
    elif config.dataset == 'chair':
        import datasets.shapenet_chair as dataset
    elif config.dataset == 'kitti':
        import datasets.kitti as dataset
    elif config.dataset == 'synthia':
        import datasets.synthia as dataset
    else:
        raise ValueError(config.dataset)

    if 'car' in config.dataset or 'chair' in config.dataset:
        config.dataset_type = 'object'
    else:
        config.dataset_type = 'scene'

    dataset_train, dataset_test = \
        dataset.create_default_splits(config.num_input)

    # Record the image/pose shapes so the model can size its placeholders.
    image, pose = dataset_train.get_data(dataset_train.ids[0])
    config.data_info = np.concatenate([np.asarray(image.shape), np.asarray(pose.shape)])

    trainer = Trainer(config, dataset_train, dataset_test)

    log.warning("dataset: %s", config.dataset)
    trainer.train()
if __name__ == '__main__':
    # Script entry point: parse CLI args, build datasets, start training.
    main()
| 44.39314 | 107 | 0.617177 | 10,383 | 0.617117 | 0 | 0 | 0 | 0 | 0 | 0 | 3,776 | 0.224428 |
6452a951fee3eeea2589839d237ea795bf24e925 | 1,413 | py | Python | pyprof/examples/apex/fused_layer_norm.py | yhgon/PyProf | 7b2bcdde43b5edc416b9defb668126d9778dcce0 | [
"Apache-2.0"
] | null | null | null | pyprof/examples/apex/fused_layer_norm.py | yhgon/PyProf | 7b2bcdde43b5edc416b9defb668126d9778dcce0 | [
"Apache-2.0"
] | null | null | null | pyprof/examples/apex/fused_layer_norm.py | yhgon/PyProf | 7b2bcdde43b5edc416b9defb668126d9778dcce0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import fused_layer_norm_cuda
from apex.normalization import FusedLayerNorm
import pyprof

# Initialize pyprof and wrap the raw CUDA entry points so that calls made
# through apex's FusedLayerNorm show up in the profiler output.
pyprof.init()
pyprof.wrap(fused_layer_norm_cuda, 'forward')
pyprof.wrap(fused_layer_norm_cuda, 'backward')
pyprof.wrap(fused_layer_norm_cuda, 'forward_affine')
pyprof.wrap(fused_layer_norm_cuda, 'backward_affine')

# Sample input: batch of 20 with shape (5, 10, 10) per element.
input = torch.randn(20, 5, 10, 10).cuda()

# With Learnable Parameters
m = FusedLayerNorm(input.size()[1:]).cuda()
output = m(input)
# Without Learnable Parameters
m = FusedLayerNorm(input.size()[1:], elementwise_affine=False).cuda()
output = m(input)
# Normalize over last two dimensions
m = FusedLayerNorm([10, 10]).cuda()
output = m(input)
# Normalize over last dimension of size 10
m = FusedLayerNorm(10).cuda()
output = m(input)
| 30.717391 | 74 | 0.757962 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 828 | 0.585987 |
64530935af15fd81adb5c87356e6dfb3afbacffb | 878 | py | Python | twitter/forms.py | isulim/twitter | 6b2573fbb10272e11d9566189856297bc5b4913e | [
"MIT"
] | null | null | null | twitter/forms.py | isulim/twitter | 6b2573fbb10272e11d9566189856297bc5b4913e | [
"MIT"
] | null | null | null | twitter/forms.py | isulim/twitter | 6b2573fbb10272e11d9566189856297bc5b4913e | [
"MIT"
] | null | null | null | from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from twitter import models
class UserRegisterForm(UserCreationForm):
    """Registration form: Django's built-in user creation plus a required email."""
    email = forms.EmailField()

    class Meta:
        model = User
        fields = ['username', 'email', 'password1', 'password2']
class TweetForm(forms.ModelForm):
    """Form for composing a tweet; only the content field is exposed."""
    class Meta:
        model = models.Tweet
        fields = ['content']
        widgets = {
            'content': forms.Textarea()
        }
class CommentForm(forms.ModelForm):
    """Single-row textarea form for commenting on a tweet."""
    class Meta:
        model = models.Comment
        fields = ['content']
        # Removed invalid ``labels = False``: Django expects Meta.labels to
        # be a dict mapping field names to label strings; the falsy value
        # was silently ignored, so dropping it preserves behavior.
        widgets = {
            'content': forms.Textarea(attrs={'rows': 1, 'cols': 80})
        }
class MessageForm(forms.ModelForm):
    """Form for sending a private message to another user."""
    class Meta:
        model = models.Message
        fields = ['title', 'text', 'receiver']
64555d34145c4c4b0784189b1218daea8d16af32 | 510 | py | Python | class-notes/chapter_13/readline_text.py | rhoenkelevra/python_simple_applications | 28ceb5f9fe7ecf11d606d49463385e92927e8f98 | [
"MIT"
] | null | null | null | class-notes/chapter_13/readline_text.py | rhoenkelevra/python_simple_applications | 28ceb5f9fe7ecf11d606d49463385e92927e8f98 | [
"MIT"
] | null | null | null | class-notes/chapter_13/readline_text.py | rhoenkelevra/python_simple_applications | 28ceb5f9fe7ecf11d606d49463385e92927e8f98 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 26 15:06:57 2021
@author: user24
"""
file = "./data/tsuretsuregusa.txt"
with open(file, "r", encoding="utf_8") as fileobj:
while True:
# set line as value of the file line
line = fileobj.readline()
# removes any white space at the end of string
aline = line.rstrip()
# if line exists, print line
if aline:
print(aline)
# if don't exist, break from loop
else:
break
| 20.4 | 54 | 0.560784 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 263 | 0.515686 |
64572ea93cb97cc8f36d4cdba05a3a5c9a854e9c | 313 | py | Python | appendix_a_comments/reading_comments.py | r00c/automating_excel_with_python | b4fa01aa6bdc2ec1c87a3e9cd00813b100f81f8b | [
"MIT"
] | 43 | 2021-07-01T11:50:38.000Z | 2022-03-26T13:56:15.000Z | appendix_a_comments/reading_comments.py | wenxuefeng3930/automating_excel_with_python | dc2509359e1b14d2ad694f9fe554f3ce1781e497 | [
"MIT"
] | 1 | 2021-11-06T23:06:21.000Z | 2021-11-07T01:24:44.000Z | appendix_a_comments/reading_comments.py | wenxuefeng3930/automating_excel_with_python | dc2509359e1b14d2ad694f9fe554f3ce1781e497 | [
"MIT"
] | 11 | 2021-06-25T02:06:35.000Z | 2022-03-31T14:29:59.000Z | # reading_comments.py
from openpyxl import load_workbook
from openpyxl.comments import Comment
def main(filename, cell):
    """Print the comment attached to *cell* in the active sheet of *filename*."""
    sheet = load_workbook(filename=filename).active
    print(sheet[cell].comment)
if __name__ == "__main__":
    # Inspect cell A1 of the sample workbook when run as a script.
    main("comments.xlsx", "A1")
64572f662356fd4cd4c4f1528b9d651667778d43 | 18,730 | py | Python | qiling/qiling/os/windows/dlls/kernel32/fileapi.py | mrTavas/owasp-fstm-auto | 6e9ff36e46d885701c7419db3eca15f12063a7f3 | [
"CC0-1.0"
] | 2 | 2021-05-05T12:03:01.000Z | 2021-06-04T14:27:15.000Z | qiling/qiling/os/windows/dlls/kernel32/fileapi.py | mrTavas/owasp-fstm-auto | 6e9ff36e46d885701c7419db3eca15f12063a7f3 | [
"CC0-1.0"
] | null | null | null | qiling/qiling/os/windows/dlls/kernel32/fileapi.py | mrTavas/owasp-fstm-auto | 6e9ff36e46d885701c7419db3eca15f12063a7f3 | [
"CC0-1.0"
] | 2 | 2021-05-05T12:03:09.000Z | 2021-06-04T14:27:21.000Z | #!/usr/bin/env python3
#
# Cross Platform and Multi Architecture Advanced Binary Emulation Framework
#
import struct, time, os
from shutil import copyfile
from datetime import datetime
from qiling.exception import *
from qiling.os.windows.const import *
from qiling.os.const import *
from qiling.os.windows.fncc import *
from qiling.os.windows.utils import *
from qiling.os.windows.thread import *
from qiling.os.windows.handle import *
from qiling.exception import *
from qiling.os.windows.structs import *
dllname = 'kernel32_dll'
# DWORD GetFileType(
# HANDLE hFile
# );
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_GetFileType(ql, address, params):
    """Emulate GetFileType: report the type of the given file handle."""
    hFile = params["hFile"]

    # The standard console handles are character devices.
    if hFile in (STD_INPUT_HANDLE, STD_OUTPUT_HANDLE, STD_ERROR_HANDLE):
        return FILE_TYPE_CHAR

    if ql.os.handle_manager.get(hFile) is None:
        raise QlErrorNotImplemented("API not implemented")

    # technically is not always a type_char but.. almost
    return FILE_TYPE_CHAR
# HANDLE FindFirstFileA(
# LPCSTR lpFileName,
# LPWIN32_FIND_DATAA lpFindFileData
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params_type={'LPCSTR': 'POINTER'})
def hook_FindFirstFileA(ql, address, params):
    """Emulate FindFirstFileA: open *lpFileName* inside the rootfs sandbox
    and fill the guest WIN32_FIND_DATAA structure at *lpFindFileData*.

    Returns a new handle id on success, INVALID_HANDLE_VALUE (setting
    last_error) when the path is missing, or ERROR_INVALID_PARAMETER when
    the name is too long.
    """
    filename = params['lpFileName']
    pointer = params['lpFindFileData']

    if filename is None:
        return INVALID_HANDLE_VALUE
    elif len(filename) >= MAX_PATH:
        return ERROR_INVALID_PARAMETER

    # Map the emulated Windows path onto the rootfs sandbox.
    target_dir = os.path.join(ql.rootfs, filename.replace("\\", os.sep))
    # Use the emulator logger instead of a stray print() to stdout.
    ql.log.debug('TARGET_DIR = %s' % target_dir)

    real_path = ql.os.path.transform_to_real_path(filename)
    # Verify the directory is in ql.rootfs to ensure no path traversal has taken place
    if not os.path.exists(real_path):
        ql.os.last_error = ERROR_FILE_NOT_FOUND
        return INVALID_HANDLE_VALUE

    # Check if path exists
    filesize = 0
    try:
        f = ql.os.fs_mapper.open(real_path, mode="r")
        filesize = os.path.getsize(real_path).to_bytes(8, byteorder="little")
    except FileNotFoundError:
        ql.os.last_error = ERROR_FILE_NOT_FOUND
        return INVALID_HANDLE_VALUE

    # Split the 64-bit size into the two DWORD halves WIN32_FIND_DATA expects.
    # (fixed: the low half was masked with 0xffffff — 24 bits — truncating sizes)
    file_size_low = (int.from_bytes(filesize, "little")) & 0xffffffff
    file_size_high = (int.from_bytes(filesize, "little") >> 32)

    # Create a handle for the path
    new_handle = Handle(obj=f)
    ql.os.handle_manager.append(new_handle)

    # Spoof filetime values
    filetime = datetime.now().microsecond.to_bytes(8, byteorder="little")

    find_data = Win32FindData(
        ql,
        FILE_ATTRIBUTE_NORMAL,
        filetime, filetime, filetime,
        file_size_high, file_size_low,
        0, 0,
        filename,
        0, 0, 0, 0,)

    find_data.write(pointer)

    ret = new_handle.id
    return ret
# HANDLE FindFirstFileExA(
# LPCSTR lpFileName,
# FINDEX_INFO_LEVELS fInfoLevelId,
# FINDEX_SEARCH_OPS fSearchOp,
# LPVOID lpSearchFilter,
# DWORD dwAdditionalFlags
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params_type={'LPCSTR': 'POINTER'})
def hook_FindFirstFileExA(ql, address, params):
    """Stub: FindFirstFileExA is not implemented; silently returns None."""
    pass
# HANDLE FindNextFileA(
# LPCSTR lpFileName,
# LPWIN32_FIND_DATAA lpFindFileData
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params_type={'LPCSTR': 'POINTER'})
def hook_FindNextFileA(ql, address, params):
    """Stub: FindNextFileA is not implemented; silently returns None."""
    pass
# BOOL FindClose(
# HANDLE hFindFile
# );
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_FindClose(ql, address, params):
    """Stub: FindClose is not implemented; the find handle is never released."""
    pass
# BOOL ReadFile(
# HANDLE hFile,
# LPVOID lpBuffer,
# DWORD nNumberOfBytesToRead,
# LPDWORD lpNumberOfBytesRead,
# LPOVERLAPPED lpOverlapped
# );
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_ReadFile(ql, address, params):
    """Emulate ReadFile: copy data from the handle (or stdin) into guest
    memory at *lpBuffer* and store the byte count at *lpNumberOfBytesRead*.

    Returns nonzero (success) in all handled cases.
    """
    ret = 1
    hFile = params["hFile"]
    lpBuffer = params["lpBuffer"]
    nNumberOfBytesToRead = params["nNumberOfBytesToRead"]
    lpNumberOfBytesRead = params["lpNumberOfBytesRead"]
    lpOverlapped = params["lpOverlapped"]

    if hFile == STD_INPUT_HANDLE:
        if ql.os.automatize_input:
            # TODO maybe insert a good random generation input
            s = (b"A" * (nNumberOfBytesToRead - 1)) + b"\x00"
        else:
            ql.log.debug("Insert input")
            s = ql.os.stdin.read(nNumberOfBytesToRead)
        # Clamp the input to the requested length.
        slen = len(s)
        read_len = slen
        if slen > nNumberOfBytesToRead:
            s = s[:nNumberOfBytesToRead]
            read_len = nNumberOfBytesToRead
        ql.mem.write(lpBuffer, s)
        ql.mem.write(lpNumberOfBytesRead, ql.pack(read_len))
    else:
        f = ql.os.handle_manager.get(hFile).obj
        data = f.read(nNumberOfBytesToRead)
        ql.mem.write(lpBuffer, data)
        # Report the number of bytes actually read.
        # (fixed: previously packed the lpNumberOfBytesRead pointer itself)
        ql.mem.write(lpNumberOfBytesRead, ql.pack32(len(data)))
    return ret
# BOOL WriteFile(
# HANDLE hFile,
# LPCVOID lpBuffer,
# DWORD nNumberOfBytesToWrite,
# LPDWORD lpNumberOfBytesWritten,
# LPOVERLAPPED lpOverlapped
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params={
    "hFile": HANDLE,
    "lpBuffer": POINTER,
    "nNumberOfBytesToWrite": DWORD,
    "lpNumberOfBytesWritten": POINTER,
    "lpOverlapped": POINTER
})
def hook_WriteFile(ql, address, params):
    """Emulate WriteFile: copy *nNumberOfBytesToWrite* bytes from guest
    memory to the handle (or the emulated stdout).

    Returns 1 on success, 0 with last_error=ERROR_INVALID_HANDLE when the
    handle is unknown.
    """
    ret = 1
    hFile = params["hFile"]
    lpBuffer = params["lpBuffer"]
    nNumberOfBytesToWrite = params["nNumberOfBytesToWrite"]
    lpNumberOfBytesWritten = params["lpNumberOfBytesWritten"]
    lpOverlapped = params["lpOverlapped"]
    if hFile == STD_OUTPUT_HANDLE:
        s = ql.mem.read(lpBuffer, nNumberOfBytesToWrite)
        ql.os.stdout.write(s)
        # Mirror the output into the emulator's string-appearance tracker.
        ql.os.utils.string_appearance(s.decode())
        ql.mem.write(lpNumberOfBytesWritten, ql.pack(nNumberOfBytesToWrite))
    else:
        f = ql.os.handle_manager.get(hFile)
        if f is None:
            # Invalid handle
            ql.os.last_error = ERROR_INVALID_HANDLE
            return 0
        else:
            f = f.obj
        buffer = ql.mem.read(lpBuffer, nNumberOfBytesToWrite)
        f.write(bytes(buffer))
        ql.mem.write(lpNumberOfBytesWritten, ql.pack32(nNumberOfBytesToWrite))
    return ret
def _CreateFile(ql, address, params, name):
    """Shared implementation behind CreateFileA/CreateFileW.

    Opens the requested path through the fs mapper, registers a handle for
    the resulting file object and returns the handle id, or
    INVALID_HANDLE_VALUE with ERROR_FILE_NOT_FOUND on failure.
    """
    file_name = params["lpFileName"]
    desired_access = params["dwDesiredAccess"]
    share_mode = params["dwShareMode"]
    security_attributes = params["lpSecurityAttributes"]
    creation_disposition = params["dwCreationDisposition"]
    flags_and_attributes = params["dwFlagsAndAttributes"]
    template_file = params["hTemplateFile"]

    # Map the Win32 access mask onto a Python open() mode string.
    mode = "wb" if desired_access & GENERIC_WRITE else "r"

    try:
        opened = ql.os.fs_mapper.open(file_name, mode)
    except FileNotFoundError:
        ql.os.last_error = ERROR_FILE_NOT_FOUND
        return INVALID_HANDLE_VALUE

    handle = Handle(obj=opened)
    ql.os.handle_manager.append(handle)
    return handle.id
# HANDLE CreateFileA(
# LPCSTR lpFileName,
# DWORD dwDesiredAccess,
# DWORD dwShareMode,
# LPSECURITY_ATTRIBUTES lpSecurityAttributes,
# DWORD dwCreationDisposition,
# DWORD dwFlagsAndAttributes,
# HANDLE hTemplateFile
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params={
    "lpFileName": STRING,
    "dwDesiredAccess": DWORD,
    "dwShareMode": DWORD,
    "lpSecurityAttributes": POINTER,
    "dwCreationDisposition": DWORD,
    "dwFlagsAndAttributes": DWORD,
    "hTemplateFile": HANDLE
})
def hook_CreateFileA(ql, address, params):
    """ANSI CreateFile: delegate to the shared _CreateFile helper."""
    return _CreateFile(ql, address, params, "CreateFileA")
# HANDLE CreateFileW(
# LPCWSTR lpFileName,
# DWORD dwDesiredAccess,
# DWORD dwShareMode,
# LPSECURITY_ATTRIBUTES lpSecurityAttributes,
# DWORD dwCreationDisposition,
# DWORD dwFlagsAndAttributes,
# HANDLE hTemplateFile
# );
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_CreateFileW(ql, address, params):
    """Wide-char CreateFile: delegate to the shared _CreateFile helper."""
    return _CreateFile(ql, address, params, "CreateFileW")
# DWORD GetTempPathW(
# DWORD nBufferLength,
# LPWSTR lpBuffer
# );
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_GetTempPathW(ql, address, params):
    """Write the emulated temp directory path, UTF-16LE encoded, into the
    caller's buffer and make sure the backing host directory exists.

    NOTE(review): the return value is the encoded *byte* length, while
    Win32 GetTempPathW documents a character count -- confirm callers
    tolerate this.
    """
    # Ensure the backing directory exists on the host rootfs.
    host_temp = os.path.join(ql.rootfs, "Windows", "Temp")
    if not os.path.exists(host_temp):
        os.makedirs(host_temp, 0o755)
    encoded = (ql.os.windir + "Temp" + "\\\x00").encode('utf-16le')
    ql.mem.write(params["lpBuffer"], encoded)
    return len(encoded)
# DWORD GetTempPathA(
# DWORD nBufferLength,
# LPSTR lpBuffer
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params={
    "nBufferLength": DWORD,
    "lpBuffer": POINTER
})
def hook_GetTempPathA(ql, address, params):
    """ANSI variant of GetTempPath: write the emulated temp directory path
    (UTF-8 encoded, NUL terminated) into the caller's buffer and create
    the backing host directory when missing. Returns the byte length."""
    # Ensure the backing directory exists on the host rootfs.
    host_temp = os.path.join(ql.rootfs, "Windows", "Temp")
    if not os.path.exists(host_temp):
        os.makedirs(host_temp, 0o755)
    encoded = (ql.os.windir + "Temp" + "\\\x00").encode('utf-8')
    ql.mem.write(params["lpBuffer"], encoded)
    return len(encoded)
# DWORD GetShortPathNameW(
# LPCWSTR lpszLongPath,
# LPWSTR lpszShortPath,
# DWORD cchBuffer
# );
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_GetShortPathNameW(ql, address, params):
    """Build an 8.3-style short path from the long path and write it,
    UTF-16LE encoded, into the output buffer.

    When the buffer is too small, returns the required byte length without
    writing; otherwise writes and returns the length minus one.
    """
    components = params["lpszLongPath"].split("\\")
    out_ptr = params["lpszShortPath"]
    buf_size = params["cchBuffer"]
    short = components[0]
    for component in components[1:]:
        pieces = component.split(".")
        stem = pieces[0]
        # Only the first extension segment is preserved.
        suffix = "" if len(pieces) == 1 else "." + pieces[1]
        if len(stem) > 8:
            # Collapse long names to the DOS "NAME~1" convention.
            stem = stem[:6] + "~1"
        short += "\\" + stem + suffix
    encoded = (short + "\x00").encode("utf-16le")
    if buf_size < len(encoded):
        return len(encoded)
    ql.mem.write(out_ptr, encoded)
    return len(encoded) - 1
# BOOL GetVolumeInformationW(
# LPCWSTR lpRootPathName,
# LPWSTR lpVolumeNameBuffer,
# DWORD nVolumeNameSize,
# LPDWORD lpVolumeSerialNumber,
# LPDWORD lpMaximumComponentLength,
# LPDWORD lpFileSystemFlags,
# LPWSTR lpFileSystemNameBuffer,
# DWORD nFileSystemNameSize
# );
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_GetVolumeInformationW(ql, address, params):
    """Emulate kernel32 GetVolumeInformationW with values taken from the
    [VOLUME] section of the profile. Writes only into the output pointers
    the caller actually supplied. Returns 1 (TRUE)."""
    root = params["lpRootPathName"]
    if root != 0:
        pt_volume_name = params["lpVolumeNameBuffer"]
        if pt_volume_name != 0:
            # TODO implement
            volume_name = (ql.os.profile["VOLUME"]["name"] + "\x00").encode("utf-16le")
            ql.mem.write(pt_volume_name, volume_name)
        lpMaximumComponentLength = params["lpMaximumComponentLength"]
        if lpMaximumComponentLength != 0:
            # Bug fix: lpMaximumComponentLength is an LPDWORD, so all 4
            # bytes of the DWORD must be written; writing only 2 bytes left
            # stale data in the upper half of the value.
            ql.mem.write(lpMaximumComponentLength, (255).to_bytes(4, byteorder="little"))
        pt_serial_number = params["lpVolumeSerialNumber"]
        if pt_serial_number != 0:
            # TODO maybe has to be int (Win32 defines the serial number as
            # a DWORD, not a UTF-16 string).
            serial_number = (ql.os.profile["VOLUME"]["serial_number"] + "\x00").encode("utf-16le")
            ql.mem.write(pt_serial_number, serial_number)
        pt_system_type = params["lpFileSystemNameBuffer"]
        pt_flag = params["lpFileSystemFlags"]
        if pt_flag != 0:
            # TODO implement
            flag = 0x00020000.to_bytes(4, byteorder="little")
            ql.mem.write(pt_flag, flag)
        if pt_system_type != 0:
            system_type = (ql.os.profile["VOLUME"]["type"] + "\x00").encode("utf-16le")
            ql.mem.write(pt_system_type, system_type)
    else:
        raise QlErrorNotImplemented("API not implemented")
    return 1
# UINT GetDriveTypeW(
# LPCWSTR lpRootPathName
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params_type={'LPCWSTR': 'POINTER'})
def hook_GetDriveTypeW(ql, address, params):
    """Return DRIVE_FIXED for the configured system drive, otherwise
    DRIVE_NO_ROOT_DIR; a NULL root path is not implemented."""
    root = params["lpRootPathName"]
    if root == 0:
        raise QlErrorNotImplemented("API not implemented")
    if root == ql.os.profile["PATH"]["systemdrive"]:
        return DRIVE_FIXED
    # TODO add configuration for drives
    return DRIVE_NO_ROOT_DIR
# BOOL GetDiskFreeSpaceW(
# LPCWSTR lpRootPathName,
# LPDWORD lpSectorsPerCluster,
# LPDWORD lpBytesPerSector,
# LPDWORD lpNumberOfFreeClusters,
# LPDWORD lpTotalNumberOfClusters
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params_type={'LPCWSTR': 'POINTER'})
def hook_GetDiskFreeSpaceW(ql, address, params):
    """Emulate kernel32 GetDiskFreeSpaceW for the configured system drive,
    filling the four output DWORDs from the [VOLUME] profile section.

    Returns 1 (TRUE) on success; other drives are not implemented.
    """
    path = params["lpRootPathName"]
    if path == ql.os.profile["PATH"]["systemdrive"]:
        pt_sectors = params["lpSectorsPerCluster"]
        pt_bytes = params["lpBytesPerSector"]
        pt_free_clust = params["lpNumberOfFreeClusters"]
        pt_total_clust = params["lpTotalNumberOfClusters"]
        # Renamed from `bytes` to avoid shadowing the builtin.
        sectors = ql.os.profile.getint("VOLUME", "sectors_per_cluster").to_bytes(4, byteorder="little")
        bytes_per_sector = ql.os.profile.getint("VOLUME", "bytes_per_sector").to_bytes(4, byteorder="little")
        free_clust = ql.os.profile.getint("VOLUME", "number_of_free_clusters").to_bytes(4, byteorder="little")
        total_clust = ql.os.profile.getint("VOLUME", "number_of_clusters").to_bytes(4, byteorder="little")
        ql.mem.write(pt_sectors, sectors)
        ql.mem.write(pt_bytes, bytes_per_sector)
        ql.mem.write(pt_free_clust, free_clust)
        ql.mem.write(pt_total_clust, total_clust)
    else:
        raise QlErrorNotImplemented("API not implemented")
    # Bug fix: GetDiskFreeSpace returns nonzero on success; returning 0
    # told the emulated program the call had failed.
    return 1
# BOOL CreateDirectoryA(
# LPCSTR lpPathName,
# LPSECURITY_ATTRIBUTES lpSecurityAttributes
# );
@winsdkapi(cc=STDCALL, dllname=dllname)
def hook_CreateDirectoryA(ql, address, params):
    """Emulate kernel32 CreateDirectoryA: create the directory on the host
    side of the rootfs mapping.

    Returns 1 (TRUE) on success, 0 with ERROR_ALREADY_EXISTS when the
    directory is already present.
    """
    # Removed a leftover debug print and an unused duplicate path
    # computation from the original implementation.
    path_name = params["lpPathName"]
    real_path = ql.os.path.transform_to_real_path(path_name)
    # NOTE(review): the original comment claimed the path is verified to
    # stay inside ql.rootfs, but no such check exists here -- confirm
    # transform_to_real_path sandboxes against path traversal.
    if not os.path.exists(real_path):
        os.mkdir(real_path)
        return 1
    else:
        ql.os.last_error = ERROR_ALREADY_EXISTS
        return 0
# DWORD GetFileSize(
# HANDLE hFile,
# LPDWORD lpFileSizeHigh
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params_type={'LPDWORD': 'DWORD'})
def hook_GetFileSize(ql, address, params):
    """Emulate kernel32 GetFileSize: return the host-side size of the file
    behind hFile, or INVALID_FILE_SIZE with ERROR_INVALID_HANDLE."""
    try:
        # Bug fix: the handle manager lives on ql.os, the lookup key is the
        # raw handle value (params['hFile'] is an int with no .file attr),
        # and the file path is on the wrapped object, not the handle.
        handle = ql.os.handle_manager.get(params['hFile'])
        return os.path.getsize(handle.obj.name)
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit
        # still propagate.
        ql.os.last_error = ERROR_INVALID_HANDLE
        return 0xFFFFFFFF  # INVALID_FILE_SIZE
# HANDLE CreateFileMappingA(
# HANDLE hFile,
# LPSECURITY_ATTRIBUTES lpFileMappingAttributes,
# DWORD flProtect,
# DWORD dwMaximumSizeHigh,
# DWORD dwMaximumSizeLow,
# LPCSTR lpName
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params={
    "hFile": HANDLE,
    "lpFileMappingAttributes": POINTER,
    "flProtect": DWORD,
    "dwMaximumSizeHigh": DWORD,
    "dwMaximumSizeLow": DWORD,
    "lpName": STRING,
})
def hook_CreateFileMappingA(ql, address, params):
    """Register a new handle that wraps the source file handle as a file
    mapping object and return its id."""
    mapping_handle = Handle(obj=params['hFile'], name=params['lpName'])
    ql.os.handle_manager.append(mapping_handle)
    return mapping_handle.id
# HANDLE CreateFileMappingW(
# HANDLE hFile,
# LPSECURITY_ATTRIBUTES lpFileMappingAttributes,
# DWORD flProtect,
# DWORD dwMaximumSizeHigh,
# DWORD dwMaximumSizeLow,
# LPCWSTR lpName
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params={
    "hFile": HANDLE,
    "lpFileMappingAttributes": POINTER,
    "flProtect": DWORD,
    "dwMaximumSizeHigh": DWORD,
    "dwMaximumSizeLow": DWORD,
    "lpName": WSTRING,
})
def hook_CreateFileMappingW(ql, address, params):
    """Wide-char variant: register a new handle wrapping the source file
    handle as a file mapping object and return its id."""
    mapping_handle = Handle(obj=params['hFile'], name=params['lpName'])
    ql.os.handle_manager.append(mapping_handle)
    return mapping_handle.id
# LPVOID MapViewOfFile(
# HANDLE hFileMappingObject,
# DWORD dwDesiredAccess,
# DWORD dwFileOffsetHigh,
# DWORD dwFileOffsetLow,
# SIZE_T dwNumberOfBytesToMap
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params={
    "hFileMappingObject": HANDLE,
    "dwDesiredAccess": DWORD,
    "dwFileOffsetHigh": DWORD,
    "dwFileOffsetLow": DWORD,
    "dwNumberOfBytesToMap": DWORD
})
def hook_MapViewOfFile(ql, address, params):
    """Emulate kernel32 MapViewOfFile.

    The first view of a mapping allocates emulated heap memory and records
    the address in a new handle's name field; later views reuse it. When
    the mapping wraps a real file object, its contents are copied into the
    view. Returns the view's base address.
    """
    hFileMappingObject = params['hFileMappingObject']
    dwFileOffsetLow = params['dwFileOffsetLow']
    dwNumberOfBytesToMap = params['dwNumberOfBytesToMap']

    map_file_handle = ql.os.handle_manager.search_by_obj(hFileMappingObject)
    if map_file_handle is None:
        ret = ql.os.heap.alloc(dwNumberOfBytesToMap)
        new_handle = Handle(obj=hFileMappingObject, name=ret)
        ql.os.handle_manager.append(new_handle)
    else:
        ret = map_file_handle.name

    hFile = ql.os.handle_manager.get(hFileMappingObject).obj
    if ql.os.handle_manager.get(hFile):
        f = ql.os.handle_manager.get(hFile).obj
        # Bug fix: `type(f) is file` raised NameError on Python 3 (the py2
        # `file` builtin no longer exists); duck-type the file object.
        if hasattr(f, "seek") and hasattr(f, "read"):
            f.seek(dwFileOffsetLow, 0)
            data = f.read(dwNumberOfBytesToMap)
            ql.mem.write(ret, data)
    return ret
# BOOL UnmapViewOfFile(
# LPCVOID lpBaseAddress
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params={
    "lpBaseAddress": POINTER
})
def hook_UnmapViewOfFile(ql, address, params):
    """Release the heap allocation backing a MapViewOfFile view and drop
    its handle. Returns 1 (TRUE) on success, 0 when the address is not a
    known view."""
    base_address = params['lpBaseAddress']
    view_handle = ql.os.handle_manager.search(base_address)
    if not view_handle:
        return 0
    ql.os.heap.free(view_handle.name)
    ql.os.handle_manager.delete(view_handle.id)
    return 1
# BOOL CopyFileA(
# LPCSTR lpExistingFileName,
# LPCSTR lpNewFileName,
# BOOL bFailIfExists
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params={
    "lpExistingFileName": STRING,
    "lpNewFileName": STRING,
    "bFailIfExists": DWORD
})
def hook_CopyFileA(ql, address, params):
    """Emulate kernel32 CopyFileA on the host side of the rootfs mapping.

    Returns 1 (TRUE) on success, 0 when bFailIfExists is set and the
    destination already exists."""
    src = ql.os.path.transform_to_real_path(params["lpExistingFileName"])
    dst = ql.os.path.transform_to_real_path(params["lpNewFileName"])
    if params["bFailIfExists"] and os.path.exists(dst):
        return 0
    copyfile(src, dst)
    return 1
# BOOL SetFileAttributesA(
# LPCSTR lpFileName,
# DWORD dwFileAttributes
# );
@winsdkapi(cc=STDCALL, dllname=dllname, replace_params={
    "lpFileName": STRING,
    "dwFileAttributes": DWORD
})
def hook_SetFileAttributesA(ql, address, params):
    """Stub: attributes are not emulated; always report success (TRUE)."""
    return 1
| 32.182131 | 110 | 0.665617 | 0 | 0 | 0 | 0 | 13,791 | 0.736305 | 0 | 0 | 6,104 | 0.325894 |
6459bb5a254d67d42306305b964300abdf563dd5 | 1,561 | py | Python | algorithm/merge_sort.py | smartdolphin/recommandation-tutorial | 3bfa8f91a6d2d064db42dfb61c3640e1775e4c31 | [
"MIT"
] | 1 | 2018-10-14T14:19:05.000Z | 2018-10-14T14:19:05.000Z | algorithm/merge_sort.py | smartdolphin/recommandation-tutorial | 3bfa8f91a6d2d064db42dfb61c3640e1775e4c31 | [
"MIT"
] | null | null | null | algorithm/merge_sort.py | smartdolphin/recommandation-tutorial | 3bfa8f91a6d2d064db42dfb61c3640e1775e4c31 | [
"MIT"
] | null | null | null | import unittest
def merge_sort(arr):
    """Return a new list with the elements of *arr* in ascending order,
    using a top-down recursive merge sort. The input is not modified."""

    def _merge(lo, hi):
        # Merge two already-sorted lists into one sorted list.
        merged = []
        i = j = 0
        while i < len(lo) and j < len(hi):
            if lo[i] < hi[j]:
                merged.append(lo[i])
                i += 1
            else:
                merged.append(hi[j])
                j += 1
        # At most one of these tails is non-empty; append the leftovers.
        merged.extend(lo[i:])
        merged.extend(hi[j:])
        return merged

    def _sort(seq):
        # Sequences of length 0 or 1 are already sorted.
        if len(seq) <= 1:
            return seq
        middle = len(seq) // 2
        return _merge(_sort(seq[:middle]), _sort(seq[middle:]))

    return _sort(arr)
class TestMergeSort(unittest.TestCase):
    """Unit tests for merge_sort covering positives, negatives and
    duplicate values."""

    def test(self):
        cases = [
            ([5, 3, 2, 1, 4], [1, 2, 3, 4, 5]),
            ([-1, -10, 2, 4, 1], [-10, -1, 1, 2, 4]),
            ([1, -1, 5, 2, 4, 2], [-1, 1, 2, 2, 4, 5]),
            ([5, 2], [2, 5]),
            ([-1, -1, 2, 5, 4, 2], [-1, -1, 2, 2, 4, 5]),
        ]
        for given, expected in cases:
            self.assertEqual(merge_sort(given), expected)
if __name__ == '__main__':
    # Bug fix: unittest.TestCase() merely instantiated an empty TestCase
    # and ran nothing; unittest.main() actually discovers and runs the
    # tests in this module.
    unittest.main()
| 23.651515 | 47 | 0.434337 | 680 | 0.435618 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.006406 |
6459d5e70633b4a25bd89627161b0973bbe59d67 | 3,382 | py | Python | run_w2v.py | hugochan/K-Competitive-Autoencoder-for-Text-Analytics | 5433de649028a4e021b8ad17cd0ec5da8c726031 | [
"BSD-3-Clause"
] | 133 | 2017-05-30T20:28:24.000Z | 2022-03-10T01:27:43.000Z | run_w2v.py | hugochan/K-Competitive-Autoencoder-for-Text-Analytics | 5433de649028a4e021b8ad17cd0ec5da8c726031 | [
"BSD-3-Clause"
] | 34 | 2017-09-04T08:04:50.000Z | 2022-02-10T01:12:17.000Z | run_w2v.py | hugochan/K-Competitive-Autoencoder-for-Text-Analytics | 5433de649028a4e021b8ad17cd0ec5da8c726031 | [
"BSD-3-Clause"
] | 49 | 2017-07-08T09:30:17.000Z | 2021-07-30T04:37:29.000Z | '''
Created on Jan, 2017
@author: hugo
'''
from __future__ import absolute_import
import argparse
from os import path
import timeit
import numpy as np
from autoencoder.baseline.word2vec import Word2Vec, save_w2v, load_w2v
from autoencoder.baseline.doc_word2vec import doc_word2vec
from autoencoder.utils.io_utils import load_json, dump_json, write_file
from autoencoder.preprocessing.preprocessing import load_corpus
# from autoencoder.datasets.reuters import CorpusIterReuters
from autoencoder.datasets.the20news import CorpusIter20News
# from autoencoder.datasets.movie_review_data import CorpusIterMRD
# from autoencoder.datasets.wiki10plus import CorpusIterWiki10plus
def train(args):
vocab = load_json(args.vocab)
# import pdb;pdb.set_trace()
# load corpus
corpus = CorpusIter20News(args.corpus[0], recursive=True, stem=True, with_docname=False)
# corpus = CorpusIterMRD(args.corpus[0], load_json(args.docnames), stem=True, with_docname=False)
# corpus = CorpusIterWiki10plus(args.corpus[0], load_json(args.docnames), stem=True, with_docname=False)
# corpus = CorpusIterReuters(args.corpus, load_json(args.docnames), with_docname=False)
# print len([1 for x in corpus])
corpus_iter = lambda: ([word for word in sentence if word in vocab] for sentence in corpus)
w2v = Word2Vec(args.n_dim, window=args.window_size, \
negative=args.negative, epoches=args.n_epoch)
start = timeit.default_timer()
w2v.train(corpus_iter)
print 'runtime: %ss' % (timeit.default_timer() - start)
save_w2v(w2v.model, args.save_model)
import pdb;pdb.set_trace()
def test(args):
    """Load a preprocessed corpus and write per-document codes computed as
    averaged word2vec vectors."""
    corpus = load_corpus(args.corpus[0])
    docs, vocab_dict = corpus['docs'], corpus['vocab']
    # NOTE(review): `revdict` is not imported anywhere in this module --
    # presumably it inverts vocab_dict (id -> word); confirm its source,
    # otherwise this line raises NameError at runtime.
    doc_codes = doc_word2vec(docs, revdict(vocab_dict), args.load_model, args.output, avg=True)
def main():
    """Parse the command line and dispatch to train() or test().

    Training requires --train plus -nd/--n_dim; testing requires
    -o/--output and -lm/--load_model.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--train', action='store_true', help='train flag')
    parser.add_argument('--corpus', nargs='*', required=True, type=str, help='path to the corpus dir (in training phase) or file (in test phase)')
    parser.add_argument('-doc', '--docnames', type=str, help='path to the docnames file (in training phase)')
    parser.add_argument('--vocab', required=True, type=str, help='path to the vocab file')
    parser.add_argument('-ne', '--n_epoch', required=True, type=int, help='num of epoches')
    parser.add_argument('-nd', '--n_dim', type=int, help='num of dimensions')
    parser.add_argument('-ws', '--window_size', required=True, type=int, help='window size')
    parser.add_argument('-neg', '--negative', required=True, type=int, help='num of negative samples')
    parser.add_argument('-sm', '--save_model', type=str, default='w2v.mod', help='path to the output model')
    parser.add_argument('-lm', '--load_model', type=str, help='path to the trained model')
    parser.add_argument('-o', '--output', type=str, help='path to the output doc codes file')
    args = parser.parse_args()

    if args.train:
        if not args.n_dim:
            raise Exception('n_dim arg needed in training phase')
        train(args)
    else:
        if not args.output:
            raise Exception('output arg needed in test phase')
        if not args.load_model:
            raise Exception('load_model arg needed in test phase')
        test(args)
# Script entry point.
if __name__ == '__main__':
    main()
| 44.5 | 146 | 0.715257 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,242 | 0.367238 |
645b1f549815ff06f8102522d4899632169c198c | 713 | py | Python | code/udls/datasets/sol_string.py | acids-ircam/lottery_mir | 1440d717d7fd688ac43c1a406602aaf2d5a3842d | [
"MIT"
] | 10 | 2020-07-29T23:12:15.000Z | 2022-03-23T16:27:43.000Z | code/udls/datasets/sol_string.py | acids-ircam/lottery_mir | 1440d717d7fd688ac43c1a406602aaf2d5a3842d | [
"MIT"
] | null | null | null | code/udls/datasets/sol_string.py | acids-ircam/lottery_mir | 1440d717d7fd688ac43c1a406602aaf2d5a3842d | [
"MIT"
] | 1 | 2022-02-06T11:42:28.000Z | 2022-02-06T11:42:28.000Z | from .. import DomainAdaptationDataset, SimpleDataset
SolV4folders = [
"/fast-2/datasets/Solv4_strings_wav/audio/Cello",
"/fast-2/datasets/Solv4_strings_wav/audio/Contrabass",
"/fast-2/datasets/Solv4_strings_wav/audio/Violin",
"/fast-2/datasets/Solv4_strings_wav/audio/Viola"
]
def Solv4Strings_DomainAdaptation(out_database_location, preprocess_function):
    """Build a DomainAdaptationDataset over the four SOL v4 string
    instrument folders, matching *.wav files (size cap 1e11)."""
    return DomainAdaptationDataset(out_database_location, SolV4folders,
                                   preprocess_function, "*.wav", 1e11)
def Solv4Strings_Simple(out_database_location, preprocess_function):
    """Build a SimpleDataset over the four SOL v4 string instrument
    folders, matching *.wav files (size cap 1e11)."""
    return SimpleDataset(out_database_location, SolV4folders,
                         preprocess_function, "*.wav", 1e11)
| 37.526316 | 78 | 0.734923 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 212 | 0.297335 |
645ccc5b1f68aec3345ceb1d48f1b9dfe2ef349e | 1,079 | py | Python | testsuite/cases/cv2.py | jcupitt/pillow-perf | dc71bf8597f73ced42724a2203867ba4000e0640 | [
"MIT"
] | null | null | null | testsuite/cases/cv2.py | jcupitt/pillow-perf | dc71bf8597f73ced42724a2203867ba4000e0640 | [
"MIT"
] | null | null | null | testsuite/cases/cv2.py | jcupitt/pillow-perf | dc71bf8597f73ced42724a2203867ba4000e0640 | [
"MIT"
] | null | null | null | # coding: utf-8
from __future__ import print_function, unicode_literals, absolute_import
import cv2
from .base import BaseTestCase, root
# Pin OpenCV to a single thread so benchmark timings are comparable;
# older OpenCV builds do not expose setNumThreads at all.
try:
    cv2.setNumThreads(1)
except AttributeError:
    print('!!! You are using OpenCV which does not allow you to set '
          'the number of threads')
class Cv2TestCase(BaseTestCase):
    """Benchmark test case backed by OpenCV (cv2) image operations."""

    # Map cv2 interpolation constants to the suite's short filter ids.
    filter_ids = {
        cv2.INTER_AREA: 'sup',
        cv2.INTER_NEAREST: 'ner',
        cv2.INTER_LINEAR: 'bil',
        cv2.INTER_CUBIC: 'bic',
        cv2.INTER_LANCZOS4: 'lzs4',
    }

    def create_test_data(self):
        """Load the reference image, convert it to self.mode ('RGB',
        'RGBA' or 'L') and rescale it to self.size; returns a one-element
        list containing the resulting array."""
        im = cv2.imread(root('resources', 'color_circle.png'),
                        flags=cv2.IMREAD_UNCHANGED)
        if self.mode == 'RGB':
            # Drop the alpha channel.
            im = im[:, :, :3]
        elif self.mode == 'RGBA':
            pass
        elif self.mode == 'L':
            im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
        else:
            raise ValueError('Unknown mode: {}'.format(self.mode))

        # Fine for upscaling
        im = cv2.resize(im, tuple(self.size), interpolation=cv2.INTER_CUBIC)
        return [im]
| 26.317073 | 76 | 0.591288 | 776 | 0.719184 | 0 | 0 | 0 | 0 | 0 | 0 | 204 | 0.189064 |
645cdcce15f683ef36c7e69e585e1850939a7867 | 1,736 | py | Python | App/main.py | uip-pc3/calculadora-de-comisiones-andrew962 | 344c9e04926c810a9549b8899bcb4b4aae071da7 | [
"MIT"
] | null | null | null | App/main.py | uip-pc3/calculadora-de-comisiones-andrew962 | 344c9e04926c810a9549b8899bcb4b4aae071da7 | [
"MIT"
] | null | null | null | App/main.py | uip-pc3/calculadora-de-comisiones-andrew962 | 344c9e04926c810a9549b8899bcb4b4aae071da7 | [
"MIT"
] | null | null | null | """Librerias Importadas"""
from flask import Flask
from flask import render_template
from flask import request
App=Flask(__name__)
@App.route('/')
def index():
    """Main page where the name, surname and sale amount are entered."""
    return render_template('index.html')
@App.route('/porcentaje', methods=['POST'])
def porcentaje():
    """Compute the sales commission, persist the record to archivo.csv and
    render the results page.

    Commission brackets applied to the monthly sale amount:
      > 100000 -> 15%, > 75000 -> 10%, > 50000 -> 7%, > 25000 -> 5%;
      otherwise a "no sales this month" message replaces the amount.
    """
    if request.method == 'POST':
        # Form fields posted from index.html.
        nombre = request.form['nombre']
        apellido = request.form['apellido']
        # The sale amount arrives as a string; convert it for arithmetic.
        venta = float(request.form.get('venta'))
        if venta > 100000:
            r = venta * 0.15
        elif venta > 75000:
            r = venta * 0.10
        elif venta > 50000:
            r = venta * 0.07
        elif venta > 25000:
            r = venta * 0.05
        else:
            r = '¡Usted no ha realizado ventas en el Mes!'
        # Persist the record as one CSV row. The context manager replaces
        # the original unclosed-on-exception open()/close() pair; note the
        # file is still truncated on every request, as before.
        with open("archivo.csv", "w") as csv_file:
            csv_file.write("%s,%s,%s,%s" % (nombre, apellido, venta, r))
        # Hand the computed values to the template.
        return render_template('porcentaje.html', nom=nombre, ape=apellido, ven=venta, rr=r)
if __name__=="__main__":
App.run() | 32.148148 | 88 | 0.595046 | 0 | 0 | 0 | 0 | 1,558 | 0.896949 | 0 | 0 | 754 | 0.434082 |
645d0a77c8595a137c03943b9accce405a4f8a4c | 2,407 | py | Python | waste_flow/spreading.py | xapple/waste_flow | 7ec6789de6364fb535f4bac4c6a50e0656c9279a | [
"MIT"
] | 1 | 2020-06-08T12:39:44.000Z | 2020-06-08T12:39:44.000Z | waste_flow/spreading.py | xapple/waste_flow | 7ec6789de6364fb535f4bac4c6a50e0656c9279a | [
"MIT"
] | 2 | 2021-02-14T13:54:28.000Z | 2021-02-19T14:02:32.000Z | waste_flow/spreading.py | xapple/waste_flow | 7ec6789de6364fb535f4bac4c6a50e0656c9279a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Written by Lucas Sinclair.
JRC Biomass Project.
Unit D1 Bioeconomy.
Typically you can use this class like this:
>>> from waste_flow.spreading import spread
>>> print(spread.by_nace)
"""
# Built-in modules #
# Internal modules #
from waste_flow.common import spread_coefs, nace_selected
# First party modules #
from plumbing.cache import property_cached
# Third party modules #
###############################################################################
class WasteSpreading:
    """Provides the waste spreading coefficients, one dataframe per NACE
    sector, built from the `spread_coefs` table."""

    # ----------------------------- Properties ------------------------------ #
    @property_cached
    def by_nace(self):
        """
        Returns a dictionary where every key is a nace sector name
        and every value is a dataframe looking like this:

                     W072  W073  W075  W076  W091  W092  W093    W101
            waste
            W072_hh   1.0  0.00   0.0   0.0   0.0   0.0   0.0  0.1505
            W075_hh   0.0  0.00   1.0   0.0   0.0   0.0   0.0  0.0075
            W091_hh   0.0  0.00   0.0   0.0   1.0   0.0   0.0  0.2500
            W092_hh   0.0  0.00   0.0   0.0   0.0   1.0   0.0  0.0400
            W7376_hh  0.0  0.54   0.0   0.4   0.0   0.0   0.0  0.0024
            W9999_hh  0.0  0.00   0.0   0.0   0.0   0.0   0.0  0.0335
        """
        # Make a dataframe for every sheet in the original xls #
        result = {k: df for k, df in spread_coefs.groupby('nace')}
        # Remove the default spreading from the dict #
        default_spread = result.pop('default')
        # Apply the default to all non-mentioned nace #
        result = {k: result[k]
                     if k in result
                     else default_spread.copy()
                  for k in nace_selected}
        # Format each dataframe #
        result = {k: self.format_spread(df) for k, df in result.items()}
        # Return #
        return result

    # ------------------------------ Methods -------------------------------- #
    def format_spread(self, df):
        """Format a given dataframe: index it by waste code and treat
        missing coefficients as zero."""
        # Drop columns #
        df = df.drop(columns=['nace'])
        # Set index #
        df = df.set_index('waste')
        # Assume zero everywhere #
        df = df.fillna(0)
        # Return #
        return df
return df
###############################################################################
# Create singleton: the module-level instance everybody imports #
spread = WasteSpreading()
| 32.527027 | 79 | 0.493145 | 1,753 | 0.728292 | 0 | 0 | 1,273 | 0.528874 | 0 | 0 | 1,591 | 0.660989 |
645fc4bf818569364ce3bf0cb6225d48dc5020d1 | 13,890 | py | Python | tests/test_deploy.py | lobziik/ocdeployer | 092e65fb6d8868f262980d6221518433de1345f4 | [
"MIT"
] | null | null | null | tests/test_deploy.py | lobziik/ocdeployer | 092e65fb6d8868f262980d6221518433de1345f4 | [
"MIT"
] | null | null | null | tests/test_deploy.py | lobziik/ocdeployer | 092e65fb6d8868f262980d6221518433de1345f4 | [
"MIT"
] | null | null | null | import pytest
from ocdeployer.secrets import SecretImporter
from ocdeployer.deploy import DeployRunner
from ocdeployer.env import EnvConfigHandler, LegacyEnvConfigHandler
def patched_runner(env_values, mock_load_vars_per_env, legacy=False):
    """Build a DeployRunner whose env-config loader is replaced by a stub.

    env_values -- env names (or env files when legacy=True); falsy means
                  no env handler at all
    legacy     -- build a LegacyEnvConfigHandler instead of the new-style
                  EnvConfigHandler
    """
    env_handler = None
    if env_values:
        if legacy:
            env_handler = LegacyEnvConfigHandler(env_files=env_values)
            env_handler.env_names = env_values
        else:
            env_handler = EnvConfigHandler(env_names=env_values, env_dir_name="envTEST")

    runner = DeployRunner(None, "test-project", env_handler, None, ["service"], None, None, [])
    runner.base_env_path = "base/envTEST"
    if env_handler:
        # Swap the real on-disk loader for the test stub.
        runner.env_config_handler._load_vars_per_env = mock_load_vars_per_env
    return runner
def build_mock_loader(base_env_data, service_set_env_data={}):
    """Return a stub for EnvConfigHandler._load_vars_per_env.

    The stub dispatches on the requested path: no path (or a base env
    directory) yields `base_env_data`, a service-set env directory yields
    `service_set_env_data`, anything else an empty dict.

    NOTE: the mutable default {} is shared across calls, but the loader
    never mutates it, so it is safe here.
    """
    def mock_load_vars_per_env(path=None):
        print(f"Mock loader received path: {path}")
        if path is None:
            return base_env_data
        # Bug fix: the original tested `"base" in "path"` -- a string
        # literal, which is always False -- so base env data was never
        # served for explicit base paths.
        if "base" in path and path.endswith("envTEST"):
            print("Loading mock base data")
            return base_env_data
        if "templates" in path and "service" in path and path.endswith("envTEST"):
            print("Loading mock service set data")
            return service_set_env_data
        return {}

    return mock_load_vars_per_env
def test__no_env_given():
    """With no env handler, only the implicit NAMESPACE/SECRETS_PROJECT
    parameters are produced."""
    expected = {
        "parameters": {
            "NAMESPACE": "test-project",
            "SECRETS_PROJECT": SecretImporter.source_project,
        },
    }

    runner = patched_runner(None, None, legacy=False)
    assert runner._get_variables("service", "templates/service", "some_component") == expected
@pytest.mark.parametrize("legacy", (True, False), ids=("legacy=true", "legacy=false"))
def test__get_variables_sanity(legacy, patch_os_path):
    """Service-level env data is returned as-is, with the implicit
    NAMESPACE/SECRETS_PROJECT parameters merged in."""
    mock_var_data = {
        "test_env": {
            "service": {
                "enable_routes": False,
                "enable_db": False,
                "parameters": {"STUFF": "things"},
            }
        }
    }

    expected = {
        "enable_routes": False,
        "enable_db": False,
        "parameters": {
            "STUFF": "things",
            "NAMESPACE": "test-project",
            "SECRETS_PROJECT": SecretImporter.source_project,
        },
    }

    runner = patched_runner(["test_env"], build_mock_loader(mock_var_data), legacy)
    assert runner._get_variables("service", "templates/service", "some_component") == expected
@pytest.mark.parametrize("legacy", (True, False), ids=("legacy=true", "legacy=false"))
def test__get_variables_merge_from_global(legacy, patch_os_path):
    """Global, service and component level variables/parameters are all
    merged into the final result."""
    mock_var_data = {
        "test_env": {
            "global": {"global_variable": "global-value", "parameters": {"GLOBAL": "things"}},
            "service": {"service_variable": True, "parameters": {"STUFF": "service-stuff"}},
            "service/component": {
                "component_variable": "component",
                "parameters": {"COMPONENT": "component-param"},
            },
        }
    }

    expected = {
        "component_variable": "component",
        "global_variable": "global-value",
        "service_variable": True,
        "parameters": {
            "COMPONENT": "component-param",
            "GLOBAL": "things",
            "STUFF": "service-stuff",
            "NAMESPACE": "test-project",
            "SECRETS_PROJECT": SecretImporter.source_project,
        },
    }

    runner = patched_runner(["test_env"], build_mock_loader(mock_var_data), legacy)
    assert runner._get_variables("service", "templates/service", "component") == expected
@pytest.mark.parametrize("legacy", (True, False), ids=("legacy=true", "legacy=false"))
def test__get_variables_service_overwrite_parameter(legacy, patch_os_path):
    """A service-level parameter overrides the same parameter defined at
    the global level."""
    mock_var_data = {
        "test_env": {
            "global": {"parameters": {"STUFF": "things"}},
            "service": {"parameters": {"STUFF": "service-stuff"}},
        }
    }

    expected = {
        "parameters": {
            "STUFF": "service-stuff",
            "NAMESPACE": "test-project",
            "SECRETS_PROJECT": SecretImporter.source_project,
        }
    }

    runner = patched_runner(["test_env"], build_mock_loader(mock_var_data), legacy)
    assert runner._get_variables("service", "templates/service", "component") == expected
@pytest.mark.parametrize("legacy", (True, False), ids=("legacy=true", "legacy=false"))
def test__get_variables_service_overwrite_variable(legacy, patch_os_path):
    """A service-level variable overrides the same variable defined at the
    global level."""
    mock_var_data = {"test_env": {"global": {"enable_db": False}, "service": {"enable_db": True}}}

    expected = {
        "enable_db": True,
        "parameters": {
            "NAMESPACE": "test-project",
            "SECRETS_PROJECT": SecretImporter.source_project,
        },
    }

    runner = patched_runner(["test_env"], build_mock_loader(mock_var_data), legacy)
    assert runner._get_variables("service", "templates/service", "component") == expected
@pytest.mark.parametrize("legacy", (True, False), ids=("legacy=true", "legacy=false"))
def test__get_variables_component_overwrite_parameter(legacy, patch_os_path):
    """A component-level parameter overrides the same parameter defined at
    the service level, while unrelated global parameters pass through."""
    mock_var_data = {
        "test_env": {
            "global": {"parameters": {"STUFF": "things"}},
            "service": {"parameters": {"THINGS": "service-things"}},
            "service/component": {"parameters": {"THINGS": "component-things"}},
        }
    }

    expected = {
        "parameters": {
            "STUFF": "things",
            "THINGS": "component-things",
            "NAMESPACE": "test-project",
            "SECRETS_PROJECT": SecretImporter.source_project,
        }
    }

    runner = patched_runner(["test_env"], build_mock_loader(mock_var_data), legacy)
    assert runner._get_variables("service", "templates/service", "component") == expected
@pytest.mark.parametrize("legacy", (True, False), ids=("legacy=true", "legacy=false"))
def test__get_variables_component_overwrite_variable(legacy, patch_os_path):
    """A component-level variable overrides the same variable defined at
    the service level, while unrelated global variables pass through."""
    mock_var_data = {
        "test_env": {
            "global": {"enable_routes": False},
            "service": {"enable_db": True},
            "service/component": {"enable_db": False},
        }
    }

    expected = {
        "enable_routes": False,
        "enable_db": False,
        "parameters": {
            "NAMESPACE": "test-project",
            "SECRETS_PROJECT": SecretImporter.source_project,
        },
    }

    runner = patched_runner(["test_env"], build_mock_loader(mock_var_data), legacy)
    assert runner._get_variables("service", "templates/service", "component") == expected
def test__get_variables_base_and_service_set(patch_os_path):
    """Base-level env data and service-set-level env data are merged; the
    new-style (non-legacy) handler reads both locations."""
    base_var_data = {
        "test_env": {
            "global": {"global_var": "base_global", "parameters": {"GLOBAL_PARAM": "things"}}
        }
    }

    service_set_var_data = {
        "test_env": {
            "global": {"global_set_var": "set_global", "parameters": {"PARAM": "something"}},
            "component": {"component_var": "something", "parameters": {"ANOTHER_PARAM": "stuff"}},
        }
    }

    expected = {
        "global_var": "base_global",
        "global_set_var": "set_global",
        "component_var": "something",
        "parameters": {
            "GLOBAL_PARAM": "things",
            "PARAM": "something",
            "ANOTHER_PARAM": "stuff",
            "NAMESPACE": "test-project",
            "SECRETS_PROJECT": SecretImporter.source_project,
        },
    }

    runner = patched_runner(["test_env"], build_mock_loader(base_var_data, service_set_var_data))
    assert runner._get_variables("service", "templates/service", "component") == expected
def test__get_variables_service_set_only(patch_os_path):
    """Service-set env data alone is sufficient; an empty base config
    contributes nothing but the implicit parameters."""
    base_var_data = {}

    service_set_var_data = {
        "test_env": {
            "global": {"global_set_var": "set_global", "parameters": {"PARAM": "something"}},
            "component": {"component_var": "something", "parameters": {"ANOTHER_PARAM": "stuff"}},
        }
    }

    expected = {
        "global_set_var": "set_global",
        "component_var": "something",
        "parameters": {
            "PARAM": "something",
            "ANOTHER_PARAM": "stuff",
            "NAMESPACE": "test-project",
            "SECRETS_PROJECT": SecretImporter.source_project,
        },
    }

    runner = patched_runner(["test_env"], build_mock_loader(base_var_data, service_set_var_data))
    assert runner._get_variables("service", "templates/service", "component") == expected
def test__get_variables_service_set_overrides(patch_os_path):
    """Service-set env data overrides the corresponding base-level data
    for the same keys."""
    base_var_data = {
        "test_env": {
            "global": {"global_var": "base_global", "parameters": {"GLOBAL_PARAM": "things"}},
            "service": {"global_set_var": "blah", "parameters": {"PARAM": "blah"}},
            "service/component": {"component_var": "override this"},
        }
    }

    service_set_var_data = {
        "test_env": {
            "global": {"global_set_var": "set_global", "parameters": {"PARAM": "something"}},
            "component": {"component_var": "something", "parameters": {"ANOTHER_PARAM": "stuff"}},
        }
    }

    expected = {
        "global_var": "base_global",
        "global_set_var": "set_global",
        "component_var": "something",
        "parameters": {
            "GLOBAL_PARAM": "things",
            "PARAM": "something",
            "ANOTHER_PARAM": "stuff",
            "NAMESPACE": "test-project",
            "SECRETS_PROJECT": SecretImporter.source_project,
        },
    }

    runner = patched_runner(["test_env"], build_mock_loader(base_var_data, service_set_var_data))
    assert runner._get_variables("service", "templates/service", "component") == expected
def test__get_variables_multiple_envs(patch_os_path):
base_var_data = {
"test_env": {
"global": {"global_var": "base_global1", "parameters": {"GLOBAL_PARAM": "things1"}},
},
"test_env2": {
"global": {"global_var": "base_global2"},
"service/component": {"component_var": "comp2"},
},
"test_env3": {
"global": {
"global_var": "base_global3",
"parameters": {"GLOBAL_PARAM": "things3", "ENV3_PARAM": "env3"},
},
"service/component": {"component_var": "comp3"},
},
}
service_set_var_data = {
"test_env": {"global": {"global_set_var": "set_global1"}},
"test_env2": {
"service/component": {
"component_var": "comp2-set",
"parameters": {"ENV2_PARAM": "env2"},
}
},
}
expected = {
"global_var": "base_global1",
"global_set_var": "set_global1",
"component_var": "comp2-set",
"parameters": {
"GLOBAL_PARAM": "things1",
"ENV3_PARAM": "env3",
"ENV2_PARAM": "env2",
"NAMESPACE": "test-project",
"SECRETS_PROJECT": SecretImporter.source_project,
},
}
runner = patched_runner(
["test_env", "test_env2", "test_env3"],
build_mock_loader(base_var_data, service_set_var_data),
)
assert runner._get_variables("service", "templates/service", "component") == expected
def test__get_variables_multiple_envs_legacy(patch_os_path):
base_var_data = {
"test_env": {
"global": {"global_var": "base_global1", "parameters": {"GLOBAL_PARAM": "things1"}},
},
"test_env2": {
"global": {"global_var": "base_global2"},
"service/component": {"component_var": "comp2"},
},
"test_env3": {
"global": {
"global_var": "base_global3",
"parameters": {"GLOBAL_PARAM": "things3", "ENV3_PARAM": "env3"},
},
"service/component": {"component_var": "comp3"},
},
}
expected = {
"global_var": "base_global1",
"component_var": "comp2",
"parameters": {
"GLOBAL_PARAM": "things1",
"ENV3_PARAM": "env3",
"NAMESPACE": "test-project",
"SECRETS_PROJECT": SecretImporter.source_project,
},
}
runner = patched_runner(
["test_env", "test_env2", "test_env3"], build_mock_loader(base_var_data), legacy=True
)
assert runner._get_variables("service", "templates/service", "component") == expected
def test__get_variables_multiple_envs_precedence(patch_os_path):
base_var_data = {
"test_env1": {
"service/component": {"parameters": {"PARAM": "things1"}},
},
}
service_set_var_data = {
"test_env2": {
"component": {"parameters": {"PARAM": "things2"}},
},
}
expected = {
"parameters": {
"PARAM": "things1",
"NAMESPACE": "test-project",
"SECRETS_PROJECT": SecretImporter.source_project,
},
}
runner = patched_runner(
["test_env1", "test_env2"],
build_mock_loader(base_var_data, service_set_var_data),
)
assert runner._get_variables("service", "templates/service", "component") == expected
def test__get_variables_multiple_envs_precedence_reversed(patch_os_path):
base_var_data = {
"test_env1": {
"service/component": {"parameters": {"PARAM": "things1"}},
},
}
service_set_var_data = {
"test_env2": {
"component": {"parameters": {"PARAM": "things2"}},
},
}
expected = {
"parameters": {
"PARAM": "things2",
"NAMESPACE": "test-project",
"SECRETS_PROJECT": SecretImporter.source_project,
},
}
runner = patched_runner(
["test_env2", "test_env1"],
build_mock_loader(base_var_data, service_set_var_data),
)
assert runner._get_variables("service", "templates/service", "component") == expected
| 33.229665 | 98 | 0.589561 | 0 | 0 | 0 | 0 | 4,891 | 0.352124 | 0 | 0 | 5,155 | 0.37113 |
6460dc559a73dc7e53f1f3a422be42d50e7cc1b0 | 210 | py | Python | app/main/__init__.py | Edwin-Karanu-Muiruri/pitch-perfect | 8d3abaf0898dcfbe57ba1db93043ac6cea1dd0e2 | [
"MIT"
] | null | null | null | app/main/__init__.py | Edwin-Karanu-Muiruri/pitch-perfect | 8d3abaf0898dcfbe57ba1db93043ac6cea1dd0e2 | [
"MIT"
] | null | null | null | app/main/__init__.py | Edwin-Karanu-Muiruri/pitch-perfect | 8d3abaf0898dcfbe57ba1db93043ac6cea1dd0e2 | [
"MIT"
] | null | null | null | from flask import Flask
from flask_bootstrap import Bootstrap
from config import config_options
from flask import Blueprint
main = Blueprint('main',__name__)
from . import views,error
bootstrap = Bootstrap()
| 21 | 37 | 0.814286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0.028571 |
646208f0693f4cb46abcaf9ae8ce2a78afead206 | 2,205 | py | Python | loaner/deployments/lib/password.py | gng-demo/travisfix | 6d64de6dac44d89059eb92f76410fdcc2d41a247 | [
"Apache-2.0"
] | 175 | 2018-03-28T20:33:39.000Z | 2022-03-27T06:02:39.000Z | loaner/deployments/lib/password.py | gng-demo/travisfix | 6d64de6dac44d89059eb92f76410fdcc2d41a247 | [
"Apache-2.0"
] | 111 | 2018-05-22T18:50:59.000Z | 2022-01-23T23:11:15.000Z | loaner/deployments/lib/password.py | gng-demo/travisfix | 6d64de6dac44d89059eb92f76410fdcc2d41a247 | [
"Apache-2.0"
] | 70 | 2018-03-30T01:52:06.000Z | 2021-10-13T11:20:10.000Z | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This library provides a random password generator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
from absl import flags
from absl import logging
_MIN = 8
_MAX = 100
FLAGS = flags.FLAGS
flags.DEFINE_integer(
'password_length', _MAX,
'The length of the password to be generated for the Grab n Go Role Account.'
'\nNOTE: The length must be between 8 and 100 and must be compliant with '
'the G Suite Admin password settings.\nThe Security Settings can be found '
'in the Google Admin console: admin.google.com'
)
flags.register_validator(
'password_length', lambda length: length >= _MIN and length <= _MAX,
'Password length must be between {} and {} characters.'.format(_MIN, _MAX),
)
def generate(length):
"""Generates a new password of a given length.
Args:
length: int, the length of the password to generate.
Returns:
A random password of type string with the given length.
Raises:
ValueError: if the length provided is invalid.
"""
if length < _MIN or length > _MAX:
raise ValueError(
'password length must be between {!r} and {!r} characters length '
'provided was: {!r}'.format(_MIN, _MAX, length))
logging.debug('Generating a password with length: %r.', length)
chars = (
'abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'0123456789'
'!$%^&*()-_=+@:;~#,.<>? '
)
password = ''
rand = random.SystemRandom()
while len(password) < length:
password += rand.choice(chars)
return password
| 29.4 | 80 | 0.711111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,473 | 0.668027 |
64628052cc79203f1662d5c3075c0ef300636aa0 | 732 | py | Python | debug/test_call.py | ccj5351/hmr_rgbd | d1dcf81d72c11e1f502f2c494cd86425f384d9cc | [
"MIT"
] | null | null | null | debug/test_call.py | ccj5351/hmr_rgbd | d1dcf81d72c11e1f502f2c494cd86425f384d9cc | [
"MIT"
] | 1 | 2020-12-09T07:29:00.000Z | 2020-12-09T07:29:00.000Z | debug/test_call.py | ccj5351/hmr_rgbd | d1dcf81d72c11e1f502f2c494cd86425f384d9cc | [
"MIT"
] | null | null | null | # !/usr/bin/env python3
# -*-coding:utf-8-*-
# @file: test_call.py
# @brief:
# @author: Changjiang Cai, ccai1@stevens.edu, caicj5351@gmail.com
# @version: 0.0.1
# @creation date: 09-07-2019
# @last modified: Tue 09 Jul 2019 07:09:07 PM EDT
class Stuff(object):
def __init__(self, x, y, rge):
super(Stuff, self).__init__()
self.x = x
self.y = y
self.range = rge
def __call__(self, x, y):
self.x = x
self.y = y
print '__call__ with (%d,%d)' % (self.x, self.y)
def __del__(self):
del self.x
del self.y
del self.range
print ('delete all')
if __name__ == "__main__":
s = Stuff(1,2,3)
print (s.x)
s(7, 8)
s(14, 10)
| 20.333333 | 65 | 0.546448 | 397 | 0.54235 | 0 | 0 | 0 | 0 | 0 | 0 | 277 | 0.378415 |
64629dd661ec8e7faccc37654ec9059a6243e7b1 | 390 | py | Python | python/hackerrank/strings-xor.py | leewalter/coding | 2afd9dbfc1ecb94def35b953f4195a310d6953c9 | [
"Apache-2.0"
] | null | null | null | python/hackerrank/strings-xor.py | leewalter/coding | 2afd9dbfc1ecb94def35b953f4195a310d6953c9 | [
"Apache-2.0"
] | null | null | null | python/hackerrank/strings-xor.py | leewalter/coding | 2afd9dbfc1ecb94def35b953f4195a310d6953c9 | [
"Apache-2.0"
] | 1 | 2020-08-29T17:12:52.000Z | 2020-08-29T17:12:52.000Z | '''
https://www.hackerrank.com/challenges/strings-xor/submissions/code/102872134
Given two strings consisting of digits 0 and 1 only, find the XOR of the two strings.
'''
def strings_xor(s, t):
res = ""
for i in range(len(s)):
if s[i] != t[i]:
res += '1'
else:
res += '0'
return res
s = input()
t = input()
print(strings_xor(s, t))
| 17.727273 | 85 | 0.571795 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 178 | 0.45641 |
64642512549fbe0dcf2f64eed76704c1c5116562 | 1,365 | py | Python | cleaning_data.py | yoon-gu/dand-p5 | d53443d0e954b4559350ad5e81ebad715e036a69 | [
"BSD-3-Clause"
] | null | null | null | cleaning_data.py | yoon-gu/dand-p5 | d53443d0e954b4559350ad5e81ebad715e036a69 | [
"BSD-3-Clause"
] | null | null | null | cleaning_data.py | yoon-gu/dand-p5 | d53443d0e954b4559350ad5e81ebad715e036a69 | [
"BSD-3-Clause"
] | null | null | null | from pandas import DataFrame, read_csv, cut
import numpy as np
df = read_csv('data/baseball_data.csv')
df = df[(df.avg > 0.0) & (df.HR > 0)]
## Split to 5 intervals using pandas.cut function
df['avg_category'] = cut(df.avg,
bins = np.linspace(0.1, 0.35, 6),
right=False)
## Except 'height', 'weight', 'avg', grouping df by 'avg_category', 'handedness'
## And add all sub-group's entries
avg_handedness_count_df = df.drop(['height', 'weight', 'avg'], axis=1).groupby(by=['avg_category', 'handedness']).sum().reset_index()
avg_partical_count = df.drop(['height', 'weight', 'name', 'avg', 'handedness'], axis=1).groupby(by=['avg_category']).sum()
## Except 'height', 'weight', 'avg', grouping df by 'avg_category'
## And add all sub-group's entries
## This is for compute HR ratio
avg_count_df = avg_partical_count.add_prefix('total_').reset_index()
final_df = avg_handedness_count_df.merge(avg_count_df, on='avg_category')
final_df['HR_Percent'] = final_df.HR / final_df.total_HR * 100.
## Grouping and merge to get mean of home runs for each group
mean_hr_group = df.drop(['height', 'weight', 'avg'], axis=1).groupby(by=['avg_category', 'handedness']).mean().add_prefix('mean_').reset_index()
final_df = final_df.merge(mean_hr_group, on=['avg_category', 'handedness'])
## Save dataframe to .csv file
final_df.to_csv('data/cleaned_baseball.csv') | 47.068966 | 144 | 0.708425 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 665 | 0.487179 |
64659fc8013df6eb0ece0729395653522f617a1f | 2,311 | py | Python | model_predict.py | Non1ce/Transformer-Bert | 32a3c002d80c6719e5a93c10879e09c1c645a39b | [
"MIT"
] | 2 | 2021-09-23T07:52:21.000Z | 2021-09-24T13:48:02.000Z | model_predict.py | Non1ce/Transformer-Bert | 32a3c002d80c6719e5a93c10879e09c1c645a39b | [
"MIT"
] | null | null | null | model_predict.py | Non1ce/Transformer-Bert | 32a3c002d80c6719e5a93c10879e09c1c645a39b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from model_train import pipeline_model
from Data import InputData
"""
Created on 20.07.2021
@author: Nikita
The module is designed to predict the topic of the entered text. To make a prediction, it is enough to run the
module as the main program.The longer the text, the better the neural network classifies.
"""
def text_prediction(model, tokenizer, tokenizer_vocabulary, weights_file: str, path: str):
model.load_weights(fr'{path}\output_data\{weights_file}.hdf5')
input_text = input("Enter the text: ")
text_sequence = tokenizer.fast_encode([input_text], tokenizer_vocabulary)
result = model.predict(text_sequence)
if result > 0.785:
print('Neural Network class: Тематика МЧС')
else:
print('Neural Network class: Всесторонняя тематика')
def prediction(const_prediction: bool = False):
"""
If const_prediction=False, then the function triggers a single topic prediction for the text or
Const_prediction=True, then there will be infinite texts predictions.
"""
_, _, model_instance, model_bert = pipeline_model(path=path,
max_len=max_len,
batch_size=batch_size,
bert_model_name=bert_model_name)
tokenizer = InputData(path=path,
max_len=max_len,
batch_size=batch_size)
tokenizer_vocabulary = tokenizer.tokenizer()
text_prediction(model=model_bert,
path=path,
weights_file=weights_file,
tokenizer=tokenizer,
tokenizer_vocabulary=tokenizer_vocabulary)
while const_prediction:
text_prediction(model=model_bert,
path=path,
weights_file=weights_file,
tokenizer=tokenizer,
tokenizer_vocabulary=tokenizer_vocabulary)
if __name__ == "__main__":
weights_file = r'BERT_weights.0.97--04--0.09'
bert_model_name = 'distilbert-base-multilingual-cased'
path = r'C:\PythonProjects\Jobs\BERT_model'
batch_size = 10
max_len = 250
prediction(const_prediction=True)
| 27.511905 | 115 | 0.617049 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 753 | 0.32152 |
64683791fc3bd9190d2f464d62fd8297c378bec0 | 5,904 | py | Python | tests/acceptance/commons/behave_step_helpers.py | telefonicaid/fiware-glancesync | 5ad0c80e12b9384473f31bf336015c75cf02a2a2 | [
"Apache-2.0"
] | null | null | null | tests/acceptance/commons/behave_step_helpers.py | telefonicaid/fiware-glancesync | 5ad0c80e12b9384473f31bf336015c75cf02a2a2 | [
"Apache-2.0"
] | 88 | 2015-07-21T22:13:23.000Z | 2016-11-15T21:28:56.000Z | tests/acceptance/commons/behave_step_helpers.py | telefonicaid/fiware-glancesync | 5ad0c80e12b9384473f31bf336015c75cf02a2a2 | [
"Apache-2.0"
] | 2 | 2015-08-12T11:19:55.000Z | 2018-05-25T19:04:43.000Z | # -*- coding: utf-8 -*-
# Copyright 2015-2016 Telefónica Investigación y Desarrollo, S.A.U
#
# This file is part of FIWARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with opensource@tid.es
from commons.constants import IMAGES_DIR
from hamcrest import assert_that, is_not, is_, equal_to
from qautils.dataset.dataset_utils import DatasetUtils
from qautils.logger.logger_utils import get_logger
from commons.utils import get_real_value_of_image_property
import os
__copyright__ = "Copyright 2015-2016"
__license__ = " Apache License, Version 2.0"
__logger__ = get_logger("qautils")
def create_new_image(context, region_name, image_name, image_filename=None):
"""
HELPER: Create new image using given params and step context.
:param region_name (string): Name of the node where image will be created
:param context (Behave Context): Behave context
:param image_name (string): Name of the image
:param image_file (string): Filename to be used as image.
:return: None
"""
__logger__.info("Creating new image '%s' in region '%s'. Image filename: '%s'",
image_name, region_name, image_filename)
__dataset_utils__ = DatasetUtils()
# Get Glance Manager for the given region
glance_ops = context.glance_manager_list[region_name]
properties = dict()
if context.table is not None:
for row in __dataset_utils__.prepare_data(context.table):
if 'param_name' in row.headings:
real_value = get_real_value_of_image_property(glance_ops, row['param_value'])
value = real_value if real_value is not None else row['param_value']
properties.update({row['param_name']: value})
# Create new image (pubic=True by default)
is_public = True
if "is_public" in properties:
is_public = properties["is_public"]
properties.pop("is_public")
__logger__.debug("Is the image '%s' public?: '%s'", image_name, is_public)
__logger__.debug("Image properties: '%s'", properties)
# If filename is None, it will be the same as the image_name
image_filename = image_name if image_filename is None else image_filename
__logger__.debug("Image filename to use: '%s'", image_filename)
glance_ops.create_image(image_name, image_filename, custom_properties=properties, is_public=is_public)
context.created_images_list.append(image_name)
def image_is_present_in_nodes(context, region, image_name, filename_content=None, check_master=True):
"""
HELPER: Check if an image is present in the Glance of the given node (region)
:param context (Behave Context): Behave context
:param image_name (string): Name of the image
:param filename_content (string): Filename to be used as image.
:param check_master (bool): If True, check the image in the Glance of Master node.
:return: None
"""
# If region is Master and check_master is False, DO NOT CHECK the presence of the image
if region == context.master_region_name and check_master is False:
return
glance_ops = context.glance_manager_list[region]
images_list = glance_ops.get_images(image_name)
# There is only one image with that name (if region is not Master)
if region == context.master_region_name:
assert_that(len(images_list), is_not(equal_to(0)),
"There aren't images with the name '{}' in '{}' (master) and it should".format(image_name,
region))
else:
assert_that(len(images_list), is_(equal_to(1)),
"There are more/less than ONE image with the name '{}' in '{}'".format(image_name, region))
image = images_list[0]
# The name is the expected
assert_that(image.name, is_(equal_to(image_name)),
"The image name '{}' in '{}' is not the expected one".format(image_name, region))
# Check if the image data is the expected.
sync_img_content = glance_ops.get_data_as_string(image.id)
filename_content = image_name if filename_content is None else filename_content
expected_img_content = ""
current = os.getcwd()
if "tests/acceptance" in current:
image_path = "{}/{}".format(IMAGES_DIR, filename_content)
else:
image_path = "tests/acceptance/{}/{}".format(IMAGES_DIR, filename_content)
file = open(image_path)
for line in file:
expected_img_content += line
assert_that(sync_img_content, is_(equal_to(expected_img_content)),
"The image content for '{}' in '{}' is not the expected content".format(image_name, region))
def image_is_not_present_in_node(context, region, image_name):
"""
HELPER: Check if an image is NOT present in the Glance of the given node (region)
:param context (Behave Context): Behave context
:param region: Node name to check
:param image_name (string): Name of the image
:return: None
"""
glance_ops = context.glance_manager_list[region]
images_list = glance_ops.get_images(image_name)
# There must not be images with the given name
assert_that(len(images_list), is_(equal_to(0)),
"There are images with the name '{}' in '{}', and it sloudn't".format(image_name, region))
| 40.438356 | 111 | 0.69563 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,792 | 0.47274 |
646a09bba433e9134052e21102c6b81a5cd88eb1 | 341 | py | Python | src/video_store/urls.py | staab/video-store | 9badfae8316ab1d33fa20fe6a17cd90ef2737f8c | [
"MIT"
] | null | null | null | src/video_store/urls.py | staab/video-store | 9badfae8316ab1d33fa20fe6a17cd90ef2737f8c | [
"MIT"
] | null | null | null | src/video_store/urls.py | staab/video-store | 9badfae8316ab1d33fa20fe6a17cd90ef2737f8c | [
"MIT"
] | null | null | null | from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import RedirectView
import store_api.urls
import store_ui.urls
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^api/', include(store_api.urls.urlpatterns)),
url(r'^.*$', include(store_ui.urls.urlpatterns))
]
| 24.357143 | 55 | 0.73607 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 25 | 0.073314 |
646a4518e28de7da32a6e93047fe7e1de9aa14ed | 776 | py | Python | pp/samples/13_component_yaml.py | smartalecH/gdsfactory | 66dfbf740704f1a6155f4812a1d9483ccf5c116c | [
"MIT"
] | 16 | 2020-02-03T07:05:31.000Z | 2021-12-29T18:40:09.000Z | pp/samples/13_component_yaml.py | smartalecH/gdsfactory | 66dfbf740704f1a6155f4812a1d9483ccf5c116c | [
"MIT"
] | 2 | 2020-01-31T20:01:40.000Z | 2020-09-26T17:50:55.000Z | pp/samples/13_component_yaml.py | smartalecH/gdsfactory | 66dfbf740704f1a6155f4812a1d9483ccf5c116c | [
"MIT"
] | 7 | 2020-02-09T23:16:18.000Z | 2020-10-30T03:12:04.000Z | import pp
def test_mzi():
netlist = """
instances:
CP1:
component: mmi1x2
settings:
width_mmi: 4.5
length_mmi: 10
CP2:
component: mmi1x2
settings:
width_mmi: 4.5
length_mmi: 5
arm_top:
component: mzi_arm
arm_bot:
component: mzi_arm
placements:
arm_bot:
mirror: [0,0,0,10]
ports:
W0: CP1,W0
E0: CP2,W0
connections:
arm_bot,W0: CP1,E0
arm_top,W0: CP1,E1
CP2,E0: arm_bot,E0
CP2,E1: arm_top,E0
"""
return pp.component_from_yaml(netlist)
if __name__ == "__main__":
c = test_mzi()
pp.show(c)
pp.plotgds(c)
| 17.636364 | 42 | 0.476804 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 617 | 0.795103 |
646b09594cd86b4fbaef8fd2c64ad439c0f79bf3 | 5,159 | py | Python | simulated_data.py | aamcbee/AdaOja | 11d2525a2123395ba8f8d6c1da3ffc1032356766 | [
"BSD-3-Clause"
] | 9 | 2019-05-30T16:50:28.000Z | 2021-06-21T14:44:29.000Z | simulated_data.py | aamcbee/AdaOja | 11d2525a2123395ba8f8d6c1da3ffc1032356766 | [
"BSD-3-Clause"
] | null | null | null | simulated_data.py | aamcbee/AdaOja | 11d2525a2123395ba8f8d6c1da3ffc1032356766 | [
"BSD-3-Clause"
] | 1 | 2019-07-16T15:05:54.000Z | 2019-07-16T15:05:54.000Z | import numpy as np
import scipy.linalg as la
from scipy.stats import multinomial
def random_multivar_normal(n, d, k, sigma=.1):
'''
Generate random samples from a random multivariate normal distribution
with covariance A A^T + sigma^2 I.
Input:
n: int, number of samples
d: int, dimension of samples
k: int, number of samples approximated
sigma: optional float > 0, default .1, the standard deviation
of the sample noise.
Output:
cov: d x d array, the covariance matrix for the distibution
A0: d x d array, the eigenvectors we want to estimate (note the
eigenvectors are columns of the array, in descending order)
X: n x d array of n d-dimensional samples.
'''
A0 = la.qr(np.random.rand(d, k), mode='economic')[0]
cov = A0 @ A0.T + sigma**2 * np.eye(d)
X = np.random.multivariate_normal(np.zeros(d), cov, size=n)
return cov, A0, X
def spiked_covariance(n, d, k, sigma=.1):
'''
Generate random samples from a random multivariate normal distribution
with covariance A D A^T + sigma^2 I.
Here A is a set of k orthogonal vectors and D is a diagonal matrix with
random, uniform entries, sorted and scaled so that the first entry = 1.
Input:
n: int, number of samples
d: int, dimension of samples
k: int, number of samples approximated
sigma: optional float > 0, default .1, the standard deviation
of the sample noise.
Output:
cov: d x d array, the true covariance matrix for the distibution
w: d vector of the diagonal values from matrix D.
A0: d x k array, the eigenvectors we want to estimate (note the
eigenvectors are columns of the array, in descending order)
X: n x d array of n d-dimensional samples.
'''
A0 = la.qr(np.random.rand(d, k), mode='economic')[0]
w = np.sort(np.random.rand(k, 1), axis=0)[::-1]
w /= w.max()
cov = A0 @ (w**2 * A0.T) + sigma**2 * np.eye(d)
X = np.random.multivariate_normal(np.zeros(d), cov, size=n)
return cov, w, A0, X
def random_multinomial(n, d, trials=100, mean0 = True, scale=1):
'''
Generate random samples from a random multinomial distribution with p_i ~ U(0,1).
Input:
n: int, number of samples
d: int, dimension of samples
trials: optional int, the number of trials for each sample from the
multinomial distribution default is 100.
mean0: optional boolean, default True. Indicates whether to normalize
the samples so they are mean 0.
Output:
cov: d x d array, the true covariance matrix for the distribution
e: d-dimensional array, the eigenvalues of the covariance matrix
v: d x d array, the eigenvectors of the covariance matrix
X: n x d array of n d-dimensional samples from the random_dirichlet
distribution with covariance cov.
'''
# Initialize p values
p = np.random.rand(d)
p /= p.sum()
# Calculate the covariance matrix for the multinomial distribution
# For large d > 10000, use multinomial.cov(d,p)
if d >= 10000:
cov = multinomial.cov(trials, p)
else:
cov = -np.outer(p, p) * trials
cov[np.diag_indices(d)] = trials * p * (1-p)
cov *= scale**2
# Obtain the eigenvectors of the covariance matrix.
e, v = la.eigh(cov)
e = e[::-1]
v = v[:,::-1]
# Obtain a sample from the multinomial distribution of size n
X = np.random.multinomial(trials, p, n).astype(float)
if mean0:
# Let X have mean 0
X -= trials * p
X *= scale
return cov, e, v, X
def random_dirichlet(n, d, mean0=True, scale=1):
'''
Generate random samples from a random dirichlet distribution with a_i ~ U(0,1).
Input:
n: int, number of samples
d: int, dimension of samples
mean0: optional boolean, default True. Indicates whether to normalize
the samples so they are mean 0.
Output:
cov: d x d array, the true covariance matrix for the distribution
e: d-dimensional array, the eigenvalues of the covariance matrix
v: d x d array, the eigenvectors of the covariance matrix
(note the eigenvectors are columns of the array, in descending order)
X: n x d array of n d-dimensional samples from the random_dirichlet
distribution with covariance cov.
'''
# Initialize a random set of parameters a drawn from the
# uniform distribution.
a = np.random.rand(d)
a0 = a.sum()
a_denom = a0**2 * (a0 + 1)
# Obtain the covariance matrix for the dirichlet distribution.
# Note that scipy doesn't currently have a builtin method for this
# (I may add one myself)
cov = -np.outer(a, a) / a_denom # i neq j case
cov[np.diag_indices(d)] = a * (a0 - a) / a_denom # i = j case
cov *= scale**2
# Obtain the eigenvectors of the covariance matrix.
e, v = la.eigh(cov)
e = e[::-1]
v = v[:,::-1]
X = np.random.dirichlet(a, n)
if mean0:
X -= a / a0
X *= scale
return cov, e, v, X
| 35.826389 | 85 | 0.629773 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,574 | 0.69277 |
646ba550e2915c48f8240facac2579b39fb7e221 | 600 | py | Python | livestream/api_test.py | mitodl/open-discussions | ab6e9fac70b8a1222a84e78ba778a7a065c20541 | [
"BSD-3-Clause"
] | 12 | 2017-09-27T21:23:27.000Z | 2020-12-25T04:31:30.000Z | livestream/api_test.py | mitodl/open-discussions | ab6e9fac70b8a1222a84e78ba778a7a065c20541 | [
"BSD-3-Clause"
] | 3,293 | 2017-06-30T18:16:01.000Z | 2022-03-31T18:01:34.000Z | livestream/api_test.py | mitodl/open-discussions | ab6e9fac70b8a1222a84e78ba778a7a065c20541 | [
"BSD-3-Clause"
] | 1 | 2020-04-13T12:19:57.000Z | 2020-04-13T12:19:57.000Z | """livestream API tests"""
from livestream.api import get_upcoming_events
def test_get_upcoming_events(settings, mocker):
"""test get upcoming events"""
settings.LIVESTREAM_ACCOUNT_ID = 392_239
settings.LIVESTREAM_SECRET_KEY = "secret key"
requests_patch = mocker.patch("requests.get", autospec=True)
resp = get_upcoming_events()
requests_patch.assert_called_once_with(
f"https://livestreamapis.com/v3/accounts/{settings.LIVESTREAM_ACCOUNT_ID}/upcoming_events",
auth=(settings.LIVESTREAM_SECRET_KEY, ""),
)
assert resp == requests_patch.return_value
| 37.5 | 99 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 174 | 0.29 |
646bb4c53e729313b2e24c298dec7ba02d40b510 | 2,105 | py | Python | xcamserver/framebuffer.py | Moskari/xcamserver | ed8cedf7cd0308b1e5fd9a8f74cec5e95d6c4978 | [
"MIT"
] | null | null | null | xcamserver/framebuffer.py | Moskari/xcamserver | ed8cedf7cd0308b1e5fd9a8f74cec5e95d6c4978 | [
"MIT"
] | null | null | null | xcamserver/framebuffer.py | Moskari/xcamserver | ed8cedf7cd0308b1e5fd9a8f74cec5e95d6c4978 | [
"MIT"
] | null | null | null | '''
Created on 15.2.2017
@author: sapejura
'''
import io
import threading
import struct
class FrameQueue(io.IOBase):
def __init__(self, frame_size):
super().__init__()
self.queue_lock = threading.Lock()
self._queue = bytearray()
self._store_mode = 0
self.frame_size = frame_size
# self.remaining = bytearray()
# self.memview = memoryview(self.remaining)
def readable(self):
return True
def writable(self):
return True
def put(self, b):
self._queue.extend(b)
return len(b)
def get(self, n=-1):
'''
Depending on the FrameQueue mode this method returns
different bytearray.
mode 1:
makes the queue FIFO and returns n bytes from it.
n=-1 returns everything.
mode 2:
makes the queue LILO and returns the last full
'frame_size' bytes frame. Otherwise returns b''.
'''
if self._store_mode == 1:
if n < 0:
b = self._queue[:]
del self._queue[:]
else:
b = self._queue[:n]
del self._queue[:n]
elif self._store_mode == 2:
# Returns the last full frame and
# removes it and everything come before that
l = len(self._queue)
frames = int(l/self.frame_size)
start = max((frames-1)*self.frame_size, 0)
end = start + self.frame_size
if l >= end-start:
b = self._queue[start:end]
del self._queue[:end]
else:
b = b''
else:
b = b''
return b
def set_mode(self, byte):
m = struct.unpack('B', byte)[0]
if m in [0, 1, 2]:
self._store_mode = m
else:
raise Exception('Illegal mode parameter.')
def buffer_size(self):
return len(self._queue)
def is_empty(self):
size = len(self._queue)
return size == 0
def clear_queue(self):
self._queue = bytearray()
| 25.361446 | 60 | 0.524466 | 2,013 | 0.956295 | 0 | 0 | 0 | 0 | 0 | 0 | 580 | 0.275534 |
646d6247dd6126e8107c3cf99ab420aeeee219c8 | 14,246 | py | Python | tests/test_nmt.py | LSSTDESC/TJPCov | eb70afc3d1e9a349ccd5e3c8ffe9c7e89a77b3cc | [
"MIT"
] | 3 | 2020-01-26T16:20:11.000Z | 2022-01-21T15:56:41.000Z | tests/test_nmt.py | LSSTDESC/TJPCov | eb70afc3d1e9a349ccd5e3c8ffe9c7e89a77b3cc | [
"MIT"
] | 25 | 2020-01-24T22:53:56.000Z | 2022-01-21T14:31:05.000Z | tests/test_nmt.py | LSSTDESC/TJPCov | eb70afc3d1e9a349ccd5e3c8ffe9c7e89a77b3cc | [
"MIT"
] | 1 | 2021-07-01T16:08:48.000Z | 2021-07-01T16:08:48.000Z | #!/usr/bin/python
import numpy as np
import os
import pymaster as nmt
import pytest
import tjpcov.main as cv
from tjpcov.parser import parse
import yaml
import sacc
root = "./tests/benchmarks/32_DES_tjpcov_bm/"
input_yml = os.path.join(root, "tjpcov_conf_minimal.yaml")
input_yml_no_nmtc = os.path.join(root, "tjpcov_conf_minimal_no_nmtconf.yaml")
xcell_yml = os.path.join(root, "desy1_tjpcov_bm.yml")
def get_xcell_yml():
    """Read the xCell benchmark configuration file and return it as a dict."""
    with open(xcell_yml) as config_file:
        return yaml.safe_load(config_file)
def get_nmt_bin():
    """Return the NmtBin binning used by the benchmarks: edges 0, 6, ..., 96."""
    edges = list(range(0, 97, 6))
    return nmt.NmtBin.from_edges(edges[:-1], edges[1:])
def get_pair_folder_name(tracer_comb):
    """Build the benchmark folder name for a tracer combination.

    Tracer names look like 'DESgc__0'; the part before '__' is kept and the
    bare names are joined with underscores (e.g. ('DESgc__0', 'DESwl__1')
    -> 'DESgc_DESwl').
    """
    return '_'.join(tr.split('__')[0] for tr in tracer_comb)
def get_data_cl(tr1, tr2, remove_be=False):
    """Load the measured C_ell for the pair (tr1, tr2) from the benchmarks.

    If remove_be is True and this is an auto-correlation with four
    components, the redundant component at index 2 (BE) is dropped.
    """
    folder = get_pair_folder_name((tr1, tr2))
    cl = np.load(os.path.join(root, folder, f"cl_{tr1}_{tr2}.npz"))['cl']
    if remove_be and (tr1 == tr2) and (cl.shape[0] == 4):
        cl = np.delete(cl, 2, axis=0)
    return cl
def get_fiducial_cl(s, tr1, tr2, binned=True, remove_be=False):
    """Load the fiducial (theory) C_ell for the pair (tr1, tr2).

    Parameters
    ----------
    s : sacc.Sacc
        Sacc object providing the bandpower windows (only used when
        ``binned`` is True).
    tr1, tr2 : str
        Tracer names, e.g. 'DESgc__0'.
    binned : bool
        If True, convolve the unbinned theory C_ell with the bandpower
        windows stored in ``s``.
    remove_be : bool
        If True and this is an auto-correlation with four components,
        drop the redundant component at index 2 (BE).

    Returns
    -------
    np.ndarray
        Array of shape (ncomponents, nell) or (ncomponents, nbpw).
    """
    bn = get_pair_folder_name((tr1, tr2))
    fname = os.path.join(root, 'fiducial', bn, f"cl_{tr1}_{tr2}.npz")
    cl = np.load(fname)['cl']
    if binned:
        # Work on a copy and keep only the first (00/EE) data type so the
        # bandpower-window lookup below matches component 0.
        s = s.copy()
        for dtype in ('cl_0b', 'cl_eb', 'cl_be', 'cl_bb'):
            s.remove_selection(data_type=dtype)
        ix = s.indices(tracers=(tr1, tr2))
        bpw = s.get_bandpower_windows(ix)
        # Only component 0 is binned; the remaining components are left as
        # zeros (presumably null in the fiducial model — confirm upstream).
        cl0_bin = bpw.weight.T.dot(cl[0])
        cl_bin = np.zeros((cl.shape[0], cl0_bin.size))
        cl_bin[0] = cl0_bin
        cl = cl_bin
    # Remove redundant terms
    if remove_be and (tr1 == tr2) and (cl.shape[0] == 4):
        cl = np.delete(cl, 2, 0)
    return cl
def get_tracer_noise(tr, cp=True):
    """Return the benchmark noise level for tracer ``tr``.

    Parameters
    ----------
    tr : str
        Tracer name, e.g. 'DESgc__0'.
    cp : bool
        If True, return the last element of the first row of the coupled
        noise ('nl_cp'); otherwise the first element of the decoupled
        noise ('nl').

    Returns
    -------
    float
        The selected noise value.
    """
    bn = get_pair_folder_name((tr, tr))
    fname = os.path.join(root, bn, f"cl_{tr}_{tr}.npz")
    # Use a context manager so the .npz zip handle is closed deterministically
    # (np.load keeps the archive open otherwise).
    with np.load(fname) as clfile:
        if cp:
            return clfile['nl_cp'][0, -1]
        return clfile['nl'][0, 0]
def get_benchmark_cov(tracer_comb1, tracer_comb2):
    """Load the benchmark covariance block for two tracer pairs."""
    tr1, tr2 = tracer_comb1
    tr3, tr4 = tracer_comb2
    fname = os.path.join(root, 'cov', f'cov_{tr1}_{tr2}_{tr3}_{tr4}.npz')
    return np.load(fname)['cov']
def get_workspace(tr1, tr2):
config = get_xcell_yml()
w = nmt.NmtWorkspace()
bn = get_pair_folder_name((tr1, tr2))
m1 = config['tracers'][tr1]['mask_name']
m2 = config['tracers'][tr2]['mask_name']
fname = os.path.join(root, bn, f"w__{m1}__{m2}.fits")
w.read_from(fname)
return w
def get_covariance_workspace(tr1, tr2, tr3, tr4):
config = get_xcell_yml()
cw = nmt.NmtCovarianceWorkspace()
m1 = config['tracers'][tr1]['mask_name']
m2 = config['tracers'][tr2]['mask_name']
m3 = config['tracers'][tr3]['mask_name']
m4 = config['tracers'][tr4]['mask_name']
fname = os.path.join(root, 'cov', f"cw__{m1}__{m2}__{m3}__{m4}.fits")
cw.read_from(fname)
return cw
def assert_chi2(s, tracer_comb1, tracer_comb2, cov, cov_bm, threshold):
cl1 = get_data_cl(*tracer_comb1, remove_be=True)
cl2 = get_data_cl(*tracer_comb2, remove_be=True)
clf1 = get_fiducial_cl(s, *tracer_comb1, remove_be=True)
clf2 = get_fiducial_cl(s, *tracer_comb2, remove_be=True)
ndim, nbpw = cl1.shape
# This only runs if tracer_comb1 = tracer_comb2 (when the block covariance
# is invertible)
if (tracer_comb1[0] == tracer_comb1[1]) and (ndim == 3):
cov = cov.reshape((nbpw, 4, nbpw, 4))
cov = np.delete(np.delete(cov, 2, 1), 2, 3).reshape(3 * nbpw, -1)
cov_bm = cov_bm.reshape((nbpw, 4, nbpw, 4))
cov_bm = np.delete(np.delete(cov_bm, 2, 1), 2, 3).reshape(3 * nbpw, -1)
delta1 = (clf1 - cl1).flatten()
delta2 = (clf2 - cl2).flatten()
chi2 = delta1.dot(np.linalg.inv(cov)).dot(delta2)
chi2_bm = delta1.dot(np.linalg.inv(cov_bm)).dot(delta2)
assert np.abs(chi2 / chi2_bm - 1) < threshold
def test_nmt_conf_missing():
"""
Check that input file might not have nmt_conf and it still works
"""
tjpcov_class = cv.CovarianceCalculator(input_yml_no_nmtc)
ccl_tracers, tracer_noise = tjpcov_class.get_tracer_info(tjpcov_class.cl_data)
tracer_comb1 = tracer_comb2 = ('DESgc__0', 'DESgc__0')
cache = {'bins': get_nmt_bin()}
cov = tjpcov_class.nmt_gaussian_cov(tracer_comb1, tracer_comb2,
ccl_tracers, tracer_noise,
cache=cache)['final'] + 1e-100
@pytest.mark.parametrize('tracer_comb1,tracer_comb2',
[(('DESgc__0', 'DESgc__0'), ('DESgc__0', 'DESgc__0')),
(('DESgc__0', 'DESwl__0'), ('DESwl__0', 'DESwl__0')),
(('DESgc__0', 'DESgc__0'), ('DESwl__0', 'DESwl__0')),
(('DESwl__0', 'DESwl__0'), ('DESwl__0', 'DESwl__0')),
(('DESwl__0', 'DESwl__0'), ('DESwl__1', 'DESwl__1')),
])
def test_nmt_gaussian_cov(tracer_comb1, tracer_comb2):
# tjpcov_class = cv.CovarianceCalculator(input_yml)
# cache = {'bins': get_nmt_bin()}
config, _= parse(input_yml)
bins = get_nmt_bin()
config['tjpcov']['binning_info'] = bins
tjpcov_class = cv.CovarianceCalculator(config)
cache = None
ccl_tracers, tracer_noise = tjpcov_class.get_tracer_info(tjpcov_class.cl_data)
for tr in tracer_comb1 + tracer_comb2:
tracer_noise[tr] = get_tracer_noise(tr)
# Test error with uncoupled and coupled noise provided
with pytest.raises(ValueError):
cov = tjpcov_class.nmt_gaussian_cov(tracer_comb1, tracer_comb2,
ccl_tracers,
tracer_Noise=tracer_noise,
tracer_Noise_coupled=tracer_noise,
cache=cache)['final']
# Cov with coupled noise (as in benchmark)
cov = tjpcov_class.nmt_gaussian_cov(tracer_comb1, tracer_comb2,
ccl_tracers,
tracer_Noise_coupled=tracer_noise,
cache=cache)['final'] + 1e-100
cov_bm = get_benchmark_cov(tracer_comb1, tracer_comb2) + 1e-100
assert np.max(np.abs(np.diag(cov) / np.diag(cov_bm) - 1)) < 1e-5
assert np.max(np.abs(cov / cov_bm - 1)) < 1e-5
# Test error with 'bins' in cache different to that at initialization
with pytest.raises(ValueError):
cache2 = {'bins': nmt.NmtBin.from_nside_linear(32, bins.get_n_bands())}
cov2 = tjpcov_class.nmt_gaussian_cov(tracer_comb1, tracer_comb2,
ccl_tracers,
tracer_Noise=tracer_noise,
tracer_Noise_coupled=tracer_noise,
cache=cache2)['final']
# Test it runs with 'bins' in cache if they are the same
cache2 = {'bins': bins}
cov2 = tjpcov_class.nmt_gaussian_cov(tracer_comb1, tracer_comb2,
ccl_tracers,
tracer_Noise_coupled=tracer_noise,
cache=cache2)['final'] + 1e-100
assert np.all(cov == cov2)
# Cov with uncoupled noise cannot be used for benchmark as tracer_noise is
# assumed to be flat but it is not when computed from the coupled due to
# edge effects
if tracer_comb1 == tracer_comb2:
s = tjpcov_class.cl_data
assert_chi2(s, tracer_comb1, tracer_comb2, cov, cov_bm, 1e-5)
# Check that it runs if one of the masks does not overlap with the others
if tracer_comb1 != tracer_comb2:
os.system("rm -f ./tests/benchmarks/32_DES_tjpcov_bm/tjpcov_tmp/*")
tjpcov_class.mask_fn[tracer_comb1[0]] = \
'./tests/benchmarks/32_DES_tjpcov_bm/catalogs/mask_nonoverlapping.fits.gz'
cov = tjpcov_class.nmt_gaussian_cov(tracer_comb1, tracer_comb2,
ccl_tracers,
tracer_Noise_coupled=tracer_noise,
cache=cache)
os.system("rm -f ./tests/benchmarks/32_DES_tjpcov_bm/tjpcov_tmp/*")
@pytest.mark.parametrize('tracer_comb1,tracer_comb2',
[(('DESgc__0', 'DESgc__0'), ('DESgc__0', 'DESgc__0')),
(('DESgc__0', 'DESwl__0'), ('DESwl__0', 'DESwl__0')),
(('DESgc__0', 'DESgc__0'), ('DESwl__0', 'DESwl__0')),
(('DESwl__0', 'DESwl__0'), ('DESwl__0', 'DESwl__0')),
(('DESwl__0', 'DESwl__0'), ('DESwl__1', 'DESwl__1')),
])
def test_nmt_gaussian_cov_cache(tracer_comb1, tracer_comb2):
tjpcov_class = cv.CovarianceCalculator(input_yml)
ccl_tracers, tracer_noise = tjpcov_class.get_tracer_info(tjpcov_class.cl_data)
for tr in tracer_comb1 + tracer_comb2:
tracer_noise[tr] = get_tracer_noise(tr)
(tr1, tr2), (tr3, tr4) = tracer_comb1, tracer_comb2
s = None # Not needed if binned=False
cl13 = get_fiducial_cl(s, tr1, tr3, binned=False)
cl24 = get_fiducial_cl(s, tr2, tr4, binned=False)
cl14 = get_fiducial_cl(s, tr1, tr4, binned=False)
cl23 = get_fiducial_cl(s, tr2, tr3, binned=False)
cache = {
# 'f1': f1, 'f2': f2, 'f3': f3, 'f4': f4,
# 'm1': m1, 'm2': m2, 'm3': m3, 'm4': m4,
# 'w13': w13, 'w23': w23, 'w14': w14, 'w24': w24,
# 'w12': w12, 'w34': w34,
# 'cw': cw,
'cl13': cl13, 'cl24': cl24, 'cl14': cl14, 'cl23':cl23,
# 'SN13': SN13, 'SN24': SN24, 'SN14': SN14, 'SN23': SN23,
'bins': get_nmt_bin()
}
cov = tjpcov_class.nmt_gaussian_cov(tracer_comb1, tracer_comb2,
ccl_tracers, tracer_Noise_coupled=tracer_noise,
cache=cache)['final'] + 1e-100
cov_bm = get_benchmark_cov(tracer_comb1, tracer_comb2) + 1e-100
assert np.max(np.abs(np.diag(cov) / np.diag(cov_bm) - 1)) < 1e-5
assert np.max(np.abs(cov / cov_bm - 1)) < 1e-5
if tracer_comb1 == tracer_comb2:
s = tjpcov_class.cl_data
assert_chi2(s, tracer_comb1, tracer_comb2, cov, cov_bm, 1e-5)
w13 = get_workspace(tr1, tr3)
w23 = get_workspace(tr2, tr3)
w14 = get_workspace(tr1, tr4)
w24 = get_workspace(tr2, tr4)
w12 = get_workspace(tr1, tr2)
w34 = get_workspace(tr3, tr4)
cw = get_covariance_workspace(*tracer_comb1, *tracer_comb2)
cache = {
# 'f1': f1, 'f2': f2, 'f3': f3, 'f4': f4,
# 'm1': m1, 'm2': m2, 'm3': m3, 'm4': m4,
'w13': w13, 'w23': w23, 'w14': w14, 'w24': w24,
'w12': w12, 'w34': w34,
'cw': cw,
'cl13': cl13, 'cl24': cl24, 'cl14': cl14, 'cl23':cl23,
# 'SN13': SN13, 'SN24': SN24, 'SN14': SN14, 'SN23': SN23,
'bins': get_nmt_bin()
}
cov = tjpcov_class.nmt_gaussian_cov(tracer_comb1, tracer_comb2,
ccl_tracers, tracer_Noise_coupled=tracer_noise,
cache=cache)['final'] + 1e-100
assert np.max(np.abs(np.diag(cov) / np.diag(cov_bm) - 1)) < 1e-6
assert np.max(np.abs(cov / cov_bm - 1)) < 1e-6
if tracer_comb1 == tracer_comb2:
s = tjpcov_class.cl_data
assert_chi2(s, tracer_comb1, tracer_comb2, cov, cov_bm, 1e-6)
def test_get_all_cov_nmt():
tjpcov_class = cv.CovarianceCalculator(input_yml)
s = tjpcov_class.cl_data
bins = get_nmt_bin()
tracer_noise = {}
for tr in s.tracers:
tracer_noise[tr] = get_tracer_noise(tr)
# Test error with uncoupled and coupled noise provided
with pytest.raises(ValueError):
cov = tjpcov_class.get_all_cov_nmt(tracer_noise=tracer_noise,
tracer_noise_coupled=tracer_noise,
cache={'bins': bins})
cov = tjpcov_class.get_all_cov_nmt(tracer_noise_coupled=tracer_noise,
cache={'bins': bins}) + 1e-100
cov_bm = s.covariance.covmat + 1e-100
assert np.max(np.abs(np.diag(cov) / np.diag(cov_bm) - 1)) < 1e-5
assert np.max(np.abs(cov / cov_bm - 1)) < 1e-3
# Check chi2
clf = np.array([])
for trs in s.get_tracer_combinations():
cl_trs = get_fiducial_cl(s, *trs, remove_be=True)
clf = np.concatenate((clf, cl_trs.flatten()))
cl = s.mean
delta = clf - cl
chi2 = delta.dot(np.linalg.inv(cov)).dot(delta)
chi2_bm = delta.dot(np.linalg.inv(cov_bm)).dot(delta)
assert np.abs(chi2 / chi2_bm - 1) < 1e-5
# Check that it also works if they don't use concise data_types
s2 = s.copy()
for dp in s2.data:
dt = dp.data_type
if dt == 'cl_00':
dp.data_type = sacc.standard_types.galaxy_density_cl
elif dt == 'cl_0e':
dp.data_type = sacc.standard_types.galaxy_shearDensity_cl_e
elif dt == 'cl_0b':
dp.data_type = sacc.standard_types.galaxy_shearDensity_cl_b
elif dt == 'cl_ee':
dp.data_type = sacc.standard_types.galaxy_shear_cl_ee
elif dt == 'cl_eb':
dp.data_type = sacc.standard_types.galaxy_shear_cl_eb
elif dt == 'cl_be':
dp.data_type = sacc.standard_types.galaxy_shear_cl_be
elif dt == 'cl_bb':
dp.data_type = sacc.standard_types.galaxy_shear_cl_bb
else:
raise ValueError('Something went wrong. Data type not recognized')
tjpcov_class.cl_data = s2
cov2 = tjpcov_class.get_all_cov_nmt(tracer_noise_coupled=tracer_noise,
cache={'bins': bins}) + 1e-100
assert np.all(cov == cov2)
# Clean up after the tests
os.system("rm -rf ./tests/benchmarks/32_DES_tjpcov_bm/tjpcov_tmp/")
| 38.090909 | 87 | 0.585989 | 0 | 0 | 0 | 0 | 7,107 | 0.498877 | 0 | 0 | 2,804 | 0.196827 |
646da2c390d3322cbbc2c43e4e62944383fbc9f4 | 1,174 | py | Python | lib/fathead/scikit_learn/fetch.py | aeisenberg/zeroclickinfo-fathead | 9be00a038d812ca9ccd0d601220afde777ab2f8e | [
"Apache-2.0"
] | null | null | null | lib/fathead/scikit_learn/fetch.py | aeisenberg/zeroclickinfo-fathead | 9be00a038d812ca9ccd0d601220afde777ab2f8e | [
"Apache-2.0"
] | null | null | null | lib/fathead/scikit_learn/fetch.py | aeisenberg/zeroclickinfo-fathead | 9be00a038d812ca9ccd0d601220afde777ab2f8e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from os.path import join
import requests
from bs4 import BeautifulSoup
SCIKIT_LEARN_BASE_URL = 'http://scikit-learn.org/stable/auto_examples/'
SCIKIT_INDEX_URL = 'http://scikit-learn.org/stable/auto_examples/index.html'
def download_file(fetch_me):
"""
Fetches a file in given url into the 'download' directory
Args:
fetch_me: URL to file
Returns:
local_filename: Path to local version of the downloaded file.
"""
local_filename = join('download', fetch_me.split('/')[-1])
r = requests.get(fetch_me, stream=True)
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
return local_filename
if __name__ == "__main__":
local_file = download_file(SCIKIT_INDEX_URL)
with open(local_file, 'r') as index:
soup = BeautifulSoup(index, 'html.parser')
# Download everything in found in "examples" list
for example_link in soup.select('#examples .caption-text a'):
href_path = join(SCIKIT_LEARN_BASE_URL, example_link.get('href'))
download_file(href_path)
| 31.72973 | 77 | 0.670358 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 449 | 0.382453 |
646e3f798705f6b0c67f2b3094fafcabdb531a9a | 2,283 | py | Python | frappe-bench/apps/erpnext/erpnext/education/report/student_batch_wise_attendance/student_batch_wise_attendance.py | Semicheche/foa_frappe_docker | a186b65d5e807dd4caf049e8aeb3620a799c1225 | [
"MIT"
] | null | null | null | frappe-bench/apps/erpnext/erpnext/education/report/student_batch_wise_attendance/student_batch_wise_attendance.py | Semicheche/foa_frappe_docker | a186b65d5e807dd4caf049e8aeb3620a799c1225 | [
"MIT"
] | null | null | null | frappe-bench/apps/erpnext/erpnext/education/report/student_batch_wise_attendance/student_batch_wise_attendance.py | Semicheche/foa_frappe_docker | a186b65d5e807dd4caf049e8aeb3620a799c1225 | [
"MIT"
] | null | null | null | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr, cint, getdate
from frappe import msgprint, _
def execute(filters=None):
if not filters: filters = {}
if not filters.get("date"):
msgprint(_("Please select date"), raise_exception=1)
columns = get_columns(filters)
active_student_group = get_active_student_group()
data = []
for student_group in active_student_group:
row = [student_group.name]
present_students = 0
absent_students = 0
student_group_strength = get_student_group_strength(student_group.name)
student_attendance = get_student_attendance(student_group.name, filters.get("date"))
if student_attendance:
for attendance in student_attendance:
if attendance.status== "Present":
present_students = attendance.count
elif attendance.status== "Absent":
absent_students = attendance.count
unmarked_students = student_group_strength - (present_students + absent_students)
row+= [student_group_strength, present_students, absent_students, unmarked_students]
data.append(row)
return columns, data
def get_columns(filters):
columns = [
_("Student Group") + ":Link/Student Group:250",
_("Student Group Strength") + "::170",
_("Present") + "::90",
_("Absent") + "::90",
_("Not Marked") + "::90"
]
return columns
def get_active_student_group():
active_student_groups = frappe.db.sql("""select name from `tabStudent Group` where group_based_on = "Batch"
and academic_year=%s order by name""", (frappe.defaults.get_defaults().academic_year), as_dict=1)
return active_student_groups
def get_student_group_strength(student_group):
student_group_strength = frappe.db.sql("""select count(*) from `tabStudent Group Student`
where parent = %s and active=1""", student_group)[0][0]
return student_group_strength
def get_student_attendance(student_group, date):
student_attendance = frappe.db.sql("""select count(*) as count, status from `tabStudent Attendance` where \
student_group= %s and date= %s and\
(course_schedule is Null or course_schedule='') group by status""",
(student_group, date), as_dict=1)
return student_attendance | 35.671875 | 109 | 0.750329 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 672 | 0.29435 |
646e5dbf5f25bac61c00c886ecb35af7a68f74e5 | 1,819 | py | Python | src/role_filter.py | kjkszpj/mifans | 523ea1907c7bf32cdabed129676c748eeb57e552 | [
"MIT"
] | null | null | null | src/role_filter.py | kjkszpj/mifans | 523ea1907c7bf32cdabed129676c748eeb57e552 | [
"MIT"
] | null | null | null | src/role_filter.py | kjkszpj/mifans | 523ea1907c7bf32cdabed129676c748eeb57e552 | [
"MIT"
] | null | null | null | import pickle
data = pickle.load(open('../data/record.pk', 'rb'))
namedict = pickle.load(open('../data/namedict.pk', 'rb'))
rndict = {v:k for k, v in namedict.items()}
pickle.dump(rndict, open('../data/rndict.pk', 'wb'))
cnt_name = {v:[0, 0, 0] for v in namedict.values()}
for record in data:
for a in record[2]:
cnt_name[a][0] += 1
cnt_name[a][1] += 1
for a in record[4]:
cnt_name[a][0] += 1
cnt_name[a][2] += 1
for a in record[6]:
cnt_name[a][0] += 1
cnt_name[a][2] += 1
for a in record[8]:
cnt_name[a][0] += 1
cnt_name[a][2] += 1
a = [[v[2], k] for k, v in cnt_name.items()]
a.sort(reverse=True)
good_list = []
for cnt, id in a:
name = rndict[id]
ok = True
if name.find('@') != -1: ok = False
if name.upper().find('HACKINGTEAM') != -1: ok = True
if ok: good_list.append(id)
temp = []
for id in good_list:
if (cnt_name[id][1] >= 3 and cnt_name[id][2] >= 5):
print(cnt_name[id], id, rndict[id])
temp.append(id)
# print(len(temp))
# print(len(good_list))
# inf = open('../data/list.txt', 'r')
# lines = inf.readlines()
# print(len(lines))
# cnt = 0
# for line in lines:
# [idf, idt, time] = line.split(' ')
# idf = int(idf)
# idt = int(idt)
# time = int(time)
# if idf in temp and idt in temp:
# cnt += 1
# print(cnt)
# 3d display for initial list
tempf1 = open('../data/temp1.txt', 'w')
tempf2 = open('../data/temp2.txt', 'w')
tempf3 = open('../data/temp3.txt', 'w')
for k, v in cnt_name.items():
if k in temp:
tempf1.write('%d %d %d\n' % (v[0], v[1], v[2]))
elif k in good_list:
tempf3.write('%d %d %d\n' % (v[0], v[1], v[2]))
else:
tempf2.write('%d %d %d\n' % (v[0], v[1], v[2]))
print([rndict[k]] + v)
print(len(temp)) | 27.984615 | 57 | 0.536009 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 538 | 0.295767 |
646f82e80f897c2f8bd798e481eac1b38a160e51 | 14,748 | py | Python | server/internal/rest_server.py | VentionCo/mm-machineapp-template | 61de22b9bb65c534407f2f9ff3a389a799484284 | [
"MIT"
] | null | null | null | server/internal/rest_server.py | VentionCo/mm-machineapp-template | 61de22b9bb65c534407f2f9ff3a389a799484284 | [
"MIT"
] | 1 | 2021-11-02T13:59:54.000Z | 2021-11-03T22:28:54.000Z | server/internal/rest_server.py | VentionCo/mm-machineapp-template | 61de22b9bb65c534407f2f9ff3a389a799484284 | [
"MIT"
] | null | null | null | import logging
from bottle import Bottle, request, response, abort, static_file
import os
import time
import threading
from threading import Thread
from pathlib import Path
import json
import subprocess
import io
import sys
import signal
from internal.notifier import getNotifier, NotificationLevel
from internal.interprocess_message import SubprocessToParentMessage
from machine_app import MachineAppEngine
import paho.mqtt.subscribe as MQTTsubscribe
import paho.mqtt.client as mqtt
import traceback
class RestServer(Bottle):
'''
RESTful server that handles control of the MachineApp and configuration IO
'''
def __init__(self):
super(RestServer, self).__init__()
self.__clientDirectory = os.path.join('..', 'client')
self.__serverDirectory = os.path.join('.')
self.__logger = logging.getLogger(__name__)
self.__notifier = getNotifier()
self.__subprocess = MachineAppSubprocessManager()
self.__estopManager = EstopManager(self.onEstopEntered)
self.isPaused = False # TODO: It would be better to no track isPaused here
# Set up callbacks
self.route('/', callback=self.index)
self.route('/ping', callback=self.ping)
self.route('/<filepath:path>', callback=self.serveStatic)
self.route('/run/start', method='POST', callback=self.start)
self.route('/run/stop', method='POST', callback=self.stop)
self.route('/run/pause', method='POST', callback=self.pause)
self.route('/run/resume', method='POST', callback=self.resume)
self.route('/run/estop', method='POST', callback=self.estop)
self.route('/run/estop', method='GET', callback=self.getEstop)
self.route('/run/releaseEstop', method='POST', callback=self.releaseEstop)
self.route('/run/resetSystem', method='POST', callback=self.resetSystem)
self.route('/run/state', method='GET', callback=self.getState)
self.route('/run/message', method='POST', callback=self.sendMessage)
self.route('/kill', method='GET', callback=self.kill)
self.route('/logs', method='GET', callback=self.getLog)
def ping(self):
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'GET, POST, PUT, OPTIONS'
response.headers['Access-Control-Allow-Headers'] = 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'
return 'pong'
def index(self):
self.__logger.info('Handling index file request')
return static_file('index.html', root=self.__clientDirectory)
def serveStatic(self, filepath):
self.__logger.info('Serving static file: {}'.format(filepath))
return static_file(filepath, root=self.__clientDirectory)
def getLog(self):
return static_file('machine_app.log', root=self.__serverDirectory)
def start(self):
inStateStepperMode = (request.params['stateStepperMode'] == 'true') if 'stateStepperMode' in request.params else False
configuration = request.json
if self.__subprocess.start(inStateStepperMode, configuration):
return 'OK'
else:
abort(400, 'Failed to start the MachineApp')
def stop(self):
if self.__subprocess.sendMsgToSubprocess({ 'request': 'stop' }):
self.isPaused = False
return 'OK'
else:
abort(400, 'Failed to stop the MachineApp')
def pause(self):
if self.__subprocess.sendMsgToSubprocess({ 'request': 'pause' }):
self.isPaused = True
return 'OK'
else:
abort(400, 'Failed to pause the MachineApp')
def resume(self):
if self.__subprocess.sendMsgToSubprocess({ 'request': 'resume' }):
self.isPaused = False
return 'OK'
else:
abort(400, 'Failed to resume the MachineApp')
# TODO: All E-Stop functionality should be handles on this process
def estop(self):
if self.__estopManager.estop():
self.onEstopEntered()
return 'OK'
else:
abort(400, 'Failed to estop the MachineApp')
def getEstop(self):
return self.__estopManager.getEstop()
def releaseEstop(self):
if self.__estopManager.release():
return 'OK'
else:
abort(400, 'Failed to release estop')
def resetSystem(self):
if self.__estopManager.reset():
return 'OK'
else:
abort(400, 'Failed to reset the system')
def getState(self):
return {
"isRunning": self.__subprocess.isRunning(),
"isPaused": self.isPaused
}
def sendMessage(self):
msg = request.json
if self.__subprocess.sendMsgToSubprocess({ 'topic': msg['topic'], 'message': msg['message'] }):
return 'OK'
else:
abort(400, 'Failed to send the message to the MachineApp')
def kill(self):
self.__subprocess.terminate()
os.kill(os.getpid(), signal.SIGTERM)
return 'OK'
def onEstopEntered(self):
try:
if self.__subprocess.isRunning():
self.__subprocess.terminate()
self.isPaused = False
# Create a temporary MachineAppEngine and call onEstop
temporaryApp = MachineAppEngine()
temporaryApp.initialize()
temporaryApp.onEstop()
except Exception as e:
self.__notifier.sendMessage(NotificationLevel.ERROR, "Failed to run onEstop behavior: %s (%s)" % (traceback.format_exc(), e))
class MQTTPATHS :
ESTOP = "estop"
ESTOP_STATUS = ESTOP + "/status"
ESTOP_TRIGGER_REQUEST = ESTOP + "/trigger/request"
ESTOP_TRIGGER_RESPONSE = ESTOP + "/trigger/response"
ESTOP_RELEASE_REQUEST = ESTOP + "/release/request"
ESTOP_RELEASE_RESPONSE = ESTOP + "/release/response"
ESTOP_SYSTEMRESET_REQUEST = ESTOP + "/systemreset/request"
ESTOP_SYSTEMRESET_RESPONSE = ESTOP + "/systemreset/response"
class EstopManager:
TIMEOUT = 10.0
'''
Small class that subscribes/publishes to MQTT eStop events
to control the current state of the estop.
'''
def __init__(self, onEstopEntered):
self.__onEstopEntered = onEstopEntered
self.__isEstopped = False
self.__notifier = getNotifier()
self.__mqttClient = mqtt.Client()
self.__logger = logging.getLogger(__name__)
self.__mqttClient.on_connect = self.__onConnect
self.__mqttClient.on_message = self.__onMessage
self.__mqttClient.on_disconnect = self.__onDisconnect
self.IP = '127.0.0.1'
self.__mqttClient.connect(self.IP)
self.__mqttClient.loop_start()
def __onConnect(self, client, userData, flags, rc):
if rc == 0:
self.__mqttClient.subscribe(MQTTPATHS.ESTOP_STATUS)
def __onMessage(self, client, userData, msg):
topicParts = msg.topic.split('/')
deviceType = topicParts[1]
if (topicParts[0] == MQTTPATHS.ESTOP) :
if (topicParts[1] == "status") :
self.__isEstopped = json.loads(msg.payload.decode('utf-8'))
if self.__isEstopped:
self.__notifier.sendMessage(NotificationLevel.APP_ESTOP, 'Machine is in estop')
self.__onEstopEntered()
else:
self.__notifier.sendMessage(NotificationLevel.APP_ESTOP_RELEASE, 'Estop Released')
def __onDisconnect(self, client, userData, rc):
logging.info("Disconnected with rtn code [%d]"% (rc))
return
def estop(self):
return_value = { 'value': False }
def mqttResponse() :
# Wait for response
return_value['value'] = json.loads(MQTTsubscribe.simple(MQTTPATHS.ESTOP_TRIGGER_RESPONSE, retained = False, hostname = self.IP).payload.decode('utf-8'))
return
mqttResponseThread = threading.Thread(target = mqttResponse)
mqttResponseThread.daemon = True
mqttResponseThread.start()
# Adding a delay to make sure MQTT simple function is launched before publish is made. Quick fix from bug on App. Launcher.
time.sleep(0.2)
# Publish trigger request on MQTT
self.__mqttClient.publish(MQTTPATHS.ESTOP_TRIGGER_REQUEST, "message is not important")
mqttResponseThread.join(EstopManager.TIMEOUT)
if mqttResponseThread.isAlive() :
self.__logger.error('MQTT response timeout.')
return False
else :
return return_value['value']
return return_value['value']
def release(self):
return_value = { 'value': False }
def mqttResponse() :
# Wait for response
return_value['value'] = json.loads(MQTTsubscribe.simple(MQTTPATHS.ESTOP_RELEASE_RESPONSE, retained = False, hostname = self.IP).payload.decode('utf-8'))
return
mqttResponseThread = threading.Thread(target = mqttResponse)
mqttResponseThread.daemon = True
mqttResponseThread.start()
# Adding a delay to make sure MQTT simple function is launched before publish is made. Quick fix from bug on App. Launcher.
time.sleep(0.2)
# Publish release request on MQTT
self.__mqttClient.publish(MQTTPATHS.ESTOP_RELEASE_REQUEST, "message is not important")
mqttResponseThread.join(EstopManager.TIMEOUT)
if mqttResponseThread.isAlive() :
self.__logger.error('MQTT response timeout.')
return False
else :
return return_value['value']
return return_value['value']
def reset(self):
return_value = { 'value': False }
def mqttResponse() :
# Wait for response
return_value['value'] = json.loads(MQTTsubscribe.simple(MQTTPATHS.ESTOP_SYSTEMRESET_RESPONSE, retained = False, hostname = self.IP).payload.decode('utf-8'))
return
mqttResponseThread = threading.Thread(target = mqttResponse)
mqttResponseThread.daemon = True
mqttResponseThread.start()
# Adding a delay to make sure MQTT simple function is launched before publish is made. Quick fix from bug on App. Launcher.
time.sleep(0.2)
# Publish reset system request on MQTT
self.__mqttClient.publish(MQTTPATHS.ESTOP_SYSTEMRESET_REQUEST, "message is not important")
mqttResponseThread.join(EstopManager.TIMEOUT)
if mqttResponseThread.isAlive() :
self.__logger.error('MQTT response timeout.')
return False
else :
return return_value['value']
return return_value['value']
def getEstop(self):
return 'true' if self.__isEstopped else 'false'
class MachineAppSubprocessManager:
'''
Manages the lifetime of the MachineApp subprocess, forwards stdin commands and stdout information.
'''
def __init__(self):
self.__isRunning = False
self.__subprocess = None
self.__stdout = None
self.__stderr = None
self.__logger = logging.getLogger(__name__)
self.__notifier = getNotifier()
self.__stdthread = Thread(name='subprocess_stdout', target=self.__update)
self.__stdthread.daemon = True
self.__stdthread.start()
def start(self, inStateStepperMode, configuration):
'''
Starts running the MachineApp in a new process
'''
if self.__isRunning == True:
return False
with open('./internal/configuration.json', 'w') as f:
f.write(json.dumps(configuration, indent=4))
command = [ sys.executable, 'subapp.py' ]
if inStateStepperMode:
command.append('--inStateStepperMode')
self.__logger.info('Attempting to run subprocess: {}'.format(' '.join(command)))
self.__subprocess = subprocess.Popen(command, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
self.__isRunning = True
return True
def sendMsgToSubprocess(self, data):
'''
Write a JSON payload to stdin of the child process
'''
if self.__subprocess == None:
return False
self.__subprocess.stdin.write(str(json.dumps(data) + '\r\n').encode('utf-8'))
self.__subprocess.stdin.flush()
return True
def __update(self):
'''
Used to forward notifier messages from the child process to the client. This enables us to not
have to constantly disconnect/reconnect to the child process' websocket whenever the user pressed
start or stop.
We also catch standard 'print' outputs here too, and print them out to the parent process' console.
'''
while True: # Waiting to receive the start command, will sleep in the meantime
while self.__isRunning: # Received start and waiting on the subprocess stdout
if self.__subprocess == None or self.__subprocess.poll() != None or self.__subprocess.stdout.closed:
self.__isRunning = False
self.__logger.info('Subprocess is no longer active')
continue
while self.__subprocess != None and self.__subprocess.stdout != None and self.__subprocess.stdout.readable():
line = self.__subprocess.stdout.readline().decode('utf-8').strip()
if len(line) == 0:
break
line = line.strip()
try:
content = json.loads(line)
if not "type" in content:
continue
msgType = content["type"]
if msgType == SubprocessToParentMessage.NOTIFICATION:
notification = content["data"]
self.__notifier.sendMessage(notification['level'], notification['message'], notification['customPayload'])
except:
print(line)
time.sleep(1.0)
def terminate(self):
'''
Terminates the subprocess immediately
returns:
bool
Successfully terminated a running application or not
'''
if self.__subprocess == None:
return False
self.__subprocess.kill()
self.__subprocess = None
return True
def isRunning(self):
return self.__isRunning
def runServer():
restServer = RestServer()
restServer.run(host='0.0.0.0', port=3011, server='paste') | 36.595533 | 168 | 0.624695 | 14,130 | 0.958096 | 0 | 0 | 0 | 0 | 0 | 0 | 3,580 | 0.242745 |
6470ee21c76238510e2a174a475155ece7f28c66 | 30,430 | py | Python | minpiler/mind.py | neumond/minpiler | 2e37a9e0854383d3974af38e1cb2da0ecb8e2108 | [
"MIT"
] | 23 | 2020-12-20T03:39:30.000Z | 2022-03-23T15:47:10.000Z | minpiler/mind.py | neumond/minpiler | 2e37a9e0854383d3974af38e1cb2da0ecb8e2108 | [
"MIT"
] | 15 | 2020-12-21T01:12:22.000Z | 2021-04-19T10:40:11.000Z | minpiler/mind.py | neumond/minpiler | 2e37a9e0854383d3974af38e1cb2da0ecb8e2108 | [
"MIT"
] | 2 | 2022-02-12T19:19:50.000Z | 2022-02-12T21:33:35.000Z | import ast
import sys
from contextlib import contextmanager
from dataclasses import dataclass, field
from typing import Any, Callable
from . import mast, utils
_PY = (sys.version_info.major, sys.version_info.minor)
def _get_ast_slice(node):
if _PY >= (3, 9):
return node.slice
else:
assert isinstance(node.slice, ast.Index)
return node.slice.value
# Maps Python binary-operator AST nodes to Mindustry `op` instruction
# names; BinOpHandler emits `op <name> result lhs rhs` from these.
BIN_OP_MAP = {
    ast.Add: 'add',
    ast.Sub: 'sub',
    ast.Mult: 'mul',
    ast.Div: 'div',
    ast.FloorDiv: 'idiv',
    ast.Mod: 'mod',
    ast.Pow: 'pow',
    ast.LShift: 'shl',
    ast.RShift: 'shr',
    ast.BitOr: 'or',
    ast.BitXor: 'xor',
    ast.BitAnd: 'and',
    # MatMult is deliberately absent: no mapping, so get_type_map raises.
}
# Comparison operators for CompareHandler. Note that `is` is compiled as
# plain equality (same instruction as `==`).
COND_OP_MAP = {
    ast.Eq: 'equal',
    ast.NotEq: 'notEqual',
    ast.Lt: 'lessThan',
    ast.LtE: 'lessThanEq',
    ast.Gt: 'greaterThan',
    ast.GtE: 'greaterThanEq',
    ast.Is: 'equal',
}
# Each value is (instruction name, shortcut jump condition):
# BoolOpHandler jumps to the end label when the partial result compares
# `shortcut_condition` against False (short-circuit evaluation).
BOOL_OP_MAP = {  # op, shortcut_condition
    ast.And: ('land', 'equal'),
    ast.Or: ('or', 'notEqual'),
}
# Factories (value, result) -> mast instruction node. Operators without a
# dedicated instruction are synthesized with a literal zero operand:
# `not x` -> `x == 0`, `+x` -> `0 + x`, `-x` -> `0 - x`.
UNARY_OP_MAP = {
    ast.Invert: lambda val, result: mast.FunctionCall(
        'op not', [val], result),
    ast.Not: lambda val, result: mast.FunctionCall(
        'op equal', [mast.Literal(0), val], result),
    ast.UAdd: lambda val, result: mast.FunctionCall(
        'op add', [mast.Literal(0), val], result),
    ast.USub: lambda val, result: mast.FunctionCall(
        'op sub', [mast.Literal(0), val], result),
}
# Identifiers with built-in meaning to the compiler (functions and API
# objects). NameHandler raises ValueError when user code tries to use
# one of these as a plain variable name.
RESERVED_NAMES = {
    'print',
    'exit',
    'min',
    'max',
    'atan2',
    'dst',
    'noise',
    'abs',
    'log',
    'log10',
    'sin',
    'cos',
    'tan',
    'floor',
    'ceil',
    'sqrt',
    'rand',
    'print',
    'GetLink',
    'Draw',
    'Control',
    'Radar',
    'Sensor',
    'Material',
    'Liquid',
    'Property',
    'Sort',
    'Target',
    'UnitType',
    'BuildingType',
    'Building',
}
def get_type_map(map, item, desc):
    """Look up the handler for *item* keyed by its class in *map*.

    *desc* names the operation category (e.g. ``'BinOp'``) and is only
    used to build the error message. Raises ``ValueError`` when the
    node's class has no entry.
    """
    node_cls = type(item)
    if node_cls in map:
        return map[node_cls]
    raise ValueError(f'Unsupported {desc} {item}')
@dataclass
class BaseExpressionHandler:
    """Shared machinery for translating one AST expression node.

    Subclasses set ``AST_CLASS`` and implement :meth:`handle`, filling
    ``resmap`` (slot index -> result value) and appending setup
    instructions to ``pre``.
    """

    expr: Any
    trec: Callable = field(repr=False)
    scope: utils.Scope = field(repr=False)
    pre: list = field(default_factory=list)
    resmap: dict = field(default_factory=dict)

    # AST_CLASS = ast.Xxx

    def dev_dump(self):
        """Debug helper: print the AST dump of the wrapped expression."""
        print(ast.dump(self.expr))

    def get_results(self):
        """Return the produced values in slot order (0 .. len-1)."""
        return [self.resmap[slot] for slot in range(len(self.resmap))]

    def run_trec(self, expr):
        """Translate *expr* recursively, queueing its setup code here."""
        values, setup = self.trec(expr, self.scope)
        self.pre.extend(setup)
        return values

    def run_trec_single(self, expr):
        """Like :meth:`run_trec`, but yield exactly one value.

        Falls back to a ``None`` literal for expressions that produce
        no values.
        """
        values = self.run_trec(expr)
        if values:
            return values[0]
        return mast.Literal(None)

    def proc(self, name, *args):
        """Append a procedure-call instruction to the setup code."""
        self.pre.append(mast.ProcedureCall(name, args))

    def jump(self, label, op, *args):
        """Append a conditional jump to *label* to the setup code."""
        self.pre.append(mast.Jump(label, op, args))

    @contextmanager
    def sub_scope(self):
        """Enter a child scope for the duration of the ``with`` block."""
        self.scope = utils.Scope(self.scope)
        try:
            yield
        finally:
            self.scope = self.scope._parent_scope

    def handle(self):
        """Translate ``self.expr``; subclasses must override."""
        raise NotImplementedError
class ConstantHandler(BaseExpressionHandler):
    """Translate a literal constant into a ``mast.Literal`` value."""

    AST_CLASS = ast.Constant

    def handle(self):
        value = self.expr.value
        self.resmap[0] = mast.Literal(value)
class NameHandler(BaseExpressionHandler):
    """Resolve a bare identifier.

    Scope bindings win over everything; reserved built-in names may not
    be used as plain variables; anything else becomes a global
    ``mast.Name``.
    """

    AST_CLASS = ast.Name

    def handle(self):
        ident = self.expr.id
        if ident in self.scope:
            # Locally bound: reuse the stored value.
            self.resmap[0] = self.scope[ident]
        elif ident in RESERVED_NAMES:
            raise ValueError(f'The name {ident} is reserved')
        else:
            self.resmap[0] = mast.Name(ident)
class TupleHandler(BaseExpressionHandler):
    """Tuple expression: each element becomes one positional result."""
    AST_CLASS = ast.Tuple

    def handle(self):
        for position, element in enumerate(self.expr.elts):
            self.resmap[position] = self.run_trec_single(element)
class SubscriptHandler(BaseExpressionHandler):
    """Memory-cell read: ``cell[index]`` -> mlog ``read``."""
    AST_CLASS = ast.Subscript
    def handle(self):
        # memory cell access
        array_val = self.run_trec_single(self.expr.value)
        # _get_ast_slice (defined elsewhere) abstracts the py3.8/3.9
        # change to Subscript.slice.
        index_val = self.run_trec_single(_get_ast_slice(self.expr))
        self.resmap[0] = mast.Name()
        self.proc('read', self.resmap[0], array_val, index_val)
class UnaryOpHandler(BaseExpressionHandler):
    """Unary operator; UNARY_OP_MAP supplies an instruction factory."""
    AST_CLASS = ast.UnaryOp

    def handle(self):
        make_instruction = get_type_map(UNARY_OP_MAP, self.expr.op, 'UnaryOp')
        operand_val = self.run_trec_single(self.expr.operand)
        result = self.resmap[0] = mast.Name()
        self.pre.append(make_instruction(operand_val, result))
class BinOpHandler(BaseExpressionHandler):
    """Binary arithmetic: emits ``op <token> result left right``."""
    AST_CLASS = ast.BinOp

    def handle(self):
        token = get_type_map(BIN_OP_MAP, self.expr.op, 'BinOp')
        lhs = self.run_trec_single(self.expr.left)
        rhs = self.run_trec_single(self.expr.right)
        result = self.resmap[0] = mast.Name()
        self.proc(f'op {token}', result, lhs, rhs)
class CompareHandler(BaseExpressionHandler):
    """Chained comparison (``a < b <= c`` ...) with short-circuiting.

    Each pairwise comparison writes its boolean into the single result
    slot; a False result jumps straight past the remaining comparisons
    (whose operand-evaluation code is emitted after the jump and is
    therefore skipped), mirroring Python's chain semantics.
    """
    AST_CLASS = ast.Compare
    def handle(self):
        end_label = mast.Label()
        self.resmap[0] = mast.Name()
        a_val = self.run_trec_single(self.expr.left)
        for op, comparator in zip(self.expr.ops, self.expr.comparators):
            op = get_type_map(COND_OP_MAP, op, 'Compare')
            b_val = self.run_trec_single(comparator)
            self.proc(f'op {op}', self.resmap[0], a_val, b_val)
            self.jump(end_label, 'equal', self.resmap[0], mast.Literal(False))
            # The next comparison reuses this comparator as its left side.
            a_val = b_val
        self.pre.append(end_label)
class BoolOpHandler(BaseExpressionHandler):
    """``and`` / ``or`` with shortcut jumps.

    BOOL_OP_MAP yields the mlog op token plus the jump condition that
    triggers the shortcut.  Like Python, the expression's value is the
    last evaluated operand; ``bool_value`` only drives the shortcut test.
    """
    AST_CLASS = ast.BoolOp
    def handle(self):
        op, shortcut_condition = get_type_map(
            BOOL_OP_MAP, self.expr.op, 'BoolOp')
        end_label = mast.Label()
        self.resmap[0] = mast.Name()
        val = self.run_trec_single(self.expr.values[0])
        self.proc('set', self.resmap[0], val)
        self.jump(
            end_label, shortcut_condition, val, mast.Literal(False))
        bool_value = mast.Name()
        for value in self.expr.values[1:]:
            val = self.run_trec_single(value)
            self.proc(f'op {op}', bool_value, self.resmap[0], val)
            self.proc('set', self.resmap[0], val)
            self.jump(
                end_label, shortcut_condition, bool_value, mast.Literal(False))
        self.pre.append(end_label)
class IfExpHandler(BaseExpressionHandler):
    """Conditional expression ``body if test else orelse``.

    Only the selected branch executes at runtime: a false test jumps
    over the body code into the else arm.
    """
    AST_CLASS = ast.IfExp
    def handle(self):
        else_label = mast.Label()
        end_label = mast.Label()
        self.resmap[0] = mast.Name()
        cond = self.run_trec_single(self.expr.test)
        self.jump(else_label, 'equal', cond, mast.Literal(False))
        val = self.run_trec_single(self.expr.body)
        self.proc('set', self.resmap[0], val)
        self.jump(end_label, 'always')
        self.pre.append(else_label)
        val = self.run_trec_single(self.expr.orelse)
        self.proc('set', self.resmap[0], val)
        self.pre.append(end_label)
def _create_unary_op(token):
def fn(self, a):
self.resmap[0] = mast.Name()
self.proc(f'op {token}', self.resmap[0], a)
return fn
def _create_bin_op(token):
def fn(self, a, b):
self.resmap[0] = mast.Name()
self.proc(f'op {token}', self.resmap[0], a, b)
return fn
_ZERO = mast.Literal(0)
def build_attr_index(method_map):
patterns = {}
for name, method in method_map.items():
nm = tuple(1 if n == '1' else n for n in name.split('__'))
cur = patterns
for n in nm:
if n not in cur:
cur[n] = {}
cur = cur[n]
assert 'method' not in cur
cur['method'] = method
def resolve(nm):
pnames = []
cur = patterns
for n in nm:
if n.name is not None and n.name in cur:
cur = cur[n.name]
elif 1 in cur:
pnames.append(n)
cur = cur[1]
else:
raise IndexError(f'Unresolvable name {".".join(nm)}')
if 'method' not in cur:
raise IndexError(f'Unresolvable name {".".join(nm)}')
return cur['method'], pnames
return resolve
def no_at_const(n):
    # Strip the leading '@' from a constant name ('@copper' -> 'copper').
    # Note: lstrip removes *all* leading '@' characters.
    return mast.Name(n.name.lstrip('@'))
def sort_dir_fn(n):
    """Translate asc/desc sort-direction names into mlog literals 1/-1.

    Anything that is not a recognised mast.Name passes through untouched.
    """
    if not isinstance(n, mast.Name):
        return n
    direction = {'@asc': 1, 'asc': 1, '@desc': -1, 'desc': -1}.get(n.name)
    if direction is None:
        return n
    return mast.Literal(direction)
class CallHandler(BaseExpressionHandler):
    """Function-call expressions.

    ``func__*`` method names encode dotted call patterns resolved by
    build_attr_index (a ``1`` segment is a wildcard capturing the
    caller's name part).  ``func__1`` is the fallback that inlines
    calls to user-defined functions.
    """
    AST_CLASS = ast.Call
    # TODO: support multiple values
    func__M__min = _create_bin_op('min')
    # TODO: support multiple values
    func__M__max = _create_bin_op('max')
    func__M__atan2 = _create_bin_op('atan2')
    func__M__dst = _create_bin_op('dst')
    func__M__noise = _create_bin_op('noise')
    func__M__abs = _create_unary_op('abs')
    func__M__log = _create_unary_op('log')
    func__M__log10 = _create_unary_op('log10')
    func__M__sin = _create_unary_op('sin')
    func__M__cos = _create_unary_op('cos')
    func__M__tan = _create_unary_op('tan')
    func__M__floor = _create_unary_op('floor')
    func__M__ceil = _create_unary_op('ceil')
    func__M__sqrt = _create_unary_op('sqrt')
    func__M__rand = _create_unary_op('rand')
    def func__M__print(self, *args):
        # One mlog 'print' per argument.
        for arg in args:
            self.proc('print', arg)
    def func__1__printFlush(self, target):
        self.proc('printflush', target)
    def func__M__exit(self):
        self.proc('end')
    def func__M__linkCount(self):
        self.resmap[0] = mast.Name()
        self.proc('set', self.resmap[0], mast.Name('@links'))
    def func__M__getLink(self, index):
        self.resmap[0] = mast.Name()
        self.proc('getlink', self.resmap[0], index)
    def func__1__radar(
            self, unit, target1, target2, target3, sort_type, sort_dir):
        self.resmap[0] = mast.Name()
        self.proc(
            'radar',
            no_at_const(target1), no_at_const(target2), no_at_const(target3),
            no_at_const(sort_type), unit,
            sort_dir_fn(sort_dir), self.resmap[0])
    def func__1__sensor(self, unit, prop):
        self.resmap[0] = mast.Name()
        self.proc('sensor', self.resmap[0], unit, prop)
    def func__M__unit__bind(self, utype):
        self.proc('ubind', utype)
    def func__M__unit__radar(
            self, target1, target2, target3, sort_type, sort_dir):
        self.resmap[0] = mast.Name()
        self.proc(
            'uradar',
            no_at_const(target1), no_at_const(target2), no_at_const(target3),
            no_at_const(sort_type), mast.Name('turret1'),
            sort_dir_fn(sort_dir), self.resmap[0])
    def func__M__locate__building(self, block_type, enemy):
        # Results: (found, x, y, building).
        found = self.resmap[0] = mast.Name()
        x = self.resmap[1] = mast.Name()
        y = self.resmap[2] = mast.Name()
        building = self.resmap[3] = mast.Name()
        self.proc(
            'ulocate building', no_at_const(block_type), enemy,
            mast.Name('@copper'),
            x, y, found, building)
    def func__M__locate__ore(self, material):
        # Results: (found, x, y).
        found = self.resmap[0] = mast.Name()
        x = self.resmap[1] = mast.Name()
        y = self.resmap[2] = mast.Name()
        self.proc(
            'ulocate ore', mast.Name('core'), mast.Literal(True),
            material, x, y, found, mast.Name())
    def func__M__locate__spawn(self):
        found = self.resmap[0] = mast.Name()
        x = self.resmap[1] = mast.Name()
        y = self.resmap[2] = mast.Name()
        building = self.resmap[3] = mast.Name()
        self.proc(
            'ulocate spawn', mast.Name('core'), mast.Literal(True),
            mast.Name('@copper'), x, y, found, building)
    def func__M__locate__damaged(self):
        found = self.resmap[0] = mast.Name()
        x = self.resmap[1] = mast.Name()
        y = self.resmap[2] = mast.Name()
        building = self.resmap[3] = mast.Name()
        self.proc(
            'ulocate damaged', mast.Name('core'), mast.Literal(True),
            mast.Name('@copper'), x, y, found, building)
    def func__M__draw__clear(self, r, g, b):
        self.proc('draw clear', r, g, b)
    def func__M__draw__color(self, r, g, b, a):
        self.proc('draw color', r, g, b, a)
    def func__M__draw__stroke(self, width):
        self.proc('draw stroke', width)
    def func__M__draw__line(self, x, y, x2, y2):
        self.proc('draw line', x, y, x2, y2)
    def func__M__draw__rect(self, x, y, width, height):
        self.proc('draw rect', x, y, width, height)
    def func__M__draw__lineRect(self, x, y, width, height):
        self.proc('draw lineRect', x, y, width, height)
    def func__M__draw__poly(self, x, y, sides, radius, rotation):
        self.proc('draw poly', x, y, sides, radius, rotation)
    def func__M__draw__linePoly(self, x, y, sides, radius, rotation):
        self.proc('draw linePoly', x, y, sides, radius, rotation)
    def func__M__draw__triangle(self, x, y, x2, y2, x3, y3):
        self.proc('draw triangle', x, y, x2, y2, x3, y3)
    def func__M__draw__image(self, x, y, image, size, rotation):
        self.proc('draw image', x, y, image, size, rotation)
    def func__1__drawFlush(self, target):
        self.proc('drawflush', target)
    def func__1__setEnabled(self, unit, is_enabled):
        self.proc('control enabled', unit, is_enabled)
    def func__1__targetPosition(self, unit, x, y, shoot):
        self.proc('control shoot', unit, x, y, shoot)
    def func__1__targetObject(self, unit, target, shoot):
        self.proc('control shootp', unit, target, shoot)
    def func__1__configure(self, unit, value):
        self.proc('control configure', unit, value)
    def func__M__unit__stop(self):
        # ucontrol takes 5 operands; unused slots are zero-filled.
        self.proc('ucontrol stop', _ZERO, _ZERO, _ZERO, _ZERO, _ZERO)
    def func__M__unit__move(self, x, y):
        self.proc('ucontrol move', x, y, _ZERO, _ZERO, _ZERO)
    def func__M__unit__approach(self, x, y, radius):
        self.proc('ucontrol approach', x, y, radius, _ZERO, _ZERO)
    def func__M__unit__boost(self, value):
        self.proc('ucontrol boost', value, _ZERO, _ZERO, _ZERO, _ZERO)
    def func__M__unit__pathfind(self):
        self.proc('ucontrol pathfind', _ZERO, _ZERO, _ZERO, _ZERO, _ZERO)
    def func__M__unit__targetPosition(self, x, y, shoot):
        self.proc('ucontrol target', x, y, shoot, _ZERO, _ZERO)
    def func__M__unit__targetObject(self, unit, shoot):
        self.proc('ucontrol targetp', unit, shoot, _ZERO, _ZERO, _ZERO)
    def func__M__unit__itemDrop(self, target, amount):
        self.proc('ucontrol itemDrop', target, amount, _ZERO, _ZERO, _ZERO)
    def func__M__unit__itemTake(self, target, material, amount):
        self.proc('ucontrol itemTake', target, material, amount, _ZERO, _ZERO)
    def func__M__unit__payDrop(self):
        self.proc('ucontrol payDrop', _ZERO, _ZERO, _ZERO, _ZERO, _ZERO)
    def func__M__unit__payTake(self, amount):
        self.proc('ucontrol payTake', amount, _ZERO, _ZERO, _ZERO, _ZERO)
    def func__M__unit__mine(self, x, y):
        self.proc('ucontrol mine', x, y, _ZERO, _ZERO, _ZERO)
    def func__M__unit__setFlag(self, value):
        self.proc('ucontrol flag', value, _ZERO, _ZERO, _ZERO, _ZERO)
    def func__M__unit__build(self, x, y, block, rotation, config):
        self.proc('ucontrol build', x, y, block, rotation, config)
    def func__M__unit__getBlock(self, x, y):
        btype = self.resmap[0] = mast.Name()
        unit = self.resmap[1] = mast.Name()
        self.proc('ucontrol getBlock', x, y, btype, unit, _ZERO)
    def func__M__unit__within(self, x, y, radius):
        self.resmap[0] = mast.Name()
        self.proc('ucontrol within', x, y, radius, self.resmap[0], _ZERO)
    def func__1(self, fname, *args):
        # Fallback: call a user-defined function (see FunctionDef
        # handler): set arguments, store the return address computed
        # from @counter, jump in, then expose the function's results.
        fname = fname.name
        if fname not in self.scope:
            raise NameError(f'Undefined function {fname}')
        fdef = self.scope[fname]
        assert isinstance(fdef, utils.FuncDef)
        if len(args) < fdef.n_args:
            raise TypeError(f'Insufficient arguments for function {fname}')
        for fa, sa in zip(fdef.args, args):
            self.proc('set', fa, sa)
        self.proc(
            'op add', fdef.return_addr, mast.Name('@counter'), mast.Literal(1))
        self.jump(fdef.start_label, 'always')
        for k, v in fdef.resmap.items():
            self.resmap[k] = v
    # Pattern trie built from the func__* members above.
    _resolver = staticmethod(build_attr_index({
        k[len('func__'):]: v
        for k, v in vars().items()
        if k.startswith('func__')
    }))
    def resolve_func(self, value):
        # Flatten dotted callee names into a list of mast.Name parts.
        if isinstance(value, ast.Name):
            return [self.run_trec_single(value)]
        elif isinstance(value, ast.Attribute):
            return [*self.resolve_func(value.value), mast.Name(value.attr)]
        else:
            raise ValueError(
                'Expressions resulting in functions are not allowed, '
                'only direct calls of named functions: func(1, 2, 3)'
            )
    def handle(self):
        nm = self.resolve_func(self.expr.func)
        method, pre_args = self._resolver(nm)
        if self.expr.keywords:
            raise ValueError('Keyword arguments are not supported')
        arg_vals = [self.run_trec_single(arg) for arg in self.expr.args]
        method(self, *pre_args, *arg_vals)
class AttributeHandler(BaseExpressionHandler):
    """Attribute access: @-constants and sensor property reads.

    ``prop__*`` names are dispatch patterns (see build_attr_index).
    """
    AST_CLASS = ast.Attribute
    def prop__M__at__1(self, attr):
        # at.<name> -> mlog @-constant, translating '_' to '-'.
        self.resmap[0] = mast.Name(f'@{attr.replace("_", "-")}')
    def prop__1__1(self, unit, prop):
        # <obj>.<prop> -> sensor read of the @-property on the object.
        self.resmap[0] = mast.Name()
        self.proc(
            'sensor', self.resmap[0],
            unit, mast.Name(f'@{prop.replace("_", "-")}'))
    def prop__M__unit__1(self, prop):
        # unit.<prop> reads from the currently bound @unit.
        self.prop__1__1(mast.Name('@unit'), prop)
    def prop__M__at__unit__1(self, prop):
        self.prop__M__unit__1(prop)
    _resolver = staticmethod(build_attr_index({
        k[len('prop__'):]: v
        for k, v in vars().items()
        if k.startswith('prop__')
    }))
    def resolve_value(self, value):
        # Flatten a dotted attribute chain into mast.Name parts.
        if isinstance(value, ast.Name):
            return [self.run_trec_single(value)]
        elif isinstance(value, ast.Attribute):
            return [*self.resolve_value(value.value), mast.Name(value.attr)]
        else:
            raise ValueError(
                'Expressions are not allowed before attribute access, '
                'use names of objects directly: Material.copper'
            )
    def handle(self):
        nm = self.resolve_value(self.expr)
        method, pre_args = self._resolver(nm)
        if method.__name__.endswith('__1'):
            # Wildcard-terminated patterns take the raw attribute string.
            pre_args[-1] = pre_args[-1].name
        method(self, *pre_args)
# Statements ===================================
class ExprStatementHandler(BaseExpressionHandler):
    """Expression used as a statement: emit its code, discard results."""
    AST_CLASS = ast.Expr
    def handle(self):
        self.run_trec(self.expr.value)
def _check_assignment_to_reserved(name):
    """Reject writes to names reserved for mlog builtins."""
    if name not in RESERVED_NAMES:
        return
    raise ValueError(f'The name {name} is reserved')
class AssignStatementHandler(BaseExpressionHandler):
    """Single-target assignment: name, tuple unpack, or memory write."""
    AST_CLASS = ast.Assign
    def _assign(self, targets, values):
        # Extra values on the right are silently dropped; too few raise.
        assert len(targets) <= len(values)
        for target, value in zip(targets, values):
            target = self.run_trec_single(target)
            self.proc('set', target, value)
    def named_assign(self, target, value):
        _check_assignment_to_reserved(target.id)
        retvals = self.run_trec(value)
        self._assign([target], retvals)
    def tuple_assign(self, target, value):
        # Only plain names may appear inside a tuple target.
        assert all(isinstance(n, ast.Name) for n in target.elts)
        retvals = self.run_trec(value)
        self._assign(target.elts, retvals)
    def memory_assign(self, target, value):
        # cell[index] = value -> mlog 'write'.
        if not isinstance(target.value, ast.Name):
            raise ValueError(f'Unsupported assignment target {target}')
        _check_assignment_to_reserved(target.value.id)
        index_val = self.run_trec_single(_get_ast_slice(target))
        value_val = self.run_trec_single(value)
        self.proc('write', value_val, mast.Name(target.value.id), index_val)
    TARGET_MAP = {
        ast.Name: named_assign,
        ast.Subscript: memory_assign,
        ast.Tuple: tuple_assign,
    }
    def handle(self):
        if len(self.expr.targets) != 1:
            raise ValueError(
                'Only single target can be used in assignment: a = 3')
        target = self.expr.targets[0]
        if type(target) not in self.TARGET_MAP:
            raise ValueError(f'Unsupported assignment target {target}')
        method = self.TARGET_MAP[type(target)]
        method(self, target, self.expr.value)
class AugAssignStatementHandler(BaseExpressionHandler):
    """Augmented assignment (``x += y``, ``cell[i] *= y``, ...)."""
    AST_CLASS = ast.AugAssign
    def named_assign(self, target, op, operand):
        operand_val = self.run_trec_single(operand)
        t = self.run_trec_single(target)
        # In-place: op result written back over the target.
        self.proc(f'op {op}', t, t, operand_val)
    def memory_assign(self, target, op, operand):
        # Read-modify-write on a memory cell.
        if not isinstance(target.value, ast.Name):
            raise ValueError(f'Unsupported assignment target {target}')
        index_val = self.run_trec_single(_get_ast_slice(target))
        operand_val = self.run_trec_single(operand)
        op_output = mast.Name()
        cell = self.run_trec_single(target.value)
        self.proc('read', op_output, cell, index_val)
        self.proc(f'op {op}', op_output, op_output, operand_val)
        self.proc('write', op_output, cell, index_val)
    TARGET_MAP = {
        ast.Name: named_assign,
        ast.Subscript: memory_assign,
    }
    def handle(self):
        method = get_type_map(
            self.TARGET_MAP, self.expr.target, 'assignment target')
        op = get_type_map(BIN_OP_MAP, self.expr.op, 'BinOp')
        method(self, self.expr.target, op, self.expr.value)
class AnnAssignStatementHandler(BaseExpressionHandler):
    """Bare annotations (``x: int``) are accepted and ignored; an
    annotated assignment with a value is rejected."""
    AST_CLASS = ast.AnnAssign
    def handle(self):
        if self.expr.value is not None:
            raise ValueError('Assignments near annotations are not supported')
class IfStatementHandler(BaseExpressionHandler):
    """``if``/``else`` statement: false test jumps over the body."""
    AST_CLASS = ast.If

    def handle(self):
        done = mast.Label()
        orelse = self.expr.orelse
        # Without an else arm the false branch jumps straight to the end.
        branch = mast.Label() if orelse else done
        condition = self.run_trec_single(self.expr.test)
        self.jump(branch, 'equal', condition, mast.Literal(False))
        for statement in self.expr.body:
            self.run_trec(statement)
        if orelse:
            self.jump(done, 'always')
            self.pre.append(branch)
            for statement in orelse:
                self.run_trec(statement)
        self.pre.append(done)
class WhileStatementHandler(BaseExpressionHandler):
    """``while`` loop: test at the top, unconditional jump back after
    the body.

    NOTE(review): a ``while ... else`` clause is silently ignored by
    this handler — confirm upstream validation rejects it.
    """
    AST_CLASS = ast.While

    def handle(self):
        top = mast.Label()
        done = mast.Label()
        self.pre.append(top)
        condition = self.run_trec_single(self.expr.test)
        self.jump(done, 'equal', condition, mast.Literal(False))
        for statement in self.expr.body:
            self.run_trec(statement)
        self.jump(top, 'always')
        self.pre.append(done)
class ForStatementHandler(BaseExpressionHandler):
    """``for`` loops over ``range(...)`` calls or literal tuples/lists.

    Each iterator kind returns three emitters: test code (writes the
    boolean ``test_val``), pre-body code, and after-body (advance) code.
    """
    AST_CLASS = ast.For
    def target_name(self, target):
        return self.run_trec_single(target)
    TARGET_MAP = {
        ast.Name: target_name,
    }
    def iter_call__range(self, test_val, pre_target_val, *args):
        # range(stop) / range(start, stop) / range(start, stop, step).
        stop = mast.Name()
        step = mast.Name()
        # bidi: step sign unknown at compile time, so the test must
        # branch on the direction.
        bidi = False
        if len(args) == 1:
            self.proc('set', pre_target_val, mast.Literal(0))
            self.proc('set', step, mast.Literal(1))
            self.proc('set', stop, args[0])
        elif len(args) == 2:
            self.proc('set', pre_target_val, args[0])
            self.proc('set', step, mast.Literal(1))
            self.proc('set', stop, args[1])
        elif len(args) == 3:
            self.proc('set', pre_target_val, args[0])
            self.proc('set', step, args[2])
            self.proc('set', stop, args[1])
            bidi = True
        else:
            raise ValueError
        def create_test_code():
            if not bidi:
                self.proc('op lessThan', test_val, pre_target_val, stop)
            else:
                decr = mast.Label()
                fin = mast.Label()
                self.jump(decr, 'lessThan', step, mast.Literal(0))
                self.proc('op lessThan', test_val, pre_target_val, stop)
                self.jump(fin, 'always')
                self.pre.append(decr)
                self.proc('op greaterThan', test_val, pre_target_val, stop)
                self.pre.append(fin)
        def create_pre_body():
            pass
        def create_after_body():
            self.proc('op add', pre_target_val, pre_target_val, step)
        return create_test_code, create_pre_body, create_after_body
    def iter_call(self, call, test_val, pre_target_val):
        if not isinstance(call.func, ast.Name):
            raise ValueError
        assert call.keywords == []
        fname = call.func.id
        m = getattr(self, 'iter_call__' + fname)
        args = [self.run_trec_single(a) for a in call.args]
        return m(test_val, pre_target_val, *args)
    def iter_tuple(self, tup, test_val, pre_target_val):
        # Iterate a literal tuple/list via a jump table: each entry sets
        # the loop value then returns through @counter.
        items = [self.run_trec_single(item) for item in tup.elts]
        if not items:
            raise ValueError('Tuple must not be empty')
        after_table = mast.Label()
        tuple_start = mast.Label()
        ret_addr = mast.Name()
        pointer = mast.Name()
        self.proc('set', pointer, tuple_start)
        self.jump(after_table, 'always')
        self.pre.append(tuple_start)
        for item in items:
            self.proc('set', pre_target_val, item)
            self.proc('set', mast.Name('@counter'), ret_addr)
        self.pre.append(after_table)
        def create_test_code():
            # Loop while the pointer is still inside the table.
            self.proc('op lessThan', test_val, pointer, after_table)
        def create_pre_body():
            self.proc(
                'op add', ret_addr, mast.Name('@counter'), mast.Literal(1))
            self.proc('set', mast.Name('@counter'), pointer)
        def create_after_body():
            # Each table entry is two instructions long.
            self.proc('op add', pointer, pointer, mast.Literal(2))
        return create_test_code, create_pre_body, create_after_body
    ITER_MAP = {
        ast.Call: iter_call,
        ast.Tuple: iter_tuple,
        ast.List: iter_tuple,
    }
    def handle(self):
        loop_label = mast.Label()
        else_label = mast.Label()
        end_label = mast.Label()
        test_val = mast.Name()
        pre_target_val = mast.Name()
        target = get_type_map(
            self.TARGET_MAP, self.expr.target, 'loop counter variable',
        )(self, self.expr.target)
        create_test_code, create_pre_body, create_after_body = get_type_map(
            self.ITER_MAP, self.expr.iter, 'iterator',
        )(self, self.expr.iter, test_val, pre_target_val)
        self.pre.append(loop_label)
        create_test_code()
        self.jump(else_label, 'equal', test_val, mast.Literal(False))
        create_pre_body()
        self.proc('set', target, pre_target_val)
        for stmt in self.expr.body:
            self.run_trec(stmt)
        create_after_body()
        self.jump(loop_label, 'always')
        self.pre.append(else_label)
        for stmt in self.expr.orelse:
            self.run_trec(stmt)
        self.pre.append(end_label)
class FunctionDefStatementHandler(BaseExpressionHandler):
    """Function definition: body is emitted inline once and jumped over;
    calls enter at ``start_label`` and return via a stored @counter
    address (see CallHandler.func__1)."""
    AST_CLASS = ast.FunctionDef
    def handle(self):
        # Reject every signature feature the backend can't express.
        if _PY >= (3, 8):
            assert self.expr.args.posonlyargs == []
            assert self.expr.type_comment is None
        assert self.expr.args.vararg is None
        assert self.expr.args.kwonlyargs == []
        assert self.expr.args.kw_defaults == []
        assert self.expr.args.kwarg is None
        assert self.expr.args.defaults == []
        assert self.expr.decorator_list == []
        assert self.expr.returns is None
        fdef = utils.FuncDef(self.expr.name, len(self.expr.args.args))
        self.scope[self.expr.name] = fdef
        end_label = mast.Label()
        self.jump(end_label, 'always')  # skip function body
        self.pre.append(fdef.start_label)
        # Collect the body into a separate buffer so result-slot
        # initialization can be emitted before it (see below).
        before, self.pre = self.pre, []
        with self.sub_scope():
            self.scope._current_func = fdef
            for a, n in zip(self.expr.args.args, fdef.args):
                self.scope[a.arg] = n
            for stmt in self.expr.body:
                self.run_trec(stmt)
            fdef.create_return(self)
        self.pre.append(end_label)
        # we have to do this here, since we need to know
        # max size of return tuple
        body, self.pre = self.pre, before
        for rv in fdef.resmap.values():
            self.proc('set', rv, mast.Literal(None))
        self.pre.extend(body)
class ReturnHandler(BaseExpressionHandler):
    """``return`` inside a function body: copy values into the
    function's shared result slots, then jump back to the caller."""
    AST_CLASS = ast.Return
    def handle(self):
        fdef = self.scope.current_func
        if fdef is None:
            raise ValueError('Returning outside function context')
        if self.expr.value is not None:
            for index, value in enumerate(self.run_trec(self.expr.value)):
                self.resmap[index] = value
                # Grow the function's result slots on demand.
                if index not in fdef.resmap:
                    fdef.resmap[index] = mast.Name()
                self.proc('set', fdef.resmap[index], value)
        fdef.create_return(self)
class ImportFromHandler(BaseExpressionHandler):
    """``from x import y`` statements are accepted and ignored so
    sources can also run under a normal Python interpreter."""
    AST_CLASS = ast.ImportFrom
    def handle(self):
        pass  # intentionally do nothing
# Dispatch table: AST node class -> handler class, gathered from all
# direct subclasses of BaseExpressionHandler defined above.
AST_NODE_MAP = {
    subcls.AST_CLASS: subcls
    for subcls in BaseExpressionHandler.__subclasses__()
}
def _create_ast_constant_hack(ast_cls, conv):
    """Route a pre-3.8 AST literal class through ConstantHandler."""
    def make_handler(anode, *args, **kwargs):
        const = ast.Constant(conv(anode))
        return ConstantHandler(const, *args, **kwargs)
    AST_NODE_MAP[ast_cls] = make_handler
# Python < 3.8 used dedicated AST literal nodes instead of ast.Constant;
# route them all through ConstantHandler.
if _PY < (3, 8):
    _create_ast_constant_hack(ast.Num, lambda expr: expr.n)
    _create_ast_constant_hack(ast.NameConstant, lambda expr: expr.value)
    _create_ast_constant_hack(ast.Str, lambda expr: expr.s)
    _create_ast_constant_hack(ast.Ellipsis, lambda expr: ...)
def transform_expr(expr, scope):
    """Translate one AST node.

    Returns ``(result values, emitted instructions)``; passed to each
    handler as its recursion callback.
    """
    handler_cls = get_type_map(AST_NODE_MAP, expr, 'expression')
    handler = handler_cls(expr, transform_expr, scope)
    handler.handle()
    return handler.get_results(), handler.pre
| 30.675403 | 79 | 0.610746 | 25,909 | 0.85143 | 163 | 0.005357 | 1,136 | 0.037332 | 0 | 0 | 2,991 | 0.098291 |
6471f48da26ddba2df69a8c07fa52ed1ac917387 | 1,499 | py | Python | sdk/python/pulumi_aws_native/redshift/_enums.py | AaronFriel/pulumi-aws-native | 5621690373ac44accdbd20b11bae3be1baf022d1 | [
"Apache-2.0"
] | 29 | 2021-09-30T19:32:07.000Z | 2022-03-22T21:06:08.000Z | sdk/python/pulumi_aws_native/redshift/_enums.py | AaronFriel/pulumi-aws-native | 5621690373ac44accdbd20b11bae3be1baf022d1 | [
"Apache-2.0"
] | 232 | 2021-09-30T19:26:26.000Z | 2022-03-31T23:22:06.000Z | sdk/python/pulumi_aws_native/redshift/_enums.py | AaronFriel/pulumi-aws-native | 5621690373ac44accdbd20b11bae3be1baf022d1 | [
"Apache-2.0"
] | 4 | 2021-11-10T19:42:01.000Z | 2022-02-05T10:15:49.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'EventSubscriptionEventCategoriesItem',
'EventSubscriptionSeverity',
'EventSubscriptionSourceType',
'EventSubscriptionStatus',
'ScheduledActionState',
]
class EventSubscriptionEventCategoriesItem(str, Enum):
    """
    An event category filter value for the event notification subscription.
    """
    CONFIGURATION = "configuration"
    MANAGEMENT = "management"
    MONITORING = "monitoring"
    SECURITY = "security"
    PENDING = "pending"
class EventSubscriptionSeverity(str, Enum):
    """
    Specifies the Amazon Redshift event severity to be published by the event notification subscription.
    """
    # str mixin: members serialize as their literal string values.
    ERROR = "ERROR"
    INFO = "INFO"
class EventSubscriptionSourceType(str, Enum):
    """
    The type of source that will be generating the events.
    """
    # Values are the kebab-case strings used on the wire.
    CLUSTER = "cluster"
    CLUSTER_PARAMETER_GROUP = "cluster-parameter-group"
    CLUSTER_SECURITY_GROUP = "cluster-security-group"
    CLUSTER_SNAPSHOT = "cluster-snapshot"
    SCHEDULED_ACTION = "scheduled-action"
class EventSubscriptionStatus(str, Enum):
    """
    The status of the Amazon Redshift event notification subscription.
    """
    ACTIVE = "active"
    NO_PERMISSION = "no-permission"
    TOPIC_NOT_EXIST = "topic-not-exist"
class ScheduledActionState(str, Enum):
    """
    The state of the scheduled action.
    """
    ACTIVE = "ACTIVE"
    DISABLED = "DISABLED"
6472e6b9caa6ef8c7d6637f36d731e5edcd08f66 | 1,739 | py | Python | openmdao/utils/tests/test_cs_safe.py | friedenhe/OpenMDAO | db1d7e22a8bf9f66afa82ec3544b7244d5545f6d | [
"Apache-2.0"
] | 451 | 2015-07-20T11:52:35.000Z | 2022-03-28T08:04:56.000Z | openmdao/utils/tests/test_cs_safe.py | friedenhe/OpenMDAO | db1d7e22a8bf9f66afa82ec3544b7244d5545f6d | [
"Apache-2.0"
] | 1,096 | 2015-07-21T03:08:26.000Z | 2022-03-31T11:59:17.000Z | openmdao/utils/tests/test_cs_safe.py | friedenhe/OpenMDAO | db1d7e22a8bf9f66afa82ec3544b7244d5545f6d | [
"Apache-2.0"
] | 301 | 2015-07-16T20:02:11.000Z | 2022-03-28T08:04:39.000Z | import numpy as np
import unittest
from openmdao.utils import cs_safe
from openmdao.utils.assert_utils import assert_near_equal
class TestCSSafeFuctions(unittest.TestCase):
def test_abs(self):
test_data = np.array([1, -1, -2, 2, 5.675, -5.676], dtype='complex')
assert_near_equal(cs_safe.abs(test_data), np.abs(test_data))
test_data += complex(0,1e-50)
cs_derivs = cs_safe.abs(test_data).imag/1e-50
expected = [1, -1, -1, 1, 1, -1]
assert_near_equal(cs_derivs, expected)
def test_norm(self):
test_data = np.array([[1, 2, 3, -4],[5, 6, 7, -8]], dtype='complex')
assert_near_equal(cs_safe.norm(test_data,axis=None), np.linalg.norm(test_data,axis=None))
assert_near_equal(cs_safe.norm(test_data,axis=0), np.linalg.norm(test_data,axis=0))
assert_near_equal(cs_safe.norm(test_data,axis=1), np.linalg.norm(test_data,axis=1))
deriv_test_data = test_data.copy()
deriv_test_data[0,0] += complex(0, 1e-50)
cs_deriv = cs_safe.norm(deriv_test_data).imag/1e-50
expected = 1/np.linalg.norm(test_data) * test_data[0,0].real
assert_near_equal(cs_deriv, expected)
def test_arctan2(self):
x = np.array([-1, +1, +1, -1], dtype='complex')
y = np.array([-1, -1, +1, +1], dtype='complex')
expected = np.array([-2.35619449, -0.78539816, 0.78539816, 2.35619449])
assert_near_equal(cs_safe.arctan2(y, x), expected, tolerance=1e-8)
x += complex(0,1e-50)
y += complex(0,1e-50)
cs_derivs = cs_safe.arctan2(y, x).imag/1e-50
expected = [0., 1., 0., -1.]
assert_near_equal(cs_derivs, expected)
if __name__ == "__main__":
unittest.main() | 29.982759 | 97 | 0.635423 | 1,556 | 0.894767 | 0 | 0 | 0 | 0 | 0 | 0 | 46 | 0.026452 |
64731da3c8956920b9c929cc9f200ff529bc2c34 | 1,341 | py | Python | scripts/main.py | kjenney/community-ops | c9132079e3685f7457199ef2f37c7d5d8361d67e | [
"Apache-2.0"
] | 14 | 2021-08-10T03:46:25.000Z | 2022-03-16T11:25:01.000Z | scripts/main.py | kjenney/community-ops | c9132079e3685f7457199ef2f37c7d5d8361d67e | [
"Apache-2.0"
] | null | null | null | scripts/main.py | kjenney/community-ops | c9132079e3685f7457199ef2f37c7d5d8361d67e | [
"Apache-2.0"
] | 4 | 2020-11-03T07:14:45.000Z | 2022-02-25T23:31:53.000Z | #!/usr/bin/env python
from parser.configuration import ConfigurationParser
from deployer.helm import HelmDeployer
from deployer.shell import ShellDeployer
from deployer.kustomize import KustomizeDeployer
from deployer.manifest import ManifestDeployer
from deployer.istio import IstioDeployer
from utils import *
import os
import click
DEFAULT_CLUSTER_CONFIG = "config.yaml"
DEFAULT_CLUSTER_MANIFEST_DIR = "manifests/"
@click.command()
@click.option("--cluster-name",
help="Cluster name to where the runtime should be deployed to",
required=True)
def main(cluster_name):
os.chdir("clusters/" + cluster_name)
configuration = ConfigurationParser(DEFAULT_CLUSTER_CONFIG)
configuration.validate()
shell = ShellDeployer()
print_header("Deploying Kustomize")
kustomize = KustomizeDeployer(shell, cluster_name)
kustomize.deploy()
print_header("Deploying Manifest")
manifest = ManifestDeployer(shell, DEFAULT_CLUSTER_MANIFEST_DIR, cluster_name)
manifest.deploy()
print_header("Deploying Helm")
helm = HelmDeployer(shell, cluster_name)
helm.deploy()
print_header("Deploying Istio")
istio_config_file = configuration.get_istio()
istio = IstioDeployer(shell, istio_config_file, cluster_name)
istio.deploy()
if __name__ == '__main__':
main()
| 27.367347 | 82 | 0.753915 | 0 | 0 | 0 | 0 | 879 | 0.655481 | 0 | 0 | 214 | 0.159582 |
647401346cf10238d3f0044de0b2d66f7bc135f8 | 203 | py | Python | python/ex034.py | deniseicorrea/Aulas-de-Python | c5bcafa34f03ea4b9c73805b58c8004bb13f70e5 | [
"MIT"
] | null | null | null | python/ex034.py | deniseicorrea/Aulas-de-Python | c5bcafa34f03ea4b9c73805b58c8004bb13f70e5 | [
"MIT"
] | null | null | null | python/ex034.py | deniseicorrea/Aulas-de-Python | c5bcafa34f03ea4b9c73805b58c8004bb13f70e5 | [
"MIT"
] | null | null | null | salario = float(input('Qual o seu salário? R$ '))
if salario <= 1250:
novo = salario + (salario * 15 / 100)
else:
novo = salario + (salario * 10 / 100)
print(f'Seu novo salário é R${novo :.2f}.') | 33.833333 | 49 | 0.610837 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 64 | 0.31068 |
6475a7145db7db9856cc53ae5ca36a32dd1e2c4c | 3,615 | py | Python | custom/icds/messaging/custom_recipients.py | kkrampa/commcare-hq | d64d7cad98b240325ad669ccc7effb07721b4d44 | [
"BSD-3-Clause"
] | 1 | 2020-05-05T13:10:01.000Z | 2020-05-05T13:10:01.000Z | custom/icds/messaging/custom_recipients.py | kkrampa/commcare-hq | d64d7cad98b240325ad669ccc7effb07721b4d44 | [
"BSD-3-Clause"
] | 1 | 2019-12-09T14:00:14.000Z | 2019-12-09T14:00:14.000Z | custom/icds/messaging/custom_recipients.py | MaciejChoromanski/commcare-hq | fd7f65362d56d73b75a2c20d2afeabbc70876867 | [
"BSD-3-Clause"
] | 5 | 2015-11-30T13:12:45.000Z | 2019-07-01T19:27:07.000Z | from __future__ import absolute_import
from __future__ import unicode_literals
from corehq.apps.locations.models import SQLLocation
from corehq.form_processor.models import CommCareCaseIndexSQL
from custom.icds.case_relationships import (
mother_person_case_from_ccs_record_case,
mother_person_case_from_child_health_case,
mother_person_case_from_child_person_case,
)
from custom.icds.const import SUPERVISOR_LOCATION_TYPE_CODE
from custom.icds.exceptions import CaseRelationshipError
from datetime import datetime
from dimagi.utils.logging import notify_exception
def skip_notifying_missing_mother_person_case(e):
    # https://manage.dimagi.com/default.asp?271995
    # Some child person cases legitimately have no mother person case,
    # so a failed 'mother' child-index lookup on a person case is
    # expected and should not be notified.
    if e.child_case_type != 'person' or e.identifier != 'mother':
        return False
    if e.relationship != CommCareCaseIndexSQL.CHILD:
        return False
    return e.num_related_found == 0
def skip_notifying_missing_ccs_record_parent(e):
    # https://manage.dimagi.com/default.asp?277600
    # Known open issue: ccs_record cases missing the 'parent' index are
    # common, so notifications are muted until the cut-off date below,
    # after which they fire again so the issue can be revisited.
    if datetime.utcnow() >= datetime(2018, 8, 1):
        return False
    return (
        e.child_case_type == 'ccs_record' and
        e.identifier == 'parent' and
        e.relationship == CommCareCaseIndexSQL.CHILD and
        e.num_related_found == 0
    )
def recipient_mother_person_case_from_ccs_record_case(case_schedule_instance):
    """Resolve the mother person case for a ccs_record, or None."""
    try:
        return mother_person_case_from_ccs_record_case(case_schedule_instance.case)
    except CaseRelationshipError as e:
        if not skip_notifying_missing_ccs_record_parent(e):
            notify_exception(None, message="ICDS ccs_record relationship error")
    return None
def recipient_mother_person_case_from_ccs_record_case_excl_migrated_or_opted_out(case_schedule_instance):
    """Same lookup, but drop mothers who migrated away or opted out."""
    from custom.icds.messaging.custom_content import person_case_is_migrated_or_opted_out
    mother = recipient_mother_person_case_from_ccs_record_case(case_schedule_instance)
    if mother is not None and not person_case_is_migrated_or_opted_out(mother):
        return mother
    return None
def recipient_mother_person_case_from_child_health_case(case_schedule_instance):
    """Resolve the mother person case for a child_health case, or None."""
    try:
        return mother_person_case_from_child_health_case(case_schedule_instance.case)
    except CaseRelationshipError as e:
        if not skip_notifying_missing_mother_person_case(e):
            notify_exception(None, message="ICDS child health case relationship error")
    return None
def recipient_mother_person_case_from_child_person_case(case_schedule_instance):
    """Look up the mother person case for the instance's child person case.

    Returns None when the relationship is missing, notifying unless the
    error matches the known skip condition.
    """
    try:
        return mother_person_case_from_child_person_case(case_schedule_instance.case)
    except CaseRelationshipError as err:
        if not skip_notifying_missing_mother_person_case(err):
            notify_exception(None, message="ICDS child person case relationship error")
    return None
def supervisor_from_awc_owner(case_schedule_instance):
    """Return the supervisor location that is the parent of the AWC owning
    the instance's case, or None if it cannot be resolved.
    """
    if not case_schedule_instance.case:
        return None

    # Single query: look up the AWC, require it to have a parent location,
    # and require that parent to be a supervisor.
    awc_location = SQLLocation.objects.filter(
        location_id=case_schedule_instance.case.owner_id,
        parent__location_type__code=SUPERVISOR_LOCATION_TYPE_CODE
    ).select_related('parent').first()

    return awc_location.parent if awc_location else None
| 36.15 | 105 | 0.768188 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 696 | 0.192531 |
6476c66a90cc6db2e0f89497af03df52e9401883 | 3,053 | py | Python | main.py | tuzhucheng/sent-sim | ebda09322be1dca3e967b80ffcf6437adb789132 | [
"MIT"
] | 109 | 2017-12-09T04:52:06.000Z | 2022-02-08T17:41:37.000Z | main.py | tuzhucheng/sent-sim | ebda09322be1dca3e967b80ffcf6437adb789132 | [
"MIT"
] | 5 | 2018-06-05T01:50:02.000Z | 2021-03-14T04:45:02.000Z | main.py | tuzhucheng/sent-sim | ebda09322be1dca3e967b80ffcf6437adb789132 | [
"MIT"
] | 24 | 2018-04-27T01:52:34.000Z | 2021-12-21T09:21:26.000Z | """
Driver program for training and evaluation.
"""
import argparse
import logging
import numpy as np
import random
import torch
import torch.optim as O
from datasets import get_dataset, get_dataset_configurations
from models import get_model
from runners import Runner
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Sentence similarity models')
    parser.add_argument('--model', default='sif', choices=['sif', 'mpcnn', 'mpcnn-lite', 'bimpm'], help='Model to use')
    parser.add_argument('--dataset', default='sick', choices=['sick', 'wikiqa'], help='Dataset to use')
    parser.add_argument('--batch-size', type=int, default=64, help='Batch size')
    parser.add_argument('--epochs', type=int, default=15, help='Number of epochs')
    parser.add_argument('--lr', type=float, default=2e-4, help='Learning rate')
    parser.add_argument('--regularization', type=float, default=3e-4, help='Regularization')
    parser.add_argument('--seed', type=int, default=1234, help='Seed for reproducibility')
    parser.add_argument('--device', type=int, default=0, help='Device, -1 for CPU')
    # Bug fix: help text was copy-pasted from --device; this flag controls how
    # often progress is logged during training.
    parser.add_argument('--log-interval', type=int, default=50, help='Number of batches between progress logs')
    # Special options for SIF model
    parser.add_argument('--unsupervised', action='store_true', default=False, help='Set this flag to use unsupervised mode.')
    parser.add_argument('--alpha', type=float, default=1e-3, help='Smoothing term for smooth inverse frequency baseline model')
    parser.add_argument('--no-remove-special-direction', action='store_true', default=False, help='Set to not remove projection onto first principal component')
    parser.add_argument('--frequency-dataset', default='enwiki', choices=['train', 'enwiki'])
    args = parser.parse_args()

    # Seed all RNG sources for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.device != -1:
        torch.cuda.manual_seed(args.seed)

    # Console logger for training progress.
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(levelname)s - %(message)s')
    ch.setFormatter(formatter)
    logger.addHandler(ch)

    dataset_cls, train_loader, dev_loader, test_loader, embedding = get_dataset(args)
    model = get_model(args, dataset_cls, embedding)

    # SIF estimates word frequencies from the training data before running.
    if args.model == 'sif':
        model.populate_word_frequency_estimation(train_loader)

    # Count trainable parameters for logging.
    total_params = 0
    for param in model.parameters():
        size = [s for s in param.size()]
        total_params += np.prod(size)
    logger.info('Total number of parameters: %s', total_params)

    loss_fn, metrics, y_to_score, resolved_pred_to_score = get_dataset_configurations(args)
    optimizer = O.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, weight_decay=args.regularization)
    runner = Runner(model, loss_fn, metrics, optimizer, y_to_score, resolved_pred_to_score, args.device, None)
    runner.run(args.epochs, train_loader, dev_loader, test_loader, args.log_interval)
| 45.567164 | 160 | 0.72224 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 787 | 0.257779 |
6477f0f67faa25656d06bc7f042cf253f6f4ecae | 544 | py | Python | dhook.py | Araon/Sadhu-Kamra | daf99d9d4ccb4e1f39d98ce9296fc774a9562fc0 | [
"MIT"
] | null | null | null | dhook.py | Araon/Sadhu-Kamra | daf99d9d4ccb4e1f39d98ce9296fc774a9562fc0 | [
"MIT"
] | null | null | null | dhook.py | Araon/Sadhu-Kamra | daf99d9d4ccb4e1f39d98ce9296fc774a9562fc0 | [
"MIT"
] | null | null | null | import requests
url = "https://discord.com/api/webhooks/848514072194580511/_7RDKRz4PeX3oPPaRuPsA8P-3287J6uFoUAiXYtA0yygVPOdkLV3HphfLargI1dJ9eJi"


def notify(message):
    """POST *message* to the Discord webhook and print the outcome.

    NOTE(review): the webhook URL above is a hard-coded credential committed
    to the repo; consider loading it from the environment instead.
    """
    payload = {
        "content": "Tweet Posted: " + message,
    }
    result = requests.post(
        url,
        json=payload,
        headers={"Content-Type": "application/json"},
    )
    if 200 <= result.status_code < 300:
        print(f"Webhook sent {result.status_code}")
    else:
        print(f"Not sent with {result.status_code}, response:\n{result.json()}")
64784891319622c3277255e07ba9df3046e9426c | 16,301 | py | Python | proganomaly_modules/training_module/trainer/training_inputs.py | ryangillard/P-CEAD | d4e95fa17112af07eb99cd581470bd3146d1c8e5 | [
"Apache-2.0"
] | 6 | 2021-07-01T23:37:10.000Z | 2022-02-19T03:12:41.000Z | proganomaly_modules/training_module/trainer/training_inputs.py | ryangillard/P-CEAD | d4e95fa17112af07eb99cd581470bd3146d1c8e5 | [
"Apache-2.0"
] | null | null | null | proganomaly_modules/training_module/trainer/training_inputs.py | ryangillard/P-CEAD | d4e95fa17112af07eb99cd581470bd3146d1c8e5 | [
"Apache-2.0"
] | 2 | 2021-07-01T23:37:30.000Z | 2021-12-21T18:19:21.000Z | # Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
def preprocess_images(images):
    """Scale image pixel values from [0, 255] to [-1., 1.].

    Args:
        images: np.array or tensor, images of shape
            (batch_size, height, width, depth).

    Returns:
        Float tensor of images of shape (batch_size, height, width, depth).
    """
    as_float = tf.cast(x=images, dtype=tf.float32)
    return (as_float - 127.5) / 127.5
def in_core_dataset(
    images, labels, dataset_len, batch_size, block_idx, params, training
):
    """Gets in-core dataset.

    Args:
        images: tensor, images of shape
            (batch_size, dataset_height, dataset_width, depth).
        labels: tensor, int labels of shape (batch_size,).
        dataset_len: int, number of examples in dataset; used only to size
            the shuffle buffer during training.
        batch_size: int, number of examples per batch.
        block_idx: int, current resolution block index.
        params: dict, user passed parameters.
        training: bool, if training or not.

    Returns:
        Dataset of dictionary of images tensor of shape
        (batch_size, height, width, depth) and labels tensor of shape
        (batch_size,).
    """
    def resize_images(image, label):
        """Resizes images tensor to the current block's resolution.

        Args:
            image: tensor, images of shape
                (batch_size, dataset_height, dataset_width, depth).
            label: tensor, int labels of shape (batch_size,).

        Returns:
            Dictionary of images tensor of shape
            (batch_size, size, size, depth) and labels tensor of shape
            (batch_size,).
        """
        # Resolution doubles each block, starting from 4x4 at block 0.
        size = 4 * 2 ** block_idx
        image = tf.image.resize(
            images=image, size=(size, size), method="nearest"
        )

        return {"image": image}, label

    dataset = tf.data.Dataset.from_tensor_slices(tensors=(images, labels))

    if training:
        # Repeat indefinitely across epochs; buffer of dataset_len gives a
        # full-dataset shuffle.
        dataset = dataset.repeat(count=None).shuffle(buffer_size=dataset_len)

    dataset = dataset.batch(batch_size=batch_size)

    # Resize images for resolution block.
    dataset = dataset.map(
        map_func=resize_images,
        num_parallel_calls=(
            tf.data.experimental.AUTOTUNE
            if params["input_fn_autotune"]
            else None
        ),
        deterministic=False
    )

    # Prefetch data to improve latency.
    dataset = dataset.prefetch(
        buffer_size=(
            tf.data.experimental.AUTOTUNE
            if params["input_fn_autotune"]
            else 1
        )
    )

    return dataset
def mnist_dataset(batch_size, block_idx, params, training):
    """Gets tf.data.Dataset using in-core MNIST dataset.

    Args:
        batch_size: int, number of examples per batch.
        block_idx: int, current resolution block index.
        params: dict, user passed parameters.
        training: bool, if training or not.

    Returns:
        An input function.
    """
    def preprocess_mnist_images(images):
        """Preprocesses specifically for MNIST images.

        Args:
            images: np.array, array of images of shape
                (num_examples, 28, 28).

        Returns:
            Tensor of images of shape (num_examples, 32, 32, 1).
        """
        # Pad 28x28 images to 32x32 so resolutions are powers of two.
        images = tf.pad(
            tensor=images,
            paddings=[[0, 0], [2, 2], [2, 2]],
            mode="CONSTANT",
            constant_values=0
        )

        # Add dimension for num_channels(1) to end.
        images = tf.expand_dims(input=images, axis=-1)

        return images

    def _input_fn():
        """Wrapper input function to get data tensors.

        Returns:
            Batched dataset object of dictionary of image tensors and label
            tensor.
        """
        if training:
            (train_images, train_labels), (_, _) = (
                tf.keras.datasets.mnist.load_data()
            )
            images = preprocess_images(
                images=preprocess_mnist_images(images=train_images)
            )
            labels = train_labels
        else:
            (_, _), (test_images, test_labels) = (
                tf.keras.datasets.mnist.load_data()
            )
            images = preprocess_images(
                images=preprocess_mnist_images(images=test_images)
            )
            labels = test_labels

        print("MNIST dataset shape = {}".format(images.shape))

        return in_core_dataset(
            images=images,
            labels=labels,
            # Bug fix: use the actual example count for the shuffle buffer.
            # The previous hard-coded 50000 under-sized the buffer for
            # MNIST's 60000 training examples.
            dataset_len=int(images.shape[0]),
            batch_size=batch_size,
            block_idx=block_idx,
            params=params,
            training=training
        )

    return _input_fn
def cifar10_dataset(batch_size, block_idx, params, training):
    """Gets tf.data.Dataset using in-core CIFAR-10 dataset.

    Args:
        batch_size: int, number of examples per batch.
        block_idx: int, current resolution block index.
        params: dict, user passed parameters.
        training: bool, if training or not.

    Returns:
        An input function.
    """
    def filter_cifar_images_by_class(images, labels, class_idx):
        """Filters CIFAR-10 images to only chosen class.

        Args:
            images: np.array, array of images of shape
                (num_examples, 32, 32, 3).
            labels: np.array, array of int labels of shape
                (num_examples, 1).
            class_idx: int, index of chosen class.

        Returns:
            Numpy array of chosen class images of shape
            (num_class_examples, 32, 32, 3).
        """
        in_class_images = images[labels.flatten() == class_idx, :, :, :]

        return in_class_images

    def preprocess_cifar10_images(images, labels):
        """Preprocesses specifically for CIFAR-10 images.

        Args:
            images: np.array, array of images of shape
                (num_examples, 32, 32, 3).
            labels: np.array, array of int labels of shape
                (num_examples, 1).

        Returns:
            images: images, filtered to one class for the "cifar10_car"
                dataset, unchanged otherwise.
            labels: labels matching the (possibly filtered) images.
        """
        if params["dataset"] == "cifar10_car":
            # Keep only the automobile class (label index 1).
            images = filter_cifar_images_by_class(
                images=images, labels=labels, class_idx=1
            )
            labels = tf.tile(input=[1], multiples=[images.shape[0]])

        return images, labels

    def _input_fn():
        """Wrapper input function to get data tensors.

        Returns:
            Batched dataset object of dictionary of image tensors and label
            tensor.
        """
        if training:
            (train_images, train_labels), (_, _) = (
                tf.keras.datasets.cifar10.load_data()
            )
            images, labels = preprocess_cifar10_images(
                images=train_images, labels=train_labels
            )
        else:
            (_, _), (test_images, test_labels) = (
                tf.keras.datasets.cifar10.load_data()
            )
            images, labels = preprocess_cifar10_images(
                images=test_images, labels=test_labels
            )

        images = preprocess_images(images=images)

        print("CIFAR-10 dataset shape = {}".format(images.shape))

        return in_core_dataset(
            images=images,
            labels=labels,
            # Bug fix: use the actual example count for the shuffle buffer
            # instead of the hard-coded 50000, which was wrong for the 10000
            # example test split and the ~5000 example "cifar10_car" subset.
            dataset_len=int(images.shape[0]),
            batch_size=batch_size,
            block_idx=block_idx,
            params=params,
            training=training
        )

    return _input_fn
def read_tf_record_dataset(
    file_pattern, batch_size, block_idx, params, training
):
    """Reads TF Record data using tf.data, doing necessary preprocessing.

    Given filename, mode, batch size, and other parameters, read TF Record
    dataset using Dataset API, apply necessary preprocessing, and return an
    input function to the Estimator API.

    Args:
        file_pattern: str, file pattern that to read into our tf.data dataset.
        batch_size: int, number of examples per batch.
        block_idx: int, the current resolution block index.
        params: dict, dictionary of user passed parameters.
        training: bool, if training or not.

    Returns:
        An input function.
    """
    def fetch_dataset(filename):
        """Fetches TFRecord Dataset from given filename.

        Args:
            filename: str, name of TFRecord file.

        Returns:
            Dataset containing TFRecord Examples.
        """
        buffer_size = 8 * 1024 * 1024  # 8 MiB per file
        dataset = tf.data.TFRecordDataset(
            filenames=filename,
            # None lets TF choose the read buffer size when autotuning.
            buffer_size=(
                None
                if params["input_fn_autotune"]
                else buffer_size
            ),
            num_parallel_reads=8
        )

        return dataset

    def decode_example(protos, block_idx, params):
        """Decodes TFRecord file into tensors.

        Given protobufs, decode into image and label tensors.

        Args:
            protos: protobufs from TFRecord file.
            block_idx: int, the current resolution block index.
            params: dict, user passed parameters.

        Returns:
            Image and label tensors.
        """
        # Map schema dtype strings to TF dtypes.
        dtype_map = {
            "str": tf.string,
            "int": tf.int64,
            "float": tf.float32
        }

        # Create feature schema map for protos.
        tf_example_features = {
            feat["name"]: (
                tf.io.FixedLenFeature(
                    shape=feat["shape"], dtype=dtype_map[feat["dtype"]]
                )
                if feat["type"] == "FixedLen"
                else tf.io.FixedLenSequenceFeature(
                    shape=feat["shape"], dtype=dtype_map[feat["dtype"]]
                )
            )
            for feat in params["tf_record_example_schema"]
        }

        # Parse features from tf.Example.
        parsed_features = tf.io.parse_single_example(
            serialized=protos, features=tf_example_features
        )

        # Convert from a scalar string tensor (whose single string has
        # length height * width * depth) to a uint8 tensor with shape
        # (height * width * depth).
        # NOTE(review): an unrecognized params["image_encoding"] value would
        # leave `image` unbound and raise below -- confirm the setting is
        # validated upstream.
        if params["image_encoding"] == "raw":
            image = tf.io.decode_raw(
                input_bytes=parsed_features[params["image_feature_name"]],
                out_type=tf.uint8
            )
        elif params["image_encoding"] == "png":
            image = tf.io.decode_png(
                contents=parsed_features[params["image_feature_name"]],
                channels=params["image_depth"]
            )
        elif params["image_encoding"] == "jpeg":
            image = tf.io.decode_jpeg(
                contents=parsed_features[params["image_feature_name"]],
                channels=params["image_depth"]
            )

        # Reshape flattened image back into normal dimensions.
        if params["use_multiple_resolution_records"]:
            # Per-resolution records: base projection dims scaled by
            # 2 ** block_idx for the current block.
            height, width = params["projection_dims"][0:2]
            height *= (2 ** block_idx)
            width *= (2 ** block_idx)
            image = tf.reshape(
                tensor=image, shape=(height, width, params["image_depth"])
            )
        else:
            image = tf.reshape(
                tensor=image,
                shape=(
                    params["image_predownscaled_height"],
                    params["image_predownscaled_width"],
                    params["image_depth"]
                )
            )

        # Preprocess image.
        image = preprocess_images(images=image)

        if params["label_feature_name"]:
            # Convert label from a scalar uint8 tensor to an int32 scalar.
            label = tf.cast(x=parsed_features["label"], dtype=tf.int32)

            return {"image": image}, label
        return {"image": image}

    def set_static_shape(features, labels, batch_size, params):
        """Sets static shape of batched input tensors in dataset.

        Args:
            features: dict, keys are feature names and values are tensors.
            labels: tensor, label data.
            batch_size: int, number of examples per batch.
            params: dict, user passed parameters.

        Returns:
            Features tensor dictionary and labels tensor.
        """
        # Only the batch axis is pinned; spatial dims stay dynamic since
        # they vary by resolution block.
        features["image"].set_shape(
            features["image"].get_shape().merge_with(
                tf.TensorShape(dims=(batch_size, None, None, None))
            )
        )

        if params["label_feature_name"]:
            labels.set_shape(
                labels.get_shape().merge_with(
                    tf.TensorShape(dims=(batch_size))
                )
            )

            return features, labels
        return features

    def _input_fn():
        """Wrapper input function used by Estimator API to get data tensors.

        Returns:
            Batched dataset object of dictionary of feature tensors and label
            tensor.
        """
        # Create dataset to contain list of files matching pattern.
        dataset = tf.data.Dataset.list_files(
            file_pattern=file_pattern, shuffle=training
        )

        # Repeat dataset files indefinitely if in training.
        if training:
            dataset = dataset.repeat()

        # Parallel interleave multiple files at once with map function.
        dataset = dataset.interleave(
            map_func=fetch_dataset,
            num_parallel_calls=tf.data.experimental.AUTOTUNE,
            deterministic=False
        )

        # Shuffle the Dataset TFRecord Examples if in training.
        if training:
            dataset = dataset.shuffle(buffer_size=1024)

        # Decode TF Record Example into a features dictionary of tensors.
        dataset = dataset.map(
            map_func=lambda x: decode_example(
                protos=x,
                block_idx=block_idx,
                params=params
            ),
            num_parallel_calls=(
                tf.data.experimental.AUTOTUNE
                if params["input_fn_autotune"]
                else None
            )
        )

        # Batch dataset and drop remainder so there are no partial batches.
        dataset = dataset.batch(batch_size=batch_size, drop_remainder=True)

        # Assign static shape, namely make the batch size axis static.
        if params["label_feature_name"]:
            dataset = dataset.map(
                map_func=lambda x, y: set_static_shape(
                    features=x,
                    labels=y,
                    batch_size=batch_size,
                    params=params
                ),
                num_parallel_calls=(
                    tf.data.experimental.AUTOTUNE
                    if params["input_fn_autotune"]
                    else None
                )
            )
        else:
            dataset = dataset.map(
                map_func=lambda x: set_static_shape(
                    features=x,
                    labels=None,
                    batch_size=batch_size,
                    params=params
                ),
                num_parallel_calls=(
                    tf.data.experimental.AUTOTUNE
                    if params["input_fn_autotune"]
                    else None
                )
            )

        # Prefetch data to improve latency.
        dataset = dataset.prefetch(
            buffer_size=(
                tf.data.experimental.AUTOTUNE
                if params["input_fn_autotune"]
                else 1
            )
        )

        return dataset

    return _input_fn
| 32.667335 | 80 | 0.565241 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,479 | 0.458806 |
64799d5c6f6b719d0987aca3b83297c592058a73 | 58 | py | Python | python/testData/intentions/quotedStringDoubleSlash_after.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/intentions/quotedStringDoubleSlash_after.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 173 | 2018-07-05T13:59:39.000Z | 2018-08-09T01:12:03.000Z | python/testData/intentions/quotedStringDoubleSlash_after.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | data_path = dirname(realpath(__file__)).replace("\\", '/') | 58 | 58 | 0.672414 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 0.12069 |
6479a0308b3995ce53fc23a94f0f47e1afdd3615 | 3,846 | py | Python | usfm_references/__init__.py | anthonyraj/usfm-references | e0e2cc804545b029df55f9780c1361a8a2702f9c | [
"MIT"
] | null | null | null | usfm_references/__init__.py | anthonyraj/usfm-references | e0e2cc804545b029df55f9780c1361a8a2702f9c | [
"MIT"
] | null | null | null | usfm_references/__init__.py | anthonyraj/usfm-references | e0e2cc804545b029df55f9780c1361a8a2702f9c | [
"MIT"
] | null | null | null | """
USFM References Tools
"""
import re
__version__ = '1.1.0'

ANY_REF = re.compile(r'^[1-9A-Z]{3}\.([0-9]{1,3}(_[0-9]+)?(\.[0-9]{1,3})?|INTRO\d+)$')
CHAPTER = re.compile(r'^[1-6A-Z]{3}\.[0-9]{1,3}(_[0-9]+)?$')
CHAPTER_OR_INTRO = re.compile(r'^[1-9A-Z]{3}\.([0-9]{1,3}(_[0-9]+)?|INTRO\d+)$')
SINGLE_CHAPTER_OR_VERSE = re.compile(r'^([A-Za-z]{3})\.([1-9]+\.{0,1}[1-9]*)$')
VERSE = re.compile(r'^[1-6A-Z]{3}\.[0-9]{1,3}(_[0-9]+)?\.[0-9]{1,3}$')

BOOKS = [
    'GEN', 'EXO', 'LEV', 'NUM', 'DEU', 'JOS', 'JDG', 'RUT', '1SA', '2SA', '1KI', '2KI', '1CH',
    '2CH', 'EZR', 'NEH', 'EST', 'JOB', 'PSA', 'PRO', 'ECC', 'SNG', 'ISA', 'JER', 'LAM', 'EZK',
    'DAN', 'HOS', 'JOL', 'AMO', 'OBA', 'JON', 'MIC', 'NAM', 'HAB', 'ZEP', 'HAG', 'ZEC', 'MAL',
    'MAT', 'MRK', 'LUK', 'JHN', 'ACT', 'ROM', '1CO', '2CO', 'GAL', 'EPH', 'PHP', 'COL', '1TH',
    '2TH', '1TI', '2TI', 'TIT', 'PHM', 'HEB', 'JAS', '1PE', '2PE', '1JN', '2JN', '3JN', 'JUD',
    'REV', 'TOB', 'JDT', 'ESG', 'WIS', 'SIR', 'BAR', 'LJE', 'S3Y', 'SUS', 'BEL', '1MA', '2MA',
    '3MA', '4MA', '1ES', '2ES', 'MAN', 'PS2', 'ODA', 'PSS', 'EZA', '5EZ', '6EZ', 'DAG', 'PS3',
    '2BA', 'LBA', '2MQ', '3MQ', 'REP', '4BA', 'LAO', 'LKA'
]


def _has_known_book(ref):
    """Return True when the text before the first '.' is a known book code."""
    return ref.split('.')[0] in BOOKS


def valid_chapter(ref):
    """Return True for a validly structured USFM Bible chapter reference.

    A known, capitalized (English) book abbreviation, a period (.), and a
    chapter number of any length, optionally followed by an underscore (_)
    and a sub-chapter number of any length.
    """
    return bool(CHAPTER.match(ref)) and _has_known_book(ref)


def valid_chapter_or_intro(ref):
    """Return True for a valid USFM chapter reference or book INTRO.

    Same structure as valid_chapter, except the chapter portion may instead
    be 'INTRO' followed by a number.
    """
    return bool(CHAPTER_OR_INTRO.match(ref)) and _has_known_book(ref)


def valid_usfm(ref):
    """Return True for any validly structured USFM Bible reference.

    Accepts chapter references (with optional _sub-chapter), chapter INTROs,
    and chapter.verse references for any known book.
    """
    return bool(ANY_REF.match(ref)) and _has_known_book(ref)


def valid_verse(ref):
    """Return True for a validly structured USFM Bible verse reference.

    Requires a known book, a chapter number (with optional _sub-chapter
    suffix), and a verse number.
    """
    return bool(VERSE.match(ref)) and _has_known_book(ref)


def valid_multi_usfm(ref, delimiter='+'):
    """Return True when every delimiter-separated part is a valid USFM ref.

    Example (James 1:1-5): JAS.1.1+JAS.1.2+JAS.1.3+JAS.1.4+JAS.1.5
    With a comma delimiter: JAS.1.1,JAS.1.2,JAS.1.3,JAS.1.4,JAS.1.5
    """
    return all(valid_usfm(part) for part in ref.split(delimiter))
| 46.337349 | 99 | 0.606084 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,888 | 0.75091 |
647a4c4839fcdf56e3e2a60854159ce82350a4e1 | 1,591 | py | Python | dataset/gla_vol_time_series/cumul_to_rates_rgi01regions.py | subond/ww_tvol_study | 6fbcae251015a7cd49220abbb054914266b3b4a1 | [
"MIT"
] | 20 | 2021-04-28T18:11:43.000Z | 2022-03-09T13:15:56.000Z | dataset/gla_vol_time_series/cumul_to_rates_rgi01regions.py | subond/ww_tvol_study | 6fbcae251015a7cd49220abbb054914266b3b4a1 | [
"MIT"
] | 4 | 2021-04-28T15:51:43.000Z | 2022-01-02T19:10:25.000Z | dataset/gla_vol_time_series/cumul_to_rates_rgi01regions.py | rhugonnet/ww_tvol_study | f29fc2fca358aa169f6b7cc790e6b6f9f8b55c6f | [
"MIT"
] | 9 | 2021-04-28T17:58:27.000Z | 2021-12-19T05:51:56.000Z | import os, sys
import pandas as pd
import numpy as np
import pyddem.tdem_tools as tt
# Example: integrate the RGI-O1 regional cumulative volume-change series into
# rates over fixed periods, using time-varying glacier areas.

# CSV of time-varying glacier areas for the RGI regions.
fn_tarea = '/home/atom/data/inventory_products/RGI/tarea_zemp.csv'

# List of regional cumulative series: one CSV per RGI region 1-19.
reg_dir = '/home/atom/ongoing/work_worldwide/vol/final'
list_fn_reg= [os.path.join(reg_dir,'dh_'+str(i).zfill(2)+'_rgi60_int_base_reg.csv') for i in [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19]]

# Periods of interest: the two decades, the AR6 period, and the full span.
tlim_00_10 = [np.datetime64('2000-01-01'),np.datetime64('2010-01-01')]
tlim_10_20 = [np.datetime64('2010-01-01'),np.datetime64('2020-01-01')]
tlim_ar6 = [np.datetime64('2006-01-01'),np.datetime64('2019-01-01')]
tlim_00_20 = [np.datetime64('2000-01-01'),np.datetime64('2020-01-01')]
list_tlim = [tlim_00_10,tlim_10_20,tlim_ar6,tlim_00_20]
list_tag = ['decad1','decad2','ar6','full']

# Integrate the cumulative series into rates for each period and region.
list_df = []
for fn_reg in list_fn_reg:
    df_reg = pd.read_csv(fn_reg)
    df_agg = tt.aggregate_all_to_period(df_reg,list_tlim=list_tlim,fn_tarea=fn_tarea,frac_area=1,list_tag=list_tag)
    list_df.append(df_agg)

# Concatenate the per-region results into one table.
df = pd.concat(list_df)

# Convert m w.e. yr-1 into kg m-2 yr-1.
df.dmdtda *= 1000
df.err_dmdtda *= 1000

# Keep only the variables of interest and write the final table.
df = df[['reg','period','dmdt','err_dmdt','dmdtda','err_dmdtda']]
df.to_csv('/home/atom/ongoing/work_ipcc_ar6/table_hugonnet_regions_10yr_20yr_ar6period_final.csv',float_format='%.2f',index=None)
| 40.794872 | 143 | 0.750471 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 761 | 0.478316 |
647ae795ccb2eae73a4642206f65fa6ba9dc4abe | 375 | py | Python | wallarm_api/core/api/triggers_api.py | Neraverin/wallarm-api-python | a033cfee28b1648f6bb7d1e531f353929b5d41c1 | [
"Apache-2.0"
] | null | null | null | wallarm_api/core/api/triggers_api.py | Neraverin/wallarm-api-python | a033cfee28b1648f6bb7d1e531f353929b5d41c1 | [
"Apache-2.0"
] | null | null | null | wallarm_api/core/api/triggers_api.py | Neraverin/wallarm-api-python | a033cfee28b1648f6bb7d1e531f353929b5d41c1 | [
"Apache-2.0"
] | null | null | null | from wallarm_api.core.api.base_api import BaseApi
from wallarm_api.core.models.trigger import Triggers
class TriggersApi(BaseApi):
    """Client-side accessor for the Wallarm triggers endpoint."""

    def get_triggers(self, clientid):
        """Return the triggers configured for *clientid* as a Triggers model."""
        endpoint = f'/v2/clients/{clientid}/triggers?denormalize=true'
        payload = self.client.get(endpoint)
        return Triggers(triggers=payload['triggers'])
| 37.5 | 69 | 0.696 | 270 | 0.72 | 0 | 0 | 0 | 0 | 0 | 0 | 61 | 0.162667 |
647b7f76abf9d66cbd9622be1caf548a048a1b6b | 534 | py | Python | kokki/cookbooks/gearmand/metadata.py | samuel/kokki | da98da55e0bba8db5bda993666a43c6fdc4cacdb | [
"BSD-3-Clause"
] | 11 | 2015-01-14T00:43:26.000Z | 2020-12-29T06:12:51.000Z | kokki/cookbooks/gearmand/metadata.py | samuel/kokki | da98da55e0bba8db5bda993666a43c6fdc4cacdb | [
"BSD-3-Clause"
] | null | null | null | kokki/cookbooks/gearmand/metadata.py | samuel/kokki | da98da55e0bba8db5bda993666a43c6fdc4cacdb | [
"BSD-3-Clause"
] | 3 | 2015-01-14T01:05:56.000Z | 2019-01-26T05:09:37.000Z |
# Cookbook metadata: human-readable description and configurable attributes
# for the gearmand recipe.
__description__ = "Gearman RPC broker"

__config__ = {
    "gearmand.listen_address": dict(
        description = "IP address to bind to",
        default = "127.0.0.1",
    ),
    "gearmand.user": dict(
        display_name = "Gearmand user",
        # Bug fix: corrected typo "procses" -> "process" in the
        # user-facing description.
        description = "User to run the gearmand process as",
        default = "nobody",
    ),
    "gearmand.pidfile": dict(
        display_name = "Gearmand pid file",
        description = "Path to the PID file for gearmand",
        default = "/var/run/gearmand/gearmand.pid",
    ),
}
| 28.105263 | 60 | 0.593633 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 258 | 0.483146 |
647b873dcc7cc9f8ebf1215e9427968dfcb02031 | 1,751 | py | Python | class0/pil.py | dapianzi/tf_start | b6dc85c4c06c65ff892f6eb19aceb09fffc676a9 | [
"MIT"
] | null | null | null | class0/pil.py | dapianzi/tf_start | b6dc85c4c06c65ff892f6eb19aceb09fffc676a9 | [
"MIT"
] | null | null | null | class0/pil.py | dapianzi/tf_start | b6dc85c4c06c65ff892f6eb19aceb09fffc676a9 | [
"MIT"
] | null | null | null | from PIL import Image
from matplotlib import pyplot as plt
import numpy as np
# Grab the module namespace so images can be looked up by generated name
# ('img0', 'img1', ...) in the plotting loops below.
names = locals()

img0 = Image.open("./assets/pyCharm.png")
# print image info:
print(img0.size, img0.format, img0.mode, np.array(img0))

# save other formats (run once to generate the files opened below)
# img0.save('./assets/pyCharm.tiff')
# img0.convert('RGB').save('./assets/pyCharm.jpeg')
# img0.convert('L').save('./assets/pyCharm.bmp')  # grayscale image

img1 = Image.open('./assets/pyCharm.tiff')  # 3-channel image
img2 = Image.open('./assets/pyCharm.jpeg')
img3 = Image.open('./assets/pyCharm.bmp')

# A 3-channel image can be split into its bands...
img4, img5, img6 = img2.split()
# ...and merged back in a different band order.
img7 = Image.merge('RGB', [img5, img6, img4])

# Show img0..img7 in the first 8 grid cells.
plt.figure(figsize=(15, 15))
for i in range(8):
    plt.subplot(4, 3, i + 1)
    plt.axis('off')  # hide axis
    plt.imshow(names.get('img' + str(i)))
    plt.title(names.get('img' + str(i)).format)

# Remove the white background of a PNG logo by making it transparent.
img_dir = '/Users/carl/Pictures/logos/'
logo = Image.open(img_dir + 'google.png')
# Convert the palette-compressed 8-bit image to RGBA.
logo = logo.convert('RGBA')
# Split the channels.
(logo_r, logo_g, logo_b, logo_a) = logo.split()
# Convert each band to a numpy array.
arr_r = np.array(logo_r)
arr_g = np.array(logo_g)
arr_b = np.array(logo_b)
arr_a = np.array(logo_a)
# Select the near-white background pixels (RGB == 245,247,247).
idx = (arr_r == 245) & (arr_g == 247) & (arr_b == 247)
# Zero them out, including alpha, so they become fully transparent.
arr_r[idx] = 0
arr_g[idx] = 0
arr_b[idx] = 0
arr_a[idx] = 0
# Convert the numpy arrays back to image bands.
shard_r = Image.fromarray(arr_r)
shard_g = Image.fromarray(arr_g)
shard_b = Image.fromarray(arr_b)
shard_a = Image.fromarray(arr_a)

# Show the four modified bands in the remaining grid cells.
rgb_dict = 'rgba'
for i in range(4):
    plt.subplot(4, 3, i+9)
    plt.axis('off')  # hide axis
    plt.imshow(names.get('shard_' + rgb_dict[i]))
    plt.title(names.get('shard_' + rgb_dict[i]).format)

# Merge the channels and save.
# NOTE(review): 'overWrite' is not a documented PIL save() option -- verify
# it is needed.
Image.merge('RGBA', [shard_r, shard_g, shard_b, shard_a]).save(img_dir + 'logo-1.png', overWrite=True)

plt.tight_layout()
plt.show() | 26.134328 | 102 | 0.672758 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 641 | 0.340414 |
647eebf2afa85364fe3d780d7873a3f80debbdc1 | 2,123 | py | Python | rpython/jit/backend/x86/codebuf.py | kantai/passe-pypy-taint-tracking | b60a3663f8fe89892dc182c8497aab97e2e75d69 | [
"MIT"
] | 2 | 2016-07-06T23:30:20.000Z | 2017-05-30T15:59:31.000Z | rpython/jit/backend/x86/codebuf.py | kantai/passe-pypy-taint-tracking | b60a3663f8fe89892dc182c8497aab97e2e75d69 | [
"MIT"
] | null | null | null | rpython/jit/backend/x86/codebuf.py | kantai/passe-pypy-taint-tracking | b60a3663f8fe89892dc182c8497aab97e2e75d69 | [
"MIT"
] | 2 | 2020-07-09T08:14:22.000Z | 2021-01-15T18:01:25.000Z | from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rlib.rarithmetic import intmask
from rpython.rlib.debug import debug_start, debug_print, debug_stop
from rpython.rlib.debug import have_debug_prints
from rpython.jit.backend.llsupport.asmmemmgr import BlockBuilderMixin
from rpython.jit.backend.x86.rx86 import X86_32_CodeBuilder, X86_64_CodeBuilder
from rpython.jit.backend.x86.regloc import LocationCodeBuilder
from rpython.jit.backend.x86.arch import IS_X86_32, IS_X86_64, WORD
from rpython.jit.backend.x86 import valgrind
# XXX: Seems nasty to change the superclass of MachineCodeBlockWrapper
# like this
# Pick the concrete instruction encoder for the host architecture at import
# time; backend_name labels the jit-backend-dump log entries below.
if IS_X86_32:
    codebuilder_cls = X86_32_CodeBuilder
    backend_name = 'x86'
elif IS_X86_64:
    codebuilder_cls = X86_64_CodeBuilder
    backend_name = 'x86_64'
class MachineCodeBlockWrapper(BlockBuilderMixin,
                              LocationCodeBuilder,
                              codebuilder_cls):
    """Buffer for assembling one block of x86 machine code.

    Combines the block-builder memory management mixin with the
    architecture-specific code builder selected at import time
    (codebuilder_cls).
    """

    def __init__(self):
        self.init_block_builder()
        # a list of relative positions; for each position p, the bytes
        # at [p-4:p] encode an absolute address that will need to be
        # made relative. Only works on 32-bit!
        if WORD == 4:
            self.relocations = []
        else:
            self.relocations = None
        #
        # ResOperation --> offset in the assembly.
        # ops_offset[None] represents the beginning of the code after the last op
        # (i.e., the tail of the loop)
        self.ops_offset = {}

    def add_pending_relocation(self):
        # Record the current position so the absolute address encoded just
        # before it can be patched to a relative one in copy_to_raw_memory().
        self.relocations.append(self.get_relative_pos())

    def mark_op(self, op):
        # Remember where in the assembly this operation's code starts.
        pos = self.get_relative_pos()
        self.ops_offset[op] = pos

    def copy_to_raw_memory(self, addr):
        # Copy the assembled code to its final address, then patch each
        # pending relocation in place (32-bit only, see __init__).
        self._copy_to_raw_memory(addr)
        if self.relocations is not None:
            for reloc in self.relocations:
                p = addr + reloc
                adr = rffi.cast(rffi.LONGP, p - WORD)
                adr[0] = intmask(adr[0] - p)
        # Tell valgrind the code at this address changed, then emit the
        # jit-backend-dump debug log entry.
        valgrind.discard_translations(addr, self.get_relative_pos())
        self._dump(addr, "jit-backend-dump", backend_name)
| 38.6 | 81 | 0.676401 | 1,330 | 0.626472 | 0 | 0 | 0 | 0 | 0 | 0 | 419 | 0.197362 |
647f046c31c221244ebc2df5f266fb6c4c36a234 | 223 | py | Python | apps/integrations/github/resources/__init__.py | wizzzet/github_backend | 9e4b5d3273e850e4ac0f425d22911987be7a7eff | [
"MIT"
] | null | null | null | apps/integrations/github/resources/__init__.py | wizzzet/github_backend | 9e4b5d3273e850e4ac0f425d22911987be7a7eff | [
"MIT"
] | null | null | null | apps/integrations/github/resources/__init__.py | wizzzet/github_backend | 9e4b5d3273e850e4ac0f425d22911987be7a7eff | [
"MIT"
] | null | null | null | from .users import UsersListResource # NOQA
from .users import UserResource # NOQA
from .followers import FollowersListResource # NOQA
from .repos import ReposListResource # NOQA
from .repos import RepoResource # NOQA
| 37.166667 | 52 | 0.798206 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 30 | 0.134529 |
647fa64e5f63de84b7eadfe651862d2472378acf | 1,590 | py | Python | 2019/day03/day03_part1.py | boffman/adventofcode | 077e727b9b050c1fc5cb99ed7fbd64c5a69d9605 | [
"MIT"
] | null | null | null | 2019/day03/day03_part1.py | boffman/adventofcode | 077e727b9b050c1fc5cb99ed7fbd64c5a69d9605 | [
"MIT"
] | null | null | null | 2019/day03/day03_part1.py | boffman/adventofcode | 077e727b9b050c1fc5cb99ed7fbd64c5a69d9605 | [
"MIT"
] | null | null | null | import math
def apply_move(maze, x, y):
key = "{},{}".format(x,y)
maze.add(key)
def calc_distance(key):
x, y = [int(v) for v in key.split(",")]
return abs(x) + abs(y)
def find_smallest_distance(maze):
min_distance = None
for key in maze:
distance = calc_distance(key)
print("Intersection at {} - distance = {}".format(key, distance))
if not min_distance or distance < min_distance:
min_distance = distance
if min_distance is None:
raise Exception("Failed to find distances in maze")
return min_distance
def apply_moves(moves, maze):
x = 0
y = 0
for move in moves:
direction, distance = move[0], int(move[1:])
if direction == "U":
for y in range(y+1, y+distance+1):
apply_move(maze, x, y)
elif direction == "D":
for y in range(y-1, y-distance-1, -1):
apply_move(maze, x, y)
elif direction == "R":
for x in range(x+1, x+distance+1):
apply_move(maze, x, y)
elif direction == "L":
for x in range(x-1, x-distance-1, -1):
apply_move(maze, x, y)
else:
raise Exception("Unknown direction: {}".format(direction,))
maze1 = set()
maze2 = set()
with open("input") as infile:
moves1 = infile.readline().strip().split(",")
apply_moves(moves1, maze1)
moves2 = infile.readline().strip().split(",")
apply_moves(moves2, maze2)
merge_maze = maze1 & maze2
distance = find_smallest_distance(merge_maze)
print(distance) | 26.949153 | 73 | 0.573585 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 128 | 0.080503 |
64806f65878c18d62b19689145b457942a25bb91 | 1,991 | py | Python | server/src/weaverbird/pipeline/steps/aggregate.py | JeremyJacquemont/weaverbird | e04ab6f9c8381986ab71078e5199ece7a875e743 | [
"BSD-3-Clause"
] | 54 | 2019-11-20T15:07:39.000Z | 2022-03-24T22:13:51.000Z | server/src/weaverbird/pipeline/steps/aggregate.py | JeremyJacquemont/weaverbird | e04ab6f9c8381986ab71078e5199ece7a875e743 | [
"BSD-3-Clause"
] | 786 | 2019-10-20T11:48:37.000Z | 2022-03-23T08:58:18.000Z | server/src/weaverbird/pipeline/steps/aggregate.py | JeremyJacquemont/weaverbird | e04ab6f9c8381986ab71078e5199ece7a875e743 | [
"BSD-3-Clause"
] | 10 | 2019-11-21T10:16:16.000Z | 2022-03-21T10:34:06.000Z | from typing import List, Literal, Optional, Sequence
from pydantic import Field, root_validator, validator
from pydantic.main import BaseModel
from weaverbird.pipeline.steps.utils.base import BaseStep
from weaverbird.pipeline.steps.utils.render_variables import StepWithVariablesMixin
from weaverbird.pipeline.steps.utils.validation import validate_unique_columns
from weaverbird.pipeline.types import ColumnName, PopulatedWithFieldnames, TemplatedVariable
AggregateFn = Literal[
'avg',
'sum',
'min',
'max',
'count',
'count distinct',
'first',
'last',
'count distinct including empty',
]
class Aggregation(BaseModel):
class Config(PopulatedWithFieldnames):
...
new_columns: List[ColumnName] = Field(alias='newcolumns')
agg_function: AggregateFn = Field(alias='aggfunction')
columns: List[ColumnName]
@validator('columns', pre=True)
def validate_unique_columns(cls, value):
return validate_unique_columns(value)
@root_validator(pre=True)
def handle_legacy_syntax(cls, values):
if 'column' in values:
values['columns'] = [values.pop('column')]
if 'newcolumn' in values:
values['new_columns'] = [values.pop('newcolumn')]
return values
class AggregateStep(BaseStep):
name = Field('aggregate', const=True)
on: List[ColumnName] = []
aggregations: Sequence[Aggregation]
keep_original_granularity: Optional[bool] = Field(
default=False, alias='keepOriginalGranularity'
)
class Config(PopulatedWithFieldnames):
...
class AggregationWithVariables(Aggregation):
class Config(PopulatedWithFieldnames):
...
new_columns: List[TemplatedVariable] = Field(alias='newcolumns')
agg_function: TemplatedVariable = Field(alias='aggfunction')
columns: List[TemplatedVariable]
class AggregateStepWithVariables(AggregateStep, StepWithVariablesMixin):
aggregations: Sequence[AggregationWithVariables]
| 29.279412 | 92 | 0.721748 | 1,352 | 0.679056 | 0 | 0 | 394 | 0.197891 | 0 | 0 | 243 | 0.122049 |
64818116878cb9d102e8939921e2cfcabcbe47ce | 23,929 | py | Python | src/ea/libs/FileModels/TestUnit.py | dmachard/extensivetesting | a5c3d2648aebcfaf1d0352a7aff8728ab843b73f | [
"MIT"
] | 9 | 2019-09-01T04:56:28.000Z | 2021-04-08T19:45:52.000Z | src/ea/libs/FileModels/TestUnit.py | dmachard/extensivetesting | a5c3d2648aebcfaf1d0352a7aff8728ab843b73f | [
"MIT"
] | 5 | 2020-10-27T15:05:12.000Z | 2021-12-13T13:48:11.000Z | src/ea/libs/FileModels/TestUnit.py | dmachard/extensivetesting | a5c3d2648aebcfaf1d0352a7aff8728ab843b73f | [
"MIT"
] | 2 | 2019-10-01T06:12:06.000Z | 2020-04-29T13:28:20.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------
# Copyright (c) 2010-2021 Denis Machard
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -------------------------------------------------------------------
"""
Test unit module
"""
from ea.libs.FileModels import GenericModel
from ea.libs.PyXmlDict import Dict2Xml as PyDictXml
from ea.libs.PyXmlDict import Xml2Dict as PyXmlDict
import sys
import datetime
import time
import copy
import re
# unicode = str with python3
if sys.version_info > (3,):
unicode = str
r = re.compile(
u"[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\xFF\u0100-\uD7FF\uE000-\uFDCF\uFDE0-\uFFFD]")
def removeInvalidXML(string):
"""
Remove invalid XML
"""
def replacer(m):
"""
return empty string
"""
return ""
return re.sub(r, replacer, string)
DEFAULT_INPUTS = [{'type': 'bool', 'name': 'DEBUG', 'description': '',
'value': 'False', 'color': '', 'scope': 'local'},
{'type': 'float', 'name': 'TIMEOUT', 'description': '',
'value': '1.0', 'color': '', 'scope': 'local'},
{'type': 'bool', 'name': 'VERBOSE', 'description': '',
'value': 'True', 'color': '', 'scope': 'local'}]
DEFAULT_OUTPUTS = [{'type': 'float', 'name': 'TIMEOUT', 'description': '',
'value': '1.0', 'color': '', 'scope': 'local'}]
DEFAULT_AGENTS = [{'name': 'AGENT', 'description': '',
'value': 'agent-dummy01', 'type': 'dummy'}]
def bytes2str(val):
"""
bytes 2 str conversion, only for python3
"""
if isinstance(val, bytes):
return str(val, "utf8")
else:
return val
class DataModel(GenericModel.GenericModel):
"""
Data model for test unit
"""
def __init__(self, userName='unknown', testDef='', defLibrary='',
defAdapter='', timeout="10.0", inputs=[], outputs=[]):
"""
This class describes the model of one script document,
and provides a xml <=> python encoder
The following xml :
<?xml version="1.0" encoding="utf-8" ?>
<file>
<properties">
<descriptions>
<description>
<key>author</key>
<value>...</value>
</description>
<description>
<key>creation date</key>
<value>...</value>
</description>
<description>
<key>summary</key>
<value>...</value>
</description>
<description>
<key>prerequisites</key>
<value>...</value>
</description>
<description>
<key>comments</key>
<value>
<comments>
<comment>
<author>author</author>
<datetime>...</datetime>
<post>...</post>
</comment>
....
</comments>
</value>
</description>
</descriptions>
<inputs-parameters>
<parameter>
<name>...</name>
<type>...</type>
<description>...</description>
<value>...</value>
</parameter>
</inputs-parameters>
<outputs-parameters>
<parameter>
<name>...</name>
<type>...</type>
<description>...</description>
<value>...</value>
</parameter>
</outputs-parameters>
<probes>
<probe>
<active>...</active>
<args>...</args>
<name>...</name>
<type>...</type>
</probe>
</probes>
<agents>
<agent>
<name>...</name>
<type>...</type>
<description>...</description>
<value>...</value>
</agent>
</agents>
</properties>
<testdefinition>...</testdefinition>
<testdevelopment>...</testdevelopment>
<snapshots>
<snapshot>
<name>....</name>
<timestamp>...</timestamp>
<properties>...</properties>
<testdef>....</testdef>
</snapshot>
</snapshots>
</file>
"""
GenericModel.GenericModel.__init__(self)
today = datetime.datetime.today()
self.dateToday = today.strftime("%d/%m/%Y %H:%M:%S")
self.currentUser = userName
self.defLibrary = defLibrary
self.defAdapter = defAdapter
# new in v17
self.timeout = timeout
self.inputs = inputs
self.outputs = outputs
# end of new
# init xml encoder
self.codecX2D = PyXmlDict.Xml2Dict()
self.codecD2X = PyDictXml.Dict2Xml(coding=None)
# files properties
self.properties = {'properties': {
'descriptions': {
'description': [
{'key': 'author', 'value': self.currentUser},
{'key': 'creation date', 'value': self.dateToday},
{'key': 'summary',
'value': 'Just a basic sample.'},
{'key': 'prerequisites',
'value': 'None.'},
{'key': 'comments', 'value': {'comments': {'comment': []}}},
{'key': 'libraries',
'value': self.defLibrary},
{'key': 'adapters',
'value': self.defAdapter},
{'key': 'state', 'value': 'Writing'},
{'key': 'name', 'value': 'TESTCASE'},
{'key': 'requirement',
'value': 'REQ_01'},
]},
'probes': {
'probe': [{'active': 'False', 'args': '', 'name': 'probe01', 'type': 'default'}]
},
'inputs-parameters': {
'parameter': copy.deepcopy(DEFAULT_INPUTS)
},
'outputs-parameters': {
'parameter': copy.deepcopy(DEFAULT_OUTPUTS)
},
'agents': {
'agent': copy.deepcopy(DEFAULT_AGENTS)
},
}
}
# new in v17
for p in self.properties["properties"]["inputs-parameters"]["parameter"]:
if p["name"] == "TIMEOUT":
p["value"] = self.timeout
for p in self.properties["properties"]["outputs-parameters"]["parameter"]:
if p["name"] == "TIMEOUT":
p["value"] = self.timeout
if len(self.inputs):
self.properties["properties"]["inputs-parameters"]["parameter"] = self.inputs
if len(self.outputs):
self.properties["properties"]["outputs-parameters"]["parameter"] = self.outputs
# end of new
# file contents
self.testdef = testDef
# dev duration
self.testdev = time.time()
def toXml(self):
"""
Python data to xml
@return:
@rtype:
"""
try:
# fix xml
self.fixPyXML(
data=self.properties['properties']['inputs-parameters'],
key='parameter')
self.fixPyXML(
data=self.properties['properties']['outputs-parameters'],
key='parameter')
self.fixPyXML(
data=self.properties['properties']['probes'],
key='probe')
self.fixPyXML(
data=self.properties['properties']['agents'],
key='agent')
# prepare xml structure
xmlDataList = ['<?xml version="1.0" encoding="utf-8" ?>']
xmlDataList.append('<file>')
if sys.version_info > (3,): # python3 support
xmlDataList.append(
bytes2str(
self.codecD2X.parseDict(
dico=self.properties)))
else:
xmlDataList.append(
self.codecD2X.parseDict(
dico=self.properties))
xmlDataList.append(
'<testdefinition><![CDATA[%s]]></testdefinition>' %
unicode(
self.testdef))
xmlDataList.append(
'<testdevelopment>%s</testdevelopment>' %
unicode(
self.testdev))
xmlDataList.append('</file>')
ret = '\n'.join(xmlDataList)
# remove all invalid xml data
ret = removeInvalidXML(ret)
except Exception as e:
self.error("TestUnit > To Xml %s" % str(e))
ret = None
return ret
def fixParameterstoUTF8(self):
"""
Fix encodage not pretty....
"""
for param in self.properties['properties']['inputs-parameters']['parameter']:
param['value'] = param['value'].decode("utf-8")
param['description'] = param['description'].decode("utf-8")
param['name'] = param['name'].decode("utf-8")
for param in self.properties['properties']['outputs-parameters']['parameter']:
param['value'] = param['value'].decode("utf-8")
param['description'] = param['description'].decode("utf-8")
param['name'] = param['name'].decode("utf-8")
for agent in self.properties['properties']['agents']['agent']:
agent['value'] = agent['value'].decode("utf-8")
agent['description'] = agent['description'].decode("utf-8")
agent['name'] = agent['name'].decode("utf-8")
def fixDescriptionstoUTF8(self):
"""
Fix encodage not pretty....
"""
for descr in self.properties['properties']['descriptions']['description']:
descr['key'] = descr['key'].decode("utf-8")
if isinstance(descr['value'], dict):
pass
else:
descr['value'] = descr['value'].decode("utf-8")
def setTestDef(self, testDef):
"""
Set the test definition
"""
self.testdef = testDef
def onLoad(self, decompressedData):
"""
Called on load data model
"""
# reset properties
self.properties = {}
self.testdef = ""
self.testexec = ""
decodedStatus = False
# decode content
try:
# Extract xml from the file data
ret = self.codecX2D.parseXml(xml=decompressedData)
except Exception as e:
self.error("TestUnit > Parse Xml: %s" % str(e))
else:
try:
if sys.version_info > (3,): # python3 support
# Extract test definition and test execution
self.testdef = ret['file']['testdefinition']
# Extract test time development
if 'testdevelopment' not in ret['file']:
self.testdev = time.time()
else:
self.testdev = ret['file']['testdevelopment']
else:
# Extract test definition and test execution
self.testdef = ret['file']['testdefinition'].decode(
"utf-8")
# BEGIN NEW in 5.1.0 :
if 'testdevelopment' not in ret['file']:
self.testdev = time.time()
else:
self.testdev = ret['file']['testdevelopment'].decode(
"utf-8")
# END NEW in 5.1.0
# Extract all properties
properties = ret['file']['properties']
except Exception as e:
self.error(
"TestUnit > extract properties, test definition and execution: %s" %
str(e))
else:
try:
# BEGIN NEW in 2.0.0 : description and can be missing model
# file, to keep the compatibility
if 'descriptions' not in properties:
properties['descriptions'] = {'description': [{'key': 'author', 'value': ''},
{'key': 'date', 'value': ''},
{'key': 'summary', 'value': ''},
{'key': 'prerequisites', 'value': ''},
{'key': 'requirement', 'value': ''}]}
# END NEW in 2.0.0
# BEGIN NEW in 13.0.0
if 'descriptions' in properties:
foundLibraries = False
foundAdapters = False
creationDate = None
foundState = False
foundPrerequis = False
foundComments = False
foundTestname = False
foundRequirement = False
for kv in properties['descriptions']['description']:
if kv['key'] == 'prerequisites':
foundPrerequis = True
if kv['key'] == 'name':
foundTestname = True
if kv['key'] == 'comments':
foundComments = True
if kv['key'] == 'libraries':
foundLibraries = True
if kv['key'] == 'adapters':
foundAdapters = True
if kv['key'] == 'date':
creationDate = kv['value']
if kv['key'] == 'state':
foundState = True
if kv['key'] == 'requirement':
foundRequirement = True
if not foundLibraries:
properties['descriptions']['description'].append(
{'key': 'libraries', 'value': self.defLibrary})
if not foundAdapters:
properties['descriptions']['description'].append(
{'key': 'adapters', 'value': self.defAdapter})
if not foundState:
properties['descriptions']['description'].append(
{'key': 'state', 'value': 'Writing'})
if not foundComments:
properties['descriptions']['description'].append(
{'key': 'comments', 'value': {'comments': {'comment': []}}})
if not foundTestname:
properties['descriptions']['description'].append(
{'key': 'name', 'value': 'TESTCASE'})
if not foundPrerequis:
properties['descriptions']['description'].append(
{'key': 'prerequisites', 'value': ''})
if not foundRequirement:
properties['descriptions']['description'].append(
{'key': 'requirement', 'value': 'REQ_01'})
if creationDate is not None:
properties['descriptions']['description'].insert(
1, {'key': 'creation date', 'value': creationDate})
# END NEW in 13.0.0
# BEGIN NEW in 5.1.0 : output-parameters can be missing in
# the model file, to keep the compatibility
if 'outputs-parameters' not in properties:
properties['outputs-parameters'] = {
'parameter': copy.deepcopy(DEFAULT_OUTPUTS)}
# END NEW in 5.1.0
# BEGIN NEW in 5.1.0 : replace parameters by
# intput-parameters in the model file, to keep the
# compatibility
if 'inputs-parameters' not in properties:
properties['inputs-parameters'] = properties['parameters']
properties.pop('parameters')
# END NEW in 5.1.0
# BEGIN NEW in 6.0.0 : agents can be missing in the model
# file, to keep the compatibility
if 'agents' not in properties:
properties['agents'] = {
'agent': copy.deepcopy(DEFAULT_AGENTS)}
# END NEW in 6.0.0
# bug fix in 10.1
if properties['agents'] == '' or properties['agents'] == b'': # python3 support
properties['agents'] = {'agent': [], '@agent': []}
# BEGIN NEW in 9.0.0 :
if isinstance(properties['agents']['agent'], dict):
properties['agents']['agent'] = [
properties['agents']['agent']]
for agt in properties['agents']['agent']:
if 'type' not in agt:
agt.update({'type': ''})
# END NEW in 9.0.0
except Exception as e:
self.error(
"TestUnit > fix backward compatibility %s" %
str(e))
else:
try:
if isinstance(properties['probes'], str) or isinstance(
properties['probes'], bytes): # python3 support
properties['probes'] = {'probe': [], '@probe': []}
if isinstance(properties['inputs-parameters'], str) or isinstance(
properties['inputs-parameters'], bytes): # python3 support
properties['inputs-parameters'] = {
'parameter': [], '@parameter': []}
if isinstance(properties['outputs-parameters'], str) or isinstance(
properties['outputs-parameters'], bytes): # python3 support
properties['outputs-parameters'] = {
'parameter': [], '@parameter': []}
if isinstance(properties['agents'], str) or isinstance(
properties['agents'], bytes): # python3 support
properties['agents'] = {'agent': [], '@agent': []}
self.fixXML(data=properties['probes'], key='probe')
if '@probe' in properties['probes']:
self.fixXML(
data=properties['probes'], key='@probe')
self.fixXML(data=properties['agents'], key='agent')
if '@agent' in properties['agents']:
self.fixXML(
data=properties['agents'], key='@agent')
self.fixXML(
data=properties['inputs-parameters'],
key='parameter')
if '@parameter' in properties['inputs-parameters']:
self.fixXML(
data=properties['inputs-parameters'],
key='@parameter')
self.fixXML(
data=properties['outputs-parameters'],
key='parameter')
if '@parameter' in properties['outputs-parameters']:
self.fixXML(
data=properties['outputs-parameters'],
key='@parameter')
self.fixXML(
data=properties['descriptions'],
key='description')
if '@description' in properties['descriptions']:
self.fixXML(
data=properties['descriptions'],
key='@description')
# BEGIN NEW in 19.0.0 : add missing scope parameters
for p in properties['inputs-parameters']['parameter']:
if "scope" not in p:
p["scope"] = "local"
p["@scope"] = {}
for p in properties['outputs-parameters']['parameter']:
if "scope" not in p:
p["scope"] = "local"
p["@scope"] = {}
# END OF NEW
except Exception as e:
self.error("TestUnit > fix xml %s" % str(e))
else:
try:
self.properties = {'properties': properties}
if sys.version_info < (3,): # python3 support
self.fixDescriptionstoUTF8()
self.fixParameterstoUTF8()
except Exception as e:
self.error("TestUnit > fix utf8 %s" % str(e))
else:
decodedStatus = True
return decodedStatus
| 42.277385 | 108 | 0.426428 | 21,167 | 0.884575 | 0 | 0 | 0 | 0 | 0 | 0 | 10,410 | 0.435037 |
648561ff4edc4db1c644515ed643f5d862a173dc | 6,672 | py | Python | BenchmarkScripts/convert2panoptic.py | Skywalker666666/scannet_dataset_prep | 0cda8c360512eda8c2ade892c5f23ed21320cc69 | [
"MIT"
] | null | null | null | BenchmarkScripts/convert2panoptic.py | Skywalker666666/scannet_dataset_prep | 0cda8c360512eda8c2ade892c5f23ed21320cc69 | [
"MIT"
] | null | null | null | BenchmarkScripts/convert2panoptic.py | Skywalker666666/scannet_dataset_prep | 0cda8c360512eda8c2ade892c5f23ed21320cc69 | [
"MIT"
] | null | null | null | #!/usr/bin/python
#
# Convert to COCO-style panoptic segmentation format (http://cocodataset.org/#format-data).
#
# python imports
from __future__ import print_function, absolute_import, division, unicode_literals
import os
import glob
import sys
import argparse
import json
import numpy as np
# Image processing
from PIL import Image
EVAL_LABELS = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39]
EVAL_LABEL_NAMES = ["wall", "floor", "cabinet", "bed", "chair", "sofa", "table", "door", "window", "bookshelf", "picture", "counter", "desk", "curtain", "refrigerator", "shower curtain", "toilet", "sink", "bathtub", "otherfurniture"]
EVAL_LABEL_CATS = ["indoor", "indoor", "furniture", "furniture", "furniture", "furniture", "furniture", "furniture", "furniture", "furniture", "furniture", "furniture", "furniture", "furniture", "appliance", "furniture", "furniture", "appliance", "furniture", "furniture"]
EVAL_LABEL_COLORS = [(174, 199, 232), (152, 223, 138), (31, 119, 180), (255, 187, 120), (188, 189, 34), (140, 86, 75), (255, 152, 150), (214, 39, 40), (197, 176, 213), (148, 103, 189), (196, 156, 148), (23, 190, 207), (247, 182, 210), (219, 219, 141), (255, 127, 14), (158, 218, 229), (44, 160, 44), (112, 128, 144), (227, 119, 194), (82, 84, 163)]
def splitall(path):
allparts = []
while 1:
parts = os.path.split(path)
if parts[0] == path: # sentinel for absolute paths
allparts.insert(0, parts[0])
break
elif parts[1] == path: # sentinel for relative paths
allparts.insert(0, parts[1])
break
else:
path = parts[0]
allparts.insert(0, parts[1])
return allparts
# The main method
def convert2panoptic(scannetPath, outputFolder=None):
if outputFolder is None:
outputFolder = scannetPath
# find files
search = os.path.join(scannetPath, "*", "instance", "*.png")
files = glob.glob(search)
files.sort()
# quit if we did not find anything
if not files:
print(
"Did not find any files for using matching pattern {}. Please consult the README.".format(search)
)
sys.exit(-1)
# a bit verbose
print("Converting {} annotation files.".format(len(files)))
outputBaseFile = "scannet_panoptic"
outFile = os.path.join(outputFolder, "{}.json".format(outputBaseFile))
print("Json file with the annotations in panoptic format will be saved in {}".format(outFile))
panopticFolder = os.path.join(outputFolder, outputBaseFile)
if not os.path.isdir(panopticFolder):
print("Creating folder {} for panoptic segmentation PNGs".format(panopticFolder))
os.mkdir(panopticFolder)
print("Corresponding segmentations in .png format will be saved in {}".format(panopticFolder))
categories = []
for idx in range(len(EVAL_LABELS)):
label = EVAL_LABELS[idx]
name = EVAL_LABEL_NAMES[idx]
cat = EVAL_LABEL_CATS[idx]
color = EVAL_LABEL_COLORS[idx]
isthing = label > 2
categories.append({'id': int(label),
'name': name,
'color': color,
'supercategory': cat,
'isthing': isthing})
images = []
annotations = []
for progress, f in enumerate(files):
originalFormat = np.array(Image.open(f))
parts = splitall(f)
fileName = parts[-1]
sceneName = parts[-3]
outputFileName = "{}__{}".format(sceneName, fileName)
inputFileName = os.path.join(sceneName, "color", fileName)
imageId = os.path.splitext(outputFileName)[0]
# image entry, id for image is its filename without extension
images.append({"id": imageId,
"width": int(originalFormat.shape[1]),
"height": int(originalFormat.shape[0]),
"file_name": outputFileName})
#"file_name": inputFileName})
pan_format = np.zeros(
(originalFormat.shape[0], originalFormat.shape[1], 3), dtype=np.uint8
)
segmentIds = np.unique(originalFormat)
segmInfo = []
for segmentId in segmentIds:
isCrowd = 0
if segmentId < 1000:
semanticId = segmentId
else:
semanticId = segmentId // 1000
if semanticId not in EVAL_LABELS:
continue
mask = originalFormat == segmentId
color = [segmentId % 256, segmentId // 256, segmentId // 256 // 256]
pan_format[mask] = color
area = np.sum(mask) # segment area computation
# bbox computation for a segment
hor = np.sum(mask, axis=0)
hor_idx = np.nonzero(hor)[0]
x = hor_idx[0]
width = hor_idx[-1] - x + 1
vert = np.sum(mask, axis=1)
vert_idx = np.nonzero(vert)[0]
y = vert_idx[0]
height = vert_idx[-1] - y + 1
bbox = [int(x), int(y), int(width), int(height)]
segmInfo.append({"id": int(segmentId),
"category_id": int(semanticId),
"area": int(area),
"bbox": bbox,
"iscrowd": isCrowd})
annotations.append({'image_id': imageId,
'file_name': outputFileName,
"segments_info": segmInfo})
Image.fromarray(pan_format).save(os.path.join(panopticFolder, outputFileName))
print("\rProgress: {:>3.2f} %".format((progress + 1) * 100 / len(files)), end=' ')
sys.stdout.flush()
print("\nSaving the json file {}".format(outFile))
d = {'images': images,
'annotations': annotations,
'categories': categories}
with open(outFile, 'w') as f:
json.dump(d, f, sort_keys=True, indent=4)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--dataset-folder",
dest="scannetPath",
help="path to the ScanNet data 'scannet_frames_25k' folder",
required=True,
type=str)
parser.add_argument("--output-folder",
dest="outputFolder",
help="path to the output folder.",
default=None,
type=str)
args = parser.parse_args()
convert2panoptic(args.scannetPath, args.outputFolder)
# call the main
if __name__ == "__main__":
main()
| 38.566474 | 348 | 0.561451 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,583 | 0.23726 |
6485fec3abdcd71b449dc07dfd6e24085b4ec88d | 19,281 | py | Python | tlmshop/settings.py | LegionMarket/django-cms-base | 1b6fc3423e3d0b2165552cc980432befb496f3e0 | [
"BSD-3-Clause"
] | null | null | null | tlmshop/settings.py | LegionMarket/django-cms-base | 1b6fc3423e3d0b2165552cc980432befb496f3e0 | [
"BSD-3-Clause"
] | null | null | null | tlmshop/settings.py | LegionMarket/django-cms-base | 1b6fc3423e3d0b2165552cc980432befb496f3e0 | [
"BSD-3-Clause"
] | null | null | null | """
Django settings for this project.
Generated by 'django-admin startproject' using Django 1.10.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
from django.utils.translation import ugettext_lazy as _
from cmsplugin_cascade.utils import format_lazy
from django.core.urlresolvers import reverse_lazy
from decimal import Decimal
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(__file__)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'y&+f+)tw5sqkcy$@vwh8cy%y^9lwytqtn*y=lv7f9t39b(cufx'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# '*' accepts any Host header -- acceptable for local development only.
ALLOWED_HOSTS = ['*']
# Id of the django.contrib.sites Site this settings module serves.
SITE_ID = 1
# Project app label; presumably referenced by project-local models -- confirm.
APP_LABEL = 'tlmshop'
# Enable this to additionally show the debug toolbar
INTERNAL_IPS = ['localhost', '127.0.0.1', '192.168.1.69']
# Root directory for this django project (one level above this file).
PROJECT_ROOT = os.path.abspath(os.path.join(BASE_DIR, os.path.pardir))
# Directory where working files, such as media and databases, are kept.
# Relocatable via the DJANGO_WORKDIR environment variable (e.g. for docker).
WORK_DIR = os.environ.get('DJANGO_WORKDIR', os.path.abspath(os.path.join(PROJECT_ROOT, 'LegionMarket')))
if not os.path.exists(WORK_DIR):
    try:
        os.makedirs(WORK_DIR)
    except OSError:
        # A concurrent process may have created the directory between the
        # exists() check and makedirs(); re-raise only if it is still absent.
        # (makedirs(exist_ok=True) would be Python 3 only.)
        if not os.path.isdir(WORK_DIR):
            raise
# Application definition
#
# Apps are grouped into named tuples and concatenated into INSTALLED_APPS at
# the bottom of this section.  Ordering matters to Django (template/static
# resolution and admin skin overrides), so keep the sequence intact.

# django-jet admin skin -- currently NOT part of INSTALLED_APPS.
DJANGO_APPS_JET = (
    # TODO: fix bug in jet -- it does not allow adding a new page when
    # "create page" is clicked from the start page.
    # 'jet_ole.dashboard',
    # 'jet_ole',
    'jet.dashboard',
    'jet',
)
# django-admin-interface skin plus its helper apps; listed first in
# INSTALLED_APPS so its admin templates take precedence.
DJANGO_APPS_ADMIN_INTERFACE = (
    'admin_interface',
    'flat_responsive',
    'colorfield',
)
# django-material admin skin -- currently NOT part of INSTALLED_APPS.
DJANGO_APPS_MATERIAL = (
    # material apps
    'material',
    # 'material.frontend',
    'material.admin',
)
DJANGO_APPS = (
    # djangocms_admin_style needs to be before django.contrib.admin!
    # https://django-cms.readthedocs.org/en/develop/how_to/install.html#configuring-your-project-for-django-cms
    'djangocms_admin_style',
    'django.contrib.admin',
    # django defaults
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.sites',
    'django.contrib.staticfiles',
)
# django CMS core plus its bundled content plugins.
DJANGO_CMS = (
    'cms',
    'menus',
    'treebeard',
    'sekizai',
    # 'reversion',
    # requirements for django-filer
    'filer',
    'easy_thumbnails',
    'easy_thumbnails.optimize',
    'mptt',
    # core addons
    'djangocms_text_ckeditor',
    'djangocms_link',
    'djangocms_picture',
    'djangocms_snippet',
    'djangocms_style',
    'djangocms_googlemap',
    'djangocms_audio',
)
DJANGO_CMS_ADDONS = (
    # djangocms-cascade plugin family
    'cmsplugin_cascade',
    'cmsplugin_cascade.clipboard',
    'cmsplugin_cascade.sharable',
    'cmsplugin_cascade.extra_fields',
    'cmsplugin_cascade.icon',
    'cmsplugin_cascade.segmentation',
)
THIRD_PARTY_APPS = (
    'embed_video',
    'crispy_forms',
)
# django-SHOP and the apps it relies on (forms, compression, search, mail).
SHOP = (
    'django_select2',
    'cms_bootstrap3',
    'adminsortable2',
    'django_fsm',
    'fsm_admin',
    'djng',
    'compressor',
    'sass_processor',
    'django_filters',
    'post_office',
    'haystack',
    'shop',
    'shop_stripe',
)
# Authentication / REST API apps used by the shop frontend.
SHOP_TOO = (
    'email_auth',
    'polymorphic',
    'rest_framework',
    'rest_framework.authtoken',
    'rest_auth',
)
# Project-local apps.
LOCAL_APPS = (
    'video_back',
    'videojs',
    # 'background',
    'tlmshop',
)
# NOTE(review): DEV_APP is defined but never added to INSTALLED_APPS below.
DEV_APP = (
    'django.contrib.flatpages',
    'django.contrib.redirects',
)
INSTALLED_APPS = DJANGO_APPS_ADMIN_INTERFACE + DJANGO_APPS + SHOP_TOO + \
    DJANGO_CMS + DJANGO_CMS_ADDONS + THIRD_PARTY_APPS + \
    LOCAL_APPS + SHOP
#######
# Leftover fragments of earlier INSTALLED_APPS combinations (jet / material /
# admin_interface admin skins), kept for reference only:
# DJANGO_APPS_JET + \
# DJANGO_APPS_MATERIAL + \
# DJANGO_APPS_ADMIN_INTERFACE + \
##
# NOTE(review): Django 1.10 introduced the new-style MIDDLEWARE setting;
# MIDDLEWARE_CLASSES is the legacy form -- confirm before upgrading Django.
# The order below is significant: request phase runs top-to-bottom,
# response phase bottom-to-top.
MIDDLEWARE_CLASSES = (
    'djng.middleware.AngularUrlMiddleware',
    # it's recommended to place this as high as possible to enable apphooks
    # to reload the page without loading unnecessary middlewares
    'cms.middleware.utils.ApphookReloadMiddleware',
    # django defaults
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    # placed right after AuthenticationMiddleware -- presumably so the shop
    # customer can be derived from the authenticated user; confirm with the
    # django-shop documentation.
    'shop.middleware.CustomerMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    # django CMS additions
    'django.middleware.locale.LocaleMiddleware',
    'cms.middleware.user.CurrentUserMiddleware',
    'cms.middleware.page.CurrentPageMiddleware',
    'cms.middleware.toolbar.ToolbarMiddleware',
    'cms.middleware.language.LanguageCookieMiddleware',
)
# Root URL configuration module and WSGI entry point for this project.
ROOT_URLCONF = 'tlmshop.urls'
WSGI_APPLICATION = 'tlmshop.wsgi.application'
# Templates
# https://docs.djangoproject.com/en/1.10/ref/settings/#templates
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level template directory.  APP_DIRS must stay False
        # because explicit 'loaders' are configured in OPTIONS below.
        'DIRS': [
            os.path.join(PROJECT_ROOT, 'templates'),
        ],
        'APP_DIRS': False,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                # additional context processors for local development
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                # django CMS additions
                # (cms_settings was previously listed twice; the duplicate
                # has been removed -- running it twice was redundant.)
                'cms.context_processors.cms_settings',
                'sekizai.context_processors.sekizai',
                # django-SHOP additions
                'shop.context_processors.customer',
                'shop.context_processors.ng_model_options',
                'shop_stripe.context_processors.public_keys',
            ],
            'loaders': [
                'django.template.loaders.filesystem.Loader',
                # NOTE(review): eggs.Loader is deprecated in Django 1.9/1.10
                # and removed in 2.0 -- drop it when upgrading.
                'django.template.loaders.eggs.Loader',
            ],
            'debug': DEBUG,
        },
    },
]
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# we use os.getenv to be able to override the default database settings for the docker setup
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(WORK_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en'
TIME_ZONE = 'America/New_York'
USE_I18N = False
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_ROOT = os.path.join(WORK_DIR, 'static')
STATIC_URL = '/static/'
# print(STATIC_ROOT)
# we need to add additional configuration for filer etc.
MEDIA_ROOT = os.path.join(WORK_DIR, 'media')
MEDIA_URL = '/media/'
# Checking to see if directories are there
if not os.path.exists(STATIC_ROOT):
os.makedirs(STATIC_ROOT)
if not os.path.exists(MEDIA_ROOT):
os.makedirs(MEDIA_ROOT)
STATICFILES_FINDERS = [
# 'tlmshop.finders.FileSystemFinder', # or
# 'tlmshop.finders.AppDirectoriesFinder', # or
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'sass_processor.finders.CssFinder',
'compressor.finders.CompressorFinder',
]
# we need to add additional configuration for filer etc.
NODE = os.path.join(PROJECT_ROOT, 'node_modules')
if not os.path.exists(NODE):
os.makedirs(NODE)
STATICFILES_DIRS = [
('static', os.path.join(PROJECT_ROOT, 'static')),
('node_modules', os.path.join(PROJECT_ROOT, 'node_modules')),
('templates', os.path.join(PROJECT_ROOT, 'templates')),
]
# print(STATICFILES_DIRS)
NODE_MODULES_URL = STATIC_URL + 'node_modules/'
# print(STATICFILES_DIRS)
SASS_PROCESSOR_INCLUDE_DIRS = [
os.path.join(PROJECT_ROOT, 'node_modules'),
]
# print(SASS_PROCESSOR_INCLUDE_DIRS)
COERCE_DECIMAL_TO_STRING = True
FSM_ADMIN_FORCE_PERMIT = True
ROBOTS_META_TAGS = ('noindex', 'nofollow')
# django CMS settings
# http://docs.django-cms.org/en/latest/
# #########################################
# Static Templates Files
CMS_PERMISSION = True
CMS_PLACEHOLDER_CONF = {
}
CMS_PAGE_WIZARD_CONTENT_PLACEHOLDER = 'content'
# django CMS internationalization
# http://docs.django-cms.org/en/latest/topics/i18n.html
# LANGUAGES = (
# ('en', _('English')),
# )
# django CMS templates
# http://docs.django-cms.org/en/latest/how_to/templates.html
CMS_TEMPLATES = (
('content.html', 'Content'),
('t458_lavish/index.html', 'TLM-Lavish')
)
# CUSTOM
# Filer
THUMBNAIL_PRESERVE_EXTENSIONS = True
THUMBNAIL_PROCESSORS = (
'easy_thumbnails.processors.colorspace',
'easy_thumbnails.processors.autocrop',
'filer.thumbnail_processors.scale_and_crop_with_subject_location',
'easy_thumbnails.processors.filters',
)
# CKEditor
# DOCS: https://github.com/divio/djangocms-text-ckeditor
# CKEDITOR_SETTINGS = {
# 'stylesSet': 'default:/static/js/addons/ckeditor.wysiwyg.js',
# 'contentsCss': ['/static/css/base.css'],
# }
CKEDITOR_SETTINGS = {
'language': '{{ language }}',
'skin': 'moono',
'toolbar': 'CMS',
'toolbar_HTMLField': [
['Undo', 'Redo'],
['cmsplugins', '-', 'ShowBlocks'],
['Format', 'Styles'],
['TextColor', 'BGColor', '-', 'PasteText', 'PasteFromWord'],
['Maximize', ''],
'/',
['Bold', 'Italic', 'Underline', '-', 'Subscript', 'Superscript', '-', 'RemoveFormat'],
['JustifyLeft', 'JustifyCenter', 'JustifyRight'],
['HorizontalRule'],
['NumberedList', 'BulletedList', '-', 'Outdent', 'Indent', '-', 'Table'],
['Source']
],
'stylesSet': format_lazy('default:{}', reverse_lazy('admin:cascade_texticon_wysiwig_config')),
}
CKEDITOR_SETTINGS_CAPTION = {
'language': '{{ language }}',
'skin': 'moono',
'height': 70,
'toolbar_HTMLField': [
['Undo', 'Redo'],
['Format', 'Styles'],
['Bold', 'Italic', 'Underline', '-', 'Subscript', 'Superscript', '-', 'RemoveFormat'],
['Source']
],
}
CKEDITOR_SETTINGS_DESCRIPTION = {
'language': '{{ language }}',
'skin': 'moono',
'height': 250,
'toolbar_HTMLField': [
['Undo', 'Redo'],
['cmsplugins', '-', 'ShowBlocks'],
['Format', 'Styles'],
['TextColor', 'BGColor', '-', 'PasteText', 'PasteFromWord'],
['Maximize', ''],
'/',
['Bold', 'Italic', 'Underline', '-', 'Subscript', 'Superscript', '-', 'RemoveFormat'],
['JustifyLeft', 'JustifyCenter', 'JustifyRight'],
['HorizontalRule'],
['NumberedList', 'BulletedList', '-', 'Outdent', 'Indent', '-', 'Table'],
['Source']
],
}
SELECT2_CSS = 'node_modules/select2/dist/css/select2.min.css'
SELECT2_JS = 'node_modules/select2/dist/js/select2.min.js'
# Embed Video
APPEND_SLASH = True
###################################################################################
#
# Shop Settings
#
###################################################################################
SHOP_APP_LABEL = 'tlmshop'
AUTH_USER_MODEL = 'email_auth.User'
SHOP_TYPE = 'smartcard'
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
]
MIGRATION_MODULES = {
'tlmshop': 'tlmshop.migrations.{}'.format(SHOP_TYPE)
}
############################################
# settings for sending mail
EMAIL_HOST = 'smtp.example.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = 'no-reply@example.com'
EMAIL_HOST_PASSWORD = 'smtp-secret-password'
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = 'My Shop <no-reply@example.com>'
EMAIL_REPLY_TO = 'info@example.com'
EMAIL_BACKEND = 'post_office.EmailBackend'
############################################
# settings for third party Django apps
SERIALIZATION_MODULES = {'json': str('shop.money.serializers')}
############################################
# settings for django-restframework and plugins
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'shop.rest.money.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer', # can be disabled for production environments
),
# 'DEFAULT_AUTHENTICATION_CLASSES': (
# 'rest_framework.authentication.TokenAuthentication',
# ),
'DEFAULT_FILTER_BACKENDS': ('rest_framework.filters.DjangoFilterBackend',),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE': 12,
}
############################################
# settings for storing session data
SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
SESSION_SAVE_EVERY_REQUEST = True
###########################################################
# Files
SHOP_TYPE = 'smartcard'
# 'commodity', 'i18n_commodity', 'smartcard', 'i18n_smartcard', 'i18n_polymorphic', 'polymorphic'
##############################################################
#
#
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'filters': {'require_debug_false': {'()': 'django.utils.log.RequireDebugFalse'}},
'formatters': {
'simple': {
'format': '[%(asctime)s %(module)s] %(levelname)s: %(message)s'
},
},
'handlers': {
'console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'simple',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': 'INFO',
'propagate': True,
},
'post_office': {
'handlers': ['console'],
'level': 'WARNING',
'propagate': True,
},
},
}
SILENCED_SYSTEM_CHECKS = ['auth.W004']
FIXTURE_DIRS = [os.path.join(WORK_DIR, SHOP_TYPE, 'fixtures')]
############################################
# settings for django-cms and its plugins
CMS_CACHE_DURATIONS = {
'content': 600,
'menus': 3600,
'permissions': 86400,
}
cascade_workarea_glossary = {
'breakpoints': ['xs', 'sm', 'md', 'lg'],
'container_max_widths': {'xs': 750, 'sm': 750, 'md': 970, 'lg': 1170},
'fluid': True,
'media_queries': {
'xs': ['(max-width: 768px)'],
'sm': ['(min-width: 768px)', '(max-width: 992px)'],
'md': ['(min-width: 992px)', '(max-width: 1200px)'],
'lg': ['(min-width: 1200px)'],
},
}
CMSPLUGIN_CASCADE_PLUGINS = [
'cmsplugin_cascade.segmentation',
'cmsplugin_cascade.generic',
'cmsplugin_cascade.icon',
'cmsplugin_cascade.link',
'shop.cascade',
'cmsplugin_cascade.bootstrap3',
]
CMSPLUGIN_CASCADE = {
'link_plugin_classes': [
'shop.cascade.plugin_base.CatalogLinkPluginBase',
'cmsplugin_cascade.link.plugin_base.LinkElementMixin',
'shop.cascade.plugin_base.CatalogLinkForm',
],
'alien_plugins': ['TextPlugin', 'TextLinkPlugin', 'AcceptConditionPlugin'],
'bootstrap3': {
'template_basedir': 'angular-ui',
},
'plugins_with_extra_render_templates': {
'CustomSnippetPlugin': [
('shop/catalog/product-heading.html', _("Product Heading")),
('tlmshop/catalog/manufacturer-filter.html', _("Manufacturer Filter")),
],
},
'plugins_with_sharables': {
'BootstrapImagePlugin': ['image_shapes', 'image_width_responsive', 'image_width_fixed',
'image_height', 'resize_options'],
'BootstrapPicturePlugin': ['image_shapes', 'responsive_heights', 'image_size', 'resize_options'],
},
'bookmark_prefix': '/',
'segmentation_mixins': [
('shop.cascade.segmentation.EmulateCustomerModelMixin', 'shop.cascade.segmentation.EmulateCustomerAdminMixin'),
],
'allow_plugin_hiding': True,
}
#############################################
# settings for full index text search (Haystack)
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
'URL': 'http://localhost:9200/',
'INDEX_NAME': 'tlmshop-{}-en'.format(SHOP_TYPE),
},
}
if USE_I18N:
HAYSTACK_CONNECTIONS['de'] = {
'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
'URL': 'http://localhost:9200/',
'INDEX_NAME': 'tlmshop-{}-de'.format(SHOP_TYPE),
}
HAYSTACK_ROUTERS = [
'shop.search.routers.LanguageRouter',
]
#####################################################################################3
############################################
# settings for django-shop and its plugins
SHOP_VALUE_ADDED_TAX = Decimal(19)
SHOP_DEFAULT_CURRENCY = 'USD'
SHOP_PRODUCT_SUMMARY_SERIALIZER = 'tlmshop.serializers.ProductSummarySerializer'
if SHOP_TYPE in ['i18n_polymorphic', 'polymorphic']:
SHOP_CART_MODIFIERS = ['tlmshop.polymorphic_modifiers.tlmshopCartModifier']
else:
SHOP_CART_MODIFIERS = ['shop.modifiers.defaults.DefaultCartModifier']
SHOP_CART_MODIFIERS.extend([
'shop.modifiers.taxes.CartExcludedTaxModifier',
'tlmshop.modifiers.PostalShippingModifier',
'tlmshop.modifiers.CustomerPickupModifier',
'shop.modifiers.defaults.PayInAdvanceModifier',
])
if 'shop_stripe' in INSTALLED_APPS:
SHOP_CART_MODIFIERS.append('tlmshop.modifiers.StripePaymentModifier')
SHOP_EDITCART_NG_MODEL_OPTIONS = "{updateOn: 'default blur', debounce: {'default': 2500, 'blur': 0}}"
SHOP_ORDER_WORKFLOWS = [
'shop.payment.defaults.PayInAdvanceWorkflowMixin',
'shop.payment.defaults.CancelOrderWorkflowMixin',
'shop_stripe.payment.OrderWorkflowMixin',
]
if SHOP_TYPE in ['i18n_polymorphic', 'polymorphic']:
SHOP_ORDER_WORKFLOWS.append('shop.shipping.delivery.PartialDeliveryWorkflowMixin')
else:
SHOP_ORDER_WORKFLOWS.append('shop.shipping.defaults.CommissionGoodsWorkflowMixin')
SHOP_STRIPE = {
'PUBKEY': 'pk_test_HlEp5oZyPonE21svenqowhXp',
'APIKEY': 'sk_test_xUdHLeFasmOUDvmke4DHGRDP',
'PURCHASE_DESCRIPTION': _("Thanks for purchasing at tlmshop"),
}
try:
from .private_settings import * # NOQA
except ImportError:
pass
| 29.213636 | 119 | 0.64478 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12,644 | 0.655775 |
64867655df53ae53f42648b7a2d27b6c674ce9c1 | 272 | py | Python | examples/02-hello_kml.py | JMSchietekat/polycircles | 26f46bb77c234ac0aec756131f599f1651a559da | [
"MIT"
] | 9 | 2016-07-04T08:57:57.000Z | 2021-04-30T16:02:12.000Z | examples/02-hello_kml.py | JMSchietekat/polycircles | 26f46bb77c234ac0aec756131f599f1651a559da | [
"MIT"
] | 11 | 2016-06-30T19:36:24.000Z | 2021-12-04T21:20:23.000Z | examples/02-hello_kml.py | JMSchietekat/polycircles | 26f46bb77c234ac0aec756131f599f1651a559da | [
"MIT"
] | 7 | 2015-11-15T02:38:38.000Z | 2021-12-04T09:16:49.000Z | import os
import simplekml
from polycircles.polycircles import Polycircle
polycircle = Polycircle(latitude=31.611878, longitude=34.505351, radius=100)
kml = simplekml.Kml()
pol = kml.newpolygon(name=f"Polycircle", outerboundaryis=polycircle.to_kml())
kml.save('02.kml') | 27.2 | 77 | 0.794118 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 21 | 0.077206 |
6486e28543122cc731938867a4ab44ae1ac8a42a | 5,579 | py | Python | amazon_main_xgboost.py | twankim/ensemble_amazon | 9019d8dcdfa3651b374e0216cc310255c2d660aa | [
"Apache-2.0"
] | 236 | 2016-04-08T01:49:46.000Z | 2021-08-16T21:27:34.000Z | amazon_main_xgboost.py | twankim/ensemble_amazon | 9019d8dcdfa3651b374e0216cc310255c2d660aa | [
"Apache-2.0"
] | 1 | 2017-07-09T10:35:01.000Z | 2017-07-09T10:55:19.000Z | amazon_main_xgboost.py | kaz-Anova/ensemble_amazon | 9019d8dcdfa3651b374e0216cc310255c2d660aa | [
"Apache-2.0"
] | 87 | 2016-04-08T05:13:44.000Z | 2022-02-02T14:46:51.000Z | """ Amazon Access Challenge Code for ensemble
Marios Michaildis script for Amazon .
xgboost on input data
based on Paul Duan's Script.
"""
from __future_
_ import division
import numpy as np
from sklearn import preprocessing
from sklearn.metrics import roc_auc_score
import XGBoostClassifier as xg
from sklearn.cross_validation import StratifiedKFold
SEED = 42 # always use a seed for randomized procedures
def load_data(filename, use_labels=True):
"""
Load data from CSV files and return them as numpy arrays
The use_labels parameter indicates whether one should
read the first column (containing class labels). If false,
return all 0s.
"""
# load column 1 to 8 (ignore last one)
data = np.loadtxt(open( filename), delimiter=',',
usecols=range(1, 9), skiprows=1)
if use_labels:
labels = np.loadtxt(open( filename), delimiter=',',
usecols=[0], skiprows=1)
else:
labels = np.zeros(data.shape[0])
return labels, data
def save_results(predictions, filename):
"""Given a vector of predictions, save results in CSV format."""
with open(filename, 'w') as f:
f.write("id,ACTION\n")
for i, pred in enumerate(predictions):
f.write("%d,%f\n" % (i + 1, pred))
def bagged_set(X_t,y_c,model, seed, estimators, xt, update_seed=True):
# create array object to hold predictions
baggedpred=[ 0.0 for d in range(0, (xt.shape[0]))]
#loop for as many times as we want bags
for n in range (0, estimators):
#shuff;e first, aids in increasing variance and forces different results
#X_t,y_c=shuffle(Xs,ys, random_state=seed+n)
if update_seed: # update seed if requested, to give a slightly different model
model.set_params(random_state=seed + n)
model.fit(X_t,y_c) # fit model0.0917411475506
preds=model.predict_proba(xt)[:,1] # predict probabilities
# update bag's array
for j in range (0, (xt.shape[0])):
baggedpred[j]+=preds[j]
# divide with number of bags to create an average estimate
for j in range (0, len(baggedpred)):
baggedpred[j]/=float(estimators)
# return probabilities
return np.array(baggedpred)
# using numpy to print results
def printfilcsve(X, filename):
np.savetxt(filename,X)
def main():
"""
Fit models and make predictions.
We'll use one-hot encoding to transform our categorical features
into binary features.
y and X will be numpy array objects.
"""
filename="main_xgboost" # nam prefix
#model = linear_model.LogisticRegression(C=3) # the classifier we'll use
model=xg.XGBoostClassifier(num_round=1000 ,nthread=25, eta=0.12, gamma=0.01,max_depth=12, min_child_weight=0.01, subsample=0.6,
colsample_bytree=0.7,objective='binary:logistic',seed=1)
# === load data in memory === #
print "loading data"
y, X = load_data('train.csv')
y_test, X_test = load_data('test.csv', use_labels=False)
# === one-hot encoding === #
# we want to encode the category IDs encountered both in
# the training and the test set, so we fit the encoder on both
encoder = preprocessing.OneHotEncoder()
encoder.fit(np.vstack((X, X_test)))
X = encoder.transform(X) # Returns a sparse matrix (see numpy.sparse)
X_test = encoder.transform(X_test)
# if you want to create new features, you'll need to compute them
# before the encoding, and append them to your dataset after
#create arrays to hold cv an dtest predictions
train_stacker=[ 0.0 for k in range (0,(X.shape[0])) ]
# === training & metrics === #
mean_auc = 0.0
bagging=20 # number of models trained with different seeds
n = 5 # number of folds in strattified cv
kfolder=StratifiedKFold(y, n_folds= n,shuffle=True, random_state=SEED)
i=0
for train_index, test_index in kfolder: # for each train and test pair of indices in the kfolder object
# creaning and validation sets
X_train, X_cv = X[train_index], X[test_index]
y_train, y_cv = np.array(y)[train_index], np.array(y)[test_index]
#print (" train size: %d. test size: %d, cols: %d " % ((X_train.shape[0]) ,(X_cv.shape[0]) ,(X_train.shape[1]) ))
# if you want to perform feature selection / hyperparameter
# optimization, this is where you want to do it
# train model and make predictions
preds=bagged_set(X_train,y_train,model, SEED , bagging, X_cv, update_seed=True)
# compute AUC metric for this CV fold
roc_auc = roc_auc_score(y_cv, preds)
print "AUC (fold %d/%d): %f" % (i + 1, n, roc_auc)
mean_auc += roc_auc
no=0
for real_index in test_index:
train_stacker[real_index]=(preds[no])
no+=1
i+=1
mean_auc/=n
print (" Average AUC: %f" % (mean_auc) )
print (" printing train datasets ")
printfilcsve(np.array(train_stacker), filename + ".train.csv")
# === Predictions === #
# When making predictions, retrain the model on the whole training set
preds=bagged_set(X, y,model, SEED, bagging, X_test, update_seed=True)
#create submission file
printfilcsve(np.array(preds), filename+ ".test.csv")
#save_results(preds, filename+"_submission_" +str(mean_auc) + ".csv")
if __name__ == '__main__':
main()
| 35.535032 | 133 | 0.639003 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,504 | 0.448826 |
64875676b96c2647ed1948f84397285ec47bff0c | 4,289 | py | Python | venv/lib/python3.6/site-packages/ansible/modules/set_fact.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 1 | 2020-01-22T13:11:23.000Z | 2020-01-22T13:11:23.000Z | venv/lib/python3.6/site-packages/ansible/modules/set_fact.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible/modules/set_fact.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Dag Wieers (@dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: set_fact
short_description: Set host variable(s) and fact(s).
version_added: "1.2"
description:
- This action allows setting variables associated to the current host.
- These variables will be available to subsequent plays during an ansible-playbook run via the host they were set on.
- Set C(cacheable) to C(yes) to save variables across executions using a fact cache.
Variables will keep the set_fact precedence for the current run, but will used 'cached fact' precedence for subsequent ones.
- Per the standard Ansible variable precedence rules, other types of variables have a higher priority, so this value may be overridden.
options:
key_value:
description:
- "The C(set_fact) module takes C(key=value) pairs or C(key: value) (YAML notation) as variables to set in the playbook scope.
The 'key' is the resulting variable name and the value is, of course, the value of said variable."
- You can create multiple variables at once, by supplying multiple pairs, but do NOT mix notations.
required: true
cacheable:
description:
- This boolean converts the variable into an actual 'fact' which will also be added to the fact cache, if fact caching is enabled.
- Normally this module creates 'host level variables' and has much higher precedence, this option changes the nature and precedence
(by 7 steps) of the variable created.
U(https://docs.ansible.com/ansible/latest/user_guide/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable)
- "This actually creates 2 copies of the variable, a normal 'set_fact' host variable with high precedence and
a lower 'ansible_fact' one that is available for persistance via the facts cache plugin.
This creates a possibly confusing interaction with C(meta: clear_facts) as it will remove the 'ansible_fact' but not the host variable."
type: bool
default: no
version_added: "2.4"
notes:
- Because of the nature of tasks, set_fact will produce 'static' values for a variable.
Unlike normal 'lazy' variables, the value gets evaluated and templated on assignment.
- Some boolean values (yes, no, true, false) will always be converted to boolean type,
unless C(DEFAULT_JINJA2_NATIVE) is enabled. This is done so the C(var=value) booleans,
otherwise it would only be able to create strings, but it also prevents using those values to create YAML strings.
Using the setting will restrict k=v to strings, but will allow you to specify string or boolean in YAML.
- "To create lists/arrays or dictionary/hashes use YAML notation C(var: [val1, val2])."
- Since 'cacheable' is now a module param, 'cacheable' is no longer a valid fact name.
- This action does not use a connection and always executes on the controller.
seealso:
- module: ansible.builtin.include_vars
- ref: ansible_variable_precedence
description: More information related to variable precedence and which type of variable wins over others.
author:
- Dag Wieers (@dagwieers)
'''
EXAMPLES = r'''
- name: Setting host facts using key=value pairs, this format can only create strings or booleans
set_fact: one_fact="something" other_fact="{{ local_var }}"
- name: Setting host facts using complex arguments
set_fact:
one_fact: something
other_fact: "{{ local_var * 2 }}"
another_fact: "{{ some_registered_var.results | map(attribute='ansible_facts.some_fact') | list }}"
- name: Setting facts so that they will be persisted in the fact cache
set_fact:
one_fact: something
other_fact: "{{ local_var * 2 }}"
cacheable: yes
- name: Creating list and dictionary variables
set_fact:
one_dict:
something: here
other: there
one_list:
- a
- b
- c
- name: Creating list and dictionary variables using 'shorthand' YAML
set_fact:
two_dict: {'something': here2, 'other': somewhere}
two_list: [1,2,3]
'''
| 47.655556 | 144 | 0.726743 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,165 | 0.971089 |
64882886d4f3bd8190f93a2bc8758ff2dad50669 | 7,114 | py | Python | general/general_app_for_company/general_ledger.py | surajano/general | 82157f61f7686aceeb6ff108474faa060463b9c2 | [
"MIT"
] | null | null | null | general/general_app_for_company/general_ledger.py | surajano/general | 82157f61f7686aceeb6ff108474faa060463b9c2 | [
"MIT"
] | null | null | null | general/general_app_for_company/general_ledger.py | surajano/general | 82157f61f7686aceeb6ff108474faa060463b9c2 | [
"MIT"
] | null | null | null | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, cstr, cint
from frappe import _
from frappe.model.meta import get_field_precision
from erpnext.accounts.utils import validate_expense_against_budget
class StockAccountInvalidTransaction(frappe.ValidationError): pass
def make_gl_entries(gl_map, cancel=False, adv_adj=False, merge_entries=True, update_outstanding='Yes'):
if gl_map:
if not cancel:
gl_map = process_gl_map(gl_map, merge_entries)
if gl_map and len(gl_map) > 1:
save_entries(gl_map, adv_adj, update_outstanding)
else:
frappe.throw(_("Incorrect number of General Ledger Entries found. You might have selected a wrong Account in the transaction."))
else:
delete_gl_entries(gl_map, adv_adj=adv_adj, update_outstanding=update_outstanding)
def process_gl_map(gl_map, merge_entries=True):
if merge_entries:
gl_map = merge_similar_entries(gl_map)
for entry in gl_map:
# toggle debit, credit if negative entry
if flt(entry.debit) < 0:
entry.credit = flt(entry.credit) - flt(entry.debit)
entry.debit = 0.0
if flt(entry.debit_in_account_currency) < 0:
entry.credit_in_account_currency = \
flt(entry.credit_in_account_currency) - flt(entry.debit_in_account_currency)
entry.debit_in_account_currency = 0.0
if flt(entry.credit) < 0:
entry.debit = flt(entry.debit) - flt(entry.credit)
entry.credit = 0.0
if flt(entry.credit_in_account_currency) < 0:
entry.debit_in_account_currency = \
flt(entry.debit_in_account_currency) - flt(entry.credit_in_account_currency)
entry.credit_in_account_currency = 0.0
return gl_map
def merge_similar_entries(gl_map):
merged_gl_map = []
for entry in gl_map:
# if there is already an entry in this account then just add it
# to that entry
same_head = check_if_in_list(entry, merged_gl_map)
if same_head:
same_head.debit = flt(same_head.debit) + flt(entry.debit)
same_head.debit_in_account_currency = \
flt(same_head.debit_in_account_currency) + flt(entry.debit_in_account_currency)
same_head.credit = flt(same_head.credit) + flt(entry.credit)
same_head.credit_in_account_currency = \
flt(same_head.credit_in_account_currency) + flt(entry.credit_in_account_currency)
else:
merged_gl_map.append(entry)
# filter zero debit and credit entries
merged_gl_map = filter(lambda x: flt(x.debit, 9)!=0 or flt(x.credit, 9)!=0, merged_gl_map)
return merged_gl_map
def check_if_in_list(gle, gl_map):
for e in gl_map:
if e.account == gle.account \
and cstr(e.get('party_type'))==cstr(gle.get('party_type')) \
and cstr(e.get('party'))==cstr(gle.get('party')) \
and cstr(e.get('against_voucher'))==cstr(gle.get('against_voucher')) \
and cstr(e.get('against_voucher_type')) == cstr(gle.get('against_voucher_type')) \
and cstr(e.get('cost_center')) == cstr(gle.get('cost_center')):
return e
def save_entries(gl_map, adv_adj, update_outstanding):
validate_account_for_auto_accounting_for_stock(gl_map)
round_off_debit_credit(gl_map)
for entry in gl_map:
make_entry(entry, adv_adj, update_outstanding)
# check against budget
validate_expense_against_budget(entry)
def make_entry(args, adv_adj, update_outstanding):
args.update({"doctype": "GL Entry"})
gle = frappe.get_doc(args)
gle.flags.ignore_permissions = 1
gle.insert()
gle.run_method("on_update_with_args", adv_adj, update_outstanding)
gle.submit()
def validate_account_for_auto_accounting_for_stock(gl_map):
if cint(frappe.db.get_single_value("Accounts Settings", "auto_accounting_for_stock")) \
and gl_map[0].voucher_type=="Journal Entry":
aii_accounts = [d[0] for d in frappe.db.sql("""select name from tabAccount
where account_type = 'Warehouse' and (warehouse != '' and warehouse is not null)""")]
for entry in gl_map:
if entry.account in aii_accounts:
frappe.throw(_("Account: {0} can only be updated via Stock Transactions")
.format(entry.account), StockAccountInvalidTransaction)
def round_off_debit_credit(gl_map):
precision = get_field_precision(frappe.get_meta("GL Entry").get_field("debit"),
currency=frappe.db.get_value("Company", gl_map[0].company, "default_currency", cache=True))
debit_credit_diff = 0.0
for entry in gl_map:
entry.debit = flt(entry.debit, precision)
entry.credit = flt(entry.credit, precision)
debit_credit_diff += entry.debit - entry.credit
debit_credit_diff = flt(debit_credit_diff, precision)
if abs(debit_credit_diff) >= (5.0 / (10**precision)):
frappe.throw(_("Debit and Credit not equal for {0} #{1}. Difference is {2}.")
.format(gl_map[0].voucher_type, gl_map[0].voucher_no, debit_credit_diff))
elif abs(debit_credit_diff) >= (1.0 / (10**precision)):
make_round_off_gle(gl_map, debit_credit_diff)
def make_round_off_gle(gl_map, debit_credit_diff):
round_off_account, round_off_cost_center = frappe.db.get_value("Company", gl_map[0].company,
["round_off_account", "round_off_cost_center"]) or [None, None]
if not round_off_account:
frappe.throw(_("Please mention Round Off Account in Company"))
if not round_off_cost_center:
frappe.throw(_("Please mention Round Off Cost Center in Company"))
round_off_gle = frappe._dict()
for k in ["voucher_type", "voucher_no", "company",
"posting_date", "remarks", "fiscal_year", "is_opening"]:
round_off_gle[k] = gl_map[0][k]
round_off_gle.update({
"account": round_off_account,
"debit_in_account_currency": abs(debit_credit_diff) if debit_credit_diff < 0 else 0,
"credit_in_account_currency": debit_credit_diff if debit_credit_diff > 0 else 0,
"debit": abs(debit_credit_diff) if debit_credit_diff < 0 else 0,
"credit": debit_credit_diff if debit_credit_diff > 0 else 0,
"cost_center": round_off_cost_center,
"party_type": None,
"party": None,
"against_voucher_type": None,
"against_voucher": None
})
gl_map.append(round_off_gle)
def delete_gl_entries(gl_entries=None, voucher_type=None, voucher_no=None,
adv_adj=False, update_outstanding="Yes"):
from erpnext.accounts.doctype.gl_entry.gl_entry import validate_balance_type, \
check_freezing_date, update_outstanding_amt, validate_frozen_account
if not gl_entries:
gl_entries = frappe.db.sql("""select * from `tabGL Entry`
where voucher_type=%s and voucher_no=%s""", (voucher_type, voucher_no), as_dict=True)
if gl_entries:
check_freezing_date(gl_entries[0]["posting_date"], adv_adj)
frappe.db.sql("""delete from `tabGL Entry` where voucher_type=%s and voucher_no=%s""",
(voucher_type or gl_entries[0]["voucher_type"], voucher_no or gl_entries[0]["voucher_no"]))
for entry in gl_entries:
validate_frozen_account(entry["account"], adv_adj)
validate_balance_type(entry["account"], adv_adj)
validate_expense_against_budget(entry)
if entry.get("against_voucher") and update_outstanding == 'Yes':
update_outstanding_amt(entry["account"], entry.get("party_type"), entry.get("party"), entry.get("against_voucher_type"),
entry.get("against_voucher"), on_cancel=True)
| 39.303867 | 132 | 0.758645 | 66 | 0.009277 | 0 | 0 | 0 | 0 | 0 | 0 | 1,619 | 0.227579 |
648835b0871fa76f3d85b2a140f67b19860ce428 | 114 | py | Python | virgool_cloud/config-sample.py | mavahedinia/virgoolcloud | cef18e5dc85940d079c51552e8def158dff6ec88 | [
"MIT"
] | 1 | 2018-03-27T15:16:12.000Z | 2018-03-27T15:16:12.000Z | virgool_cloud/config-sample.py | Mr0Null/virgoolcloud | cef18e5dc85940d079c51552e8def158dff6ec88 | [
"MIT"
] | 4 | 2021-03-18T20:18:20.000Z | 2022-03-11T23:14:34.000Z | virgool_cloud/config-sample.py | mavahedinia/virgoolcloud | cef18e5dc85940d079c51552e8def158dff6ec88 | [
"MIT"
] | null | null | null | bot_token = ''
waiting_timeout = 5 # Seconds
admin_id = ""
channel_id = ""
bitly_access_token = ""
vars_file = ""
| 16.285714 | 29 | 0.675439 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 19 | 0.166667 |
6488eb36f073b4226b997654ad44ca0cfd79c48b | 5,425 | py | Python | src/runs/step321_pulmonary_summary.py | uw-bionlp/ards | e9fc27f7034cc6b54f0ccdba4a58377948cf0258 | [
"BSD-3-Clause"
] | null | null | null | src/runs/step321_pulmonary_summary.py | uw-bionlp/ards | e9fc27f7034cc6b54f0ccdba4a58377948cf0258 | [
"BSD-3-Clause"
] | null | null | null | src/runs/step321_pulmonary_summary.py | uw-bionlp/ards | e9fc27f7034cc6b54f0ccdba4a58377948cf0258 | [
"BSD-3-Clause"
] | null | null | null |
from __future__ import division, print_function, unicode_literals
from sacred import Experiment
from sacred.observers import FileStorageObserver
from pathlib import Path
import os
import re
import numpy as np
import json
import joblib
import pandas as pd
from collections import Counter, OrderedDict
import logging
from tqdm import tqdm
from scipy.stats import ttest_ind
import re
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from scoring.scoring_utils import PRF
from utils.custom_observer import CustomObserver
from utils.proj_setup import make_and_clear
import config.constants as constants
import config.constants_pulmonary as constants_pulmonary
import config.paths as paths
import config.paths_pulmonary as paths_pulmonary
import corpus.corpus_brat_xray as corpus_brat_xray
from config.constants import CV, FIT, PREDICT, SCORE, DOC_LABELS, TYPE, F1, SENT_LABELS, SUBTYPE, NT, NP, TP, P, R
from config.constants_pulmonary import NONE, PRESENT, UNILATERAL, BILATERAL
# Define experiment and load ingredients
ex = Experiment('step321_pulmonary_summary')
# Column names added to the aggregated score DataFrames in main() below.
MODEL = 'model'
RUN = 'run'
CATEGORY = 'category'
MICRO = "micro"
RANK = 'rank'
@ex.config
def cfg():
    """Sacred config: run mode, score-file names, and source/destination paths.

    Every local assigned here becomes a config entry injected into ``main``.
    The trailing statements also prepare the output directory and register
    the observers as a side effect of config evaluation.
    """
    #mode = CV
    mode = FIT
    #mode = PREDICT
    #mode = SCORE
    #file_doc_scores = ["scores_doc_labels.csv", "scores_doc_labels_summary.csv", "scores_sent_labels_summary.csv"] #"scores_entities.csv", "scores_relations.csv"]
    file_doc_scores = "scores_doc_labels.csv" #"scores_entities.csv", "scores_relations.csv"]
    file_sent_scores = "scores_sent_labels_summary.csv"
    source_dirs = [os.path.join(paths_pulmonary.modeling, mode)]
    discrete_dir = '/home/lybarger/clinical_extractors/analyses_pulmonary/step322_pulmonary_discrete/ngrams/'
    if mode == FIT:
        source_dirs.append(discrete_dir)
    metric = F1
    destination = os.path.join(paths_pulmonary.summary, mode)
    # Pattern stripping '+runN' suffixes so repeated runs of one model share
    # an abbreviated name. NOTE(review): should be a raw string r'\+run\d'.
    suffix_pat = '\+run\d'
    # Destination file for corpus
    # Scratch directory
    make_and_clear(destination)
    # Create observers
    file_observ = FileStorageObserver.create(destination)
    cust_observ = CustomObserver(destination)
    ex.observers.append(file_observ)
    ex.observers.append(cust_observ)
@ex.automain
def main(source_dirs, destination, file_doc_scores, file_sent_scores, suffix_pat, metric):
    """Aggregate per-run document/sentence score files into summary CSVs.

    Collects ``file_doc_scores`` from every run directory, adds per-type
    micro averages, writes combined/micro CSVs plus pairwise Welch t-tests
    of ``metric`` between models, then concatenates the sentence score
    files. All arguments are injected by Sacred from ``cfg``.
    """
    # get all sub directories
    result_dirs = []
    for dir in source_dirs:
        result_dirs.extend([path for path in Path(dir).iterdir() if path.is_dir()])
    logging.info(f"Source directories:")
    for dir in source_dirs:
        logging.info(f"{dir}")
    logging.info(f"Directory count: {len(result_dirs)}")
    unnamed = 'Unnamed: 0'  # pandas' default index column on CSV round-trips
    # --- document-level label scores ---------------------------------------
    file_name = file_doc_scores
    dfs = []
    for dir in result_dirs:
        f = os.path.join(dir, file_name)
        name = str(dir.name)
        # strip the '+runN' suffix so repeats of one model group together
        name_abbrev = re.sub(suffix_pat, '', name)
        if os.path.exists(f):
            df = pd.read_csv(f)
            if unnamed in df:
                del df[unnamed]
            df.insert(0, RUN, name)
            df.insert(0, MODEL, name_abbrev)
            df = df.fillna(0)
            # micro average: sum the count columns per label TYPE, then
            # recompute precision/recall/F1 from the summed counts
            dfg = PRF(df.groupby(TYPE).agg({MODEL:'first', RUN:'first', NT:'sum', NP:'sum', TP:'sum'}))
            dfg.reset_index(level=0, inplace=True)
            dfg.insert(0, SUBTYPE, MICRO)
            df = pd.concat([df, dfg])
            # fixed display order for the subtype rows
            df[RANK] = df[SUBTYPE].apply(lambda row: [NONE, PRESENT, UNILATERAL, BILATERAL, MICRO].index(row))
            df = df.sort_values([MODEL, RUN, TYPE, RANK])
            print(df)
            dfs.append(df)
    if len(dfs) > 0:
        df = pd.concat(dfs)
        df.sort_values([MODEL, RUN, TYPE, RANK], inplace=True)
        f = os.path.join(destination, file_name)
        df.to_csv(f)
        # micro-average rows only, in a separate file
        f = os.path.join(destination, f'{file_name}_micro.csv')
        df_ = df[df[SUBTYPE] == MICRO]
        df_.to_csv(f)
        # pairwise Welch t-tests of `metric` between models, per category
        df[CATEGORY] = df.apply(lambda row: f'{row[TYPE]}-{row[SUBTYPE]}', axis = 1)
        summary = []
        for category in df[CATEGORY].unique():
            df_temp = df[df[CATEGORY] == category]
            models = df_temp[MODEL].unique()
            stds = OrderedDict()   # NOTE(review): never populated; appears unused
            pvals = OrderedDict()  # NOTE(review): never populated; appears unused
            for a in models:
                A = df_temp[df_temp[MODEL] == a][metric]
                d = OrderedDict()
                d[CATEGORY] = category
                d[MODEL] = a
                d['mean'] = A.mean()
                d['std'] = A.std()
                for b in models:
                    B = df_temp[df_temp[MODEL] == b][metric]
                    # Welch's t-test (unequal variances) across runs
                    _, pval = ttest_ind(A, B, equal_var=False)
                    d[f'Pval-{b}'] = pval
                summary.append(d)
        df = pd.DataFrame(summary)
        f = os.path.join(destination, f'summary.csv')
        df.to_csv(f)
    # --- sentence-level label scores (simple concatenation) -----------------
    file_name = file_sent_scores
    dfs = []
    for dir in result_dirs:
        f = os.path.join(dir, file_name)
        name = str(dir.name)
        name_abbrev = re.sub(suffix_pat, '', name)
        if os.path.exists(f):
            df = pd.read_csv(f)
            if unnamed in df:
                del df[unnamed]
            df.insert(0, RUN, name)
            df.insert(0, MODEL, name_abbrev)
            dfs.append(df)
    if len(dfs) > 0:
        df = pd.concat(dfs)
        f = os.path.join(destination, file_name)
        df.to_csv(f)
    return 'Successful completion'
| 26.99005 | 163 | 0.627281 | 0 | 0 | 0 | 0 | 4,223 | 0.778433 | 0 | 0 | 820 | 0.151152 |
648989e670d245b0f41026c6263b79ce124e9e03 | 3,811 | py | Python | pydanmaku_old/game.py | Alokpro1/PyDanmaku | 7c33252774c07c99216fc4e7ac957e9fe27101d1 | [
"MIT"
] | null | null | null | pydanmaku_old/game.py | Alokpro1/PyDanmaku | 7c33252774c07c99216fc4e7ac957e9fe27101d1 | [
"MIT"
] | null | null | null | pydanmaku_old/game.py | Alokpro1/PyDanmaku | 7c33252774c07c99216fc4e7ac957e9fe27101d1 | [
"MIT"
] | null | null | null | import pygame
import inspect
from .bullet import Bullet
from .player import Player
# Maps of pygame key codes to handler-method names looked up on each game
# object: PRESSED fires every frame while held, DOWN/UP fire on key edges.
PRESSED = {
    pygame.K_UP: 'up',
    pygame.K_DOWN: 'down',
    pygame.K_LEFT: 'left',
    pygame.K_RIGHT: 'right',
}
DOWN = {
    pygame.K_LSHIFT: 'shift',
}
UP = {
    pygame.K_LSHIFT: 'unshift',
}
class Game:
    """Owns the pygame window, the sprite groups, and the cooperative
    task/script machinery that drives game behaviour.
    """

    def __init__(self):
        self.screen = pygame.display.set_mode((640, 480))
        self.tasks = []    # each task is a stack of nested generators
        self.scripts = []  # queued level scripts; popped when tasks run dry
        self.objects = pygame.sprite.Group()
        self.bullets = pygame.sprite.Group()
        self.player = None

    def run(self):
        """
        Run the main loop at 60 FPS until the player quits.

        Bug fix: pressing ``q`` previously only broke out of the inner
        per-object loop, and window-close (``pygame.QUIT``) events were
        never handled, so the loop could not terminate. Both now end the
        loop and return.
        :return:
        """
        clock = pygame.time.Clock()
        running = True
        while running:
            clock.tick(60)
            self.screen.fill((255, 255, 255))
            self.objects.draw(self.screen)
            # Advance every cooperative task. Yielding a generator pushes
            # it onto the task's stack; StopIteration pops one level; an
            # empty stack (IndexError) retires the whole task.
            for task in list(self.tasks):
                try:
                    while True:
                        result = task[-1].__next__()
                        if inspect.isgenerator(result):
                            task.append(result)
                        else:
                            break
                except StopIteration:
                    task.pop()
                except IndexError:
                    self.tasks.remove(task)
            if not self.tasks and self.scripts:
                self.tasks.append([self.scripts.pop()])
            keys = pygame.key.get_pressed()
            events = pygame.event.get()
            # Quit on 'q' or a window-close event (fix: previously the 'q'
            # check only broke the object loop and QUIT was ignored).
            if keys[pygame.K_q] or any(e.type == pygame.QUIT for e in events):
                running = False
            # Forward key events to any object exposing a matching handler
            # method (names come from the PRESSED/DOWN/UP tables).
            for obj in self.objects:
                for event in events:
                    if event.type == pygame.KEYDOWN and event.key in DOWN and hasattr(obj, DOWN[event.key]):
                        obj.__getattribute__(DOWN[event.key])()
                    elif event.type == pygame.KEYUP and event.key in UP and hasattr(obj, UP[event.key]):
                        obj.__getattribute__(UP[event.key])()
                for k, v in PRESSED.items():
                    if keys[k] and hasattr(obj, v):
                        obj.__getattribute__(v)()
            # Step all objects and reap the ones flagged for removal.
            for obj in list(self.objects.sprites()):
                if hasattr(obj, 'step'):
                    obj.step()
                if hasattr(obj, 'should_remove') and obj.should_remove:
                    self.objects.remove(obj)
            pygame.display.update()

    def add_task(self, task):
        """
        Adds a task (a generator) to be run by the main loop.
        :param task: generator driving some behaviour
        :return:
        """
        if not inspect.isgenerator(task):
            raise TypeError('Object must be a generator')
        self.tasks.append(task)

    def set_player(self, player):
        """
        Sets the player object, replacing (and killing) any previous one.
        :param player:
        :return:
        """
        if not isinstance(player, Player):
            raise TypeError("Object must be Player")
        if self.player:
            self.player.kill()
        self.player = player
        self.objects.add(player)

    def add_bullet(self, bullet):
        """
        Adds a bullet onto the screen.
        :param bullet:
        :return: the bullet, for chaining
        """
        if not isinstance(bullet, Bullet):
            raise TypeError("Object must be a Bullet")
        self.objects.add(bullet)
        self.bullets.add(bullet)
        return bullet

    def add_bullets(self, *bullets):
        """
        Adds multiple bullets.
        :param bullets:
        :return: list of the added bullets
        """
        return [self.add_bullet(b) for b in bullets]

    def add_script(self, module):
        """
        Imports *module* and queues its ``main(self)`` generator at the
        end of the script queue.
        :param module: dotted module name
        :return: the created generator
        """
        module = __import__(module, fromlist=['*'])
        task = module.main(self)
        self.scripts.insert(0, task)
        return task
| 27.417266 | 108 | 0.504067 | 3,518 | 0.923117 | 0 | 0 | 0 | 0 | 0 | 0 | 684 | 0.17948 |
648a55e4d03e0a1260cec1b579e77e89a43b04dd | 4,040 | py | Python | inz/tests/test_utils.py | matbur/inz | f6be1a685761f99f8c808d8b23f58debf7e19da2 | [
"MIT"
] | null | null | null | inz/tests/test_utils.py | matbur/inz | f6be1a685761f99f8c808d8b23f58debf7e19da2 | [
"MIT"
] | 2 | 2020-03-24T16:35:39.000Z | 2020-03-31T00:33:08.000Z | inz/tests/test_utils.py | matbur/inz | f6be1a685761f99f8c808d8b23f58debf7e19da2 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import pytest
from sklearn.feature_selection import SelectKBest, chi2 as sk_chi2
from inz.utils import chi2, select_k_best, split, train_test_split
def test_split_list_int():
    """Chunking a 7-element int list by 3 keeps the short tail chunk."""
    values = list(range(7))
    expected = [[0, 1, 2], [3, 4, 5], [6]]
    actual = list(split(values, 3))
    assert len(actual) == len(expected)
    assert actual == expected
def test_split_int():
    """split() also accepts a bare range object, not just a list."""
    values = range(7)
    expected = [[0, 1, 2], [3, 4, 5], [6]]
    actual = list(split(values, 3))
    assert len(actual) == len(expected)
    assert actual == expected
def test_split_list_int_greater_width():
    """A chunk width larger than the input yields one undersized chunk."""
    values = list(range(3))
    expected = [[0, 1, 2]]
    actual = list(split(values, 4))
    assert len(actual) == len(expected)
    assert actual == expected
def test_split_list_str():
    """String elements are chunked just like ints."""
    values = [str(v) for v in range(6)]
    expected = [['0', '1'], ['2', '3'], ['4', '5']]
    actual = list(split(values, 2))
    assert len(actual) == len(expected)
    assert actual == expected
def test_str():
    """Splitting a plain string yields lists of its characters."""
    text = '012345'
    expected = [['0', '1'], ['2', '3'], ['4', '5']]
    actual = list(split(text, 2))
    assert len(actual) == len(expected)
    assert actual == expected
def test_split_ndarray_int():
    """A 2-D ndarray is chunked along axis 0, preserving the array type."""
    matrix = np.arange(10, dtype=int).reshape(-1, 2)
    expected = [np.array([[0, 1], [2, 3]]),
                np.array([[4, 5], [6, 7]]),
                np.array([[8, 9]])]
    actual = list(split(matrix, 2))
    assert len(actual) == len(expected)
    for got, want in zip(actual, expected):
        assert type(got) == type(want)
        assert np.array_equal(got, want)
def test_split_generator_str():
    """split() accepts a one-shot iterator input as well."""
    values = (str(v) for v in range(6))
    expected = [['0', '1'], ['2', '3'], ['4', '5']]
    actual = list(split(values, 2))
    assert len(actual) == len(expected)
    assert actual == expected
def test_split_list_int_not_allow():
    """Passing False as the third argument drops the undersized tail chunk."""
    values = list(range(7))
    expected = [[0, 1, 2], [3, 4, 5]]
    actual = list(split(values, 3, False))
    assert len(actual) == len(expected)
    assert actual == expected
def test_split_list_int_greater_width_not_allow():
    """No chunks at all when the width exceeds the input and tails are disallowed."""
    values = list(range(3))
    expected = []
    actual = list(split(values, 4, False))
    assert len(actual) == len(expected)
    assert actual == expected
def test_split_list_str_not_allow():
    """An exact multiple of the width is unaffected by disallowing tails."""
    values = [str(v) for v in range(6)]
    expected = [['0', '1'], ['2', '3'], ['4', '5']]
    actual = list(split(values, 2, False))
    assert len(actual) == len(expected)
    assert actual == expected
def test_split_ndarray_int_not_allow():
    """The odd trailing ndarray row-chunk is dropped when tails are disallowed."""
    matrix = np.arange(10, dtype=int).reshape(-1, 2)
    expected = [np.array([[0, 1], [2, 3]]),
                np.array([[4, 5], [6, 7]])]
    actual = list(split(matrix, 2, False))
    assert len(actual) == len(expected)
    for got, want in zip(actual, expected):
        assert type(got) == type(want)
        assert np.array_equal(got, want)
def test_split_generator_str_not_allow():
    """Iterator input with an exact multiple of the width, tails disallowed."""
    values = (str(v) for v in range(6))
    expected = [['0', '1'], ['2', '3'], ['4', '5']]
    actual = list(split(values, 2, False))
    assert len(actual) == len(expected)
    assert actual == expected
@pytest.fixture
def data():
    """Feature matrix and 'Choroba' (disease) label vector from the demo CSV."""
    frame = pd.read_csv('../../data/data.csv')
    target = frame.pop('Choroba')
    return frame.values, target.values
def test_chi2(data):
    """Our chi2 statistics agree exactly with scikit-learn's."""
    X, y = data
    reference, _ = sk_chi2(X, y)
    np.testing.assert_equal(reference, chi2(X, y))
def test_select_k_best(data):
    """select_k_best matches sklearn's SelectKBest for every k in 1..30."""
    X, y = data
    for k in range(1, 31):
        mask_ref = SelectKBest(sk_chi2, k).fit(X, y).get_support()
        idx_ref = SelectKBest(sk_chi2, k).fit(X, y).get_support(True)
        np.testing.assert_equal(mask_ref, select_k_best(X, y, k=k), str(k))
        np.testing.assert_equal(idx_ref, sorted(select_k_best(X, y, k=k, indices=True)), str(k))
def test_train_test_split():
    """Default split without shuffling is 70/30, in order."""
    samples = np.arange(10)
    parts = train_test_split(samples, shuffle=False)
    for part, reference in zip(parts, [np.arange(7), np.arange(7, 10)]):
        np.testing.assert_equal(part, reference)
def test_train_test_split5():
    """An explicit test_size of 0.5 halves the data, in order."""
    samples = np.arange(10)
    parts = train_test_split(samples, test_size=.5, shuffle=False)
    for part, reference in zip(parts, [np.arange(5), np.arange(5, 10)]):
        np.testing.assert_equal(part, reference)
# Allow running this test module directly (python test_utils.py) without
# invoking pytest from the command line.
if __name__ == '__main__':
    pytest.main()
| 25.56962 | 69 | 0.576733 | 0 | 0 | 0 | 0 | 125 | 0.030941 | 0 | 0 | 132 | 0.032673 |
648a6781dfe93e87fceccd3d76deb550de9b06e9 | 7,572 | py | Python | Library/Database.py | kensand/HonorsProject | 219b9b448a41c74f17f89319ef1550878d77e6e0 | [
"Apache-2.0"
] | null | null | null | Library/Database.py | kensand/HonorsProject | 219b9b448a41c74f17f89319ef1550878d77e6e0 | [
"Apache-2.0"
] | null | null | null | Library/Database.py | kensand/HonorsProject | 219b9b448a41c74f17f89319ef1550878d77e6e0 | [
"Apache-2.0"
] | null | null | null | import psycopg2
batch_size = 10000
embedding_length=256
#These are the default database settings, and assumes the tweets, tweets_hashtags, and hashtags tables are in the public schema.
# default database information
# NOTE(review): credentials are hard-coded; prefer environment variables or
# a config file for anything beyond local development.
Dbname = 'postgres'
User = 'kenny'
Host = 'localhost'
Password = 'honorsproject2017'
# default table information
#You probably should not change column names because the
# Per-table metadata: table name plus the column names that the helper
# functions below interpolate into their SQL statements.
tweets = {'table_name': "tweets", 'tweet_id_column': 'id', 'text_column': 'text'}
formatted_tweets = {'table_name': 'formatted_tweets', 'tweet_id_column': 'id', 'tokens_column': 'tokens'}
dictionary = {'table_name': 'dictionary', 'default_size': str(50000), 'word_id_column': 'word_id',
              'word_column': 'word',
              'use_column': 'use'}
int_tweets = {'table_name': 'int_tweets', 'id_column': 'id', 'int_array_column': 'int_array'}
word_embeddings = {'table_name': 'word_embeddings', 'embedding_size': str(embedding_length), 'word_id_column': 'word_id',
                   'word_embedding_column': 'word_embedding'}
tweet_embeddings = {'table_name': 'tweet_embeddings', 'tweet_id_column': 'tweet_id',
                    'tweet_embedding_column': 'tweet_embedding'}
hashtag_embeddings = {'table_name': 'hashtag_embeddings', 'hashtag_id_column': 'hashtag_id',
                      'hashtag_embedding_column': 'hashtag_embedding', 'hashtag_use_column': 'use'}
tweets_hashtags = {'table_name': 'tweets_hashtags', 'tweets_id_column': 'tweet_id', 'hashtag_id': 'hashtag_id'}
hashtag_relationships={'table_name': 'hashtag_relationships'}
#A group of functions to create the tables needed and in the specified schemas
#TODO change all the table and column names to depend on the above config.
def CreateHashtagEmbeddingTable(schema='public'):
    """Create <schema>.hashtag_embeddings (id PK, embedding array, use count)
    plus an index on `use`, if the table does not already exist."""
    cur = get_Cur()
    if not table_exists(hashtag_embeddings['table_name'], schema):
        cur.execute("""create table """ + schema + """.""" + hashtag_embeddings['table_name'] + """( hashtag_id bigint not null constraint hashtag_embeddings_pkey primary key, hashtag_embedding double precision[], use integer); create index hashtag_embeddings_index_index on """ + schema + """.hashtag_embeddings (use); COMMIT;""")
def CreateWordEmbeddingTable(schema='public'):
    """Create <schema>.word_embeddings (word_id PK, embedding array) with a
    unique index on word_id, if the table does not already exist."""
    cur = get_Cur()
    if not table_exists(word_embeddings['table_name'], schema):
        cur.execute("""create table """ + schema + """.""" + word_embeddings['table_name'] + """ ( word_id integer not null constraint word_embeddings_pkey primary key, word_embedding double precision[]); create unique index word_embeddings_index_uindex on """ + schema + """.word_embeddings (word_id); COMMIT;""")
def CreateHashtagRelationshipsTable(schema='public'):
    """Create <schema>.hashtag_relationships (serial PK, hashtag text,
    relationships array), if the table does not already exist."""
    cur = get_Cur()
    if not table_exists(hashtag_relationships['table_name'], schema):
        cur.execute("""create table """ + schema + """.""" + hashtag_relationships['table_name'] + """ ( index serial not null constraint hashtag_relationships_pkey primary key, hashtag varchar(255), relationships double precision[]); create unique index hashtag_relationships_index_uindex on """ + schema + """.hashtag_relationships (index); COMMIT;""")
def CreateDictionaryTable(schema='public'):
    """Create <schema>.dictionary (word_id PK, use count, word text), if the
    table does not already exist."""
    cur = get_Cur()
    if not table_exists(dictionary['table_name'], schema):
        cur.execute("""create table """ + schema + """.""" + dictionary['table_name'] + """ ( word_id integer not null constraint dictionary_pkey primary key, use integer, word varchar(256)); COMMIT;""")
def CreateIntTweetsTable(schema='public'):
    """Create <schema>.int_tweets (tweet id, int_array of word ids) with an
    index on id, if the table does not already exist."""
    cur = get_Cur()
    if not table_exists(int_tweets['table_name'], schema):
        cur.execute(
            """create table """ + schema + """.""" + int_tweets['table_name'] + """ ( id bigint, int_array integer[]); create index tweetvecs_tweet_id_index on """ + schema + """.int_tweets (id); COMMIT;""")
def CreateFormattedTweetsTable(schema='public'):
    """Create <schema>.formatted_tweets (tweet id PK, tokens array), if the
    table does not already exist."""
    cur = get_Cur()
    if not table_exists(formatted_tweets['table_name'], schema):
        cur.execute("""create table """ + schema + """.""" + formatted_tweets['table_name'] + """ (id bigint not null constraint formatted_tweets_tweet_id_pk primary key, tokens varchar(128)[]); COMMIT;""")
# function to return the default psycopg2 connection
def get_Conn(dbname=Dbname, user=User, host=Host, password=Password):
    """Open a psycopg2 connection with autocommit disabled.

    Exits the process with status 1 if the connection cannot be
    established, matching the original fail-fast behaviour — but the
    failure cause is now reported, and KeyboardInterrupt/SystemExit are
    no longer swallowed by a bare ``except``.
    """
    try:
        # Keyword arguments avoid the quoting bugs of the old hand-built
        # DSN string (which broke on quotes/spaces in the password).
        conn = psycopg2.connect(dbname=dbname, user=user, host=host, password=password)
    except psycopg2.Error as err:
        print("Unable to connect to the database: %s" % err)
        exit(1)
    conn.autocommit = False
    return conn
# function to return a cursor, whether from the default database, or with a given connection.
def get_Cur(conn=False):
    """Return a cursor from *conn*, or from a fresh default connection."""
    if conn:
        return conn.cursor()
    return get_Conn().cursor()
# function to get the dictionary, with either the default config or with it given
def get_dictionary(table=dictionary['table_name'], word_id_column=dictionary['word_id_column'],
                   word_column=dictionary['word_column'], cursor=None, schema='public'):
    """Return a {word: word_id} mapping from the dictionary table.

    Fixed: the old default ``cursor=get_Cur()`` was evaluated once at
    import time, opening a database connection on module load and sharing
    a single cursor across all calls. A fresh cursor is now created per
    call when none is supplied. The old ``if id not in d`` guard is also
    gone: it compared integer ids against string keys, so it was always
    true — later duplicate words overwrite earlier ones, exactly as
    before.
    """
    if cursor is None:
        cursor = get_Cur()
    d = dict()
    # Column/table names come from the module-level config dicts above.
    cursor.execute("""SELECT """ + word_id_column + ", " + word_column + " FROM " + schema + '.' + table)
    for row in cursor:
        word_id, word = row
        d[word] = word_id
    return d
# function to get the reverse dicitonary, like the regular dictionary but backwards
def get_reverse_dictionary(table=dictionary['table_name'], word_id_column=dictionary['word_id_column'],
                           word_column=dictionary['word_column'], cursor=None, schema='public'):
    """Return a {word_id: word} mapping from the dictionary table.

    Fixed: the old default ``cursor=get_Cur()`` was evaluated once at
    import time, opening a database connection on module load and sharing
    a single cursor across all calls. The first-wins behaviour for a
    duplicated id is preserved.
    """
    if cursor is None:
        cursor = get_Cur()
    d = dict()
    cursor.execute("""SELECT """ + word_id_column + ", " + word_column + " FROM " + schema + '.' + table)
    for row in cursor:
        word_id, word = row
        if word_id not in d:
            d[word_id] = word
    return d
# check if a table exists
def table_exists(table_name, schema='public'):
    """Return True if *schema.table_name* exists in the default database.

    Fixed: the identifier is now passed to ``to_regclass`` as a bound
    parameter instead of being spliced into the SQL text (which both
    risked injection and required catching ProgrammingError), and the
    dead code that followed an unconditional ``return True`` is removed.
    """
    conn = get_Conn()
    cursor = get_Cur(conn)
    # to_regclass returns NULL (instead of raising) for a missing relation.
    cursor.execute("SELECT to_regclass(%s)", [schema + '.' + table_name])
    return cursor.fetchone()[0] is not None
# check if table exists and it contains the given columns
def table_and_columns_exist(cursor, table_name, columns=()):
    """Return True if *table_name* exists and contains every column in *columns*.

    Fixes two bugs: the old code passed *cursor* to ``table_exists`` as
    the table name (and the table name as the schema), and it tested
    ``fetchone() is None`` even though SELECT EXISTS always returns
    exactly one row — the boolean in that row must be inspected.
    """
    if not table_exists(table_name):
        return False
    q = """select exists(SELECT * FROM information_schema.columns WHERE table_name=%s AND column_name=%s)"""
    for col in columns:
        cursor.execute(q, [table_name, col])
        if not cursor.fetchone()[0]:
            return False
    return True
# create all the tables needed
# NOTE(review): the default ``conn=get_Conn()`` is evaluated once at import
# time, so importing this module opens a database connection — confirm this
# is intended.
def CreateDatabases(conn=get_Conn()):
    """Create the tweets table (id, text) and its id index if missing."""
    cur = get_Cur(conn)
    if not table_and_columns_exist(cur, tweets['table_name'], [tweets['tweet_id_column'], tweets['text_column']]):
        cur.execute("""CREATE TABLE """ + tweets['table_name'] + """ (""" + tweets['tweet_id_column'] + """ BIGINT, """ + tweets['text_column'] + """ varchar(500))""")
        cur.execute(
            """CREATE INDEX idx_""" + tweets['tweet_id_column'] + """ ON """ + tweets['table_name'] + """ (""" + tweets['tweet_id_column'] + """)""")
        # TODO finish database create function
| 46.740741 | 354 | 0.67261 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,978 | 0.525357 |
648b8ff35a770ae66532b53431befbafd6fbde8c | 3,596 | py | Python | train.py | TaoHuUMD/3D-Reconstruction | 8f3648ee5e6506d7aa04c20e0a7bfe84534e8cb1 | [
"MIT"
] | null | null | null | train.py | TaoHuUMD/3D-Reconstruction | 8f3648ee5e6506d7aa04c20e0a7bfe84534e8cb1 | [
"MIT"
] | null | null | null | train.py | TaoHuUMD/3D-Reconstruction | 8f3648ee5e6506d7aa04c20e0a7bfe84534e8cb1 | [
"MIT"
] | null | null | null | import time
import open3d
from options.train_options import TrainOptions
from data import CreateDataLoader
from models import create_model
from util.visualizer import Visualizer
from config import *
import os
from torch.utils.tensorboard import SummaryWriter
# Entry point: CycleGAN/pix2pix-style training loop with TensorBoard
# logging and periodic checkpoint pruning.
if __name__ == '__main__':
    opt = TrainOptions().parse()
    opt.phase = 'train'
    # resolve data and checkpoint locations relative to the data directory
    opt.dataroot = os.path.join(opt.data_dir, opt.dataroot)
    opt.checkpoints_dir = os.path.join(opt.data_dir, 'checkpoints')
    data_loader = CreateDataLoader(opt)
    dataset = data_loader.load_data()
    dataset_size = len(data_loader)
    print('#training images = %d' % dataset_size)
    writer = SummaryWriter(opt.checkpoints_dir+'/%s' % opt.name)
    model = create_model(opt)
    model.setup(opt)
    visualizer = Visualizer(opt)
    total_steps = 0
    if opt.isLoad >0:
        model.load_networks('%s' % opt.load_file_name)
    if dataset_size>10000:
        # large datasets: checkpoint after every epoch
        opt.save_epoch_freq = 1
    for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):
        epoch_start_time = time.time()
        iter_data_time = time.time()
        epoch_iter = 0
        avg_loss = 0.0   # running sum of G_L1 sampled every print_freq steps
        cntt = 0         # number of samples contributing to avg_loss
        print(opt.lr)
        for i, data in enumerate(dataset):
            iter_start_time = time.time()
            if total_steps % opt.print_freq == 0:
                # data-loading time since the previous iteration finished
                t_data = iter_start_time - iter_data_time
            visualizer.reset()
            total_steps += opt.batch_size
            epoch_iter += opt.batch_size
            model.set_input(data)
            #continue
            model.optimize_parameters()
            if total_steps % opt.display_freq == 0:
                save_result = total_steps % opt.update_html_freq == 0
                visualizer.display_current_results(model.get_current_visuals(), epoch, save_result)
            if total_steps % opt.print_freq == 0:
                print(opt.name)
                losses = model.get_current_losses()
                t = (time.time() - iter_start_time) / opt.batch_size
                avg_loss += losses['G_L1']
                cntt +=1
                writer.add_scalar('loss_iter/train(%s)' % opt.name, losses['G_L1'], total_steps)
                visualizer.print_current_losses(epoch, epoch_iter, losses, t, t_data)
                if opt.display_id > 0:
                    visualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_size, opt, losses)
            iter_data_time = time.time()
        t = (time.time() - iter_start_time)
        print('one epoch time %f' % t)
        if epoch % opt.save_epoch_freq == 0 or epoch == opt.niter + opt.niter_decay:
            real_epoch = epoch + opt.epoch_from_last
            print('saving the model at the end of epoch %d, iters %d' % (real_epoch, total_steps))
            model.save_networks('latest')
            model.save_networks(epoch + opt.epoch_from_last)
            # prune the checkpoint from two save intervals ago, but keep
            # every 50th epoch as a permanent snapshot
            rm_dir = os.path.join(opt.checkpoints_dir, opt.name)
            file_name = '%s/%d_net_G.pth' % (rm_dir, real_epoch-opt.save_epoch_freq*2)
            if ( (real_epoch-opt.save_epoch_freq*2) % 50 ==0):
                continue
            if os.path.exists(file_name):
                os.remove(file_name)
        # NOTE(review): divides by cntt, which is 0 if print_freq was never
        # hit this epoch (ZeroDivisionError) — confirm print_freq < steps/epoch.
        writer.add_scalar('loss_epoch/train(%s)' % opt.name, avg_loss/cntt, total_steps)
        print('End of epoch %d \\ %d \t Time Taken: %d sec' %
              (epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))
        model.update_learning_rate()
    writer.close()
| 33.924528 | 112 | 0.598443 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 266 | 0.073971 |
648c2c20bd854f69e485a8cf34eeda1f41447e10 | 9,434 | py | Python | tests/contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/test_michelson_coding_KT1BDM.py | juztin/pytezos-1 | 7e608ff599d934bdcf129e47db43dbdb8fef9027 | [
"MIT"
] | 1 | 2020-08-11T02:31:24.000Z | 2020-08-11T02:31:24.000Z | tests/contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/test_michelson_coding_KT1BDM.py | juztin/pytezos-1 | 7e608ff599d934bdcf129e47db43dbdb8fef9027 | [
"MIT"
] | 1 | 2020-12-30T16:44:56.000Z | 2020-12-30T16:44:56.000Z | tests/contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/test_michelson_coding_KT1BDM.py | tqtezos/pytezos | a4ac0b022d35d4c9f3062609d8ce09d584b5faa8 | [
"MIT"
] | 1 | 2022-03-20T19:01:00.000Z | 2022-03-20T19:01:00.000Z | from unittest import TestCase
from tests import get_data
from pytezos.michelson.micheline import michelson_to_micheline
from pytezos.michelson.formatter import micheline_to_michelson
class MichelsonCodingTestKT1BDM(TestCase):
def setUp(self):
self.maxDiff = None
def test_michelson_parse_code_KT1BDM(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/code_KT1BDM.json')
actual = michelson_to_micheline(get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/code_KT1BDM.tz'))
self.assertEqual(expected, actual)
def test_michelson_format_code_KT1BDM(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/code_KT1BDM.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/code_KT1BDM.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_code_KT1BDM(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/code_KT1BDM.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
def test_michelson_parse_storage_KT1BDM(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/storage_KT1BDM.json')
actual = michelson_to_micheline(get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/storage_KT1BDM.tz'))
self.assertEqual(expected, actual)
def test_michelson_format_storage_KT1BDM(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/storage_KT1BDM.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/storage_KT1BDM.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_storage_KT1BDM(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/storage_KT1BDM.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
def test_michelson_parse_parameter_ooMDoN(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooMDoN.json')
actual = michelson_to_micheline(get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooMDoN.tz'))
self.assertEqual(expected, actual)
def test_michelson_format_parameter_ooMDoN(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooMDoN.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooMDoN.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_parameter_ooMDoN(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooMDoN.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
def test_michelson_parse_parameter_ooT7Uy(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooT7Uy.json')
actual = michelson_to_micheline(get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooT7Uy.tz'))
self.assertEqual(expected, actual)
def test_michelson_format_parameter_ooT7Uy(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooT7Uy.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooT7Uy.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_parameter_ooT7Uy(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooT7Uy.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
def test_michelson_parse_parameter_onuB3S(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_onuB3S.json')
actual = michelson_to_micheline(get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_onuB3S.tz'))
self.assertEqual(expected, actual)
def test_michelson_format_parameter_onuB3S(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_onuB3S.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_onuB3S.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_parameter_onuB3S(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_onuB3S.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
def test_michelson_parse_parameter_ooArSr(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooArSr.json')
actual = michelson_to_micheline(get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooArSr.tz'))
self.assertEqual(expected, actual)
def test_michelson_format_parameter_ooArSr(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooArSr.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooArSr.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_parameter_ooArSr(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooArSr.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
def test_michelson_parse_parameter_onrCFo(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_onrCFo.json')
actual = michelson_to_micheline(get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_onrCFo.tz'))
self.assertEqual(expected, actual)
def test_michelson_format_parameter_onrCFo(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_onrCFo.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_onrCFo.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_parameter_onrCFo(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_onrCFo.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
def test_michelson_parse_parameter_ongBCW(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ongBCW.json')
actual = michelson_to_micheline(get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ongBCW.tz'))
self.assertEqual(expected, actual)
def test_michelson_format_parameter_ongBCW(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ongBCW.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ongBCW.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_parameter_ongBCW(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ongBCW.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
def test_michelson_parse_parameter_ooe4gB(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooe4gB.json')
actual = michelson_to_micheline(get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooe4gB.tz'))
self.assertEqual(expected, actual)
def test_michelson_format_parameter_ooe4gB(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooe4gB.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooe4gB.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_parameter_ooe4gB(self):
expected = get_data(
path='contracts/KT1BDMQEhMATgVAcwtgqNgZNBM6LEM1PANuM/parameter_ooe4gB.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
| 46.935323 | 89 | 0.734683 | 9,247 | 0.980178 | 0 | 0 | 0 | 0 | 0 | 0 | 3,079 | 0.326373 |
648c4346d07df4969891123c667fe653aca75a7c | 2,310 | py | Python | kmeans/dbscan.py | kravtsun/au-ml | a9e354c14d4df8d1e4569e7f6cfa2fdad060522f | [
"MIT"
] | null | null | null | kmeans/dbscan.py | kravtsun/au-ml | a9e354c14d4df8d1e4569e7f6cfa2fdad060522f | [
"MIT"
] | null | null | null | kmeans/dbscan.py | kravtsun/au-ml | a9e354c14d4df8d1e4569e7f6cfa2fdad060522f | [
"MIT"
] | null | null | null | #!/bin/python
import argparse
import numpy as np
from cluster import read_csv, plot_clusters, distance, print_cluster_distribution
def kmeans_chosen(data, centers):
    """Assign every row of *data* to the index of its nearest center."""
    def nearest(point):
        return np.argmin([distance(point, center) for center in centers])
    return np.apply_along_axis(nearest, 1, data)
def dbscan(data, eps, m):
    """Label each row of *data* with a DBSCAN cluster id (-1 = noise).

    Region growing: a point with at least *m* eps-neighbours becomes a
    core point and spreads its cluster label breadth-first. Distances
    come from cluster.distance; overall cost is O(n^2) pairwise calls.
    """
    n = data.shape[0]
    cur_cluster = 0
    worked = [False] * n   # points already expanded as core candidates
    marked = [-1] * n      # cluster id per point; -1 = noise/unassigned
    def propagate(i, cur_cluster):
        # Expand point i; return the neighbours to expand next.
        if worked[i]:
            return []
        worked[i] = True
        directly_reachable = [j for j in range(n) if i != j and distance(data[i, :], data[j, :]) < eps]
        if len(directly_reachable) < m:
            # Not a core point: spreads nothing (it may still receive a
            # label later from a neighbouring core point).
            return []
        marked[i] = cur_cluster
        # NOTE(review): this file uses Python 2 syntax elsewhere, where
        # filter() is eager — so this unassigned-neighbour list is
        # snapshotted *before* the marking loop below mutates `marked`.
        # Under Python 3's lazy filter the semantics would change.
        next_work = filter(lambda j: marked[j] == -1, directly_reachable)
        for j in filter(lambda j: marked[j] != cur_cluster, directly_reachable):
            marked[j] = cur_cluster
        return next_work
    # Breadth-first expansion from every point not yet processed.
    for i in range(n):
        if worked[i]: continue
        next_work = [i]
        while len(next_work) > 0:
            next_next_work = []
            for j in next_work:
                next_next_work += propagate(j, cur_cluster)
            next_work = next_next_work
        if marked[i] != -1:
            # the seed actually founded a cluster; next seed gets a new id
            cur_cluster += 1
    result = np.array(marked)
    assert result.shape == (n,)
    return result
def sklearn_bruteforce(data, clusters):
    """Grid-search sklearn's DBSCAN over (min_samples, eps).

    Prints every combination that produces exactly ``clusters`` clusters
    plus the noise label, along with the sizes of cluster 0 and the noise
    group.
    """
    from sklearn import cluster
    for m in range(1, 21):
        for eps in np.arange(0.01, 0.5, 0.01):
            core_samples, result = cluster.dbscan(data, min_samples=m, eps=eps)
            # +1 accounts for the -1 noise label among the distinct labels.
            if len(set(result)) == clusters + 1:
                # BUGFIX: Python 2 print statement is a SyntaxError on
                # Python 3; use the print function (same output).
                print(m, eps,
                      np.count_nonzero(result == 0),
                      np.count_nonzero(result == -1))
if __name__ == '__main__':
    # CLI entry point: read points from CSV, run DBSCAN, report and plot.
    parser = argparse.ArgumentParser(description="run k-means clusterization with given arguments")
    parser.add_argument("-f", dest="filename", type=str, required=True)
    parser.add_argument("-e", dest="eps", type=float, required=True)
    parser.add_argument("-m", dest="m", type=int, default=10)
    opts = parser.parse_args()

    points = read_csv(opts.filename)
    labels = dbscan(points, opts.eps, opts.m)
    print_cluster_distribution(labels)
    plot_clusters(points, labels)
| 33.970588 | 103 | 0.619913 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 142 | 0.061472 |
648e30761d7e3ede29079ce9d0cd99b661034fad | 527 | py | Python | harness.py | vmchale/phash-fut | 941dfc891077253ff550d16438b3b416c1d6b1c4 | [
"BSD-3-Clause"
] | 2 | 2020-01-04T23:10:15.000Z | 2020-01-05T12:51:03.000Z | harness.py | vmchale/phash-fut | 941dfc891077253ff550d16438b3b416c1d6b1c4 | [
"BSD-3-Clause"
] | null | null | null | harness.py | vmchale/phash-fut | 941dfc891077253ff550d16438b3b416c1d6b1c4 | [
"BSD-3-Clause"
] | 1 | 2020-02-05T09:22:26.000Z | 2020-02-05T09:22:26.000Z | import timeit
setup = """
import phash
import imageio
import numpy as np
mod = phash.phash()
"""
read_image = """
img0 = np.array(imageio.imread('data/frog.jpeg', pilmode='F'))
mod.img_hash_f32(img0)
"""
setup_imagehash = """
from PIL import Image
import imagehash
"""
hash_bench = """
imagehash.phash(Image.open('data/frog.jpeg'))
"""


def _report(stmt, setup_code):
    # timeit returns total seconds for 100 runs; *10 converts to ms per run.
    print('data/frog.jpeg', timeit.timeit(stmt, setup=setup_code, number=100) * 10, "ms")


# Benchmark the futhark phash implementation, then the imagehash baseline.
_report(read_image, setup)
_report(hash_bench, setup_imagehash)
| 18.821429 | 96 | 0.70019 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 307 | 0.582543 |
648fa6c89a991e12d29d38c835dfac9b5c5c7d52 | 13,798 | py | Python | simopt/models/san.py | simopt-admin/simopt | 5119c605305699dce9e0c44e0b8b68e23e77c02f | [
"MIT"
] | 24 | 2020-01-06T17:21:10.000Z | 2022-03-08T16:36:29.000Z | simopt/models/san.py | simopt-admin/simopt | 5119c605305699dce9e0c44e0b8b68e23e77c02f | [
"MIT"
] | 4 | 2020-02-20T18:59:41.000Z | 2020-10-18T22:28:29.000Z | simopt/models/san.py | simopt-admin/simopt | 5119c605305699dce9e0c44e0b8b68e23e77c02f | [
"MIT"
] | 8 | 2020-02-13T18:37:48.000Z | 2021-12-15T08:27:33.000Z | """
Summary
-------
Simulate duration of stochastic activity network (SAN).
"""
import numpy as np
from base import Model, Problem
class SAN(Model):
    """
    A model that simulates a stochastic activity network problem with tasks
    that have exponentially distributed durations, and the selected means
    come with a cost.
    Returns the optimal mean duration for each task.

    Attributes
    ----------
    name : string
        name of model
    n_rngs : int
        number of random-number generators used to run a simulation replication
    n_responses : int
        number of responses (performance measures)
    factors : dict
        changeable factors of the simulation model
    specifications : dict
        details of each factor (for GUI and data validation)
    check_factor_list : dict
        switch case for checking factor simulatability

    Arguments
    ---------
    fixed_factors : dict
        fixed factors of the simulation model

    See also
    --------
    base.Model
    """
    def __init__(self, fixed_factors=None):
        # Avoid the mutable-default-argument pitfall: fresh dict per call.
        if fixed_factors is None:
            fixed_factors = {}
        self.name = "SAN"
        self.n_rngs = 1
        self.n_responses = 1
        self.specifications = {
            "num_arcs": {
                "description": "Number of arcs.",
                "datatype": int,
                "default": 13
            },
            "num_nodes": {
                "description": "Number of nodes.",
                "datatype": int,
                "default": 9
            },
            "arc_means": {
                "description": "Initial solution of means.",
                "datatype": tuple,
                "default": (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)
            }
        }
        self.check_factor_list = {
            "num_arcs": self.check_num_arcs,
            "num_nodes": self.check_num_nodes,
            "arc_means": self.check_arc_means
        }
        # Set factors of the simulation model.
        super().__init__(fixed_factors)

    def check_num_arcs(self):
        # The network must contain at least one arc.
        return self.factors["num_arcs"] > 0

    def check_num_nodes(self):
        # The network must contain at least one node.
        return self.factors["num_nodes"] > 0

    def check_arc_means(self):
        # Valid iff there is exactly one mean per arc and every mean is
        # strictly positive.
        # BUGFIX: the original computed `positive & x > 0`, which Python
        # parses as `(positive & x) > 0` (`&` binds tighter than `>`) and
        # raises TypeError for float means; it also compared the length with
        # `!=`, rejecting correctly sized tuples (including the default).
        means = self.factors["arc_means"]
        all_positive = all(x > 0 for x in means)
        return (len(means) == self.factors["num_arcs"]) and all_positive

    def replicate(self, rng_list):
        """
        Simulate a single replication for the current model factors.

        Arguments
        ---------
        rng_list : list of rng.MRG32k3a objects
            rngs for model to use when simulating a replication

        Returns
        -------
        responses : dict
            performance measures of interest
            "longest_path_length" = length/duration of longest path
        gradients : dict of dicts
            gradient estimates for each response
        """
        # Designate separate random number generators.
        exp_rng = rng_list[0]
        # Generate arc lengths: arc k ~ Exponential(mean = thetas[k]).
        T = np.zeros(self.factors["num_nodes"])
        Tderiv = np.zeros((self.factors["num_nodes"], self.factors["num_arcs"]))
        thetas = list(self.factors["arc_means"])
        arcs = [exp_rng.expovariate(1 / x) for x in thetas]
        # Brute force calculation like in Matlab code.
        # T[k] is the longest-path duration to node k; Tderiv[k, :] is its
        # IPA gradient w.r.t. the arc means (d(arc)/d(theta) = arc / theta).
        T[1] = T[0] + arcs[0]
        Tderiv[1, :] = Tderiv[0, :]
        Tderiv[1, 0] = Tderiv[1, 0] + arcs[0] / thetas[0]
        # (Dead assignment `T[2] = max(...)` removed: the if/else below sets
        # T[2] on both branches.)
        if T[0] + arcs[1] > T[1] + arcs[2]:
            T[2] = T[0] + arcs[1]
            Tderiv[2, :] = Tderiv[0, :]
            Tderiv[2, 1] = Tderiv[2, 1] + arcs[1] / thetas[1]
        else:
            T[2] = T[1] + arcs[2]
            Tderiv[2, :] = Tderiv[1, :]
            Tderiv[2, 2] = Tderiv[2, 2] + arcs[2] / thetas[2]
        T[3] = T[1] + arcs[3]
        Tderiv[3, :] = Tderiv[1, :]
        Tderiv[3, 3] = Tderiv[3, 3] + arcs[3] / thetas[3]
        T[4] = T[3] + arcs[6]
        Tderiv[4, :] = Tderiv[3, :]
        Tderiv[4, 6] = Tderiv[4, 6] + arcs[6] / thetas[6]
        candidates = [T[1] + arcs[4], T[2] + arcs[5], T[4] + arcs[8]]
        T[5] = max(candidates)
        ind = np.argmax(candidates)
        # BUGFIX: np.argmax is 0-based, so candidate 0 is T[1] + arcs[4].
        # The original tested `ind == 1` / `ind == 2` / else, sending the
        # first-candidate maximum into the wrong gradient branch.
        if ind == 0:
            Tderiv[5, :] = Tderiv[1, :]
            Tderiv[5, 4] = Tderiv[5, 4] + arcs[4] / thetas[4]
        elif ind == 1:
            Tderiv[5, :] = Tderiv[2, :]
            Tderiv[5, 5] = Tderiv[5, 5] + arcs[5] / thetas[5]
        else:
            Tderiv[5, :] = Tderiv[4, :]
            Tderiv[5, 8] = Tderiv[5, 8] + arcs[8] / thetas[8]
        T[6] = T[3] + arcs[7]
        Tderiv[6, :] = Tderiv[3, :]
        Tderiv[6, 7] = Tderiv[6, 7] + arcs[7] / thetas[7]
        if T[6] + arcs[11] > T[4] + arcs[9]:
            T[7] = T[6] + arcs[11]
            Tderiv[7, :] = Tderiv[6, :]
            Tderiv[7, 11] = Tderiv[7, 11] + arcs[11] / thetas[11]
        else:
            T[7] = T[4] + arcs[9]
            Tderiv[7, :] = Tderiv[4, :]
            Tderiv[7, 9] = Tderiv[7, 9] + arcs[9] / thetas[9]
        if T[5] + arcs[10] > T[7] + arcs[12]:
            T[8] = T[5] + arcs[10]
            Tderiv[8, :] = Tderiv[5, :]
            Tderiv[8, 10] = Tderiv[8, 10] + arcs[10] / thetas[10]
        else:
            T[8] = T[7] + arcs[12]
            Tderiv[8, :] = Tderiv[7, :]
            Tderiv[8, 12] = Tderiv[8, 12] + arcs[12] / thetas[12]
        longest_path = T[8]
        longest_path_gradient = Tderiv[8, :]
        # Compose responses and gradients.
        responses = {"longest_path_length": longest_path}
        gradients = {"longest_path_length": {"mean_grad": longest_path_gradient}}
        return responses, gradients
"""
Summary
-------
Minimize the duration of the longest path from a to i plus cost.
"""
class SANLongestPath(Problem):
    """
    Problem: minimize the expected duration of the longest path through the
    stochastic activity network plus the cost of the chosen arc means.

    Attributes
    ----------
    name : string
        name of problem
    dim : int
        number of decision variables
    n_objectives : int
        number of objectives
    n_stochastic_constraints : int
        number of stochastic constraints
    minmax : tuple of int (+/- 1)
        indicator of maximization (+1) or minimization (-1) for each objective
    constraint_type : string
        description of constraints types:
            "unconstrained", "box", "deterministic", "stochastic"
    variable_type : string
        description of variable types:
            "discrete", "continuous", "mixed"
    lower_bounds : tuple
        lower bound for each decision variable
    upper_bounds : tuple
        upper bound for each decision variable
    gradient_available : bool
        indicates if gradient of objective function is available
    optimal_value : float
        optimal objective function value
    optimal_solution : tuple
        optimal solution
    model : Model object
        associated simulation model that generates replications
    model_default_factors : dict
        default values for overriding model-level default factors
    model_fixed_factors : dict
        combination of overriden model-level factors and defaults
    model_decision_factors : set of str
        set of keys for factors that are decision variables
    rng_list : list of rng.MRG32k3a objects
        list of RNGs used to generate a random initial solution
        or a random problem instance
    factors : dict
        changeable factors of the problem
    initial_solution : list
        default initial solution from which solvers start
    budget : int > 0
        max number of replications (fn evals) for a solver to take
    specifications : dict
        details of each factor (for GUI, data validation, and defaults)

    Arguments
    ---------
    name : str
        user-specified name for problem
    fixed_factors : dict
        dictionary of user-specified problem factors
    model_fixed_factors : dict
        subset of user-specified non-decision factors to pass through to the model

    See also
    --------
    base.Problem
    """
    def __init__(self, name="SAN-1", fixed_factors=None, model_fixed_factors=None):
        # Avoid the mutable-default-argument pitfall: fresh dicts per call.
        if fixed_factors is None:
            fixed_factors = {}
        if model_fixed_factors is None:
            model_fixed_factors = {}
        self.name = name
        self.n_objectives = 1
        self.n_stochastic_constraints = 0
        self.minmax = (-1,)
        self.constraint_type = "box"
        self.variable_type = "continuous"
        self.gradient_available = False
        self.optimal_value = None
        self.optimal_solution = None
        self.model_default_factors = {}
        self.model_decision_factors = {"arc_means"}
        self.factors = fixed_factors
        self.specifications = {
            "initial_solution": {
                "description": "Initial solution.",
                "datatype": tuple,
                "default": (10,) * 13
            },
            "budget": {
                "description": "Max # of replications for a solver to take.",
                "datatype": int,
                "default": 10000
            }
        }
        self.check_factor_list = {
            "initial_solution": self.check_initial_solution,
            "budget": self.check_budget
        }
        super().__init__(fixed_factors, model_fixed_factors)
        # Instantiate model with fixed factors and over-riden defaults.
        self.model = SAN(self.model_fixed_factors)
        self.dim = self.model.factors["num_arcs"]
        self.lower_bounds = (0.01,) * self.dim
        self.upper_bounds = (100,) * self.dim

    def vector_to_factor_dict(self, vector):
        """
        Convert a vector of variables to a dictionary with factor keys.

        Arguments
        ---------
        vector : tuple
            vector of values associated with decision variables

        Returns
        -------
        factor_dict : dictionary
            dictionary with factor keys and associated values
        """
        factor_dict = {
            "arc_means": vector[:]
        }
        return factor_dict

    def factor_dict_to_vector(self, factor_dict):
        """
        Convert a dictionary with factor keys to a vector of variables.

        Arguments
        ---------
        factor_dict : dictionary
            dictionary with factor keys and associated values

        Returns
        -------
        vector : tuple
            vector of values associated with decision variables
        """
        vector = tuple(factor_dict["arc_means"])
        return vector

    def response_dict_to_objectives(self, response_dict):
        """
        Convert a dictionary with response keys to a vector of objectives.

        Arguments
        ---------
        response_dict : dictionary
            dictionary with response keys and associated values

        Returns
        -------
        objectives : tuple
            vector of objectives
        """
        objectives = (response_dict["longest_path_length"],)
        return objectives

    def response_dict_to_stoch_constraints(self, response_dict):
        """
        Convert a dictionary with response keys to a vector of left-hand
        sides of stochastic constraints: E[Y] >= 0.

        Arguments
        ---------
        response_dict : dictionary
            dictionary with response keys and associated values

        Returns
        -------
        stoch_constraints : tuple
            vector of LHSs of stochastic constraint
        """
        # This problem has no stochastic constraints.
        stoch_constraints = None
        return stoch_constraints

    def deterministic_stochastic_constraints_and_gradients(self, x):
        """
        Compute deterministic components of stochastic constraints for a
        solution `x`.

        Arguments
        ---------
        x : tuple
            vector of decision variables

        Returns
        -------
        det_stoch_constraints : tuple
            vector of deterministic components of stochastic constraints
        det_stoch_constraints_gradients : tuple
            vector of gradients of deterministic components of stochastic constraints
        """
        det_stoch_constraints = None
        det_stoch_constraints_gradients = ((0,) * self.dim,)  # tuple of tuples - of sizes self.dim by self.dim, full of zeros
        return det_stoch_constraints, det_stoch_constraints_gradients

    def deterministic_objectives_and_gradients(self, x):
        """
        Compute deterministic components of objectives for a solution `x`.

        The deterministic cost term is sum(1 / x_k): shorter mean durations
        are more expensive.

        Arguments
        ---------
        x : tuple
            vector of decision variables

        Returns
        -------
        det_objectives : tuple
            vector of deterministic components of objectives
        det_objectives_gradients : tuple
            vector of gradients of deterministic components of objectives
        """
        det_objectives = (np.sum(1 / np.array(x)),)
        # BUGFIX: return one gradient tuple per objective (tuple of tuples),
        # matching the shape used by
        # deterministic_stochastic_constraints_and_gradients; the original
        # returned a bare ndarray.
        det_objectives_gradients = (tuple(-1 / (np.array(x) ** 2)),)
        return det_objectives, det_objectives_gradients

    def check_deterministic_constraints(self, x):
        """
        Check if a solution `x` satisfies the problem's deterministic
        constraints.

        Arguments
        ---------
        x : tuple
            vector of decision variables

        Returns
        -------
        satisfies : bool
            indicates if solution `x` satisfies the deterministic constraints.
        """
        return np.all(np.array(x) >= 0)

    def get_random_solution(self, rand_sol_rng):
        """
        Generate a random solution for starting or restarting solvers.

        Arguments
        ---------
        rand_sol_rng : rng.MRG32k3a object
            random-number generator used to sample a new random solution

        Returns
        -------
        x : tuple
            vector of decision variables
        """
        # Sample each arc mean uniformly within (most of) the box bounds.
        x = tuple([rand_sol_rng.uniform(0.01, 10) for _ in range(self.dim)])
        return x
| 32.16317 | 126 | 0.567908 | 13,571 | 0.983406 | 0 | 0 | 0 | 0 | 0 | 0 | 7,713 | 0.558913 |
64904492fae611833b44081cd57a9959ef89af7d | 169 | py | Python | algorithms/da3c/__init__.py | j0k/relaax | dff865facc2932e4f8317d6ab4ad32a1f218e7b6 | [
"MIT"
] | 4 | 2018-07-31T06:32:30.000Z | 2021-05-02T20:21:37.000Z | algorithms/da3c_cont/__init__.py | bohblue2/relaax | 0a7ed8f2a21e37ca047e16d216d164527c1fffdd | [
"MIT"
] | null | null | null | algorithms/da3c_cont/__init__.py | bohblue2/relaax | 0a7ed8f2a21e37ca047e16d216d164527c1fffdd | [
"MIT"
] | null | null | null | from .common.config import Config
from .parameter_server.parameter_server import ParameterServer
from .agent.agent import Agent
from .bridge.bridge import BridgeControl
| 33.8 | 62 | 0.857988 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
6491da520089ccd407f5b9a996b24d12d4cf98c1 | 1,223 | py | Python | examples/simple-box.py | renovate-tests/gaphas | 388ee28b573fdb67e246bb9e66ff02b5afcf8204 | [
"Apache-2.0"
] | null | null | null | examples/simple-box.py | renovate-tests/gaphas | 388ee28b573fdb67e246bb9e66ff02b5afcf8204 | [
"Apache-2.0"
] | null | null | null | examples/simple-box.py | renovate-tests/gaphas | 388ee28b573fdb67e246bb9e66ff02b5afcf8204 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""A simple example containing two boxes and a line.
"""
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
from gaphas import Canvas, GtkView
from gaphas.examples import Box
from gaphas.painter import DefaultPainter
from gaphas.item import Line
from gaphas.segment import Segment
def create_canvas(canvas, title):
    """Populate `canvas` with two boxes and a line and show it in a GTK window.

    `canvas` is a gaphas Canvas to draw on; `title` is the window title.
    """
    # Setup drawing window: a GtkView renders the canvas inside the window.
    view = GtkView()
    view.painter = DefaultPainter()
    view.canvas = canvas
    window = Gtk.Window()
    window.set_title(title)
    window.set_default_size(400, 400)
    win_box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
    window.add(win_box)
    win_box.pack_start(view, True, True, 0)
    # Draw first gaphas box
    b1 = Box(60, 60)
    # Identity transform translated to (10, 10).
    b1.matrix = (1.0, 0.0, 0.0, 1, 10, 10)
    canvas.add(b1)
    # Draw second gaphas box
    b2 = Box(60, 60)
    b2.min_width = 40
    b2.min_height = 50
    b2.matrix.translate(170, 170)
    canvas.add(b2)
    # Draw gaphas line
    line = Line()
    line.matrix.translate(100, 60)
    canvas.add(line)
    # Move the line's end handle so the line has visible extent.
    line.handles()[1].pos = (30, 30)
    window.show_all()
    # Quit the GTK main loop when the window is closed.
    window.connect("destroy", Gtk.main_quit)
# Build the demo canvas, show it in a window, and run the GTK main loop
# (blocks until the window is destroyed).
c = Canvas()
create_canvas(c, "Simple Gaphas App")
Gtk.main()
| 22.648148 | 61 | 0.673753 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 203 | 0.165985 |
649222f97f73f123281635bb2c3d36c6e7dafb0b | 2,574 | py | Python | 09_NNI/files/nni_xgb.py | aiq2020-tw/automl-notebooks | 689a494b3614d8a17d62e9b8713c97c54976e796 | [
"MIT"
] | 17 | 2021-07-07T02:04:47.000Z | 2022-03-24T17:42:00.000Z | 09_NNI/files/nni_xgb.py | aiq2020-tw/automl-notebooks | 689a494b3614d8a17d62e9b8713c97c54976e796 | [
"MIT"
] | null | null | null | 09_NNI/files/nni_xgb.py | aiq2020-tw/automl-notebooks | 689a494b3614d8a17d62e9b8713c97c54976e796 | [
"MIT"
] | 1 | 2021-11-29T04:32:13.000Z | 2021-11-29T04:32:13.000Z | # NNIをインポートする
import nni
import pandas as pd
import xgboost as xgb
from sklearn.model_selection import KFold, cross_val_score
from sklearn.preprocessing import LabelEncoder
def load_data(train_file_path):
    """Load the training CSV and preprocess it for model fitting.

    Parameters
    ----------
    train_file_path : str
        Path to the training CSV file.

    Returns
    -------
    X_train : pd.DataFrame
        Feature matrix: identifier columns dropped, categorical columns
        label-encoded.
    y_train : pd.Series
        Target labels (the 'Survived' column).
    """
    frame = pd.read_csv(train_file_path)
    y_train = frame.pop('Survived')
    X_train = frame.drop(['PassengerId', 'Name'], axis=1)
    # Label-encode every categorical column in place.
    for column in ['Sex', 'Ticket', 'Cabin', 'Embarked']:
        X_train[column] = LabelEncoder().fit_transform(X_train[column])
    return X_train, y_train
def get_default_parameters():
    """Return the default XGBoost hyper-parameters for a trial.

    Returns
    -------
    dict
        Default hyper-parameter settings.
    """
    return {
        'learning_rate': 0.02,
        'n_estimators': 2000,
        'max_depth': 4,
        'min_child_weight': 2,
        'gamma': 0.9,
        'subsample': 0.8,
        'colsample_bytree': 0.8,
        'objective': 'binary:logistic',
        'nthread': -1,
        'scale_pos_weight': 1
    }
def get_model(PARAMS):
    """Build an XGBoost classifier configured from `PARAMS`.

    Parameters
    ----------
    PARAMS : dict
        Hyper-parameter settings (see get_default_parameters()).

    Returns
    -------
    model : xgboost.sklearn.XGBClassifier
        Classifier with the tunable hyper-parameters applied.
    """
    model = xgb.XGBClassifier()
    model.learning_rate = PARAMS.get("learning_rate")
    model.max_depth = PARAMS.get("max_depth")
    model.subsample = PARAMS.get("subsample")
    # BUGFIX: the original used the misspelled "colsample_btree" for both the
    # attribute and the lookup key, so PARAMS.get returned None and the value
    # was assigned to an attribute XGBoost ignores; the parameter dict and
    # XGBoost both use "colsample_bytree".
    model.colsample_bytree = PARAMS.get("colsample_bytree")
    return model
def run(X_train, y_train, model):
    """Cross-validate *model* and report mean accuracy to NNI.

    Parameters
    ----------
    X_train : pd.DataFrame
        Training features.
    y_train : pd.Series
        Training labels.
    model : xgboost.sklearn.XGBClassifier
        Model to evaluate.
    """
    cv_scores = cross_val_score(model, X_train, y_train,
                                scoring='accuracy', cv=KFold(n_splits=5))
    # Report the mean 5-fold accuracy as this trial's final result.
    nni.report_final_result(cv_scores.mean())
if __name__ == '__main__':
    X_train_sub, y_train_sub = load_data('train.csv')
    # Fetch the next hyper-parameter configuration from the NNI tuner.
    RECEIVED_PARAMS = nni.get_next_parameter()
    PARAMS = get_default_parameters()
    # Tuned values override the defaults.
    PARAMS.update(RECEIVED_PARAMS)
    model = get_model(PARAMS)
    run(X_train_sub, y_train_sub, model)
| 24.75 | 71 | 0.590909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,400 | 0.484765 |
6492ff76f8b02098a203441a7f17d6b324cf4767 | 22,255 | py | Python | parsetab.py | Hebbarkh/ScientificCalculator | 35e7c547b2cfebb8b8f6a7b090a43973abef5a0b | [
"Apache-2.0"
] | 1 | 2018-07-14T23:16:56.000Z | 2018-07-14T23:16:56.000Z | parsetab.py | Hebbarkh/ScientificCalculator | 35e7c547b2cfebb8b8f6a7b090a43973abef5a0b | [
"Apache-2.0"
] | null | null | null | parsetab.py | Hebbarkh/ScientificCalculator | 35e7c547b2cfebb8b8f6a7b090a43973abef5a0b | [
"Apache-2.0"
] | null | null | null |
# parsetab.py
# This file is automatically generated. Do not edit.
_tabversion = '3.5'
_lr_method = 'LALR'
_lr_signature = '5E2BB3531AA676A6BB1D06733251B2F3'
_lr_action_items = {'COS':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[1,1,1,1,1,1,1,1,1,1,-26,1,-32,1,1,1,1,1,-18,-19,1,-52,1,1,-27,-35,-24,1,-28,-37,1,-25,-34,-36,1,-30,-31,-39,1,1,1,-23,-38,-21,1,-29,1,1,-22,1,1,1,1,-47,-40,1,1,1,-7,-42,-49,1,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-12,-33,]),'COT':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[5,5,5,5,5,5,5,5,5,5,-26,5,-32,5,5,5,5,5,-18,-19,5,-52,5,5,-27,-35,-24,5,-28,-37,5,-25,-34,-36,5,-30,-31,-39,5,5,5,-23,-38,-21,5,-29,5,5,-22,5,5,5,5,-47,-40,5,5,5,-7,-42,-49,5,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-12,-33,]),'DEGREE':([12,14,22,23,27,32,33,34,36,37,39,40,42,44,45,46,50,51,52,54,57,62,63,67,68,69,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[-26,-32,-18,-19,-52,-27,62,-24,-28,69,-25,72,75,-30,-31,78,-23,83,-21,-29,-22,-47,-40,-7,-42,-49,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-28,-33,]),'SUM':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[3,3,3,3,3,3,3,3,3,3,-26,3,-32,3,3,3,3,3,-18,-19,3,-52,3,3,-27,-35,-24,3,-28,-37,3,-25,-34,-36,3,-30,-31,-39,3,3,3,-23,-38,-21,3,-29,3,3,-22,3,3,3,3,-47,-40,3,3,3,-7,-42,-49,3,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-12,-33,]),'MINUS':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,21,22,23,26,27,29,30,31,32,33,34,35,36,37,38,39,40,41,4
2,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,],[4,4,4,4,4,4,4,4,4,4,-26,4,-32,4,4,4,4,4,-27,-18,-19,4,-52,4,4,61,-27,61,61,64,-28,61,4,61,61,61,61,64,61,61,61,64,64,4,61,61,61,4,-29,4,4,-22,4,4,4,4,-47,-40,4,4,4,61,-42,-49,64,-45,-46,-20,-41,-48,61,-44,-51,61,61,61,-43,-50,61,61,-14,61,-11,-13,-12,-12,61,-33,]),'LOG':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[2,2,2,2,2,2,2,2,2,2,-26,2,-32,2,2,2,2,2,-18,-19,2,-52,2,2,-27,-35,-24,2,-28,-37,2,-25,-34,-36,2,-30,-31,-39,2,2,2,-23,-38,-21,2,-29,2,2,-22,2,2,2,2,-47,-40,2,2,2,-7,-42,-49,2,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-12,-33,]),'RAD':([12,14,22,23,27,32,33,34,36,37,39,40,42,44,45,46,50,51,52,54,57,62,63,67,68,69,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[-26,-32,-18,-19,-52,-27,63,-24,-28,68,-25,71,74,-30,-31,77,-23,82,-21,-29,-22,-47,-40,-7,-42,-49,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-28,-33,]),'POWER':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[6,6,6,6,6,6,6,6,6,6,-26,6,-32,6,6,6,6,6,-18,-19,6,-52,6,6,-27,-35,-24,6,-28,-37,6,-25,-34,-36,6,-30,-31,-39,6,6,6,-23,-38,-21,6,-29,6,6,-22,6,6,6,6,-47,-40,6,6,6,-7,-42,-49,6,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-12,-33,]),'LN':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83
,84,85,86,87,88,89,90,91,93,],[7,7,7,7,7,7,7,7,7,7,-26,7,-32,7,7,7,7,7,-18,-19,7,-52,7,7,-27,-35,-24,7,-28,-37,7,-25,-34,-36,7,-30,-31,-39,7,7,7,-23,-38,-21,7,-29,7,7,-22,7,7,7,7,-47,-40,7,7,7,-7,-42,-49,7,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-12,-33,]),'SIN':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[8,8,8,8,8,8,8,8,8,8,-26,8,-32,8,8,8,8,8,-18,-19,8,-52,8,8,-27,-35,-24,8,-28,-37,8,-25,-34,-36,8,-30,-31,-39,8,8,8,-23,-38,-21,8,-29,8,8,-22,8,8,8,8,-47,-40,8,8,8,-7,-42,-49,8,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-12,-33,]),'OPAR':([0,1,2,3,4,5,6,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[9,9,9,9,9,9,38,9,9,9,9,-26,9,-32,9,9,9,9,9,-18,-19,9,-52,9,9,-27,-35,-24,9,-28,-37,9,-25,-34,-36,9,-30,-31,-39,9,9,9,-23,-38,-21,9,-29,9,9,-22,9,9,9,9,-47,-40,9,9,9,-7,-42,-49,9,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-12,-33,]),'SEC':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[29,29,29,29,29,29,29,29,29,29,-26,29,-32,29,29,29,29,29,-18,-19,29,-52,29,29,-27,-35,-24,29,-28,-37,29,-25,-34,-36,29,-30,-31,-39,29,29,29,-23,-38,-21,29,-29,29,29,-22,29,29,29,29,-47,-40,29,29,29,-7,-42,-49,29,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-12,-33,]),'TAN':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,
54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[11,11,11,11,11,11,11,11,11,11,-26,11,-32,11,11,11,11,11,-18,-19,11,-52,11,11,-27,-35,-24,11,-28,-37,11,-25,-34,-36,11,-30,-31,-39,11,11,11,-23,-38,-21,11,-29,11,11,-22,11,11,11,11,-47,-40,11,11,11,-7,-42,-49,11,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-12,-33,]),'PI':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[12,12,12,12,12,12,12,12,12,12,-26,12,-32,12,12,12,12,12,-18,-19,12,-52,12,12,-27,-35,-24,12,-28,-37,12,-25,-34,-36,12,-30,-31,-39,12,12,12,-23,-38,-21,12,-29,12,12,-22,12,12,12,12,-47,-40,12,12,12,-7,-42,-49,12,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-12,-33,]),'QUOTIENT':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[13,13,13,13,13,13,13,13,13,13,-26,13,-32,13,13,13,13,13,-18,-19,13,-52,13,13,-27,-35,-24,13,-28,-37,13,-25,-34,-36,13,-30,-31,-39,13,13,13,-23,-38,-21,13,-29,13,13,-22,13,13,13,13,-47,-40,13,13,13,-7,-42,-49,13,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-12,-33,]),'PLUS':([12,14,21,22,23,27,31,32,33,34,35,36,37,39,40,41,42,43,44,45,46,47,48,50,51,52,54,57,62,63,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,],[-26,-32,-27,-18,-19,-52,59,-27,59,59,59,-28,59,59,59,59,59,59,59,59,59,59,59,59,59,59,-29,-22,-47,-40,-29,-22,59,-42,-49,59,-45,-46,-20,-41,-48,59,-44,-51,59,59,59,-43,-50,59,59,-14,59,-11,-13,-12,-12,59,-33,]),'SQUARE':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,21,22,23,26,27,29,30,31,32,33,34,
35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,],[15,15,15,15,15,15,15,15,15,15,-26,15,-32,15,15,15,15,15,-27,-18,-19,15,-52,15,15,54,-27,54,54,65,-28,54,15,54,54,54,54,65,54,54,54,65,65,15,54,54,54,15,-29,15,15,-22,15,15,15,15,-47,-40,15,15,15,54,-42,-49,65,-45,-46,-20,-41,-48,54,-44,-51,54,54,54,-43,-50,54,54,-14,54,-11,-13,-12,-12,54,-33,]),'XOR':([12,14,21,22,23,27,31,32,33,34,35,36,37,39,40,41,42,43,44,45,46,47,48,50,51,52,54,57,62,63,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,],[-26,-32,-27,-18,-19,-52,55,-27,55,55,55,-28,55,55,55,55,55,55,55,55,55,55,55,55,55,55,-29,-22,-47,-40,-29,-22,55,-42,-49,55,-45,-46,-20,-41,-48,55,-44,-51,55,55,55,-43,-50,55,55,-14,55,-11,-13,-12,-12,55,-33,]),'DIVIDE':([12,14,21,22,23,27,31,32,33,34,35,36,37,39,40,41,42,43,44,45,46,47,48,50,51,52,54,57,62,63,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,],[-26,-32,-27,-18,-19,-52,56,-27,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,-29,-22,-47,-40,-29,-22,56,-42,-49,56,-45,-46,-20,-41,-48,56,-44,-51,56,56,56,-43,-50,56,56,-14,56,56,-13,56,56,56,-33,]),'SQROOT':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[16,16,16,16,16,16,16,16,16,16,-26,16,-32,16,16,16,16,16,-18,-19,16,-52,16,16,-27,-35,-24,16,-28,-37,16,-25,-34,-36,16,-30,-31,-39,16,16,16,-23,-38,-21,16,-29,16,16,-22,16,16,16,16,-47,-40,16,16,16,-7,-42,-49,16,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-12,-33,]),'CPAR':([12,14,22,23,27,32,33,34,36,37,39,40,41,42,44,45,46,50,51,52,54,57,62,63,67,68,69,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,92,93,],[-26,-32,-18,-19,-5
2,-27,-35,-24,-28,-37,-25,-34,73,-36,-30,-31,-39,-23,-38,-21,-29,-22,-47,-40,-7,-42,-49,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-28,93,-33,]),'EQUALS':([21,],[49,]),'TIMES':([12,14,21,22,23,27,31,32,33,34,35,36,37,39,40,41,42,43,44,45,46,47,48,50,51,52,54,57,62,63,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,],[-26,-32,-27,-18,-19,-52,60,-27,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,-29,-22,-47,-40,-29,-22,60,-42,-49,60,-45,-46,-20,-41,-48,60,-44,-51,60,60,60,-43,-50,60,60,-14,60,60,-13,60,60,60,-33,]),'COSEC':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[17,17,17,17,17,17,17,17,17,17,-26,17,-32,17,17,17,17,17,-18,-19,17,-52,17,17,-27,-35,-24,17,-28,-37,17,-25,-34,-36,17,-30,-31,-39,17,17,17,-23,-38,-21,17,-29,17,17,-22,17,17,17,17,-47,-40,17,17,17,-7,-42,-49,17,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-12,-33,]),'DIFFERENCE':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[18,18,18,18,18,18,18,18,18,18,-26,18,-32,18,18,18,18,18,-18,-19,18,-52,18,18,-27,-35,-24,18,-28,-37,18,-25,-34,-36,18,-30,-31,-39,18,18,18,-23,-38,-21,18,-29,18,18,-22,18,18,18,18,-47,-40,18,18,18,-7,-42,-49,18,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-12,-33,]),'AND':([12,14,21,22,23,27,31,32,33,34,35,36,37,39,40,41,42,43,44,45,46,47,48,50,51,52,54,57,62,63,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,],[-26,-32,-27,-18,-19,-52,53,-27,53,53,53,-28,53,53,53,53,53,53,53,53,53,53,53,53,53,53,-29,-22,-47,-40,-29,-22,53,
-42,-49,53,-45,-46,-20,-41,-48,53,-44,-51,53,53,53,-43,-50,53,53,-14,53,-11,-13,-12,-12,53,-33,]),'QUIT':([0,],[19,]),'PRODUCT':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[20,20,20,20,20,20,20,20,20,20,-26,20,-32,20,20,20,20,20,-18,-19,20,-52,20,20,-27,-35,-24,20,-28,-37,20,-25,-34,-36,20,-30,-31,-39,20,20,20,-23,-38,-21,20,-29,20,20,-22,20,20,20,20,-47,-40,20,20,20,-7,-42,-49,20,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-12,-33,]),'NAME':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[21,32,32,32,32,32,32,32,32,32,-26,32,-32,32,32,32,32,32,-18,-19,32,-52,32,32,-27,-35,-24,32,-28,-37,32,-25,-34,-36,32,-30,-31,-39,32,32,32,-23,-38,-21,32,-29,32,32,-22,32,32,32,32,-47,-40,32,32,32,-7,-42,-49,32,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-12,-33,]),'INT':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[22,22,22,22,22,22,22,22,22,22,-26,22,-32,22,22,22,22,22,-18,-19,22,-52,22,22,-27,-35,-24,22,-28,-37,22,-25,-34,-36,22,-30,-31,-39,22,22,22,-23,-38,-21,22,-29,22,22,-22,22,22,22,22,-47,-40,22,22,22,-7,-42,-49,22,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-12,-33,]),'FLOAT':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,8
8,89,90,91,93,],[23,23,23,23,23,23,23,23,23,23,-26,23,-32,23,23,23,23,23,-18,-19,23,-52,23,23,-27,-35,-24,23,-28,-37,23,-25,-34,-36,23,-30,-31,-39,23,23,23,-23,-38,-21,23,-29,23,23,-22,23,23,23,23,-47,-40,23,23,23,-7,-42,-49,23,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-12,-33,]),'BREAK':([0,],[25,]),'FACTORIAL':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,21,22,23,26,27,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,],[26,26,26,26,26,26,26,26,26,26,-26,26,-32,26,26,26,26,26,-27,-18,-19,26,-52,26,26,57,-27,57,57,66,-28,57,26,57,57,57,57,66,57,57,57,66,66,26,57,57,57,26,-29,26,26,-22,26,26,26,26,-47,-40,26,26,26,57,-42,-49,66,-45,-46,-20,-41,-48,57,-44,-51,57,57,57,-43,-50,57,57,-14,57,-11,-13,-12,-12,57,-33,]),'REGISTERS':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[27,27,27,27,27,27,27,27,27,27,-26,27,-32,27,27,27,27,27,-18,-19,27,-52,27,27,-27,-35,-24,27,-28,-37,27,-25,-34,-36,27,-30,-31,-39,27,27,27,-23,-38,-21,27,-29,27,27,-22,27,27,27,27,-47,-40,27,27,27,-7,-42,-49,27,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-12,-33,]),'EXIT':([0,],[28,]),'NOT':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[30,30,30,30,30,30,30,30,30,30,-26,30,-32,30,30,30,30,30,-18,-19,30,-52,30,30,-27,-35,-24,30,-28,-37,30,-25,-34,-36,30,-30,-31,-39,30,30,30,-23,-38,-21,30,-29,30,30,-22,30,30,30,30,-47,-40,30,30,30,-7,-42,-49,30,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-
11,-13,-12,-12,-33,]),'$end':([10,12,14,19,21,22,23,24,25,27,28,31,32,33,34,36,37,39,40,42,44,45,46,50,51,52,54,57,62,63,67,68,69,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,93,],[-2,-26,-32,-6,-27,-18,-19,0,-4,-52,-5,-3,-27,-35,-24,-28,-37,-25,-34,-36,-30,-31,-39,-23,-38,-21,-29,-22,-47,-40,-7,-42,-49,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-1,-43,-50,-16,-17,-14,-15,-11,-13,-12,-28,-33,]),'OR':([12,14,21,22,23,27,31,32,33,34,35,36,37,39,40,41,42,43,44,45,46,47,48,50,51,52,54,57,62,63,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,],[-26,-32,-27,-18,-19,-52,58,-27,58,58,58,-28,58,58,58,58,58,58,58,58,58,58,58,58,58,58,-29,-22,-47,-40,-29,-22,58,-42,-49,58,-45,-46,-20,-41,-48,58,-44,-51,58,58,58,-43,-50,58,58,-14,58,-11,-13,-12,-12,58,-33,]),}
_lr_action = {}
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = {}
_lr_action[_x][_k] = _y
del _lr_action_items
_lr_goto_items = {'function1':([0,1,2,3,4,5,7,8,9,11,13,15,16,17,18,20,26,29,30,35,38,43,47,48,49,53,55,56,58,59,60,61,64,65,66,70,],[14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,]),'expression':([0,1,2,3,4,5,7,8,9,11,13,15,16,17,18,20,26,29,30,35,38,43,47,48,49,53,55,56,58,59,60,61,64,65,66,70,],[31,33,34,35,36,37,39,40,41,42,43,44,45,46,47,48,50,51,52,67,70,76,79,80,81,84,85,86,87,88,89,90,91,44,50,92,]),'assign':([0,],[24,]),'statement':([0,],[10,]),}
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
for _x, _y in zip(_v[0], _v[1]):
if not _x in _lr_goto: _lr_goto[_x] = {}
_lr_goto[_x][_k] = _y
del _lr_goto_items
_lr_productions = [
("S' -> assign","S'",1,None,None,None),
('assign -> NAME EQUALS expression','assign',3,'p_statement_assign','calc1.py',13),
('assign -> statement','assign',1,'p_statement_assign','calc1.py',14),
('statement -> expression','statement',1,'p_statement_expr','calc1.py',22),
('statement -> BREAK','statement',1,'p_statement_expr','calc1.py',23),
('statement -> EXIT','statement',1,'p_statement_expr','calc1.py',24),
('statement -> QUIT','statement',1,'p_statement_expr','calc1.py',25),
('expression -> SUM expression expression','expression',3,'p_exprr','calc1.py',34),
('expression -> DIFFERENCE expression expression','expression',3,'p_exprr','calc1.py',35),
('expression -> PRODUCT expression expression','expression',3,'p_exprr','calc1.py',36),
('expression -> QUOTIENT expression expression','expression',3,'p_exprr','calc1.py',37),
('expression -> expression PLUS expression','expression',3,'p_expression_binop','calc1.py',50),
('expression -> expression MINUS expression','expression',3,'p_expression_binop','calc1.py',51),
('expression -> expression TIMES expression','expression',3,'p_expression_binop','calc1.py',52),
('expression -> expression DIVIDE expression','expression',3,'p_expression_binop','calc1.py',53),
('expression -> expression OR expression','expression',3,'p_expression_binop','calc1.py',54),
('expression -> expression AND expression','expression',3,'p_expression_binop','calc1.py',55),
('expression -> expression XOR expression','expression',3,'p_expression_binop','calc1.py',56),
('expression -> INT','expression',1,'p_factor','calc1.py',80),
('expression -> FLOAT','expression',1,'p_factor','calc1.py',81),
('expression -> OPAR expression CPAR','expression',3,'p_paran','calc1.py',85),
('expression -> NOT expression','expression',2,'p_logical_not','calc1.py',89),
('expression -> expression FACTORIAL','expression',2,'p_factorial_exp','calc1.py',93),
('expression -> FACTORIAL expression','expression',2,'p_factorial_exp','calc1.py',94),
('expression -> LOG expression','expression',2,'p_logarithms','calc1.py',108),
('expression -> LN expression','expression',2,'p_logarithms','calc1.py',109),
('expression -> PI','expression',1,'p_pival','calc1.py',126),
('expression -> NAME','expression',1,'p_pival','calc1.py',127),
('expression -> MINUS expression','expression',2,'p_uniminus','calc1.py',134),
('expression -> expression SQUARE','expression',2,'p_square_fun','calc1.py',138),
('expression -> SQUARE expression','expression',2,'p_square_fun','calc1.py',139),
('expression -> SQROOT expression','expression',2,'p_square_root','calc1.py',147),
('expression -> function1','expression',1,'p_math_fun','calc1.py',151),
('expression -> POWER OPAR expression expression CPAR','expression',5,'p_math_pow','calc1.py',155),
('function1 -> SIN expression','function1',2,'p_trig_func1','calc1.py',161),
('function1 -> COS expression','function1',2,'p_trig_func1','calc1.py',162),
('function1 -> TAN expression','function1',2,'p_trig_func1','calc1.py',163),
('function1 -> COT expression','function1',2,'p_trig_func1','calc1.py',164),
('function1 -> SEC expression','function1',2,'p_trig_func1','calc1.py',165),
('function1 -> COSEC expression','function1',2,'p_trig_func1','calc1.py',166),
('function1 -> COS expression RAD','function1',3,'p_trig_func1','calc1.py',167),
('function1 -> TAN expression RAD','function1',3,'p_trig_func1','calc1.py',168),
('function1 -> COT expression RAD','function1',3,'p_trig_func1','calc1.py',169),
('function1 -> SEC expression RAD','function1',3,'p_trig_func1','calc1.py',170),
('function1 -> COSEC expression RAD','function1',3,'p_trig_func1','calc1.py',171),
('function1 -> SIN expression RAD','function1',3,'p_trig_func1','calc1.py',172),
('function1 -> SIN expression DEGREE','function1',3,'p_func1','calc1.py',190),
('function1 -> COS expression DEGREE','function1',3,'p_func1','calc1.py',191),
('function1 -> TAN expression DEGREE','function1',3,'p_func1','calc1.py',192),
('function1 -> COT expression DEGREE','function1',3,'p_func1','calc1.py',193),
('function1 -> SEC expression DEGREE','function1',3,'p_func1','calc1.py',194),
('function1 -> COSEC expression DEGREE','function1',3,'p_func1','calc1.py',195),
('expression -> REGISTERS','expression',1,'p_registers','calc1.py',211),
]
| 271.402439 | 16,810 | 0.608268 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,974 | 0.178567 |
649457b8032d8db424acfcf9fd15600116f0ed28 | 526 | py | Python | tests/data.py | biologic/stylus | ae642bbb7e2205bab1ab1b4703ea037e996e13db | [
"Apache-2.0"
] | null | null | null | tests/data.py | biologic/stylus | ae642bbb7e2205bab1ab1b4703ea037e996e13db | [
"Apache-2.0"
] | null | null | null | tests/data.py | biologic/stylus | ae642bbb7e2205bab1ab1b4703ea037e996e13db | [
"Apache-2.0"
] | null | null | null | # The following is a list of gene-plan combinations which should
# not be run
BLACKLIST = [
('8C58', 'performance'), # performance.xml make specific references to 52DC
('7DDA', 'performance') # performance.xml make specific references to 52DC
]
IGNORE = {
'history' : ['uuid', 'creationTool', 'creationDate'],
'genome' : ['uuid', 'creationTool', 'creationDate'],
# the following two ignored because they contain line numbers
'attempt' : ['description'],
'compared' : ['description']
}
| 35.066667 | 79 | 0.657795 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 405 | 0.769962 |
64946af2634bec93cc261bffb1f1c3c754ac3109 | 2,015 | py | Python | hidtcore.py | zengxinzhy/HiDT | 3a223d727e6888dfcdfd5e81bd83df428d3a5596 | [
"BSD-3-Clause"
] | null | null | null | hidtcore.py | zengxinzhy/HiDT | 3a223d727e6888dfcdfd5e81bd83df428d3a5596 | [
"BSD-3-Clause"
] | null | null | null | hidtcore.py | zengxinzhy/HiDT | 3a223d727e6888dfcdfd5e81bd83df428d3a5596 | [
"BSD-3-Clause"
] | null | null | null | import torch
import sys
import coremltools as ct
from hidt.style_transformer import StyleTransformer
from ops import inference_size
sys.path.append('./HiDT')
class HiDT(torch.nn.Module):
    """Inference-only wrapper around the HiDT daytime style transformer.

    Exposes a single forward pass (encode content, inject a style vector,
    decode) so the model can be traced with torch.jit and exported.
    """

    def __init__(self):
        super().__init__()
        config = './configs/daytime.yaml'
        weights = './trained_models/generator/daytime.pt'
        # Build the pretrained transformer on CPU; no gradients needed.
        with torch.no_grad():
            self.style_transformer = StyleTransformer(
                config,
                weights,
                inference_size=inference_size,
                device='cpu')

    def forward(self, content, style_to_transfer):
        """Apply *style_to_transfer* (a 3-vector) to the *content* batch.

        Returns the stylized images flattened to shape (1, n*c, h, w).
        """
        batch, channels, height, width = content.shape
        # Replicate the 3-element style vector into one (1, 3, 1) style
        # entry per image in the batch.
        style = style_to_transfer.view(1, 1, 3, 1).repeat(batch, 1, 1, 1)
        generator = self.style_transformer.trainer.gen
        content_parts = generator.content_encoder(content)
        decoded = generator.decode({
            'content': content_parts[0],
            'intermediate_outputs': content_parts[1:],
            'style': style,
        })
        return decoded['images'].view(1, batch * channels, height, width)
if __name__ == '__main__':
    # Dummy inputs used only to shape the traced graph:
    # a 1x3x256x452 image and a 3-element style vector.
    image = torch.zeros(1, 3, 256, 452)
    style_to_transfer = torch.zeros(3)
    model = HiDT()
    model.eval()
    # Freeze every parameter so tracing/export sees a pure inference graph.
    for param in model.parameters():
        param.requires_grad = False
    model.style_transformer.trainer.eval()
    for param in model.style_transformer.trainer.parameters():
        param.requires_grad = False
    # transferred = model(image, style_to_transfer)
    traced_model = torch.jit.trace(
        model, (image, style_to_transfer), check_trace=False)
    # Convert the traced module to a CoreML model with fixed input shapes.
    mlmodel = ct.convert(model=traced_model, inputs=[
        ct.TensorType(name="image", shape=ct.Shape(image.shape)),
        ct.TensorType(name="style", shape=ct.Shape(style_to_transfer.shape))
    ])
    # NOTE(review): "~" is not shell-expanded here, so this likely writes to a
    # literal "~" directory — consider os.path.expanduser. TODO confirm.
    mlmodel.save("~/hidtcore.mlmodel")
| 36.636364 | 79 | 0.655087 | 1,100 | 0.545906 | 0 | 0 | 0 | 0 | 0 | 0 | 213 | 0.105707 |
64949bbe3dc54542a6bf1cf20073d7801df0a497 | 295 | py | Python | prm/relations/migrations/0010_delete_mood.py | justaname94/innovathon2019 | d1a4e9b1b877ba12ab23384b9ee098fcdbf363af | [
"MIT"
] | null | null | null | prm/relations/migrations/0010_delete_mood.py | justaname94/innovathon2019 | d1a4e9b1b877ba12ab23384b9ee098fcdbf363af | [
"MIT"
] | 4 | 2021-06-08T20:20:05.000Z | 2022-03-11T23:58:37.000Z | prm/relations/migrations/0010_delete_mood.py | justaname94/personal_crm | d1a4e9b1b877ba12ab23384b9ee098fcdbf363af | [
"MIT"
] | null | null | null | # Generated by Django 2.2.6 on 2019-10-03 23:37
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated by Django 2.2.6: removes the 'Mood' model from the
    # 'relations' app; depends on migration 0009_auto_20191003_2258.
    dependencies = [
        ('relations', '0009_auto_20191003_2258'),
    ]
    operations = [
        migrations.DeleteModel(
            name='Mood',
        ),
    ]
| 17.352941 | 49 | 0.60339 | 210 | 0.711864 | 0 | 0 | 0 | 0 | 0 | 0 | 89 | 0.301695 |
6494d7e235268c91bde539243623f44dd265dd50 | 962 | py | Python | tests/fake_websocket_server.py | UrbanOS-Examples/PredictiveParking | 778acb68a0c8be78655d38698ab68f1c1b47cbc5 | [
"Apache-2.0"
] | 2 | 2021-03-29T03:36:32.000Z | 2021-07-01T16:51:18.000Z | tests/fake_websocket_server.py | UrbanOS-Examples/PredictiveParking | 778acb68a0c8be78655d38698ab68f1c1b47cbc5 | [
"Apache-2.0"
] | null | null | null | tests/fake_websocket_server.py | UrbanOS-Examples/PredictiveParking | 778acb68a0c8be78655d38698ab68f1c1b47cbc5 | [
"Apache-2.0"
] | 1 | 2022-01-28T15:56:00.000Z | 2022-01-28T15:56:00.000Z | import json
def _standard_join_messages():
    """Messages every client receives right after joining the channel."""
    ok_reply = {
        'event': 'phx_reply',
        'payload': {
            'status': 'ok'
        }
    }
    cached_record = update_event({
        'id': '9861',
        'occupancy': 'UNOCCUPIED',
        'time_of_ingest': '2020-05-21T17:59:45.000000'
    })
    stream_start = {'event': 'presence_diff'}
    return [ok_reply, cached_record, stream_start]
def create_fake_server(messages=None):
    """Return an async websocket handler replaying canned messages.

    The handler first sends the standard join messages, then each entry
    of *messages* (JSON-encoded).

    Parameters
    ----------
    messages : list, optional
        Extra messages to replay after the standard join sequence.
    """
    # Bug fix: the original used a mutable default argument (messages=[]);
    # None is the safe sentinel and preserves the "no extra messages" default.
    extra = [] if messages is None else messages
    all_messages = _standard_join_messages() + extra

    async def _fake_server(websocket, _path):
        # Consume the client's join message, then replay every canned message.
        _join_message = await websocket.recv()
        for message in all_messages:
            await websocket.send(json.dumps(message))
    return _fake_server
def update_event(payload):
    """Wrap *payload* in a fake 'update' websocket message.

    Adds fixed price/status/limit fields. Bug fix: the original mutated
    the caller's dict in place; a copy is enriched instead so callers'
    data is left untouched.

    Parameters
    ----------
    payload : dict
        Base record fields (e.g. id, occupancy, time_of_ingest).

    Returns
    -------
    dict
        ``{'event': 'update', 'payload': <enriched copy>}``
    """
    enriched = dict(payload)
    enriched['price'] = 1.0
    enriched['status'] = 'open'
    enriched['limit'] = 'no-limit'
    return {'event': 'update', 'payload': enriched}
| 21.377778 | 63 | 0.591476 | 0 | 0 | 0 | 0 | 0 | 0 | 180 | 0.18711 | 200 | 0.2079 |
6495674227e2d4f327739dbc79ec0dbe607aeede | 189 | py | Python | simplepush/__init__.py | bobquest33/django-simplepush | af53ba086e51976a346e7741cb101c509ca9de0f | [
"BSD-3-Clause"
] | 1 | 2021-07-30T21:00:49.000Z | 2021-07-30T21:00:49.000Z | simplepush/__init__.py | bobquest33/django-simplepush | af53ba086e51976a346e7741cb101c509ca9de0f | [
"BSD-3-Clause"
] | null | null | null | simplepush/__init__.py | bobquest33/django-simplepush | af53ba086e51976a346e7741cb101c509ca9de0f | [
"BSD-3-Clause"
] | null | null | null | import json
from .helpers import send_notification_to_user
def send_user_notification(user, payload, ttl=0):
payload = json.dumps(payload)
send_notification_to_user(user, payload, ttl) | 23.625 | 49 | 0.814815 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
64968402e2034f893d17334b1fc6d1f4e99dc264 | 24,885 | py | Python | goalrepresent/models/lenia_BC/pytorchnnrepresentation/helper.py | flowersteam/holmes | e38fb8417ec56cfde8142eddd0f751e319e35d8c | [
"MIT"
] | 6 | 2020-12-19T00:16:16.000Z | 2022-01-28T14:59:21.000Z | goalrepresent/models/lenia_BC/pytorchnnrepresentation/helper.py | Evolutionary-Intelligence/holmes | e38fb8417ec56cfde8142eddd0f751e319e35d8c | [
"MIT"
] | null | null | null | goalrepresent/models/lenia_BC/pytorchnnrepresentation/helper.py | Evolutionary-Intelligence/holmes | e38fb8417ec56cfde8142eddd0f751e319e35d8c | [
"MIT"
] | 1 | 2021-05-24T14:58:26.000Z | 2021-05-24T14:58:26.000Z | import math
from math import floor
from numbers import Number
import h5py
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn.init import xavier_uniform_, xavier_normal_, kaiming_uniform_, kaiming_normal_
from torch.utils.data import Dataset
from torchvision.transforms import CenterCrop, ToTensor, ToPILImage, RandomHorizontalFlip, RandomResizedCrop, \
RandomVerticalFlip, RandomRotation
to_tensor = ToTensor()
to_PIL_image = ToPILImage()
from torchvision import transforms
import random
''' ---------------------------------------------
PYTORCH DATASET HELPERS
-------------------------------------------------'''
class Dataset(Dataset):
    """Dataset used to train auto-encoder representations during exploration.

    Holds images (and optional labels) in memory; `update` replaces the whole
    content at once. When `data_augmentation` is on, `__getitem__` applies a
    random toroidal translation, a spherical-pad + rotation + center-crop, and
    random flips before the optional `preprocess` transform.
    """

    def __init__(self, img_size, preprocess=None, data_augmentation=False):
        self.n_images = 0
        self.images = []
        self.labels = []
        self.img_size = img_size
        self.preprocess = preprocess
        self.data_augmentation = data_augmentation
        if self.data_augmentation:
            # A rotation can move corners outside the frame; pad spherically by
            # sqrt(2*r^2) - r so the rotated image still covers the final crop
            # (e.g. sqrt(2*128^2) - 128 = 53.01 for a 128x128 image).
            radius = max(self.img_size[0], self.img_size[1]) / 2
            padding_size = int(np.sqrt(2 * np.power(radius, 2)) - radius)
            self.spheric_pad = SphericPad(padding_size=padding_size)
            self.random_horizontal_flip = RandomHorizontalFlip(0.2)
            self.random_vertical_flip = RandomVerticalFlip(0.2)
            self.random_resized_crop = RandomResizedCrop(size=self.img_size)
            self.random_rotation = RandomRotation(40)
            self.center_crop = CenterCrop(self.img_size)
            self.roll_y = Roll(shift=0, dim=1)
            self.roll_x = Roll(shift=0, dim=2)

    def update(self, n_images, images, labels=None):
        """Replace the dataset content; datasets without labels get -1 labels."""
        if labels is None:
            labels = torch.Tensor([-1] * n_images)
        assert n_images == images.shape[0] == labels.shape[0], \
            'ERROR: the given dataset size ({0}) mismatch with observations size ({1}) and labels size ({2})'.format(
                n_images, images.shape[0], labels.shape[0])
        self.n_images = int(n_images)
        self.images = images
        self.labels = labels

    def __len__(self):
        return self.n_images

    def __getitem__(self, idx):
        # image
        img_tensor = self.images[idx]
        if self.data_augmentation:
            img_tensor = self._augment(img_tensor)
        if self.preprocess:
            img_tensor = self.preprocess(img_tensor)
        # label: NaN / missing labels are normalised to -1
        if self.labels[idx] is not None and not np.isnan(self.labels[idx]):
            label = int(self.labels[idx])
        else:
            label = -1
        return {'image': img_tensor, 'label': label}

    def _augment(self, img_tensor):
        """Apply the random augmentation pipeline to one CxHxW image tensor."""
        # random rolled translation (pixels shifted outside reappear on the other side)
        p_y = p_x = 0.3
        if np.random.random() < p_y:
            # the maximum translation is half the image size
            max_dy = 0.5 * self.img_size[0]
            shift_y = int(np.round(np.random.uniform(-max_dy, max_dy)))
            self.roll_y.shift = shift_y
            img_tensor = self.roll_y(img_tensor)
        if np.random.random() < p_x:
            max_dx = 0.5 * self.img_size[1]
            shift_x = int(np.round(np.random.uniform(-max_dx, max_dx)))
            # bug fix: the horizontal shift used to be assigned to self.roll_y
            # while self.roll_x was applied, so the x translation used a stale shift
            self.roll_x.shift = shift_x
            img_tensor = self.roll_x(img_tensor)
        # random spherical padding + rotation (avoids "black holes" when rotating)
        p_r = 0.3
        if np.random.random() < p_r:
            img_tensor = self.spheric_pad(
                img_tensor.view(1, img_tensor.size(0), img_tensor.size(1), img_tensor.size(2))).squeeze(0)
            img_PIL = to_PIL_image(img_tensor)
            img_PIL = self.random_rotation(img_PIL)
            img_PIL = self.center_crop(img_PIL)
            img_tensor = to_tensor(img_PIL)
        img_PIL = to_PIL_image(img_tensor)
        # random horizontal / vertical flips
        img_PIL = self.random_horizontal_flip(img_PIL)
        img_PIL = self.random_vertical_flip(img_PIL)
        return to_tensor(img_PIL)

    def save(self, output_npz_filepath):
        """Persist the dataset to a .npz archive (n_images, images, labels)."""
        np.savez(output_npz_filepath, n_images=self.n_images,
                 images=np.stack(self.images), labels=np.asarray(self.labels))
        return
class DatasetHDF5(Dataset):
    """
    Dataset to train auto-encoders representations during exploration from datatsets in hdf5 files.
    TODO: add a cache for loaded objects to be faster (see https://towardsdatascience.com/hdf5-datasets-for-pytorch-631ff1d750f5)
    """

    def __init__(self, filepath, split='train', img_size=None, preprocess=None, data_augmentation=False):
        self.filepath = filepath
        self.split = split
        # An open HDF5 handle is not pickleable, and a Dataset is pickled when
        # sent to DataLoader worker processes, so the file is opened lazily in
        # __getitem__ instead of here.
        self.data_group = None
        if img_size is not None:
            self.img_size = img_size
        with h5py.File(self.filepath, 'r') as file:
            self.n_images = int(file[self.split]['observations'].shape[0])
            self.has_labels = bool('labels' in file[self.split])
            if img_size is None:
                # fall back to the stored observation shape
                self.img_size = file[self.split]['observations'][0].shape
        self.preprocess = preprocess
        self.data_augmentation = data_augmentation
        if self.data_augmentation:
            # pad spherically so a rotation never exposes empty corners
            # (max rotation needs padding of sqrt(2*128^2)-128 = 53.01 for 128x128)
            radius = max(self.img_size[0], self.img_size[1]) / 2
            padding_size = int(np.sqrt(2 * np.power(radius, 2)) - radius)
            self.spheric_pad = SphericPad(padding_size=padding_size)
            self.random_horizontal_flip = RandomHorizontalFlip(0.2)
            self.random_vertical_flip = RandomVerticalFlip(0.2)
            self.random_resized_crop = RandomResizedCrop(size=self.img_size)
            self.random_rotation = RandomRotation(40)
            self.center_crop = CenterCrop(self.img_size)
            self.roll_y = Roll(shift=0, dim=1)
            self.roll_x = Roll(shift=0, dim=2)

    def __len__(self):
        return self.n_images

    def __getitem__(self, idx):
        # open the HDF5 file once and keep the handle as a singleton;
        # reopening per item introduces huge overhead
        if self.data_group is None:
            self.data_group = h5py.File(self.filepath, "r")[self.split]
        # image
        img_tensor = torch.from_numpy(self.data_group['observations'][idx, :, :]).unsqueeze(dim=0).float()
        if self.data_augmentation:
            # random rolled translation (pixels shifted outside reappear on the other side)
            p_y = p_x = 0.3
            if np.random.random() < p_y:
                ## the maximum translation is of half the image size
                max_dy = 0.5 * self.img_size[0]
                shift_y = int(np.round(np.random.uniform(-max_dy, max_dy)))
                self.roll_y.shift = shift_y
                img_tensor = self.roll_y(img_tensor)
            if np.random.random() < p_x:
                max_dx = 0.5 * self.img_size[1]
                shift_x = int(np.round(np.random.uniform(-max_dx, max_dx)))
                # bug fix: the shift used to be assigned to self.roll_y while
                # self.roll_x was applied, so the x translation used a stale value
                self.roll_x.shift = shift_x
                img_tensor = self.roll_x(img_tensor)
            # random spherical padding + rotation (avoid "black holes" when rotating)
            p_r = 0.3
            if np.random.random() < p_r:
                img_tensor = self.spheric_pad(img_tensor.view(1, img_tensor.size(0), img_tensor.size(1), img_tensor.size(2))).squeeze(0)
                img_PIL = to_PIL_image(img_tensor)
                img_PIL = self.random_rotation(img_PIL)
                img_PIL = self.center_crop(img_PIL)
                img_tensor = to_tensor(img_PIL)
            img_PIL = to_PIL_image(img_tensor)
            # random horizontal flip
            img_PIL = self.random_horizontal_flip(img_PIL)
            # random vertical flip
            img_PIL = self.random_vertical_flip(img_PIL)
            # convert back to tensor
            img_tensor = to_tensor(img_PIL)
        if self.preprocess:
            img_tensor = self.preprocess(img_tensor)
        # label: -1 when absent or NaN
        label = -1
        if self.has_labels:
            tmp_label = self.data_group['labels'][idx]
            if not np.isnan(tmp_label):
                label = int(tmp_label)
        return {'image': img_tensor, 'label': label}
''' ---------------------------------------------
NN MODULES HELPERS
-------------------------------------------------'''
class Flatten(nn.Module):
    """Collapse every non-batch dimension: (N, d1, d2, ...) -> (N, d1*d2*...)."""

    def forward(self, input):
        batch_size = input.size(0)
        return input.view(batch_size, -1)
'''
class LinearFromFlatten(nn.Module):
"""Flatten the input and then apply a linear module """
def __init__(self, output_flat_size):
super(LinearFromFlatten, self).__init__()
self.output_flat_size = output_flat_size
def forward(self, input):
input = input.view(input.size(0), -1) # Batch_size * flatenned_size
input_flatten_size = input.size(1)
Linear = nn.Linear(input_flatten_size, self.output_flat_size)
return Linear(input)
'''
class Channelize(nn.Module):
    """Reshape a flat (N, C*H*W) input into channelized (N, C, H, W) form."""

    def __init__(self, n_channels, height, width):
        super(Channelize, self).__init__()
        self.n_channels = n_channels
        self.height = height
        self.width = width

    def forward(self, input):
        target_shape = (input.size(0), self.n_channels, self.height, self.width)
        return input.view(*target_shape)
class SphericPad(nn.Module):
    """Pads the input spherically (torus wrap-around) on all sides.

    `padding_size` may be an int (all sides), a (horizontal, vertical) pair,
    or a (left, top, right, bottom) 4-tuple.
    """

    def __init__(self, padding_size):
        super(SphericPad, self).__init__()
        if isinstance(padding_size, int):
            self.pad_left = self.pad_right = self.pad_top = self.pad_bottom = padding_size
        elif isinstance(padding_size, tuple) and len(padding_size) == 2:
            self.pad_left = self.pad_right = padding_size[0]
            self.pad_top = self.pad_bottom = padding_size[1]
        elif isinstance(padding_size, tuple) and len(padding_size) == 4:
            self.pad_left = padding_size[0]
            self.pad_top = padding_size[1]
            self.pad_right = padding_size[2]
            self.pad_bottom = padding_size[3]
        else:
            raise ValueError('The padding size should be: int, tuple of size 2 or tuple of size 4')

    def forward(self, input):
        """Wrap a (N, C, H, W) tensor: each pad band is copied from the opposite edge.

        Bug fix: the previous cat/negative-slice implementation produced wrong
        slices for a zero pad (e.g. `[:-0]`); the slices below degrade to empty
        tensors instead, so a padding of 0 on any side is handled correctly.
        """
        h, w = input.size(2), input.size(3)
        # top pad = last rows, bottom pad = first rows (height wrap)
        output = torch.cat([input[:, :, h - self.pad_top:, :],
                            input,
                            input[:, :, :self.pad_bottom, :]], dim=2)
        # left pad = last cols, right pad = first cols (width wrap)
        output = torch.cat([output[:, :, :, w - self.pad_left:],
                            output,
                            output[:, :, :, :self.pad_right]], dim=3)
        return output
class Roll(nn.Module):
    """Rolls the input circularly by `shift` positions along dimension `dim`."""

    def __init__(self, shift, dim):
        super(Roll, self).__init__()
        self.shift = shift
        self.dim = dim

    def forward(self, input):
        """Shift an image by rolling it (wrap-around).

        Bug fix: the previous implementation rewrote self.shift inside
        forward, so repeated calls with the same configured shift produced
        different results; this version leaves the module state untouched.
        """
        size = input.size(self.dim)
        shift = self.shift % size  # normalise negative shifts
        if shift == 0:
            return input
        head = input.narrow(self.dim, size - shift, shift)
        tail = input.narrow(self.dim, 0, size - shift)
        return torch.cat([head, tail], dim=self.dim)
def conv2d_output_flatten_size(h_w, n_conv=0, kernels_size=1, strides=1, pads=0, dils=1):
"""Returns the flattened size of a tensor after a sequence of convolutions"""
assert n_conv == len(kernels_size) == len(strides) == len(pads) == len(dils), print('The number of kernels({}), strides({}), paddings({}) and dilatations({}) has to match the number of convolutions({})'.format(len(kernels_size), len(strides), len(pads), len(dils), n_conv))
h = h_w[0]
w = h_w[1]
for conv_id in range(n_conv):
if type(kernels_size[conv_id]) is not tuple:
kernel_size = (kernels_size[conv_id], kernels_size[conv_id])
if type(strides[conv_id]) is not tuple:
stride = (strides[conv_id], strides[conv_id])
if type(pads[conv_id]) is not tuple:
pad = (pads[conv_id], pads[conv_id])
if type(dils[conv_id]) is not tuple:
dil = (dils[conv_id], dils[conv_id])
h = floor( ((h + (2 * pad[0]) - ( dil[0] * (kernel_size[0] - 1) ) - 1 ) / stride[0]) + 1)
w = floor( ((w + (2 * pad[1]) - ( dil[1] * (kernel_size[1] - 1) ) - 1 ) / stride[1]) + 1)
return h*w
def conv2d_output_size(h_w, n_conv=0, kernels_size=1, strides=1, pads=0, dils=1):
"""Returns the size of a tensor after a sequence of convolutions"""
assert n_conv == len(kernels_size) == len(strides) == len(pads) == len(dils), print('The number of kernels ({}), strides({}), paddings({}) and dilatations({}) has to match the number of convolutions({})'.format(len(kernels_size), len(strides), len(pads), len(dils), n_conv))
h = h_w[0]
w = h_w[1]
for conv_id in range(n_conv):
if type(kernels_size[conv_id]) is not tuple:
kernel_size = (kernels_size[conv_id], kernels_size[conv_id])
if type(strides[conv_id]) is not tuple:
stride = (strides[conv_id], strides[conv_id])
if type(pads[conv_id]) is not tuple:
pad = (pads[conv_id], pads[conv_id])
if type(dils[conv_id]) is not tuple:
dil = (dils[conv_id], dils[conv_id])
h = floor( ((h + (2 * pad[0]) - ( dil[0] * (kernel_size[0] - 1) ) - 1 ) / stride[0]) + 1)
w = floor( ((w + (2 * pad[1]) - ( dil[1] * (kernel_size[1] - 1) ) - 1 ) / stride[1]) + 1)
return h, w
''' ---------------------------------------------
PREPROCESS DATA HELPER
-------------------------------------------------'''
def weights_init_(m):
    """DCGAN-style init: N(0, 0.02) conv weights, N(1, 0.02) batch-norm weights,
    zeroed biases. Matching is done on the class name, so apply with
    `module.apply(weights_init_)`."""
    layer_name = m.__class__.__name__
    if 'Conv' in layer_name:
        m.weight.data.normal_(0.0, 0.02)
        if m.bias is not None:
            m.bias.data.fill_(0)
    elif 'BatchNorm' in layer_name:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
    elif 'Linear' in layer_name:
        m.bias.data.fill_(0)
def weights_init_pytorch_(m):
    """Re-apply PyTorch's default initialisation to Conv* and Linear layers."""
    layer_name = m.__class__.__name__
    if ('Conv' in layer_name) or ('Linear' in layer_name):
        m.reset_parameters()
def weights_init_xavier_uniform_(m):
    """Xavier/Glorot uniform init for Conv* and Linear weights; zeroed biases.

    Matching is done on the class name, so apply via `module.apply(...)`.
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        xavier_uniform_(m.weight.data)
        if m.bias is not None:
            m.bias.data.fill_(0)
    elif classname.find('Linear') != -1:
        xavier_uniform_(m.weight.data)
        if m.bias is not None:
            m.bias.data.fill_(0)
def weights_init_xavier_normal_(m):
    """Xavier/Glorot normal init for Conv* and Linear weights; zeroed biases."""
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        xavier_normal_(m.weight.data)
        if m.bias is not None:
            m.bias.data.fill_(0)
    elif classname.find('Linear') != -1:
        xavier_normal_(m.weight.data)
        if m.bias is not None:
            m.bias.data.fill_(0)
def weights_init_kaiming_uniform_(m):
    """Kaiming/He uniform init (fan_in, relu gain) for Conv* and Linear weights;
    zeroed biases."""
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        kaiming_uniform_(m.weight.data, mode='fan_in', nonlinearity='relu')
        if m.bias is not None:
            m.bias.data.fill_(0)
    elif classname.find('Linear') != -1:
        kaiming_uniform_(m.weight.data, mode='fan_in', nonlinearity='relu')
        if m.bias is not None:
            m.bias.data.fill_(0)
def weights_init_kaiming_normal_(m):
    """Kaiming/He normal init (fan_in, relu gain) for Conv* and Linear weights."""
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        kaiming_normal_(m.weight.data, mode='fan_in', nonlinearity='relu')
        if m.bias is not None:
            m.bias.data.fill_(0)
    elif classname.find('Linear') != -1:
        kaiming_normal_(m.weight.data, mode='fan_in', nonlinearity='relu')
        if m.bias is not None:
            # NOTE(review): every sibling initialiser fills biases with 0, but
            # Linear biases get 0.01 here — confirm this asymmetry is intentional.
            m.bias.data.fill_(0.01)
def weights_init_custom_uniform_(m):
    """Uniform init with hand-tuned bounds.

    Conv weights: U(-1/k, 1/k) with k = m.weight.size(2), i.e. the kernel
    height (assumes square kernels — TODO confirm). Linear weights:
    U(-1/sqrt(n), 1/sqrt(n)) with n = m.weight.size(0), the output dimension
    (fan-out, not the usual fan-in — presumably deliberate; verify).
    Biases: U(-0.1, 0.1).
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        #m.weight.data.uniform_(-1,1)
        m.weight.data.uniform_(-1/(m.weight.size(2)), 1/(m.weight.size(2)))
        if m.bias is not None:
            m.bias.data.uniform_(-0.1,0.1)
    elif classname.find('Linear') != -1:
        m.weight.data.uniform_(-1/math.sqrt(m.weight.size(0)), 1/math.sqrt(m.weight.size(0)))
        if m.bias is not None:
            m.bias.data.uniform_(-0.1,0.1)
''' ---------------------------------------------
LOSSES HELPERS
-------------------------------------------------'''
def MSE_loss(recon_x, x, reduction=True):
    """Reconstruction loss (mean squared error).

    If `reduction`, the loss is summed over the image dimensions and averaged
    over the batch size; otherwise the elementwise loss tensor is returned.
    Uses the `reduction=` keyword instead of the long-deprecated
    `size_average`/`reduce` arguments.
    """
    if reduction:
        return F.mse_loss(recon_x, x, reduction='sum') / x.size()[0]
    else:
        return F.mse_loss(recon_x, x, reduction='none')
def BCE_loss(recon_x, x, reduction=True):
    """Reconstruction loss (binary cross entropy).

    If `reduction`, summed over the image dimensions and averaged over the
    batch size; otherwise elementwise. Uses the `reduction=` keyword instead
    of the deprecated `size_average`/`reduce` arguments.
    """
    if reduction:
        return F.binary_cross_entropy(recon_x, x, reduction='sum') / x.size()[0]
    else:
        return F.binary_cross_entropy(recon_x, x, reduction='none')
def BCE_with_digits_loss(recon_x, x, reduction=True):
    """Reconstruction loss (sigmoid + binary cross entropy) on raw logits.

    (The function name keeps the historical "digits" typo for "logits" so
    existing callers are unaffected.) If `reduction`, summed over the image
    dimensions and averaged over the batch size; otherwise elementwise.
    Uses the `reduction=` keyword instead of the deprecated
    `size_average`/`reduce` arguments.
    """
    if reduction:
        return F.binary_cross_entropy_with_logits(recon_x, x, reduction='sum') / x.size()[0]
    else:
        return F.binary_cross_entropy_with_logits(recon_x, x, reduction='none')
def KLD_loss(mu, logvar, reduction=True):
    """KL divergence D(q || p) between q = N(mu, exp(logvar)) and p = N(0, I).

    Returns a (KLD_loss, KLD_loss_per_latent_dim, KLD_loss_var) triple; the
    variance across latent dimensions is exposed so callers can penalise the
    posterior collapsing all divergence onto a single dimension.
    With `reduction`, values are averaged over the batch (dim 0); otherwise
    they are kept per-sample.
    """
    if reduction:
        """ Returns the KLD loss D(q,p) where q is N(mu,var) and p is N(0,I) """
        # 0.5 * (1 + log(sigma^2) - mu^2 - sigma^2)
        KLD_loss_per_latent_dim = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim = 0 ) / mu.size()[0] #we average on the batch
        #KL-divergence between a diagonal multivariate normal and the standard normal distribution is the sum on each latent dimension
        KLD_loss = torch.sum(KLD_loss_per_latent_dim)
        # we add a regularisation term so that the KLD loss doesnt "trick" the loss by sacrificing one dimension
        KLD_loss_var = torch.var(KLD_loss_per_latent_dim)
        return KLD_loss, KLD_loss_per_latent_dim, KLD_loss_var
    else:
        # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
        KLD_loss_per_latent_dim = -0.5 * (1 + logvar - mu.pow(2) - logvar.exp())
        #KL-divergence between a diagonal multivariate normal and the standard normal distribution is the sum on each latent dimension
        KLD_loss = torch.sum(KLD_loss_per_latent_dim, dim = 1)
        # we add a regularisation term so that the KLD loss doesnt "trick" the loss by sacrificing one dimension
        KLD_loss_var = torch.var(KLD_loss_per_latent_dim, dim = 1)
        return KLD_loss, KLD_loss_per_latent_dim, KLD_loss_var
def CE_loss(recon_y, y):
    """Cross entropy loss (softmax + NLLLoss) summed then averaged over the batch.

    Uses the `reduction=` keyword instead of the deprecated `size_average`.
    """
    return F.cross_entropy(recon_y, y, reduction='sum') / y.size()[0]
def logsumexp(value, dim=None, keepdim=False):
    """Numerically stable implementation of the operation

        value.exp().sum(dim, keepdim).log()

    The maximum is subtracted before exponentiating so large magnitudes do not
    overflow, then added back after the log.
    """
    if dim is not None:
        max_val, _ = torch.max(value, dim=dim, keepdim=True)
        shifted = value - max_val
        if keepdim is False:
            max_val = max_val.squeeze(dim)
        summed = torch.sum(torch.exp(shifted), dim=dim, keepdim=keepdim)
        return max_val + torch.log(summed)
    else:
        max_val = torch.max(value)
        total = torch.sum(torch.exp(value - max_val))
        # plain-number results fall back to math.log
        if isinstance(total, Number):
            return max_val + math.log(total)
        else:
            return max_val + torch.log(total)
''' ---------------------------------------------
PREPROCESS DATA HELPER
-------------------------------------------------'''
class Crop_Preprocess(object):
    """Callable preprocess that crops a 1xHxW tensor by `crop_ratio`.

    crop_type 'center' crops around the image centroid
    (centroid_crop_preprocess); 'random' picks a seeded random crop
    (random_crop_preprocess).
    """

    def __init__(self, crop_type='center', crop_ratio=1):
        if crop_type not in ['center', 'random']:
            raise ValueError('Unknown crop type {!r}'.format(crop_type))
        self.crop_type = crop_type
        self.crop_ratio = crop_ratio

    def __call__(self, x):
        if self.crop_type == 'center':
            return centroid_crop_preprocess(x, self.crop_ratio)
        elif self.crop_type == 'random':
            # bug fix: the 'random' branch previously called
            # centroid_crop_preprocess, making both modes identical
            return random_crop_preprocess(x, self.crop_ratio)
def centroid_crop_preprocess(x, ratio = 2):
    '''
    Crop a patch of size (H/ratio, W/ratio) centred on the image's centre of
    mass, with spherical (wrap-around) padding so the patch never leaves the
    image.

    arg: x, tensor 1xHxW
    returns: tensor patch of size 1 x H/ratio x W/ratio (ratio == 1 returns x unchanged)
    '''
    if ratio==1:
        return x
    img_size = (x.size(1), x.size(2))
    patch_size = (img_size[0]/ratio, img_size[1]/ratio)
    # crop around center of mass (mY and mX describe the position of the centroid of the image)
    image = x[0].numpy()
    x_grid, y_grid = np.meshgrid(range(img_size[0]), range(img_size[1]))
    y_power1_image = y_grid * image
    x_power1_image = x_grid * image
    ## raw moments (m00 = total mass, m10/m01 = first-order moments)
    m00 = np.sum(image)
    m10 = np.sum(y_power1_image)
    m01 = np.sum(x_power1_image)
    if m00 == 0:
        # empty image: fall back to the geometric centre
        mY = (img_size[1]-1) / 2 # the crop is happening in PIL system (so inverse of numpy (x,y))
        mX = (img_size[0]-1) / 2
    else:
        mY = m10 / m00
        mX = m01 / m00
    # pad spherically so a centroid near the border still yields a full patch;
    # the centroid coordinates shift by the padding amount
    padding_size = round(max(patch_size[0]/2, patch_size[1]/2))
    spheric_pad = SphericPad(padding_size=padding_size)
    mX += padding_size
    mY += padding_size
    to_PIL = transforms.ToPILImage()
    to_Tensor = transforms.ToTensor()
    # top-left corner of the crop in PIL coordinates (i = row, j = column)
    j = int(mX - patch_size[0]/2)
    i = int(mY - patch_size[1]/2)
    w = patch_size[0]
    h = patch_size[1]
    x = spheric_pad(x.view(1, x.size(0), x.size(1), x.size(2))).squeeze(0)
    x = to_PIL(x)
    patch = transforms.functional.crop(x, i, j, h, w)
    patch = to_Tensor(patch)
    return patch
def random_crop_preprocess(x, ratio = 2):
    '''
    Pick a random patch of size (H/ratio, W/ratio) that is "active enough":
    patches are drawn until one holds at least half of the image's mean
    activation (up to 20 trials, keeping the best candidate seen).

    The RNG is seeded with mX*mY (the centroid product) so the same image
    always produces the same crop; the global random state is restored before
    returning.

    arg: x, tensor 1xHxW
    '''
    if ratio==1:
        return x
    img_size = (x.size(1), x.size(2))
    patch_size = (img_size[0]/ratio, img_size[1]/ratio)
    random_crop_transform = transforms.Compose([transforms.ToPILImage(), transforms.RandomCrop(patch_size),transforms.ToTensor()])
    # set the seed as mX*mY for reproducibility (mY and mX describe the position of the centroid of the image)
    image = x[0].numpy()
    x_grid, y_grid = np.meshgrid(range(img_size[0]), range(img_size[1]))
    y_power1_image = y_grid * image
    x_power1_image = x_grid * image
    ## raw moments
    m00 = np.sum(image)
    m10 = np.sum(y_power1_image)
    m01 = np.sum(x_power1_image)
    if m00 == 0:
        # empty image: centroid falls back to the geometric centre
        mY = (img_size[1]-1) / 2
        mX = (img_size[0]-1) / 2
    else:
        mY = m10 / m00
        mX = m01 / m00
    ## save the global RNG state, seed deterministically, restore afterwards
    global_rng_state = random.getstate()
    local_seed = mX*mY
    random.seed(local_seed)
    n_trials = 0
    best_patch_activation = 0
    selected_patch = False
    # mean activation of the whole image; a patch "passes" at half this density
    activation = m00 / (img_size[0]*img_size[1])
    while 1:
        patch = random_crop_transform(x)
        patch_activation = patch.sum(dim=-1).sum(dim=-1) / (patch_size[0]*patch_size[1])
        if patch_activation > (activation * 0.5):
            selected_patch = patch
            break
        # otherwise remember the densest patch seen so far
        if patch_activation >= best_patch_activation:
            best_patch_activation = patch_activation
            selected_patch = patch
        n_trials +=1
        if n_trials == 20:
            break
    ## reput global random state
    random.setstate(global_rng_state)
    return selected_patch
| 40.929276 | 278 | 0.594937 | 11,632 | 0.467355 | 0 | 0 | 0 | 0 | 0 | 0 | 5,298 | 0.212865 |
649691d5c0e82103d6071fa1fc077a9de68296e7 | 874 | py | Python | 100_days_of_code/Intermediate+/day_32/practice/main.py | Tiago-S-Ribeiro/Python-Pro-Bootcamp | 20a82443fe2e6ee9040ecd9a03853e6c6346592c | [
"MIT"
] | null | null | null | 100_days_of_code/Intermediate+/day_32/practice/main.py | Tiago-S-Ribeiro/Python-Pro-Bootcamp | 20a82443fe2e6ee9040ecd9a03853e6c6346592c | [
"MIT"
] | null | null | null | 100_days_of_code/Intermediate+/day_32/practice/main.py | Tiago-S-Ribeiro/Python-Pro-Bootcamp | 20a82443fe2e6ee9040ecd9a03853e6c6346592c | [
"MIT"
] | null | null | null | import smtplib
import random
import datetime as dt
from data import EML, PWD, DST, HOST_SERVER
if dt.datetime.now().weekday() == 0: #Monday
try:
with open("./quotes.txt") as file:
quotes = file.readlines()
except FileNotFoundError:
print("File not found. Please provide a valid file path")
else:
chosen_quote = random.choice(quotes)
try:
with smtplib.SMTP(HOST_SERVER) as connection:
connection.starttls()
connection.login(user=EML, password=PWD)
connection.sendmail(from_addr=EML, to_addrs=DST, msg=f"Subject:Motivational Monday\n\n{chosen_quote}")
except smtplib.SMTPAuthenticationError:
print("Error Found. Confirm SMTP settings and/or authentication information.")
else:
print("\nMotivational quote was sent!\n")
| 38 | 118 | 0.644165 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 224 | 0.256293 |
64969f5fe7755132d49c138a9918dfb740289d3c | 7,870 | py | Python | tenant_workspace/templatetags/qt_066_tag.py | smegurus/smegurus-django | 053973b5ff0b997c52bfaca8daf8e07db64a877c | [
"BSD-4-Clause"
] | 1 | 2020-07-16T10:58:23.000Z | 2020-07-16T10:58:23.000Z | tenant_workspace/templatetags/qt_066_tag.py | smegurus/smegurus-django | 053973b5ff0b997c52bfaca8daf8e07db64a877c | [
"BSD-4-Clause"
] | 13 | 2018-11-30T02:29:39.000Z | 2022-03-11T23:35:49.000Z | tenant_workspace/templatetags/qt_066_tag.py | smegurus/smegurus-django | 053973b5ff0b997c52bfaca8daf8e07db64a877c | [
"BSD-4-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from django import template
from django.db.models import Q
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.shortcuts import get_object_or_404
from foundation_tenant.utils import int_or_none
from foundation_tenant.models.base.naicsoption import NAICSOption
from foundation_tenant.models.base.imageupload import ImageUpload
from foundation_tenant.models.bizmula.question import Question
from foundation_tenant.models.bizmula.questionanswer import QuestionAnswer
from smegurus import constants
register = template.Library() # Register our custom template tag.
def get_answer_content(workspace, qid):
    """Return the stored answer content for question `qid` in `workspace`."""
    answer = QuestionAnswer.objects.get(
        workspace=workspace,
        question_id=qid
    )
    return answer.content
def get_long_term_assets(q4):
    """Total the three yearly amount columns over rows tagged 'Long-term asset'.

    Each row is a dict with a type in 'var_3' and yearly amounts (strings or
    numbers) in 'var_5'/'var_6'/'var_7'. Returns {'yr1', 'yr2', 'yr3'} floats.
    """
    totals = {'yr1': 0.0, 'yr2': 0.0, 'yr3': 0.0}
    for row in q4:
        if row['var_3'] != "Long-term asset":
            continue
        totals['yr1'] += float(row['var_5'])
        totals['yr2'] += float(row['var_6'])
        totals['yr3'] += float(row['var_7'])
    return totals
def get_assets_to_purchase(q1):
    """Total the three yearly amount columns ('var_5'..'var_7') over all rows."""
    year_keys = ('var_5', 'var_6', 'var_7')
    yr1, yr2, yr3 = (sum((float(row[key]) for row in q1), 0.0) for key in year_keys)
    return {
        'yr1': yr1,
        'yr2': yr2,
        'yr3': yr3
    }
def get_inventory(q2, q5):
    """Sum the monthly material costs for the first `q5['var_1']` months (max 12).

    q2 holds numeric values under keys 'materials_month_1'..'materials_month_12';
    q5['var_1'] is the number of months of inventory required.
    """
    index = q5['var_1']
    total = 0.00
    # Replaces the previous 12-branch if-chain: month m is included whenever
    # the requested month count reaches it (same >= comparison as before).
    for month in range(1, 13):
        if index >= month:
            total += q2['materials_month_{0}'.format(month)]
    return total
def get_salary(q3):
    """Annualise the monthly salary requirement taken from answer q3.

    Prefers the free-text override ('var_1_other') when non-empty, otherwise
    the selected option ('var_1'); strips '$' and ',' before parsing.
    """
    raw = q3['var_1_other'] if q3['var_1_other'] else q3['var_1']
    monthly = float(raw.replace('$', '').replace(',', ''))
    # twelve months of cash required for the entrepreneur's salary
    return monthly * 12.00
def get_short_term_assets(answer_content):
    """Total the yearly amount columns over rows tagged 'Short-term asset'."""
    matching = [row for row in answer_content if row['var_3'] == "Short-term asset"]
    return {
        'yr1': sum((float(row['var_5']) for row in matching), 0.0),
        'yr2': sum((float(row['var_6']) for row in matching), 0.0),
        'yr3': sum((float(row['var_7']) for row in matching), 0.0),
    }
def get_short_term_liabilities(answer_content):
    """Total the yearly amount columns over rows tagged 'Short-term liability'."""
    totals = {'yr1': 0.0, 'yr2': 0.0, 'yr3': 0.0}
    for row in answer_content:
        if row['var_3'] != "Short-term liability":
            continue
        totals['yr1'] += float(row['var_5'])
        totals['yr2'] += float(row['var_6'])
        totals['yr3'] += float(row['var_7'])
    return totals
def get_long_term_liabilities(answer_content):
    """Total the yearly amount columns over rows tagged 'Long-term liability'."""
    matching = [row for row in answer_content if row['var_3'] == "Long-term liability"]
    return {
        'yr1': sum((float(row['var_5']) for row in matching), 0.0),
        'yr2': sum((float(row['var_6']) for row in matching), 0.0),
        'yr3': sum((float(row['var_7']) for row in matching), 0.0),
    }
@register.inclusion_tag('templatetags/question/template_066.html')
def render_question_type_066(workspace, module, node, question, answer):
    """Render the start-up costing summary (question type 066).

    Fetches five previously answered questions (ids from this question's
    `dependency` mapping), derives start-up costs / funds available /
    deficit-surplus figures, persists them on `answer.content`, and returns
    the context for template_066.html.
    """
    #====================#
    # FETCH DEPENDENCIES #
    #====================#
    q1_qid = int_or_none(question.dependency['q1_qid']) # "q1_qid": 139,
    q2_qid = int_or_none(question.dependency['q2_qid']) # "q2_qid": 101,
    q3_qid = int_or_none(question.dependency['q3_qid']) # "q3_qid": 146,
    q4_qid = int_or_none(question.dependency['q4_qid']) # "q4_qid": 140,
    q5_qid = int_or_none(question.dependency['q5_qid']) # "q4_qid": 141
    #===============#
    # FETCH ANSWERS #
    #===============#
    # q1: assets to purchase, q2: monthly materials, q3: salary,
    # q4: assets/liabilities table, q5: months of inventory required
    q1 = get_answer_content(workspace, q1_qid)
    q2 = get_answer_content(workspace, q2_qid)
    q3 = get_answer_content(workspace, q3_qid)
    q4 = get_answer_content(workspace, q4_qid)
    q5 = get_answer_content(workspace, q5_qid)
    #==============#
    # COMPUTATIONS #
    #==============#
    # Start-up Costs
    #------------------------
    # Assets Owned.
    long_term_assets = get_long_term_assets(q4)
    # Cash Required
    #------------------------
    # Assets to Purchase
    equipment_costs = get_assets_to_purchase(q1)
    # Inventory
    startup_inventory_req = get_inventory(q2, q5)
    # Salary
    ent_cash_startup_req = get_salary(q3)
    # Total Start-up Costs
    cash_required = startup_inventory_req + ent_cash_startup_req + equipment_costs['yr1']
    # Funds Available
    short_term_assets = get_short_term_assets(q4)
    short_term_liabilities = get_short_term_liabilities(q4)
    long_term_liabilities = get_long_term_liabilities(q4)
    # Total Funds Available
    funds_available = short_term_assets['yr1'] + short_term_liabilities['yr1'] + long_term_liabilities['yr1']
    # Start-up Deficit/Surplus: cash required beyond available funds
    # (positive value => shortfall)
    net_startup_deficit_surplus = cash_required - funds_available
    #=============#
    # SAVING DATA #
    #=============#
    # Persist the derived figures on the answer so later questions/reports
    # can reuse them without recomputation.
    answer.content = {
        'asset_y1_costs_total': long_term_assets['yr1'],
        'asset_y2_costs_total': long_term_assets['yr2'],
        'asset_y3_costs_total': long_term_assets['yr3'],
        'long_term_assets_total': long_term_assets['yr1'] + long_term_assets['yr2'] + long_term_assets['yr3'],
        'equipment_y1_costs_total': equipment_costs['yr1'],
        'equipment_y2_costs_total': equipment_costs['yr2'],
        'equipment_y3_costs_total': equipment_costs['yr3'],
        'startup_inventory_req': startup_inventory_req,
        'ent_cash_startup_req': ent_cash_startup_req,
        'cash_required': cash_required,
        'short_term_assets_y1': short_term_assets['yr1'],
        'short_term_assets_y2': short_term_assets['yr2'],
        'short_term_assets_y3': short_term_assets['yr3'],
        'short_term_assets_total': short_term_assets['yr1'] + short_term_assets['yr2'] + short_term_assets['yr3'],
        'short_term_liabilities_y1': short_term_liabilities['yr1'],
        'short_term_liabilities_y2': short_term_liabilities['yr2'],
        'short_term_liabilities_y3': short_term_liabilities['yr3'],
        'short_term_liabilities_total': short_term_liabilities['yr1'] + short_term_liabilities['yr2'] + short_term_liabilities['yr3'],
        'long_term_liabilities_y1': long_term_liabilities['yr1'],
        'long_term_liabilities_y2': long_term_liabilities['yr2'],
        'long_term_liabilities_y3': long_term_liabilities['yr3'],
        'long_term_liabilities_total': long_term_liabilities['yr1'] + long_term_liabilities['yr2'] + long_term_liabilities['yr3'],
        'funds_available': funds_available,
        'net_startup_deficit_surplus': net_startup_deficit_surplus
    }
    answer.save()
    # Return result.
    return {
        'workspace': workspace,
        'module': module,
        'node': node,
        'question': question,
        'answer': answer,
        'picked': answer.content,
    }
| 30.862745 | 134 | 0.634943 | 0 | 0 | 0 | 0 | 3,862 | 0.490724 | 0 | 0 | 2,051 | 0.26061 |
6496baa59497d45ed96cfa1160da34e1c8492c6a | 2,087 | py | Python | octavia_f5/common/constants.py | notandy/octavia-f5-provider-driver | 29b33e55c369561a50791fd4f923ff3b10081759 | [
"Apache-2.0"
] | null | null | null | octavia_f5/common/constants.py | notandy/octavia-f5-provider-driver | 29b33e55c369561a50791fd4f923ff3b10081759 | [
"Apache-2.0"
] | null | null | null | octavia_f5/common/constants.py | notandy/octavia-f5-provider-driver | 29b33e55c369561a50791fd4f923ff3b10081759 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 SAP SE
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Re-export the shared octavia-lib constants; this module is a superset of them.
from octavia_lib.common.constants import *

# -- common field names --
PROJECT_ID = 'project_id'
BIGIP = 'bigip'

# -- per-entity name prefixes used when naming objects for the F5 device --
PREFIX_PROJECT = 'project_'
PREFIX_LISTENER = 'listener_'
PREFIX_TLS_LISTENER = 'tls_listener_'
PREFIX_TLS_POOL = 'tls_pool_'
PREFIX_CONTAINER = 'container_'
PREFIX_CERTIFICATE = 'cert_'
PREFIX_POOL = 'pool_'
PREFIX_HEALTH_MONITOR = 'hm_'
PREFIX_LOADBALANCER = 'lb_'
PREFIX_POLICY = 'l7policy_'
PREFIX_NETWORK = 'net_'
PREFIX_IRULE = 'irule_'
PREFIX_MEMBER = 'member_'
PREFIX_SECRET = 'secret_'

# -- AS3 application template identifiers --
APPLICATION_TCP = 'tcp'
APPLICATION_UDP = 'udp'
APPLICATION_HTTP = 'http'
APPLICATION_HTTPS = 'https'
APPLICATION_L4 = 'l4'
APPLICATION_GENERIC = 'generic'
APPLICATION_SHARED = 'shared'
SUPPORTED_APPLICATION_TEMPLATES = (APPLICATION_TCP, APPLICATION_UDP,
                                   APPLICATION_HTTP, APPLICATION_HTTPS,
                                   APPLICATION_L4, APPLICATION_GENERIC,
                                   APPLICATION_SHARED)

# -- AS3 service class names --
SERVICE_TCP = 'Service_TCP'
SERVICE_UDP = 'Service_UDP'
SERVICE_HTTP = 'Service_HTTP'
SERVICE_HTTPS = 'Service_HTTPS'
SERVICE_L4 = 'Service_L4'
SERVICE_GENERIC = 'Service_Generic'
SUPPORTED_SERVICES = (SERVICE_TCP, SERVICE_UDP, SERVICE_HTTP,
                      SERVICE_HTTPS, SERVICE_L4, SERVICE_GENERIC)

# -- networking / driver plumbing --
SEGMENT = 'segment'
VIF_TYPE = 'f5'
ESD = 'esd'
RPC_NAMESPACE_CONTROLLER_AGENT = 'f5controller'
DEVICE_OWNER_LISTENER = 'network:' + 'f5listener'
PROFILE_L4 = 'basic'

# -- member / monitor operational states --
OPEN = 'OPEN'
FULL = 'FULL'
UP = 'UP'
DOWN = 'DOWN'
DRAIN = 'DRAIN'
NO_CHECK = 'no check'
MAINT = 'MAINT'
6496e3a04cad8fb52d2bf8ff8057483c20f1efdb | 1,788 | py | Python | tests/loris/parameters/api_tests.py | jpstroop/loris-redux | b5db56d5a250fdb24486afe01bad55b81761701a | [
"BSD-2-Clause"
] | 7 | 2016-08-09T17:39:05.000Z | 2016-09-26T19:37:30.000Z | tests/loris/parameters/api_tests.py | jpstroop/loris-redux | b5db56d5a250fdb24486afe01bad55b81761701a | [
"BSD-2-Clause"
] | 183 | 2016-06-02T22:07:05.000Z | 2022-03-11T23:23:01.000Z | tests/loris/parameters/api_tests.py | jpstroop/loris-redux | b5db56d5a250fdb24486afe01bad55b81761701a | [
"BSD-2-Clause"
] | 1 | 2016-08-09T17:39:11.000Z | 2016-08-09T17:39:11.000Z | from loris.parameters.api import AbstractParameter
from unittest.mock import Mock
import pytest
class ProperImpl(AbstractParameter):
    """Minimal concrete AbstractParameter used as a fixture by the tests below."""

    def __init__(self, uri_slice, enabled_features):
        super().__init__(uri_slice, enabled_features)

    @property
    def canonical(self):
        return "canonical version"
class TestAbstractParameter(object):
def test_canonical_required(self):
class WithoutCanonical(AbstractParameter):
def __init__(self, uri_slice, enabled_features):
super(WithoutCanonical, self).__init__(uri_slice, enabled_features)
with pytest.raises(TypeError) as type_error:
w = WithoutCanonical("abc", (), Mock())
assert "Can't instantiate abstract class" in str(type_error.value)
def test_init_required(self):
class WithoutInit(AbstractParameter):
@property
def canonical(self):
return "canonical version"
with pytest.raises(TypeError) as type_error:
w = WithoutInit("abc", (), Mock())
assert "Can't instantiate abstract class" in str(type_error.value)
def test_init_sig_required(self):
class WrongInitSig(AbstractParameter):
def __init__(self):
super(WrongInitSig, self).__init__()
@property
def canonical(self):
return "canonical version"
with pytest.raises(TypeError) as type_error:
WrongInitSig()
assert "__init__() missing 2 required positional" in str(type_error.value)
def test_proper_impl(self):
ProperImpl("foo", ())
def test_stuff_is_defined(self):
p = ProperImpl("foo", ())
assert p.uri_slice == "foo"
assert p.enabled_features == ()
| 32.509091 | 83 | 0.649888 | 1,686 | 0.942953 | 0 | 0 | 239 | 0.133669 | 0 | 0 | 192 | 0.107383 |
6496f1be53aa281264d7e156cc9fc75b4d6f2857 | 1,119 | py | Python | examples/scripts/generate-big-event.py | Neloop/pcrf-traffic-generator | 9aaf336c747bbd3dcfb11625a9af65bdddd5291c | [
"MIT"
] | 5 | 2018-07-20T11:31:23.000Z | 2021-03-24T16:22:10.000Z | examples/scripts/generate-big-event.py | Neloop/pcrf-traffic-generator | 9aaf336c747bbd3dcfb11625a9af65bdddd5291c | [
"MIT"
] | 1 | 2021-12-14T20:50:52.000Z | 2021-12-14T20:50:52.000Z | examples/scripts/generate-big-event.py | Neloop/pcrf-traffic-generator | 9aaf336c747bbd3dcfb11625a9af65bdddd5291c | [
"MIT"
] | 4 | 2018-08-22T00:41:28.000Z | 2021-12-03T17:47:04.000Z | import utils
times = range(3, 147, 3);
call_center_list = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 100, 200, 500, 500, 500, 500, 500, 500, 300, 200, 600, 700, 800, 800, 800, 700, 500, 300, 100, 50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ];
classic_list = [ 5000, 3000, 2000, 1500, 1500, 1000, 1000, 800, 1000, 1300, 2000, 3500, 6000, 10000, 20000, 30000, 50000, 70000, 90000, 100000, 150000, 130000, 115000, 105000, 100000, 100000, 100000, 100000, 100000, 100000, 100000, 95000, 95000, 90000, 80000, 75000, 120000, 110000, 100000, 80000, 50000, 40000, 30000, 25000, 15000, 10000, 8000, 5000 ];
malfunctioning_list = [ 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 3, 5, 7, 10, 15, 20, 30, 40, 50, 60, 70, 80, 80, 80, 80, 80, 80, 80, 80, 80, 70, 60, 50, 50, 50, 50, 30, 30, 20, 20, 20, 20, 10, 10, 5, 5, 5, 3 ];
travelling_list = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 3, 5, 10, 30, 50, 100, 200, 300, 500, 500, 500, 500, 400, 500, 600, 600, 700, 600, 500, 500, 300, 200, 100, 50, 30, 20, 10, 10, 10, 0, 0, 0, 0, 0, 0 ];
utils.print_real_life_based(times, call_center_list, classic_list, malfunctioning_list, travelling_list)
| 111.9 | 353 | 0.607685 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
6497af3ad4bf8548fe4f42285895e50ab7e9a15c | 3,992 | py | Python | 29_ticTacToeGame.py | hjlarrea/pythonExercises | f634489d181292bc51b4a4770b497c1f57e097d7 | [
"MIT"
] | null | null | null | 29_ticTacToeGame.py | hjlarrea/pythonExercises | f634489d181292bc51b4a4770b497c1f57e097d7 | [
"MIT"
] | null | null | null | 29_ticTacToeGame.py | hjlarrea/pythonExercises | f634489d181292bc51b4a4770b497c1f57e097d7 | [
"MIT"
] | null | null | null | #This exercise is Part 4 of 4 of the Tic Tac Toe exercise series. The other exercises are: Part 1, Part 2, and Part 3.
#
#In 3 previous exercises, we built up a few components needed to build a Tic Tac Toe game in Python:
#
# Draw the Tic Tac Toe game board
# Checking whether a game board has a winner
# Handle a player move from user input
#
#The next step is to put all these three components together to make a two-player Tic Tac Toe game! Your challenge in
# this exercise is to use the functions from those previous exercises all together in the same program to make a two-
# player game that you can play with a friend. There are a lot of choices you will have to make when completing this
# exercise, so you can go as far or as little as you want with it.
#
#Here are a few things to keep in mind:
#
# You should keep track of who won - if there is a winner, show a congratulatory message on the screen.
# If there are no more moves left, don’t ask for the next player’s move!
#
#As a bonus, you can ask the players if they want to play again and keep a running tally of who won more - Player 1
# or Player 2.
def printBoard(gameBoard):
    """Render the 3x3 board to stdout; cells holding 0 show as blanks."""
    rendered = []
    for row in gameBoard:
        rendered.append(" ---" * 3)
        cells = "".join("| {0} ".format(cell if cell != 0 else " ") for cell in row)
        rendered.append(cells + "|")
    # Closing border under the last row.
    rendered.append(" ---" * 3)
    print("\n".join(rendered))
def checkIfWin(board):
    """Return the winning symbol on a 3x3 board, or 0 if nobody has won.

    Empty cells are 0.  Fixes two bugs in the original implementation:
    - a row (or column) of all-empty cells returned 0 immediately, which
      aborted the scan and masked genuine wins elsewhere on the board;
    - a column win returned ``board[i][0]`` (wrong cell) instead of
      ``board[0][i]``.
    """
    # Rows and columns: only non-empty lines count as wins.
    for i in range(3):
        if board[i][0] != 0 and board[i][0] == board[i][1] == board[i][2]:
            return board[i][0]
        if board[0][i] != 0 and board[0][i] == board[1][i] == board[2][i]:
            return board[0][i]
    # Both diagonals pass through the centre cell.
    center = board[1][1]
    if center != 0 and (board[0][0] == center == board[2][2]
                        or board[0][2] == center == board[2][0]):
        return center
    return 0
def placeMovement(board, x, y, playerSymbol):
    """Write playerSymbol into board[x][y] and return the board.

    Raises NameError("Position Taken") when the cell is already occupied,
    matching the exception callers catch.
    """
    if board[x][y] != 0:
        raise NameError("Position Taken")
    board[x][y] = playerSymbol
    return board
def getUserInput(playerNumber, playerSymbol, position):
    """Prompt until the player enters 1, 2 or 3; return it zero-based."""
    prompt = "Player {0} ({1}), please input the {2} for your movement: ".format(
        playerNumber, playerSymbol, position)
    while True:
        try:
            choice = int(input(prompt))
        except ValueError:
            print("Value is not a number.")
            continue
        if 1 <= choice <= 3:
            # Board indices are zero-based, the UI is one-based.
            return choice - 1
        print("Value should be 1, 2 or 3.")
if __name__ == "__main__":
    # Player number -> board symbol.
    players = {
        1: "X",
        2: "O"
    }
    # Running tally of games won per player.
    wins = {
        1: 0,
        2: 0
    }
    while True:  # one iteration per game
        board = [[0,0,0],[0,0,0],[0,0,0]]
        for i in range (0,9):
            printBoard(board)
            # Keep prompting until the move lands on a free cell.
            while True:
                # Player 1 moves on even turns, player 2 on odd turns.
                goesNext = 1 if i % 2 == 0 else 2
                x = getUserInput(goesNext,players[goesNext],"row")
                y = getUserInput(goesNext,players[goesNext],"column")
                try:
                    board = placeMovement(board,x,y,players[goesNext])
                    break
                except NameError:
                    print("Position is already taken. Try another one.")
            # A win is only possible from the 5th move onward.
            if i >= 4:
                if checkIfWin(board) != 0:
                    wins[goesNext]+=1
                    printBoard(board)
                    print("Player {0} won!".format(goesNext))
                    break
        # NOTE(review): a drawn game (9 moves, no winner) falls through
        # silently to the replay prompt with no message.
        while True:
            print("Player 1 won {0}, Player 2 won {1} times!".format(wins[1],wins[2]))
            playAgain = input("Do you want to play again? (Yes/No): ")
            if playAgain.lower() != 'yes' and playAgain.lower() != 'no':
                print("Only valid options are 'Yes' and 'No'.")
            else:
                break
        if playAgain.lower() == 'no':
            print("Thanks for playing!")
            break
6499166ff9e4140b4e105ce3ebf038b47555e332 | 2,149 | py | Python | rdr_server/model/site.py | robabram/raw-data-repository-v2 | a8e1a387d9ea3e4be3ec44473d026e3218f23509 | [
"BSD-3-Clause"
] | null | null | null | rdr_server/model/site.py | robabram/raw-data-repository-v2 | a8e1a387d9ea3e4be3ec44473d026e3218f23509 | [
"BSD-3-Clause"
] | 2 | 2021-02-08T20:31:00.000Z | 2021-04-30T20:44:44.000Z | rdr_server/model/site.py | robabram/raw-data-repository-v2 | a8e1a387d9ea3e4be3ec44473d026e3218f23509 | [
"BSD-3-Clause"
] | null | null | null | from rdr_server.common.enums import SiteStatus, EnrollingStatus, DigitalSchedulingStatus, ObsoleteStatus
from sqlalchemy import Column, Integer, String, Date, Float, ForeignKey, UnicodeText
from rdr_server.model.base_model import BaseModel, ModelMixin, ModelEnum
class Site(ModelMixin, BaseModel):
    """SQLAlchemy model for the ``site`` table."""
    __tablename__ = 'site'
    # External numeric identifier for the site.
    siteId = Column('site_id', Integer, unique=True)
    siteName = Column('site_name', String(255), nullable=False)
    # The Google group for the site; this is a unique key used externally.
    googleGroup = Column('google_group', String(255), nullable=False, unique=True)
    # MayoLINK client number — presumably the lab-ordering account; confirm.
    mayolinkClientNumber = Column('mayolink_client_number', Integer)
    organizationId = Column('organization_id', Integer,
                            ForeignKey('organization.organization_id'))
    # Deprecated; this is being replaced by organizationId.
    hpoId = Column('hpo_id', Integer, ForeignKey('hpo.hpo_id'))
    # Enum-backed status columns (enums imported from rdr_server.common.enums).
    siteStatus = Column('site_status', ModelEnum(SiteStatus))
    enrollingStatus = Column('enrolling_status', ModelEnum(EnrollingStatus))
    digitalSchedulingStatus = Column('digital_scheduling_status', ModelEnum(DigitalSchedulingStatus))
    # *_ES columns appear to hold Spanish (es) variants of the adjacent
    # field — confirm with the consumer.
    scheduleInstructions = Column('schedule_instructions', String(2048))
    scheduleInstructions_ES = Column('schedule_instructions_es', String(2048))
    launchDate = Column('launch_date', Date)
    notes = Column('notes', UnicodeText)
    notes_ES = Column('notes_es', UnicodeText)
    # Geographic location and contact details.
    latitude = Column('latitude', Float)
    longitude = Column('longitude', Float)
    timeZoneId = Column('time_zone_id', String(1024))
    directions = Column('directions', UnicodeText)
    physicalLocationName = Column('physical_location_name', String(1024))
    address1 = Column('address_1', String(1024))
    address2 = Column('address_2', String(1024))
    city = Column('city', String(255))
    state = Column('state', String(2))
    zipCode = Column('zip_code', String(10))
    phoneNumber = Column('phone_number', String(80))
    adminEmails = Column('admin_emails', String(4096))
    link = Column('link', String(255))
    # Obsolescence marker (enum), rather than hard deletion.
    isObsolete = Column('is_obsolete', ModelEnum(ObsoleteStatus))
| 51.166667 | 104 | 0.731038 | 1,882 | 0.875756 | 0 | 0 | 0 | 0 | 0 | 0 | 556 | 0.258725 |
6499a0c1afe410276dc4e6cb49747784c3d9840c | 1,842 | py | Python | hacku/contents/models/contents.py | aleducode/hacku_backend | d70f303dcbcb3fecf5d4a47f20e9985846fc766c | [
"MIT"
] | 1 | 2021-09-15T19:22:18.000Z | 2021-09-15T19:22:18.000Z | hacku/contents/models/contents.py | alejandroduquec/hacku_backend | d70f303dcbcb3fecf5d4a47f20e9985846fc766c | [
"MIT"
] | null | null | null | hacku/contents/models/contents.py | alejandroduquec/hacku_backend | d70f303dcbcb3fecf5d4a47f20e9985846fc766c | [
"MIT"
] | 1 | 2021-09-15T19:24:34.000Z | 2021-09-15T19:24:34.000Z | """Contents Models."""
# Django
from django.db import models
from django.contrib.postgres.fields import JSONField
# utilities
from hacku.utils.models import HackuModel
class ContentType(models.Model):
    """Format/type of a piece of content (referenced by Content.content_type)."""
    # Human-readable name of the content type.
    name = models.CharField('Content type name', max_length=140)
    # URL-safe unique identifier.
    slug_name = models.SlugField(unique=True, max_length=40)
    def __str__(self):
        """Return content type name."""
        return str(self.name)
class ContentArea(models.Model):
    """Subject area of a piece of content (referenced by Content.content_area)."""
    # Human-readable name of the content area.
    name = models.CharField('Content area name', max_length=140)
    # URL-safe unique identifier.
    slug_name = models.SlugField(unique=True, max_length=40)
    def __str__(self):
        """Return content area name."""
        return str(self.name)
class Content(HackuModel):
    """A content item: a titled piece delivered via external URL or inline HTML."""
    title = models.TextField(
        'Content Title'
    )
    # External link; optional because content may instead carry html_content.
    url = models.URLField(
        'Url Field',
        max_length=500,
        null=True,
        blank=True
    )
    # Inline HTML body; optional alternative to `url`.
    html_content = models.TextField(
        'Html Content',
        null=True,
        blank=True
    )
    content_type = models.ForeignKey(
        'ContentType',
        on_delete=models.CASCADE,
        null=True,
        help_text='Type of content.'
    )
    content_area = models.ForeignKey(
        'ContentArea',
        on_delete=models.CASCADE,
        help_text='Content Area.'
    )
    # Free-form JSON payload.  NOTE(review): the verbose name "Content
    # Metada" has a typo; it is a runtime string, so it is left unchanged.
    meta_data = JSONField(
        'Content Metada',
        null=True,
        blank=True
    )
    duration = models.TimeField(
        'Estimated Duration',
        auto_now=False,
        auto_now_add=False,
        null=True,
        blank=True
    )
    expertise_percentage = models.FloatField(
        'Expertise Percentage',
        default=0
    )
    def __str__(self):
        """Return content title."""
        return str(self.title)
| 20.696629 | 64 | 0.604235 | 1,662 | 0.90228 | 0 | 0 | 0 | 0 | 0 | 0 | 402 | 0.218241 |
649a993277a42a61a1570384faaec1b7bdd02010 | 2,096 | py | Python | passwd_validate/utils.py | clamytoe/Password-Validate | 3ce14ce0e3fa325dc578aaaf5e051e71d195f271 | [
"MIT"
] | 2 | 2018-07-08T17:36:59.000Z | 2018-10-19T22:51:33.000Z | passwd_validate/utils.py | clamytoe/Password-Validate | 3ce14ce0e3fa325dc578aaaf5e051e71d195f271 | [
"MIT"
] | 1 | 2018-05-16T00:25:42.000Z | 2018-05-16T00:25:42.000Z | passwd_validate/utils.py | clamytoe/Password-Validate | 3ce14ce0e3fa325dc578aaaf5e051e71d195f271 | [
"MIT"
] | 4 | 2018-04-18T18:18:40.000Z | 2018-09-26T16:33:54.000Z | # _*_ coding: utf-8 _*_
"""
passwd_validate.utils
---------------------
This module provides utility functions that are used within password_validate
that are also useful for external consumption.
"""
import hashlib
from os.path import abspath, dirname, join
# Directory (relative to this module) holding the bundled wordlists.
DICTIONARY_LOC = "dictionary_files"
# Individual wordlist filenames.
DICTIONARY = "dictionary.txt"
PHPBB = "phpbb.txt"
ROCKYOU = "rockyou.txt"
# Wordlists actually scanned by not_in_dict(); note that ROCKYOU is
# defined above but deliberately(?) not included here.
DICTS = [
    DICTIONARY,
    PHPBB,
]
def hashit(password):
    """
    Hash a string with SHA-512.

    :param password: String to hash
    :return: String with a hexdigest of the hashed string.
    """
    return hashlib.sha512(password.encode("utf-8")).hexdigest()
def not_in_dict(password):
    """
    Check the bundled wordlists for an exact match of the given password.

    Words shorter than five characters found in dictionary.txt are ignored.
    Finding the password in any list is a failed check, so the password is
    not considered valid.

    :param password: String to check
    :return: Boolean, True if not found, False if it is
    """
    for wordlist in DICTS:
        skip_short = "dictionary.txt" in wordlist
        for candidate in read_file(wordlist):
            if skip_short and len(candidate) < 5:
                # common words under 5 characters are not disqualifying
                continue
            if candidate == password:
                return False
    return True
def read_file(filename):
    """
    Yield each line of a bundled dictionary file, decoded and stripped.

    Lines that are not valid UTF-8 are silently skipped.

    :param filename: String with the filename of the dictionary
    :return: String generator with each line of the dictionary
    """
    path = join(dirname(abspath(__file__)), DICTIONARY_LOC, filename)
    with open(path, "rb") as handle:
        for raw in handle:
            try:
                decoded = raw.decode("utf-8")
            except UnicodeDecodeError:
                # not every wordlist is clean UTF-8; skip undecodable lines
                continue
            yield decoded.rstrip()
| 29.942857 | 79 | 0.656966 | 0 | 0 | 594 | 0.283397 | 0 | 0 | 0 | 0 | 1,158 | 0.552481 |
649b03207f4c323a4d1d709a72d7c801428bb675 | 8,129 | py | Python | downstream/finetune/eval.py | YihengZhang-CV/Sequence-Contrastive-Learning | f0b1b48731de808694e57da348e366df57dcd8c7 | [
"MIT"
] | 31 | 2020-12-14T13:58:34.000Z | 2022-03-24T02:43:32.000Z | downstream/finetune/eval.py | YihengZhang-CV/Sequence-Contrastive-Learning | f0b1b48731de808694e57da348e366df57dcd8c7 | [
"MIT"
] | 4 | 2021-02-26T08:46:39.000Z | 2022-03-26T06:57:25.000Z | downstream/finetune/eval.py | YihengZhang-CV/Sequence-Contrastive-Learning | f0b1b48731de808694e57da348e366df57dcd8c7 | [
"MIT"
] | 3 | 2021-02-02T12:54:54.000Z | 2022-01-17T06:48:31.000Z | import argparse
import os
import json
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel
from torchvision import transforms
from seco_util import clip_transforms
from seco_util.logger import setup_logger
from dataset.video_dataset import VideoRGBTestDataset
from model.model_factory import get_model_by_name
import numpy as np
def parse_option():
    """Parse command-line options for distributed multi-crop evaluation.

    :return: argparse.Namespace with dataset, sampling, network and IO
        settings; ``--list-file``, ``--root-path`` and ``--num-classes``
        are required.
    """
    parser = argparse.ArgumentParser('training')
    # dataset
    parser.add_argument('--list-file', type=str, required=True, help='list of dataset')
    parser.add_argument('--root-path', type=str, required=True, help='root path of dataset')
    parser.add_argument('--format', type=str, default='LMDB',
                        choices=["LMDB"], help="video format")
    # other parameters
    parser.add_argument('--time-dim', type=str, default='C',
                        choices=["T", "C"], help="dimension for time")
    parser.add_argument('--crop-size', type=int, default=256, help='crop_size')
    parser.add_argument('--num-classes', type=int, required=True, help='num of predict classes')
    parser.add_argument('--batch-size', type=int, default=16, help='batch_size')
    parser.add_argument('--num-workers', type=int, default=8, help='num of workers to use')
    parser.add_argument('--clip-length', type=int, default=16, help='num of clip length')
    parser.add_argument('--num-steps', type=int, default=1, help='num of sampling steps')
    parser.add_argument('--num-segments', type=int, default=1, help='num of segments')
    parser.add_argument('--num-clips', type=int, default=20, help='num of sampled clips')
    parser.add_argument('--num-gpu', type=int, default=4, help='num of gpu')
    # network
    parser.add_argument('--net-name', type=str, default='resnet50', help='name of network architecture')
    parser.add_argument('--pooling-name', type=str, default='PoolingAverage', help='name of pooling architecture')
    parser.add_argument('--dropout-ratio', type=float, default=0.5, help='dropout ratio')
    # io
    parser.add_argument('--pretrained-model', default='', type=str, metavar='PATH',
                        help='path to pretrained weights like imagenet (default: none)')
    parser.add_argument('--output-dir', type=str, default='./output', help='output director')
    parser.add_argument('--num-crop', type=int, default=3, help='the place index [0,1,2]')
    # misc
    parser.add_argument("--local_rank", type=int, help='local rank for DistributedDataParallel')
    args = parser.parse_args()
    return args
def get_loader(args):
    """Build the distributed test DataLoader for the current crop index.

    ``args.crop_idx`` selects the spatial crop transform (0 -> center,
    1 -> first, 2 -> third); any other value leaves ``crop`` unbound and
    raises NameError below.
    """
    if args.crop_idx == 0:
        crop = clip_transforms.ClipCenterCrop
    elif args.crop_idx == 1:
        crop = clip_transforms.ClipFirstCrop
    elif args.crop_idx == 2:
        crop = clip_transforms.ClipThirdCrop
    test_transform = transforms.Compose([
        clip_transforms.ClipResize(size=args.crop_size),
        crop(size=args.crop_size),
        clip_transforms.ToClipTensor(),
        clip_transforms.ClipNormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        # "T": stack frames on a new time dim; "C": concatenate along channels.
        transforms.Lambda(lambda clip: torch.stack(clip, dim=1)) if args.time_dim == "T" else transforms.Lambda(
            lambda clip: torch.cat(clip, dim=0))
    ])
    test_dataset = VideoRGBTestDataset(args.list_file, num_clips=args.num_clips,
                                       transform=test_transform, root_path=args.root_path,
                                       clip_length=args.clip_length, num_steps=args.num_steps,
                                       num_segments=args.num_segments,
                                       format=args.format)
    # Non-shuffling sampler so every rank scores a deterministic shard.
    test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset, shuffle=False)
    test_loader = torch.utils.data.DataLoader(
        test_dataset, batch_size=args.batch_size, shuffle=False,
        num_workers=args.num_workers, pin_memory=True,
        sampler=test_sampler, drop_last=False)
    return test_loader
def build_model(args):
    """Instantiate the network on GPU and optionally load pretrained weights."""
    model = get_model_by_name(
        net_name=args.net_name,
        num_classes=args.num_classes,
        dropout_ratio=args.dropout_ratio,
    )
    model = model.cuda()
    if args.pretrained_model:
        load_pretrained(args, model)
    return model
def load_pretrained(args, model):
    """Load a checkpoint into *model* (non-strict), stripping DDP prefixes."""
    checkpoint = torch.load(args.pretrained_model, map_location='cpu')
    if 'model' in checkpoint:
        # Checkpoints saved from DistributedDataParallel carry a "module." prefix.
        state_dict = {}
        for key, value in checkpoint['model'].items():
            state_dict[key.replace("module.", "")] = value
    else:
        state_dict = checkpoint
    missing, unexpected = model.load_state_dict(state_dict, strict=False)
    logger.info('Missing keys: {}'.format(missing))
    logger.info('Unexpect keys: {}'.format(unexpected))
    logger.info("==> loaded checkpoint '{}'".format(args.pretrained_model))
def merge_score(opt, logger):
    """Combine per-rank score files into a clip-averaged top-1 accuracy.

    Expects ``num_gpu`` files per crop named ``all_scores_<idx>.npy`` in
    ``opt.output_dir`` (written by ``main``).  Scores are re-interleaved
    in the round-robin order the DistributedSampler dealt samples,
    averaged over crops and clips, then compared against the labels in
    the list file.
    """
    num_gpu = opt.num_gpu
    num_crop = opt.num_crop
    num_cls = opt.num_classes
    num_clip = opt.num_clips
    score_dir = opt.output_dir
    list_dir = opt.list_file
    for crop_id in range(num_crop):
        all_num = 0
        all_data = []
        for gpu_id in range(num_gpu):
            all_data.append(np.load(os.path.join(score_dir, 'all_scores_' + str(crop_id * num_gpu + gpu_id) + '.npy')))
            all_num += all_data[-1].shape[0]
        # Undo the round-robin split: rank g held samples g, g+num_gpu, ...
        # (assumes equal-length per-rank shards — confirm; unequal shards
        # would make this strided assignment fail).
        merge_data = np.empty((all_num, num_cls))
        for gpu_id in range(num_gpu):
            merge_data[gpu_id::num_gpu, :] = all_data[gpu_id]
        # make ave score
        num_video = all_num // num_clip
        merge_data = merge_data[0:num_video * num_clip, :]
        if crop_id == 0:
            reshape_data = np.zeros((num_video, num_clip, num_cls))
        # Accumulate the crop-mean across iterations of the outer loop.
        reshape_data += np.reshape(merge_data, (num_video, num_clip, num_cls)) / num_crop
    # make gt
    gt = np.zeros((num_video,))
    lines = open(list_dir, 'r').readlines()
    for idx, line in enumerate(lines):
        ss = line.split(' ')
        label = ss[-1]
        gt[idx] = int(label)
    # Average over clips, then take the argmax class per video.
    pred = (reshape_data.mean(axis=1)).argmax(axis=1)
    acc = (pred == gt).mean()
    logger.info('Top-1 accuracy is {}'.format(acc))
def main(args):
    """Run multi-crop, multi-clip inference and aggregate the scores.

    For each spatial crop index, every rank scores its shard of the test
    set and dumps the per-clip class scores to
    ``all_scores_<world_size * crop_idx + rank>.npy`` in ``args.output_dir``;
    ``merge_score`` then combines the files into a final top-1 accuracy.
    """
    model = build_model(args)
    model.eval()
    model = DistributedDataParallel(model, device_ids=[args.local_rank], broadcast_buffers=False)
    for i in range(args.num_crop):
        args.crop_idx = i
        test_loader = get_loader(args)
        n_data = len(test_loader.dataset)
        logger.info("{}th crop for testing, length of testing dataset: {}".format(i, n_data))
        # routine
        # Pre-allocate for the worst case (the last batch may be smaller);
        # the unused tail is trimmed below.  Fixed: `np.float` was removed
        # in NumPy 1.24 — the builtin `float` is the identical dtype.
        all_scores = np.zeros([len(test_loader) * args.batch_size, args.num_classes], dtype=float)
        top_idx = 0
        with torch.no_grad():
            for idx, (x, cls) in enumerate(test_loader):
                if (idx % 100 == 0) or (idx == len(test_loader) - 1):
                    logger.info('{}/{}'.format(idx, len(test_loader)))
                bsz = x.size(0)
                score = model(x)
                if isinstance(score, list):
                    # Some heads return a pair of score tensors; average them.
                    score_numpy = (score[0].data.cpu().numpy() + score[1].data.cpu().numpy()) / 2
                else:
                    score_numpy = score.data.cpu().numpy()
                all_scores[top_idx: top_idx + bsz, :] = score_numpy
                top_idx += bsz
        all_scores = all_scores[:top_idx, :]
        np.save(os.path.join(args.output_dir, 'all_scores_{}.npy'.format(
            torch.distributed.get_world_size() * args.crop_idx + args.local_rank)), all_scores)
        dist.barrier()
    # evaluate
    merge_score(args, logger)
    logger.info('Finish !')
if __name__ == '__main__':
    opt = parse_option()
    # Bind this process to its GPU before initialising NCCL.
    torch.cuda.set_device(opt.local_rank)
    torch.distributed.init_process_group(backend='nccl', init_method='env://')
    cudnn.benchmark = True
    os.makedirs(opt.output_dir, exist_ok=True)
    logger = setup_logger(output=opt.output_dir, distributed_rank=dist.get_rank(), name="seco-finetune")
    # Only rank 0 persists the resolved config, for reproducibility.
    if dist.get_rank() == 0:
        path = os.path.join(opt.output_dir, "seco_finetune_eval.config.json")
        with open(path, 'w') as f:
            json.dump(vars(opt), f, indent=2)
        logger.info("Full config saved to {}".format(path))
    main(opt)
| 40.849246 | 124 | 0.646697 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,229 | 0.151187 |
649d5d0ba5bc8ab1592032b37368fffa67ba9234 | 2,260 | py | Python | notebooks/data_cleaning/track_meta.py | roannav/learntools | 355a5df6a66562de62254b723da1a9389b9acc49 | [
"Apache-2.0"
] | 359 | 2018-03-23T15:57:52.000Z | 2022-03-25T21:56:28.000Z | notebooks/data_cleaning/track_meta.py | roannav/learntools | 355a5df6a66562de62254b723da1a9389b9acc49 | [
"Apache-2.0"
] | 84 | 2018-06-14T00:06:52.000Z | 2022-02-08T17:25:54.000Z | notebooks/data_cleaning/track_meta.py | roannav/learntools | 355a5df6a66562de62254b723da1a9389b9acc49 | [
"Apache-2.0"
] | 213 | 2018-05-02T19:06:31.000Z | 2022-03-20T15:40:34.000Z | track = dict(
author_username='alexisbcook',
course_name='Data Cleaning',
course_url='https://www.kaggle.com/learn/data-cleaning',
course_forum_url='https://www.kaggle.com/learn-forum/172650'
)
lessons = [ {'topic': topic_name} for topic_name in
['Handling missing values', #1
'Scaling and normalization', #2
'Parsing dates', #3
'Character encodings', #4
'Inconsistent data entry'] #5
]
notebooks = [
dict(
filename='tut1.ipynb',
lesson_idx=0,
type='tutorial',
dataset_sources=['maxhorowitz/nflplaybyplay2009to2016'],
),
dict(
filename='ex1.ipynb',
lesson_idx=0,
type='exercise',
dataset_sources=['aparnashastry/building-permit-applications-data'],
scriptid=10824396
),
dict(
filename='tut2.ipynb',
lesson_idx=1,
type='tutorial',
),
dict(
filename='ex2.ipynb',
lesson_idx=1,
type='exercise',
dataset_sources=['kemical/kickstarter-projects'],
scriptid=10824404
),
dict(
filename='tut3.ipynb',
lesson_idx=2,
type='tutorial',
dataset_sources=['nasa/landslide-events']
),
dict(
filename='ex3.ipynb',
lesson_idx=2,
type='exercise',
dataset_sources=['usgs/earthquake-database', 'smithsonian/volcanic-eruptions'],
scriptid=10824403
),
dict(
filename='tut4.ipynb',
lesson_idx=3,
type='tutorial',
dataset_sources=['kemical/kickstarter-projects']
),
dict(
filename='ex4.ipynb',
lesson_idx=3,
type='exercise',
dataset_sources=['kwullum/fatal-police-shootings-in-the-us'],
scriptid=10824401
),
dict(
filename='tut5.ipynb',
lesson_idx=4,
type='tutorial',
dataset_sources=['alexisbcook/pakistan-intellectual-capital']
),
dict(
filename='ex5.ipynb',
lesson_idx=4,
type='exercise',
dataset_sources=['alexisbcook/pakistan-intellectual-capital'],
scriptid=10824407
),
] | 27.901235 | 87 | 0.55531 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 815 | 0.360619 |
649ec842193e89960a242bc874436fa6915e1321 | 2,921 | py | Python | three_sum.py | jaebradley/leetcode.py | 64634cc7d0e975ddd163f35acb18cc92960b8eb5 | [
"MIT"
] | null | null | null | three_sum.py | jaebradley/leetcode.py | 64634cc7d0e975ddd163f35acb18cc92960b8eb5 | [
"MIT"
] | 2 | 2019-11-13T19:55:49.000Z | 2019-11-13T19:55:57.000Z | three_sum.py | jaebradley/leetcode.py | 64634cc7d0e975ddd163f35acb18cc92960b8eb5 | [
"MIT"
] | null | null | null | """
https://leetcode.com/problems/3sum/
Given an array nums of n integers, are there elements a, b, c in nums such that a + b + c = 0? Find all unique triplets in the array which gives the sum of zero.
Note:
The solution set must not contain duplicate triplets.
Example:
Given array nums = [-1, 0, 1, 2, -1, -4],
A solution set is:
[
[-1, 0, 1],
[-1, -1, 2]
]
"""
class Solution(object):
    def threeSum(self, nums):
        """
        Return all unique triplets in ``nums`` that sum to zero.

        Sorts ``nums`` in place, then anchors each index in turn and scans
        the remaining suffix with two converging pointers.  Duplicate
        values are skipped at all three positions so each triplet appears
        exactly once.  Anchors greater than zero are skipped outright:
        three positive numbers cannot sum to zero.

        :type nums: List[int]
        :rtype: List[List[int]]
        """
        triplets = []
        nums.sort()  # in place — callers observe the sorted list, as before
        size = len(nums)
        for anchor_idx in range(size - 2):
            anchor = nums[anchor_idx]
            if anchor > 0:
                continue
            if anchor_idx > 0 and nums[anchor_idx] == nums[anchor_idx - 1]:
                continue  # this anchor value was already processed
            lo, hi = anchor_idx + 1, size - 1
            while lo < hi:
                total = anchor + nums[lo] + nums[hi]
                if total > 0:
                    # Sum too big: walk `hi` left past every copy of its value.
                    stale = nums[hi]
                    while lo < hi and nums[hi] == stale:
                        hi -= 1
                elif total < 0:
                    # Sum too small: walk `lo` right past every copy of its value.
                    stale = nums[lo]
                    while lo < hi and nums[lo] == stale:
                        lo += 1
                else:
                    triplets.append([anchor, nums[lo], nums[hi]])
                    # Advance both ends past duplicates, right side first.
                    stale = nums[hi]
                    while lo < hi and nums[hi] == stale:
                        hi -= 1
                    stale = nums[lo]
                    while lo < hi and nums[lo] == stale:
                        lo += 1
        return triplets
| 37.448718 | 161 | 0.552208 | 2,544 | 0.870935 | 0 | 0 | 0 | 0 | 0 | 0 | 1,353 | 0.463198 |
649f90abe5a0e2278134d2c05c716ffaecd2b45f | 429 | py | Python | bookwyrm/migrations/0145_sitesettings_version.py | mouse-reeve/fedireads | e3471fcc3500747a1b1deaaca662021aae5b08d4 | [
"CC0-1.0"
] | 270 | 2020-01-27T06:06:07.000Z | 2020-06-21T00:28:18.000Z | bookwyrm/migrations/0145_sitesettings_version.py | mouse-reeve/fedireads | e3471fcc3500747a1b1deaaca662021aae5b08d4 | [
"CC0-1.0"
] | 158 | 2020-02-10T20:36:54.000Z | 2020-06-26T17:12:54.000Z | bookwyrm/migrations/0145_sitesettings_version.py | mouse-reeve/fedireads | e3471fcc3500747a1b1deaaca662021aae5b08d4 | [
"CC0-1.0"
] | 15 | 2020-02-13T21:53:33.000Z | 2020-06-17T16:52:46.000Z | # Generated by Django 3.2.12 on 2022-03-16 18:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the nullable ``version`` CharField to the sitesettings model."""

    dependencies = [
        ("bookwyrm", "0144_alter_announcement_display_type"),
    ]

    operations = [
        migrations.AddField(
            model_name="sitesettings",
            name="version",
            field=models.CharField(blank=True, max_length=10, null=True),
        ),
    ]
| 22.578947 | 73 | 0.624709 | 335 | 0.780886 | 0 | 0 | 0 | 0 | 0 | 0 | 119 | 0.277389 |