hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b401775f5af0e9b7b7978646db33631b271d516f | 4,351 | py | Python | scripts/build_folding_map.py | tsieprawski/md4c | 9d99b1262de3353f0530ac6b31d8c6934003b61f | [
"MIT"
] | 475 | 2016-11-27T18:37:51.000Z | 2022-03-30T19:46:29.000Z | scripts/build_folding_map.py | tsieprawski/md4c | 9d99b1262de3353f0530ac6b31d8c6934003b61f | [
"MIT"
] | 173 | 2016-12-05T01:38:37.000Z | 2022-01-14T10:06:30.000Z | scripts/build_folding_map.py | tsieprawski/md4c | 9d99b1262de3353f0530ac6b31d8c6934003b61f | [
"MIT"
] | 110 | 2016-11-29T20:02:16.000Z | 2022-03-30T23:51:58.000Z | #!/usr/bin/env python3
import os
import sys
import textwrap

# Parse unicode/CaseFolding.txt (Unicode Character Database format) and build
# per-length dictionaries of codepoint -> case-folded mapping.
self_path = os.path.dirname(os.path.realpath(__file__));
f = open(self_path + "/unicode/CaseFolding.txt", "r")

# Keep only status "C" (common) and "F" (full) records, i.e. "full" folding.
status_list = [ "C", "F" ]
# folding_list[n-1]: codepoint -> mapping for mappings of length n (n = 1..3).
folding_list = [ dict(), dict(), dict() ]

# Filter the foldings for "full" folding.
for line in f:
    # Strip trailing "#" comments and whitespace; skip blank lines.
    comment_off = line.find("#")
    if comment_off >= 0:
        line = line[:comment_off]
    line = line.strip()
    if not line:
        continue

    # Record layout: <codepoint>; <status>; <mapping>; <tail...>
    raw_codepoint, status, raw_mapping, ignored_tail = line.split(";", 3)
    if not status.strip() in status_list:
        continue
    codepoint = int(raw_codepoint.strip(), 16)
    # Mapping is one to three hex codepoints separated by spaces.
    mapping = [int(it, 16) for it in raw_mapping.strip().split(" ")]
    mapping_len = len(mapping)

    # CaseFolding.txt mappings are at most 3 codepoints; anything else is a
    # malformed input file.
    if mapping_len in range(1, 4):
        folding_list[mapping_len-1][codepoint] = mapping
    else:
        assert(False)
f.close()
# If we assume that (index0 ... index-1) makes a range (as defined below),
# check that the newly provided index is compatible with the range too; i.e.
# verify that the range can be extended without breaking its properties.
#
# Currently, we can handle ranges which:
#
# (1) either form consecutive sequence of codepoints and which map that range
# to other consecutive range of codepoints (of the same length);
#
# (2) or a consecutive sequence of codepoints with step 2 where each codepoint
# CP is mapped to the codepoint CP+1
# (e.g. 0x1234 -> 0x1235; 0x1236 -> 0x1237; 0x1238 -> 0x1239; ...).
#
# Note: When the codepoints in the range are mapped to multiple codepoints,
# only the 1st mapped codepoint is considered. All the other ones have to be
# shared by all the mappings covered by the range.
# Emit one pair of C arrays per mapping length: FOLD_MAP_n (codepoint
# records) and FOLD_MAP_n_DATA (the mapped codepoints).
for mapping_len in range(1, 4):
    folding = folding_list[mapping_len-1]
    codepoint_list = list(folding)

    index0 = 0
    count = len(folding)
    records = list()
    data_records = list()
    # Greedily grow [index0, index1) while the range stays compatible.
    # NOTE(review): is_range_compatible() and mapping_str() are defined
    # elsewhere in this script (not shown in this excerpt).
    while index0 < count:
        index1 = index0 + 1
        while index1 < count and is_range_compatible(folding, codepoint_list, index0, index1):
            index1 += 1

        if index1 - index0 > 2:
            # Range of codepoints: emit R(first,last) plus the data for the
            # two endpoint mappings.
            records.append("R(0x{:04x},0x{:04x})".format(codepoint_list[index0], codepoint_list[index1-1]))
            data_records.append(mapping_str(data_records, folding[codepoint_list[index0]]))
            data_records.append(mapping_str(data_records, folding[codepoint_list[index1-1]]))
            index0 = index1
        else:
            # Single codepoint
            records.append("S(0x{:04x})".format(codepoint_list[index0]))
            data_records.append(mapping_str(data_records, folding[codepoint_list[index0]]))
            index0 += 1

    # Write the arrays to stdout, wrapped at 110 columns.
    sys.stdout.write("static const unsigned FOLD_MAP_{}[] = {{\n".format(mapping_len))
    sys.stdout.write("\n".join(textwrap.wrap(", ".join(records), 110,
            initial_indent = " ", subsequent_indent=" ")))
    sys.stdout.write("\n};\n")
    sys.stdout.write("static const unsigned FOLD_MAP_{}_DATA[] = {{\n".format(mapping_len))
    sys.stdout.write("\n".join(textwrap.wrap(", ".join(data_records), 110,
            initial_indent = " ", subsequent_indent=" ")))
    sys.stdout.write("\n};\n")
| 35.958678 | 107 | 0.6302 |
b4024d84d4513279dde8eeb7b78e3491e9770d6e | 1,038 | py | Python | app/api/v1/models/user_model.py | munniomer/Send-IT-Api-v1 | 17041c987638c7e47c7c2ebed29bf7e2b5156bed | [
"CNRI-Python",
"OML"
] | null | null | null | app/api/v1/models/user_model.py | munniomer/Send-IT-Api-v1 | 17041c987638c7e47c7c2ebed29bf7e2b5156bed | [
"CNRI-Python",
"OML"
] | null | null | null | app/api/v1/models/user_model.py | munniomer/Send-IT-Api-v1 | 17041c987638c7e47c7c2ebed29bf7e2b5156bed | [
"CNRI-Python",
"OML"
] | 1 | 2019-02-05T07:44:19.000Z | 2019-02-05T07:44:19.000Z | users = []
| 26.615385 | 85 | 0.531792 |
b402736fe41a1923f5e1f2be2b9ac727b56303ec | 6,644 | py | Python | Codigo/pruebas/Jose_Gonzalez/Solucion_PruebaTipoPiso.py | JoaquinRodriguez2006/RoboCup_Junior_Material | 04f295010272fb8287c8f214bf69f1a61ee2b7cf | [
"MIT"
] | null | null | null | Codigo/pruebas/Jose_Gonzalez/Solucion_PruebaTipoPiso.py | JoaquinRodriguez2006/RoboCup_Junior_Material | 04f295010272fb8287c8f214bf69f1a61ee2b7cf | [
"MIT"
] | null | null | null | Codigo/pruebas/Jose_Gonzalez/Solucion_PruebaTipoPiso.py | JoaquinRodriguez2006/RoboCup_Junior_Material | 04f295010272fb8287c8f214bf69f1a61ee2b7cf | [
"MIT"
] | 1 | 2022-03-19T22:57:33.000Z | 2022-03-19T22:57:33.000Z | from controller import Robot
from controller import Motor
from controller import PositionSensor
from controller import Robot, DistanceSensor, GPS, Camera, Receiver, Emitter
import cv2
import numpy as np
import math
import time
robot = Robot()
timeStep = 32
tile_size = 0.12
speed = 6.28
media_baldoza = 0.06
estado = 1
start = 0
global r
global g
global b
r = 0
g = 0
b = 0
# start = robot.getTime()
# Camera initialization
camera = robot.getDevice("camera3")
camera.enable(timeStep)
# Colour sensor initialization
colour_sensor = robot.getDevice("colour_sensor")
colour_sensor.enable(timeStep)
# Distance sensor initialization
distancia_sensor1 = robot.getDevice("distance sensor1")
distancia_sensor1.enable(timeStep)
# Motor initialization
ruedaIzquierda = robot.getDevice("wheel1 motor")
ruedaDerecha = robot.getDevice("wheel2 motor")
ruedaIzquierda.setPosition(float('inf'))
ruedaDerecha.setPosition(float('inf'))
rIzq_encoder = ruedaIzquierda.getPositionSensor()
rDer_encoder = ruedaDerecha.getPositionSensor()
rIzq_encoder.enable(timeStep)
rDer_encoder.enable(timeStep)
# Functions
"""
# Camara
image = camera.getImage()
imagen = np.frombuffer(image, np.uint8).reshape((camera.getHeight(), camera.getWidth(), 4))
frame = cv2.cvtColor(imagen, cv2.COLOR_BGRA2BGR)
cv2.imshow("frame", frame)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Grayscale
cv2.imshow("grayScale", frame)
cv2.threshold(frame, 80, 255, cv2.THRESH_BINARY) # Threshold
cv2.imshow("thresh", frame)
cv2.waitKey(1)
# Sensor de Distancia
print("Distancia: " + str(distancia_sensor1.getValue()))
"""
gyro = robot.getDevice("gyro")
gyro.enable(timeStep)
angulo_actual = 0
tiempo_anterior = robot.getTime()
contador = 0
while robot.step(timeStep) != -1:
avance("medio")
| 28.033755 | 124 | 0.615593 |
b403104a45ede1110a9c5cca95878c43993fc086 | 433 | py | Python | drip/migrations/0002_querysetrule_rule_type.py | RentFreeMedia/django-drip-campaigns | a71e5d3a3f242c04a6f7f921b85aa01daff467f8 | [
"MIT"
] | 46 | 2020-07-23T17:47:33.000Z | 2021-11-25T16:57:35.000Z | drip/migrations/0002_querysetrule_rule_type.py | RentFreeMedia/django-drip-campaigns | a71e5d3a3f242c04a6f7f921b85aa01daff467f8 | [
"MIT"
] | 54 | 2020-06-19T17:57:42.000Z | 2021-09-22T19:34:48.000Z | drip/migrations/0002_querysetrule_rule_type.py | kaozdl/django-drip | a71e5d3a3f242c04a6f7f921b85aa01daff467f8 | [
"MIT"
] | 19 | 2020-08-30T05:29:13.000Z | 2022-02-08T20:27:17.000Z | # Generated by Django 3.0.7 on 2020-11-25 13:13
from django.db import migrations, models
| 22.789474 | 104 | 0.577367 |
b4040d06558b8483134d9ca3f4c2ab385bbdc016 | 3,393 | py | Python | venv/lib/python3.6/site-packages/cligj/__init__.py | booklover98/A-_pathfinding | 09afebfc953ce9773bc4fc781eb6d0496caccfba | [
"MIT"
] | null | null | null | venv/lib/python3.6/site-packages/cligj/__init__.py | booklover98/A-_pathfinding | 09afebfc953ce9773bc4fc781eb6d0496caccfba | [
"MIT"
] | 7 | 2021-06-04T23:45:15.000Z | 2022-03-12T00:44:14.000Z | virtual/Lib/site-packages/cligj/__init__.py | owenabrams/bluemoonkampala | 8801df64e91683a2641f2cd4bcbe03ebc7f40828 | [
"MIT"
] | null | null | null | # cligj
# Shared arguments and options.
# Reusable click argument/option decorators shared by GeoJSON CLI commands;
# apply them as decorators on click command functions.
import click

from .features import normalize_feature_inputs

# Arguments.

# Multiple input files.
files_in_arg = click.argument(
    'files',
    nargs=-1,
    type=click.Path(resolve_path=True),
    required=True,
    metavar="INPUTS...")

# Multiple files, last of which is an output file.
files_inout_arg = click.argument(
    'files',
    nargs=-1,
    type=click.Path(resolve_path=True),
    required=True,
    metavar="INPUTS... OUTPUT")

# Features from files, command line args, or stdin.
# Returns the input data as an iterable of GeoJSON Feature-like
# dictionaries.
features_in_arg = click.argument(
    'features',
    nargs=-1,
    callback=normalize_feature_inputs,
    metavar="FEATURES...")

# Options.

# count=True: repeatable flags (e.g. -vv) increment the value.
verbose_opt = click.option(
    '--verbose', '-v',
    count=True,
    help="Increase verbosity.")

quiet_opt = click.option(
    '--quiet', '-q',
    count=True,
    help="Decrease verbosity.")

# Format driver option.
format_opt = click.option(
    '-f', '--format', '--driver', 'driver',
    default='GTiff',
    help="Output format driver")

# JSON formatting options.
indent_opt = click.option(
    '--indent',
    type=int,
    default=None,
    help="Indentation level for JSON output")

compact_opt = click.option(
    '--compact/--not-compact',
    default=False,
    help="Use compact separators (',', ':').")

# Coordinate precision option (-1 presumably means full precision -- confirm
# against consumers).
precision_opt = click.option(
    '--precision',
    type=int,
    default=-1,
    help="Decimal precision of coordinates.")

# Geographic (default), projected, or Mercator switch. All three flags store
# into the same 'projection' destination.
projection_geographic_opt = click.option(
    '--geographic',
    'projection',
    flag_value='geographic',
    default=True,
    help="Output in geographic coordinates (the default).")

projection_projected_opt = click.option(
    '--projected',
    'projection',
    flag_value='projected',
    help="Output in dataset's own, projected coordinates.")

projection_mercator_opt = click.option(
    '--mercator',
    'projection',
    flag_value='mercator',
    help="Output in Web Mercator coordinates.")

# Feature collection or feature sequence switch.
sequence_opt = click.option(
    '--sequence/--no-sequence',
    default=False,
    help="Write a LF-delimited sequence of texts containing individual "
         "objects or write a single JSON text containing a feature "
         "collection object (the default).")

use_rs_opt = click.option(
    '--rs/--no-rs',
    'use_rs',
    default=False,
    help="Use RS (0x1E) as a prefix for individual texts in a sequence "
         "as per http://tools.ietf.org/html/draft-ietf-json-text-sequence-13 "
         "(default is False).")

# GeoJSON output mode option.
| 24.586957 | 78 | 0.660183 |
b404133dc455d3af035e0832fd933c69627e3b05 | 2,031 | py | Python | setup.py | ELC/testnbdev | 571400a9308ba91f05f6fabad5d3f79fd4417ab1 | [
"Apache-2.0"
] | 1 | 2021-02-19T15:34:58.000Z | 2021-02-19T15:34:58.000Z | setup.py | ELC/testnbdev | 571400a9308ba91f05f6fabad5d3f79fd4417ab1 | [
"Apache-2.0"
] | 2 | 2021-09-28T05:49:28.000Z | 2022-02-26T10:24:52.000Z | setup.py | ELC/nbdev_template | 571400a9308ba91f05f6fabad5d3f79fd4417ab1 | [
"Apache-2.0"
] | null | null | null | from pkg_resources import parse_version
from configparser import ConfigParser
import setuptools
# nbdev-template style setup script: all project metadata is read from
# settings.ini and forwarded to setuptools.setup().
assert parse_version(setuptools.__version__)>=parse_version('36.2')

# note: all settings are in settings.ini; edit there, not here
config = ConfigParser(delimiters=['='])
config.read('settings.ini')
config = config['DEFAULT']

# config_keys are forwarded verbatim to setup(); every key in `expected`
# must be present in settings.ini.
config_keys = 'version description keywords author author_email'.split()
expected = config_keys + "lib_name user branch license status min_python audience language".split()
for setting in expected:
    assert setting in config, f"missing expected setting: {setting}"
setup_config = {setting:config[setting] for setting in config_keys}

# license id -> (license name, trove-classifier suffix)
licenses = {
    'apache2': ('Apache Software License 2.0','OSI Approved :: Apache Software License'),
}
# Development-status classifiers indexed by the numeric `status` setting.
statuses = [ '1 - Planning', '2 - Pre-Alpha', '3 - Alpha',
    '4 - Beta', '5 - Production/Stable', '6 - Mature', '7 - Inactive' ]
py_versions = '2.0 2.1 2.2 2.3 2.4 2.5 2.6 2.7 3.0 3.1 3.2 3.3 3.4 3.5 3.6 3.7 3.8 3.9'.split()

requirements = config.get('requirements','').split()
lic = licenses[config['license']]
min_python = config['min_python']

setuptools.setup(
    name = config['lib_name'],
    license = lic[0],
    # Classifiers: status/audience/license/language plus one
    # "Programming Language" entry per version from min_python upward.
    classifiers = [
        'Development Status :: ' + statuses[int(config['status'])],
        'Intended Audience :: ' + config['audience'].title(),
        'License :: ' + lic[1],
        'Natural Language :: ' + config['language'].title(),
    ] + [f'Programming Language :: Python :: {version}' for version in py_versions[py_versions.index(min_python):]],
    url = config['git_url'],
    packages = setuptools.find_packages(),
    include_package_data = True,
    install_requires = requirements,
    dependency_links = config.get('dep_links','').split(),
    python_requires  = '>=' + config['min_python'],
    long_description = open('README.md').read(),
    long_description_content_type = 'text/markdown',
    zip_safe = False,
    entry_points = { 'console_scripts': config.get('console_scripts','').split() },
    **setup_config)
| 39.823529 | 116 | 0.681438 |
b40507b05e0b887443fd6d70a1bf0020514bacc1 | 3,730 | py | Python | amaascore/tools/generate_party.py | amaas-fintech/amaas-core-sdk-python | bd77884de6e5ab05d864638addeb4bb338a51183 | [
"Apache-2.0"
] | null | null | null | amaascore/tools/generate_party.py | amaas-fintech/amaas-core-sdk-python | bd77884de6e5ab05d864638addeb4bb338a51183 | [
"Apache-2.0"
] | 8 | 2017-06-06T09:42:41.000Z | 2018-01-16T10:16:16.000Z | amaascore/tools/generate_party.py | amaas-fintech/amaas-core-sdk-python | bd77884de6e5ab05d864638addeb4bb338a51183 | [
"Apache-2.0"
] | 8 | 2017-01-18T04:14:01.000Z | 2017-12-01T08:03:10.000Z | from __future__ import absolute_import, division, print_function, unicode_literals
from amaasutils.random_utils import random_string, random_decimal
import random
from amaascore.core.reference import Reference
from amaascore.parties.asset_manager import AssetManager
from amaascore.parties.broker import Broker
from amaascore.parties.children import Address, Email
from amaascore.parties.individual import Individual
from amaascore.parties.party import Party
| 44.404762 | 113 | 0.746917 |
b405b1ef752a1702183bea0b47a0bc6616babde1 | 9,291 | py | Python | fitgrid/utils/lmer.py | vishalbelsare/fitgrid | 0197e7a3fc2c937da03d768b5c91220eebe54a22 | [
"BSD-3-Clause"
] | 10 | 2020-02-01T22:58:32.000Z | 2022-03-29T11:31:00.000Z | fitgrid/utils/lmer.py | vishalbelsare/fitgrid | 0197e7a3fc2c937da03d768b5c91220eebe54a22 | [
"BSD-3-Clause"
] | 161 | 2018-09-11T16:41:30.000Z | 2021-08-03T19:26:23.000Z | fitgrid/utils/lmer.py | vishalbelsare/fitgrid | 0197e7a3fc2c937da03d768b5c91220eebe54a22 | [
"BSD-3-Clause"
] | 4 | 2019-02-27T08:11:31.000Z | 2021-07-21T20:50:36.000Z | # -*- coding: utf-8 -*-
"""User functions to streamline working with selected pymer4 LMER fit
attributes from lme4::lmer and lmerTest for ``fitgrid.lmer`` grids.
"""
import functools
import re
import warnings
import numpy as np
import pandas as pd
import matplotlib as mpl
from matplotlib import pyplot as plt
import fitgrid
from fitgrid.fitgrid import LMERFitGrid
def get_lmer_dfbetas(epochs, factor, **kwargs):
    r"""Fit lmers leaving out factor levels one by one, compute DBETAS.

    Parameters
    ----------
    epochs : Epochs
        Epochs object
    factor : str
        column name of the factor of interest
    **kwargs
        keyword arguments to pass on to ``fitgrid.lmer``, like ``RHS``

    Returns
    -------
    dfbetas : pandas.DataFrame
        dataframe containing DFBETAS values

    Examples
    --------
    Example calculation showing how to pass in model fitting parameters::

        dfbetas = fitgrid.utils.lmer.get_lmer_dfbetas(
            epochs=epochs,
            factor='subject_id',
            RHS='x + (x|a)'
        )

    Notes
    -----
    DFBETAS is computed according to the following formula [NieGroPel2012]_:

    .. math::

       DFBETAS_{ij} = \frac{\hat{\gamma}_i - \hat{\gamma}_{i(-j)}}{se\left(\hat{\gamma}_{i(-j)}\right)}

    for parameter :math:`i` and level :math:`j` of ``factor``.
    """

    # get the factor levels
    table = epochs.table.reset_index().set_index(
        [epochs.epoch_id, epochs.time]
    )
    levels = table[factor].unique()

    # produce epochs tables with each level left out
    # (lazy generator: one leave-one-level-out Epochs at a time)
    looo_epochs = (
        fitgrid.epochs_from_dataframe(
            table[table[factor] != level],
            time=epochs.time,
            epoch_id=epochs.epoch_id,
            channels=epochs.channels,
        )
        for level in levels
    )

    # fit lmer on these epochs
    fitter = functools.partial(fitgrid.lmer, **kwargs)
    grids = map(fitter, looo_epochs)
    coefs = (grid.coefs for grid in grids)

    # get coefficient estimates and se from leave one out fits
    looo_coefs = pd.concat(coefs, keys=levels, axis=1)
    looo_estimates = looo_coefs.loc[pd.IndexSlice[:, :, 'Estimate'], :]
    looo_se = looo_coefs.loc[pd.IndexSlice[:, :, 'SE'], :]

    # get coefficient estimates from regular fit (all levels included)
    all_levels_coefs = fitgrid.lmer(epochs, **kwargs).coefs
    all_levels_estimates = all_levels_coefs.loc[
        pd.IndexSlice[:, :, 'Estimate'], :
    ]

    # drop outer level of index for convenience
    for df in (looo_estimates, looo_se, all_levels_estimates):
        df.index = df.index.droplevel(level=-1)

    # DFBETAS: (all_levels_estimate - level_excluded_estimate) / level_excluded_se
    dfbetas = all_levels_estimates.sub(looo_estimates, level=1).div(
        looo_se, level=1
    )

    return dfbetas.stack(level=0)
def get_lmer_warnings(lmer_grid):
    """grid the LMERFitGrid lme4::lmer4 warnings by type

    lmer warnings are a mishmash of characters, punctuation, and digits, some with
    numerical values specific to the message, for instance,

    | Model failed to converge with max|grad| = 0.00222262 (tol = 0.002, component 1)
    | unable to evaluate scaled gradient
    | boundary (singular) fit: see ?isSingular
    | np.nan

    The warning strings are returned as-is except for stripping
    leading and trailing whitespace and the "= N.NNNNNNNN" portion of the
    max \|grad\| convergence failure.

    Parameters
    ----------
    lmer_grid : fitgrid.LMERFitGrid
        as returned by ``fitgrid.lmer()``, shape = time x channel

    Returns
    -------
    warning_grids : dict
        A dictionary, the keys are lmer warning strings, each value
        is a `pandas.DataFrame` indicator grid where grid.loc[time, channel] == 1 if the
        lmer warning == key, otherwise 0.
    """
    if not isinstance(lmer_grid, LMERFitGrid):
        msg = (
            "get_lmer_warnings() must be called on an "
            f"LMERFitGrid not {type(lmer_grid)}"
        )
        raise ValueError(msg)

    # In pymer4 0.7.1+ and lme4::lmer 0.22+ warnings come back from
    # lme4::lmer via pymer4 as list of strings and each LMERFitgrid
    # cell may have a list of 0, 1, 2, ... ? warnings. This means
    # LMERFitGrid.warnings time index may have missing time stamps (= no
    # warnings), a single time stamp (one warning), or duplicate time
    # stamps (> 1 warning) and np.nan at channels where there is no
    # warning at that timestamp.

    # strip reported decimal values so max|grad| convergence failures are one kind
    tidy_strings = lmer_grid.warnings.applymap(
        lambda x: re.sub(
            r"max\|grad\|\s+=\s+\d+\.\d+\s+", "max|grad| ", x
        ).strip()
        if isinstance(x, str)
        else x  # no warning == np.nan
    ).rename_axis([lmer_grid.time, "wdx", "_empty"], axis=0)

    # the number and types of warning generally vary by time and/or channel
    warning_kinds = (
        pd.Series(tidy_strings.to_numpy().flatten()).dropna().unique()
    )

    # collect messy gappy, multiple warnings as a dict of key==warning,
    # value==tidy time x channel indicator grid (0, 1)
    warning_grids = {}
    assert lmer_grid._grid.shape == lmer_grid.has_warning.shape
    for warning_kind in warning_kinds:
        # empty grid w/ correct shape, row index and columns
        warning_grid = pd.DataFrame(
            np.zeros(lmer_grid._grid.shape, dtype=int),
            index=lmer_grid._grid.index.copy(),
            columns=lmer_grid._grid.columns.copy(),
        )

        # select rows w/ at least one non-na
        warning_rows = tidy_strings[tidy_strings == warning_kind].dropna(
            axis=0, how="all"
        )
        assert warning_rows.index.names[0] == lmer_grid._grid.index.name
        assert all(
            warning_rows.index.get_level_values(0)
            == warning_rows.index.get_level_values(0).unique()
        )
        # flip the indicator to 1 at each (time, channel) with this warning
        for rdx, row in warning_rows.iterrows():
            warning_grid.loc[rdx[0], :] = (row == warning_kind).astype(int)

        assert all(warning_grid.index == lmer_grid._grid.index)
        assert all(warning_grid.columns == lmer_grid._grid.columns)
        warning_grids[warning_kind] = warning_grid

    return warning_grids
def plot_lmer_warnings(lmer_grid, which="each", verbose=True):
    """Raster plot lme4::lmer warning grids

    Parameters
    ----------
    lmer_grid : fitgrid.LMERFitGrid
        as returned by ``fitgrid.lmer()``, shape = time x channel
    which : {"each", "all", or list of str}
        select the types of warnings to plot. `each` (default) plots
        each type of warning separately. `all` plots one grid showing
        where any type of warning occurred. A list of strings searches
        the lmer warnings and plots those that match.
    verbose : bool, default=True
        If `True` warn of failed matches for warnings keywords.

    Examples
    --------
    default, plot each warning grid separately

    >>> plot_lmer_warnings(lmer_grid)

    one plot shows everywhere there is a warning

    >>> plot_lmer_warnings(lmer_grid, which="all")

    plot just warnings that match these strings

    >>> plot_lmer_warnings(lmer_grid, which=["converge", "singular"])
    """
    # validate kwarg: a single pattern string or a list of pattern strings
    if not (
        isinstance(which, str)
        or (
            isinstance(which, list)
            and all(isinstance(wrn, str) for wrn in which)
        )
    ):
        # bug fix: the accepted special value is "all" (not "any"); keep the
        # message consistent with the handling below
        raise ValueError(
            "The value for which=value must be 'all', 'each', a warning "
            f"string pattern to match or list of them, not this: {which}"
        )

    warning_grids = get_lmer_warnings(lmer_grid)
    # synthesize an "all" grid: 1 wherever any warning type fired
    warning_grids["all"] = lmer_grid.has_warning.astype(int)

    keys = None
    if which == "all":
        keys = ["all"]
    elif which == "each":
        keys = list(warning_grids.keys())
    else:
        # lookup matching patterns var so as to not step on original kwarg
        patterns = [which] if isinstance(which, str) else which
        keys = []
        for pattern in patterns:
            matches = [key for key in warning_grids if pattern in key]
            keys += matches  # may be []
            if verbose and not matches:
                warnings.warn(f"warning pattern '{pattern}' not found")

    assert isinstance(keys, list), f"this should be type list: {type(keys)}"
    for key in keys:
        if verbose:
            print(f"{key}")
        _plot_warnings(key, warning_grids[key])

    if verbose and not keys:
        warnings.warn(f"no model warnings match {which}")
| 32.486014 | 103 | 0.633839 |
b405ca5c19bd60bffd27ebed33907aa4cbf83da9 | 2,055 | py | Python | pyesasky/jupyter_server.py | pierfra-ro/pyesasky | a9342efcaa5cca088ed9a5afa2c98d3e9aa4bd0f | [
"BSD-3-Clause"
] | 13 | 2019-05-30T19:57:37.000Z | 2021-09-10T09:43:49.000Z | pyesasky/jupyter_server.py | pierfra-ro/pyesasky | a9342efcaa5cca088ed9a5afa2c98d3e9aa4bd0f | [
"BSD-3-Clause"
] | 21 | 2019-06-21T18:55:25.000Z | 2022-02-27T14:48:13.000Z | pyesasky/jupyter_server.py | pierfra-ro/pyesasky | a9342efcaa5cca088ed9a5afa2c98d3e9aa4bd0f | [
"BSD-3-Clause"
] | 8 | 2019-05-30T12:20:48.000Z | 2022-03-04T04:01:20.000Z | import os
import json
from hashlib import md5
from tornado import web
from notebook.utils import url_path_join
from notebook.base.handlers import IPythonHandler
__all__ = ['load_jupyter_server_extension']
STATIC_DIR = os.path.join(os.path.dirname(__file__), 'nbextension', 'static');
CONFIG = os.path.expanduser('~/.pyesasky')
| 27.77027 | 79 | 0.620925 |
b4073a213da55b416141036502c3d25e2d22ed63 | 3,552 | py | Python | pingpongskill/pingpongskill.py | Garvys/PingPongSkill | 71749a34772326dd83121bb0ab6fad52b7d8d694 | [
"MIT"
] | 1 | 2017-09-22T13:30:20.000Z | 2017-09-22T13:30:20.000Z | pingpongskill/pingpongskill.py | Garvys/PingPongSkill | 71749a34772326dd83121bb0ab6fad52b7d8d694 | [
"MIT"
] | null | null | null | pingpongskill/pingpongskill.py | Garvys/PingPongSkill | 71749a34772326dd83121bb0ab6fad52b7d8d694 | [
"MIT"
] | null | null | null | # -*-: coding utf-8 -*-
""" Skeleton Snips skill. """
import re
import json
import os
import datetime
from text2num import text2num
from collections import defaultdict
# Timestamp format used by the skill.
FORMAT = '%Y.%m.%dT%H:%M:%S'

# Splits a spelled-out score such as "eleven to two" on the word "to".
regex = re.compile('([\w\s]+)to([\w\s]+)')

# Ad-hoc smoke test (Python 2 `print` statement syntax).
# NOTE(review): parse_core and PingPongSkill are not defined in this
# excerpt -- presumably defined elsewhere in the original file.
if __name__ == '__main__':
    scores = [
        'eleven to two',
        'twenty to eleven'
    ]
    for score in scores:
        print parse_core(score)
    PingPongSkill().handle_loser()
    PingPongSkill().handle_terminate_game('thib', 'alex', 'eleven to two')
    PingPongSkill().handle_loser()
| 30.62069 | 79 | 0.566441 |
b407548d1539781a310dd11a278698c4338d7000 | 13,006 | py | Python | xarray/backends/npy_io.py | martinResearch/xarray | e921d1bfa4785b10310f8b5d46a1efacba7e1cc9 | [
"Apache-2.0"
] | null | null | null | xarray/backends/npy_io.py | martinResearch/xarray | e921d1bfa4785b10310f8b5d46a1efacba7e1cc9 | [
"Apache-2.0"
] | null | null | null | xarray/backends/npy_io.py | martinResearch/xarray | e921d1bfa4785b10310f8b5d46a1efacba7e1cc9 | [
"Apache-2.0"
] | null | null | null |
import numpy as np
import xarray as xr
import pandas as pd
import sys
import json
import os
import datetime
from xarray.core.utils import (
decode_numpy_dict_values,
either_dict_or_kwargs,
ensure_us_time_resolution,
)
from numpy.compat import (
asbytes, asstr, asunicode, bytes, basestring, os_fspath, os_PathLike,
pickle, contextlib_nullcontext
)
from numpy.lib import format
def _is_string_like(obj):
"""
Check whether obj behaves like a string.
"""
try:
obj + ''
except (TypeError, ValueError):
return False
return True
# Run the module self-test when executed directly.
# NOTE(review): test() is not defined in this excerpt -- presumably defined
# elsewhere in the original file.
if __name__ == "__main__":
    test()
| 39.531915 | 267 | 0.541827 |
b408eeeaec183c35458c8ea0619e1ec8dfb285b7 | 14,222 | py | Python | applications/popart/bert/bert_data/squad_dataset.py | Alwaysproblem/examples-1 | 9754fa63ed1931489a21ac1f5b299f945e369a5c | [
"MIT"
] | null | null | null | applications/popart/bert/bert_data/squad_dataset.py | Alwaysproblem/examples-1 | 9754fa63ed1931489a21ac1f5b299f945e369a5c | [
"MIT"
] | null | null | null | applications/popart/bert/bert_data/squad_dataset.py | Alwaysproblem/examples-1 | 9754fa63ed1931489a21ac1f5b299f945e369a5c | [
"MIT"
] | null | null | null | # Copyright (c) 2019 Graphcore Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import random
import pickle
import json
import fractions
import math
import subprocess
from logging import getLogger
from functools import reduce
from .dataset import DataSet
from .data_sampler import SequentialSampler, ShuffledSampler, DistributedDataSampler
from .tokenization import FullTokenizer
from .squad_utils import read_squad_examples, convert_examples_to_features, RawResult, write_predictions, InputFeatures
logger = getLogger(__name__)
def load_or_cache_features(input_file,
                           vocab_file,
                           sequence_length,
                           is_training=True,
                           cache_file=None,
                           overwrite_cache=False,
                           do_lower_case=False):
    """Load SQuAD features from a pickle cache, or build and cache them.

    Parameters
    ----------
    input_file : str
        Path to the SQuAD JSON file.
    vocab_file : str
        Vocabulary file used to build the FullTokenizer.
    sequence_length : int
        Maximum sequence length for each feature.
    is_training : bool
        Passed through to example reading and feature conversion.
    cache_file : str or None
        Cache path; defaults to "<input_file>.<sequence_length>.cache".
    overwrite_cache : bool
        If True, rebuild features even when a cache file exists.
    do_lower_case : bool
        Lower-case text in the tokenizer.

    Returns
    -------
    (features, examples)
        `examples` is None when the features were loaded from the cache.

    Notes
    -----
    The cache is deserialized with pickle -- only load cache files you trust.
    """
    if cache_file is None:
        cache_file = input_file + f".{sequence_length}.cache"

    if os.path.exists(cache_file) and not overwrite_cache:
        # cache hit: raw examples are not reconstructed
        examples = None
        logger.info(f"Loading Cache {cache_file}")
        with open(cache_file, "rb") as f:
            features = pickle.load(f)
    else:
        logger.info("Reading Examples")
        examples = read_squad_examples(input_file=input_file,
                                       is_training=is_training,
                                       version_2_with_negative=False)

        # google-research/bert uses sequence_length 384 with doc_stride 128
        # TODO: Find a good value for the doc_stride with sequence_length <384
        doc_stride = 128
        if sequence_length < 384:
            doc_stride = 64

        logger.info("Converting to Features")
        features = convert_examples_to_features(examples=examples,
                                                tokenizer=FullTokenizer(vocab_file, do_lower_case=do_lower_case),
                                                max_seq_length=sequence_length,
                                                doc_stride=doc_stride,
                                                max_query_length=64,
                                                is_training=is_training)

        logger.info(f"Saving Cache {cache_file}")
        with open(cache_file, "wb") as f:
            pickle.dump(features, f)

    return features, examples
| 36.84456 | 141 | 0.592181 |
b40913984e0d9a08276edd74c8a43fc4a6017a70 | 9,921 | py | Python | utils.py | sWizad/HashNeRF-pytorch | e8fe9b4879fc6ef3cdfa8fd3d268a92c4fa0d910 | [
"MIT"
] | null | null | null | utils.py | sWizad/HashNeRF-pytorch | e8fe9b4879fc6ef3cdfa8fd3d268a92c4fa0d910 | [
"MIT"
] | null | null | null | utils.py | sWizad/HashNeRF-pytorch | e8fe9b4879fc6ef3cdfa8fd3d268a92c4fa0d910 | [
"MIT"
] | null | null | null | import json
import numpy as np
import pdb
import torch
from ray_utils import get_rays, get_ray_directions, get_ndc_rays
# Offsets of the 8 corners of a unit cube, shape (1, 8, 3), for 3-D voxel
# corner lookups. NOTE(review): allocated on CUDA at import time, so this
# module requires a GPU -- confirm this is intended.
BOX_OFFSETS = torch.tensor([[[i,j,k] for i in [0, 1] for j in [0, 1] for k in [0, 1]]],
                               device='cuda')
# Offsets of the 4 corners of a unit square, shape (1, 4, 2), for 2-D plane lookups.
SQR_OFFSETS = torch.tensor([[[i,j] for i in [0, 1] for j in [0, 1] ]], device='cuda')
def hash(coords, log2_hashmap_size):
    '''
    Spatial hash of integer 3-D grid coordinates.

    Multiplies each coordinate by a large prime, XORs the products, and
    keeps the low ``log2_hashmap_size`` bits, yielding an index into a
    hash table of size 2**log2_hashmap_size.

    coords: integer tensor of shape (..., 3)
    log2_hashmap_size: log2 of the hash-table size T
    '''
    table_mask = torch.tensor((1 << log2_hashmap_size) - 1)
    mixed = coords[..., 0] * 73856093
    mixed = mixed ^ (coords[..., 1] * 19349663)
    mixed = mixed ^ (coords[..., 2] * 83492791)
    return table_mask & mixed
def hash2d(coords, log2_hashmap_size):
    '''
    Spatial hash of integer 2-D grid coordinates.

    Same prime-multiply/XOR scheme as hash(), restricted to two axes,
    masked to the low ``log2_hashmap_size`` bits.

    coords: integer tensor of shape (..., 2)
    log2_hashmap_size: log2 of the hash-table size T
    '''
    table_mask = torch.tensor((1 << log2_hashmap_size) - 1)
    mixed = (coords[..., 0] * 73856093) ^ (coords[..., 1] * 19349663)
    return table_mask & mixed
def get_voxel_vertices(xyz, bounding_box, resolution, log2_hashmap_size):
    '''
    xyz: 3D coordinates of samples. B x 3
    bounding_box: min and max x,y,z coordinates of object bbox
    resolution: number of voxels per axis

    Returns (voxel_min_vertex, voxel_max_vertex, hashed_voxel_indices):
    the world-space min/max corners (B, 3) of each sample's enclosing voxel
    and the hash-table indices (B, 8) of the voxel's 8 corners in
    BOX_OFFSETS order (000, 001, ..., 111).
    '''
    box_min, box_max = bounding_box

    # NOTE(review): pdb.set_trace() left active here halts execution whenever
    # a point falls outside the bounding box -- looks like a debugging
    # leftover; confirm before shipping. Out-of-box points are clamped below.
    if not torch.all(xyz <= box_max) or not torch.all(xyz >= box_min):
        # print("ALERT: some points are outside bounding box. Clipping them!")
        pdb.set_trace()
        xyz = torch.clamp(xyz, min=box_min, max=box_max)

    grid_size = (box_max-box_min)/resolution

    # integer voxel coordinates of the corner "below" each sample
    bottom_left_idx = torch.floor((xyz-box_min)/grid_size).int()
    voxel_min_vertex = bottom_left_idx*grid_size + box_min
    # NOTE(review): torch.tensor([1.0,1.0,1.0]) is allocated on the default
    # (CPU) device -- confirm device compatibility when grid_size is on CUDA.
    voxel_max_vertex = voxel_min_vertex + torch.tensor([1.0,1.0,1.0])*grid_size

    # hashed_voxel_indices = [] # B x 8 ... 000,001,010,011,100,101,110,111
    # for i in [0, 1]:
    #     for j in [0, 1]:
    #         for k in [0, 1]:
    #             vertex_idx = bottom_left_idx + torch.tensor([i,j,k])
    #             # vertex = bottom_left + torch.tensor([i,j,k])*grid_size
    #             hashed_voxel_indices.append(hash(vertex_idx, log2_hashmap_size))

    # vectorized form of the commented loop above: broadcast against
    # BOX_OFFSETS to get all 8 corner indices at once, shape (B, 8, 3)
    voxel_indices = bottom_left_idx.unsqueeze(1) + BOX_OFFSETS
    hashed_voxel_indices = hash(voxel_indices, log2_hashmap_size)

    return voxel_min_vertex, voxel_max_vertex, hashed_voxel_indices
def get_plane_vertices_old(xyz, bounding_box, resolution, log2_hashmap_size):
    '''
    xyz: 3D coordinates of samples. B x 3
    bounding_box: min and max x,y,z coordinates of object bbox
    resolution: number of voxels per axis

    Tri-plane variant: instead of hashing the 8 corners of the 3-D voxel,
    hashes the 4 corners of the voxel's projection onto each of the three
    axis-aligned planes (xy, xz, yz), returning a list of three (B, 4)
    index tensors.
    '''
    box_min, box_max = bounding_box

    # NOTE(review): active pdb.set_trace() debugging leftover, same as in
    # get_voxel_vertices -- confirm before shipping.
    if not torch.all(xyz <= box_max) or not torch.all(xyz >= box_min):
        # print("ALERT: some points are outside bounding box. Clipping them!")
        pdb.set_trace()
        xyz = torch.clamp(xyz, min=box_min, max=box_max)

    grid_size = (box_max-box_min)/resolution

    bottom_left_idx = torch.floor((xyz-box_min)/grid_size).int() #(B, 3)
    voxel_min_vertex = bottom_left_idx*grid_size + box_min
    voxel_max_vertex = voxel_min_vertex + torch.tensor([1.0,1.0,1.0])*grid_size

    # hashed_voxel_indices = [] # B x 8 ... 000,001,010,011,100,101,110,111
    # for i in [0, 1]:
    #     for j in [0, 1]:
    #         for k in [0, 1]:
    #             vertex_idx = bottom_left_idx + torch.tensor([i,j,k])
    #             # vertex = bottom_left + torch.tensor([i,j,k])*grid_size
    #             hashed_voxel_indices.append(hash(vertex_idx, log2_hashmap_size))

    #voxel_indices = bottom_left_idx.unsqueeze(1) + BOX_OFFSETS #(B, 8, 3)
    #hashed_voxel_indices = hash(voxel_indices, log2_hashmap_size) #(B, 8)

    # project the voxel index onto each axis pair and hash the 4 square
    # corners per plane via broadcasting with SQR_OFFSETS
    voxel_indices_xy = bottom_left_idx[:,:2].unsqueeze(1) + SQR_OFFSETS #(B, 4, 2)
    voxel_indices_xz = bottom_left_idx[:,::2].unsqueeze(1) + SQR_OFFSETS #(B, 4, 2)
    voxel_indices_yz = bottom_left_idx[:,-2:].unsqueeze(1) + SQR_OFFSETS #(B, 4, 2)
    hashed_voxel_indices_xy = hash2d(voxel_indices_xy, log2_hashmap_size) #(B, 4)
    hashed_voxel_indices_xz = hash2d(voxel_indices_xz, log2_hashmap_size) #(B, 4)
    hashed_voxel_indices_yz = hash2d(voxel_indices_yz, log2_hashmap_size) #(B, 4)
    hashed_voxel_indices = [hashed_voxel_indices_xy,
                            hashed_voxel_indices_xz,
                            hashed_voxel_indices_yz]

    # NOTE(review): box2plane is not defined in this excerpt -- presumably
    # defined elsewhere in the original file.
    voxel_min_vertex = box2plane(voxel_min_vertex)
    voxel_max_vertex = box2plane(voxel_max_vertex)
    #pdb.set_trace()
    return voxel_min_vertex, voxel_max_vertex, hashed_voxel_indices
def get_plane_vertices(xyz, bounding_box, resolution, log2_hashmap_size):
    '''
    Compute, per sample, the enclosing grid cell's plane projections and the
    flat 2D indices of the cell corners on the xy, xz and yz planes.

    xyz: 3D coordinates of samples. B x 3
    bounding_box: min and max x,y,z coordinates of object bbox
    resolution: number of voxels per axis
    log2_hashmap_size: kept for signature compatibility; indices are derived
        from the grid resolution via xy2index rather than hashed.

    Returns:
        voxel_min_vertex, voxel_max_vertex: box2plane projections of the
            bottom-left / top-right corners of each sample's voxel.
        hashed_voxel_indices: list of three (B, 4) index tensors, one per
            plane (xy, xz, yz).
    '''
    box_min, box_max = bounding_box

    # Out-of-bbox samples would produce out-of-range grid indices. Warn and
    # clip instead of dropping into the debugger (the previous pdb.set_trace()
    # hung every non-interactive run).
    if not torch.all(xyz <= box_max) or not torch.all(xyz >= box_min):
        print("ALERT: some points are outside bounding box. Clipping them!")
    xyz = torch.clamp(xyz, min=box_min, max=box_max)

    grid_size = (box_max - box_min) / resolution

    # Integer grid cell containing each sample, and its 3D corner coordinates.
    bottom_left_idx = torch.floor((xyz - box_min) / grid_size).int()  # (B, 3)
    voxel_min_vertex = bottom_left_idx * grid_size + box_min
    voxel_max_vertex = voxel_min_vertex + grid_size

    # Axis-pair projections of the cell index: columns (x,y), (x,z), (y,z).
    # SQR_OFFSETS supplies the four 2D corner offsets of a grid square.
    voxel_indices_xy = bottom_left_idx[:, :2].unsqueeze(1) + SQR_OFFSETS   # (B, 4, 2)
    voxel_indices_xz = bottom_left_idx[:, ::2].unsqueeze(1) + SQR_OFFSETS  # (B, 4, 2)
    voxel_indices_yz = bottom_left_idx[:, -2:].unsqueeze(1) + SQR_OFFSETS  # (B, 4, 2)

    # Flat per-plane corner indices, one (B, 4) tensor per plane.
    hashed_voxel_indices = [
        xy2index(voxel_indices_xy, resolution),
        xy2index(voxel_indices_xz, resolution),
        xy2index(voxel_indices_yz, resolution),
    ]

    # Project the 3D voxel corners onto the three planes to match the indices.
    voxel_min_vertex = box2plane(voxel_min_vertex)
    voxel_max_vertex = box2plane(voxel_max_vertex)

    return voxel_min_vertex, voxel_max_vertex, hashed_voxel_indices
if __name__=="__main__":
    # Smoke test: derive a scene bounding box from the Blender-synthetic
    # "chair" training transforms (800x800 renders).
    with open("data/nerf_synthetic/chair/transforms_train.json", "r") as f:
        camera_transforms = json.load(f)
    
    # get_bbox3d_for_blenderobj is defined elsewhere in this module.
    bounding_box = get_bbox3d_for_blenderobj(camera_transforms, 800, 800)
| 40.493878 | 123 | 0.627961 |
b4091bea05e2b9f2e78f9f40870c9ac7e8a9cac3 | 15,755 | py | Python | eval.py | dawnchen123/VS-Net | 21aa8873e32351716302934887f6a08e7d568ea2 | [
"Apache-2.0"
] | 55 | 2021-04-17T08:15:06.000Z | 2022-03-30T02:38:27.000Z | eval.py | dawnchen123/VS-Net | 21aa8873e32351716302934887f6a08e7d568ea2 | [
"Apache-2.0"
] | 3 | 2021-05-30T03:29:01.000Z | 2022-03-03T00:47:33.000Z | eval.py | dawnchen123/VS-Net | 21aa8873e32351716302934887f6a08e7d568ea2 | [
"Apache-2.0"
] | 11 | 2021-07-01T15:15:23.000Z | 2022-02-12T06:47:26.000Z | import os
import cv2
import time
import json
import random
import inspect
import argparse
import numpy as np
from tqdm import tqdm
from dataloaders import make_data_loader
from models.sync_batchnorm.replicate import patch_replication_callback
from models.vs_net import *
from utils.loss import loss_dict
from utils.lr_scheduler import LR_Scheduler
from utils.saver import Saver
from utils.summaries import TensorboardSummary
from utils.metrics import Evaluator
from utils import utils
from torch.autograd import Variable
import os.path as osp
from configs import *
import warnings
warnings.filterwarnings("ignore")
if __name__ == "__main__":
    # Entry point; main() is defined elsewhere in this module.
    main()
| 45.403458 | 124 | 0.548588 |
b40a24b1b84590432a339ee0e8fac4f84e897ac1 | 2,692 | py | Python | data/__init__.py | Joaomlg/multilayer-perceptron-mnist | 0454c4970c3a06a37ac7c20787a1bdf1cda7da0f | [
"MIT"
] | 13 | 2021-05-15T04:22:04.000Z | 2022-03-29T10:55:32.000Z | data/__init__.py | Joaomlg/multilayer-perceptron-mnist | 0454c4970c3a06a37ac7c20787a1bdf1cda7da0f | [
"MIT"
] | null | null | null | data/__init__.py | Joaomlg/multilayer-perceptron-mnist | 0454c4970c3a06a37ac7c20787a1bdf1cda7da0f | [
"MIT"
] | 4 | 2021-05-18T07:48:52.000Z | 2021-07-10T10:11:41.000Z | import numpy as np
import gzip
import pickle
import os
import urllib.request
| 32.829268 | 110 | 0.686478 |
b40aad26fdc784cc5dfaf249f1c167e4160e4887 | 2,279 | py | Python | Exemple.py | LVWolff/Python_Lesson_2 | ece186f988c94a1aaa1656a1e6e1093c3d5b6251 | [
"MIT"
] | null | null | null | Exemple.py | LVWolff/Python_Lesson_2 | ece186f988c94a1aaa1656a1e6e1093c3d5b6251 | [
"MIT"
] | null | null | null | Exemple.py | LVWolff/Python_Lesson_2 | ece186f988c94a1aaa1656a1e6e1093c3d5b6251 | [
"MIT"
] | null | null | null | # ------
#----------------------------------------
'''
1
, .
'''
# Exercise 1 (original Russian description lost to encoding): print the
# numbers 1..5, each followed by a fixed string of zeros.
for i in range(1, 6):
    print(i, '0000000000000000000000000000000000000000000')

'''
2
10 . 5.
'''
# Exercise 2: read 10 integers from the user and count how many equal 5.
count = 0
for i in range(10):
    user_data = int(input(' : '))
    if user_data == 5:
        count += 1
print(count)

'''
3
1 100. .
'''
# Exercise 3: sum of the integers 1..100.
# NOTE(review): `sum` shadows the builtin of the same name.
sum = 0
for i in range(1, 101):
    sum += i
print(sum)

'''
4
1 10. .
'''
# Exercise 4: product of 2..10 (i.e. 10!).
proiz = 1
for i in range(2, 11):
    proiz *= i
print(proiz)

'''
5
.
'''
# Exercise 5: print the digits of a number from left to right by repeatedly
# dividing by a decreasing power of ten.
integer_number = 123456
start_del = len(str(integer_number)) - 1
delitel = 10 ** start_del
#print(integer_number % delitel, integer_number // delitel)
while integer_number > 0:
    print(int(integer_number // delitel))
    integer_number = integer_number % delitel
    # NOTE(review): /= makes delitel a float; fine for small numbers but
    # floor-division (//=) would avoid precision issues on large inputs.
    delitel /= 10

'''
6
.
'''
# Exercise 6: sum of the digits of a number (peel digits with % 10).
integer_number = 123456
sum = 0
while integer_number > 0:
    sum += integer_number % 10
    integer_number = integer_number // 10
print(sum)

'''
7
.
'''
# Exercise 7: product of the digits of a number.
integer_number = 123456
proiz = 1
while integer_number > 0:
    proiz *= integer_number % 10
    integer_number = integer_number // 10
print(proiz)

'''
8
: 5?
'''
# Exercise 8: does the number contain the digit 5? Uses the while/else
# idiom: the else branch runs only when the loop ends without break.
integer_number = 125254
while integer_number > 0:
    if integer_number % 10 == 5:
        print('Yes')
        break
    integer_number = integer_number // 10
else:
    print('No')

'''
9
'''
# Exercise 9: largest digit of a number.
integer_number = 125278954
max_num = integer_number % 10
while integer_number > 0:
    max_num = max(max_num, integer_number % 10)
    integer_number = integer_number // 10
print(max_num)

'''
10
5
'''
# Exercise 10: count occurrences of the digit 5.
integer_number = 125278954
count_num = 0
while integer_number > 0:
    if integer_number % 10 == 5:
        count_num += 1
    integer_number = integer_number // 10
print(count_num)
| 18.087302 | 92 | 0.67749 |
b40bc88be7d9975ca6ad22574a73918dc37e3371 | 11,368 | py | Python | push_exp/main_CrouchSimulationForCOT.py | snumrl/DeepPushRecovery | dceb7f3114d4314cf3be875f43723255819e12a3 | [
"Apache-2.0"
] | null | null | null | push_exp/main_CrouchSimulationForCOT.py | snumrl/DeepPushRecovery | dceb7f3114d4314cf3be875f43723255819e12a3 | [
"Apache-2.0"
] | null | null | null | push_exp/main_CrouchSimulationForCOT.py | snumrl/DeepPushRecovery | dceb7f3114d4314cf3be875f43723255819e12a3 | [
"Apache-2.0"
] | 1 | 2021-07-26T15:08:58.000Z | 2021-07-26T15:08:58.000Z | import os
import numpy as np
import time
import multiprocessing as mp
import csv
import socket
import datetime
import math
import glob
from pypushexp import PushSim
# # input - [recorded item]
# [weight] : 48
# [height] : 160
# [crouch_angle] (deg)
# [step_length_ratio]
# [halfcycle_duration_ratio]
# [push_step] : 8
# [push_duration] (sec) : .2
# [push_force] (N)
# [push_start_timing] (half gait cycle percent)
#
# # output
# [pushed_length] (m) : sim.out_pushed_length
# [pushed_steps] : sim.out_pushed_steps
# [push_strength] : abs(push_force * push_duration / weight)
# [step_length] (m) : sim.getPushedLength()
# [walking_speed] (m/s) : sim.getWalkingSpeed()
# [halfcycle_duration] (s) : sim.getStepLength() /sim.getWalkingSpeed()
#
# # output for hospital
# [distance] : pushed_length * 1000.
# [speed] : walking_speed * 1000.
# [force] : push_strength * 1000.
# [stride] : step_length * 1000.
# [start_timing_time_ic] = sim.start_timing_time_ic
# [mid_timing_time_ic] = sim.mid_timing_time_ic
# [start_timing_foot_ic] = sim.getStartTimingFootIC()
# [mid_timing_foot_ic] = sim.getMidTimingFootIC()
# [start_timing_time_fl] = sim.getStartTimingTimeFL()
# [mid_timing_time_fl] = sim.getMidTimingTimeFL()
# [start_timing_foot_fl] = sim.getStartTimingFootFL()
# [mid_timing_foot_fl] = sim.getMidTimingFootFL()
# # not used
# subject no
# sex
# left leg length
# right leg length
# stride
# speed
# experiment
# file name
# trial no
# push timing : 'left stance'
# push direction : 'from left'
# normalized push length
# push length until first step
# push end timing (time)
# push end timing (foot pos)
# return during first step
# push duration
# push start time
if __name__ == '__main__':
    import sys
    import re

    # CLI: <option> selects the trained model / crouch configuration,
    # <trial_num> the number of simulation trials.
    option = sys.argv[1]
    trial_num = int(sys.argv[2])

    # Metadata and trained-network locations, relative to this script.
    _metadata_dir = os.path.dirname(os.path.abspath(__file__)) + '/../data/metadata/'
    _nn_finding_dir = os.path.dirname(os.path.abspath(__file__)) + '/../nn/*/'
    nn_dir = None
    # NOTE(review): _nn_finding_dir is always non-None here, so this guard is
    # dead; glob()[0] will raise IndexError if no network dir matches option.
    if _nn_finding_dir is not None:
        nn_dir = glob.glob(_nn_finding_dir + option)[0]
    meta_file = _metadata_dir + option + '.txt'

    # Muscle-driven models need an extra muscle network checkpoint.
    sim = None
    if 'muscle' in option:
        sim = PushSim(meta_file, nn_dir+'/max.pt', nn_dir+'/max_muscle.pt')
    else:
        sim = PushSim(meta_file, nn_dir+'/max.pt')

    if "all" in option:
        # Run every crouch angle: 0, 20, 30 and 60 degrees (indices 0..3).
        simulate(sim, 0, trial_num, option)
        simulate(sim, 1, trial_num, option)
        simulate(sim, 2, trial_num, option)
        simulate(sim, 3, trial_num, option)
    else:
        # Extract the crouch angle from an option like "...crouch20..." and
        # map it to its index in the supported-angle list.
        crouch = re.findall(r'crouch\d+', option)[0][6:]
        simulate(sim, ['0', '20', '30', '60'].index(crouch), trial_num, option)
| 34.344411 | 139 | 0.598522 |
b40c71ed0a4ab0b122f61556dae6f792302c5678 | 776 | py | Python | lepiota/lepiota/urls.py | sgelias/lepiota | 4b30aa25ac5308229f6d41f1720e1af02557826e | [
"MIT"
] | null | null | null | lepiota/lepiota/urls.py | sgelias/lepiota | 4b30aa25ac5308229f6d41f1720e1af02557826e | [
"MIT"
] | null | null | null | lepiota/lepiota/urls.py | sgelias/lepiota | 4b30aa25ac5308229f6d41f1720e1af02557826e | [
"MIT"
] | null | null | null | from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, re_path
from django.conf.urls import include
from django.views.generic import TemplateView, RedirectView
# URL routing table; static() appends media/static file serving in DEBUG mode.
urlpatterns = [
    # Administration
    path('admin/', admin.site.urls),

    # Accounts
    path('account/', include('account.urls', namespace='account')),

    # Oauth2 provider endpoints
    path('api/v1/o/', include('oauth.urls', namespace='oauth2_provider')),

    # General purpose: landing page, with the site root redirecting to it.
    path('welcome/', TemplateView.as_view(template_name="welcome.html")),
    path('', RedirectView.as_view(url="/welcome/")),
    re_path(r'^$', RedirectView.as_view(url="/welcome/")),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| 29.846154 | 74 | 0.716495 |
b40c87bef3a1437769ac688f07452b9daed5f901 | 189 | py | Python | src/base/admin.py | dhavall13/Decode | 8b9cbec72ade727d62edb90c3a38152e0285fe90 | [
"MIT"
] | null | null | null | src/base/admin.py | dhavall13/Decode | 8b9cbec72ade727d62edb90c3a38152e0285fe90 | [
"MIT"
] | null | null | null | src/base/admin.py | dhavall13/Decode | 8b9cbec72ade727d62edb90c3a38152e0285fe90 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Room, Topic, Message, User
admin.site.register(Room)
admin.site.register(Topic)
admin.site.register(Message)
admin.site.register(User)
| 23.625 | 46 | 0.804233 |
b40e2538e7eca239f3b41df3368718122f54c302 | 10,744 | py | Python | gorilla/config/_config.py | sunjiahao1999/gorilla-core | bf43e3a49c7f79834ae969db38edd50f17ef5288 | [
"MIT"
] | 4 | 2021-07-28T04:50:26.000Z | 2021-09-23T12:59:01.000Z | gorilla/config/_config.py | sunjiahao1999/gorilla-core | bf43e3a49c7f79834ae969db38edd50f17ef5288 | [
"MIT"
] | null | null | null | gorilla/config/_config.py | sunjiahao1999/gorilla-core | bf43e3a49c7f79834ae969db38edd50f17ef5288 | [
"MIT"
] | 2 | 2021-08-05T04:01:12.000Z | 2021-12-25T02:17:03.000Z | # Copyright (c) Open-MMLab. All rights reserved.
import os
import json
import tempfile
import warnings
from typing import Optional
from argparse import Namespace
from addict import Dict
from ..utils import check_file
BASE_KEY = "_base_"
RESERVED_KEYS = ["filename", "text"]
    def __len__(self) -> int:
        """Number of top-level config keys (delegated to the wrapped dict)."""
        return len(self._cfg_dict)
    def __getattr__(self, name: str):
        """Delegate attribute access to the wrapped config dict."""
        return getattr(self._cfg_dict, name)
    def __getitem__(self, name: str):
        """Delegate item access (cfg["key"]) to the wrapped config dict."""
        return self._cfg_dict.__getitem__(name)
    def __setattr__(self, name: str, value: Dict):
        """Set an attribute, wrapping plain dicts into ConfigDict first."""
        if isinstance(value, dict):
            value = ConfigDict(value)
        self._cfg_dict.__setattr__(name, value)
    def __setitem__(self, name: str, value: Dict):
        """Set an item (cfg["key"] = v), wrapping plain dicts into ConfigDict."""
        if isinstance(value, dict):
            value = ConfigDict(value)
        self._cfg_dict.__setitem__(name, value)
    def __iter__(self):
        """Iterate over top-level config keys."""
        return iter(self._cfg_dict)
    def dump(self, file: Optional[str] = None, **kwargs):
        """Serialize the config.

        With file=None, return the serialized content as a string: the raw
        source text for .py configs, otherwise the output of
        gorilla.fileio.dump in the format implied by self.filename.
        With a file path, write to it instead (raw text for *.py targets).
        """
        cfg_dict = self._cfg_dict.to_dict()
        # Imported lazily to avoid a circular import with gorilla.fileio.
        from gorilla.fileio import dump
        if file is None:
            # output the content
            file_format = self.filename.split(".")[-1]
            if file_format == "py":
                return self.text
            else:
                return dump(cfg_dict, file_format=file_format, **kwargs)
        else:
            # NOTE(review): endswith("py") also matches e.g. ".npy"/".ipy";
            # presumably ".py" was intended — confirm before tightening.
            if file.endswith("py"):
                with open(file, "w") as f:
                    f.write(self.text)
            else:
                dump(cfg_dict, file, **kwargs)
    def merge_from_dict(self, options: Dict):
        r"""Merge a flat dict of dotted keys into cfg_dict.

        Merge the dict parsed by MultipleKVAction into this cfg. Keys use
        dot notation ("model.backbone.depth") to address nested entries;
        values of None are skipped so unset CLI arguments do not clobber
        values already present in the config file.

        Examples:
            >>> options = {"model.backbone.depth": 50,
            ...            "model.backbone.with_cp":True}
            >>> cfg = Config(dict(model=dict(backbone=dict(type="ResNet"))))
            >>> cfg.merge_from_dict(options)
            >>> cfg_dict = super(Config, self).__getattribute__("_cfg_dict")
            >>> assert cfg_dict == dict(
            ...     model=dict(backbone=dict(depth=50, with_cp=True)))

            # Merge list element
            >>> cfg = Config(dict(pipeline=[
            ...     dict(type="LoadImage"), dict(type="LoadAnnotations")]))
            >>> options = dict(pipeline={"0": dict(type="SelfLoadImage")})

        Args:
            options (dict): dict of configs to merge from.
        """
        # Expand dotted keys into a nested ConfigDict tree.
        option_cfg_dict = {}
        for full_key, v in options.items():
            if v is None:  # handle the case when a parameter simultaneously appears in argparse and config file
                continue
            d = option_cfg_dict
            key_list = full_key.split(".")
            for subkey in key_list[:-1]:
                d.setdefault(subkey, ConfigDict())
                d = d[subkey]
            subkey = key_list[-1]
            d[subkey] = v

        # Recursively fold the expanded options into the current config.
        cfg_dict = self._cfg_dict
        cfg_dict = Config._merge_a_into_b(option_cfg_dict, cfg_dict)
        # NOTE: strange phenomenon
        # self._cfg_dict = cfg_dict
        # Bypass Config.__setattr__ (which delegates to the wrapped dict) so
        # the instance-level _cfg_dict reference itself is replaced.
        super(Config, self).__setattr__("_cfg_dict", cfg_dict)
def merge_cfg_and_args(cfg: Optional[Config] = None,
                       args: Optional[Namespace] = None) -> Config:
    r"""Fold parsed CLI arguments into a Config via ``merge_from_dict``.

    Either argument may be omitted (an empty Config/Namespace is substituted),
    but not both at once.

    Args:
        cfg (Config, optional): Config from cfg file.
        args (Namespace, optional): Argument parameters input.

    Returns:
        Config: Merged Config
    """
    assert cfg is not None or args is not None, "'cfg' or 'args' can not be None simultaneously"

    if cfg is not None:
        assert isinstance(
            cfg, Config
        ), f"'cfg' must be None or gorilla.Config, but got {type(cfg)}"
    else:
        cfg = Config()

    if args is not None:
        assert isinstance(
            args, Namespace
        ), f"'args' must be None or argsparse.Namespace, but got {type(args)}"
    else:
        args = Namespace()

    # A Namespace is just an attribute bag; vars() exposes it as a dict.
    cfg.merge_from_dict(vars(args))
    return cfg
| 34.770227 | 112 | 0.560964 |
b40e4f7e84bc53160bafd291d5c8ea6b4b1f43bd | 2,643 | py | Python | Kaspa/modules/extension_modules/spotify_module/spotifyModuleEn.py | karim-awad/kaspa | 701d935dd215bfd9a4810a4430973b33fecec257 | [
"MIT"
] | null | null | null | Kaspa/modules/extension_modules/spotify_module/spotifyModuleEn.py | karim-awad/kaspa | 701d935dd215bfd9a4810a4430973b33fecec257 | [
"MIT"
] | null | null | null | Kaspa/modules/extension_modules/spotify_module/spotifyModuleEn.py | karim-awad/kaspa | 701d935dd215bfd9a4810a4430973b33fecec257 | [
"MIT"
] | null | null | null | from Kaspa.modules.abstract_modules.abstractSubmodule import AbstractSubmodule
from Kaspa.modules.exceptions.impossibleActionError import ImpossibleActionError
from Kaspa.config import Config
| 36.205479 | 99 | 0.596292 |
b40e9592fe62c2017e79612d2b201dbc82a4fb4e | 2,768 | py | Python | screenshot-server/app/main.py | martindines/ScreenshotServer | 21d1529157f4625cd26196000c4a30342ab4d713 | [
"MIT"
] | 1 | 2019-12-31T18:43:08.000Z | 2019-12-31T18:43:08.000Z | screenshot-server/app/main.py | martindines/ScreenshotServer | 21d1529157f4625cd26196000c4a30342ab4d713 | [
"MIT"
] | 1 | 2019-12-31T19:35:24.000Z | 2019-12-31T19:35:24.000Z | screenshot-server/app/main.py | martindines/ScreenshotServer | 21d1529157f4625cd26196000c4a30342ab4d713 | [
"MIT"
] | null | null | null | import os
import sys
import pathlib
from utilities import get_random_hash
from flask import Flask, flash, request, redirect, url_for, send_from_directory, jsonify, Response
# Upload directory; falls back to /tmp when the env var is unset or empty.
# (Single os.environ lookup via `or` replaces the previous double-read
# `get(...) if get(...) else '/tmp'`, which is behaviorally identical for
# falsy values.)
UPLOAD_FOLDER = os.environ.get('UPLOAD_FOLDER') or '/tmp'
# File extensions accepted by the upload endpoint.
ALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}
# Shared secret used to authorize requests; None if not configured.
SECRET = os.environ.get('SECRET')

app = Flask(__name__)
app.config['SERVER_NAME'] = os.environ.get('SERVER_NAME')
b40ee079a577a77555888197b34380d7e63acfd3 | 517 | py | Python | src/waldur_mastermind/notifications/migrations/0002_json_field.py | opennode/nodeconductor-assembly-waldur | cad9966389dc9b52b13d2301940c99cf4b243900 | [
"MIT"
] | 2 | 2017-01-20T15:26:25.000Z | 2017-08-03T04:38:08.000Z | src/waldur_mastermind/notifications/migrations/0002_json_field.py | opennode/nodeconductor-assembly-waldur | cad9966389dc9b52b13d2301940c99cf4b243900 | [
"MIT"
] | null | null | null | src/waldur_mastermind/notifications/migrations/0002_json_field.py | opennode/nodeconductor-assembly-waldur | cad9966389dc9b52b13d2301940c99cf4b243900 | [
"MIT"
] | null | null | null | # Generated by Django 3.2 on 2022-01-31 14:26
from django.db import migrations, models
| 21.541667 | 45 | 0.560928 |
b41042e5988e8d27b58649ccaf22e396c4b031cb | 2,800 | py | Python | gitgoggles/utils.py | nowells/git-goggles | 022dc0cd6dfe8f1641ccb33e85ab05309dba7dbf | [
"MIT"
] | 13 | 2015-03-10T08:48:51.000Z | 2019-04-16T09:06:55.000Z | gitgoggles/utils.py | nowells/git-goggles | 022dc0cd6dfe8f1641ccb33e85ab05309dba7dbf | [
"MIT"
] | null | null | null | gitgoggles/utils.py | nowells/git-goggles | 022dc0cd6dfe8f1641ccb33e85ab05309dba7dbf | [
"MIT"
] | 3 | 2016-04-29T05:38:56.000Z | 2020-07-06T13:04:05.000Z | import copy
import subprocess
import sys
import unicodedata
try:
from termcolor import colored as colored_func
except ImportError:
print 'You should run "pip install termcolor" to fully utilize these utilities.'
colored_func = disable_colored_func
def supports_color():
    """Report whether the attached terminal can render ANSI colors.

    Returns False on platforms without native ANSI support (win32 /
    Pocket PC) or when stdout is not an interactive TTY; True otherwise.
    """
    if sys.platform in ('win32', 'Pocket PC'):
        return False
    stream = sys.stdout
    # Some stdout replacements lack isatty entirely, #6223.
    return hasattr(stream, 'isatty') and stream.isatty()
# Fall back to the pass-through colorizer when ANSI output is unavailable.
if not supports_color():
    colored_func = disable_colored_func

# Module-level singleton; Colored is defined elsewhere in this module.
colored = Colored()
| 32.183908 | 127 | 0.658571 |
b410813c6c4297c46c6ca2597443a122ba6dda59 | 4,308 | py | Python | test/unit/tools/test_basisconstructors.py | colibri-coruscans/pyGSTi | da54f4abf668a28476030528f81afa46a1fbba33 | [
"Apache-2.0"
] | 73 | 2016-01-28T05:02:05.000Z | 2022-03-30T07:46:33.000Z | test/unit/tools/test_basisconstructors.py | colibri-coruscans/pyGSTi | da54f4abf668a28476030528f81afa46a1fbba33 | [
"Apache-2.0"
] | 113 | 2016-02-25T15:32:18.000Z | 2022-03-31T13:18:13.000Z | test/unit/tools/test_basisconstructors.py | colibri-coruscans/pyGSTi | da54f4abf668a28476030528f81afa46a1fbba33 | [
"Apache-2.0"
] | 41 | 2016-03-15T19:32:07.000Z | 2022-02-16T10:22:05.000Z | import numpy as np
import pygsti.baseobjs.basisconstructors as bc
from ..util import BaseCase
| 40.261682 | 109 | 0.597957 |
b4130d04b43c706ebb56a9d6ede2201a268db5d7 | 7,913 | py | Python | tensorflow/contrib/training/python/training/hparam_test.py | DEVESHTARASIA/tensorflow | d3edb8c60ed4fd831d62833ed22f5c23486c561c | [
"Apache-2.0"
] | 384 | 2017-02-21T18:38:04.000Z | 2022-02-22T07:30:25.000Z | tensorflow/contrib/training/python/training/hparam_test.py | DEVESHTARASIA/tensorflow | d3edb8c60ed4fd831d62833ed22f5c23486c561c | [
"Apache-2.0"
] | 15 | 2017-03-01T20:18:43.000Z | 2020-05-07T10:33:51.000Z | tensorflow/contrib/training/python/training/hparam_test.py | DEVESHTARASIA/tensorflow | d3edb8c60ed4fd831d62833ed22f5c23486c561c | [
"Apache-2.0"
] | 81 | 2017-02-21T19:31:19.000Z | 2022-02-22T07:30:24.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for hparam."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib.training.python.training import hparam
from tensorflow.python.platform import test
if __name__ == '__main__':
test.main()
| 40.372449 | 80 | 0.65841 |
b4148b82caffcb3d401203b514031ef55ddaf4b5 | 1,279 | py | Python | CodigoSOLID.py | JhonGalarza/SOLID | 5ee45f136a24cb2300837c8ed89accdc2f299c83 | [
"MIT"
] | null | null | null | CodigoSOLID.py | JhonGalarza/SOLID | 5ee45f136a24cb2300837c8ed89accdc2f299c83 | [
"MIT"
] | null | null | null | CodigoSOLID.py | JhonGalarza/SOLID | 5ee45f136a24cb2300837c8ed89accdc2f299c83 | [
"MIT"
] | null | null | null | #DATOS DE ENTRADA
ANIMAL= int(input("De cual animal quiere conocer la caracteristicas? 1.Leon 2.Ballena 3.Tucan? "))
if ANIMAL == 1 :
print ("debe imprimir las caracteristicas del leon, el leon es clase hija de animal y debe agragar animal_tierra" )
elif ANIMAL == 2 :
print ("lo mismo que el leon, pero con la ballena")
elif ANIMAL == 3 :
print("Lo mismo pero con el tucan") | 24.596154 | 120 | 0.620797 |
b414e74ae421f14965c6e966091b96bde22167db | 8,249 | py | Python | orca/topology/infra/k8s/__init__.py | filwie/orca | 84cfd53d309d85f7a7fb8649ba4abc8c2df9feac | [
"Apache-2.0"
] | null | null | null | orca/topology/infra/k8s/__init__.py | filwie/orca | 84cfd53d309d85f7a7fb8649ba4abc8c2df9feac | [
"Apache-2.0"
] | null | null | null | orca/topology/infra/k8s/__init__.py | filwie/orca | 84cfd53d309d85f7a7fb8649ba4abc8c2df9feac | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 OpenRCA Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from orca.topology import bundle
from orca.topology.infra.istio import linker as istio_linker
from orca.topology.infra.k8s import cluster, linker, probe
| 29.566308 | 74 | 0.562856 |
b415b852eb1504fe65a58d7db038c31b5386abda | 2,616 | py | Python | thelma/repositories/rdb/view.py | fogathmann/TheLMA | ac330a0005da4fea2f1387da9ff9938611ad1481 | [
"MIT"
] | 1 | 2020-07-12T22:47:58.000Z | 2020-07-12T22:47:58.000Z | thelma/repositories/rdb/view.py | papagr/TheLMA | d2dc7a478ee5d24ccf3cc680888e712d482321d0 | [
"MIT"
] | null | null | null | thelma/repositories/rdb/view.py | papagr/TheLMA | d2dc7a478ee5d24ccf3cc680888e712d482321d0 | [
"MIT"
] | 1 | 2020-07-12T22:40:36.000Z | 2020-07-12T22:40:36.000Z | """
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Utilities to create/drop views.
Based on a recipe published in:
http://www.sqlalchemy.org/trac/wiki/UsageRecipes/Views
"""
from sqlalchemy.sql import table
from sqlalchemy.ext import compiler
from sqlalchemy.schema import DDLElement
__docformat__ = 'reStructuredText en'
__all__ = ['CreateView',
'DropView',
'view_factory',
]
def view_factory(name, metadata, selectable):
    """Register a database view on ``metadata`` and return a table-like proxy.

    The proxy is cached in ``metadata.views`` so repeated calls with the same
    name reuse it. CreateView/DropView DDL (defined elsewhere in this module)
    is hooked into metadata create/drop so the view follows schema lifecycle.
    """
    # Lazily attach the view cache to the MetaData instance.
    if not hasattr(metadata, 'views'):
        metadata.views = {}
    metadata.views[name] = table(name)
    # Mirror the selectable's columns onto the proxy table so it can be
    # queried like a regular table.
    for c in selectable.c:
        c._make_proxy(metadata.views[name]) # pylint: disable=W0212
    # Emit CREATE VIEW after tables are created and DROP VIEW before they
    # are dropped.
    CreateView(name, selectable).execute_at('after-create', metadata)
    DropView(name).execute_at('before-drop', metadata)
    return metadata.views[name]
| 33.974026 | 90 | 0.69419 |
b415cd56b8b968d2043025ce5a7780e981f5488b | 960 | py | Python | msblog/models.py | designermanjeets/mscreativepixel | 8fefa48296c97fc541bc6d4f9ad8fa7048d0e377 | [
"Apache-2.0"
] | null | null | null | msblog/models.py | designermanjeets/mscreativepixel | 8fefa48296c97fc541bc6d4f9ad8fa7048d0e377 | [
"Apache-2.0"
] | null | null | null | msblog/models.py | designermanjeets/mscreativepixel | 8fefa48296c97fc541bc6d4f9ad8fa7048d0e377 | [
"Apache-2.0"
] | null | null | null | from django.db import models
from datetime import datetime
import string, random
import uuid
# Create your models here.
| 28.235294 | 114 | 0.660417 |
b415f8911ff14da18af621c103440493a6703472 | 1,281 | py | Python | Practical/Easy/HSV color wheel/colorwheel.py | saintwithataint/Pro-g-rammingChallenges4 | 3f720a375b89ee289237819c2dc89226634b7a5b | [
"Apache-2.0"
] | 1 | 2022-03-16T16:47:22.000Z | 2022-03-16T16:47:22.000Z | Practical/Easy/HSV color wheel/colorwheel.py | saintwithataint/Pro-g-rammingChallenges4 | 3f720a375b89ee289237819c2dc89226634b7a5b | [
"Apache-2.0"
] | null | null | null | Practical/Easy/HSV color wheel/colorwheel.py | saintwithataint/Pro-g-rammingChallenges4 | 3f720a375b89ee289237819c2dc89226634b7a5b | [
"Apache-2.0"
] | 2 | 2022-02-02T18:02:03.000Z | 2022-03-16T16:47:34.000Z | import colour
import matplotlib.pyplot as plt
import numpy as np
COLOUR_STYLE = colour.plotting.colour_style()
COLOUR_STYLE.update(
{
"figure.figsize": (11, 11),
"legend.framealpha": colour.plotting.COLOUR_STYLE_CONSTANTS.opacity.low,
}
)
plt.style.use(COLOUR_STYLE)
plt.style.use("dark_background")
colour.utilities.describe_environment()
colour.utilities.filter_warnings(*[True] * 4)
COLOUR_WHEEL = colour_wheel(method="Nuke")
colour.plotting.plot_image(COLOUR_WHEEL)
| 26.6875 | 80 | 0.640125 |
b4162ac39dacfccdd55b041dd156a4ebc43907ba | 40,090 | py | Python | kojen/smgen.py | kohjaen/kojen | e61855e48617e691d1fa0ddac4fdabac6b6a1eff | [
"MIT"
] | 3 | 2020-07-12T08:17:42.000Z | 2022-02-11T15:44:49.000Z | kojen/smgen.py | kohjaen/kojen | e61855e48617e691d1fa0ddac4fdabac6b6a1eff | [
"MIT"
] | null | null | null | kojen/smgen.py | kohjaen/kojen | e61855e48617e691d1fa0ddac4fdabac6b6a1eff | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'eugene'
'''
MIT License
Copyright (c) 2015 Eugene Grobbelaar (email : koh.jaen@yahoo.de)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
'''
Step 1) Load template files to memory
Step 2) Search and replace these tags in memory (including filenames).
<<<NAMESPACE>>>
<<<STATEMACHINENAME>>> or <<<CLASSNAME>>>
<<<AUTHOR>>>
Step 3) Search for the following pairs of tags
<<<PER_STATE_BEGIN>>>
<<<PER_STATE_END>>>
<<<PER_EVENT_BEGIN>>>
<<<PER_EVENT_END>>>
<<<PER_ACTION_BEGIN>>>
<<<PER_ACTION_END>>>
<<<PER_ACTION_SIGNATURE_BEGIN>>>
<<<PER_ACTION_SIGNATURE_END>>>
<<<PER_GUARD_BEGIN>>>
<<<PER_GUARD_END>>>
and duplicate the following for each item, replacing each tag with the item name
<<<STATENAME>>>
<<<EVENTNAME>>>
<<<ACTIONNAME>>>
<<<GUARDNAME>>>
These need to be expanded for event structs
<<<EVENTSIGNATURE>>>
<<<EVENTMEMBERSINSTANTIATE>>>
<<<EVENTMEMBERSDECLARE>>>
When looping <<<ALPH>>> should increment from a through Z.
When looping <<<NUM>>> should increment from 1 through 10000.
When reading the transition table, first state name (top, left) should be set to the value for this tag : <<<STATE_0>>>
Then, the transition table needs to go here, following the rules.
<<<TTT_BEGIN>>>
<<<TTT_END>>>
or
<<<TTT_LITE_BEGIN>>>
<<<TTT_LITE_END>>>
or
<<<TTT_LITE_SML_BEGIN>>>
<<<TTT_LITE_SML_END>>>
# EMBEDDED SM SUPPORT.
Step 4) In each <<PER_XXX tag, there might be more expansion required. The following tags apply in this pass
<<<PER_EVENT_CURRENT_NEXT_STATE_BEGIN>>>
<<<PER_EVENT_NEXT_STATE_END>>>
and the following replacement tags will be correctly set
<<<EVENTSTATECURRENT>>>
<<<EVENTSTATENEXT>>>
Also, the original SM only allows a single state-based action to happen.
I want there to be several actions allowed in a State, based on several events valid in that state.
These tags provide for that.
<<<PER_STATE_ACTION_EVENT_BEGIN>>>
<<<PER_STATE_ACTION_EVENT_END>>>
and the following replacement tags will be correctly set
<<<PER_STATE_ACTION>>>
<<<PER_STATE_EVENT>>>
# END EMBEDDED SM SUPPORT.
'''
# One-shot replacement tags substituted directly in templates/filenames.
__TAG_AUTHOR__ = '<<<AUTHOR>>>'
__TAG_GROUP__ = '<<<GROUP>>>'
__TAG_BRIEF__ = '<<<BRIEF>>>'
__TAG_NAMESPACE__ = '<<<NAMESPACE>>>'
__TAG_SM_NAME__ = '<<<STATEMACHINENAME>>>'
__TAG_SM_NAME_UPPER__ = '<<<STATEMACHINENAMEUPPER>>>'
__TAG_CLASS_NAME__ = '<<<CLASSNAME>>>'
__TAG_PyIFGen_NAME__ = '<<<PYIFGENNAME>>>'

# Begin/end pairs: the enclosed template text is duplicated once per
# state / event / action / action-signature / guard.
__TAG_PS_BEGIN__ = "<<<PER_STATE_BEGIN>>>"
__TAG_PS_END__ = "<<<PER_STATE_END>>>"

__TAG_PE_BEGIN__ = "<<<PER_EVENT_BEGIN>>>"
__TAG_PE_END__ = "<<<PER_EVENT_END>>>"

__TAG_PA_BEGIN__ = "<<<PER_ACTION_BEGIN>>>"
__TAG_PA_END__ = "<<<PER_ACTION_END>>>"

__TAG_PASIG_BEGIN__ = "<<<PER_ACTION_SIGNATURE_BEGIN>>>"
__TAG_PASIG_END__ = "<<<PER_ACTION_SIGNATURE_END>>>"

__TAG_PG_BEGIN__ = "<<<PER_GUARD_BEGIN>>>"
__TAG_PG_END__ = "<<<PER_GUARD_END>>>"

# Event-struct expansion tags.
__TAG_EVENT_SIGNATURE__ = "<<<EVENTSIGNATURE>>>"
__TAG_EVENT_MEMBERINST__ = "<<<EVENTMEMBERSINSTANTIATE>>>"
__TAG_LITE_EVENT_MEMBERINST__ = "<<<EVENTMEMBERSLITEINSTANTIATE>>>"
__TAG_EVENT_MEMBERDECL__ = "<<<EVENTMEMBERSDECLARE>>>"

# Per-item name tags replaced inside the duplicated sections above.
__TAG_STATENAME__ = '<<<STATENAME>>>'
__TAG_EVENTNAME__ = '<<<EVENTNAME>>>'
__TAG_EVENTNAME_SMALL_CAMEL__ = '<<<EVENTNAMESMALLCAMEL>>>'
__TAG_ACTIONNAME__ = '<<<ACTIONNAME>>>'
__TAG_GUARDNAME__ = '<<<GUARDNAME>>>'

# Loop counters: ALPH cycles a..Z, NUM counts 1..10000 (see module docstring).
__TAG_ABC__ = '<<<ALPH>>>'
__TAG_123__ = '<<<NUM>>>'

# Transition-table tags: initial state plus the three table flavors.
__TAG_INIT_STATE__ = '<<<STATE_0>>>'
__TAG_TTT_BEGIN__ = '<<<TTT_BEGIN>>>'
__TAG_TTT_END___ = '<<<TTT_END>>>'
__TAG_TTT_LITE_BEGIN__ = '<<<TTT_LITE_BEGIN>>>'
__TAG_TTT_LITE_END__ = '<<<TTT_LITE_END>>>'
__TAG_TTT_LITE_SML_BEGIN__ = '<<<TTT_LITE_SML_BEGIN>>>'
__TAG_TTT_LITE_SML_END__ = '<<<TTT_LITE_SML_END>>>'

__TAG_DECLSPEC_DLL_EXPORT__ = "<<<DLL_EXPORT>>>"

# EMBEDDED SM SUPPORT.
__TAG_EVENT_CURNEX_ST_BEG__ = "<<<PER_EVENT_CURRENT_NEXT_STATE_BEGIN>>>"
__TAG_EVENT_CURNEX_ST_END__ = "<<<PER_EVENT_NEXT_STATE_END>>>"
__TAG_EVENT_ST_CUR__ = "<<<EVENTSTATECURRENT>>>"
__TAG_EVENT_ST_NXT__ = "<<<EVENTSTATENEXT>>>"
__TAG_PSAE_BEGIN__ = "<<<PER_STATE_ACTION_EVENT_BEGIN>>>"
__TAG_PSAE_END__ = "<<<PER_STATE_ACTION_EVENT_END>>>"
__TAG_PSAE_ACTION__ = "<<<PER_STATE_ACTION>>>"
__TAG_PSAE_EVENT__ = "<<<PER_STATE_EVENT>>>"
# END EMBEDDED SM SUPPORT.
# Python2 -> 3 shennanigans...try support both
try:
from interface_base import * # py2
except (ModuleNotFoundError, ImportError) as e:
from .interface_base import * # py3
try:
from .preservative import *
except (ModuleNotFoundError, ImportError) as e:
from preservative import *
try:
from .cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case
except (ModuleNotFoundError, ImportError) as e:
from cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case
try:
from LanguageCPP import LanguageCPP
except (ModuleNotFoundError, ImportError) as e:
from .LanguageCPP import LanguageCPP
# Model that describes a state machine.
# Transition Table Model uses State Machine Model to generate all code required for a working state machine.
| 50.301129 | 200 | 0.60464 |
b419bda7c8455defc3ecb61092c5f3412e12801a | 1,744 | py | Python | roku/discovery.py | metagrapher/python-roku | 0cd209ec94531e7c4c29ca7f6a41a6199374c206 | [
"BSD-3-Clause"
] | null | null | null | roku/discovery.py | metagrapher/python-roku | 0cd209ec94531e7c4c29ca7f6a41a6199374c206 | [
"BSD-3-Clause"
] | null | null | null | roku/discovery.py | metagrapher/python-roku | 0cd209ec94531e7c4c29ca7f6a41a6199374c206 | [
"BSD-3-Clause"
] | null | null | null | """
Code adapted from Dan Krause.
https://gist.github.com/dankrause/6000248
http://github.com/dankrause
"""
import socket
from http.client import HTTPResponse
from io import BytesIO
ST_DIAL = 'urn:dial-multiscreen-org:service:dial:1'
ST_ECP = 'roku:ecp'
def discover(timeout=2, retries=1, st=ST_ECP):
group = ('239.255.255.250', 1900)
message = '\r\n'.join([
'M-SEARCH * HTTP/1.1',
'HOST: {0}:{1}'.format(*group),
'MAN: "ssdp:discover"',
'ST: {st}', 'MX: 3', '', ''])
socket.setdefaulttimeout(timeout)
responses = {}
for _ in range(retries):
sock = socket.socket(
socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)
m = message.format(st=st)
sock.sendto(m.encode(), group)
while 1:
try:
rhttp = HTTPResponse(_FakeSocket(sock.recv(1024)))
rhttp.begin()
if rhttp.status == 200:
rssdp = SSDPResponse(rhttp)
responses[rssdp.location] = rssdp
except socket.timeout:
break
return responses.values()
| 26.830769 | 79 | 0.598624 |
b41a1df236c0501272e47ba309bb8f6eaa3a041a | 4,113 | py | Python | Approxilyzer/gem5/scripts/relyzer/run_gem5_gl.py | cornell-zhang/GLAIVE | 8e29ac621a95a25c19ccfeb5071a9d3595093ef7 | [
"BSD-3-Clause"
] | 10 | 2020-11-21T04:13:33.000Z | 2022-01-03T23:08:09.000Z | Approxilyzer/gem5/scripts/relyzer/run_gem5_gl.py | cornell-zhang/GLAIVE | 8e29ac621a95a25c19ccfeb5071a9d3595093ef7 | [
"BSD-3-Clause"
] | null | null | null | Approxilyzer/gem5/scripts/relyzer/run_gem5_gl.py | cornell-zhang/GLAIVE | 8e29ac621a95a25c19ccfeb5071a9d3595093ef7 | [
"BSD-3-Clause"
] | null | null | null | import os, sys
from argparse import ArgumentParser
from datetime import datetime as dt
from pprint import pprint as pp
import shutil, glob
#from pyfiglet import figlet_format, Figlet
import datetime
'''
python run_gem5_gl.py -a radix -l inst
python run_gem5_gl.py -a radix -l bit
'''
parser = ArgumentParser()
parser.add_argument('-a', "--apps", help='Target application names seperated by comma', \
dest='targetapp', required=True)
parser.add_argument('-l', "--info_level", help='Target application architecture', \
dest='info_level', default='bit')
args = parser.parse_args()
apps = app(args.targetapp)
level = args.info_level
#num = args.num_progs
src_dir = os.environ.get('GRAPHLEARN')
gem5_dir= os.environ.get('APPROXGEM5') + '/gem5/scripts/relyzer/'
dest_dir = os.environ.get('APPROXGEM5') + '/workloads/x86/apps/'
for app in apps:
app1 = app + '_' + level
os.chdir(gem5_dir)
if level == 'bit':
# cp result from src to dest
gl_src_file = src_dir + 'sdc_output' +'/' + app1 + '_post.txt'
gl_dest_file = dest_dir + app +'/' + app1 + '_post.txt'
cmd = 'cp ' + gl_src_file + ' ' + gl_dest_file
status = os.system(cmd)
if status != 0:
print('cp data in gl failure ' + app1)
exit(-1)
bit_rf_src_file = src_dir + 'sdc_output_ml_bit' +'/' + app1 + '_post_rf.txt'
bit_rf_dest_file = dest_dir + app +'/' + app1 + '_post_rf.txt'
cmd = 'cp ' + bit_rf_src_file + ' ' + bit_rf_dest_file
status = os.system(cmd)
if status != 0:
print('cp data in rf_bit faigem5_dirlure ' + app1)
exit(-1)
bit_mlpc_src_file = src_dir + 'sdc_output_ml_bit' +'/' + app1 + '_post_mlpc.txt'
bit_mlpc_dest_file = dest_dir + app +'/' + app1 + '_post_mlpc.txt'
cmd = 'cp ' + bit_mlpc_src_file + ' ' + bit_mlpc_dest_file
status = os.system(cmd)
if status != 0:
print('cp data in mlpc_bit failure ' + app1)
exit(-1)
#call sdc_comp
print('this is for %s comp_sdc under graph learning ' % app)
cmd = 'python comp_sdc.py ' + app + ' ' + 'x86' + ' ' + 'gl'
status = os.system(cmd)
if status != 0:
print('sdc comp in gl_bit failure ' + app1)
exit(-1)
print('this is for %s comp_sdc under random forest learning ' % app)
cmd = 'python comp_sdc.py ' + app + ' ' + 'x86' + ' ' + 'rf'
status = os.system(cmd)
if status != 0:
print('sdc comp in rf_bit failure ' + app1)
exit(-1)
print('this is for %s comp_sdc under MLP learning ' % app)
cmd = 'python comp_sdc.py ' + app + ' ' + 'x86' + ' ' + 'mlpc'
status = os.system(cmd)
if status != 0:
print('sdc comp in mlpc_bit failure ' + app1)
exit(-1)
# call coverage_comp
log_file = src_dir + 'glog/' + app + '.log'
cmd = 'python sdc_coverage.py ' + app + ' ' + '5' + ' ' + '105' + ' > ' + log_file
status = os.system(cmd)
if status != 0:
print('coverage comp for all methods failure ' + app)
exit(-1)
elif level == 'inst':
inst_rf_src_file = src_dir + 'sdc_output_classic' +'/' + app1 + '_rf.sdclist'
inst_rf_dest_file = dest_dir + app +'/' + app1 + '_rf.sdclist'
cmd = 'cp ' + inst_rf_src_file + ' ' + inst_rf_dest_file
status = os.system(cmd)
if status != 0:
print('cp data in inst_rf failure ' + app1)
exit(-1)
inst_svm_src_file = src_dir + 'sdc_output_classic' +'/' + app1 + '_svm.sdclist'
inst_svm_dest_file = dest_dir + app +'/' + app1 + '_svm.sdclist'
cmd = 'cp ' + inst_svm_src_file + ' ' + inst_svm_dest_file
status = os.system(cmd)
if status != 0:
print('cp data in inst_svm failure ' + app1)
exit(-1)
| 32.904 | 90 | 0.556042 |
b41ac0fb4f1e55fdca39a67f5c6756119ab70fed | 68 | py | Python | onnxsim/__init__.py | Wheest/onnx-simplifier | 70e3b52aadbc73fe01029dc7ba0d4965d8fc1a8c | [
"Apache-2.0"
] | 1,977 | 2019-04-01T10:48:18.000Z | 2022-03-31T07:43:03.000Z | onnxsim/__init__.py | fedral/onnx-simplifier | 70e3b52aadbc73fe01029dc7ba0d4965d8fc1a8c | [
"Apache-2.0"
] | 167 | 2019-05-03T08:21:15.000Z | 2022-03-31T10:21:03.000Z | onnxsim/__init__.py | fedral/onnx-simplifier | 70e3b52aadbc73fe01029dc7ba0d4965d8fc1a8c | [
"Apache-2.0"
] | 251 | 2019-04-01T12:21:42.000Z | 2022-03-30T18:14:39.000Z | from onnxsim.onnx_simplifier import simplify
__version__ = '0.0.0'
| 17 | 44 | 0.794118 |
b41c9702fa909cdc15c31981b7aeb56a1df4c9bb | 534 | py | Python | src/commands/__init__.py | lysol/lvlss | ca068de516159be732d2cb8c4752dee4f4ef2e09 | [
"MIT"
] | null | null | null | src/commands/__init__.py | lysol/lvlss | ca068de516159be732d2cb8c4752dee4f4ef2e09 | [
"MIT"
] | null | null | null | src/commands/__init__.py | lysol/lvlss | ca068de516159be732d2cb8c4752dee4f4ef2e09 | [
"MIT"
] | null | null | null | from quit import Quit
from set_name import SetName
from who import Who
from say import Say
from look import Look
from go import Go
from take import Take
from inventory import Inventory
from drop import Drop
from make import Make
from landfill import Landfill
from item_info import ItemInfo
from script import SetScript, GetScript
from image_editing import ImageEditing
all_commands = (Quit, SetName, Who, Say, Look,
Go, Take, Inventory, Drop, Make, Landfill,
SetScript, GetScript, ItemInfo, ImageEditing)
| 28.105263 | 50 | 0.773408 |
b41db3bb0788a43b8d82ec7b22eb82e644666c44 | 2,141 | py | Python | Softmax.py | tranbamanh229289/Machine-and-Data-mining- | b43a3815b74365e6e5b05b49bb92f3db4606ffca | [
"Apache-2.0"
] | null | null | null | Softmax.py | tranbamanh229289/Machine-and-Data-mining- | b43a3815b74365e6e5b05b49bb92f3db4606ffca | [
"Apache-2.0"
] | null | null | null | Softmax.py | tranbamanh229289/Machine-and-Data-mining- | b43a3815b74365e6e5b05b49bb92f3db4606ffca | [
"Apache-2.0"
] | null | null | null | import Common
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
RATIO = 0.8
EPOCHS = 500
LEARN_RATE = 0.01
INDENTIFICATION_RATE = 0.6
# Read training data
X_train, Y_train, X_test, Y_test,scale_train,scale_test = Common.process(RATIO)
X_train, Y_train, X_test, Y_test =preprocessing(X_train,Y_train ,X_test ,Y_test)
# Cross Entropy
W = np.zeros((5, 3))
W, loss = gradient(Y_train, X_train, W, LEARN_RATE, EPOCHS)
acc, Y_predict = accuracy(W, X_test ,Y_test, INDENTIFICATION_RATE)
X_train=Common.inverse(scale_train ,X_train[:,:-1])
X_test=Common.inverse(scale_test,X_test[:,:-1])
graph_cost(loss, EPOCHS)
Common.graph_accuracy(X_test, Y_test, Y_predict)
print("Accuracy :")
print(acc * 100, "%")
| 29.328767 | 80 | 0.652032 |
b41e6039b9544ca2bf93ee054b91393cabc444ec | 1,343 | py | Python | Wallpaper change.py | Arbazkhan4712/Wallpaper-Changer-using-Python | a221443bc7e7b5410f06653fa741b9d7af0fe10f | [
"MIT"
] | 4 | 2020-04-17T06:39:23.000Z | 2021-12-25T11:05:16.000Z | Wallpaper change.py | Arbazkhan4712/Wallpaper-Changer-using-Python | a221443bc7e7b5410f06653fa741b9d7af0fe10f | [
"MIT"
] | null | null | null | Wallpaper change.py | Arbazkhan4712/Wallpaper-Changer-using-Python | a221443bc7e7b5410f06653fa741b9d7af0fe10f | [
"MIT"
] | 3 | 2020-04-03T12:36:20.000Z | 2020-06-06T15:12:04.000Z | import ctypes
import os
import time
from pynput.keyboard import Key,Controller
import Bing
if __name__=='__main__':
main()
| 26.86 | 97 | 0.581534 |
b41e78f19f2060ee9b4a3efdc51b5e3c612a3ca4 | 968 | py | Python | tests/sensitivity/sf2/sf2_test.py | vic-c137/mpi-boids-simulation | a822f20f5c1cd7cd2a6261a53adeb24e2c0115ec | [
"Apache-2.0"
] | null | null | null | tests/sensitivity/sf2/sf2_test.py | vic-c137/mpi-boids-simulation | a822f20f5c1cd7cd2a6261a53adeb24e2c0115ec | [
"Apache-2.0"
] | null | null | null | tests/sensitivity/sf2/sf2_test.py | vic-c137/mpi-boids-simulation | a822f20f5c1cd7cd2a6261a53adeb24e2c0115ec | [
"Apache-2.0"
] | null | null | null | # Import statements
import subprocess
from os import system
# Variable declarations
np = "10"
cexe = "./Boids"
nboids = "50"
nloops = "500"
k = "7"
maxv = "10"
acc = "1.25"
width = "1000"
height = "1000"
sf1 = "1"
sf2 = "32"
min = "50"
sf3 = "8"
sf4 = "10"
dataPath = "./data/"
jexe = "BoidModelTest"
bdata = "boid_data.boid"
# Test calls
collection = [0.125, 0.25, 0.5, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 1048576]
for i in collection:
print "Running test %s" % (str(i))
boidData = "run"+str(i)+".boid"
gif = "run"+str(i)+".gif"
sf2 = str(i)
subprocess.call("mpirun -np " + np +" "+ cexe +" "+ nboids +" "+ nloops +" "+ k +" "+ maxv +" "+ acc +" "+ width +" "+ height +" "+ sf1 +" "+ sf2 +" "+ min +" "+ sf3 +" "+ sf4 + " > " + dataPath + boidData, shell=True)
subprocess.call("java " + jexe + " " + gif + " " + boidData, shell=True)
system('gnuplot ./data/boid_script.gp') | 31.225806 | 220 | 0.558884 |
b41e7a6675758027f59252fdd90ad0a28c111058 | 976 | py | Python | flask_start/flask_start/public/email.py | kostekci/flask_start | fa279fc8907aff9868e2596f4ed9c4d9428d2f75 | [
"MIT"
] | null | null | null | flask_start/flask_start/public/email.py | kostekci/flask_start | fa279fc8907aff9868e2596f4ed9c4d9428d2f75 | [
"MIT"
] | 95 | 2021-09-13T21:23:12.000Z | 2022-03-31T21:22:32.000Z | flask_start/flask_start/public/email.py | kostekci/flask_start | fa279fc8907aff9868e2596f4ed9c4d9428d2f75 | [
"MIT"
] | null | null | null | from flask_mail import Message
from flask import render_template
from flask_start.extensions import mail
'''
from threading import Thread
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
'''
| 32.533333 | 75 | 0.646516 |
b41f08666a2d2b54abb8df40e1f44d9b70d9644a | 7,784 | py | Python | demo/trace_model.py | furkankirac/maskrcnn-benchmark | a348dc36600e577c3ba569320f3a6a8e15986f72 | [
"MIT"
] | null | null | null | demo/trace_model.py | furkankirac/maskrcnn-benchmark | a348dc36600e577c3ba569320f3a6a8e15986f72 | [
"MIT"
] | null | null | null | demo/trace_model.py | furkankirac/maskrcnn-benchmark | a348dc36600e577c3ba569320f3a6a8e15986f72 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from __future__ import division
import os
import numpy
from io import BytesIO
from matplotlib import pyplot
import requests
import torch
from PIL import Image
from maskrcnn_benchmark.config import cfg
from predictor import COCODemo
from maskrcnn_benchmark.structures.image_list import ImageList
if __name__ == "__main__":
# load config from file and command-line arguments
project_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
cfg.merge_from_file(
os.path.join(project_dir,
"configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml"))
cfg.merge_from_list(["MODEL.DEVICE", "cpu"])
cfg.freeze()
# prepare object that handles inference plus adds predictions on top of image
coco_demo = COCODemo(
cfg,
confidence_threshold=0.7,
show_mask_heatmaps=False,
masks_per_dim=2,
min_image_size=480,
)
def process_image_with_traced_model(image):
original_image = image
if coco_demo.cfg.DATALOADER.SIZE_DIVISIBILITY:
assert (image.size(0) % coco_demo.cfg.DATALOADER.SIZE_DIVISIBILITY == 0
and image.size(1) % coco_demo.cfg.DATALOADER.SIZE_DIVISIBILITY == 0)
boxes, labels, masks, scores = traced_model(image)
# todo: make this in one large thing
result_image = combine_masks(original_image, labels, masks, scores, boxes, 0.5, 1, rectangle=True)
return result_image
def fetch_image(url):
response = requests.get(url)
return Image.open(BytesIO(response.content)).convert("RGB")
if __name__ == "__main__":
pil_image = fetch_image(
url="http://farm3.staticflickr.com/2469/3915380994_2e611b1779_z.jpg")
# convert to BGR format
image = torch.from_numpy(numpy.array(pil_image)[:, :, [2, 1, 0]])
original_image = image
if coco_demo.cfg.DATALOADER.SIZE_DIVISIBILITY:
assert (image.size(0) % coco_demo.cfg.DATALOADER.SIZE_DIVISIBILITY == 0
and image.size(1) % coco_demo.cfg.DATALOADER.SIZE_DIVISIBILITY == 0)
for p in coco_demo.model.parameters():
p.requires_grad_(False)
traced_model = torch.jit.trace(single_image_to_top_predictions, (image,))
end_to_end_model.save('end_to_end_model.pt')
result_image = process_image_with_traced_model(original_image)
# self.show_mask_heatmaps not done
pyplot.imshow(result_image[:, :, [2, 1, 0]])
pyplot.show()
# second image
image2 = fetch_image(
url='http://farm4.staticflickr.com/3153/2970773875_164f0c0b83_z.jpg')
image2 = image2.resize((640, 480), Image.BILINEAR)
image2 = torch.from_numpy(numpy.array(image2)[:, :, [2, 1, 0]])
result_image2 = process_image_with_traced_model(image2)
# self.show_mask_heatmaps not done
pyplot.imshow(result_image2[:, :, [2, 1, 0]])
pyplot.show()
| 41.185185 | 163 | 0.654034 |
b42110e69fbba6f3cc1175f605afe65f09844634 | 5,211 | py | Python | validation_tests/analytical_exact/river_at_rest_varying_topo_width/numerical_varying_width.py | samcom12/anuga_core | f4378114dbf02d666fe6423de45798add5c42806 | [
"Python-2.0",
"OLDAP-2.7"
] | 136 | 2015-05-07T05:47:43.000Z | 2022-02-16T03:07:40.000Z | validation_tests/analytical_exact/river_at_rest_varying_topo_width/numerical_varying_width.py | samcom12/anuga_core | f4378114dbf02d666fe6423de45798add5c42806 | [
"Python-2.0",
"OLDAP-2.7"
] | 184 | 2015-05-03T09:27:54.000Z | 2021-12-20T04:22:48.000Z | validation_tests/analytical_exact/river_at_rest_varying_topo_width/numerical_varying_width.py | samcom12/anuga_core | f4378114dbf02d666fe6423de45798add5c42806 | [
"Python-2.0",
"OLDAP-2.7"
] | 70 | 2015-03-18T07:35:22.000Z | 2021-11-01T07:07:29.000Z | """Simple water flow example using ANUGA
Water driven up a linear slope and time varying boundary,
similar to a beach environment
"""
#------------------------------------------------------------------------------
# Import necessary modules
#------------------------------------------------------------------------------
import sys
import anuga
from anuga import myid, finalize, distribute
from anuga import Domain as Domain
from math import cos
from numpy import zeros, ones, array, interp, polyval, ones_like, zeros_like
from numpy import where, logical_and
from time import localtime, strftime, gmtime
from scipy.interpolate import interp1d
from anuga.geometry.polygon import inside_polygon, is_inside_triangle
#from balanced_dev import *
#-------------------------------------------------------------------------------
# Copy scripts to time stamped output directory and capture screen
# output to file
#-------------------------------------------------------------------------------
time = strftime('%Y%m%d_%H%M%S',localtime())
#output_dir = 'varying_width'+time
output_dir = '.'
output_file = 'varying_width'
#anuga.copy_code_files(output_dir,__file__)
#start_screen_catcher(output_dir+'_')
args = anuga.get_args()
alg = args.alg
verbose = args.verbose
#------------------------------------------------------------------------------
# Setup domain
#------------------------------------------------------------------------------
dx = 1.
dy = dx
L = 1500.
W = 60.
#===============================================================================
# Create sequential domain
#===============================================================================
if myid == 0:
# structured mesh
points, vertices, boundary = anuga.rectangular_cross(int(L/dx), int(W/dy), L, W, (0.,-W/2.))
#domain = anuga.Domain(points, vertices, boundary)
domain = Domain(points, vertices, boundary)
domain.set_name(output_file)
domain.set_datadir(output_dir)
#------------------------------------------------------------------------------
# Setup Algorithm, either using command line arguments
# or override manually yourself
#------------------------------------------------------------------------------
domain.set_flow_algorithm(alg)
#------------------------------------------------------------------------------
# Setup initial conditions
#------------------------------------------------------------------------------
domain.set_quantity('friction', 0.0)
domain.set_quantity('stage', 12.0)
XX = array([0.,50.,100.,150.,250.,300.,350.,400.,425.,435.,450.,470.,475.,500.,
505.,530.,550.,565.,575.,600.,650.,700.,750.,800.,820.,900.,950.,
1000.,1500.])
ZZ = array([0.,0.,2.5,5.,5.,3.,5.,5.,7.5,8.,9.,9.,9.,9.1,9.,9.,6.,5.5,5.5,5.,
4.,3.,3.,2.3,2.,1.2,0.4,0.,0.])
WW = array([40.,40.,30.,30.,30.,30.,25.,25.,30.,35.,35.,40.,40.,40.,45.,45.,50.,
45.,40.,40.,30.,40.,40.,5.,40.,35.,25.,40.,40.])/2.
depth = interp1d(XX, ZZ)
width = interp1d(XX, WW)
domain.set_quantity('elevation', bed_elevation)
else:
domain = None
#===========================================================================
# Create Parallel domain
#===========================================================================
domain = distribute(domain)
#-----------------------------------------------------------------------------
# Setup boundary conditions
#------------------------------------------------------------------------------
from math import sin, pi, exp
Br = anuga.Reflective_boundary(domain) # Solid reflective wall
#Bt = anuga.Transmissive_boundary(domain) # Continue all values on boundary
#Bd = anuga.Dirichlet_boundary([1,0.,0.]) # Constant boundary values
# Associate boundary tags with boundary objects
domain.set_boundary({'left': Br, 'right': Br, 'top': Br, 'bottom': Br})
#------------------------------------------------------------------------------
# Produce a documentation of parameters
#------------------------------------------------------------------------------
if myid == 0:
parameter_file=open('parameters.tex', 'w')
parameter_file.write('\\begin{verbatim}\n')
from pprint import pprint
pprint(domain.get_algorithm_parameters(),parameter_file,indent=4)
parameter_file.write('\\end{verbatim}\n')
parameter_file.close()
#------------------------------------------------------------------------------
# Evolve system through time
#------------------------------------------------------------------------------
import time
t0 = time.time()
for t in domain.evolve(yieldstep = 0.1, finaltime = 5.0):
#print(domain.timestepping_statistics(track_speeds=True))
if myid == 0 and verbose: print(domain.timestepping_statistics())
#vis.update()
if myid == 0 and verbose: print('That took %s sec' % str(time.time()-t0))
domain.sww_merge(delete_old=True)
finalize()
| 36.440559 | 96 | 0.459797 |
b424151af9b357850be4c70639941f09ba348b96 | 253 | py | Python | src/temperature/urls.py | JohanGovers/home-mon-server | e22ee05508597f11b313f8fa600a4867ad15f759 | [
"MIT"
] | null | null | null | src/temperature/urls.py | JohanGovers/home-mon-server | e22ee05508597f11b313f8fa600a4867ad15f759 | [
"MIT"
] | null | null | null | src/temperature/urls.py | JohanGovers/home-mon-server | e22ee05508597f11b313f8fa600a4867ad15f759 | [
"MIT"
] | null | null | null | from django.conf.urls import patterns, url
from temperature import views
urlpatterns = patterns('',
url(r'^$', views.index, name='index'),
url(r'^save_temp_reading$', views.save_temp_reading, name='save_temp_reading'),
)
| 31.625 | 88 | 0.660079 |
b425096bf56f11b8a01b6bd3c09874f67758b609 | 5,767 | py | Python | FictionTools/amitools/amitools/binfmt/elf/BinFmtELF.py | polluks/Puddle-BuildTools | c1762d53a33002b62d8cffe3db129505a387bec3 | [
"BSD-2-Clause"
] | 38 | 2021-06-18T12:56:15.000Z | 2022-03-12T20:38:40.000Z | FictionTools/amitools/amitools/binfmt/elf/BinFmtELF.py | polluks/Puddle-BuildTools | c1762d53a33002b62d8cffe3db129505a387bec3 | [
"BSD-2-Clause"
] | 2 | 2021-06-20T16:28:12.000Z | 2021-11-17T21:33:56.000Z | FictionTools/amitools/amitools/binfmt/elf/BinFmtELF.py | polluks/Puddle-BuildTools | c1762d53a33002b62d8cffe3db129505a387bec3 | [
"BSD-2-Clause"
] | 6 | 2021-06-18T18:18:36.000Z | 2021-12-22T08:01:32.000Z | from amitools.binfmt.BinImage import *
from .ELFFile import *
from .ELF import *
from .ELFReader import ELFReader
from .DwarfDebugLine import DwarfDebugLine
# mini test
if __name__ == "__main__":
import sys
bf = BinFmtELF()
for a in sys.argv[1:]:
if bf.is_image(a):
print("loading", a)
bi = bf.load_image(a)
print(bi)
else:
print("NO ELF:", a)
| 31.686813 | 85 | 0.521415 |
b425e1b4a3766b7202ee32581542acc01753bfbd | 11,532 | py | Python | recordtransform.py | Andresfgomez970/Managing-.wav-files-in-python | 2bf344a3217efe9dc15349ef4be14f2e5cb53ace | [
"MIT"
] | null | null | null | recordtransform.py | Andresfgomez970/Managing-.wav-files-in-python | 2bf344a3217efe9dc15349ef4be14f2e5cb53ace | [
"MIT"
] | null | null | null | recordtransform.py | Andresfgomez970/Managing-.wav-files-in-python | 2bf344a3217efe9dc15349ef4be14f2e5cb53ace | [
"MIT"
] | null | null | null | import pyaudio
import wave
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.pylab as plt
from scipy.io import wavfile
import cmath as cm
from scipy.fftpack import fft
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from scipy.io.wavfile import write
from scipy import signal
def recordaudio(chunk,formato,Channels,Rate,Record_seconds,
wave_output_name):
'''Record and audio and get it as wave output.
chunk:
formato:
Channels:
Rate:
Record_seconds:
wave_output_name:
'''
p=pyaudio.PyAudio()
stream=p.open(format=formato,
channels=Channels,
rate=Rate,
input=True,
frames_per_buffer=chunk)
print("Recording..")
frames=[]
for i in range(0,int(Rate/chunk*Record_seconds)):
data=stream.read(chunk)
frames.append(data)
print("Done recording.")
stream.stop_stream()
stream.close()
p.terminate()
wf = wave.open(wave_output_name, 'wb')
wf.setnchannels(Channels)
wf.setsampwidth(p.get_sample_size(formato))
wf.setframerate(Rate)
wf.writeframes(b''.join(frames))
wf.close()
if __name__ == "__main__":
chunk=1024 #number of frames
formato=pyaudio.paInt16 #format of the record
Channels=2 #Number of channels to record (this alter data)
Rate=16040 #Number of frames per second
Record_seconds=38 #lenghth of the recording
wavename1="records/test1withegeneratednoise.wav" #output file name
fi,ff=0,20000
norm = 'yes'
wavename2 = "records/test1.wav"
### Example 1
print("\nThe transform of the file 'test1withegeneratednoise.wav' is \n shown:\n")
plotonlytransform(chunk,formato,Channels,Rate,Record_seconds,wavename1,fi,ff,norm)
### Example 2
print("\nThe transform of the file '3200.wav' is shown and also a \n zoom to the maximum value of the fourirer tranform:\n")
### This part measure a given frequency that is already in a wave format in the program; in
### addition a zoom is made to it with some tolerance
Rate=44100
Record_seconds=4.99
wavename2 = "records/3200.wav"
fi, ff = 0, 10000
zoomplotonlytransform(chunk,formato,Channels,Rate,Record_seconds,wavename2,fi,ff,norm)
### Example 3
### This part record with the computer microphone and after that
### show the fourier transform of the record
#You could change the paramters of the record that is going to be made
Record_seconds=5
wave_output_name = 'recorded.wav'
recordtransform(chunk,formato,Channels,Rate,Record_seconds,wave_output_name,fi,ff,norm)
### Example 4
###This part plot the transform of the two wave files and permits
### to compare the amplitues and the frequencies at the maximum
### amplitude
Record_seconds= 3.0
wavename1="records/1000.wav"
wavename2="records/1000t.wav"
ft = 3265
tol = 3
comparing(chunk,formato,Channels,Rate,Record_seconds,wavename1,
wavename2,fi,ff,norm,tol)
### Example 4
###This is basically the short fourier transform
### it is important to know that the algorithm
### chose as step time the nearer on to the one that
### you give that satisfy being a multiple of the
### recorded seconds.
wave_output_name = "records/1000.wav"
Record_seconds = 3
time = 0.1
Rate = 46080
f(wave_output_name,Record_seconds,time,Rate)
plt.show()
### Example 5
###This algorithm compare the Fourier transform given by python
### with one that I made, it is a way to test the the programed is
### expected to work with some cases at least, a further analysis
### could explain the differences (The graphs were scales for a value)
### chosen at hand.
wavename = 'records/3265.wav'
Record_seconds = 3
t, data, dt = getsignal(wavename,Record_seconds)
freq, dataft = Fourier1(t, data,dt)
data = data[1000:1500]
t = t[1000:1500]
w = np.arange(-np.pi/dt,np.pi/dt,2*np.pi/(len(t)*dt) )
t, ft = dft(data,w,t,1)
plt.plot(w/(2*np.pi),abs(ft.real)/abs(ft.real).sum()*(0.0169/0.0881) ,'b')
plt.plot(freq,abs(dataft.real)/abs(dataft.real).sum() ,'g')
plt.show()
| 31.508197 | 129 | 0.644034 |
b4266c4983e7f09a613d7773116f8f267c2d1a3a | 2,994 | py | Python | AllSidesScraper/allsides.py | Epicrider/polibalance | 88a0adf54d09baeac3dcad36ce119640d6aa990b | [
"MIT"
] | null | null | null | AllSidesScraper/allsides.py | Epicrider/polibalance | 88a0adf54d09baeac3dcad36ce119640d6aa990b | [
"MIT"
] | null | null | null | AllSidesScraper/allsides.py | Epicrider/polibalance | 88a0adf54d09baeac3dcad36ce119640d6aa990b | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup
import requests
from communityFeedback import *
from time import sleep
from rich.progress import track
import json
page = [
'https://www.allsides.com/media-bias/media-bias-ratings',
]
if __name__ == '__main__':
main()
| 32.193548 | 94 | 0.59352 |
b426f99a8bac6c3327cab3da97ce79ef51269da3 | 1,068 | py | Python | commands/calc.py | periodicaidan/dalton-cli | 6a83e1a2675e335bf807c43c4201d78e5b389837 | [
"MIT"
] | 2 | 2018-12-21T19:09:49.000Z | 2018-12-22T10:41:36.000Z | commands/calc.py | periodicaidan/dalton-cli | 6a83e1a2675e335bf807c43c4201d78e5b389837 | [
"MIT"
] | null | null | null | commands/calc.py | periodicaidan/dalton-cli | 6a83e1a2675e335bf807c43c4201d78e5b389837 | [
"MIT"
] | null | null | null | """
File: commands/calc.py
Purpose: Performs calculations in response to user input, and outputs the result
"""
from sys import argv
import click
from calculator import *
from models import History
from models.Config import Config
from help_menus import calc_help
| 31.411765 | 85 | 0.652622 |
b42826894cb5a72b4000d0d8ef3a13b2f541b2b5 | 3,271 | py | Python | aot/meta_triggers/metatrigger_treasure.py | jaycheungchunman/age-of-triggers | f2a75685a0b0cc9e26132d4f52b6ed2c4798f6b4 | [
"MIT"
] | null | null | null | aot/meta_triggers/metatrigger_treasure.py | jaycheungchunman/age-of-triggers | f2a75685a0b0cc9e26132d4f52b6ed2c4798f6b4 | [
"MIT"
] | null | null | null | aot/meta_triggers/metatrigger_treasure.py | jaycheungchunman/age-of-triggers | f2a75685a0b0cc9e26132d4f52b6ed2c4798f6b4 | [
"MIT"
] | null | null | null | from aot import *
from aot.model.trigger import *
from aot.model.condition import *
from aot.model.effect import *
from aot.meta_triggers.metatrigger import MetaTrigger
from aot.model.enums.resource import EnumResource
from aot.model.enums.player import PlayerEnum
from aot.model.enums.unit import UnitConstant, UnitType
| 45.430556 | 101 | 0.551819 |
b4283b91c4a94a15dbf38eab20ef16e0e0641f20 | 2,625 | py | Python | agent/lm_agent/server_interfaces/lsdyna.py | omnivector-solutions/license-manager | 9eb1e4569d692aef83a2388096e7413bc010be61 | [
"MIT"
] | 2 | 2020-11-15T22:54:39.000Z | 2022-02-15T07:58:55.000Z | agent/lm_agent/server_interfaces/lsdyna.py | omnivector-solutions/license-manager | 9eb1e4569d692aef83a2388096e7413bc010be61 | [
"MIT"
] | 2 | 2022-02-18T19:36:45.000Z | 2022-03-16T23:07:44.000Z | agent/lm_agent/server_interfaces/lsdyna.py | omnivector-solutions/license-manager | 9eb1e4569d692aef83a2388096e7413bc010be61 | [
"MIT"
] | null | null | null | """LS-Dyna license server interface."""
import typing
from lm_agent.config import settings
from lm_agent.exceptions import LicenseManagerBadServerOutput
from lm_agent.parsing import lsdyna
from lm_agent.server_interfaces.license_server_interface import LicenseReportItem, LicenseServerInterface
from lm_agent.server_interfaces.utils import run_command
| 38.602941 | 105 | 0.694476 |
b428942d04da784eb0b105b8727b2b0340163593 | 2,634 | py | Python | examples/gan/gan_embeddings.py | ojmakhura/DIGITS | f34e62c245054b51ea51fcb8949d2ca777f162d1 | [
"BSD-3-Clause"
] | null | null | null | examples/gan/gan_embeddings.py | ojmakhura/DIGITS | f34e62c245054b51ea51fcb8949d2ca777f162d1 | [
"BSD-3-Clause"
] | null | null | null | examples/gan/gan_embeddings.py | ojmakhura/DIGITS | f34e62c245054b51ea51fcb8949d2ca777f162d1 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
import argparse
import os
import pickle
import shutil
import numpy as np
import PIL.Image
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
TB_DIR = os.path.join(os.getcwd(), "gan-tb")
SPRITE_IMAGE_FILENAME = os.path.join(TB_DIR, "sprite.png")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Inference tool - DIGITS')
# Positional arguments
parser.add_argument(
'embeddings_file',
help='Embeddings pickle file')
args = vars(parser.parse_args())
try:
save_tb_embeddings(
args['embeddings_file'],
)
except Exception as e:
print(('%s: %s' % (type(e).__name__, e.message)))
raise
| 29.595506 | 88 | 0.678056 |
b42ac51788c29cf80e7fbe118ac8d2f98639006c | 30 | py | Python | punch_version.py | joshua-s/punch | c29751844ecf654cc21966a14842e8165e0bc300 | [
"ISC"
] | null | null | null | punch_version.py | joshua-s/punch | c29751844ecf654cc21966a14842e8165e0bc300 | [
"ISC"
] | null | null | null | punch_version.py | joshua-s/punch | c29751844ecf654cc21966a14842e8165e0bc300 | [
"ISC"
] | null | null | null | major = 1
minor = 4
patch = 5
| 7.5 | 9 | 0.6 |
b42bbf006e731b294f9bc03e50361e650c57e4a5 | 97 | py | Python | 17.py | yonghuatang/python | 6d53bf2f1e75ac03b07eb56ff83e26d8b5155bb1 | [
"MIT"
] | null | null | null | 17.py | yonghuatang/python | 6d53bf2f1e75ac03b07eb56ff83e26d8b5155bb1 | [
"MIT"
] | null | null | null | 17.py | yonghuatang/python | 6d53bf2f1e75ac03b07eb56ff83e26d8b5155bb1 | [
"MIT"
] | null | null | null | from datetime import date
now = date.today()
print('The date today is', now, now.strftime("%A"))
| 24.25 | 51 | 0.701031 |
b42bc72a01713bbb619aec869a9dad62431b9ce2 | 4,613 | py | Python | pyxtal/miscellaneous/from_ase_molecule.py | ubikpt/PyXtal | 32da046a2bde542279824d6377aea116b679a2e7 | [
"MIT"
] | 127 | 2018-09-21T22:27:17.000Z | 2022-03-30T21:11:49.000Z | pyxtal/miscellaneous/from_ase_molecule.py | ubikpt/PyXtal | 32da046a2bde542279824d6377aea116b679a2e7 | [
"MIT"
] | 171 | 2018-08-06T07:10:24.000Z | 2022-03-29T00:59:53.000Z | pyxtal/miscellaneous/from_ase_molecule.py | ubikpt/PyXtal | 32da046a2bde542279824d6377aea116b679a2e7 | [
"MIT"
] | 50 | 2018-08-12T22:50:46.000Z | 2022-03-23T07:52:47.000Z | from pyxtal.molecule import *
from ase.build import molecule
from pymatgen.core import Molecule
def get_ase_mol(molname):
"""convert ase molecule to pymatgen style"""
ase_mol = molecule(molname)
pos = ase_mol.get_positions()
symbols = ase_mol.get_chemical_symbols()
return Molecule(symbols, pos)
if __name__ == "__main__":
# ---------------------------------------------------
for name in ["H2", "H2O", "HCl", "CS2", "C2Cl4", "PH3", "CH4", "C6H6", "C60"]:
mol = get_ase_mol(name)
pga = PointGroupAnalyzer(mol)
# Symmetrize the molecule using pymatgen
mol = pga.symmetrize_molecule()["sym_mol"]
pga = PointGroupAnalyzer(mol)
print(name, " has point group symmetry: ", pga.get_pointgroup())
# Check if orders of rotation are detected correctly
pg = pga.get_pointgroup()
for op in pg:
opa = OperationAnalyzer(op)
if opa.order == "irrational":
print(opa)
elif opa.order > 10:
print(opa)
# orientation_in_wyckoff_position(mol, sg, WP's index in sg)
# returns a list of orientations consistent with the WP's symmetry.
# We can choose any of these orientations at random using np.random.choice
# To use an orientation, do mol.apply_operation(orientation)
# Spacegroup 16, index 6 has .2. symmetry
# check 2 fold rotation
allowed = orientation_in_wyckoff_position(mol, 16, 6, randomize=True)
if allowed is not False:
print(
"Found " + str(len(allowed)) + " orientations for ",
name,
" with site symm 2",
)
for i, op in enumerate(allowed):
mo = deepcopy(mol)
mo.apply_operation(op)
filename = "xyz/" + name + "-" + str(i) + ".xyz"
mo.to(fmt="xyz", filename=filename)
# check reflection
allowed = orientation_in_wyckoff_position(mol, 25, 2, randomize=True)
if allowed is not False:
print(
"Found " + str(len(allowed)) + " orientations for ",
name,
" with site symm m",
)
for i, op in enumerate(allowed):
mo = deepcopy(mol)
mo.apply_operation(op)
filename = "xyz/" + name + "-" + str(i) + ".xyz"
mo.to(fmt="xyz", filename=filename)
# check 3 fold rotation
allowed = orientation_in_wyckoff_position(mol, 147, 4, randomize=True)
if allowed is not False:
print(
"Found " + str(len(allowed)) + " orientations for ",
name,
" with site symm 3",
)
for i, op in enumerate(allowed):
mo = deepcopy(mol)
mo.apply_operation(op)
filename = "xyz/" + name + "-" + str(i) + ".xyz"
mo.to(fmt="xyz", filename=filename)
# check -1
allowed = orientation_in_wyckoff_position(mol, 2, 2, randomize=True)
if allowed is not False:
print(
"Found " + str(len(allowed)) + " orientations for ",
name,
" with site symm -1",
)
for i, op in enumerate(allowed):
mo = deepcopy(mol)
mo.apply_operation(op)
filename = "xyz/" + name + "-" + str(i) + ".xyz"
mo.to(fmt="xyz", filename=filename)
# check 2/m
allowed = orientation_in_wyckoff_position(mol, 64, 6, randomize=True)
if allowed is not False:
print(
"Found " + str(len(allowed)) + " orientations for ",
name,
" with site symm 2/m",
)
for i, op in enumerate(allowed):
mo = deepcopy(mol)
mo.apply_operation(op)
filename = "xyz/" + name + "-" + str(i) + ".xyz"
mo.to(fmt="xyz", filename=filename)
# check 6
allowed = orientation_in_wyckoff_position(mol, 168, 3, randomize=True)
if allowed is not False:
print(
"Found " + str(len(allowed)) + " orientations for ",
name,
" with site symm 6",
)
for i, op in enumerate(allowed):
mo = deepcopy(mol)
mo.apply_operation(op)
filename = "xyz/" + name + "-" + str(i) + ".xyz"
mo.to(fmt="xyz", filename=filename)
| 36.904 | 82 | 0.506829 |
b42d69d014401c8b0ab94e331591c7f7f7c7c313 | 2,650 | py | Python | my_project/evolution_forces.py | Abhigyan-Mishra/Quantum-Animation | 675ac367461f6f2b3e0cee3a99db9e1541567e7a | [
"MIT"
] | null | null | null | my_project/evolution_forces.py | Abhigyan-Mishra/Quantum-Animation | 675ac367461f6f2b3e0cee3a99db9e1541567e7a | [
"MIT"
] | null | null | null | my_project/evolution_forces.py | Abhigyan-Mishra/Quantum-Animation | 675ac367461f6f2b3e0cee3a99db9e1541567e7a | [
"MIT"
] | null | null | null | from manimlib.imports import *
"""
TODO:
[ ] fix arrow head size
auto scale according to size?
have a default size, but, if the arrow size is too short, then shrink the head
[ ] slide the point according to the gradient
"""
| 24.311927 | 80 | 0.669057 |
b42dd19edf20cbabd2658c3670786d63ec526613 | 13,056 | py | Python | tests/python/tensor_graph/test/test_internal/performance/build_time_resnet.py | QinHan-Erin/AMOS | 634bf48edf4015e4a69a8c32d49b96bce2b5f16f | [
"Apache-2.0"
] | 22 | 2022-03-18T07:29:31.000Z | 2022-03-23T14:54:32.000Z | tests/python/tensor_graph/test/test_internal/performance/build_time_resnet.py | QinHan-Erin/AMOS | 634bf48edf4015e4a69a8c32d49b96bce2b5f16f | [
"Apache-2.0"
] | null | null | null | tests/python/tensor_graph/test/test_internal/performance/build_time_resnet.py | QinHan-Erin/AMOS | 634bf48edf4015e4a69a8c32d49b96bce2b5f16f | [
"Apache-2.0"
] | 2 | 2022-03-18T08:26:34.000Z | 2022-03-20T06:02:48.000Z | import tvm
import sys
import time
import numpy as np
from tvm.tensor_graph.testing.models import resnet
from tvm.tensor_graph.core import ForwardGraph, BackwardGraph, compute, \
GraphTensor, GraphOp, PyTIRGraph
from tvm.tensor_graph.nn import CELoss, SGD
from tvm.tensor_graph.core.schedule_generator import ConnectedSet, GPUScheduleBaseSet, \
GPUScheduleMasterBaseSet, form_connected_sets, GPUScheduleMasterSet, \
SingleCut, form_cut_candidates, LayoutTransform
from tvm.tensor_graph.core.utils import flatten_tir_graph
from tvm.tensor_graph.core.space import PrimitiveSpace, PartitionSpace, ForwardGraphSpace
from tvm.tensor_graph.core.tuner import RandomPrimitiveTuner, RandomPartitionTuner, RandomForwardTuner
from tvm.tensor_graph.core.scheduler import PrimitiveScheduler as Scheduler
from tvm.tensor_graph.core.scheduler import schedule_all
from tvm.tensor_graph.core.build_graph import build_all
from tvm.tensor_graph.core.runtime import run_all
if __name__ == "__main__":
with open("trace_resnet_subgraph.log", "w") as fout:
test2(file=fout)
# test3() | 32.157635 | 137 | 0.644455 |
b42f2c192af4e02268e2e461bdd471fe5bb67342 | 2,300 | py | Python | Python3/src/basicExample.py | emanuelen5/XPlaneConnect | 0d462ac306bc802a3b269227d3b98d2507abcd40 | [
"Unlicense"
] | 457 | 2015-01-02T14:21:11.000Z | 2022-03-27T02:56:47.000Z | Python3/src/basicExample.py | fseconomy/XPlaneConnect | 11a5f350bd6888873d293bf3c9f59b0fba1331c1 | [
"Unlicense"
] | 211 | 2015-03-24T16:41:33.000Z | 2022-03-27T18:36:11.000Z | Python3/src/basicExample.py | fseconomy/XPlaneConnect | 11a5f350bd6888873d293bf3c9f59b0fba1331c1 | [
"Unlicense"
] | 258 | 2015-01-01T17:02:27.000Z | 2022-03-31T19:36:03.000Z | from time import sleep
import xpc
if __name__ == "__main__":
ex() | 31.944444 | 81 | 0.541304 |
b42fa4f8536cb94842b8b435241c9e24e5dca076 | 27,419 | py | Python | venv/lib/python3.6/site-packages/pelican/readers.py | RyanHelgoth/CMPUT404-Lab5 | 82424bf5a9b80ff186bd69d224457c8b70a3bdf3 | [
"Apache-2.0"
] | null | null | null | venv/lib/python3.6/site-packages/pelican/readers.py | RyanHelgoth/CMPUT404-Lab5 | 82424bf5a9b80ff186bd69d224457c8b70a3bdf3 | [
"Apache-2.0"
] | null | null | null | venv/lib/python3.6/site-packages/pelican/readers.py | RyanHelgoth/CMPUT404-Lab5 | 82424bf5a9b80ff186bd69d224457c8b70a3bdf3 | [
"Apache-2.0"
] | null | null | null | import datetime
import logging
import os
import re
from collections import OrderedDict
from html import escape
from html.parser import HTMLParser
from io import StringIO
import docutils
import docutils.core
import docutils.io
from docutils.parsers.rst.languages import get_language as get_docutils_lang
from docutils.writers.html4css1 import HTMLTranslator, Writer
from pelican import rstdirectives # NOQA
from pelican.cache import FileStampDataCacher
from pelican.contents import Author, Category, Page, Tag
from pelican.plugins import signals
from pelican.utils import get_date, pelican_open, posixize_path
try:
from markdown import Markdown
except ImportError:
Markdown = False # NOQA
# Metadata processors have no way to discard an unwanted value, so we have
# them return this value instead to signal that it should be discarded later.
# This means that _filter_discardable_metadata() must be called on processed
# metadata dicts before use, to remove the items with the special value.
_DISCARD = object()
DUPLICATES_DEFINITIONS_ALLOWED = {
'tags': False,
'date': False,
'modified': False,
'status': False,
'category': False,
'author': False,
'save_as': False,
'url': False,
'authors': False,
'slug': False
}
METADATA_PROCESSORS = {
'tags': lambda x, y: ([
Tag(tag, y)
for tag in ensure_metadata_list(x)
] or _DISCARD),
'date': lambda x, y: get_date(x.replace('_', ' ')),
'modified': lambda x, y: get_date(x),
'status': lambda x, y: x.strip() or _DISCARD,
'category': lambda x, y: _process_if_nonempty(Category, x, y),
'author': lambda x, y: _process_if_nonempty(Author, x, y),
'authors': lambda x, y: ([
Author(author, y)
for author in ensure_metadata_list(x)
] or _DISCARD),
'slug': lambda x, y: x.strip() or _DISCARD,
}
logger = logging.getLogger(__name__)
def ensure_metadata_list(text):
"""Canonicalize the format of a list of authors or tags. This works
the same way as Docutils' "authors" field: if it's already a list,
those boundaries are preserved; otherwise, it must be a string;
if the string contains semicolons, it is split on semicolons;
otherwise, it is split on commas. This allows you to write
author lists in either "Jane Doe, John Doe" or "Doe, Jane; Doe, John"
format.
Regardless, all list items undergo .strip() before returning, and
empty items are discarded.
"""
if isinstance(text, str):
if ';' in text:
text = text.split(';')
else:
text = text.split(',')
return list(OrderedDict.fromkeys(
[v for v in (w.strip() for w in text) if v]
))
def _process_if_nonempty(processor, name, settings):
"""Removes extra whitespace from name and applies a metadata processor.
If name is empty or all whitespace, returns _DISCARD instead.
"""
name = name.strip()
return processor(name, settings) if name else _DISCARD
def _filter_discardable_metadata(metadata):
"""Return a copy of a dict, minus any items marked as discardable."""
return {name: val for name, val in metadata.items() if val is not _DISCARD}
def render_node_to_html(document, node, field_body_translator_class):
visitor = field_body_translator_class(document)
node.walkabout(visitor)
return visitor.astext()
def find_empty_alt(content, path):
"""Find images with empty alt
Create warnings for all images with empty alt (up to a certain number),
as they are really likely to be accessibility flaws.
"""
imgs = re.compile(r"""
(?:
# src before alt
<img
[^\>]*
src=(['"])(.*?)\1
[^\>]*
alt=(['"])\3
)|(?:
# alt before src
<img
[^\>]*
alt=(['"])\4
[^\>]*
src=(['"])(.*?)\5
)
""", re.X)
for match in re.findall(imgs, content):
logger.warning(
'Empty alt attribute for image %s in %s',
os.path.basename(match[1] + match[5]), path,
extra={'limit_msg': 'Other images have empty alt attributes'})
def parse_path_metadata(source_path, settings=None, process=None):
r"""Extract a metadata dictionary from a file's path
>>> import pprint
>>> settings = {
... 'FILENAME_METADATA': r'(?P<slug>[^.]*).*',
... 'PATH_METADATA':
... r'(?P<category>[^/]*)/(?P<date>\d{4}-\d{2}-\d{2})/.*',
... }
>>> reader = BaseReader(settings=settings)
>>> metadata = parse_path_metadata(
... source_path='my-cat/2013-01-01/my-slug.html',
... settings=settings,
... process=reader.process_metadata)
>>> pprint.pprint(metadata) # doctest: +ELLIPSIS
{'category': <pelican.urlwrappers.Category object at ...>,
'date': datetime.datetime(2013, 1, 1, 0, 0),
'slug': 'my-slug'}
"""
metadata = {}
dirname, basename = os.path.split(source_path)
base, ext = os.path.splitext(basename)
subdir = os.path.basename(dirname)
if settings:
checks = []
for key, data in [('FILENAME_METADATA', base),
('PATH_METADATA', source_path)]:
checks.append((settings.get(key, None), data))
if settings.get('USE_FOLDER_AS_CATEGORY', None):
checks.append(('(?P<category>.*)', subdir))
for regexp, data in checks:
if regexp and data:
match = re.match(regexp, data)
if match:
# .items() for py3k compat.
for k, v in match.groupdict().items():
k = k.lower() # metadata must be lowercase
if v is not None and k not in metadata:
if process:
v = process(k, v)
metadata[k] = v
return metadata
| 36.607477 | 79 | 0.581276 |
b4314fe64ec815899c36c9b326b930ecd497d54b | 4,017 | py | Python | xmuda/models/CP_v5.py | anhquancao/xmuda-extend | 4b670ec2f6766e3a624e81dbe5d97b209c1c4f76 | [
"Apache-2.0"
] | null | null | null | xmuda/models/CP_v5.py | anhquancao/xmuda-extend | 4b670ec2f6766e3a624e81dbe5d97b209c1c4f76 | [
"Apache-2.0"
] | null | null | null | xmuda/models/CP_v5.py | anhquancao/xmuda-extend | 4b670ec2f6766e3a624e81dbe5d97b209c1c4f76 | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from xmuda.models.DDR import Bottleneck3D
from xmuda.models.LMSCNet import SegmentationHead, ASPP
import numpy as np
from xmuda.models.modules import Process, Upsample, Downsample
import math
from xmuda.data.utils.preprocess import create_voxel_position
| 40.17 | 134 | 0.638536 |
b432caf11213235d03484242de9f5514f01637df | 10,511 | py | Python | gala/potential/potential/tests/helpers.py | ltlancas/gala | 2621bb599d67e74a85446abf72d5930ef70ca181 | [
"MIT"
] | 1 | 2021-10-14T03:36:15.000Z | 2021-10-14T03:36:15.000Z | gala/potential/potential/tests/helpers.py | ltlancas/gala | 2621bb599d67e74a85446abf72d5930ef70ca181 | [
"MIT"
] | null | null | null | gala/potential/potential/tests/helpers.py | ltlancas/gala | 2621bb599d67e74a85446abf72d5930ef70ca181 | [
"MIT"
] | null | null | null | # coding: utf-8
from __future__ import division, print_function
# Standard library
import time
# Third-party
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc import derivative
from astropy.extern.six.moves import cPickle as pickle
import pytest
# Project
from ..io import load
from ..core import CompositePotential
from ....units import UnitSystem, DimensionlessUnitSystem
from ....dynamics import PhaseSpacePosition
from ....integrate import LeapfrogIntegrator
| 38.785978 | 113 | 0.560936 |
b4343b1a76985ec5d57d6a76843b7a4f2ed671b3 | 9,677 | py | Python | main.py | ailzy/Reinforcement-learning-in-portfolio-management- | 6d850bf52637482636ed8336480343e0e4cef1bd | [
"MIT"
] | null | null | null | main.py | ailzy/Reinforcement-learning-in-portfolio-management- | 6d850bf52637482636ed8336480343e0e4cef1bd | [
"MIT"
] | null | null | null | main.py | ailzy/Reinforcement-learning-in-portfolio-management- | 6d850bf52637482636ed8336480343e0e4cef1bd | [
"MIT"
] | 1 | 2019-05-13T00:54:08.000Z | 2019-05-13T00:54:08.000Z | # -*- coding: utf-8 -*-
from argparse import ArgumentParser
import json
import time
import pandas as pd
import tensorflow as tf
import numpy as np
import math
from decimal import Decimal
import matplotlib.pyplot as plt
from agents.ornstein_uhlenbeck import OrnsteinUhlenbeckActionNoise
eps=10e-8
epochs=0
M=0
if __name__=="__main__":
main() | 36.516981 | 190 | 0.602563 |
b43620ea470685e6e28c7e7bc58a0b84c3272e13 | 7,365 | py | Python | packages/structural_dhcp_mriqc/structural_dhcp_mriqc/utils/fs2gif.py | amakropoulos/structural-pipeline-measures | 70e22f9ad94cc57e72e510576cfc3129da83f7fc | [
"Apache-2.0"
] | 2 | 2017-09-11T15:25:14.000Z | 2019-09-27T17:08:31.000Z | packages/structural_dhcp_mriqc/structural_dhcp_mriqc/utils/fs2gif.py | amakropoulos/structural-pipeline-measures | 70e22f9ad94cc57e72e510576cfc3129da83f7fc | [
"Apache-2.0"
] | 6 | 2019-08-22T06:29:45.000Z | 2021-09-19T18:59:46.000Z | packages/structural_dhcp_mriqc/structural_dhcp_mriqc/utils/fs2gif.py | amakropoulos/structural-pipeline-measures | 70e22f9ad94cc57e72e510576cfc3129da83f7fc | [
"Apache-2.0"
] | 1 | 2018-02-12T14:38:33.000Z | 2018-02-12T14:38:33.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: oesteban
# @Date: 2016-03-16 11:28:27
# @Last Modified by: oesteban
# @Last Modified time: 2016-04-04 13:50:50
"""
Batch export freesurfer results to animated gifs
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import os.path as op
import subprocess as sp
from shutil import rmtree
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
from tempfile import mkdtemp
from errno import EEXIST
import glob
from six import string_types
import numpy as np
import nibabel as nb
from skimage import exposure
def main():
"""Entry point"""
parser = ArgumentParser(description='Batch export freesurfer results to animated gifs',
formatter_class=RawTextHelpFormatter)
g_input = parser.add_argument_group('Inputs')
g_input.add_argument('-S', '--subjects-dir', action='store', default=os.getcwd())
g_input.add_argument('-s', '--subject-id', action='store')
g_input.add_argument('-t', '--temp-dir', action='store')
g_input.add_argument('--keep-temp', action='store_true', default=False)
g_input.add_argument('--zoom', action='store_true', default=False)
g_input.add_argument('--hist-eq', action='store_true', default=False)
g_outputs = parser.add_argument_group('Outputs')
g_outputs.add_argument('-o', '--output-dir', action='store', default='fs2gif')
opts = parser.parse_args()
if opts.temp_dir is None:
tmpdir = mkdtemp()
else:
tmpdir = op.abspath(opts.temp_dir)
try:
os.makedirs(tmpdir)
except OSError as exc:
if exc.errno != EEXIST:
raise exc
out_dir = op.abspath(opts.output_dir)
try:
os.makedirs(out_dir)
except OSError as exc:
if exc.errno != EEXIST:
raise exc
subjects_dir = op.abspath(opts.subjects_dir)
subject_list = opts.subject_id
if subject_list is None:
subject_list = [name for name in os.listdir(subjects_dir)
if op.isdir(os.path.join(subjects_dir, name))]
elif isinstance(subject_list, string_types):
if '*' not in subject_list:
subject_list = [subject_list]
else:
all_dirs = [op.join(subjects_dir, name) for name in os.listdir(subjects_dir)
if op.isdir(os.path.join(subjects_dir, name))]
pattern = glob.glob(op.abspath(op.join(subjects_dir, opts.subject_id)))
subject_list = list(set(pattern).intersection(set(all_dirs)))
environ = os.environ.copy()
environ['SUBJECTS_DIR'] = subjects_dir
# tcl_file = pkgr.resource_filename('structural_dhcp_mriqc', 'data/fsexport.tcl')
tcl_contents = """
SetOrientation 0
SetCursor 0 128 128 128
SetDisplayFlag 3 0
SetDisplayFlag 22 1
set i 0
"""
for sub_path in subject_list:
subid = op.basename(sub_path)
tmp_sub = op.join(tmpdir, subid)
try:
os.makedirs(tmp_sub)
except OSError as exc:
if exc.errno != EEXIST:
raise exc
niifile = op.join(tmp_sub, '%s.nii.gz') % subid
ref_file = op.join(sub_path, 'mri', 'T1.mgz')
sp.call(['mri_convert', op.join(sub_path, 'mri', 'norm.mgz'), niifile],
cwd=tmp_sub)
data = nb.load(niifile).get_data()
data[data > 0] = 1
# Compute brain bounding box
indexes = np.argwhere(data)
bbox_min = indexes.min(0)
bbox_max = indexes.max(0) + 1
center = np.average([bbox_min, bbox_max], axis=0)
if opts.hist_eq:
modnii = op.join(tmp_sub, '%s.nii.gz' % subid)
ref_file = op.join(tmp_sub, '%s.mgz' % subid)
img = nb.load(niifile)
data = exposure.equalize_adapthist(img.get_data(), clip_limit=0.03)
nb.Nifti1Image(data, img.get_affine(), img.get_header()).to_filename(modnii)
sp.call(['mri_convert', modnii, ref_file], cwd=tmp_sub)
if not opts.zoom:
# Export tiffs for left hemisphere
tcl_file = op.join(tmp_sub, '%s.tcl' % subid)
with open(tcl_file, 'w') as tclfp:
tclfp.write(tcl_contents)
tclfp.write('for { set slice %d } { $slice < %d } { incr slice } {' % (bbox_min[2], bbox_max[2]))
tclfp.write(' SetSlice $slice\n')
tclfp.write(' RedrawScreen\n')
tclfp.write(' SaveTIFF [format "%s/%s-' % (tmp_sub, subid) + '%03d.tif" $i]\n')
tclfp.write(' incr i\n')
tclfp.write('}\n')
tclfp.write('QuitMedit\n')
sp.call(['tkmedit', subid, 'T1.mgz', 'lh.pial', '-aux-surface', 'rh.pial', '-tcl', tcl_file], env=environ)
# Convert to animated gif
sp.call(['convert', '-delay', '10', '-loop', '0', '%s/%s-*.tif' % (tmp_sub, subid),
'%s/%s.gif' % (out_dir, subid)])
else:
# Export tiffs for left hemisphere
tcl_file = op.join(tmp_sub, 'lh-%s.tcl' % subid)
with open(tcl_file, 'w') as tclfp:
tclfp.write(tcl_contents)
tclfp.write('SetZoomLevel 2')
tclfp.write('for { set slice %d } { $slice < %d } { incr slice } {' % (bbox_min[2], bbox_max[2]))
tclfp.write(' SetZoomCenter %d %d $slice\n' % (center[0] + 30, center[1] - 10))
tclfp.write(' SetSlice $slice\n')
tclfp.write(' RedrawScreen\n')
tclfp.write(' SaveTIFF [format "%s/%s-lh-' % (tmp_sub, subid) + '%03d.tif" $i]\n')
tclfp.write(' incr i\n')
tclfp.write('}\n')
tclfp.write('QuitMedit\n')
sp.call(['tkmedit', subid, 'norm.mgz', 'lh.white', '-tcl', tcl_file], env=environ)
# Export tiffs for right hemisphere
tcl_file = op.join(tmp_sub, 'rh-%s.tcl' % subid)
with open(tcl_file, 'w') as tclfp:
tclfp.write(tcl_contents)
tclfp.write('SetZoomLevel 2')
tclfp.write('for { set slice %d } { $slice < %d } { incr slice } {' % (bbox_min[2], bbox_max[2]))
tclfp.write(' SetZoomCenter %d %d $slice\n' % (center[0] - 30, center[1] - 10))
tclfp.write(' SetSlice $slice\n')
tclfp.write(' RedrawScreen\n')
tclfp.write(' SaveTIFF [format "%s/%s-rh-' % (tmp_sub, subid) + '%03d.tif" $slice]\n')
tclfp.write(' incr i\n')
tclfp.write('}\n')
tclfp.write('QuitMedit\n')
sp.call(['tkmedit', subid, 'norm.mgz', 'rh.white', '-tcl', tcl_file], env=environ)
# Convert to animated gif
sp.call(['convert', '-delay', '10', '-loop', '0', '%s/%s-lh-*.tif' % (tmp_sub, subid),
'%s/%s-lh.gif' % (out_dir, subid)])
sp.call(['convert', '-delay', '10', '-loop', '0', '%s/%s-rh-*.tif' % (tmp_sub, subid),
'%s/%s-rh.gif' % (out_dir, subid)])
if not opts.keep_temp:
try:
rmtree(tmp_sub)
except:
pass
if __name__ == '__main__':
main()
| 39.810811 | 118 | 0.567549 |
b4366804d5c82535ca7d92caff9e07608cd7136b | 10,751 | py | Python | DE_DataBase.py | almirjgomes/DE_DataBaseConnect | 2a369d77498c4c6c42b7447871472e5c4320b2ff | [
"MIT"
] | null | null | null | DE_DataBase.py | almirjgomes/DE_DataBaseConnect | 2a369d77498c4c6c42b7447871472e5c4320b2ff | [
"MIT"
] | null | null | null | DE_DataBase.py | almirjgomes/DE_DataBaseConnect | 2a369d77498c4c6c42b7447871472e5c4320b2ff | [
"MIT"
] | null | null | null | import os
import sqlite3 as sq3
import cx_Oracle as ora
import pandas as pd
import psycopg2 as ps2
import mysql.connector as mysql
import sqlalchemy
# Reponsabilidades desta classe:
# Apenas se conectar a uma das bases de dados abaixo especificadas
# Bases conhecidas: SQLITE, ORACLE, MYSQL, POSTGRES
if __name__ == "__main__":
pass
| 40.878327 | 167 | 0.474467 |
b4372d11f9380b54abe868161855c4d8eb68fe8d | 3,301 | py | Python | peter_lists/blog/views.py | pvize1/peter_lists | 77e9f30cfc45f500e059b7b163db541335180332 | [
"MIT"
] | null | null | null | peter_lists/blog/views.py | pvize1/peter_lists | 77e9f30cfc45f500e059b7b163db541335180332 | [
"MIT"
] | 8 | 2021-05-12T05:53:42.000Z | 2022-03-31T04:08:18.000Z | peter_lists/blog/views.py | pvize1/peter_lists | 77e9f30cfc45f500e059b7b163db541335180332 | [
"MIT"
] | null | null | null | from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.views.generic import (
ListView,
DetailView,
CreateView,
UpdateView,
DeleteView,
)
from django.shortcuts import render
from django.db.models import Count
from django.db.models.functions import Trim, Lower
from django.urls import reverse_lazy
from .models import Blog
from .forms import EditBlogForm
# Create your views here.
| 29.212389 | 94 | 0.684035 |
b4378b3e91302a7b53287f43ef0ed313d4ff8c2f | 1,992 | py | Python | tests/test_pythonpath.py | browniebroke/pytest-srcpaths | c0bf4a9b521c8f7af029f9923b344936cf425bf1 | [
"MIT"
] | 26 | 2021-02-18T20:49:41.000Z | 2022-02-08T21:06:20.000Z | tests/test_pythonpath.py | browniebroke/pytest-srcpaths | c0bf4a9b521c8f7af029f9923b344936cf425bf1 | [
"MIT"
] | null | null | null | tests/test_pythonpath.py | browniebroke/pytest-srcpaths | c0bf4a9b521c8f7af029f9923b344936cf425bf1 | [
"MIT"
] | 2 | 2021-04-04T01:45:37.000Z | 2022-02-07T11:28:51.000Z | import sys
from typing import Generator
from typing import List
from typing import Optional
import pytest
from _pytest.pytester import Pytester
def test_clean_up_pythonpath(pytester: Pytester) -> None:
"""Test that the srcpaths plugin cleans up after itself."""
pytester.makefile(".ini", pytest="[pytest]\npythonpath=I_SHALL_BE_REMOVED\n")
pytester.makepyfile(test_foo="""def test_foo(): pass""")
before: Optional[List[str]] = None
after: Optional[List[str]] = None
result = pytester.runpytest_inprocess(plugins=[Plugin()])
assert result.ret == 0
assert before is not None
assert after is not None
assert any("I_SHALL_BE_REMOVED" in entry for entry in before)
assert not any("I_SHALL_BE_REMOVED" in entry for entry in after)
| 30.181818 | 81 | 0.676205 |
b4379f94d32e1eef87fdbc70ab371bde034c9874 | 1,735 | py | Python | coretemp.py | InScene/dht22-mqtt-daemon | 9a73715f4074f11222d1a6b263c12c897fadf0de | [
"MIT"
] | null | null | null | coretemp.py | InScene/dht22-mqtt-daemon | 9a73715f4074f11222d1a6b263c12c897fadf0de | [
"MIT"
] | null | null | null | coretemp.py | InScene/dht22-mqtt-daemon | 9a73715f4074f11222d1a6b263c12c897fadf0de | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
import paho.mqtt.client as mqtt
import time
import Adafruit_DHT
from configparser import ConfigParser
import json
config = ConfigParser(delimiters=('=', ))
config.read('config.ini')
sensor_type = config['sensor'].get('type', 'dht22').lower()
if sensor_type == 'dht22':
sensor = Adafruit_DHT.DHT22
elif sensor_type == 'dht11':
sensor = Adafruit_DHT.dht11
elif sensor_type == 'am2302':
sensor = Adafruit_DHT.AM2302
else:
raise Exception('Supported sensor types: DHT22, DHT11, AM2302')
pin = config['sensor'].get('pin', 10)
topic = config['mqtt'].get('topic', 'temperature/dht22')
decim_digits = config['sensor'].getint('decimal_digits', 2)
sleep_time = config['sensor'].getint('interval', 60)
user = config['mqtt'].get('user', 'guest')
password = config['mqtt'].get('password', 'guest')
# The callback for when the client receives a CONNACK response from the server.
client = mqtt.Client()
client.on_connect = on_connect
client.username_pw_set(user, password)
client.connect(config['mqtt'].get('hostname', 'homeassistant'),
config['mqtt'].getint('port', 1883),
config['mqtt'].getint('timeout', 60))
client.loop_start()
while True:
humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
if humidity is not None and temperature is not None:
data = {'temperature': round(temperature, decim_digits),
'humidity': round(humidity, decim_digits)}
client.publish(topic, json.dumps(data))
print('Published. Sleeping ...')
else:
print('Failed to get reading. Skipping ...')
time.sleep(sleep_time)
| 30.438596 | 79 | 0.688184 |
b437d795dd924c40c4d023f3c55940133611431e | 663 | py | Python | mythril/support/support_utils.py | step21/mythril | d26a68e5473a57bd38091e1a5cad96a2b4e2c2ab | [
"MIT"
] | null | null | null | mythril/support/support_utils.py | step21/mythril | d26a68e5473a57bd38091e1a5cad96a2b4e2c2ab | [
"MIT"
] | 21 | 2019-04-12T17:54:51.000Z | 2021-11-04T18:47:45.000Z | mythril/support/support_utils.py | step21/mythril | d26a68e5473a57bd38091e1a5cad96a2b4e2c2ab | [
"MIT"
] | 1 | 2021-09-06T03:14:58.000Z | 2021-09-06T03:14:58.000Z | """This module contains utility functions for the Mythril support package."""
from typing import Dict
| 28.826087 | 81 | 0.627451 |
b437ff845481fd16be2f8fc1d410e6c3c3a17c1d | 554 | py | Python | tests/functions/list/test_lists_map.py | sukovanej/mplisp | a3faf8c06936bcc5cde59899abf41a1b379090f5 | [
"MIT"
] | null | null | null | tests/functions/list/test_lists_map.py | sukovanej/mplisp | a3faf8c06936bcc5cde59899abf41a1b379090f5 | [
"MIT"
] | null | null | null | tests/functions/list/test_lists_map.py | sukovanej/mplisp | a3faf8c06936bcc5cde59899abf41a1b379090f5 | [
"MIT"
] | null | null | null | import unittest
import mplisp.evaluator as evaluator
| 22.16 | 50 | 0.534296 |
b4387eea371c6bde1ade7a6d0d94c1c04a7c6258 | 1,210 | py | Python | malpickle/main.py | erose1337/malpickle | 3c708426d5f5e33d3e232d77cbbfca0a955d6ebf | [
"MIT"
] | null | null | null | malpickle/main.py | erose1337/malpickle | 3c708426d5f5e33d3e232d77cbbfca0a955d6ebf | [
"MIT"
] | null | null | null | malpickle/main.py | erose1337/malpickle | 3c708426d5f5e33d3e232d77cbbfca0a955d6ebf | [
"MIT"
] | null | null | null | import argparse
from __init__ import insert_code
if __name__ == "__main__":
#test_insert_code()
main()
| 31.842105 | 121 | 0.680165 |
b43894ad3119624561e61e4cdbc634a63ac5df12 | 1,923 | py | Python | src/redis_lock/django_cache.py | suligap/python-redis-lock | 369e95bb5e26284ef0944e551f93d9f2596e5345 | [
"BSD-2-Clause"
] | null | null | null | src/redis_lock/django_cache.py | suligap/python-redis-lock | 369e95bb5e26284ef0944e551f93d9f2596e5345 | [
"BSD-2-Clause"
] | null | null | null | src/redis_lock/django_cache.py | suligap/python-redis-lock | 369e95bb5e26284ef0944e551f93d9f2596e5345 | [
"BSD-2-Clause"
] | null | null | null | from django.core.cache.backends.base import DEFAULT_TIMEOUT
from django_redis.cache import RedisCache as PlainRedisCache
from redis_lock import Lock
from redis_lock import reset_all
| 33.155172 | 92 | 0.598024 |
b438f353825f2b371f64bd83071ca8831b7f58ce | 3,510 | py | Python | nets/facenet.py | QiongWang-l/llfr | 00f62f03dd2964add1ff1b007292d06afff708f4 | [
"MIT"
] | null | null | null | nets/facenet.py | QiongWang-l/llfr | 00f62f03dd2964add1ff1b007292d06afff708f4 | [
"MIT"
] | null | null | null | nets/facenet.py | QiongWang-l/llfr | 00f62f03dd2964add1ff1b007292d06afff708f4 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
from torch.nn import functional as F
from torchvision.models.utils import load_state_dict_from_url
from nets.inception_resnetv1 import InceptionResnetV1
from nets.mobilenet import MobileNetV1
| 37.340426 | 189 | 0.61396 |
b439967634fbd815c14f34a574722d653f74e466 | 367 | py | Python | distributed_social_network/posts/migrations/0003_auto_20190308_2055.py | leevtori/CMPUT404-project | 52214288855ae4b3f05b8d17e67a2686debffb19 | [
"Apache-2.0"
] | null | null | null | distributed_social_network/posts/migrations/0003_auto_20190308_2055.py | leevtori/CMPUT404-project | 52214288855ae4b3f05b8d17e67a2686debffb19 | [
"Apache-2.0"
] | 51 | 2019-03-22T00:31:06.000Z | 2021-06-10T21:17:30.000Z | distributed_social_network/posts/migrations/0003_auto_20190308_2055.py | leevtori/CMPUT404-project | 52214288855ae4b3f05b8d17e67a2686debffb19 | [
"Apache-2.0"
] | 1 | 2019-02-08T01:33:57.000Z | 2019-02-08T01:33:57.000Z | # Generated by Django 2.1.7 on 2019-03-08 20:55
from django.db import migrations
| 19.315789 | 47 | 0.588556 |
b439fd956c9d132bc84b304fc1984cd145eb18b5 | 2,260 | py | Python | minify/migrations/0004_auto__del_unique_urlminify_short_url__add_unique_urlminify_short_url_s.py | djsan15/url-minifier | 00ff087dadc7e14015cc5640e135f8454afd11dc | [
"MIT"
] | null | null | null | minify/migrations/0004_auto__del_unique_urlminify_short_url__add_unique_urlminify_short_url_s.py | djsan15/url-minifier | 00ff087dadc7e14015cc5640e135f8454afd11dc | [
"MIT"
] | null | null | null | minify/migrations/0004_auto__del_unique_urlminify_short_url__add_unique_urlminify_short_url_s.py | djsan15/url-minifier | 00ff087dadc7e14015cc5640e135f8454afd11dc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
| 52.55814 | 217 | 0.615487 |
b43cafc5d4e3e3709f5f5f9476d5698dfa194510 | 1,182 | py | Python | Validation/EcalRecHits/test/EcalTBValidationData_cfg.py | pasmuss/cmssw | 566f40c323beef46134485a45ea53349f59ae534 | [
"Apache-2.0"
] | null | null | null | Validation/EcalRecHits/test/EcalTBValidationData_cfg.py | pasmuss/cmssw | 566f40c323beef46134485a45ea53349f59ae534 | [
"Apache-2.0"
] | null | null | null | Validation/EcalRecHits/test/EcalTBValidationData_cfg.py | pasmuss/cmssw | 566f40c323beef46134485a45ea53349f59ae534 | [
"Apache-2.0"
] | null | null | null | import FWCore.ParameterSet.Config as cms
# CMSSW job that runs the EcalTBValidation analyzer over ECAL 2006
# test-beam data (hits reconstructed with the weights method).
process = cms.Process("h4ValidData")

# initialize MessageLogger
process.load("FWCore.MessageLogger.MessageLogger_cfi")

# Process every event in the source (-1 means "no limit").
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(-1)
)

# Input: locally produced test-beam hits file.
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring('file:./ECALH4TB_data_hits.root')
)

# Validation analyzer; producer/collection labels refer to modules run
# upstream in the test-beam reconstruction chain.
process.tbValidData = cms.EDAnalyzer("EcalTBValidation",
    rootfile = cms.untracked.string('EcalTBValidationData.root'),  # output histogram file
    eventHeaderProducer = cms.string('ecalTBunpack'),
    hitProducer = cms.string('ecal2006TBWeightUncalibRecHit'),
    digiCollection = cms.string(''),
    tdcRecInfoCollection = cms.string('EcalTBTDCRecInfo'),
    data_ = cms.untracked.int32(0),          # presumably selects data (vs. simulation) mode — confirm against EcalTBValidation
    digiProducer = cms.string('ecalUnsuppressedDigis'),
    xtalInBeam = cms.untracked.int32(1104),  # NOTE(review): looks like the crystal the beam is centred on — confirm
    hitCollection = cms.string('EcalUncalibRecHitsEB'),
    hodoRecInfoProducer = cms.string('ecal2006TBHodoscopeReconstructor'),
    eventHeaderCollection = cms.string(''),
    hodoRecInfoCollection = cms.string('EcalTBHodoscopeRecInfo'),
    tdcRecInfoProducer = cms.string('ecal2006TBTDCReconstructor')
)

process.p = cms.Path(process.tbValidData)
| 36.9375 | 73 | 0.756345 |
b43dc0c04bfb765d1057fbf1d173d5c4374ca965 | 1,948 | py | Python | database/domains.py | changyc9928/Genshin-Discord-Bot | be64481f43755c0031b469e79271ec7f0753cb0a | [
"MIT"
] | null | null | null | database/domains.py | changyc9928/Genshin-Discord-Bot | be64481f43755c0031b469e79271ec7f0753cb0a | [
"MIT"
] | null | null | null | database/domains.py | changyc9928/Genshin-Discord-Bot | be64481f43755c0031b469e79271ec7f0753cb0a | [
"MIT"
] | null | null | null | import asyncio
from query_graphql import query_artifact_domains, query_weapon_materials_book
| 34.785714 | 98 | 0.627823 |
b43e6c43008ba217cff97642ff4168d07bf643bc | 23,644 | py | Python | policy.py | nyu-dl/dl4mt-simul-trans | 392ff3148e944be6fbc475d5285441807902e2e0 | [
"BSD-3-Clause"
] | 34 | 2016-12-01T07:59:43.000Z | 2021-09-13T10:46:15.000Z | policy.py | yifanjun233/dl4mt-simul-trans | 392ff3148e944be6fbc475d5285441807902e2e0 | [
"BSD-3-Clause"
] | 1 | 2020-09-14T08:35:00.000Z | 2020-09-14T08:35:00.000Z | policy.py | yifanjun233/dl4mt-simul-trans | 392ff3148e944be6fbc475d5285441807902e2e0 | [
"BSD-3-Clause"
] | 18 | 2016-12-15T01:43:33.000Z | 2021-09-29T07:24:08.000Z | """
-- Policy Network for decision making [more general]
"""
from nmt_uni import *
from layers import _p
import os
import time, datetime
import cPickle as pkl
# hyper params
TINY = 1e-7
PI = numpy.pi
E = numpy.e
A = 0.2
B = 1
# ===================================================================
# Policy Gradient: REINFORCE with Adam
# ===================================================================
# ==================================================================================== #
# Trust Region Policy Optimization
# ==================================================================================== #
# ====================================================================== #
# Save & Load
# ====================================================================== #
def save(self, history, it):
    """Snapshot the policy-network parameters together with training state.

    Writes two .npz archives: a rolling ``<model>.current`` file and a
    per-iteration ``<model>.iter=<it>`` file, each containing the
    parameters plus ``history`` and ``it``.

    NOTE(review): defined with ``self`` — appears to be a method of a
    policy-network class whose definition is elsewhere in this file.
    """
    _params = OrderedDict()
    # unzip() comes from the `nmt_uni` star-import; assumed to copy values
    # out of the shared parameter containers into a plain dict — confirm.
    _params = unzip(self.tparams, _params)
    _params = unzip(self.tparams_b, _params)

    print 'save the policy network >> {}'.format(self.model)
    numpy.savez('%s.current' % (self.model),
                history=history,
                it=it,
                **_params)
    numpy.savez('{}.iter={}'.format(self.model, it),
                history=history,
                it=it,
                **_params)
def load(self):
    """Restore parameters from ``self.model`` if a saved snapshot exists.

    Returns:
        (history, it) read from the archive, or ([], -1) when no saved
        model file is present.
    """
    if os.path.exists(self.model):
        print 'loading from the existing model (current)'
        rmodel = numpy.load(self.model)
        history = rmodel['history']
        it = rmodel['it']
        # load_params/init_tparams come from the `nmt_uni` star-import;
        # assumed to rebuild both the plain dicts and the shared
        # parameter containers — confirm.
        self.params = load_params(rmodel, self.params)
        self.params_b = load_params(rmodel, self.params_b)
        self.tparams = init_tparams(self.params)
        self.tparams_b = init_tparams(self.params_b)
        print 'the dataset need to go over {} lines'.format(it)
        return history, it
    else:
        return [], -1
| 39.871838 | 118 | 0.514084 |
b43f15ecbdb1d9b59ec1324ee2719d330bd46baf | 3,637 | py | Python | src/app/drivers/pycolator/splitmerge.py | husensofteng/msstitch | a917ed24fbc8b018b3f2bbec31e852aa76cc715c | [
"MIT"
] | null | null | null | src/app/drivers/pycolator/splitmerge.py | husensofteng/msstitch | a917ed24fbc8b018b3f2bbec31e852aa76cc715c | [
"MIT"
] | null | null | null | src/app/drivers/pycolator/splitmerge.py | husensofteng/msstitch | a917ed24fbc8b018b3f2bbec31e852aa76cc715c | [
"MIT"
] | null | null | null | from app.drivers.pycolator import base
from app.actions.pycolator import splitmerge as preparation
from app.readers import pycolator as readers
from app.drivers.options import pycolator_options
| 36.37 | 79 | 0.668958 |
b442fb148ab72708b2f20e85644d227c7977348c | 453 | py | Python | ejercicio 14.py | Davidpadilla1234/taller_estructura-secuencial | 3a65931ad75fd4902f406c6c872053169dad1a0b | [
"MIT"
] | null | null | null | ejercicio 14.py | Davidpadilla1234/taller_estructura-secuencial | 3a65931ad75fd4902f406c6c872053169dad1a0b | [
"MIT"
] | null | null | null | ejercicio 14.py | Davidpadilla1234/taller_estructura-secuencial | 3a65931ad75fd4902f406c6c872053169dad1a0b | [
"MIT"
] | null | null | null | """
Entradas:
lectura actual--->float--->lect2
lectura anterior--->float--->lect1
valor kw--->float--->valorkw
Salidas:
consumo--->float--->consumo
total factura-->flotante--->total
"""
# Read the two meter readings and the kilowatt price.
# Bug fix: the original called the undefined name `entrada` (Spanish for
# "input"), which raises NameError at runtime; use the builtin `input`.
lect2 = float(input("Digite lectura real:"))
lect1 = float(input("Digite lectura anterior:"))
valorkw = float(input("Valor del kilowatio: "))
# Consumption is the difference between current and previous readings.
consumo = lect2 - lect1
# Total bill: consumption times price per kilowatt.
total = consumo * valorkw
print ( "El valor a pagar es: " + str ( total )) | 30.2 | 55 | 0.653422 |
b4430cd61e95dcd15b900c13c175b1309fa0cc87 | 4,955 | py | Python | src/workers/correct.py | brainsqueeze/Image_correction | db19088fb101ce760601416d19622d46d76f482c | [
"MIT"
] | 10 | 2017-08-31T06:16:56.000Z | 2022-03-12T19:44:50.000Z | src/workers/correct.py | brainsqueeze/Image_correction | db19088fb101ce760601416d19622d46d76f482c | [
"MIT"
] | 2 | 2018-06-01T09:27:07.000Z | 2018-07-23T01:43:16.000Z | src/workers/correct.py | brainsqueeze/Image_correction | db19088fb101ce760601416d19622d46d76f482c | [
"MIT"
] | 3 | 2018-10-24T04:59:10.000Z | 2021-09-03T10:37:35.000Z | # __author__ = 'Dave'
import cv2
from skimage import io
from skimage.transform import probabilistic_hough_line
import matplotlib.pyplot as plt
import os
import warnings
import random
import numpy as np
warnings.filterwarnings('ignore', category=RuntimeWarning)
| 32.598684 | 105 | 0.559435 |
b443c0485b44fdad4aad919722875c535cf37d83 | 2,469 | py | Python | plot_scripts/CC_timeline_plot.py | idunnam/Thesis | a567a25aa037c949de285158804a6ee396fc0e6c | [
"MIT"
] | null | null | null | plot_scripts/CC_timeline_plot.py | idunnam/Thesis | a567a25aa037c949de285158804a6ee396fc0e6c | [
"MIT"
] | 1 | 2022-01-28T13:12:26.000Z | 2022-01-28T13:12:26.000Z | plot_scripts/CC_timeline_plot.py | idunnam/Thesis | a567a25aa037c949de285158804a6ee396fc0e6c | [
"MIT"
] | null | null | null | """
This code is used for plotting individual timelines of seasonal CC for each CMIP5 and CMIP6 model
"""
import matplotlib.pyplot as plt
import xarray as xr
import numpy as np
import seaborn as sns
import pandas as pd

#=== Import SEB Anomalies ====
#from seasonal_SEB_components import *

# Use the colorblind palette. Bug fix: this must be set BEFORE plotting;
# the original called it after the plots were drawn, where it had no effect.
sns.set_palette('colorblind')

# Seasonal-mean datasets, CMIP5 models.
ACCESS = xr.open_dataset('/projects/NS9600K/idunnam/src/seas_ACCESS.nc')
HADGEM = xr.open_dataset('/projects/NS9600K/idunnam/src/seas_HADGEM.nc')
CSIRO = xr.open_dataset('/projects/NS9600K/idunnam/src/seas_CSIRO.nc')
IPSL = xr.open_dataset('/projects/NS9600K/idunnam/src/seas_IPSL.nc')
MIROC5 = xr.open_dataset('/projects/NS9600K/idunnam/src/seas_MIROC5.nc')
NORESM = xr.open_dataset('/projects/NS9600K/idunnam/src/seas_NORESM.nc')

# Seasonal-mean datasets, CMIP6 models.
CESM = xr.open_dataset('/projects/NS9600K/idunnam/src/seas_CESM.nc')
CNRM_CM6 = xr.open_dataset('/projects/NS9600K/idunnam/src/seas_CNRM_CM6.nc')
CNRM_ESM2 = xr.open_dataset('/projects/NS9600K/idunnam/src/seas_CNRM_ESM2.nc')
MRI = xr.open_dataset('/projects/NS9600K/idunnam/src/seas_MRI.nc')
UKMO = xr.open_dataset('/projects/NS9600K/idunnam/src/seas_UKMO.nc')

# Domain-mean cloud-cover time series, one panel per model generation.
fig, axs = plt.subplots(1, 2, sharey=True, figsize=(30, 10))

# Left panel: CMIP5 models.
axs[0].plot(ACCESS.CC.year, ACCESS.CC.mean(dim=["X10_105", "Y21_199"]), label='ACCESS')
axs[0].plot(HADGEM.CC.year, HADGEM.CC.mean(dim=["X10_105", "Y21_199"]), label='HADGEM')
axs[0].plot(IPSL.CC.year, IPSL.CC.mean(dim=["X10_105", "Y21_199"]), label='IPSL')
axs[0].plot(MIROC5.CC.year, MIROC5.CC.mean(dim=["X10_105", "Y21_199"]), label='MIROC5')
axs[0].plot(NORESM.CC.year, NORESM.CC.mean(dim=["X10_105", "Y21_199"]), label='NORESM')
axs[0].plot(CSIRO.CC.year, CSIRO.CC.mean(dim=["X10_105", "Y21_199"]), label='CSIRO')
axs[0].legend(loc='upper left')
axs[0].set_xlabel('year')
axs[0].set_ylabel('CC')
axs[0].set_title('Cloud Cover - CMIP5 Models')

# Right panel: CMIP6 models.
# Bug fixes vs. the original: the 'CESM' and 'MRI' curves actually plotted
# ACCESS and MIROC5 (CMIP5) data, and the panel title said CMIP5.
axs[1].plot(CESM.CC.year, CESM.CC.mean(dim=["X10_105", "Y21_199"]), label='CESM')
axs[1].plot(CNRM_CM6.CC.year, CNRM_CM6.CC.mean(dim=["X10_105", "Y21_199"]), label='CNRM_CM6')
axs[1].plot(CNRM_ESM2.CC.year, CNRM_ESM2.CC.mean(dim=["X10_105", "Y21_199"]), label='CNRM_ESM2')
axs[1].plot(MRI.CC.year, MRI.CC.mean(dim=["X10_105", "Y21_199"]), label='MRI')
axs[1].plot(UKMO.CC.year, UKMO.CC.mean(dim=["X10_105", "Y21_199"]), label='UKMO')
axs[1].legend(loc='upper left')
axs[1].set_xlabel('year')
axs[1].set_ylabel('CC')
axs[1].set_title('Cloud Cover - CMIP6 Models')

plt.savefig('CC_test_2.png')
plt.show()
| 46.584906 | 97 | 0.722155 |
b443e69cd16f1827fe9ba10cb1499425321f1ac2 | 1,059 | py | Python | manage.py | xinbingliang/dockertest | aca2a508658681a5e6b1beab714059bf1b43d9ed | [
"MIT"
] | 30 | 2018-05-23T16:58:12.000Z | 2021-10-18T21:25:01.000Z | manage.py | xinbingliang/dockertest | aca2a508658681a5e6b1beab714059bf1b43d9ed | [
"MIT"
] | 2 | 2019-12-01T13:32:50.000Z | 2019-12-01T13:32:53.000Z | manage.py | xinbingliang/dockertest | aca2a508658681a5e6b1beab714059bf1b43d9ed | [
"MIT"
] | 136 | 2018-02-04T14:13:33.000Z | 2022-03-09T08:26:07.000Z | # manage.py
import unittest

from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand

from skeleton.server import app, db
from skeleton.server.models import User

# NOTE(review): `unittest` and `User` are not referenced in this file —
# possibly leftovers or imported for side effects; confirm before removing.

# Wire Alembic migrations and the script-based CLI to the Flask app.
migrate = Migrate(app, db)
manager = Manager(app)

# migrations: exposes `python manage.py db <init|migrate|upgrade|...>`
manager.add_command('db', MigrateCommand)

if __name__ == '__main__':
    manager.run()
| 18.578947 | 79 | 0.685552 |
b444035780c265816dfc1fd4e30cb0ee8b926672 | 610 | py | Python | client/middleware.py | uktrade/directory-forms-api | 078e38ddf7a761d2d34a0e1ab2dc3f20cd32e6aa | [
"MIT"
] | null | null | null | client/middleware.py | uktrade/directory-forms-api | 078e38ddf7a761d2d34a0e1ab2dc3f20cd32e6aa | [
"MIT"
] | 77 | 2018-10-29T14:38:37.000Z | 2022-03-23T14:20:39.000Z | client/middleware.py | uktrade/directory-forms-api | 078e38ddf7a761d2d34a0e1ab2dc3f20cd32e6aa | [
"MIT"
] | 1 | 2021-08-05T10:20:17.000Z | 2021-08-05T10:20:17.000Z | import sigauth.middleware
import sigauth.helpers
from client import helpers
| 30.5 | 80 | 0.703279 |
b444a932576d7caabe2a8eb3dc47c1e354d4d5e3 | 3,867 | py | Python | scripts/prepare-kernel-headers.py | sonicyang/mctest | 39c26c43e9fcf1fd94322effad4ca211d495339a | [
"BSD-2-Clause"
] | 4 | 2017-05-22T07:05:33.000Z | 2020-10-22T02:34:48.000Z | scripts/prepare-kernel-headers.py | sonicyang/mctest | 39c26c43e9fcf1fd94322effad4ca211d495339a | [
"BSD-2-Clause"
] | null | null | null | scripts/prepare-kernel-headers.py | sonicyang/mctest | 39c26c43e9fcf1fd94322effad4ca211d495339a | [
"BSD-2-Clause"
] | 2 | 2020-02-19T13:23:16.000Z | 2020-12-08T02:26:16.000Z | import os
import subprocess
import errno
import shutil
import re
import sys
kernel_path = ''
install_path = ''
patch_rules = []
arch = ''
def main():
    """Collect kernel headers, register C++-compatibility patch rules and
    feed every header through the patch-and-install pipeline.

    Usage: prepare-kernel-headers.py <kernel_path> <install_path> <arch>
    """
    argv = sys.argv
    assert len(argv) == 4, 'Invalid arguments'

    # Publish the CLI arguments through the module-level globals that the
    # patch/install helpers read.
    global kernel_path
    global install_path
    global arch
    kernel_path = argv[1]
    install_path = argv[2]
    arch = argv[3]

    # avoid the conflict with the 'new' operator in C++
    patch_rule_append('new', 'anew')

    # TODO: Add "extern "C"" to function declaration in string_64.h
    # while we want to compile module with C++ code.
    if 'x86' in arch:
        patch_rule_append('void \*memset\(void \*s, int c, size_t n\)\;',
                          'extern \"C\" {\nvoid *memset(void *s, int c, size_t n);')
        patch_rule_append('int strcmp\(const char \*cs, const char \*ct\);',
                          'int strcmp(const char *cs, const char *ct);}')

    # wrap the declaration of extern function with extern "C"
    # e.g. extern void func(void); => extern "C" {void func(void);}
    pattern = re.compile(r'^extern\s*[\w_][\w\d_]*[\s\*]*[\w_][\w\d_]*\(.*\);$')
    patch_rule_append(pattern, wrapped_with_externC)

    # avoid duplicated keyword definition
    # e.g. typedef _Bool bool;
    #      => #ifndef __cplusplus
    #         typedef _Bool bool;
    #         #endif
    pattern = re.compile(r'^\s*typedef.*\s*(false|true|bool);$')
    patch_rule_append(pattern, wrapped_with_ifndef_cpluscplus_macro)
    pattern = re.compile(r'^\s*(false|true|bool)\s*=.*$')
    patch_rule_append(pattern, wrapped_with_ifndef_cpluscplus_macro)

    # Use find command to find out all headers (-L follows symlinks).
    find_cmd = 'find -L ' + kernel_path + ' -name *.h'
    proc = subprocess.Popen(find_cmd, shell = True, stdout = subprocess.PIPE)
    lines = proc.stdout.readlines()
    for line in lines:
        if line == '':
            break
        # Remove the newline character
        src = line.replace('\n', "")
        # file_patch_and_install is defined elsewhere in this file;
        # assumed to apply patch_rules and copy into install_path — confirm.
        file_patch_and_install(src)
if __name__ == '__main__':
sys.exit(main())
| 27.820144 | 82 | 0.60693 |
b446c92bc9ef0b8ec976811e71bda60bd2a8e30d | 18,912 | py | Python | model/loss.py | Daipuwei/YOLO-tf2 | 1b2e7133c99507573f419c8a367a8dba4abeae5b | [
"MIT"
] | null | null | null | model/loss.py | Daipuwei/YOLO-tf2 | 1b2e7133c99507573f419c8a367a8dba4abeae5b | [
"MIT"
] | null | null | null | model/loss.py | Daipuwei/YOLO-tf2 | 1b2e7133c99507573f419c8a367a8dba4abeae5b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @Time : 2021/9/18 11:19
# @Author : DaiPuWei
# @Email : 771830171@qq.com
# @File : loss.py
# @Software: PyCharm
"""
YOLOsmooth Label;
GIOU LossDIOU LossCIOU Loss
"""
import math
import tensorflow as tf
from tensorflow.keras import backend as K
# ---------------------------------------------------#
#
# ---------------------------------------------------#
# ---------------------------------------------------#
#
# ---------------------------------------------------#
# ---------------------------------------------------#
# iou
# ---------------------------------------------------#
def box_giou(b_true, b_pred):
    """
    Calculate GIoU loss on anchor boxes
    Reference Paper:
        "Generalized Intersection over Union: A Metric and A Loss for Bounding Box Regression"
        https://arxiv.org/abs/1902.09630

    Parameters
    ----------
    b_true: GT boxes tensor, shape=(batch, feat_w, feat_h, anchor_num, 4), xywh
    b_pred: predict boxes tensor, shape=(batch, feat_w, feat_h, anchor_num, 4), xywh

    Returns
    -------
    giou: tensor, shape=(batch, feat_w, feat_h, anchor_num, 1)
    """
    # Convert both box sets from centre/size (xywh) to corner coordinates.
    true_half = b_true[..., 2:4] / 2.
    true_min = b_true[..., :2] - true_half
    true_max = b_true[..., :2] + true_half

    pred_half = b_pred[..., 2:4] / 2.
    pred_min = b_pred[..., :2] - pred_half
    pred_max = b_pred[..., :2] + pred_half

    # Intersection area of each box pair (clipped at zero overlap).
    inter_wh = K.maximum(K.minimum(true_max, pred_max) - K.maximum(true_min, pred_min), 0.)
    inter_area = inter_wh[..., 0] * inter_wh[..., 1]

    # Union area; epsilon in the denominator avoids dividing by zero.
    true_area = b_true[..., 2] * b_true[..., 3]
    pred_area = b_pred[..., 2] * b_pred[..., 3]
    union_area = true_area + pred_area - inter_area
    iou = inter_area / (union_area + K.epsilon())

    # Smallest axis-aligned box enclosing both inputs.
    enclose_wh = K.maximum(K.maximum(true_max, pred_max) - K.minimum(true_min, pred_min), 0.0)
    enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]

    # GIoU = IoU - (enclosing area not covered by the union) / enclosing area.
    giou = iou - 1.0 * (enclose_area - union_area) / (enclose_area + K.epsilon())
    return K.expand_dims(giou, -1)
def box_diou(b_true, b_pred,use_ciou_loss=False):
    """
    Calculate Distance-IoU (optionally Complete-IoU) on anchor boxes.
    Reference Paper:
    "Distance-IoU Loss: Faster and Better Learning for Bounding Box Regression"
    https://arxiv.org/abs/1911.08287

    Parameters
    ----------
    b_true: tensor, shape=(batch, feat_w, feat_h, anchor_num, 4), xywh
    b_pred: tensor, shape=(batch, feat_w, feat_h, anchor_num, 4), xywh
    use_ciou_loss: bool, add the aspect-ratio penalty term (CIoU) when True

    Returns
    -------
    diou: tensor, shape=(batch, feat_w, feat_h, anchor_num, 1)
    """
    # Ground-truth boxes: centre/size (xywh) -> min/max corner coordinates.
    b_true_xy = b_true[..., :2]
    b_true_wh = b_true[..., 2:4]
    b_true_wh_half = b_true_wh / 2.
    b_true_mins = b_true_xy - b_true_wh_half
    b_true_maxes = b_true_xy + b_true_wh_half

    # Predicted boxes: same corner conversion.
    b_pred_xy = b_pred[..., :2]
    b_pred_wh = b_pred[..., 2:4]
    b_pred_wh_half = b_pred_wh / 2.
    b_pred_mins = b_pred_xy - b_pred_wh_half
    b_pred_maxes = b_pred_xy + b_pred_wh_half

    # Plain IoU; epsilon guards the division when the union area is zero.
    intersect_mins = K.maximum(b_true_mins, b_pred_mins)
    intersect_maxes = K.minimum(b_true_maxes, b_pred_maxes)
    intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)
    intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
    b1_area = b_true_wh[..., 0] * b_true_wh[..., 1]
    b_pred_area = b_pred_wh[..., 0] * b_pred_wh[..., 1]
    union_area = b1_area + b_pred_area - intersect_area
    iou = intersect_area / K.maximum(union_area, K.epsilon())

    # Squared distance between the two box centres.
    center_distance = K.sum(K.square(b_true_xy - b_pred_xy), axis=-1)

    # Smallest enclosing box of each pair ...
    enclose_mins = K.minimum(b_true_mins, b_pred_mins)
    enclose_maxes = K.maximum(b_true_maxes, b_pred_maxes)
    enclose_wh = K.maximum(enclose_maxes - enclose_mins, 0.0)

    # ... and its squared diagonal, used to normalise the centre distance.
    enclose_diagonal = K.sum(K.square(enclose_wh), axis=-1)
    diou = iou - 1.0 * (center_distance) / K.maximum(enclose_diagonal, K.epsilon())

    if use_ciou_loss:
        # Aspect-ratio consistency term v of the CIoU formulation.
        v = 4 * K.square(tf.math.atan2(b_true_wh[..., 0], K.maximum(b_true_wh[..., 1], K.epsilon()))
                         - tf.math.atan2(b_pred_wh[..., 0],K.maximum(b_pred_wh[..., 1],K.epsilon()))) / (math.pi * math.pi)
        # a trick: here we add an non-gradient coefficient w^2+h^2 to v to customize it's back-propagate,
        # to match related description for equation (12) in original paper
        #
        # v'/w' = (8/pi^2) * (arctan(wgt/hgt) - arctan(w/h)) * (h/(w^2+h^2)) (12)
        # v'/h' = -(8/pi^2) * (arctan(wgt/hgt) - arctan(w/h)) * (w/(w^2+h^2))
        #
        # The dominator w^2+h^2 is usually a small value for the cases
        # h and w ranging in [0; 1], which is likely to yield gradient
        # explosion. And thus in our implementation, the dominator
        # w^2+h^2 is simply removed for stable convergence, by which
        # the step size 1/(w^2+h^2) is replaced by 1 and the gradient direction
        # is still consistent with Eqn. (12).
        v = v * tf.stop_gradient(b_pred_wh[..., 0] * b_pred_wh[..., 0] + b_pred_wh[..., 1] * b_pred_wh[..., 1])
        alpha = v / K.maximum((1.0 - iou + v), K.epsilon())
        diou = diou - alpha * v

    diou = K.expand_dims(diou, -1)
    # Replace NaNs from degenerate (zero-size) boxes with zero loss.
    diou = tf.where(tf.math.is_nan(diou), tf.zeros_like(diou), diou)
    return diou
# ---------------------------------------------------#
# loss
# ---------------------------------------------------# | 46.239609 | 123 | 0.519882 |
b4470139b4eff5eadddd95183f7509c2d7a4cf79 | 59,405 | py | Python | electrum_vtc/tests/test_lnpeer.py | samdisk11/electrum | 4fffb4328a1764b5cd969b5c733e67bced2548a0 | [
"MIT"
] | null | null | null | electrum_vtc/tests/test_lnpeer.py | samdisk11/electrum | 4fffb4328a1764b5cd969b5c733e67bced2548a0 | [
"MIT"
] | 2 | 2022-01-11T17:19:40.000Z | 2022-01-14T16:32:23.000Z | electrum_vtc/tests/test_lnpeer.py | samdisk11/electrum | 4fffb4328a1764b5cd969b5c733e67bced2548a0 | [
"MIT"
] | 2 | 2022-01-13T05:04:16.000Z | 2022-01-14T11:48:39.000Z | import asyncio
import tempfile
from decimal import Decimal
import os
from contextlib import contextmanager
from collections import defaultdict
import logging
import concurrent
from concurrent import futures
import unittest
from typing import Iterable, NamedTuple, Tuple, List, Dict
from aiorpcx import TaskGroup, timeout_after, TaskTimeout
import electrum_vtc as electrum
import electrum_vtc.trampoline
from electrum_vtc import bitcoin
from electrum_vtc import constants
from electrum_vtc.network import Network
from electrum_vtc.ecc import ECPrivkey
from electrum_vtc import simple_config, lnutil
from electrum_vtc.lnaddr import lnencode, LnAddr, lndecode
from electrum_vtc.bitcoin import COIN, sha256
from electrum_vtc.util import bh2u, create_and_start_event_loop, NetworkRetryManager, bfh
from electrum_vtc.lnpeer import Peer, UpfrontShutdownScriptViolation
from electrum_vtc.lnutil import LNPeerAddr, Keypair, privkey_to_pubkey
from electrum_vtc.lnutil import LightningPeerConnectionClosed, RemoteMisbehaving
from electrum_vtc.lnutil import PaymentFailure, LnFeatures, HTLCOwner
from electrum_vtc.lnchannel import ChannelState, PeerState, Channel
from electrum_vtc.lnrouter import LNPathFinder, PathEdge, LNPathInconsistent
from electrum_vtc.channel_db import ChannelDB
from electrum_vtc.lnworker import LNWallet, NoPathFound
from electrum_vtc.lnmsg import encode_msg, decode_msg
from electrum_vtc import lnmsg
from electrum_vtc.logging import console_stderr_handler, Logger
from electrum_vtc.lnworker import PaymentInfo, RECEIVED
from electrum_vtc.lnonion import OnionFailureCode
from electrum_vtc.lnutil import derive_payment_secret_from_payment_preimage
from electrum_vtc.lnutil import LOCAL, REMOTE
from electrum_vtc.invoices import PR_PAID, PR_UNPAID
from .test_lnchannel import create_test_channels
from .test_bitcoin import needs_test_with_all_chacha20_implementations
from . import TestCaseForTestnet
def channel_state_changed(self, chan):
pass
def save_channel(self, chan):
print("Ignoring channel save")
def diagnostic_name(self):
return self.name
get_payments = LNWallet.get_payments
get_payment_info = LNWallet.get_payment_info
save_payment_info = LNWallet.save_payment_info
set_invoice_status = LNWallet.set_invoice_status
set_request_status = LNWallet.set_request_status
set_payment_status = LNWallet.set_payment_status
get_payment_status = LNWallet.get_payment_status
check_received_mpp_htlc = LNWallet.check_received_mpp_htlc
htlc_fulfilled = LNWallet.htlc_fulfilled
htlc_failed = LNWallet.htlc_failed
save_preimage = LNWallet.save_preimage
get_preimage = LNWallet.get_preimage
create_route_for_payment = LNWallet.create_route_for_payment
create_routes_for_payment = LNWallet.create_routes_for_payment
_check_invoice = staticmethod(LNWallet._check_invoice)
pay_to_route = LNWallet.pay_to_route
pay_to_node = LNWallet.pay_to_node
pay_invoice = LNWallet.pay_invoice
force_close_channel = LNWallet.force_close_channel
try_force_closing = LNWallet.try_force_closing
get_first_timestamp = lambda self: 0
on_peer_successfully_established = LNWallet.on_peer_successfully_established
get_channel_by_id = LNWallet.get_channel_by_id
channels_for_peer = LNWallet.channels_for_peer
_calc_routing_hints_for_invoice = LNWallet._calc_routing_hints_for_invoice
handle_error_code_from_failed_htlc = LNWallet.handle_error_code_from_failed_htlc
is_trampoline_peer = LNWallet.is_trampoline_peer
wait_for_received_pending_htlcs_to_get_removed = LNWallet.wait_for_received_pending_htlcs_to_get_removed
on_proxy_changed = LNWallet.on_proxy_changed
_decode_channel_update_msg = LNWallet._decode_channel_update_msg
_handle_chanupd_from_failed_htlc = LNWallet._handle_chanupd_from_failed_htlc
_on_maybe_forwarded_htlc_resolved = LNWallet._on_maybe_forwarded_htlc_resolved
class MockTransport:
    """In-memory stand-in for an LN peer transport backed by an asyncio queue.

    NOTE(review): only the constructor is visible in this chunk; the
    send/receive side presumably lives in subclasses or methods defined
    elsewhere — confirm.
    """
    def __init__(self, name):
        self.queue = asyncio.Queue()
        self._name = name
def transport_pair(k1, k2, name1, name2):
    """Build two queue-backed mock transports cross-wired to each other."""
    left = PutIntoOthersQueueTransport(k1, name1)
    right = PutIntoOthersQueueTransport(k2, name2)
    # Each transport delivers outgoing data into its partner's queue.
    left.other_mock_transport = right
    right.other_mock_transport = left
    return left, right
# Channel template with expensive routing fees on both sides
# (balances in millisatoshi: 10 * COIN * 1000 // 2 per side).
high_fee_channel = {
    'local_balance_msat': 10 * bitcoin.COIN * 1000 // 2,
    'remote_balance_msat': 10 * bitcoin.COIN * 1000 // 2,
    'local_base_fee_msat': 500_000,
    'local_fee_rate_millionths': 500,
    'remote_base_fee_msat': 500_000,
    'remote_fee_rate_millionths': 500,
}

# Same balances but near-zero fees, so pathfinding should prefer these hops.
low_fee_channel = {
    'local_balance_msat': 10 * bitcoin.COIN * 1000 // 2,
    'remote_balance_msat': 10 * bitcoin.COIN * 1000 // 2,
    'local_base_fee_msat': 1_000,
    'local_fee_rate_millionths': 1,
    'remote_base_fee_msat': 1_000,
    'remote_fee_rate_millionths': 1,
}

# Diamond-shaped test topology: alice -> {bob, carol} -> dave, where the
# middle nodes are configured to forward (and trampoline-forward) payments.
GRAPH_DEFINITIONS = {
    'square_graph': {
        'alice': {
            'channels': {
                # we should use copies of channel definitions if
                # we want to independently alter them in a test
                'bob': high_fee_channel.copy(),
                'carol': low_fee_channel.copy(),
            },
        },
        'bob': {
            'channels': {
                'dave': high_fee_channel.copy(),
            },
            'config': {
                'lightning_forward_payments': True,
                'lightning_forward_trampoline_payments': True,
            },
        },
        'carol': {
            'channels': {
                'dave': low_fee_channel.copy(),
            },
            'config': {
                'lightning_forward_payments': True,
                'lightning_forward_trampoline_payments': True,
            },
        },
        'dave': {
        },
    }
}
def _run_mpp(self, graph, fail_kwargs, success_kwargs):
"""Tests a multipart payment scenario for failing and successful cases."""
self.assertEqual(500_000_000_000, graph.channels[('alice', 'bob')].balance(LOCAL))
self.assertEqual(500_000_000_000, graph.channels[('alice', 'carol')].balance(LOCAL))
amount_to_pay = 600_000_000_000
peers = graph.peers.values()
with self.assertRaises(NoPathFound):
run(f(fail_kwargs))
with self.assertRaises(PaymentDone):
run(f(success_kwargs))
def test_channel_usage_after_closing(self):
alice_channel, bob_channel = create_test_channels()
p1, p2, w1, w2, q1, q2 = self.prepare_peers(alice_channel, bob_channel)
lnaddr, pay_req = run(self.prepare_invoice(w2))
lnaddr = w1._check_invoice(pay_req)
route, amount_msat = run(w1.create_routes_from_invoice(lnaddr.get_amount_msat(), decoded_invoice=lnaddr))[0][0:2]
assert amount_msat == lnaddr.get_amount_msat()
run(w1.force_close_channel(alice_channel.channel_id))
# check if a tx (commitment transaction) was broadcasted:
assert q1.qsize() == 1
with self.assertRaises(NoPathFound) as e:
run(w1.create_routes_from_invoice(lnaddr.get_amount_msat(), decoded_invoice=lnaddr))
peer = w1.peers[route[0].node_id]
# AssertionError is ok since we shouldn't use old routes, and the
# route finding should fail when channel is closed
with self.assertRaises(PaymentFailure):
run(f())
| 46.775591 | 138 | 0.647908 |
b4484ab703976e8f170a719cc81c5d0146cb13ba | 533 | py | Python | dictionaries/lab/06_students.py | Galchov/python-fundamentals | 4939bdd1c66a7b458fd9ffd0a01d714de26724b5 | [
"MIT"
] | null | null | null | dictionaries/lab/06_students.py | Galchov/python-fundamentals | 4939bdd1c66a7b458fd9ffd0a01d714de26724b5 | [
"MIT"
] | null | null | null | dictionaries/lab/06_students.py | Galchov/python-fundamentals | 4939bdd1c66a7b458fd9ffd0a01d714de26724b5 | [
"MIT"
] | null | null | null | data = input()
# Group students as {course_name: {student_id: student_name}} from
# "name:id:course" records; the first line without ':' (already read into
# `data`) or read below switches to the query phase.
courses = {}
while ":" in data:
    # `student_id` instead of `id` to avoid shadowing the builtin.
    student_name, student_id, course_name = data.split(":")
    # setdefault replaces the explicit membership test + insert.
    courses.setdefault(course_name, {})[student_id] = student_name
    data = input()

# The queried course name uses '_' where the stored names have spaces.
searched_course = data.replace("_", " ")
# Direct O(1) dict lookup instead of scanning every course name.
for student_id, student_name in courses.get(searched_course, {}).items():
    print(f"{student_name} - {student_id}")
| 23.173913 | 57 | 0.669794 |
b44863efc63447d4fc28f184aca9628762468a02 | 637 | py | Python | eth_tester/normalization/common.py | PabloLefort/eth-tester | 9a795cff7da3916062884e9c1e690545741e60c5 | [
"MIT"
] | 215 | 2018-05-17T19:09:07.000Z | 2021-03-05T18:10:15.000Z | eth_tester/normalization/common.py | PabloLefort/eth-tester | 9a795cff7da3916062884e9c1e690545741e60c5 | [
"MIT"
] | 1 | 2021-03-25T21:51:01.000Z | 2021-03-25T21:51:01.000Z | eth_tester/normalization/common.py | PabloLefort/eth-tester | 9a795cff7da3916062884e9c1e690545741e60c5 | [
"MIT"
] | 1 | 2019-02-27T21:29:16.000Z | 2019-02-27T21:29:16.000Z | from cytoolz.functoolz import (
curry,
)
from eth_utils import (
to_dict,
to_tuple,
)
| 17.694444 | 71 | 0.657771 |
b448742ef1c956bf4c670f1ca4c802b2271cb5bd | 1,030 | py | Python | je_open_cv/modules/image_operations.py | JE-Chen/Python-OPENCV-JE | d5dd3823f0a1cfc195da66bdcbe738c9bbdfc59b | [
"MIT"
] | null | null | null | je_open_cv/modules/image_operations.py | JE-Chen/Python-OPENCV-JE | d5dd3823f0a1cfc195da66bdcbe738c9bbdfc59b | [
"MIT"
] | null | null | null | je_open_cv/modules/image_operations.py | JE-Chen/Python-OPENCV-JE | d5dd3823f0a1cfc195da66bdcbe738c9bbdfc59b | [
"MIT"
] | null | null | null | import cv2
'''
'''
#
#
#
#
'''
The B,G,R channels of an image can be split into their individual planes when needed. Then,
the individual channels can be merged back together to form a BGR image again. This can be performed by:
b = img[:,:,0]
Suppose, you want to make all the red pixels to zero, you need not split like this and put it equal to zero.
You can simply use Numpy indexing which is faster.
img[:,:,2] = 0
'''
#
# 2
| 18.727273 | 109 | 0.7 |
b44950222260e5d85816513148e16767252becb1 | 9,124 | py | Python | Incident-Response/Tools/grr/grr/client/grr_response_client/vfs_handlers/ntfs.py | sn0b4ll/Incident-Playbook | cf519f58fcd4255674662b3620ea97c1091c1efb | [
"MIT"
] | 1 | 2021-07-24T17:22:50.000Z | 2021-07-24T17:22:50.000Z | Incident-Response/Tools/grr/grr/client/grr_response_client/vfs_handlers/ntfs.py | sn0b4ll/Incident-Playbook | cf519f58fcd4255674662b3620ea97c1091c1efb | [
"MIT"
] | 2 | 2022-02-28T03:40:31.000Z | 2022-02-28T03:40:52.000Z | Incident-Response/Tools/grr/grr/client/grr_response_client/vfs_handlers/ntfs.py | sn0b4ll/Incident-Playbook | cf519f58fcd4255674662b3620ea97c1091c1efb | [
"MIT"
] | 2 | 2022-02-25T08:34:51.000Z | 2022-03-16T17:29:44.000Z | #!/usr/bin/env python
"""Virtual filesystem module based on pyfsntfs."""
import stat
from typing import Any, Callable, Dict, Iterable, Optional, Text, Type
import pyfsntfs
from grr_response_client import client_utils
from grr_response_client.vfs_handlers import base as vfs_base
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import paths as rdf_paths
# Caches pyfsntfs.volume instances.
MOUNT_CACHE = utils.TimeBasedCache()
# See
# https://github.com/libyal/libfsntfs/blob/master/documentation/New%20Technologies%20File%20System%20(NTFS).asciidoc#file_attribute_flags
FILE_ATTRIBUTE_READONLY = 0x00000001
FILE_ATTRIBUTE_HIDDEN = 0x00000002
| 36.790323 | 137 | 0.694432 |
b44954b2c2b3e9462c5ae4cfc721ce64071a8588 | 1,184 | py | Python | 04.Encapsulation/Exe/pizza_maker/project/main.py | nmoskova/Python-OOP | 07327bcb93eee3a7db5d7c0bbdd1b54eb9e8b864 | [
"MIT"
] | null | null | null | 04.Encapsulation/Exe/pizza_maker/project/main.py | nmoskova/Python-OOP | 07327bcb93eee3a7db5d7c0bbdd1b54eb9e8b864 | [
"MIT"
] | null | null | null | 04.Encapsulation/Exe/pizza_maker/project/main.py | nmoskova/Python-OOP | 07327bcb93eee3a7db5d7c0bbdd1b54eb9e8b864 | [
"MIT"
] | null | null | null | from encapsulation_04.exe.pizza_maker.project.dough import Dough
from encapsulation_04.exe.pizza_maker.project.pizza import Pizza
from encapsulation_04.exe.pizza_maker.project.topping import Topping

# Demo script: builds toppings and doughs and prints their attributes,
# then assembles a pizza and prints its running total weight.

tomato_topping = Topping("Tomato", 60)
print(tomato_topping.topping_type)
print(tomato_topping.weight)

mushrooms_topping = Topping("Mushroom", 75)
print(mushrooms_topping.topping_type)
print(mushrooms_topping.weight)

mozzarella_topping = Topping("Mozzarella", 80)
print(mozzarella_topping.topping_type)
print(mozzarella_topping.weight)

# These two are created but never inspected or added to the pizza.
cheddar_topping = Topping("Cheddar", 150)
pepperoni_topping = Topping("Pepperoni", 120)

white_flour_dough = Dough("White Flour", "Mixing", 200)
print(white_flour_dough.flour_type)
print(white_flour_dough.weight)
print(white_flour_dough.baking_technique)

whole_wheat_dough = Dough("Whole Wheat Flour", "Mixing", 200)
print(whole_wheat_dough.weight)
print(whole_wheat_dough.flour_type)
print(whole_wheat_dough.baking_technique)

# NOTE(review): the trailing 2 presumably caps the number of toppings —
# confirm against the Pizza class. Total weight is printed after the
# first two additions only.
p = Pizza("Margherita", whole_wheat_dough, 2)
p.add_topping(tomato_topping)
print(p.calculate_total_weight())
p.add_topping(mozzarella_topping)
print(p.calculate_total_weight())
p.add_topping(mozzarella_topping)
| 29.6 | 68 | 0.831081 |
b4498ac05bf8ea7aa023efd2ecbb1bd7c7b56fb2 | 1,158 | py | Python | src/unicon/plugins/iosxe/cat9k/__init__.py | nielsvanhooy/unicon.plugins | 3416fd8223f070cbb67a2cbe604e3c5d13584318 | [
"Apache-2.0"
] | null | null | null | src/unicon/plugins/iosxe/cat9k/__init__.py | nielsvanhooy/unicon.plugins | 3416fd8223f070cbb67a2cbe604e3c5d13584318 | [
"Apache-2.0"
] | null | null | null | src/unicon/plugins/iosxe/cat9k/__init__.py | nielsvanhooy/unicon.plugins | 3416fd8223f070cbb67a2cbe604e3c5d13584318 | [
"Apache-2.0"
] | null | null | null | """ cat9k IOS-XE connection implementation.
"""
__author__ = "Rob Trotter <rlt@cisco.com>"
from unicon.plugins.iosxe import (
IosXESingleRpConnection,
IosXEDualRPConnection,
IosXEServiceList,
HAIosXEServiceList)
from .statemachine import IosXECat9kSingleRpStateMachine, IosXECat9kDualRpStateMachine
from .settings import IosXECat9kSettings
from . import service_implementation as svc
| 26.930233 | 86 | 0.768566 |
b44998685fc665e80493c8e5ef4cef6084f68ca9 | 4,875 | py | Python | ludopediaAnuncios.py | christianbobsin/LudopediaDataMiner | d136a40b024b3611a8a88371b4a47a673c782180 | [
"MIT"
] | 2 | 2018-03-16T23:05:51.000Z | 2021-08-05T03:23:44.000Z | ludopediaAnuncios.py | christianbobsin/LudopediaDataMiner | d136a40b024b3611a8a88371b4a47a673c782180 | [
"MIT"
] | null | null | null | ludopediaAnuncios.py | christianbobsin/LudopediaDataMiner | d136a40b024b3611a8a88371b4a47a673c782180 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from lxml import html
from time import sleep
from datetime import datetime
import requests
import os
import sqlite3
import sys
# No terminal usar ~: python ludopedia.py [idIni] [regs]
# por ex. ~: python ludopedia.py 451 3000
con = sqlite3.connect('ludopedia.db')
cursor = con.cursor()
cursor.execute("""SELECT (ANUNCIO + 1) FROM JOGOS WHERE ANUNCIO=(SELECT MAX(ANUNCIO) FROM JOGOS WHERE TIPO='ANUNCIO') """)
anuncios = cursor.fetchall()
con.close()
idIni = int(anuncios[0][0])
#idIni = 75691
#regs = int(sys.argv[2])
regs = 9999
idMax = ( idIni + regs )
jogosAdicionados = 0
for id in range(idIni, idMax):
# 'http://www.ludopedia.com.br/anuncio?id_anuncio='+str(id)
#url = 'http://www.ludopedia.com.br/anuncio?id_anuncio=' % id
try:
page = requests.get('http://www.ludopedia.com.br/anuncio?id_anuncio='+str(id))
tree = html.fromstring(page.content)
except:
print 'nova tentativa em 10s'
sleep(10)
page = requests.get('http://www.ludopedia.com.br/anuncio?id_anuncio='+str(id))
tree = html.fromstring(page.content)
#jogoNome = tree.xpath('//div[@class="col-xs-10"]/h3/a/text()')
jogoNome = tree.xpath('//*[@id="page-content"]/div/div/div/div[2]/h3/a/text()')
#jogoFlavor = tree.xpath('//div[@class="col-xs-10"]/h3/span/text()')
jogoFlavor = tree.xpath('//*[@id="page-content"]/div/div/div/div[2]/h3/span/text()')
if len(jogoFlavor):
detalhes = jogoFlavor[0]
else:
detalhes = 'NA'
jogoPreco = tree.xpath('//span[@class="negrito proximo_lance"]/text()')
if len(jogoPreco):
jogoPreco =jogoPreco[0].split()
jogoPreco[1] = jogoPreco[1].replace('.','')
preco = float( jogoPreco[1].replace( ',','.' ) )
else:
preco = 0.0
status = tree.xpath('//td/span/text()')
validadeAnuncio = tree.xpath('//td/text()')
if len(validadeAnuncio):
validadeAnuncio[4] = validadeAnuncio[4].replace(',',' ')
data = validadeAnuncio[4].split()
ano = data[0].split('/')
hora = data[1].split(':')
data = datetime( int(ano[2]), int(ano[1]),int(ano[0]), int(hora[0]), int(hora[1]))
if ( data > datetime.now() and status[1] == 'Vendido'):
data = datetime.now()
else:
data = datetime( 1979, 8, 10 )
pessoa = tree.xpath('//td/a/text()')
if len(pessoa):
vendedor = pessoa[1]
if len(pessoa) < 3:
comprador = 'NA'
else:
comprador = pessoa[2]
current = id - idIni + 1
total = idMax - idIni
progress = (current/float(total))*100
#print str(current) + ' / ' + str(total) + " : " + "%.2f" % round(progress,2) + "%"
#print 'Id: ', id
#jogoCount = id - idIni
if len(jogoNome):
jogosAdicionados = jogosAdicionados + 1
if ( len(status[1]) > 15 ):
status[1] = 'Ativo'
#print 'Jogo: ', jogoNome[0]
#print 'Detalhes ', detalhes
#print 'Preco: ', str(preco)
#print 'Status: ', status[1]
#print 'Validade: ', data
#print 'Estado: ', validadeAnuncio[6]
#print 'Local: ', validadeAnuncio[8]
#print 'Vendedor: ', vendedor
#print 'Comprador:', comprador
print str( current ).zfill( 4 ) + ' '+ str ( id ) + ' ' + ano[2] + '-' +str( ano[1] ).zfill(2) + '-'+ str( ano[0] ).zfill(2) + ' ' + status[1] + '\t\t' + validadeAnuncio[6] + '\t' + str(preco) + '\t ' + jogoNome[0]
con = sqlite3.connect('ludopedia.db')
cursor = con.cursor()
cursor.execute("""INSERT INTO JOGOS ( ANUNCIO, JOGO, SUBTITULO, PRECO, STATUS, VALIDADE, ESTADO, ORIGEM, VENDEDOR, COMPRADOR, TIPO )
VALUES (?,?,?,?,?,?,?,?,?,?,?)""", (id, jogoNome[0], detalhes, preco, status[1], data, validadeAnuncio[6],
validadeAnuncio[8], vendedor, comprador, 'ANUNCIO' ) )
try:
con.commit()
except:
print 'Falha no Commit, tentando novamente em 10s.'
sleep(10)
con.commit()
con.close()
#print '-----------------------'
#print 'Jogos Adicionados: ' + str( jogosAdicionados )
#print '-----------------------'
else:
print str( current ).zfill( 4 ) + ' ' + str ( id ) + '\t ' + '-------' + ' \t ' + '-------' + ' \t ' + '------' + '\t ' + '---'
sleep(0.05)
#os.system('clear')
print '---------------------------------------------------------------'
print 'Jogos Adicionados: ' + str( jogosAdicionados )
print '---------------------------------------------------------------'
########################################################################
#sTable = sorted( table, key = getKey )
#print tabulate(sTable, tablefmt="plain" )
#f = open ( 'LudopediaLeaks %s-%s.csv' % ( idIni, idMax) , 'w' )
#for x in range ( 0, len( sTable ) ):
# row = "%s;%s;%s;%s;%s;%s;%s;%s;%s;%s" % ( sTable[x][0],
# sTable[x][1].encode('utf8'),
# sTable[x][2].encode('utf8'),
# sTable[x][3],
# sTable[x][4].encode('utf8'),
# sTable[x][5],
# sTable[x][6].encode('utf8'),
# sTable[x][7].encode('utf8'),
# sTable[x][8].encode('utf8'),
# sTable[x][9].encode('utf8') )
# print row
# f.write(row + '\n' )
#f.close()
| 28.676471 | 223 | 0.570256 |
b449d07b5e029400778e8d16d3a55f2ee36130ff | 18,334 | py | Python | midterm/yolo_utils.py | ClarkBrun/emotic | ea4c1d846ac8aa18a902c0e68fb6e5dc5e1ae2d1 | [
"MIT"
] | null | null | null | midterm/yolo_utils.py | ClarkBrun/emotic | ea4c1d846ac8aa18a902c0e68fb6e5dc5e1ae2d1 | [
"MIT"
] | null | null | null | midterm/yolo_utils.py | ClarkBrun/emotic | ea4c1d846ac8aa18a902c0e68fb6e5dc5e1ae2d1 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
def xywh2xyxy(x):
    """Convert boxes from (center x, center y, w, h) to (x1, y1, x2, y2).

    :param x: tensor of boxes whose last dimension is [cx, cy, w, h]
    :return: new tensor of the same shape holding corner coordinates
    """
    half_w = x[..., 2] / 2
    half_h = x[..., 3] / 2
    out = x.new(x.shape)
    out[..., 0] = x[..., 0] - half_w
    out[..., 1] = x[..., 1] - half_h
    out[..., 2] = x[..., 0] + half_w
    out[..., 3] = x[..., 1] + half_h
    return out
def bbox_iou(box1, box2, x1y1x2y2=True):
    """
    Returns the IoU of two sets of bounding boxes, row-wise.

    Boxes are corner format (x1, y1, x2, y2) unless ``x1y1x2y2`` is False,
    in which case they are (center x, center y, width, height).
    """
    if x1y1x2y2:
        a_x1, a_y1, a_x2, a_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
        b_x1, b_y1, b_x2, b_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]
    else:
        # convert from center/size to corner coordinates first
        a_x1, a_x2 = box1[:, 0] - box1[:, 2] / 2, box1[:, 0] + box1[:, 2] / 2
        a_y1, a_y2 = box1[:, 1] - box1[:, 3] / 2, box1[:, 1] + box1[:, 3] / 2
        b_x1, b_x2 = box2[:, 0] - box2[:, 2] / 2, box2[:, 0] + box2[:, 2] / 2
        b_y1, b_y2 = box2[:, 1] - box2[:, 3] / 2, box2[:, 1] + box2[:, 3] / 2

    # intersection rectangle (the +1 treats coordinates as inclusive pixels)
    ix1 = torch.max(a_x1, b_x1)
    iy1 = torch.max(a_y1, b_y1)
    ix2 = torch.min(a_x2, b_x2)
    iy2 = torch.min(a_y2, b_y2)
    inter = torch.clamp(ix2 - ix1 + 1, min=0) * torch.clamp(iy2 - iy1 + 1, min=0)

    area_a = (a_x2 - a_x1 + 1) * (a_y2 - a_y1 + 1)
    area_b = (b_x2 - b_x1 + 1) * (b_y2 - b_y1 + 1)

    # epsilon guards against division by zero for degenerate boxes
    return inter / (area_a + area_b - inter + 1e-16)
def rescale_boxes(boxes, current_dim, original_shape):
    """Rescale corner-format boxes from the padded square network input
    back to the original image shape. Mutates ``boxes`` in place and
    returns it."""
    orig_h, orig_w = original_shape
    scale = current_dim / max(original_shape)
    # padding that letterboxing added to the shorter side
    pad_x = max(orig_h - orig_w, 0) * scale
    pad_y = max(orig_w - orig_h, 0) * scale
    # image extent inside the padded square
    unpad_h = current_dim - pad_y
    unpad_w = current_dim - pad_x
    # undo the padding, then rescale to original pixel coordinates
    for col in (0, 2):
        boxes[:, col] = ((boxes[:, col] - pad_x // 2) / unpad_w) * orig_w
    for col in (1, 3):
        boxes[:, col] = ((boxes[:, col] - pad_y // 2) / unpad_h) * orig_h
    return boxes
def non_max_suppression(prediction, conf_thres=0.5, nms_thres=0.4):
    """
    Removes detections with lower object confidence score than 'conf_thres' and performs
    Non-Maximum Suppression to further filter detections.

    NOTE: ``prediction`` is modified in place (its box columns are converted
    to corner format below).

    Returns detections with shape:
        (x1, y1, x2, y2, object_conf, class_score, class_pred)
    one tensor per image, or None for images with no surviving detections.
    """
    # From (center x, center y, width, height) to (x1, y1, x2, y2)
    prediction[..., :4] = xywh2xyxy(prediction[..., :4])
    output = [None for _ in range(len(prediction))]
    for image_i, image_pred in enumerate(prediction):
        # Filter out confidence scores below threshold
        image_pred = image_pred[image_pred[:, 4] >= conf_thres]
        # If none are remaining => process next image
        if not image_pred.size(0):
            continue
        # Object confidence times class confidence
        score = image_pred[:, 4] * image_pred[:, 5:].max(1)[0]
        # Sort by it (descending, so the most confident box comes first)
        image_pred = image_pred[(-score).argsort()]
        class_confs, class_preds = image_pred[:, 5:].max(1, keepdim=True)
        detections = torch.cat((image_pred[:, :5], class_confs.float(), class_preds.float()), 1)
        # Perform non-maximum suppression
        keep_boxes = []
        while detections.size(0):
            # boxes that overlap the current best box beyond the NMS threshold
            large_overlap = bbox_iou(detections[0, :4].unsqueeze(0), detections[:, :4]) > nms_thres
            label_match = detections[0, -1] == detections[:, -1]
            # Indices of boxes with lower confidence scores, large IOUs and matching labels
            invalid = large_overlap & label_match
            weights = detections[invalid, 4:5]
            # Merge overlapping bboxes by order of confidence
            # (confidence-weighted average of the suppressed boxes' coordinates)
            detections[0, :4] = (weights * detections[invalid, :4]).sum(0) / weights.sum()
            keep_boxes += [detections[0]]
            detections = detections[~invalid]
        if keep_boxes:
            output[image_i] = torch.stack(keep_boxes)
    return output
def parse_model_config(path):
    """Parse a yolo-v3 layer configuration (darknet ``.cfg``) file.

    :param path: path to the ``.cfg`` file
    :return: list of dicts, one per ``[section]`` block; each dict has a
        'type' key plus the section's key=value options (as strings).
        Convolutional blocks additionally default 'batch_normalize' to 0.
    """
    # Use a context manager so the file handle is closed even on error
    # (the original left the handle open, leaking the descriptor).
    with open(path, 'r') as file:
        lines = file.read().split('\n')
    lines = [x for x in lines if x and not x.startswith('#')]
    lines = [x.rstrip().lstrip() for x in lines]  # get rid of fringe whitespaces
    module_defs = []
    for line in lines:
        if line.startswith('['):  # This marks the start of a new block
            module_defs.append({})
            module_defs[-1]['type'] = line[1:-1].rstrip()
            if module_defs[-1]['type'] == 'convolutional':
                module_defs[-1]['batch_normalize'] = 0
        else:
            key, value = line.split("=")
            value = value.strip()
            module_defs[-1][key.rstrip()] = value.strip()
    return module_defs
def parse_data_config(path):
    """Parse a darknet data configuration file into a dict.

    Lines look like ``key = value``; blank lines and '#' comments are
    skipped. 'gpus' and 'num_workers' carry defaults that the file may
    override.
    """
    options = {'gpus': '0,1,2,3', 'num_workers': '10'}
    with open(path, 'r') as fp:
        for raw in fp.readlines():
            raw = raw.strip()
            if not raw or raw.startswith('#'):
                continue
            key, value = raw.split('=')
            options[key.strip()] = value.strip()
    return options
def create_modules(module_defs):
    """
    Constructs module list of layer blocks from module configuration in module_defs

    The first entry of ``module_defs`` must be the [net] hyperparameter
    section; it is popped off (NOTE: this mutates the caller's list).

    NOTE(review): as written here this function builds ``hyperparams`` and
    ``module_list`` but has no return statement; upstream versions end with
    ``return hyperparams, module_list`` — confirm against the original repo.
    """
    hyperparams = module_defs.pop(0)
    output_filters = [int(hyperparams["channels"])]
    module_list = nn.ModuleList()
    for module_i, module_def in enumerate(module_defs):
        modules = nn.Sequential()
        if module_def["type"] == "convolutional":
            bn = int(module_def["batch_normalize"])
            filters = int(module_def["filters"])
            kernel_size = int(module_def["size"])
            pad = (kernel_size - 1) // 2  # 'same' padding for odd kernels
            modules.add_module(
                f"conv_{module_i}",
                nn.Conv2d(
                    in_channels=output_filters[-1],
                    out_channels=filters,
                    kernel_size=kernel_size,
                    stride=int(module_def["stride"]),
                    padding=pad,
                    bias=not bn,  # BN makes the conv bias redundant
                ),
            )
            if bn:
                modules.add_module(f"batch_norm_{module_i}", nn.BatchNorm2d(filters, momentum=0.9, eps=1e-5))
            if module_def["activation"] == "leaky":
                modules.add_module(f"leaky_{module_i}", nn.LeakyReLU(0.1))
        elif module_def["type"] == "maxpool":
            kernel_size = int(module_def["size"])
            stride = int(module_def["stride"])
            if kernel_size == 2 and stride == 1:
                # asymmetric pad keeps the spatial size for k=2, s=1 pooling
                modules.add_module(f"_debug_padding_{module_i}", nn.ZeroPad2d((0, 1, 0, 1)))
            maxpool = nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=int((kernel_size - 1) // 2))
            modules.add_module(f"maxpool_{module_i}", maxpool)
        elif module_def["type"] == "upsample":
            # Upsample is a project-local wrapper defined elsewhere in this file
            upsample = Upsample(scale_factor=int(module_def["stride"]), mode="nearest")
            modules.add_module(f"upsample_{module_i}", upsample)
        elif module_def["type"] == "route":
            # route concatenates earlier layers' outputs along the channel dim
            layers = [int(x) for x in module_def["layers"].split(",")]
            filters = sum([output_filters[1:][i] for i in layers])
            modules.add_module(f"route_{module_i}", EmptyLayer())
        elif module_def["type"] == "shortcut":
            # residual connection: channel count mirrors the source layer
            filters = output_filters[1:][int(module_def["from"])]
            modules.add_module(f"shortcut_{module_i}", EmptyLayer())
        elif module_def["type"] == "yolo":
            anchor_idxs = [int(x) for x in module_def["mask"].split(",")]
            # Extract anchors
            anchors = [int(x) for x in module_def["anchors"].split(",")]
            anchors = [(anchors[i], anchors[i + 1]) for i in range(0, len(anchors), 2)]
            anchors = [anchors[i] for i in anchor_idxs]
            num_classes = int(module_def["classes"])
            img_size = int(hyperparams["height"])
            # Define detection layer
            yolo_layer = YOLOLayer(anchors, num_classes, img_size)
            modules.add_module(f"yolo_{module_i}", yolo_layer)
        # Register module list and number of output filters
        module_list.append(modules)
        output_filters.append(filters)
def prepare_yolo(model_dir):
    ''' Download yolo model files and load the model weights
    :param model_dir: Directory path where to store yolo model weights and yolo model configuration file.
    :return: Yolo model after loading model weights
    '''
    # Fetch the darknet config if not cached locally.
    # NOTE(review): downloads via `wget` through os.system — assumes wget is
    # on PATH and does not check the exit status; confirm acceptable.
    cfg_file = os.path.join(model_dir, 'yolov3.cfg')
    if not os.path.exists(cfg_file):
        download_command = 'wget https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolov3.cfg -O ' + cfg_file
        os.system(download_command)
    # Fetch the pretrained weights (~240 MB) if not cached locally.
    weight_file = os.path.join(model_dir, 'yolov3.weights')
    if not os.path.exists(weight_file):
        download_command = 'wget https://pjreddie.com/media/files/yolov3.weights -O ' + weight_file
        os.system(download_command)
    # Darknet is defined elsewhere in this module; 416 is the input image size.
    yolo_model = Darknet(cfg_file, 416)
    yolo_model.load_darknet_weights(weight_file)
    print ('prepared yolo model')
    return yolo_model
# if __name__ == '__main__':
# prepare_yolo(model_dir = '/home/face-r/Steps_face_recognition/emotic/debug/models') | 37.038384 | 115 | 0.678412 |
b44ac7b8e26906825e3b89cdfb277cf731bbe790 | 5,557 | py | Python | pytracking-master/ltr/train_settings/bbreg/atom.py | wsumel/AMMC | ef101878b4a97f07984186ea09146348c0526fa6 | [
"Apache-2.0"
] | 3 | 2021-12-02T11:34:37.000Z | 2021-12-19T09:30:10.000Z | pytracking-master/ltr/train_settings/bbreg/atom.py | wsumel/AMMC | ef101878b4a97f07984186ea09146348c0526fa6 | [
"Apache-2.0"
] | null | null | null | pytracking-master/ltr/train_settings/bbreg/atom.py | wsumel/AMMC | ef101878b4a97f07984186ea09146348c0526fa6 | [
"Apache-2.0"
] | null | null | null | import torch.nn as nn
import torch.optim as optim
from ltr.dataset import Lasot, TrackingNet, MSCOCOSeq, Got10k
from ltr.data import processing, sampler, LTRLoader
import ltr.models.bbreg.atom as atom_models
from ltr import actors
from ltr.trainers import LTRTrainer
import ltr.data.transforms as tfm
| 55.57 | 133 | 0.638114 |
b44b2dc4ce40901657329bbb40489909361c416f | 281 | py | Python | exercise 8.6.py | tuyanyang/python_exercise | c1027c2451d7f3c0fd00152a5430386d930ef9ef | [
"Apache-2.0"
] | null | null | null | exercise 8.6.py | tuyanyang/python_exercise | c1027c2451d7f3c0fd00152a5430386d930ef9ef | [
"Apache-2.0"
] | null | null | null | exercise 8.6.py | tuyanyang/python_exercise | c1027c2451d7f3c0fd00152a5430386d930ef9ef | [
"Apache-2.0"
] | null | null | null | nums = list()
while True:
nStr = input('Enter a number: ')
try:
if nStr == 'done':
break
n = float(nStr)
nums.append(n)
except:
print('Invalid input')
continue
print('Maximum: ',max(nums))
print('Minimum: ',min(nums)) | 21.615385 | 36 | 0.519573 |
b44cdf1520f9983049c66891c92f13dc5a062fff | 5,899 | py | Python | gui/activity_list.py | keremkoseoglu/Kifu | bed7a15f71e2345c654b1adab07a5edecdbae342 | [
"MIT"
] | null | null | null | gui/activity_list.py | keremkoseoglu/Kifu | bed7a15f71e2345c654b1adab07a5edecdbae342 | [
"MIT"
] | 82 | 2020-06-25T09:45:01.000Z | 2022-03-31T09:35:31.000Z | gui/activity_list.py | keremkoseoglu/Kifu | bed7a15f71e2345c654b1adab07a5edecdbae342 | [
"MIT"
] | null | null | null | """ Activity list window """
import tkinter
import tkinter.ttk
from model import activity, invoice
from model.activity import Activity
from model.company import Company
from gui.activity import ActivityWindow
from gui.activity_split import ActivitySplit
from gui.invoice import InvoiceWindow
from gui.popup_file import popup_email
from gui.prime_singleton import PrimeSingleton
from util import activity_xlsx_report, backup, date_time
import config
| 34.098266 | 97 | 0.66418 |
b44e0121e131edfd41c92b9e516f42e320c6b70f | 3,551 | py | Python | src/cactus/shared/commonTest.py | thiagogenez/cactus | 910234eb8bafca33e6a219079c8d988b6f43bc59 | [
"MIT-0"
] | 209 | 2016-11-12T14:16:50.000Z | 2022-03-30T04:44:11.000Z | src/cactus/shared/commonTest.py | thiagogenez/cactus | 910234eb8bafca33e6a219079c8d988b6f43bc59 | [
"MIT-0"
] | 468 | 2016-11-06T01:16:43.000Z | 2022-03-31T16:24:37.000Z | src/cactus/shared/commonTest.py | thiagogenez/cactus | 910234eb8bafca33e6a219079c8d988b6f43bc59 | [
"MIT-0"
] | 75 | 2017-03-09T22:19:27.000Z | 2022-03-14T22:03:33.000Z | import os
import shutil
import unittest
from base64 import b64encode
from sonLib.bioio import TestStatus
from sonLib.bioio import getTempFile
from sonLib.bioio import getTempDirectory
from sonLib.bioio import system
from toil.job import Job
from toil.common import Toil
from cactus.shared.common import cactus_call, ChildTreeJob
# Allow running this test module directly (e.g. `python commonTest.py`).
if __name__ == '__main__':
    unittest.main()
| 34.475728 | 84 | 0.619825 |
b44e0a41d16e0ba8bfc1be48250cce3e7506e1d1 | 7,185 | py | Python | porespy/networks/__getnet__.py | hfathian/porespy | 8747e675ba8e6410d8448492c70f6911e0eb816a | [
"MIT"
] | 3 | 2020-09-02T20:02:55.000Z | 2021-07-09T03:50:49.000Z | porespy/networks/__getnet__.py | hfathian/porespy | 8747e675ba8e6410d8448492c70f6911e0eb816a | [
"MIT"
] | null | null | null | porespy/networks/__getnet__.py | hfathian/porespy | 8747e675ba8e6410d8448492c70f6911e0eb816a | [
"MIT"
] | null | null | null | import sys
import numpy as np
import openpnm as op
from tqdm import tqdm
import scipy.ndimage as spim
from porespy.tools import extend_slice
import openpnm.models.geometry as op_gm
def regions_to_network(im, dt=None, voxel_size=1):
    r"""
    Analyzes an image that has been partitioned into pore regions and extracts
    the pore and throat geometry as well as network connectivity.

    Parameters
    ----------
    im : ND-array
        An image of the pore space partitioned into individual pore regions.
        Note that this image must have zeros indicating the solid phase.

    dt : ND-array
        The distance transform of the pore space.  If not given it will be
        calculated, but it can save time to provide one if available.

    voxel_size : scalar
        The resolution of the image, expressed as the length of one side of a
        voxel, so the volume of a voxel would be **voxel_size**-cubed.  The
        default is 1, which is useful when overlaying the PNM on the original
        image since the scale of the image is alway 1 unit lenth per voxel.

    Returns
    -------
    A dictionary containing all the pore and throat size data, as well as the
    network topological information.  The dictionary names use the OpenPNM
    convention (i.e. 'pore.coords', 'throat.conns') so it may be converted
    directly to an OpenPNM network object using the ``update`` command.
    """
    print('-' * 60, flush=True)
    print('Extracting pore and throat information from image', flush=True)
    from skimage.morphology import disk, ball
    struc_elem = disk if im.ndim == 2 else ball

    # if ~np.any(im == 0):
    #     raise Exception('The received image has no solid phase (0\'s)')

    if dt is None:
        dt = spim.distance_transform_edt(im > 0)
        dt = spim.gaussian_filter(input=dt, sigma=0.5)

    # Get 'slices' into im for each pore region
    slices = spim.find_objects(im)

    # Initialize arrays (one entry per labeled pore region, labels 1..max)
    Ps = np.arange(1, np.amax(im)+1)
    Np = np.size(Ps)
    p_coords = np.zeros((Np, im.ndim), dtype=float)
    p_volume = np.zeros((Np, ), dtype=float)
    p_dia_local = np.zeros((Np, ), dtype=float)
    p_dia_global = np.zeros((Np, ), dtype=float)
    p_label = np.zeros((Np, ), dtype=int)
    p_area_surf = np.zeros((Np, ), dtype=int)
    t_conns = []
    t_dia_inscribed = []
    t_area = []
    t_perimeter = []
    t_coords = []
    # dt_shape = np.array(dt.shape)

    # Start extracting size information for pores and throats
    for i in tqdm(Ps, file=sys.stdout):
        pore = i - 1
        if slices[pore] is None:
            continue
        # pad the slice so dilation below can reach neighboring regions
        s = extend_slice(slices[pore], im.shape)
        sub_im = im[s]
        sub_dt = dt[s]
        pore_im = sub_im == i
        padded_mask = np.pad(pore_im, pad_width=1, mode='constant')
        pore_dt = spim.distance_transform_edt(padded_mask)
        s_offset = np.array([i.start for i in s])
        p_label[pore] = i
        p_coords[pore, :] = spim.center_of_mass(pore_im) + s_offset
        p_volume[pore] = np.sum(pore_im)
        # inscribed diameter from the local distance transform
        # (sqrt(3) corrects for the voxel-center offset)
        p_dia_local[pore] = (2*np.amax(pore_dt)) - np.sqrt(3)
        p_dia_global[pore] = 2*np.amax(sub_dt)
        p_area_surf[pore] = np.sum(pore_dt == 1)
        # dilate the region by 1 voxel to find which neighbors it touches
        im_w_throats = spim.binary_dilation(input=pore_im, structure=struc_elem(1))
        im_w_throats = im_w_throats*sub_im
        Pn = np.unique(im_w_throats)[1:] - 1
        for j in Pn:
            if j > pore:
                # record each throat once (lower label -> higher label)
                t_conns.append([pore, j])
                vx = np.where(im_w_throats == (j + 1))
                t_dia_inscribed.append(2*np.amax(sub_dt[vx]))
                t_perimeter.append(np.sum(sub_dt[vx] < 2))
                t_area.append(np.size(vx[0]))
                t_inds = tuple([i+j for i, j in zip(vx, s_offset)])
                # throat center = voxel with the largest distance value
                temp = np.where(dt[t_inds] == np.amax(dt[t_inds]))[0][0]
                if im.ndim == 2:
                    t_coords.append(tuple((t_inds[0][temp],
                                           t_inds[1][temp])))
                else:
                    t_coords.append(tuple((t_inds[0][temp],
                                           t_inds[1][temp],
                                           t_inds[2][temp])))
    # Clean up values
    Nt = len(t_dia_inscribed)  # Get number of throats
    if im.ndim == 2:  # If 2D, add 0's in 3rd dimension
        p_coords = np.vstack((p_coords.T, np.zeros((Np, )))).T
        t_coords = np.vstack((np.array(t_coords).T, np.zeros((Nt, )))).T

    # Assemble the network dict using OpenPNM naming conventions; all
    # length/area/volume values are scaled by the physical voxel size.
    net = {}
    net['pore.all'] = np.ones((Np, ), dtype=bool)
    net['throat.all'] = np.ones((Nt, ), dtype=bool)
    net['pore.coords'] = np.copy(p_coords)*voxel_size
    net['pore.centroid'] = np.copy(p_coords)*voxel_size
    net['throat.centroid'] = np.array(t_coords)*voxel_size
    net['throat.conns'] = np.array(t_conns)
    net['pore.label'] = np.array(p_label)
    net['pore.volume'] = np.copy(p_volume)*(voxel_size**3)
    net['throat.volume'] = np.zeros((Nt, ), dtype=float)
    net['pore.diameter'] = np.copy(p_dia_local)*voxel_size
    net['pore.inscribed_diameter'] = np.copy(p_dia_local)*voxel_size
    net['pore.equivalent_diameter'] = 2*((3/4*net['pore.volume']/np.pi)**(1/3))
    net['pore.extended_diameter'] = np.copy(p_dia_global)*voxel_size
    net['pore.surface_area'] = np.copy(p_area_surf)*(voxel_size)**2
    net['throat.diameter'] = np.array(t_dia_inscribed)*voxel_size
    net['throat.inscribed_diameter'] = np.array(t_dia_inscribed)*voxel_size
    net['throat.area'] = np.array(t_area)*(voxel_size**2)
    net['throat.perimeter'] = np.array(t_perimeter)*voxel_size
    net['throat.equivalent_diameter'] = (np.array(t_area) * (voxel_size**2))**0.5
    # throat lengths: pore-center-to-throat-center distances, trimmed by
    # each pore's inscribed radius
    P12 = net['throat.conns']
    PT1 = np.sqrt(np.sum(((p_coords[P12[:, 0]]-t_coords) * voxel_size)**2, axis=1))
    PT2 = np.sqrt(np.sum(((p_coords[P12[:, 1]]-t_coords) * voxel_size)**2, axis=1))
    net['throat.total_length'] = PT1 + PT2
    PT1 = PT1-p_dia_local[P12[:, 0]]/2*voxel_size
    PT2 = PT2-p_dia_local[P12[:, 1]]/2*voxel_size
    net['throat.length'] = PT1 + PT2
    dist = (p_coords[P12[:, 0]]-p_coords[P12[:, 1]])*voxel_size
    net['throat.direct_length'] = np.sqrt(np.sum(dist**2, axis=1))
    # Make a dummy openpnm network to get the conduit lengths
    pn = op.network.GenericNetwork()
    pn.update(net)
    pn.add_model(propname='throat.endpoints',
                 model=op_gm.throat_endpoints.spherical_pores,
                 pore_diameter='pore.inscribed_diameter',
                 throat_diameter='throat.inscribed_diameter')
    pn.add_model(propname='throat.conduit_lengths',
                 model=op_gm.throat_length.conduit_lengths)
    pn.add_model(propname='pore.area',
                 model=op_gm.pore_area.sphere)
    net['throat.endpoints.head'] = pn['throat.endpoints.head']
    net['throat.endpoints.tail'] = pn['throat.endpoints.tail']
    net['throat.conduit_lengths.pore1'] = pn['throat.conduit_lengths.pore1']
    net['throat.conduit_lengths.pore2'] = pn['throat.conduit_lengths.pore2']
    net['throat.conduit_lengths.throat'] = pn['throat.conduit_lengths.throat']
    net['pore.area'] = pn['pore.area']
    # drop the scratch project so repeated calls don't accumulate state
    prj = pn.project
    prj.clear()
    wrk = op.Workspace()
    wrk.close_project(prj)
    return net
| 43.545455 | 83 | 0.622825 |
b44ef5d465bb9fde348df90c5e65dba1ad7814be | 67,560 | py | Python | pandas/core/internals.py | lodagro/pandas | dfcf74679a273395cc9d7b3db78a1fbbc17c4f57 | [
"PSF-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | pandas/core/internals.py | lodagro/pandas | dfcf74679a273395cc9d7b3db78a1fbbc17c4f57 | [
"PSF-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | pandas/core/internals.py | lodagro/pandas | dfcf74679a273395cc9d7b3db78a1fbbc17c4f57 | [
"PSF-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | import itertools
from datetime import datetime
from numpy import nan
import numpy as np
from pandas.core.common import _possibly_downcast_to_dtype, isnull
from pandas.core.index import Index, MultiIndex, _ensure_index, _handle_legacy_indexes
from pandas.core.indexing import _check_slice_bounds, _maybe_convert_indices
import pandas.core.common as com
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.core.expressions as expressions
from pandas.tslib import Timestamp
from pandas.util import py3compat
def reindex_axis(self, indexer, axis=1, fill_value=np.nan, mask_info=None):
    """
    Build a new Block whose values are taken along *axis* using a
    pre-computed indexer; slots marked missing are filled with *fill_value*.
    """
    # axis 0 is the items axis and is handled elsewhere
    if axis < 1:
        raise AssertionError('axis must be at least 1, got %d' % axis)
    taken = com.take_nd(self.values, indexer, axis, fill_value=fill_value,
                        mask_info=mask_info)
    return make_block(taken, self.items, self.ref_items)
def reindex_items_from(self, new_ref_items, copy=True):
    """
    Reindex to only those items contained in the input set of items

    E.g. if you have ['a', 'b'], and the input items is ['b', 'c', 'd'],
    then the resulting items will be ['b']

    Returns
    -------
    reindexed : Block
    """
    new_ref_items, indexer = self.items.reindex(new_ref_items)
    if indexer is None:
        # items already match the target: reuse (or copy) values wholesale
        new_items = new_ref_items
        new_values = self.values.copy() if copy else self.values
    else:
        # -1 marks target items absent from this block; drop them first
        masked_idx = indexer[indexer != -1]
        new_values = com.take_nd(self.values, masked_idx, axis=0,
                                 allow_fill=False)
        new_items = self.items.take(masked_idx)
    return make_block(new_values, new_items, self.ref_items)
def set(self, item, value):
    """
    Modify Block in-place: overwrite the values row belonging to *item*.

    Returns
    -------
    None
    """
    self.values[self.items.get_loc(item)] = value
def delete(self, item):
    """
    Return a new Block with *item* removed; this block is left untouched.

    Returns
    -------
    y : Block (new object)
    """
    pos = self.items.get_loc(item)
    trimmed_items = self.items.delete(pos)
    trimmed_values = np.delete(self.values, pos, 0)
    return make_block(trimmed_values, trimmed_items, self.ref_items)
def downcast(self, dtypes = None):
    """ try to downcast each item (row) to the dtype mapped for it in
    *dtypes*, falling back to this block's default downcast dtype;
    returns a list of single-item blocks """
    if dtypes is None:
        dtypes = dict()
    values = self.values
    blocks = []
    for i, item in enumerate(self.items):
        dtype = dtypes.get(item,self._downcast_dtype)
        if dtype is None:
            # no target dtype: keep the row as-is (reshaped back to 2-D)
            nv = _block_shape(values[i])
            blocks.append(make_block(nv, [ item ], self.ref_items))
            continue
        nv = _possibly_downcast_to_dtype(values[i], np.dtype(dtype))
        nv = _block_shape(nv)
        blocks.append(make_block(nv, [ item ], self.ref_items))
    return blocks
def astype(self, dtype, copy = True, raise_on_error = True):
    """
    Coerce to the new type (if copy=True, return a new copy)
    raise on an except if raise == True
    """
    try:
        newb = make_block(com._astype_nansafe(self.values, dtype, copy = copy),
                          self.items, self.ref_items)
    except:
        # conversion failed: either propagate or fall back to the old values
        if raise_on_error is True:
            raise
        newb = self.copy() if copy else self
    # sanity check: a numeric->numeric astype must not shrink the data
    # (shape change or silent itemsize loss when not copying)
    if newb.is_numeric and self.is_numeric:
        if (newb.shape != self.shape or
            (not copy and newb.itemsize < self.itemsize)):
            raise TypeError("cannot set astype for copy = [%s] for dtype "
                            "(%s [%s]) with smaller itemsize that current "
                            "(%s [%s])" % (copy, self.dtype.name,
                            self.itemsize, newb.dtype.name, newb.itemsize))
    return newb
def convert(self, copy = True, **kwargs):
    """ attempt to coerce any object types to better types;
    by definition we are not an ObjectBlock here, so this base
    implementation just returns self (or a copy when copy=True) """
    if copy:
        return self.copy()
    return self
def _try_cast_result(self, result):
""" try to cast the result to our original type,
we may have roundtripped thru object in the mean-time """
return result
def _try_coerce_args(self, values, other):
""" provide coercion to our input arguments """
return values, other
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
return result
def to_native_types(self, slicer=None, na_rep='', **kwargs):
    """ return the block values as nested python lists, optionally
    column-sliced, with missing entries replaced by *na_rep* """
    out = self.values
    if slicer is not None:
        out = out[:, slicer]
    # object dtype so na_rep (a string) can be stored alongside any values
    out = np.array(out, dtype=object)
    out[isnull(out)] = na_rep
    return out.tolist()
def replace(self, to_replace, value, inplace=False, filter=None):
    """ replace the to_replace value with value, possible to create new blocks here
    this is just a call to putmask """
    mask = com.mask_missing(self.values, to_replace)
    if filter is not None:
        # restrict the replacement to the items named in *filter*
        for i, item in enumerate(self.items):
            if item not in filter:
                mask[i] = False
    if not mask.any():
        # nothing matched: hand back self (or a copy) unchanged
        if inplace:
            return [ self ]
        return [ self.copy() ]
    return self.putmask(mask, value, inplace=inplace)
def putmask(self, mask, new, inplace=False):
    """ putmask the data to the block; it is possible that we may create a new dtype of block
    return the resulting block(s) """
    new_values = self.values if inplace else self.values.copy()

    # may need to align the new (a DataFrame/Series-like with reindex_axis)
    if hasattr(new, 'reindex_axis'):
        axis = getattr(new, '_het_axis', 0)
        new = new.reindex_axis(self.items, axis=axis, copy=False).values.T

    # may need to align the mask the same way
    if hasattr(mask, 'reindex_axis'):
        axis = getattr(mask, '_het_axis', 0)
        mask = mask.reindex_axis(self.items, axis=axis, copy=False).values.T

    if self._can_hold_element(new):
        # fast path: new fits this block's dtype, mask in place
        new = self._try_cast(new)
        np.putmask(new_values, mask, new)

    # maybe upcast me
    elif mask.any():
        # need to go column by column, emitting one block per item so each
        # row can be promoted to whatever dtype it needs
        new_blocks = []
        for i, item in enumerate(self.items):
            m = mask[i]

            # need a new block
            if m.any():
                n = new[i] if isinstance(new, np.ndarray) else new

                # type of the new block
                dtype, _ = com._maybe_promote(np.array(n).dtype)

                # we need to explicitly astype here to make a copy
                nv = new_values[i].astype(dtype)

                # we create a new block type
                np.putmask(nv, m, n)

            else:
                nv = new_values[i] if inplace else new_values[i].copy()

            nv = _block_shape(nv)
            new_blocks.append(make_block(nv, [ item ], self.ref_items))

        return new_blocks

    if inplace:
        return [ self ]

    return [ make_block(new_values, self.items, self.ref_items) ]
def diff(self, n):
""" return block for the diff of the values """
new_values = com.diff(self.values, n, axis=1)
return make_block(new_values, self.items, self.ref_items)
def shift(self, indexer, periods):
""" shift the block by periods, possibly upcast """
new_values = self.values.take(indexer, axis=1)
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = com._maybe_upcast(new_values)
if periods > 0:
new_values[:, :periods] = fill_value
else:
new_values[:, periods:] = fill_value
return make_block(new_values, self.items, self.ref_items)
def eval(self, func, other, raise_on_error = True, try_cast = False):
"""
evaluate the block; return result block from the result
Parameters
----------
func : how to combine self, other
other : a ndarray/object
raise_on_error : if True, raise when I can't perform the function, False by default (and just return
the data that we had coming in)
Returns
-------
a new block, the result of the func
"""
values = self.values
# see if we can align other
if hasattr(other, 'reindex_axis'):
axis = getattr(other, '_het_axis', 0)
other = other.reindex_axis(self.items, axis=axis, copy=True).values
# make sure that we can broadcast
is_transposed = False
if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
if values.ndim != other.ndim or values.shape == other.shape[::-1]:
values = values.T
is_transposed = True
values, other = self._try_coerce_args(values, other)
args = [ values, other ]
try:
result = self._try_coerce_result(func(*args))
except (Exception), detail:
if raise_on_error:
raise TypeError('Could not operate [%s] with block values [%s]'
% (repr(other),str(detail)))
else:
# return the values
result = np.empty(values.shape,dtype='O')
result.fill(np.nan)
if not isinstance(result, np.ndarray):
raise TypeError('Could not compare [%s] with block values'
% repr(other))
if is_transposed:
result = result.T
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return make_block(result, self.items, self.ref_items)
    def where(self, other, cond, raise_on_error = True, try_cast = False):
        """
        evaluate the block; return result block(s) from the result

        Parameters
        ----------
        other : a ndarray/object
        cond : the condition to respect
        raise_on_error : if True, raise when I can't perform the function, False by default (and just return
             the data that we had coming in)

        Returns
        -------
        a new block(s), the result of the func

        NOTE(review): ``func`` and ``create_block`` used below are not
        defined in this excerpt -- in the original source ``func`` is a
        nested where-helper defined just under the "our where function"
        comment; those lines appear to have been lost.  Confirm before use.
        """
        values = self.values

        # see if we can align other (realign DataFrame-likes on our items)
        if hasattr(other,'reindex_axis'):
            axis = getattr(other,'_het_axis',0)
            other = other.reindex_axis(self.items, axis=axis, copy=True).values

        # make sure that we can broadcast
        is_transposed = False
        if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
            if values.ndim != other.ndim or values.shape == other.shape[::-1]:
                values = values.T
                is_transposed = True

        # see if we can align cond
        if not hasattr(cond,'shape'):
            raise ValueError("where must have a condition that is ndarray like")
        if hasattr(cond,'reindex_axis'):
            axis = getattr(cond,'_het_axis',0)
            cond = cond.reindex_axis(self.items, axis=axis, copy=True).values
        else:
            cond = cond.values

        # may need to undo transpose of values
        if hasattr(values, 'ndim'):
            if values.ndim != cond.ndim or values.shape == cond.shape[::-1]:
                values = values.T
                is_transposed = not is_transposed

        # our where function
        # see if we can operate on the entire block, or need item-by-item
        if not self._can_hold_na:
            # dtype cannot hold NaN: process item-by-item so each row can
            # be upcast independently
            axis = cond.ndim-1
            result_blocks = []
            for item in self.items:
                loc = self.items.get_loc(item)
                item = self.items.take([loc])
                v = values.take([loc],axis=axis)
                c = cond.take([loc],axis=axis)
                o = other.take([loc],axis=axis) if hasattr(other,'shape') else other
                result = func(c,v,o)
                if len(result) == 1:
                    # scalar-ish result: broadcast to the row's full shape
                    result = np.repeat(result,self.shape[1:])
                result = _block_shape(result,ndim=self.ndim,shape=self.shape[1:])
                result_blocks.append(create_block(result, item, transpose = False))
            return result_blocks
        else:
            result = func(cond,values,other)
            return create_block(result, self.items)
class NumericBlock(Block):
    # base class for all numeric-dtype blocks (float/complex/int/bool)
    is_numeric = True
    _can_hold_na = True
class FloatBlock(NumericBlock):
    # dtype to attempt when downcasting float data
    _downcast_dtype = 'int64'

    def to_native_types(self, slicer=None, na_rep='', float_format=None, **kwargs):
        """Convert to our native types format (nested lists), slicing if
        desired; nulls become ``na_rep`` and ``float_format`` (a %-style
        format string) is applied to the remaining values."""
        values = self.values
        if slicer is not None:
            values = values[:,slicer]
        values = np.array(values,dtype=object)
        mask = isnull(values)
        values[mask] = na_rep
        if float_format:
            # FIX: use ``~`` (logical not) rather than unary minus; negating
            # a boolean ndarray with ``-`` raises a TypeError on modern numpy.
            imask = (~mask).ravel()
            values.flat[imask] = np.array([ float_format % val for val in values.ravel()[imask] ])
        return values.tolist()
class ComplexBlock(NumericBlock):
    # NOTE(review): the class body is empty here, which is a SyntaxError in
    # Python -- it appears to have been lost in extraction.  ``pass``
    # restores syntactic validity; behaviour is inherited from NumericBlock.
    pass
class IntBlock(NumericBlock):
    # integer dtypes cannot represent NaN, so missing values force an upcast
    _can_hold_na = False
class BoolBlock(NumericBlock):
    # marker flag: boolean dtype block
    is_bool = True
    # booleans cannot represent NaN without upcasting
    _can_hold_na = False
class ObjectBlock(Block):
    # marker flag: object dtype block
    is_object = True
    _can_hold_na = True

    def convert(self, convert_dates = True, convert_numeric = True, copy = True):
        """ attempt to coerce any object types to better types
            return a copy of the block (if copy = True)

            by definition we ARE an ObjectBlock!!!!!

            can return multiple blocks!
            """
        # NOTE(review): the ``copy`` parameter is never consulted in this
        # body; com._possibly_convert_objects appears to produce new arrays
        # regardless -- confirm whether copy=False should share data.

        # attempt to create new type blocks, one per item
        blocks = []
        for i, c in enumerate(self.items):
            values = self.get(c)
            values = com._possibly_convert_objects(values, convert_dates=convert_dates, convert_numeric=convert_numeric)
            values = _block_shape(values)
            items = self.items.take([i])
            newb = make_block(values, items, self.ref_items)
            blocks.append(newb)
        return blocks
# numpy dtypes for datetime64 / timedelta64 at nanosecond resolution
_NS_DTYPE = np.dtype('M8[ns]')
_TD_DTYPE = np.dtype('m8[ns]')

# TODO: flexible with index=None and/or items=None
def copy(self, deep=True):
"""
Make deep or shallow copy of BlockManager
Parameters
----------
deep : boolean, default True
If False, return shallow copy (do not copy data)
Returns
-------
copy : BlockManager
"""
copy_blocks = [block.copy(deep=deep) for block in self.blocks]
# copy_axes = [ax.copy() for ax in self.axes]
copy_axes = list(self.axes)
return BlockManager(copy_blocks, copy_axes, do_integrity_check=False)
    def _interleave(self, items):
        """
        Return ndarray from blocks with specified item order
        Items must be contained in the blocks
        """
        # choose a dtype every block can safely be cast to
        dtype = _interleaved_dtype(self.blocks)
        items = _ensure_index(items)
        result = np.empty(self.shape, dtype=dtype)
        # track which result rows have been filled by some block
        itemmask = np.zeros(len(items), dtype=bool)
        # By construction, all of the item should be covered by one of the
        # blocks
        if items.is_unique:
            for block in self.blocks:
                indexer = items.get_indexer(block.items)
                if (indexer == -1).any():
                    raise AssertionError('Items must contain all block items')
                result[indexer] = block.get_values(dtype)
                itemmask[indexer] = 1
        else:
            # duplicate items: fall back to boolean membership per block
            for block in self.blocks:
                mask = items.isin(block.items)
                indexer = mask.nonzero()[0]
                if (len(indexer) != len(block.items)):
                    raise AssertionError('All items must be in block items')
                result[indexer] = block.get_values(dtype)
                itemmask[indexer] = 1
        if not itemmask.all():
            raise AssertionError('Some items were not contained in blocks')
        return result
    def fast_2d_xs(self, loc, copy=False):
        """Retrieve a cross-section (column ``loc``) as a 1-d ndarray.

        Fast path when there is a single block; otherwise values are
        interleaved item-by-item, which always requires ``copy=True``.
        """
        if len(self.blocks) == 1:
            result = self.blocks[0].values[:, loc]
            if copy:
                result = result.copy()
            return result
        if not copy:
            raise Exception('cannot get view of mixed-type or '
                            'non-consolidated DataFrame')
        # mixed-type path: build a fresh array in a common dtype
        dtype = _interleaved_dtype(self.blocks)
        items = self.items
        n = len(items)
        result = np.empty(n, dtype=dtype)
        for blk in self.blocks:
            for j, item in enumerate(blk.items):
                # place this block's value at the item's global position
                i = items.get_loc(item)
                result[i] = blk._gi((j, loc))
        return result
def consolidate(self):
"""
Join together blocks having same dtype
Returns
-------
y : BlockManager
"""
if self.is_consolidated():
return self
new_blocks = _consolidate(self.blocks, self.items)
return BlockManager(new_blocks, self.axes)
    def get_scalar(self, tup):
        """
        Retrieve single item

        ``tup`` is ``(item, *axis_labels)``: the first element selects the
        block row; the remaining labels are looked up on the other axes.
        """
        item = tup[0]
        _, blk = self._find_block(item)

        # this could obviously be seriously sped up in cython
        # (the trailing comma is intentional: it makes item_loc a 1-tuple)
        item_loc = blk.items.get_loc(item),
        full_loc = item_loc + tuple(ax.get_loc(x)
                                    for ax, x in zip(self.axes[1:], tup[1:]))
        return blk.values[full_loc]
    def set(self, item, value):
        """
        Set new item in-place. Does not consolidate. Adds new Block if not
        contained in the current set of items

        NOTE(review): ``_set_item`` used below is not defined in this
        excerpt; in the original it is a nested helper that writes the
        values into the owning block -- confirm it is in scope.
        """
        # ensure value is at least manager-shaped (items x ...)
        value = _block_shape(value,self.ndim-1)
        if value.shape[1:] != self.shape[1:]:
            raise AssertionError('Shape of new values must be compatible '
                                 'with manager shape')
        try:
            loc = self.items.get_loc(item)
            if isinstance(loc, int):
                # unique item: single row assignment
                _set_item(self.items[loc], value)
            else:
                # duplicate items: loc selects several rows at once
                subset = self.items[loc]
                if len(value) != len(subset):
                    raise AssertionError(
                        'Number of items to set did not match')
                for i, (item, arr) in enumerate(zip(subset, value)):
                    _set_item(item, arr[None, :])
        except KeyError:
            # insert at end
            self.insert(len(self.items), item, value)
        self._known_consolidated = False
def _delete_from_block(self, i, item):
"""
Delete and maybe remove the whole block
"""
block = self.blocks.pop(i)
for b in block.split_block_at(item):
self.blocks.append(b)
def reindex_indexer(self, new_axis, indexer, axis=1, fill_value=np.nan):
"""
pandas-indexer with -1's only.
"""
if axis == 0:
return self._reindex_indexer_items(new_axis, indexer, fill_value)
new_blocks = []
for block in self.blocks:
newb = block.reindex_axis(indexer, axis=axis, fill_value=fill_value)
new_blocks.append(newb)
new_axes = list(self.axes)
new_axes[axis] = new_axis
return BlockManager(new_blocks, new_axes)
def reindex_items(self, new_items, copy=True, fill_value=np.nan):
"""
"""
new_items = _ensure_index(new_items)
data = self
if not data.is_consolidated():
data = data.consolidate()
return data.reindex_items(new_items)
# TODO: this part could be faster (!)
new_items, indexer = self.items.reindex(new_items)
# could have some pathological (MultiIndex) issues here
new_blocks = []
if indexer is None:
for blk in self.blocks:
if copy:
new_blocks.append(blk.reindex_items_from(new_items))
else:
blk.ref_items = new_items
new_blocks.append(blk)
else:
for block in self.blocks:
newb = block.reindex_items_from(new_items, copy=copy)
if len(newb.items) > 0:
new_blocks.append(newb)
mask = indexer == -1
if mask.any():
extra_items = new_items[mask]
na_block = self._make_na_block(extra_items, new_items,
fill_value=fill_value)
new_blocks.append(na_block)
new_blocks = _consolidate(new_blocks, new_items)
return BlockManager(new_blocks, [new_items] + self.axes[1:])
def _is_indexed_like(self, other):
"""
Check all axes except items
"""
if self.ndim != other.ndim:
raise AssertionError(('Number of dimensions must agree '
'got %d and %d') % (self.ndim, other.ndim))
for ax, oax in zip(self.axes[1:], other.axes[1:]):
if not ax.equals(oax):
return False
return True
def construction_error(tot_items, block_shape, axes):
    """ raise a helpful message about our construction """
    passed = tuple(map(int, [tot_items] + list(block_shape)))
    implied = tuple(map(int, [len(ax) for ax in axes]))
    raise ValueError("Shape of passed values is %s, indices imply %s"
                     % (passed, implied))
def _simple_blockify(tuples, ref_items, dtype):
    """Return a one-element list containing a single block of one dtype,
    coercing the stacked values to ``dtype`` when it is not None."""
    stacked_items, stacked = _stack_arrays(tuples, ref_items, dtype)

    # CHECK DTYPE?
    if dtype is not None and stacked.dtype != dtype:  # pragma: no cover
        stacked = stacked.astype(dtype)

    return [make_block(stacked, stacked_items, ref_items)]
def _multi_blockify(tuples, ref_items, dtype = None):
    """Return a list of blocks, one per run of equal dtypes in ``tuples``."""
    new_blocks = []
    # group consecutive (item, array) tuples sharing a dtype; the group key
    # is the dtype actually used for stacking (the ``dtype`` argument only
    # serves as the default signature, matching the original behaviour)
    for grp_dtype, tup_block in itertools.groupby(tuples,
                                                  lambda x: x[1].dtype):
        block_items, values = _stack_arrays(list(tup_block), ref_items,
                                            grp_dtype)
        new_blocks.append(make_block(values, block_items, ref_items))
    return new_blocks
def _consolidate(blocks, items):
    """
    Merge blocks having same dtype
    """
    get_dtype = lambda x: x.dtype.name

    # sort by dtype
    # NOTE(review): blocks are *sorted* by dtype name but *grouped* by the
    # dtype object itself; this works because equal dtypes share a name,
    # but the two keys would ideally agree -- confirm before changing.
    grouper = itertools.groupby(sorted(blocks, key=get_dtype),
                                lambda x: x.dtype)

    new_blocks = []
    for dtype, group_blocks in grouper:
        new_block = _merge_blocks(list(group_blocks), items, dtype)
        new_blocks.append(new_block)

    return new_blocks
def _block_shape(values, ndim=1, shape=None):
""" guarantee the shape of the values to be at least 1 d """
if values.ndim == ndim:
if shape is None:
shape = values.shape
values = values.reshape(tuple((1,) + shape))
return values
| 33.004397 | 120 | 0.574571 |
b44f004ae7c6b3eb8725a6532e9b3868344a526e | 4,919 | py | Python | Sketches/MH/PipeBuilder/BuildViewer.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
] | 12 | 2015-10-20T10:22:01.000Z | 2021-07-19T10:09:44.000Z | Sketches/MH/PipeBuilder/BuildViewer.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
] | 2 | 2015-10-20T10:22:55.000Z | 2017-02-13T11:05:25.000Z | Sketches/MH/PipeBuilder/BuildViewer.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
] | 6 | 2015-03-09T12:51:59.000Z | 2020-03-01T13:06:21.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
#
# Simple control window for a looping audio player
import pygame
from Axon.Ipc import producerFinished, shutdownMicroprocess
from Kamaelia.Visualisation.PhysicsGraph.TopologyViewerComponent import TopologyViewerComponent
from Kamaelia.Physics.Simple import SimpleLaws, Particle
import time
| 36.708955 | 121 | 0.551332 |
b44f498d26d9dd58f69d6d12b6ff289ae252ed43 | 2,076 | py | Python | examples/exp_example.py | physimals/avb | 16663a935de35e4042c77000ea47abd7e5cd16ad | [
"Apache-2.0"
] | null | null | null | examples/exp_example.py | physimals/avb | 16663a935de35e4042c77000ea47abd7e5cd16ad | [
"Apache-2.0"
] | null | null | null | examples/exp_example.py | physimals/avb | 16663a935de35e4042c77000ea47abd7e5cd16ad | [
"Apache-2.0"
] | null | null | null | """
Example of usage of the AVB framework to infer a single exponential decay
model.
This uses the Python classes directly to infer the parameters for a single
instance of noisy data constructed as a Numpy array.
"""
import sys
import logging
import numpy as np
from vaby_avb import Avb
import vaby
# Uncomment line below to start the random number generator off with the same seed value
# each time, for repeatable results
#np.random.seed(0)
# Ground truth parameters
PARAMS_TRUTH = [42, 0.5]
NOISE_PREC_TRUTH = 0.1
NOISE_VAR_TRUTH = 1/NOISE_PREC_TRUTH
NOISE_STD_TRUTH = np.sqrt(NOISE_VAR_TRUTH)
print("Ground truth: a=%f, r=%f, noise=%f (precision)" % (PARAMS_TRUTH[0], PARAMS_TRUTH[1], NOISE_PREC_TRUTH))
# Create single exponential model
model = vaby.get_model_class("exp")(None)
# Observed data samples are generated by Numpy from the ground truth
# Gaussian distribution. Reducing the number of samples should make
# the inference less 'confident' - i.e. the output variances for
# MU and BETA will increase
N = 100
DT = 0.02
t = np.array([float(t)*DT for t in range(N)])
DATA_CLEAN = model.evaluate(PARAMS_TRUTH, t).numpy()
DATA_NOISY = DATA_CLEAN + np.random.normal(0, NOISE_STD_TRUTH, [N])
print("Time values:")
print(t)
print("Data samples (clean):")
print(DATA_CLEAN)
print("Data samples (noisy):")
print(DATA_NOISY)
# Run Fabber as a comparison if desired
#import os
#import nibabel as nib
#niidata = DATA_NOISY.reshape((1, 1, 1, N))
#nii = nib.Nifti1Image(niidata, np.identity(4))
#nii.to_filename("data_noisy.nii.gz")
#os.system("fabber_exp --data=data_noisy --print-free-energy --output=fabberout --dt=%.3f --model=exp --num-exps=1 --method=vb --noise=white --overwrite --debug" % DT)
# Log to stdout
logging.getLogger().setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter('%(levelname)s : %(message)s'))
logging.getLogger().addHandler(handler)
# Run AVB inference
avb = Avb(t, vaby.DataModel(DATA_NOISY), model)
avb.run(method="leastsq", maxits=20, learning_rate=0.1, debug="--debug" in sys.argv)
| 33.483871 | 167 | 0.750482 |